diff options
Diffstat (limited to 'drivers')
218 files changed, 81433 insertions, 1680 deletions
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index cc273226dbd..e98838a060e 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -117,7 +117,7 @@ static int amba_legacy_resume(struct device *dev) #ifdef CONFIG_SUSPEND -static int amba_pm_suspend(struct device *dev) +int amba_pm_suspend(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -135,7 +135,7 @@ static int amba_pm_suspend(struct device *dev) return ret; } -static int amba_pm_resume(struct device *dev) +int amba_pm_resume(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -162,7 +162,7 @@ static int amba_pm_resume(struct device *dev) #ifdef CONFIG_HIBERNATE_CALLBACKS -static int amba_pm_freeze(struct device *dev) +int amba_pm_freeze(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -180,7 +180,7 @@ static int amba_pm_freeze(struct device *dev) return ret; } -static int amba_pm_thaw(struct device *dev) +int amba_pm_thaw(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -198,7 +198,7 @@ static int amba_pm_thaw(struct device *dev) return ret; } -static int amba_pm_poweroff(struct device *dev) +int amba_pm_poweroff(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -216,7 +216,7 @@ static int amba_pm_poweroff(struct device *dev) return ret; } -static int amba_pm_restore(struct device *dev) +int amba_pm_restore(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 5138927a416..6f86f8ca9b0 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -19,13 +19,27 @@ config DW_APB_TIMER config CLKSRC_DBX500_PRCMU bool "Clocksource PRCMU Timer" depends on UX500_SOC_DB5500 || UX500_SOC_DB8500 - default y + default y if UX500_SOC_DB8500 help Use the always on PRCMU Timer as clocksource config CLKSRC_DBX500_PRCMU_SCHED_CLOCK - bool "Clocksource PRCMU 
Timer sched_clock" - depends on (CLKSRC_DBX500_PRCMU && !NOMADIK_MTU_SCHED_CLOCK) + bool + depends on CLKSRC_DBX500_PRCMU + select HAVE_SCHED_CLOCK + help + Use the always on PRCMU Timer as sched_clock + +config CLKSRC_DB5500_MTIMER + bool "Clocksource MTIMER" + depends on UX500_SOC_DB5500 default y help + Use the always on MTIMER as clocksource + +config CLKSRC_DB5500_MTIMER_SCHED_CLOCK + bool + depends on CLKSRC_DB5500_MTIMER + select HAVE_SCHED_CLOCK + help Use the always on PRCMU Timer as sched_clock diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 8d81a1d3265..9b10f6b7536 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -9,4 +9,5 @@ obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o obj-$(CONFIG_CLKBLD_I8253) += i8253.o obj-$(CONFIG_CLKSRC_MMIO) += mmio.o obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o -obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
\ No newline at end of file +obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o +obj-$(CONFIG_CLKSRC_DB5500_MTIMER) += db5500-mtimer.o diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c index c26c369eb9e..dc71e432dc5 100644 --- a/drivers/clocksource/clksrc-dbx500-prcmu.c +++ b/drivers/clocksource/clksrc-dbx500-prcmu.c @@ -14,6 +14,9 @@ */ #include <linux/clockchips.h> #include <linux/clksrc-dbx500-prcmu.h> +#ifdef CONFIG_BOOTTIME +#include <linux/boottime.h> +#endif #include <asm/sched_clock.h> @@ -68,6 +71,23 @@ static u32 notrace dbx500_prcmu_sched_clock_read(void) #endif +#ifdef CONFIG_BOOTTIME +static unsigned long __init boottime_get_time(void) +{ + return div_s64(clocksource_cyc2ns(clocksource_dbx500_prcmu.read( + &clocksource_dbx500_prcmu), + clocksource_dbx500_prcmu.mult, + clocksource_dbx500_prcmu.shift), + 1000); +} + +static struct boottime_timer __initdata boottime_timer = { + .init = NULL, + .get_time = boottime_get_time, + .finalize = NULL, +}; +#endif + void __init clksrc_dbx500_prcmu_init(void __iomem *base) { clksrc_dbx500_timer_base = base; @@ -90,4 +110,7 @@ void __init clksrc_dbx500_prcmu_init(void __iomem *base) 32, RATE_32K); #endif clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K); +#ifdef CONFIG_BOOTTIME + boottime_activate(&boottime_timer); +#endif } diff --git a/drivers/clocksource/db5500-mtimer.c b/drivers/clocksource/db5500-mtimer.c new file mode 100644 index 00000000000..5e64da19e66 --- /dev/null +++ b/drivers/clocksource/db5500-mtimer.c @@ -0,0 +1,67 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * License Terms: GNU General Public License v2 + */ + +#include <linux/io.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/clockchips.h> +#include <linux/clksrc-db5500-mtimer.h> +#include <linux/boottime.h> + +#include <asm/sched_clock.h> + +#define MTIMER_PRIMARY_COUNTER 0x18 + +static void __iomem *db5500_mtimer_base; + +#ifdef 
CONFIG_CLKSRC_DB5500_MTIMER_SCHED_CLOCK +static DEFINE_CLOCK_DATA(cd); + +unsigned long long notrace sched_clock(void) +{ + u32 cyc; + + if (unlikely(!db5500_mtimer_base)) + return 0; + + cyc = readl_relaxed(db5500_mtimer_base + MTIMER_PRIMARY_COUNTER); + + return cyc_to_sched_clock(&cd, cyc, (u32)~0); +} + +static void notrace db5500_mtimer_update_sched_clock(void) +{ + u32 cyc = readl_relaxed(db5500_mtimer_base + MTIMER_PRIMARY_COUNTER); + update_sched_clock(&cd, cyc, (u32)~0); +} +#endif + +#ifdef CONFIG_BOOTTIME +static unsigned long __init boottime_get_time(void) +{ + return sched_clock(); +} + +static struct boottime_timer __initdata boottime_timer = { + .init = NULL, + .get_time = boottime_get_time, + .finalize = NULL, +}; +#endif + +void __init db5500_mtimer_init(void __iomem *base) +{ + db5500_mtimer_base = base; + + clocksource_mmio_init(base + MTIMER_PRIMARY_COUNTER, "mtimer", 32768, + 400, 32, clocksource_mmio_readl_up); + +#ifdef CONFIG_CLKSRC_DB5500_MTIMER_SCHED_CLOCK + init_sched_clock(&cd, db5500_mtimer_update_sched_clock, + 32, 32768); +#endif + boottime_activate(&boottime_timer); +} diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 9531fc2eda2..44aa7093235 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -39,7 +39,8 @@ obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o ################################################################################## # ARM SoC drivers -obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o +obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o +obj-$(CONFIG_UX500_SOC_DB5500) += dbx500-cpufreq.o obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 7f2f149ae40..4de1fff790f 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -376,6 +376,27 @@ show_one(scaling_cur_freq, 
cur); static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy); +int cpufreq_update_freq(int cpu, unsigned int min, unsigned int max) +{ + int ret; + struct cpufreq_policy new_policy; + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + + ret = cpufreq_get_policy(&new_policy, cpu); + if (ret) + return -EINVAL; + + new_policy.min = min; + new_policy.max = max; + + ret = __cpufreq_set_policy(policy, &new_policy); + policy->user_policy.min = policy->min; + policy->user_policy.max = policy->max; + + return ret; +} +EXPORT_SYMBOL(cpufreq_update_freq); + /** * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access */ diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 836e9b062e5..765256e4904 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -79,7 +79,6 @@ struct cpu_dbs_info_s { cputime64_t prev_cpu_wall; cputime64_t prev_cpu_nice; struct cpufreq_policy *cur_policy; - struct delayed_work work; struct cpufreq_frequency_table *freq_table; unsigned int freq_lo; unsigned int freq_lo_jiffies; @@ -95,8 +94,10 @@ struct cpu_dbs_info_s { struct mutex timer_mutex; }; static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); +static DEFINE_PER_CPU(struct delayed_work, ondemand_work); static unsigned int dbs_enable; /* number of CPUs using this policy */ +static ktime_t time_stamp; /* * dbs_mutex protects dbs_enable in governor start/stop. 
@@ -290,22 +291,23 @@ static void update_sampling_rate(unsigned int new_rate) mutex_lock(&dbs_info->timer_mutex); - if (!delayed_work_pending(&dbs_info->work)) { + if (!delayed_work_pending(&per_cpu(ondemand_work, cpu))) { mutex_unlock(&dbs_info->timer_mutex); continue; } next_sampling = jiffies + usecs_to_jiffies(new_rate); - appointed_at = dbs_info->work.timer.expires; + appointed_at = per_cpu(ondemand_work, cpu).timer.expires; if (time_before(next_sampling, appointed_at)) { mutex_unlock(&dbs_info->timer_mutex); - cancel_delayed_work_sync(&dbs_info->work); + cancel_delayed_work_sync(&per_cpu(ondemand_work, cpu)); mutex_lock(&dbs_info->timer_mutex); - schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, + schedule_delayed_work_on(dbs_info->cpu, + &per_cpu(ondemand_work, cpu), usecs_to_jiffies(new_rate)); } @@ -449,6 +451,26 @@ static struct attribute_group dbs_attr_group = { /************************** sysfs end ************************/ +static bool dbs_sw_coordinated_cpus(void) +{ + struct cpu_dbs_info_s *dbs_info; + struct cpufreq_policy *policy; + int i = 0; + int j; + + dbs_info = &per_cpu(od_cpu_dbs_info, 0); + policy = dbs_info->cur_policy; + + for_each_cpu(j, policy->cpus) { + i++; + } + + if (i > 1) + return true; /* Dependent CPUs */ + else + return false; +} + static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) { if (dbs_tuners_ins.powersave_bias) @@ -463,7 +485,6 @@ static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) { unsigned int max_load_freq; - struct cpufreq_policy *policy; unsigned int j; @@ -598,20 +619,42 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) static void do_dbs_timer(struct work_struct *work) { - struct cpu_dbs_info_s *dbs_info = - container_of(work, struct cpu_dbs_info_s, work.work); - unsigned int cpu = dbs_info->cpu; - int sample_type = dbs_info->sample_type; - + struct cpu_dbs_info_s *dbs_info; + 
unsigned int cpu = smp_processor_id(); + int sample_type; int delay; + bool sample = true; + + /* If SW dependent CPUs, use CPU 0 as leader */ + if (dbs_sw_coordinated_cpus()) { + + ktime_t time_now; + s64 delta_us; - mutex_lock(&dbs_info->timer_mutex); + dbs_info = &per_cpu(od_cpu_dbs_info, 0); + mutex_lock(&dbs_info->timer_mutex); + + time_now = ktime_get(); + delta_us = ktime_us_delta(time_now, time_stamp); + + /* Do nothing if we recently have sampled */ + if (delta_us < (s64)(dbs_tuners_ins.sampling_rate / 2)) + sample = false; + else + time_stamp = time_now; + } else { + dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + mutex_lock(&dbs_info->timer_mutex); + } + + sample_type = dbs_info->sample_type; /* Common NORMAL_SAMPLE setup */ dbs_info->sample_type = DBS_NORMAL_SAMPLE; if (!dbs_tuners_ins.powersave_bias || sample_type == DBS_NORMAL_SAMPLE) { - dbs_check_cpu(dbs_info); + if (sample) + dbs_check_cpu(dbs_info); if (dbs_info->freq_lo) { /* Setup timer for SUB_SAMPLE */ dbs_info->sample_type = DBS_SUB_SAMPLE; @@ -627,15 +670,17 @@ static void do_dbs_timer(struct work_struct *work) delay -= jiffies % delay; } } else { - __cpufreq_driver_target(dbs_info->cur_policy, - dbs_info->freq_lo, CPUFREQ_RELATION_H); + if (sample) + __cpufreq_driver_target(dbs_info->cur_policy, + dbs_info->freq_lo, + CPUFREQ_RELATION_H); delay = dbs_info->freq_lo_jiffies; } - schedule_delayed_work_on(cpu, &dbs_info->work, delay); + schedule_delayed_work_on(cpu, &per_cpu(ondemand_work, cpu), delay); mutex_unlock(&dbs_info->timer_mutex); } -static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info, int cpu) { /* We want all CPUs to do sampling nearly on same jiffy */ int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); @@ -644,13 +689,18 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) delay -= jiffies % delay; dbs_info->sample_type = DBS_NORMAL_SAMPLE; - 
INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); - schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay); + cancel_delayed_work_sync(&per_cpu(ondemand_work, cpu)); + schedule_delayed_work_on(cpu, &per_cpu(ondemand_work, cpu), delay); +} + +static inline void dbs_timer_exit(int cpu) +{ + cancel_delayed_work_sync(&per_cpu(ondemand_work, cpu)); } -static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +static void dbs_timer_exit_per_cpu(struct work_struct *dummy) { - cancel_delayed_work_sync(&dbs_info->work); + dbs_timer_exit(smp_processor_id()); } /* @@ -676,6 +726,42 @@ static int should_io_be_busy(void) return 0; } +static int __cpuinit cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned long)hcpu; + struct device *cpu_dev; + struct cpu_dbs_info_s *dbs_info; + + if (dbs_sw_coordinated_cpus()) + dbs_info = &per_cpu(od_cpu_dbs_info, 0); + else + dbs_info = &per_cpu(od_cpu_dbs_info, cpu); + + cpu_dev = get_cpu_device(cpu); + if (cpu_dev) { + switch (action) { + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + dbs_timer_init(dbs_info, cpu); + break; + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + dbs_timer_exit(cpu); + break; + case CPU_DOWN_FAILED: + case CPU_DOWN_FAILED_FROZEN: + dbs_timer_init(dbs_info, cpu); + break; + } + } + return NOTIFY_OK; +} + +static struct notifier_block __refdata ondemand_cpu_notifier = { + .notifier_call = cpu_callback, +}; + static int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event) { @@ -704,9 +790,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, if (dbs_tuners_ins.ignore_nice) j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + mutex_init(&j_dbs_info->timer_mutex); + INIT_DELAYED_WORK_DEFERRABLE(&per_cpu(ondemand_work, j), + do_dbs_timer); + + j_dbs_info->rate_mult = 1; } this_dbs_info->cpu = cpu; - this_dbs_info->rate_mult = 1; ondemand_powersave_bias_init_cpu(cpu); /* * Start 
the timerschedule work, when this governor @@ -736,21 +826,46 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, } mutex_unlock(&dbs_mutex); - mutex_init(&this_dbs_info->timer_mutex); - dbs_timer_init(this_dbs_info); + /* If SW coordinated CPUs then register notifier */ + if (dbs_sw_coordinated_cpus()) { + register_hotcpu_notifier(&ondemand_cpu_notifier); + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + + j_dbs_info = &per_cpu(od_cpu_dbs_info, 0); + dbs_timer_init(j_dbs_info, j); + } + + /* Initiate timer time stamp */ + time_stamp = ktime_get(); + + + } else + dbs_timer_init(this_dbs_info, cpu); break; case CPUFREQ_GOV_STOP: - dbs_timer_exit(this_dbs_info); + + dbs_timer_exit(cpu); mutex_lock(&dbs_mutex); mutex_destroy(&this_dbs_info->timer_mutex); dbs_enable--; mutex_unlock(&dbs_mutex); - if (!dbs_enable) + if (!dbs_enable) { sysfs_remove_group(cpufreq_global_kobject, &dbs_attr_group); + if (dbs_sw_coordinated_cpus()) { + /* + * Make sure all pending timers/works are + * stopped. 
+ */ + schedule_on_each_cpu(dbs_timer_exit_per_cpu); + unregister_hotcpu_notifier(&ondemand_cpu_notifier); + } + } break; case CPUFREQ_GOV_LIMITS: diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/db8500-cpufreq.c deleted file mode 100644 index 0bf1b8910ee..00000000000 --- a/drivers/cpufreq/db8500-cpufreq.c +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Copyright (C) STMicroelectronics 2009 - * Copyright (C) ST-Ericsson SA 2010 - * - * License Terms: GNU General Public License v2 - * Author: Sundar Iyer <sundar.iyer@stericsson.com> - * Author: Martin Persson <martin.persson@stericsson.com> - * Author: Jonas Aaberg <jonas.aberg@stericsson.com> - * - */ -#include <linux/kernel.h> -#include <linux/cpufreq.h> -#include <linux/delay.h> -#include <linux/slab.h> -#include <linux/mfd/dbx500-prcmu.h> -#include <mach/id.h> - -static struct cpufreq_frequency_table freq_table[] = { - [0] = { - .index = 0, - .frequency = 200000, - }, - [1] = { - .index = 1, - .frequency = 400000, - }, - [2] = { - .index = 2, - .frequency = 800000, - }, - [3] = { - /* Used for MAX_OPP, if available */ - .index = 3, - .frequency = CPUFREQ_TABLE_END, - }, - [4] = { - .index = 4, - .frequency = CPUFREQ_TABLE_END, - }, -}; - -static enum arm_opp idx2opp[] = { - ARM_EXTCLK, - ARM_50_OPP, - ARM_100_OPP, - ARM_MAX_OPP -}; - -static struct freq_attr *db8500_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -static int db8500_cpufreq_verify_speed(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, freq_table); -} - -static int db8500_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - struct cpufreq_freqs freqs; - unsigned int idx; - - /* scale the target frequency to one of the extremes supported */ - if (target_freq < policy->cpuinfo.min_freq) - target_freq = policy->cpuinfo.min_freq; - if (target_freq > policy->cpuinfo.max_freq) - target_freq = policy->cpuinfo.max_freq; - - /* Lookup 
the next frequency */ - if (cpufreq_frequency_table_target - (policy, freq_table, target_freq, relation, &idx)) { - return -EINVAL; - } - - freqs.old = policy->cur; - freqs.new = freq_table[idx].frequency; - - if (freqs.old == freqs.new) - return 0; - - /* pre-change notification */ - for_each_cpu(freqs.cpu, policy->cpus) - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - - /* request the PRCM unit for opp change */ - if (prcmu_set_arm_opp(idx2opp[idx])) { - pr_err("db8500-cpufreq: Failed to set OPP level\n"); - return -EINVAL; - } - - /* post change notification */ - for_each_cpu(freqs.cpu, policy->cpus) - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - - return 0; -} - -static unsigned int db8500_cpufreq_getspeed(unsigned int cpu) -{ - int i; - /* request the prcm to get the current ARM opp */ - for (i = 0; prcmu_get_arm_opp() != idx2opp[i]; i++) - ; - return freq_table[i].frequency; -} - -static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy) -{ - int i, res; - - BUILD_BUG_ON(ARRAY_SIZE(idx2opp) + 1 != ARRAY_SIZE(freq_table)); - - if (prcmu_has_arm_maxopp()) - freq_table[3].frequency = 1000000; - - pr_info("db8500-cpufreq : Available frequencies:\n"); - for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) - pr_info(" %d Mhz\n", freq_table[i].frequency/1000); - - /* get policy fields based on the table */ - res = cpufreq_frequency_table_cpuinfo(policy, freq_table); - if (!res) - cpufreq_frequency_table_get_attr(freq_table, policy->cpu); - else { - pr_err("db8500-cpufreq : Failed to read policy table\n"); - return res; - } - - policy->min = policy->cpuinfo.min_freq; - policy->max = policy->cpuinfo.max_freq; - policy->cur = db8500_cpufreq_getspeed(policy->cpu); - policy->governor = CPUFREQ_DEFAULT_GOVERNOR; - - /* - * FIXME : Need to take time measurement across the target() - * function with no/some/all drivers in the notification - * list. 
- */ - policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */ - - /* policy sharing between dual CPUs */ - cpumask_copy(policy->cpus, cpu_present_mask); - - policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; - - return 0; -} - -static struct cpufreq_driver db8500_cpufreq_driver = { - .flags = CPUFREQ_STICKY, - .verify = db8500_cpufreq_verify_speed, - .target = db8500_cpufreq_target, - .get = db8500_cpufreq_getspeed, - .init = db8500_cpufreq_init, - .name = "DB8500", - .attr = db8500_cpufreq_attr, -}; - -static int __init db8500_cpufreq_register(void) -{ - if (!cpu_is_u8500v20_or_later()) - return -ENODEV; - - pr_info("cpufreq for DB8500 started\n"); - return cpufreq_register_driver(&db8500_cpufreq_driver); -} -device_initcall(db8500_cpufreq_register); diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c new file mode 100644 index 00000000000..a6f991e2fb6 --- /dev/null +++ b/drivers/cpufreq/dbx500-cpufreq.c @@ -0,0 +1,340 @@ +/* + * Copyright (C) STMicroelectronics 2009 + * Copyright (C) ST-Ericsson SA 2010-2011 + * + * License Terms: GNU General Public License v2 + * Author: Sundar Iyer <sundar.iyer@stericsson.com> + * Author: Martin Persson <martin.persson@stericsson.com> + * Author: Jonas Aaberg <jonas.aberg@stericsson.com> + */ + +#include <linux/kernel.h> +#include <linux/cpufreq.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/mfd/dbx500-prcmu.h> +#include <mach/id.h> + +static struct cpufreq_frequency_table db8500_freq_table[] = { + [0] = { + .index = 0, + .frequency = 200000, + }, + [1] = { + .index = 1, + .frequency = 400000, + }, + [2] = { + .index = 2, + .frequency = 800000, + }, + [3] = { + /* Used for MAX_OPP, if available */ + .index = 3, + .frequency = CPUFREQ_TABLE_END, + }, + [4] = { + .index = 4, + .frequency = CPUFREQ_TABLE_END, + }, +}; + +static struct cpufreq_frequency_table db5500_freq_table[] = { + [0] = { + .index = 0, + .frequency = 200000, + }, + [1] = { + .index = 1, + .frequency = 
396500, + }, + [2] = { + .index = 2, + .frequency = 793000, + }, + [3] = { + .index = 3, + .frequency = CPUFREQ_TABLE_END, + }, +}; + +static struct cpufreq_frequency_table *freq_table; +static int freq_table_len; + +static enum arm_opp db8500_idx2opp[] = { + ARM_EXTCLK, + ARM_50_OPP, + ARM_100_OPP, + ARM_MAX_OPP +}; + +static enum arm_opp db5500_idx2opp[] = { + ARM_EXTCLK, + ARM_50_OPP, + ARM_100_OPP, +}; + +static enum arm_opp *idx2opp; + +static struct freq_attr *dbx500_cpufreq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static int dbx500_cpufreq_verify_speed(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, freq_table); +} + +static int dbx500_cpufreq_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + struct cpufreq_freqs freqs; + unsigned int idx; + + /* scale the target frequency to one of the extremes supported */ + if (target_freq < policy->cpuinfo.min_freq) + target_freq = policy->cpuinfo.min_freq; + if (target_freq > policy->cpuinfo.max_freq) + target_freq = policy->cpuinfo.max_freq; + + /* Lookup the next frequency */ + if (cpufreq_frequency_table_target + (policy, freq_table, target_freq, relation, &idx)) { + return -EINVAL; + } + + freqs.old = policy->cur; + freqs.new = freq_table[idx].frequency; + + if (freqs.old == freqs.new) + return 0; + + /* pre-change notification */ + for_each_cpu(freqs.cpu, policy->cpus) + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + BUG_ON(idx >= freq_table_len); + + /* request the PRCM unit for opp change */ + if (prcmu_set_arm_opp(idx2opp[idx])) { + pr_err("ux500-cpufreq: Failed to set OPP level\n"); + return -EINVAL; + } + + /* post change notification */ + for_each_cpu(freqs.cpu, policy->cpus) + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + return 0; +} + +static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu) +{ + int i; + enum arm_opp current_opp; + + current_opp = prcmu_get_arm_opp(); 
+ + /* request the prcm to get the current ARM opp */ + for (i = 0; i < freq_table_len; i++) { + if (current_opp == idx2opp[i]) + return freq_table[i].frequency; + } + + pr_err("cpufreq: ERROR: unknown opp %d given from prcmufw!\n", + current_opp); + BUG_ON(1); + + /* + * Better to return something that might be correct than + * errno or zero, since clk_get_rate() won't do well with an errno. + */ + return freq_table[0].frequency; +} + +static void __init dbx500_cpufreq_init_maxopp_freq(void) +{ + struct prcmu_fw_version *fw_version = prcmu_get_fw_version(); + + if ((fw_version == NULL) || !prcmu_has_arm_maxopp()) + return; + + switch (fw_version->project) { + case PRCMU_FW_PROJECT_U8500: + case PRCMU_FW_PROJECT_U9500: + case PRCMU_FW_PROJECT_U8420: + freq_table[3].frequency = 1000000; + break; + case PRCMU_FW_PROJECT_U8500_C2: + case PRCMU_FW_PROJECT_U9500_C2: + case PRCMU_FW_PROJECT_U8520: + freq_table[3].frequency = 1150000; + break; + default: + break; + } +} + +static bool initialized; + +static void __init dbx500_cpufreq_early_init(void) +{ + if (cpu_is_u5500()) { + freq_table = db5500_freq_table; + idx2opp = db5500_idx2opp; + freq_table_len = ARRAY_SIZE(db5500_freq_table); + } else if (cpu_is_u8500()) { + freq_table = db8500_freq_table; + idx2opp = db8500_idx2opp; + dbx500_cpufreq_init_maxopp_freq(); + freq_table_len = ARRAY_SIZE(db8500_freq_table); + if (!prcmu_has_arm_maxopp()) + freq_table_len--; + } else { + ux500_unknown_soc(); + } + initialized = true; +} + +/* + * This is called from localtimer initialization, via the clk_get_rate() for + * the smp_twd clock. This is way before cpufreq is initialized. 
+ */ +unsigned long dbx500_cpufreq_getfreq(void) +{ + if (!initialized) + dbx500_cpufreq_early_init(); + + return dbx500_cpufreq_getspeed(0) * 1000; +} + +int dbx500_cpufreq_percent2freq(int percent) +{ + int op; + int i; + + switch (percent) { + case 0: + /* Fall through */ + case 25: + op = ARM_EXTCLK; + break; + case 50: + op = ARM_50_OPP; + break; + case 100: + op = ARM_100_OPP; + break; + case 125: + if (cpu_is_u8500() && prcmu_has_arm_maxopp()) + op = ARM_MAX_OPP; + else + op = ARM_100_OPP; + break; + default: + pr_err("cpufreq-dbx500: Incorrect arm target value (%d).\n", + percent); + return -EINVAL; + break; + } + + for (i = 0; idx2opp[i] != op && i < freq_table_len; i++) + ; + + if (freq_table[i].frequency == CPUFREQ_TABLE_END) { + pr_err("cpufreq-dbx500: Matching frequency does not exist!\n"); + return -EINVAL; + } + + return freq_table[i].frequency; +} + +int dbx500_cpufreq_get_limits(int cpu, int r, + unsigned int *min, unsigned int *max) +{ + int freq; + int ret; + static int old_freq; + struct cpufreq_policy p; + + freq = dbx500_cpufreq_percent2freq(r); + + if (freq < 0) + return -EINVAL; + + if (freq != old_freq) + pr_debug("cpufreq-dbx500: set min arm freq to %d\n", + freq); + + (*min) = freq; + + ret = cpufreq_get_policy(&p, cpu); + if (ret) { + pr_err("cpufreq-dbx500: Failed to get policy.\n"); + return -EINVAL; + } + + (*max) = p.max; + return 0; +} + +static int __cpuinit dbx500_cpufreq_init(struct cpufreq_policy *policy) +{ + int res; + + /* get policy fields based on the table */ + res = cpufreq_frequency_table_cpuinfo(policy, freq_table); + if (!res) + cpufreq_frequency_table_get_attr(freq_table, policy->cpu); + else { + pr_err("dbx500-cpufreq : Failed to read policy table\n"); + return res; + } + + policy->min = policy->cpuinfo.min_freq; + policy->max = policy->cpuinfo.max_freq; + policy->cur = dbx500_cpufreq_getspeed(policy->cpu); + policy->governor = CPUFREQ_DEFAULT_GOVERNOR; + + /* + * FIXME : Need to take time measurement across the 
target() + * function with no/some/all drivers in the notification + * list. + */ + policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */ + + /* policy sharing between dual CPUs */ + cpumask_copy(policy->cpus, cpu_present_mask); + + policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; + + return 0; +} + +static struct cpufreq_driver dbx500_cpufreq_driver = { + .flags = CPUFREQ_STICKY, + .verify = dbx500_cpufreq_verify_speed, + .target = dbx500_cpufreq_target, + .get = dbx500_cpufreq_getspeed, + .init = dbx500_cpufreq_init, + .name = "DBX500", + .attr = dbx500_cpufreq_attr, +}; + +static int __init dbx500_cpufreq_register(void) +{ + int i; + + if (!initialized) + dbx500_cpufreq_early_init(); + + pr_info("dbx500-cpufreq : Available frequencies:\n"); + + for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) + pr_info(" %d Mhz\n", freq_table[i].frequency / 1000); + + return cpufreq_register_driver(&dbx500_cpufreq_driver); +} +device_initcall(dbx500_cpufreq_register); diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 2ed1ac3513f..0fe43c3f598 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -345,6 +345,7 @@ struct d40_base { int irq; int num_phy_chans; int num_log_chans; + struct device_dma_parameters dma_parms; struct dma_device dma_both; struct dma_device dma_slave; struct dma_device dma_memcpy; @@ -2577,6 +2578,14 @@ static int d40_set_runtime_config(struct dma_chan *chan, return -EINVAL; } + if (src_maxburst > 16) { + src_maxburst = 16; + dst_maxburst = src_maxburst * src_addr_width / dst_addr_width; + } else if (dst_maxburst > 16) { + dst_maxburst = 16; + src_maxburst = dst_maxburst * dst_addr_width / src_addr_width; + } + ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, src_addr_width, src_maxburst); @@ -2639,6 +2648,56 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, return -ENXIO; } +dma_addr_t stedma40_get_src_addr(struct dma_chan *chan) +{ + struct d40_chan *d40c = container_of(chan, 
struct d40_chan, chan); + dma_addr_t addr; + + if (chan_is_physical(d40c)) + addr = readl(d40c->base->virtbase + D40_DREG_PCBASE + + d40c->phy_chan->num * D40_DREG_PCDELTA + + D40_CHAN_REG_SSPTR); + else { + unsigned long lower; + unsigned long upper; + + /* + * There is a potential for overflow between the time the two + * halves of the pointer are read. + */ + lower = d40c->lcpa->lcsp0 & D40_MEM_LCSP0_SPTR_MASK; + upper = d40c->lcpa->lcsp1 & D40_MEM_LCSP1_SPTR_MASK; + + addr = upper | lower; + } + + return addr; +} +EXPORT_SYMBOL(stedma40_get_src_addr); + +dma_addr_t stedma40_get_dst_addr(struct dma_chan *chan) +{ + struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); + dma_addr_t addr; + + if (chan_is_physical(d40c)) + addr = readl(d40c->base->virtbase + D40_DREG_PCBASE + + d40c->phy_chan->num * D40_DREG_PCDELTA + + D40_CHAN_REG_SDPTR); + else { + unsigned long lower; + unsigned long upper; + + lower = d40c->lcpa->lcsp2 & D40_MEM_LCSP2_DPTR_MASK; + upper = d40c->lcpa->lcsp3 & D40_MEM_LCSP3_DPTR_MASK; + + addr = upper | lower; + } + + return addr; +} +EXPORT_SYMBOL(stedma40_get_dst_addr); + /* Initialization functions */ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, @@ -2773,8 +2832,6 @@ static int dma40_pm_suspend(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct d40_base *base = platform_get_drvdata(pdev); int ret = 0; - if (!pm_runtime_suspended(dev)) - return -EBUSY; if (base->lcpa_regulator) ret = regulator_disable(base->lcpa_regulator); @@ -3358,6 +3415,13 @@ static int __init d40_probe(struct platform_device *pdev) if (err) goto failure; + base->dev->dma_parms = &base->dma_parms; + err = dma_set_max_seg_size(base->dev, 0xffff); + if (err) { + d40_err(&pdev->dev, "Failed to set dma max seg size\n"); + goto failure; + } + d40_hw_init(base); dev_info(base->dev, "initialized\n"); diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index cad9e1daedf..d47d1fa36b9 
100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c @@ -102,16 +102,18 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg, src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS; dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS; + /* Set the priority bit to high for the physical channel */ + if (cfg->high_priority) { + src |= 1 << D40_SREG_CFG_PRI_POS; + dst |= 1 << D40_SREG_CFG_PRI_POS; + } + } else { /* Logical channel */ dst |= 1 << D40_SREG_CFG_LOG_GIM_POS; src |= 1 << D40_SREG_CFG_LOG_GIM_POS; } - if (cfg->high_priority) { - src |= 1 << D40_SREG_CFG_PRI_POS; - dst |= 1 << D40_SREG_CFG_PRI_POS; - } if (cfg->src_info.big_endian) src |= 1 << D40_SREG_CFG_LBE_POS; @@ -331,10 +333,10 @@ void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, { d40_log_lli_link(lli_dst, lli_src, next, flags); - writel(lli_src->lcsp02, &lcpa[0].lcsp0); - writel(lli_src->lcsp13, &lcpa[0].lcsp1); - writel(lli_dst->lcsp02, &lcpa[0].lcsp2); - writel(lli_dst->lcsp13, &lcpa[0].lcsp3); + writel_relaxed(lli_src->lcsp02, &lcpa[0].lcsp0); + writel_relaxed(lli_src->lcsp13, &lcpa[0].lcsp1); + writel_relaxed(lli_dst->lcsp02, &lcpa[0].lcsp2); + writel_relaxed(lli_dst->lcsp13, &lcpa[0].lcsp3); } void d40_log_lli_lcla_write(struct d40_log_lli *lcla, @@ -344,10 +346,10 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla, { d40_log_lli_link(lli_dst, lli_src, next, flags); - writel(lli_src->lcsp02, &lcla[0].lcsp02); - writel(lli_src->lcsp13, &lcla[0].lcsp13); - writel(lli_dst->lcsp02, &lcla[1].lcsp02); - writel(lli_dst->lcsp13, &lcla[1].lcsp13); + writel_relaxed(lli_src->lcsp02, &lcla[0].lcsp02); + writel_relaxed(lli_src->lcsp13, &lcla[0].lcsp13); + writel_relaxed(lli_dst->lcsp02, &lcla[1].lcsp02); + writel_relaxed(lli_dst->lcsp13, &lcla[1].lcsp13); } static void d40_log_fill_lli(struct d40_log_lli *lli, diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 51e8e5396e9..2f3395d6850 100644 --- a/drivers/dma/ste_dma40_ll.h +++ 
b/drivers/dma/ste_dma40_ll.h @@ -94,10 +94,13 @@ /* LCSP2 */ #define D40_MEM_LCSP2_ECNT_POS 16 +#define D40_MEM_LCSP2_DPTR_POS 0 #define D40_MEM_LCSP2_ECNT_MASK (0xFFFF << D40_MEM_LCSP2_ECNT_POS) +#define D40_MEM_LCSP2_DPTR_MASK (0xFFFF << D40_MEM_LCSP2_DPTR_POS) /* LCSP3 */ +#define D40_MEM_LCSP3_DPTR_POS 16 #define D40_MEM_LCSP3_DCFG_MST_POS 15 #define D40_MEM_LCSP3_DCFG_TIM_POS 14 #define D40_MEM_LCSP3_DCFG_EIM_POS 13 @@ -107,6 +110,7 @@ #define D40_MEM_LCSP3_DLOS_POS 1 #define D40_MEM_LCSP3_DTCP_POS 0 +#define D40_MEM_LCSP3_DPTR_MASK (0xFFFF << D40_MEM_LCSP3_DPTR_POS) #define D40_MEM_LCSP3_DLOS_MASK (0x7F << D40_MEM_LCSP3_DLOS_POS) #define D40_MEM_LCSP3_DTCP_MASK (0x1 << D40_MEM_LCSP3_DTCP_POS) diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index e03653d6935..6226d0cd30a 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -504,7 +504,7 @@ config GPIO_JANZ_TTL config GPIO_AB8500 bool "ST-Ericsson AB8500 Mixed Signal Circuit gpio functions" - depends on AB8500_CORE && BROKEN + depends on AB8500_CORE help Select this to enable the AB8500 IC GPIO driver diff --git a/drivers/gpio/gpio-ab8500.c b/drivers/gpio/gpio-ab8500.c index 050c05d9189..ef75984486c 100644 --- a/drivers/gpio/gpio-ab8500.c +++ b/drivers/gpio/gpio-ab8500.c @@ -18,9 +18,16 @@ #include <linux/gpio.h> #include <linux/irq.h> #include <linux/interrupt.h> -#include <linux/mfd/ab8500.h> #include <linux/mfd/abx500.h> -#include <linux/mfd/ab8500/gpio.h> +#include <linux/mfd/abx500/ab8500-gpio.h> + + +/* + * The AB9540 GPIO support is an extended version of the + * AB8500 GPIO support. The AB9540 supports an additional + * (7th) register so that more GPIO may be configured and + * used. 
+ */ /* * GPIO registers offset @@ -32,6 +39,7 @@ #define AB8500_GPIO_SEL4_REG 0x03 #define AB8500_GPIO_SEL5_REG 0x04 #define AB8500_GPIO_SEL6_REG 0x05 +#define AB9540_GPIO_SEL7_REG 0x06 #define AB8500_GPIO_DIR1_REG 0x10 #define AB8500_GPIO_DIR2_REG 0x11 @@ -39,6 +47,7 @@ #define AB8500_GPIO_DIR4_REG 0x13 #define AB8500_GPIO_DIR5_REG 0x14 #define AB8500_GPIO_DIR6_REG 0x15 +#define AB9540_GPIO_DIR7_REG 0x16 #define AB8500_GPIO_OUT1_REG 0x20 #define AB8500_GPIO_OUT2_REG 0x21 @@ -46,6 +55,7 @@ #define AB8500_GPIO_OUT4_REG 0x23 #define AB8500_GPIO_OUT5_REG 0x24 #define AB8500_GPIO_OUT6_REG 0x25 +#define AB9540_GPIO_OUT7_REG 0x26 #define AB8500_GPIO_PUD1_REG 0x30 #define AB8500_GPIO_PUD2_REG 0x31 @@ -53,6 +63,7 @@ #define AB8500_GPIO_PUD4_REG 0x33 #define AB8500_GPIO_PUD5_REG 0x34 #define AB8500_GPIO_PUD6_REG 0x35 +#define AB9540_GPIO_PUD7_REG 0x36 #define AB8500_GPIO_IN1_REG 0x40 #define AB8500_GPIO_IN2_REG 0x41 @@ -60,9 +71,13 @@ #define AB8500_GPIO_IN4_REG 0x43 #define AB8500_GPIO_IN5_REG 0x44 #define AB8500_GPIO_IN6_REG 0x45 -#define AB8500_GPIO_ALTFUN_REG 0x45 -#define ALTFUN_REG_INDEX 6 +#define AB9540_GPIO_IN7_REG 0x46 +#define AB8500_GPIO_ALTFUN_REG 0x50 +#define AB8500_ALTFUN_REG_INDEX 6 +#define AB9540_ALTFUN_REG_INDEX 7 #define AB8500_NUM_GPIO 42 +#define AB9540_NUM_GPIO 54 +#define AB8505_NUM_GPIO 53 #define AB8500_NUM_VIR_GPIO_IRQ 16 enum ab8500_gpio_action { @@ -73,6 +88,11 @@ enum ab8500_gpio_action { UNMASK }; +struct ab8500_gpio_irq_cluster { + int start; + int end; +}; + struct ab8500_gpio { struct gpio_chip chip; struct ab8500 *parent; @@ -82,7 +102,50 @@ struct ab8500_gpio { enum ab8500_gpio_action irq_action; u16 rising; u16 falling; + struct ab8500_gpio_irq_cluster *irq_cluster; + int irq_cluster_size; +}; + +/* + * Only some GPIOs are interrupt capable, and they are + * organized in discontiguous clusters: + * + * GPIO6 to GPIO13 + * GPIO24 and GPIO25 + * GPIO36 to GPIO41 + * GPIO50 to GPIO54 (AB9540 only) + */ +static struct 
ab8500_gpio_irq_cluster ab8500_irq_clusters[] = { + {.start = 5, .end = 12}, /* GPIO numbers start from 1 */ + {.start = 23, .end = 24}, + {.start = 35, .end = 40}, +}; + +static struct ab8500_gpio_irq_cluster ab9540_irq_clusters[] = { + {.start = 5, .end = 12}, /* GPIO numbers start from 1 */ + {.start = 23, .end = 24}, + {.start = 35, .end = 40}, + {.start = 49, .end = 53}, +}; + +/* + * For AB8505 Only some GPIOs are interrupt capable, and they are + * organized in discontiguous clusters: + * + * GPIO10 to GPIO11 + * GPIO13 + * GPIO40 and GPIO41 + * GPIO50 + * GPIO52 to GPIO53 + */ +static struct ab8500_gpio_irq_cluster ab8505_irq_clusters[] = { + {.start = 9, .end = 10}, /* GPIO numbers start from 1 */ + {.start = 12, .end = 12}, + {.start = 39, .end = 40}, + {.start = 49, .end = 49}, + {.start = 51, .end = 52}, }; + /** * to_ab8500_gpio() - get the pointer to ab8500_gpio * @chip: Member of the structure ab8500_gpio @@ -115,7 +178,7 @@ static int ab8500_gpio_get(struct gpio_chip *chip, unsigned offset) { struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip); u8 mask = 1 << (offset % 8); - u8 reg = AB8500_GPIO_OUT1_REG + (offset / 8); + u8 reg = AB8500_GPIO_IN1_REG + (offset / 8); int ret; u8 data; ret = abx500_get_register_interruptible(ab8500_gpio->dev, AB8500_MISC, @@ -132,7 +195,7 @@ static void ab8500_gpio_set(struct gpio_chip *chip, unsigned offset, int val) struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip); int ret; /* Write the data */ - ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, 1); + ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val); if (ret < 0) dev_err(ab8500_gpio->dev, "%s write failed\n", __func__); } @@ -162,28 +225,13 @@ static int ab8500_gpio_direction_input(struct gpio_chip *chip, unsigned offset) static int ab8500_gpio_to_irq(struct gpio_chip *chip, unsigned offset) { - /* - * Only some GPIOs are interrupt capable, and they are - * organized in discontiguous clusters: - * - * GPIO6 to GPIO13 - * 
GPIO24 and GPIO25 - * GPIO36 to GPIO41 - */ - static struct ab8500_gpio_irq_cluster { - int start; - int end; - } clusters[] = { - {.start = 6, .end = 13}, - {.start = 24, .end = 25}, - {.start = 36, .end = 41}, - }; struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip); int base = ab8500_gpio->irq_base; int i; - for (i = 0; i < ARRAY_SIZE(clusters); i++) { - struct ab8500_gpio_irq_cluster *cluster = &clusters[i]; + for (i = 0; i < ab8500_gpio->irq_cluster_size; i++) { + struct ab8500_gpio_irq_cluster *cluster = + &ab8500_gpio->irq_cluster[i]; if (offset >= cluster->start && offset <= cluster->end) return base + offset - cluster->start; @@ -207,7 +255,7 @@ static struct gpio_chip ab8500gpio_chip = { static unsigned int irq_to_rising(unsigned int irq) { - struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); + struct ab8500_gpio *ab8500_gpio = irq_get_chip_data(irq); int offset = irq - ab8500_gpio->irq_base; int new_irq = offset + AB8500_INT_GPIO6R + ab8500_gpio->parent->irq_base; @@ -216,7 +264,7 @@ static unsigned int irq_to_rising(unsigned int irq) static unsigned int irq_to_falling(unsigned int irq) { - struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); + struct ab8500_gpio *ab8500_gpio = irq_get_chip_data(irq); int offset = irq - ab8500_gpio->irq_base; int new_irq = offset + AB8500_INT_GPIO6F + ab8500_gpio->parent->irq_base; @@ -261,15 +309,16 @@ static irqreturn_t handle_falling(int irq, void *dev) return IRQ_HANDLED; } -static void ab8500_gpio_irq_lock(unsigned int irq) +static void ab8500_gpio_irq_lock(struct irq_data *data) { - struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); + struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data); mutex_lock(&ab8500_gpio->lock); } -static void ab8500_gpio_irq_sync_unlock(unsigned int irq) +static void ab8500_gpio_irq_sync_unlock(struct irq_data *data) { - struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); + struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data); + 
unsigned int irq = data->irq; int offset = irq - ab8500_gpio->irq_base; bool rising = ab8500_gpio->rising & BIT(offset); bool falling = ab8500_gpio->falling & BIT(offset); @@ -280,12 +329,12 @@ static void ab8500_gpio_irq_sync_unlock(unsigned int irq) if (rising) ret = request_threaded_irq(irq_to_rising(irq), NULL, handle_rising, - IRQF_TRIGGER_RISING, + IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND, "ab8500-gpio-r", ab8500_gpio); if (falling) ret = request_threaded_irq(irq_to_falling(irq), NULL, handle_falling, - IRQF_TRIGGER_FALLING, + IRQF_TRIGGER_FALLING | IRQF_NO_SUSPEND, "ab8500-gpio-f", ab8500_gpio); break; case SHUTDOWN: @@ -316,21 +365,22 @@ static void ab8500_gpio_irq_sync_unlock(unsigned int irq) } -static void ab8500_gpio_irq_mask(unsigned int irq) +static void ab8500_gpio_irq_mask(struct irq_data *data) { - struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); + struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data); ab8500_gpio->irq_action = MASK; } -static void ab8500_gpio_irq_unmask(unsigned int irq) +static void ab8500_gpio_irq_unmask(struct irq_data *data) { - struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); + struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data); ab8500_gpio->irq_action = UNMASK; } -static int ab8500_gpio_irq_set_type(unsigned int irq, unsigned int type) +static int ab8500_gpio_irq_set_type(struct irq_data *data, unsigned int type) { - struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); + struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data); + unsigned int irq = data->irq; int offset = irq - ab8500_gpio->irq_base; if (type == IRQ_TYPE_EDGE_BOTH) { @@ -344,28 +394,28 @@ static int ab8500_gpio_irq_set_type(unsigned int irq, unsigned int type) return 0; } -unsigned int ab8500_gpio_irq_startup(unsigned int irq) +unsigned int ab8500_gpio_irq_startup(struct irq_data *data) { - struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); + struct ab8500_gpio *ab8500_gpio = 
irq_data_get_irq_chip_data(data); ab8500_gpio->irq_action = STARTUP; return 0; } -void ab8500_gpio_irq_shutdown(unsigned int irq) +void ab8500_gpio_irq_shutdown(struct irq_data *data) { - struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq); + struct ab8500_gpio *ab8500_gpio = irq_data_get_irq_chip_data(data); ab8500_gpio->irq_action = SHUTDOWN; } static struct irq_chip ab8500_gpio_irq_chip = { .name = "ab8500-gpio", - .startup = ab8500_gpio_irq_startup, - .shutdown = ab8500_gpio_irq_shutdown, - .bus_lock = ab8500_gpio_irq_lock, - .bus_sync_unlock = ab8500_gpio_irq_sync_unlock, - .mask = ab8500_gpio_irq_mask, - .unmask = ab8500_gpio_irq_unmask, - .set_type = ab8500_gpio_irq_set_type, + .irq_startup = ab8500_gpio_irq_startup, + .irq_shutdown = ab8500_gpio_irq_shutdown, + .irq_bus_lock = ab8500_gpio_irq_lock, + .irq_bus_sync_unlock = ab8500_gpio_irq_sync_unlock, + .irq_mask = ab8500_gpio_irq_mask, + .irq_unmask = ab8500_gpio_irq_unmask, + .irq_set_type = ab8500_gpio_irq_set_type, }; static int ab8500_gpio_irq_init(struct ab8500_gpio *ab8500_gpio) @@ -374,14 +424,14 @@ static int ab8500_gpio_irq_init(struct ab8500_gpio *ab8500_gpio) int irq; for (irq = base; irq < base + AB8500_NUM_VIR_GPIO_IRQ ; irq++) { - set_irq_chip_data(irq, ab8500_gpio); - set_irq_chip_and_handler(irq, &ab8500_gpio_irq_chip, + irq_set_chip_data(irq, ab8500_gpio); + irq_set_chip_and_handler(irq, &ab8500_gpio_irq_chip, handle_simple_irq); - set_irq_nested_thread(irq, 1); + irq_set_nested_thread(irq, 1); #ifdef CONFIG_ARM set_irq_flags(irq, IRQF_VALID); #else - set_irq_noprobe(irq); + irq_set_noprobe(irq); #endif } @@ -397,19 +447,22 @@ static void ab8500_gpio_irq_remove(struct ab8500_gpio *ab8500_gpio) #ifdef CONFIG_ARM set_irq_flags(irq, 0); #endif - set_irq_chip_and_handler(irq, NULL, NULL); - set_irq_chip_data(irq, NULL); + irq_set_chip_and_handler(irq, NULL, NULL); + irq_set_chip_data(irq, NULL); } } static int __devinit ab8500_gpio_probe(struct platform_device *pdev) { + struct ab8500 
*parent = dev_get_drvdata(pdev->dev.parent); struct ab8500_platform_data *ab8500_pdata = dev_get_platdata(pdev->dev.parent); struct ab8500_gpio_platform_data *pdata; struct ab8500_gpio *ab8500_gpio; int ret; int i; + int last_gpio_sel_reg; + int altfun_reg_index; pdata = ab8500_pdata->gpio; if (!pdata) { @@ -425,10 +478,36 @@ static int __devinit ab8500_gpio_probe(struct platform_device *pdev) ab8500_gpio->dev = &pdev->dev; ab8500_gpio->parent = dev_get_drvdata(pdev->dev.parent); ab8500_gpio->chip = ab8500gpio_chip; - ab8500_gpio->chip.ngpio = AB8500_NUM_GPIO; ab8500_gpio->chip.dev = &pdev->dev; ab8500_gpio->chip.base = pdata->gpio_base; ab8500_gpio->irq_base = pdata->irq_base; + + /* Configure GPIO Settings for specific AB devices */ + if (cpu_is_u9540()) { + ab8500_gpio->chip.ngpio = AB9540_NUM_GPIO; + ab8500_gpio->irq_cluster = ab9540_irq_clusters; + ab8500_gpio->irq_cluster_size = + ARRAY_SIZE(ab9540_irq_clusters); + last_gpio_sel_reg = AB9540_GPIO_SEL7_REG; + altfun_reg_index = AB9540_ALTFUN_REG_INDEX; + } else { + if (is_ab8505(parent)) { + ab8500_gpio->chip.ngpio = AB8505_NUM_GPIO; + ab8500_gpio->irq_cluster = ab8505_irq_clusters; + ab8500_gpio->irq_cluster_size = + ARRAY_SIZE(ab8505_irq_clusters); + last_gpio_sel_reg = AB9540_GPIO_SEL7_REG; + altfun_reg_index = AB9540_ALTFUN_REG_INDEX; + } else { + ab8500_gpio->chip.ngpio = AB8500_NUM_GPIO; + ab8500_gpio->irq_cluster = ab8500_irq_clusters; + ab8500_gpio->irq_cluster_size = + ARRAY_SIZE(ab8500_irq_clusters); + last_gpio_sel_reg = AB8500_GPIO_SEL6_REG; + altfun_reg_index = AB8500_ALTFUN_REG_INDEX; + } + } + /* initialize the lock */ mutex_init(&ab8500_gpio->lock); /* @@ -437,16 +516,28 @@ static int __devinit ab8500_gpio_probe(struct platform_device *pdev) * These values are for selecting the PINs as * GPIO or alternate function */ - for (i = AB8500_GPIO_SEL1_REG; i <= AB8500_GPIO_SEL6_REG; i++) { + for (i = AB8500_GPIO_SEL1_REG; i <= last_gpio_sel_reg; i++) { ret = 
abx500_set_register_interruptible(ab8500_gpio->dev, AB8500_MISC, i, pdata->config_reg[i]); if (ret < 0) goto out_free; + + ret = abx500_set_register_interruptible(ab8500_gpio->dev, + AB8500_MISC, i + AB8500_GPIO_DIR1_REG, + pdata->config_direction[i]); + if (ret < 0) + goto out_free; + + ret = abx500_set_register_interruptible(ab8500_gpio->dev, + AB8500_MISC, i + AB8500_GPIO_PUD1_REG, + pdata->config_pullups[i]); + if (ret < 0) + goto out_free; } ret = abx500_set_register_interruptible(ab8500_gpio->dev, AB8500_MISC, AB8500_GPIO_ALTFUN_REG, - pdata->config_reg[ALTFUN_REG_INDEX]); + pdata->config_reg[altfun_reg_index]); if (ret < 0) goto out_free; @@ -493,6 +584,86 @@ static int __devexit ab8500_gpio_remove(struct platform_device *pdev) return 0; } +int ab8500_config_pulldown(struct device *dev, + enum ab8500_pin gpio, bool enable) +{ + u8 offset = gpio - AB8500_PIN_GPIO1; + u8 pos = offset % 8; + u8 val = enable ? 0 : 1; + u8 reg = AB8500_GPIO_PUD1_REG + (offset / 8); + int ret; + + ret = abx500_mask_and_set_register_interruptible(dev, + AB8500_MISC, reg, 1 << pos, val << pos); + if (ret < 0) + dev_err(dev, "%s write failed\n", __func__); + return ret; +} +EXPORT_SYMBOL(ab8500_config_pulldown); + +/* + * ab8500_gpio_config_select() + * + * Configure functionality of pin, either specific use or GPIO. + * @dev: device pointer + * @gpio: gpio number + * @gpio_select: true if the pin should be used as GPIO + */ +int ab8500_gpio_config_select(struct device *dev, + enum ab8500_pin gpio, bool gpio_select) +{ + u8 offset = gpio - AB8500_PIN_GPIO1; + u8 reg = AB8500_GPIO_SEL1_REG + (offset / 8); + u8 pos = offset % 8; + u8 val = gpio_select ? 
1 : 0; + int ret; + + ret = abx500_mask_and_set_register_interruptible(dev, + AB8500_MISC, reg, 1 << pos, val << pos); + if (ret < 0) + dev_err(dev, "%s write failed\n", __func__); + + dev_vdbg(dev, "%s (bank, addr, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n", + __func__, AB8500_MISC, reg, 1 << pos, val << pos); + + return ret; +} + +/* + * ab8500_gpio_config_get_select() + * + * Read currently configured functionality, either specific use or GPIO. + * @dev: device pointer + * @gpio: gpio number + * @gpio_select: pointer to pin selection status + */ +int ab8500_gpio_config_get_select(struct device *dev, + enum ab8500_pin gpio, bool *gpio_select) +{ + u8 offset = gpio - AB8500_PIN_GPIO1; + u8 reg = AB8500_GPIO_SEL1_REG + (offset / 8); + u8 pos = offset % 8; + u8 val; + int ret; + + ret = abx500_get_register_interruptible(dev, + AB8500_MISC, reg, &val); + if (ret < 0) { + dev_err(dev, "%s read failed\n", __func__); + return ret; + } + + if (val & (1 << pos)) + *gpio_select = true; + else + *gpio_select = false; + + dev_vdbg(dev, "%s (bank, addr, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n", + __func__, AB8500_MISC, reg, 1 << pos, val); + + return 0; +} + static struct platform_driver ab8500_gpio_driver = { .driver = { .name = "ab8500-gpio", diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c index 839624f9fe6..05547ed48ff 100644 --- a/drivers/gpio/gpio-nomadik.c +++ b/drivers/gpio/gpio-nomadik.c @@ -23,12 +23,11 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/slab.h> +#include <linux/gpio/nomadik.h> #include <asm/mach/irq.h> #include <plat/pincfg.h> -#include <plat/gpio-nomadik.h> -#include <mach/hardware.h> #include <asm/gpio.h> /* @@ -58,8 +57,11 @@ struct nmk_gpio_chip { u32 real_wake; u32 rwimsc; u32 fwimsc; + u32 rimsc; + u32 fimsc; u32 slpm; u32 pull_up; + u32 lowemi; }; static struct nmk_gpio_chip * @@ -124,6 +126,24 @@ static void __nmk_gpio_set_pull(struct nmk_gpio_chip *nmk_chip, } } +static void __nmk_gpio_set_lowemi(struct 
nmk_gpio_chip *nmk_chip, + unsigned offset, bool lowemi) +{ + u32 bit = BIT(offset); + bool enabled = nmk_chip->lowemi & bit; + + if (lowemi == enabled) + return; + + if (lowemi) + nmk_chip->lowemi |= bit; + else + nmk_chip->lowemi &= ~bit; + + writel_relaxed(nmk_chip->lowemi, + nmk_chip->addr + NMK_GPIO_LOWEMI); +} + static void __nmk_gpio_make_input(struct nmk_gpio_chip *nmk_chip, unsigned offset) { @@ -150,8 +170,8 @@ static void __nmk_gpio_set_mode_safe(struct nmk_gpio_chip *nmk_chip, unsigned offset, int gpio_mode, bool glitch) { - u32 rwimsc = readl(nmk_chip->addr + NMK_GPIO_RWIMSC); - u32 fwimsc = readl(nmk_chip->addr + NMK_GPIO_FWIMSC); + u32 rwimsc = nmk_chip->rwimsc; + u32 fwimsc = nmk_chip->fwimsc; if (glitch && nmk_chip->set_ioforce) { u32 bit = BIT(offset); @@ -173,6 +193,36 @@ static void __nmk_gpio_set_mode_safe(struct nmk_gpio_chip *nmk_chip, } } +static void +nmk_gpio_disable_lazy_irq(struct nmk_gpio_chip *nmk_chip, unsigned offset) +{ + u32 falling = nmk_chip->fimsc & BIT(offset); + u32 rising = nmk_chip->rimsc & BIT(offset); + int gpio = nmk_chip->chip.base + offset; + int irq = NOMADIK_GPIO_TO_IRQ(gpio); + struct irq_data *d = irq_get_irq_data(irq); + + if (!rising && !falling) + return; + + if (!d || !irqd_irq_disabled(d)) + return; + + if (rising) { + nmk_chip->rimsc &= ~BIT(offset); + writel_relaxed(nmk_chip->rimsc, + nmk_chip->addr + NMK_GPIO_RIMSC); + } + + if (falling) { + nmk_chip->fimsc &= ~BIT(offset); + writel_relaxed(nmk_chip->fimsc, + nmk_chip->addr + NMK_GPIO_FIMSC); + } + + dev_dbg(nmk_chip->chip.dev, "%d: clearing interrupt mask\n", gpio); +} + static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset, pin_cfg_t cfg, bool sleep, unsigned int *slpmregs) { @@ -238,6 +288,17 @@ static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset, __nmk_gpio_set_pull(nmk_chip, offset, pull); } + __nmk_gpio_set_lowemi(nmk_chip, offset, PIN_LOWEMI(cfg)); + + /* + * If the pin is switching to altfunc, and there 
was an interrupt + * installed on it which has been lazy disabled, actually mask the + * interrupt to prevent spurious interrupts that would occur while the + * pin is under control of the peripheral. Only SKE does this. + */ + if (af != NMK_GPIO_ALT_GPIO) + nmk_gpio_disable_lazy_irq(nmk_chip, offset); + /* * If we've backed up the SLPM registers (glitch workaround), modify * the backups since they will be restored. @@ -359,7 +420,7 @@ static int __nmk_config_pins(pin_cfg_t *cfgs, int num, bool sleep) /** * nmk_config_pin - configure a pin's mux attributes * @cfg: pin confguration - * + * @sleep: Non-zero to apply the sleep mode configuration * Configures a pin's mode (alternate function or GPIO), its pull up status, * and its sleep mode based on the specified configuration. The @cfg is * usually one of the SoC specific macros defined in mach/<soc>-pins.h. These @@ -556,27 +617,38 @@ static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip, int gpio, enum nmk_gpio_irq_type which, bool enable) { - u32 rimsc = which == WAKE ? NMK_GPIO_RWIMSC : NMK_GPIO_RIMSC; - u32 fimsc = which == WAKE ? 
NMK_GPIO_FWIMSC : NMK_GPIO_FIMSC; u32 bitmask = nmk_gpio_get_bitmask(gpio); - u32 reg; + u32 *rimscval; + u32 *fimscval; + u32 rimscreg; + u32 fimscreg; + + if (which == NORMAL) { + rimscreg = NMK_GPIO_RIMSC; + fimscreg = NMK_GPIO_FIMSC; + rimscval = &nmk_chip->rimsc; + fimscval = &nmk_chip->fimsc; + } else { + rimscreg = NMK_GPIO_RWIMSC; + fimscreg = NMK_GPIO_FWIMSC; + rimscval = &nmk_chip->rwimsc; + fimscval = &nmk_chip->fwimsc; + } /* we must individually set/clear the two edges */ if (nmk_chip->edge_rising & bitmask) { - reg = readl(nmk_chip->addr + rimsc); if (enable) - reg |= bitmask; + *rimscval |= bitmask; else - reg &= ~bitmask; - writel(reg, nmk_chip->addr + rimsc); + *rimscval &= ~bitmask; + writel(*rimscval, nmk_chip->addr + rimscreg); } if (nmk_chip->edge_falling & bitmask) { - reg = readl(nmk_chip->addr + fimsc); if (enable) - reg |= bitmask; + *fimscval |= bitmask; else - reg &= ~bitmask; - writel(reg, nmk_chip->addr + fimsc); + *fimscval &= ~bitmask; + writel(*fimscval, nmk_chip->addr + fimscreg); } } @@ -1008,9 +1080,6 @@ void nmk_gpio_wakeups_suspend(void) clk_enable(chip->clk); - chip->rwimsc = readl(chip->addr + NMK_GPIO_RWIMSC); - chip->fwimsc = readl(chip->addr + NMK_GPIO_FWIMSC); - writel(chip->rwimsc & chip->real_wake, chip->addr + NMK_GPIO_RWIMSC); writel(chip->fwimsc & chip->real_wake, @@ -1076,6 +1145,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev) struct resource *res; struct clk *clk; int secondary_irq; + void __iomem *base; int irq; int ret; @@ -1106,10 +1176,16 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev) goto out; } + base = ioremap(res->start, resource_size(res)); + if (!base) { + ret = -ENOMEM; + goto out_release; + } + clk = clk_get(&dev->dev, NULL); if (IS_ERR(clk)) { ret = PTR_ERR(clk); - goto out_release; + goto out_unmap; } nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL); @@ -1123,7 +1199,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev) */ nmk_chip->bank = 
dev->id; nmk_chip->clk = clk; - nmk_chip->addr = io_p2v(res->start); + nmk_chip->addr = base; nmk_chip->chip = nmk_gpio_template; nmk_chip->parent_irq = irq; nmk_chip->secondary_parent_irq = secondary_irq; @@ -1139,6 +1215,10 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev) chip->dev = &dev->dev; chip->owner = THIS_MODULE; + clk_enable(nmk_chip->clk); + nmk_chip->lowemi = readl_relaxed(nmk_chip->addr + NMK_GPIO_LOWEMI); + clk_disable(nmk_chip->clk); + ret = gpiochip_add(&nmk_chip->chip); if (ret) goto out_free; @@ -1159,6 +1239,8 @@ out_free: out_clk: clk_disable(clk); clk_put(clk); +out_unmap: + iounmap(base); out_release: release_mem_region(res->start, resource_size(res)); out: diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 8deedc1b984..58b2a6c93c8 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -39,6 +39,44 @@ config HWMON_DEBUG_CHIP comment "Native drivers" +config SENSORS_AB8500 + tristate "AB8500 thermal monitoring" + depends on AB8500_GPADC + default n + help + If you say yes here you get support for the thermal sensor part + of the AB8500 chip. The driver includes thermal management for + AB8500 die and two GPADC channels. The GPADC channel are preferably + used to access sensors outside the AB8500 chip. + + This driver can also be built as a module. If so, the module + will be called abx500-temp. + +config SENSORS_AB5500 + tristate "AB5500 thermal monitoring" + depends on AB5500_GPADC + default n + help + If you say yes here you get support for the thermal sensor part + of the AB5500 chip. The driver includes thermal management for + AB5500 die, pcb and RF XTAL temperature. + + This driver can also be built as a module. If so, the module + will be called abx500-temp. + +config SENSORS_DBX500 + tristate "DBX500 thermal monitoring" + depends on MFD_DB8500_PRCMU || MFD_DB5500_PRCMU + default n + help + If you say yes here you get support for the thermal sensor part + of the DBX500 chip. 
The driver includes thermal management for + DBX500 die. + + This driver can also be built as a module. If so, the module + will be called dbx500_temp. + + config SENSORS_ABITUGURU tristate "Abit uGuru (rev 1 & 2)" depends on X86 && DMI && EXPERIMENTAL diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 6d3f11f7181..1e893cbdb83 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -19,6 +19,9 @@ obj-$(CONFIG_SENSORS_W83795) += w83795.o obj-$(CONFIG_SENSORS_W83781D) += w83781d.o obj-$(CONFIG_SENSORS_W83791D) += w83791d.o +obj-$(CONFIG_SENSORS_AB8500) += abx500.o ab8500.o +obj-$(CONFIG_SENSORS_AB5500) += abx500.o ab5500.o +obj-$(CONFIG_SENSORS_DBX500) += dbx500.o obj-$(CONFIG_SENSORS_ABITUGURU) += abituguru.o obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o obj-$(CONFIG_SENSORS_AD7314) += ad7314.o diff --git a/drivers/hwmon/ab5500.c b/drivers/hwmon/ab5500.c new file mode 100644 index 00000000000..cafadeba51c --- /dev/null +++ b/drivers/hwmon/ab5500.c @@ -0,0 +1,212 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Martin Persson <martin.persson@stericsson.com> for + * ST-Ericsson. + * License terms: GNU Gereral Public License (GPL) version 2 + * + * Note: + * + * If/when the AB5500 thermal warning temperature is reached (threshold + * 125C cannot be changed by SW), an interrupt is set and the driver + * notifies user space via a sysfs event. If a shut down is not + * triggered by user space and temperature reaches beyond critical + * limit(130C) pm_power off is called. + * + * If/when AB5500 thermal shutdown temperature is reached a hardware + * shutdown of the AB5500 will occur. 
+ */ + +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/hwmon.h> +#include <linux/sysfs.h> +#include <linux/hwmon-sysfs.h> +#include <linux/platform_device.h> +#include <linux/mfd/abx500/ab5500-gpadc.h> +#include <linux/mfd/abx500/ab5500-bm.h> +#include "abx500.h" +#include <asm/mach-types.h> + +/* AB5500 driver monitors GPADC - XTAL_TEMP, PCB_TEMP, + * BTEMP_BALL, BAT_CTRL and DIE_TEMP + */ +#define NUM_MONITORED_SENSORS 5 + +#define SHUTDOWN_AUTO_MIN_LIMIT -25 +#define SHUTDOWN_AUTO_MAX_LIMIT 130 + +static int ab5500_output_convert(int val, u8 sensor) +{ + int res = val; + /* GPADC returns die temperature in Celsius + * convert it to millidegree celsius + */ + if (sensor == DIE_TEMP) + res = val * 1000; + + return res; +} + +static int ab5500_read_sensor(struct abx500_temp *data, u8 sensor) +{ + int val; + /* + * Special treatment for BAT_CTRL node, since this + * temperature measurement is more complex than just + * an ADC readout + */ + if (sensor == BAT_CTRL) + val = ab5500_btemp_get_batctrl_temp(data->ab5500_btemp); + else + val = ab5500_gpadc_convert(data->ab5500_gpadc, sensor); + + if (val < 0) + return val; + else + return ab5500_output_convert(val, sensor); +} + +static ssize_t ab5500_show_name(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + return sprintf(buf, "ab5500\n"); +} + +static ssize_t ab5500_show_label(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + char *name; + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + int index = attr->index; + + /* + * Make sure these labels correspond to the attribute indexes + * used when calling SENSOR_DEVICE_ATRR. 
+ * Temperature sensors outside ab8500 (read via GPADC) are marked + * with prefix ext_ + */ + switch (index) { + case 1: + name = "xtal_temp"; + break; + case 2: + name = "pcb_temp"; + break; + case 3: + name = "bat_temp"; + break; + case 4: + name = "bat_ctrl"; + break; + case 5: + name = "ab5500"; + break; + default: + return -EINVAL; + } + return sprintf(buf, "%s\n", name); +} + +static int temp_shutdown_trig(int mux) +{ + pm_power_off(); + return 0; +} + +static int ab5500_temp_shutdown_auto(struct abx500_temp *data) +{ + int ret; + struct adc_auto_input *auto_ip; + + auto_ip = kzalloc(sizeof(struct adc_auto_input), GFP_KERNEL); + if (!auto_ip) { + dev_err(&data->pdev->dev, "failed to allocate memory\n"); + return -ENOMEM; + } + + auto_ip->mux = DIE_TEMP; + auto_ip->freq = MS500; + /* + * As per product specification, voltage decreases as + * temperature increases. Hence the min and max values + * should be passed in reverse order. + */ + auto_ip->min = SHUTDOWN_AUTO_MAX_LIMIT; + auto_ip->max = SHUTDOWN_AUTO_MIN_LIMIT; + auto_ip->auto_adc_callback = temp_shutdown_trig; + data->gpadc_auto = auto_ip; + ret = ab5500_gpadc_convert_auto(data->ab5500_gpadc, + data->gpadc_auto); + if (ret < 0) + kfree(auto_ip); + + return ret; +} + +static int ab5500_is_visible(struct attribute *attr, int n) +{ + return attr->mode; +} + +static int ab5500_temp_irq_handler(int irq, struct abx500_temp *data) +{ + /* + * Make sure the magic numbers below corresponds to the node + * used for AB5500 thermal warning from HW. 
+ */ + mutex_lock(&data->lock); + data->crit_alarm[4] = 1; + mutex_unlock(&data->lock); + sysfs_notify(&data->pdev->dev.kobj, NULL, "temp5_crit_alarm"); + dev_info(&data->pdev->dev, "ABX500 thermal warning," + " power off system now!\n"); + return 0; +} + +int __init ab5500_hwmon_init(struct abx500_temp *data) +{ + int err; + + data->ab5500_gpadc = ab5500_gpadc_get("ab5500-adc.0"); + if (IS_ERR(data->ab5500_gpadc)) + return PTR_ERR(data->ab5500_gpadc); + + data->ab5500_btemp = ab5500_btemp_get(); + if (IS_ERR(data->ab5500_btemp)) + return PTR_ERR(data->ab5500_btemp); + + err = ab5500_temp_shutdown_auto(data); + if (err < 0) { + dev_err(&data->pdev->dev, "Failed to register" + " auto trigger(%d)\n", err); + return err; + } + + /* + * Setup HW defined data. + * + * Reference hardware (HREF): + * + * XTAL_TEMP, PCB_TEMP, BTEMP_BALL refer to millivolts and + * BAT_CTRL and DIE_TEMP refer to millidegrees + * + * Make sure indexes correspond to the attribute indexes + * used when calling SENSOR_DEVICE_ATRR + */ + data->gpadc_addr[0] = XTAL_TEMP; + data->gpadc_addr[1] = PCB_TEMP; + data->gpadc_addr[2] = BTEMP_BALL; + data->gpadc_addr[3] = BAT_CTRL; + data->gpadc_addr[4] = DIE_TEMP; + data->monitored_sensors = NUM_MONITORED_SENSORS; + + data->ops.read_sensor = ab5500_read_sensor; + data->ops.irq_handler = ab5500_temp_irq_handler; + data->ops.show_name = ab5500_show_name; + data->ops.show_label = ab5500_show_label; + data->ops.is_visible = ab5500_is_visible; + + return 0; +} diff --git a/drivers/hwmon/ab8500.c b/drivers/hwmon/ab8500.c new file mode 100644 index 00000000000..65a0f381d01 --- /dev/null +++ b/drivers/hwmon/ab8500.c @@ -0,0 +1,184 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Martin Persson <martin.persson@stericsson.com> for + * ST-Ericsson. 
+ * License terms: GNU Gereral Public License (GPL) version 2 + * + * Note: + * + * If/when the AB8500 thermal warning temperature is reached (threshold + * cannot be changed by SW), an interrupt is set and the driver + * notifies user space via a sysfs event. If a shut down is not + * triggered by user space within a certain time frame, + * pm_power off is called. + * + * If/when AB8500 thermal shutdown temperature is reached a hardware + * shutdown of the AB8500 will occur. + */ + +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/hwmon.h> +#include <linux/sysfs.h> +#include <linux/hwmon-sysfs.h> +#include <linux/platform_device.h> +#include <linux/mfd/abx500/ab8500-gpadc.h> +#include <linux/mfd/abx500/ab8500-bm.h> +#include "abx500.h" +#include <asm/mach-types.h> + +#define DEFAULT_POWER_OFF_DELAY 10000 + +/* + * The driver monitors GPADC - ADC_AUX1, ADC_AUX2, BTEMP_BALL + * and BAT_CTRL. + */ +#define NUM_MONITORED_SENSORS 4 + +static int ab8500_read_sensor(struct abx500_temp *data, u8 sensor) +{ + int val; + /* + * Special treatment for the BAT_CTRL node, since this + * temperature measurement is more complex than just + * an ADC readout + */ + if (sensor == BAT_CTRL) + val = ab8500_btemp_get_batctrl_temp(data->ab8500_btemp); + else + val = ab8500_gpadc_convert(data->ab8500_gpadc, sensor); + + return val; +} + +static void ab8500_thermal_power_off(struct work_struct *work) +{ + struct abx500_temp *data = container_of(work, struct abx500_temp, + power_off_work.work); + + dev_warn(&data->pdev->dev, "Power off due to AB8500 thermal warning\n"); + pm_power_off(); +} + +static ssize_t ab8500_show_name(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + return sprintf(buf, "ab8500\n"); +} + +static ssize_t ab8500_show_label(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + char *name; + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + int index = attr->index; + + /* + * Make sure these 
labels correspond to the attribute indexes + * used when calling SENSOR_DEVICE_ATRR. + * Temperature sensors outside ab8500 (read via GPADC) are marked + * with prefix ext_ + */ + switch (index) { + case 1: + name = "ext_rtc_xtal"; + break; + case 2: + name = "ext_db8500"; + break; + case 3: + name = "bat_temp"; + break; + case 4: + name = "bat_ctrl"; + break; + case 5: + name = "ab8500"; + break; + default: + return -EINVAL; + } + return sprintf(buf, "%s\n", name); +} + +static int ab8500_is_visible(struct attribute *attr, int n) +{ + if (!strcmp(attr->name, "temp5_input") || + !strcmp(attr->name, "temp5_min") || + !strcmp(attr->name, "temp5_max") || + !strcmp(attr->name, "temp5_max_hyst") || + !strcmp(attr->name, "temp5_min_alarm") || + !strcmp(attr->name, "temp5_max_alarm") || + !strcmp(attr->name, "temp5_max_hyst_alarm")) + return 0; + + return attr->mode; +} + +static int ab8500_temp_irq_handler(int irq, struct abx500_temp *data) +{ + unsigned long delay_in_jiffies; + /* + * Make sure the magic numbers below corresponds to the node + * used for AB8500 thermal warning from HW. + */ + mutex_lock(&data->lock); + data->crit_alarm[4] = 1; + mutex_unlock(&data->lock); + + hwmon_notify(data->crit_alarm[4], NULL); + sysfs_notify(&data->pdev->dev.kobj, NULL, "temp5_crit_alarm"); + dev_info(&data->pdev->dev, "AB8500 thermal warning," + " power off in %lu s\n", data->power_off_delay); + delay_in_jiffies = msecs_to_jiffies(data->power_off_delay); + schedule_delayed_work(&data->power_off_work, delay_in_jiffies); + return 0; +} + +int __init ab8500_hwmon_init(struct abx500_temp *data) +{ + data->ab8500_gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); + if (IS_ERR(data->ab8500_gpadc)) + return PTR_ERR(data->ab8500_gpadc); + + data->ab8500_btemp = ab8500_btemp_get(); + if (IS_ERR(data->ab8500_btemp)) + return PTR_ERR(data->ab8500_btemp); + + INIT_DELAYED_WORK(&data->power_off_work, ab8500_thermal_power_off); + + /* + * Setup HW defined data. 
+ * + * Reference hardware (HREF): + * + * GPADC - ADC_AUX1, connected to NTC R2148 next to RTC_XTAL on HREF + * GPADC - ADC_AUX2, connected to NTC R2150 near DB8500 on HREF + * Hence, temp#_min/max/max_hyst refer to millivolts and not + * millidegrees + * This is not the case for BAT_CTRL where millidegrees is used + * + * HREF HW does not support reading AB8500 temperature. BUT an + * AB8500 IRQ will be launched if die crit temp limit is reached. + * + * Make sure indexes correspond to the attribute indexes + * used when calling SENSOR_DEVICE_ATRR + */ + data->gpadc_addr[0] = ADC_AUX1; + data->gpadc_addr[1] = ADC_AUX2; + data->gpadc_addr[2] = BTEMP_BALL; + data->gpadc_addr[3] = BAT_CTRL; + data->gpadc_addr[4] = DIE_TEMP; + data->power_off_delay = DEFAULT_POWER_OFF_DELAY; + data->monitored_sensors = NUM_MONITORED_SENSORS; + + data->ops.read_sensor = ab8500_read_sensor; + data->ops.irq_handler = ab8500_temp_irq_handler; + data->ops.show_name = ab8500_show_name; + data->ops.show_label = ab8500_show_label; + data->ops.is_visible = ab8500_is_visible; + + return 0; +} diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c new file mode 100644 index 00000000000..7aa9994c54a --- /dev/null +++ b/drivers/hwmon/abx500.c @@ -0,0 +1,698 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Martin Persson <martin.persson@stericsson.com> for + * ST-Ericsson. + * License terms: GNU Gereral Public License (GPL) version 2 + * + * Note: + * + * ABX500 does not provide auto ADC, so to monitor the required + * temperatures, a periodic work is used. It is more important + * to not wake up the CPU than to perform this job, hence the use + * of a deferred delay. + * + * A deferred delay for thermal monitor is considered safe because: + * If the chip gets too hot during a sleep state it's most likely + * due to external factors, such as the surrounding temperature. + * I.e. no SW decisions will make any difference. 
+ * + * If/when the ABX500 thermal warning temperature is reached (threshold + * cannot be changed by SW), an interrupt is set and the driver + * notifies user space via a sysfs event. + * + * If/when ABX500 thermal shutdown temperature is reached a hardware + * shutdown of the ABX500 will occur. + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/hwmon.h> +#include <linux/sysfs.h> +#include <linux/hwmon-sysfs.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/jiffies.h> +#include <linux/mutex.h> +#include <linux/pm.h> +#include <asm/mach-types.h> + +#include "abx500.h" + +#define DEFAULT_MONITOR_DELAY 1000 + +/* + * Thresholds are considered inactive if set to 0. + * To avoid confusion for user space applications, + * the temp monitor delay is set to 0 if all thresholds + * are 0. + */ +static bool find_active_thresholds(struct abx500_temp *data) +{ + int i; + for (i = 0; i < data->monitored_sensors; i++) + if (data->max[i] != 0 || data->max_hyst[i] != 0 + || data->min[i] != 0) + return true; + + dev_dbg(&data->pdev->dev, "No active thresholds," + "cancel deferred job (if it exists)" + "and reset temp monitor delay\n"); + cancel_delayed_work_sync(&data->work); + return false; +} + +static inline void schedule_monitor(struct abx500_temp *data) +{ + unsigned long delay_in_jiffies; + delay_in_jiffies = msecs_to_jiffies(data->gpadc_monitor_delay); + schedule_delayed_work(&data->work, delay_in_jiffies); +} + +static inline void gpadc_monitor_exit(struct abx500_temp *data) +{ + cancel_delayed_work_sync(&data->work); +} + +static void gpadc_monitor(struct work_struct *work) +{ + unsigned long delay_in_jiffies; + int val, i, ret; + /* Container for alarm node name */ + char alarm_node[30]; + + bool updated_min_alarm = false; + bool updated_max_alarm = false; + bool updated_max_hyst_alarm = false; + struct abx500_temp *data = container_of(work, struct 
abx500_temp, + work.work); + + for (i = 0; i < data->monitored_sensors; i++) { + /* Thresholds are considered inactive if set to 0 */ + if (data->max[i] == 0 && data->max_hyst[i] == 0 + && data->min[i] == 0) + continue; + + val = data->ops.read_sensor(data, data->gpadc_addr[i]); + if (val < 0) { + dev_err(&data->pdev->dev, "GPADC read failed\n"); + continue; + } + + mutex_lock(&data->lock); + if (data->min[i] != 0) { + if (val < data->min[i]) { + if (data->min_alarm[i] == 0) { + data->min_alarm[i] = 1; + updated_min_alarm = true; + } + } else { + if (data->min_alarm[i] == 1) { + data->min_alarm[i] = 0; + updated_min_alarm = true; + } + } + + } + if (data->max[i] != 0) { + if (val > data->max[i]) { + if (data->max_alarm[i] == 0) { + data->max_alarm[i] = 1; + updated_max_alarm = true; + } + } else { + if (data->max_alarm[i] == 1) { + data->max_alarm[i] = 0; + updated_max_alarm = true; + } + } + + } + if (data->max_hyst[i] != 0) { + if (val > data->max_hyst[i]) { + if (data->max_hyst_alarm[i] == 0) { + data->max_hyst_alarm[i] = 1; + updated_max_hyst_alarm = true; + } + } else { + if (data->max_hyst_alarm[i] == 1) { + data->max_hyst_alarm[i] = 0; + updated_max_hyst_alarm = true; + } + } + } + mutex_unlock(&data->lock); + + /* hwmon attr index starts at 1, thus "i+1" below */ + if (updated_min_alarm) { + ret = snprintf(alarm_node, 16, "temp%d_min_alarm", + (i + 1)); + if (ret < 0) { + dev_err(&data->pdev->dev, + "Unable to update alarm node (%d)", + ret); + break; + } + sysfs_notify(&data->pdev->dev.kobj, NULL, alarm_node); + } + if (updated_max_alarm) { + ret = snprintf(alarm_node, 16, "temp%d_max_alarm", + (i + 1)); + if (ret < 0) { + dev_err(&data->pdev->dev, + "Unable to update alarm node (%d)", + ret); + break; + } + hwmon_notify(data->max_alarm[i], NULL); + sysfs_notify(&data->pdev->dev.kobj, NULL, alarm_node); + } + if (updated_max_hyst_alarm) { + ret = snprintf(alarm_node, 21, "temp%d_max_hyst_alarm", + (i + 1)); + if (ret < 0) { + dev_err(&data->pdev->dev, + 
"Unable to update alarm node (%d)", + ret); + break; + } + sysfs_notify(&data->pdev->dev.kobj, NULL, alarm_node); + } + } + delay_in_jiffies = msecs_to_jiffies(data->gpadc_monitor_delay); + schedule_delayed_work(&data->work, delay_in_jiffies); +} + +static ssize_t set_temp_monitor_delay(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + int res; + unsigned long delay_in_s; + struct abx500_temp *data = dev_get_drvdata(dev); + + res = strict_strtoul(buf, 10, &delay_in_s); + if (res < 0) + return res; + + mutex_lock(&data->lock); + data->gpadc_monitor_delay = delay_in_s * 1000; + + if (find_active_thresholds(data)) + schedule_monitor(data); + + mutex_unlock(&data->lock); + + return count; +} + +static ssize_t set_temp_power_off_delay(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + int res; + unsigned long delay_in_s; + struct abx500_temp *data = dev_get_drvdata(dev); + + res = strict_strtoul(buf, 10, &delay_in_s); + if (res < 0) + return res; + + mutex_lock(&data->lock); + data->power_off_delay = delay_in_s * 1000; + mutex_unlock(&data->lock); + + return count; +} + +static ssize_t show_temp_monitor_delay(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct abx500_temp *data = dev_get_drvdata(dev); + /* return time in s, not ms */ + return sprintf(buf, "%lu\n", (data->gpadc_monitor_delay) / 1000); +} + +static ssize_t show_temp_power_off_delay(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct abx500_temp *data = dev_get_drvdata(dev); + /* return time in s, not ms */ + return sprintf(buf, "%lu\n", (data->power_off_delay) / 1000); +} + +/* HWMON sysfs interface */ +static ssize_t show_name(struct device *dev, struct device_attribute *devattr, + char *buf) +{ + /* + * To avoid confusion between sensor label and chip name, the function + * "show_label" is not used to return the chip name. 
+ */ + struct abx500_temp *data = dev_get_drvdata(dev); + return data->ops.show_name(dev, devattr, buf); +} + +static ssize_t show_label(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct abx500_temp *data = dev_get_drvdata(dev); + return data->ops.show_label(dev, devattr, buf); +} + +static ssize_t show_input(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + int val; + struct abx500_temp *data = dev_get_drvdata(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + /* hwmon attr index starts at 1, thus "attr->index-1" below */ + u8 gpadc_addr = data->gpadc_addr[attr->index - 1]; + + val = data->ops.read_sensor(data, gpadc_addr); + if (val < 0) + dev_err(&data->pdev->dev, "GPADC read failed\n"); + + return sprintf(buf, "%d\n", val); +} + +/* set functions (RW nodes) */ +static ssize_t set_min(struct device *dev, struct device_attribute *devattr, + const char *buf, size_t count) +{ + unsigned long val; + struct abx500_temp *data = dev_get_drvdata(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + int res = strict_strtoul(buf, 10, &val); + if (res < 0) + return res; + + mutex_lock(&data->lock); + /* + * Threshold is considered inactive if set to 0 + * hwmon attr index starts at 1, thus "attr->index-1" below + */ + if (val == 0) + data->min_alarm[attr->index - 1] = 0; + + data->min[attr->index - 1] = val; + + if (val == 0) + (void) find_active_thresholds(data); + else + schedule_monitor(data); + + mutex_unlock(&data->lock); + + return count; +} + +static ssize_t set_max(struct device *dev, struct device_attribute *devattr, + const char *buf, size_t count) +{ + unsigned long val; + struct abx500_temp *data = dev_get_drvdata(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + int res = strict_strtoul(buf, 10, &val); + if (res < 0) + return res; + + mutex_lock(&data->lock); + /* + * Threshold is considered inactive if set to 0 + * hwmon attr index 
starts at 1, thus "attr->index-1" below + */ + if (val == 0) + data->max_alarm[attr->index - 1] = 0; + + data->max[attr->index - 1] = val; + + if (val == 0) + (void) find_active_thresholds(data); + else + schedule_monitor(data); + + mutex_unlock(&data->lock); + + return count; +} + +static ssize_t set_max_hyst(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + unsigned long val; + struct abx500_temp *data = dev_get_drvdata(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + int res = strict_strtoul(buf, 10, &val); + if (res < 0) + return res; + + mutex_lock(&data->lock); + /* + * Threshold is considered inactive if set to 0 + * hwmon attr index starts at 1, thus "attr->index-1" below + */ + if (val == 0) + data->max_hyst_alarm[attr->index - 1] = 0; + + data->max_hyst[attr->index - 1] = val; + + if (val == 0) + (void) find_active_thresholds(data); + else + schedule_monitor(data); + + mutex_unlock(&data->lock); + + return count; +} + +/* + * show functions (RO nodes) + */ +static ssize_t show_min(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct abx500_temp *data = dev_get_drvdata(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + /* hwmon attr index starts at 1, thus "attr->index-1" below */ + return sprintf(buf, "%ld\n", data->min[attr->index - 1]); +} + +static ssize_t show_max(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct abx500_temp *data = dev_get_drvdata(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + /* hwmon attr index starts at 1, thus "attr->index-1" below */ + return sprintf(buf, "%ld\n", data->max[attr->index - 1]); +} + +static ssize_t show_max_hyst(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct abx500_temp *data = dev_get_drvdata(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + /* hwmon attr index starts at 
1, thus "attr->index-1" below */ + return sprintf(buf, "%ld\n", data->max_hyst[attr->index - 1]); +} + +/* Alarms */ +static ssize_t show_min_alarm(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct abx500_temp *data = dev_get_drvdata(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + /* hwmon attr index starts at 1, thus "attr->index-1" below */ + return sprintf(buf, "%ld\n", data->min_alarm[attr->index - 1]); +} + +static ssize_t show_max_alarm(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct abx500_temp *data = dev_get_drvdata(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + /* hwmon attr index starts at 1, thus "attr->index-1" below */ + return sprintf(buf, "%ld\n", data->max_alarm[attr->index - 1]); +} + +static ssize_t show_max_hyst_alarm(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct abx500_temp *data = dev_get_drvdata(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + /* hwmon attr index starts at 1, thus "attr->index-1" below */ + return sprintf(buf, "%ld\n", data->max_hyst_alarm[attr->index - 1]); +} + +static ssize_t show_crit_alarm(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct abx500_temp *data = dev_get_drvdata(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + /* hwmon attr index starts at 1, thus "attr->index-1" below */ + return sprintf(buf, "%ld\n", data->crit_alarm[attr->index - 1]); +} + +static mode_t abx500_attrs_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct abx500_temp *data = dev_get_drvdata(dev); + return data->ops.is_visible(a, n); +} + +static SENSOR_DEVICE_ATTR(temp_monitor_delay, S_IRUGO | S_IWUSR, + show_temp_monitor_delay, set_temp_monitor_delay, 0); +static SENSOR_DEVICE_ATTR(temp_power_off_delay, S_IRUGO | S_IWUSR, + 
show_temp_power_off_delay, + set_temp_power_off_delay, 0); + +/* Chip name, required by hwmon*/ +static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0); + +/* GPADC - SENSOR1 */ +static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_label, NULL, 1); +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_input, NULL, 1); +static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_min, set_min, 1); +static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_max, set_max, 1); +static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, + show_max_hyst, set_max_hyst, 1); +static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_min_alarm, NULL, 1); +static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_max_alarm, NULL, 1); +static SENSOR_DEVICE_ATTR(temp1_max_hyst_alarm, S_IRUGO, + show_max_hyst_alarm, NULL, 1); + +/* GPADC - SENSOR2 */ +static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, show_label, NULL, 2); +static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_input, NULL, 2); +static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_min, set_min, 2); +static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max, set_max, 2); +static SENSOR_DEVICE_ATTR(temp2_max_hyst, S_IWUSR | S_IRUGO, + show_max_hyst, set_max_hyst, 2); +static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_min_alarm, NULL, 2); +static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_max_alarm, NULL, 2); +static SENSOR_DEVICE_ATTR(temp2_max_hyst_alarm, S_IRUGO, + show_max_hyst_alarm, NULL, 2); + +/* GPADC - SENSOR3 */ +static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, show_label, NULL, 3); +static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_input, NULL, 3); +static SENSOR_DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min, set_min, 3); +static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max, set_max, 3); +static SENSOR_DEVICE_ATTR(temp3_max_hyst, S_IWUSR | S_IRUGO, + show_max_hyst, set_max_hyst, 3); +static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_min_alarm, 
NULL, 3); +static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_max_alarm, NULL, 3); +static SENSOR_DEVICE_ATTR(temp3_max_hyst_alarm, S_IRUGO, + show_max_hyst_alarm, NULL, 3); + +/* GPADC - SENSOR4 */ +static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO, show_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_input, NULL, 4); +static SENSOR_DEVICE_ATTR(temp4_min, S_IWUSR | S_IRUGO, show_min, set_min, 4); +static SENSOR_DEVICE_ATTR(temp4_max, S_IWUSR | S_IRUGO, show_max, set_max, 4); +static SENSOR_DEVICE_ATTR(temp4_max_hyst, S_IWUSR | S_IRUGO, + show_max_hyst, set_max_hyst, 4); +static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_min_alarm, NULL, 4); +static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_max_alarm, NULL, 4); +static SENSOR_DEVICE_ATTR(temp4_max_hyst_alarm, S_IRUGO, + show_max_hyst_alarm, NULL, 4); + +/* GPADC - SENSOR5 */ +static SENSOR_DEVICE_ATTR(temp5_label, S_IRUGO, show_label, NULL, 5); +static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_input, NULL, 5); +static SENSOR_DEVICE_ATTR(temp5_min, S_IWUSR | S_IRUGO, show_min, set_min, 5); +static SENSOR_DEVICE_ATTR(temp5_max, S_IWUSR | S_IRUGO, show_max, set_max, 5); +static SENSOR_DEVICE_ATTR(temp5_max_hyst, S_IWUSR | S_IRUGO, + show_max_hyst, set_max_hyst, 5); +static SENSOR_DEVICE_ATTR(temp5_min_alarm, S_IRUGO, show_min_alarm, NULL, 5); +static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO, show_max_alarm, NULL, 5); +static SENSOR_DEVICE_ATTR(temp5_max_hyst_alarm, S_IRUGO, + show_max_hyst_alarm, NULL, 5); +static SENSOR_DEVICE_ATTR(temp5_crit_alarm, S_IRUGO, + show_crit_alarm, NULL, 5); + +struct attribute *abx500_temp_attributes[] = { + &sensor_dev_attr_name.dev_attr.attr, + &sensor_dev_attr_temp_monitor_delay.dev_attr.attr, + &sensor_dev_attr_temp_power_off_delay.dev_attr.attr, + /* GPADC SENSOR1 */ + &sensor_dev_attr_temp1_label.dev_attr.attr, + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_min.dev_attr.attr, + 
&sensor_dev_attr_temp1_max.dev_attr.attr, + &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, + &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, + &sensor_dev_attr_temp1_max_hyst_alarm.dev_attr.attr, + /* GPADC SENSOR2 */ + &sensor_dev_attr_temp2_label.dev_attr.attr, + &sensor_dev_attr_temp2_input.dev_attr.attr, + &sensor_dev_attr_temp2_min.dev_attr.attr, + &sensor_dev_attr_temp2_max.dev_attr.attr, + &sensor_dev_attr_temp2_max_hyst.dev_attr.attr, + &sensor_dev_attr_temp2_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, + &sensor_dev_attr_temp2_max_hyst_alarm.dev_attr.attr, + /* GPADC SENSOR3 */ + &sensor_dev_attr_temp3_label.dev_attr.attr, + &sensor_dev_attr_temp3_input.dev_attr.attr, + &sensor_dev_attr_temp3_min.dev_attr.attr, + &sensor_dev_attr_temp3_max.dev_attr.attr, + &sensor_dev_attr_temp3_max_hyst.dev_attr.attr, + &sensor_dev_attr_temp3_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp3_max_alarm.dev_attr.attr, + &sensor_dev_attr_temp3_max_hyst_alarm.dev_attr.attr, + /* GPADC SENSOR4 */ + &sensor_dev_attr_temp4_label.dev_attr.attr, + &sensor_dev_attr_temp4_input.dev_attr.attr, + &sensor_dev_attr_temp4_min.dev_attr.attr, + &sensor_dev_attr_temp4_max.dev_attr.attr, + &sensor_dev_attr_temp4_max_hyst.dev_attr.attr, + &sensor_dev_attr_temp4_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp4_max_alarm.dev_attr.attr, + &sensor_dev_attr_temp4_max_hyst_alarm.dev_attr.attr, + /* GPADC SENSOR5*/ + &sensor_dev_attr_temp5_label.dev_attr.attr, + &sensor_dev_attr_temp5_input.dev_attr.attr, + &sensor_dev_attr_temp5_min.dev_attr.attr, + &sensor_dev_attr_temp5_max.dev_attr.attr, + &sensor_dev_attr_temp5_max_hyst.dev_attr.attr, + &sensor_dev_attr_temp5_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp5_max_alarm.dev_attr.attr, + &sensor_dev_attr_temp5_max_hyst_alarm.dev_attr.attr, + &sensor_dev_attr_temp5_crit_alarm.dev_attr.attr, + NULL +}; + +static const struct attribute_group abx500_temp_group = { + 
.attrs = abx500_temp_attributes, + .is_visible = abx500_attrs_visible, +}; + +static irqreturn_t abx500_temp_irq_handler(int irq, void *irq_data) +{ + struct platform_device *pdev = irq_data; + struct abx500_temp *data = platform_get_drvdata(pdev); + data->ops.irq_handler(irq, data); + return IRQ_HANDLED; +} + +static int setup_irqs(struct platform_device *pdev) +{ + int ret; + int irq = platform_get_irq_byname(pdev, "ABX500_TEMP_WARM"); + + if (irq < 0) + dev_err(&pdev->dev, "Get irq by name failed\n"); + + ret = request_threaded_irq(irq, NULL, abx500_temp_irq_handler, + IRQF_NO_SUSPEND, "abx500-temp", pdev); + if (ret < 0) + dev_err(&pdev->dev, "Request threaded irq failed (%d)\n", ret); + + return ret; +} + +static int __devinit abx500_temp_probe(struct platform_device *pdev) +{ + struct abx500_temp *data; + int err; + + data = kzalloc(sizeof(struct abx500_temp), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->pdev = pdev; + mutex_init(&data->lock); + + /* Chip specific initialization */ + if (!machine_is_u5500()) + err = ab8500_hwmon_init(data); + else + err = ab5500_hwmon_init(data); + if (err < 0) { + dev_err(&pdev->dev, "abx500 init failed"); + goto exit; + } + + data->hwmon_dev = hwmon_device_register(&pdev->dev); + if (IS_ERR(data->hwmon_dev)) { + err = PTR_ERR(data->hwmon_dev); + dev_err(&pdev->dev, "Class registration failed (%d)\n", err); + goto exit; + } + + INIT_DELAYED_WORK_DEFERRABLE(&data->work, gpadc_monitor); + data->gpadc_monitor_delay = DEFAULT_MONITOR_DELAY; + + platform_set_drvdata(pdev, data); + + err = sysfs_create_group(&pdev->dev.kobj, &abx500_temp_group); + if (err < 0) { + dev_err(&pdev->dev, "Create sysfs group failed (%d)\n", err); + goto exit_platform_data; + } + + err = setup_irqs(pdev); + if (err < 0) { + dev_err(&pdev->dev, "irq setup failed (%d)\n", err); + goto exit_sysfs_group; + } + return 0; + +exit_sysfs_group: + sysfs_remove_group(&pdev->dev.kobj, &abx500_temp_group); +exit_platform_data: + 
hwmon_device_unregister(data->hwmon_dev); + platform_set_drvdata(pdev, NULL); +exit: + kfree(data->gpadc_auto); + kfree(data); + return err; +} + +static int __devexit abx500_temp_remove(struct platform_device *pdev) +{ + struct abx500_temp *data = platform_get_drvdata(pdev); + + gpadc_monitor_exit(data); + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&pdev->dev.kobj, &abx500_temp_group); + platform_set_drvdata(pdev, NULL); + kfree(data->gpadc_auto); + kfree(data); + return 0; +} + +/* No action required in suspend/resume, thus the lack of functions */ +static struct platform_driver abx500_temp_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "abx500-temp", + }, + .probe = abx500_temp_probe, + .remove = __devexit_p(abx500_temp_remove), +}; + +static int __init abx500_temp_init(void) +{ + return platform_driver_register(&abx500_temp_driver); +} + +static void __exit abx500_temp_exit(void) +{ + platform_driver_unregister(&abx500_temp_driver); +} + +MODULE_AUTHOR("Martin Persson <martin.persson@stericsson.com>"); +MODULE_DESCRIPTION("ABX500 temperature driver"); +MODULE_LICENSE("GPL"); + +module_init(abx500_temp_init) +module_exit(abx500_temp_exit) diff --git a/drivers/hwmon/abx500.h b/drivers/hwmon/abx500.h new file mode 100644 index 00000000000..9fe28dac28f --- /dev/null +++ b/drivers/hwmon/abx500.h @@ -0,0 +1,95 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * License terms: GNU General Public License v2 + * Author: Martin Persson <martin.persson@stericsson.com> + */ + +#ifndef _ABX500_H +#define _ABX500_H + +#define NUM_SENSORS 5 + +struct ab8500_gpadc; +struct ab5500_gpadc; +struct ab8500_btemp; +struct ab5500_btemp; +struct adc_auto_input; +struct abx500_temp; + +/** + * struct abx500_temp_ops - abx500 chip specific ops + * @read_sensor: reads gpadc output + * @irq_handler: irq handler + * @show_name: hwmon device name + * @show_label: hwmon attribute label + * @is_visible: is attribute visible + */ +struct abx500_temp_ops { + int 
(*read_sensor)(struct abx500_temp *, u8); + int (*irq_handler)(int, struct abx500_temp *); + ssize_t (*show_name)(struct device *, + struct device_attribute *, char *); + ssize_t (*show_label) (struct device *, + struct device_attribute *, char *); + int (*is_visible)(struct attribute *, int); +}; + +/** + * struct abx500_temp - representation of temp mon device + * @pdev: platform device + * @hwmon_dev: hwmon device + * @ab8500_gpadc: gpadc interface for ab8500 + * @ab5500_gpadc: gpadc interface for ab5500 + * @btemp: battery temperature interface for ab8500 + * @adc_auto_input: gpadc auto trigger + * @gpadc_addr: gpadc channel address + * @temp: sensor temperature input value + * @min: sensor temperature min value + * @max: sensor temperature max value + * @max_hyst: sensor temperature hysteresis value for max limit + * @crit: sensor temperature critical value + * @min_alarm: sensor temperature min alarm + * @max_alarm: sensor temperature max alarm + * @max_hyst_alarm: sensor temperature hysteresis alarm + * @crit_alarm: sensor temperature critical value alarm + * @work: delayed work scheduled to monitor temperature periodically + * @power_off_work: delayed work scheduled to power off the system + when critical temperature is reached + * @lock: mutex + * @gpadc_monitor_delay: delay between temperature readings in ms + * @power_off_delay: delay before power off in ms + * @monitored_sensors: number of monitored sensors + */ +struct abx500_temp { + struct platform_device *pdev; + struct device *hwmon_dev; + struct ab8500_gpadc *ab8500_gpadc; + struct ab5500_gpadc *ab5500_gpadc; + struct ab8500_btemp *ab8500_btemp; + struct ab5500_btemp *ab5500_btemp; + struct adc_auto_input *gpadc_auto; + struct abx500_temp_ops ops; + u8 gpadc_addr[NUM_SENSORS]; + unsigned long temp[NUM_SENSORS]; + unsigned long min[NUM_SENSORS]; + unsigned long max[NUM_SENSORS]; + unsigned long max_hyst[NUM_SENSORS]; + unsigned long crit[NUM_SENSORS]; + unsigned long min_alarm[NUM_SENSORS]; + 
unsigned long max_alarm[NUM_SENSORS]; + unsigned long max_hyst_alarm[NUM_SENSORS]; + unsigned long crit_alarm[NUM_SENSORS]; + struct delayed_work work; + struct delayed_work power_off_work; + struct mutex lock; + /* Delay (ms) between temperature readings */ + unsigned long gpadc_monitor_delay; + /* Delay (ms) before power off */ + unsigned long power_off_delay; + int monitored_sensors; +}; + +int ab8500_hwmon_init(struct abx500_temp *data) __init; +int ab5500_hwmon_init(struct abx500_temp *data) __init; + +#endif /* _ABX500_H */ diff --git a/drivers/hwmon/dbx500.c b/drivers/hwmon/dbx500.c new file mode 100644 index 00000000000..c034b48f8dd --- /dev/null +++ b/drivers/hwmon/dbx500.c @@ -0,0 +1,402 @@ +/* + * Copyright (C) ST-Ericsson SA 2010. All rights reserved. + * This code is ST-Ericsson proprietary and confidential. + * Any use of the code for whatever purpose is subject to + * specific written permission of ST-Ericsson SA. + * + * Author: WenHai Fang <wenhai.h.fang@stericsson.com> for + * ST-Ericsson. 
+ * License terms: GNU General Public License (GPL) version 2 + * + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/platform_device.h> +#include <linux/mfd/dbx500-prcmu.h> +#include <linux/hwmon.h> +#include <linux/sysfs.h> +#include <linux/hwmon-sysfs.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/jiffies.h> +#include <linux/mutex.h> +#include <linux/pm.h> +#include <linux/io.h> +#include <mach/hardware.h> + +/* + * Default measure period to 0xFF x cycle32k + */ +#define DEFAULT_MEASURE_TIME 0xFF + +/* + * Default critical sensor temperature + */ +#define DEFAULT_CRITICAL_TEMP 85 + +/* This driver monitors DB thermal*/ +#define NUM_SENSORS 1 + +struct dbx500_temp { + struct platform_device *pdev; + struct device *hwmon_dev; + unsigned char min[NUM_SENSORS]; + unsigned char max[NUM_SENSORS]; + unsigned char crit[NUM_SENSORS]; + unsigned char min_alarm[NUM_SENSORS]; + unsigned char max_alarm[NUM_SENSORS]; + unsigned short measure_time; + bool monitoring_active; + struct mutex lock; +}; + +static inline void start_temp_monitoring(struct dbx500_temp *data, + const int index) +{ + unsigned int i; + + /* determine if there are any sensors worth monitoring */ + for (i = 0; i < NUM_SENSORS; i++) + if (data->min[i] || data->max[i]) + goto start_monitoring; + + return; + +start_monitoring: + /* kick off the monitor job */ + data->min_alarm[index] = 0; + data->max_alarm[index] = 0; + + (void) prcmu_start_temp_sense(data->measure_time); + data->monitoring_active = true; +} + +static inline void stop_temp_monitoring(struct dbx500_temp *data) +{ + if (data->monitoring_active) { + (void) prcmu_stop_temp_sense(); + data->monitoring_active = false; + } +} + +/* HWMON sysfs interface */ +static ssize_t show_name(struct device *dev, struct device_attribute *devattr, + char *buf) +{ + return sprintf(buf, "dbx500\n"); +} + +static ssize_t show_label(struct device *dev, struct device_attribute *devattr, 
+ char *buf) +{ + return show_name(dev, devattr, buf); +} + +/* set functions (RW nodes) */ +static ssize_t set_min(struct device *dev, struct device_attribute *devattr, + const char *buf, size_t count) +{ + unsigned long val; + struct dbx500_temp *data = dev_get_drvdata(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + int res = strict_strtoul(buf, 10, &val); + if (res < 0) + return res; + + mutex_lock(&data->lock); + val &= 0xFF; + if (val > data->max[attr->index - 1]) + val = data->max[attr->index - 1]; + + data->min[attr->index - 1] = val; + + stop_temp_monitoring(data); + + (void) prcmu_config_hotmon(data->min[attr->index - 1], + data->max[attr->index - 1]); + + start_temp_monitoring(data, (attr->index - 1)); + + mutex_unlock(&data->lock); + return count; +} + +static ssize_t set_max(struct device *dev, struct device_attribute *devattr, + const char *buf, size_t count) +{ + unsigned long val; + struct dbx500_temp *data = dev_get_drvdata(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + int res = strict_strtoul(buf, 10, &val); + if (res < 0) + return res; + + mutex_lock(&data->lock); + val &= 0xFF; + if (val < data->min[attr->index - 1]) + val = data->min[attr->index - 1]; + + data->max[attr->index - 1] = val; + + stop_temp_monitoring(data); + + (void) prcmu_config_hotmon(data->min[attr->index - 1], + data->max[attr->index - 1]); + + start_temp_monitoring(data, (attr->index - 1)); + + mutex_unlock(&data->lock); + + return count; +} + +static ssize_t set_crit(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + unsigned long val; + struct dbx500_temp *data = dev_get_drvdata(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + int res = strict_strtoul(buf, 10, &val); + if (res < 0) + return res; + + mutex_lock(&data->lock); + val &= 0xFF; + data->crit[attr->index - 1] = val; + (void) prcmu_config_hotdog(data->crit[attr->index - 1]); + 
mutex_unlock(&data->lock); + + return count; +} + +/* + * show functions (RO nodes) + * Notice that min/max/crit refer to degrees + */ +static ssize_t show_min(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct dbx500_temp *data = dev_get_drvdata(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + /* hwmon attr index starts at 1, thus "attr->index-1" below */ + return sprintf(buf, "%d\n", data->min[attr->index - 1]); +} + +static ssize_t show_max(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct dbx500_temp *data = dev_get_drvdata(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + /* hwmon attr index starts at 1, thus "attr->index-1" below */ + return sprintf(buf, "%d\n", data->max[attr->index - 1]); +} + +static ssize_t show_crit(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct dbx500_temp *data = dev_get_drvdata(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + /* hwmon attr index starts at 1, thus "attr->index-1" below */ + return sprintf(buf, "%d\n", data->crit[attr->index - 1]); +} + +/* Alarms */ +static ssize_t show_min_alarm(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct dbx500_temp *data = dev_get_drvdata(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + /* hwmon attr index starts at 1, thus "attr->index-1" below */ + return sprintf(buf, "%d\n", data->min_alarm[attr->index - 1]); +} + +static ssize_t show_max_alarm(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct dbx500_temp *data = dev_get_drvdata(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + /* hwmon attr index starts at 1, thus "attr->index-1" below */ + return sprintf(buf, "%d\n", data->max_alarm[attr->index - 1]); +} + +/* Chip name, required by hwmon*/ +static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0); 
+static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_min, set_min, 1); +static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_max, set_max, 1); +static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, + show_crit, set_crit, 1); +static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_label, NULL, 1); +static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_min_alarm, NULL, 1); +static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_max_alarm, NULL, 1); + +static struct attribute *dbx500_temp_attributes[] = { + &sensor_dev_attr_name.dev_attr.attr, + &sensor_dev_attr_temp1_min.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + &sensor_dev_attr_temp1_crit.dev_attr.attr, + &sensor_dev_attr_temp1_label.dev_attr.attr, + &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, + NULL +}; + +static const struct attribute_group dbx500_temp_group = { + .attrs = dbx500_temp_attributes, +}; + +static irqreturn_t prcmu_hotmon_low_irq_handler(int irq, void *irq_data) +{ + struct platform_device *pdev = irq_data; + struct dbx500_temp *data = platform_get_drvdata(pdev); + + mutex_lock(&data->lock); + data->min_alarm[0] = 1; + mutex_unlock(&data->lock); + + sysfs_notify(&pdev->dev.kobj, NULL, "temp1_min_alarm"); + dev_dbg(&pdev->dev, "DBX500 thermal low warning\n"); + return IRQ_HANDLED; +} + +static irqreturn_t prcmu_hotmon_high_irq_handler(int irq, void *irq_data) +{ + struct platform_device *pdev = irq_data; + struct dbx500_temp *data = platform_get_drvdata(pdev); + + mutex_lock(&data->lock); + data->max_alarm[0] = 1; + mutex_unlock(&data->lock); + + hwmon_notify(data->max_alarm[0], NULL); + sysfs_notify(&pdev->dev.kobj, NULL, "temp1_max_alarm"); + + return IRQ_HANDLED; +} + +static int __devinit dbx500_temp_probe(struct platform_device *pdev) +{ + struct dbx500_temp *data; + int err = 0, i; + int irq; + + dev_dbg(&pdev->dev, "dbx500_temp: Function dbx500_temp_probe.\n"); + + data = kzalloc(sizeof(struct 
dbx500_temp), GFP_KERNEL); + if (!data) + return -ENOMEM; + + irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_LOW"); + if (irq < 0) { + dev_err(&pdev->dev, "Get IRQ_HOTMON_LOW failed\n"); + goto exit; + } + + err = request_threaded_irq(irq, NULL, + prcmu_hotmon_low_irq_handler, + IRQF_NO_SUSPEND, + "dbx500_temp_low", pdev); + if (err < 0) { + dev_err(&pdev->dev, "dbx500: Failed allocate HOTMON_LOW.\n"); + goto exit; + } else { + dev_dbg(&pdev->dev, "dbx500: Succeed allocate HOTMON_LOW.\n"); + } + + irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_HIGH"); + if (irq < 0) { + dev_err(&pdev->dev, "Get IRQ_HOTMON_HIGH failed\n"); + goto exit; + } + + err = request_threaded_irq(irq, NULL, + prcmu_hotmon_high_irq_handler, + IRQF_NO_SUSPEND, + "dbx500_temp_high", pdev); + if (err < 0) { + dev_err(&pdev->dev, "dbx500: Failed allocate HOTMON_HIGH.\n"); + goto exit; + } else { + dev_dbg(&pdev->dev, "dbx500: Succeed allocate HOTMON_HIGH.\n"); + } + + data->hwmon_dev = hwmon_device_register(&pdev->dev); + if (IS_ERR(data->hwmon_dev)) { + err = PTR_ERR(data->hwmon_dev); + dev_err(&pdev->dev, "Class registration failed (%d)\n", err); + goto exit; + } + + for (i = 0; i < NUM_SENSORS; i++) { + data->min[i] = 0; + data->max[i] = 0; + data->crit[i] = DEFAULT_CRITICAL_TEMP; + data->min_alarm[i] = 0; + data->max_alarm[i] = 0; + } + + mutex_init(&data->lock); + + data->pdev = pdev; + data->measure_time = DEFAULT_MEASURE_TIME; + data->monitoring_active = false; + + /* set PRCMU to disable platform when we get to the critical temp */ + (void) prcmu_config_hotdog(DEFAULT_CRITICAL_TEMP); + + platform_set_drvdata(pdev, data); + + err = sysfs_create_group(&pdev->dev.kobj, &dbx500_temp_group); + if (err < 0) { + dev_err(&pdev->dev, "Create sysfs group failed (%d)\n", err); + goto exit_platform_data; + } + + return 0; + +exit_platform_data: + platform_set_drvdata(pdev, NULL); +exit: + kfree(data); + return err; +} + +static int __devexit dbx500_temp_remove(struct platform_device *pdev) +{ + 
struct dbx500_temp *data = platform_get_drvdata(pdev); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&pdev->dev.kobj, &dbx500_temp_group); + platform_set_drvdata(pdev, NULL); + kfree(data); + return 0; +} + +/* No action required in suspend/resume, thus the lack of functions */ +static struct platform_driver dbx500_temp_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "dbx500_temp", + }, + .probe = dbx500_temp_probe, + .remove = __devexit_p(dbx500_temp_remove), +}; + +static int __init dbx500_temp_init(void) +{ + return platform_driver_register(&dbx500_temp_driver); +} + +static void __exit dbx500_temp_exit(void) +{ + platform_driver_unregister(&dbx500_temp_driver); +} + +MODULE_AUTHOR("WenHai Fang <wenhai.h.fang@stericsson.com>"); +MODULE_DESCRIPTION("DBX500 temperature driver"); +MODULE_LICENSE("GPL"); + +module_init(dbx500_temp_init) +module_exit(dbx500_temp_exit) diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index c3c471ca202..8957bbac7a7 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -21,6 +21,7 @@ #include <linux/gfp.h> #include <linux/spinlock.h> #include <linux/pci.h> +#include <linux/notifier.h> #define HWMON_ID_PREFIX "hwmon" #define HWMON_ID_FORMAT HWMON_ID_PREFIX "%d" @@ -29,6 +30,8 @@ static struct class *hwmon_class; static DEFINE_IDA(hwmon_ida); +static BLOCKING_NOTIFIER_HEAD(hwmon_notifier_list); + /** * hwmon_device_register - register w/ hwmon * @dev: the device to register @@ -75,6 +78,24 @@ void hwmon_device_unregister(struct device *dev) } EXPORT_SYMBOL_GPL(hwmon_device_unregister); +int hwmon_notifier_register(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&hwmon_notifier_list, nb); +} +EXPORT_SYMBOL(hwmon_notifier_register); + +int hwmon_notifier_unregister(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&hwmon_notifier_list, nb); +} +EXPORT_SYMBOL(hwmon_notifier_unregister); + +void hwmon_notify(unsigned long val, void *v) +{ + 
blocking_notifier_call_chain(&hwmon_notifier_list, val, v); +} +EXPORT_SYMBOL(hwmon_notify); + static void __init hwmon_pci_quirks(void) { #if defined CONFIG_X86 && defined CONFIG_PCI diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index 5267ab93d55..9ddf2c97d26 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -431,7 +431,7 @@ static int read_i2c(struct nmk_i2c_dev *dev) if (timeout == 0) { /* Controller timed out */ - dev_err(&dev->pdev->dev, "read from slave 0x%x timed out\n", + dev_err(&dev->pdev->dev, "Read from Slave 0x%x timed out\n", dev->cli.slave_adr); status = -ETIMEDOUT; } @@ -518,7 +518,7 @@ static int write_i2c(struct nmk_i2c_dev *dev) if (timeout == 0) { /* Controller timed out */ - dev_err(&dev->pdev->dev, "write to slave 0x%x timed out\n", + dev_err(&dev->pdev->dev, "Write to slave 0x%x timed out\n", dev->cli.slave_adr); status = -ETIMEDOUT; } @@ -628,12 +628,8 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap, dev->busy = true; - if (dev->regulator) - regulator_enable(dev->regulator); pm_runtime_get_sync(&dev->pdev->dev); - clk_enable(dev->clk); - status = init_hw(dev); if (status) goto out; @@ -666,10 +662,8 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap, } out: - clk_disable(dev->clk); - pm_runtime_put_sync(&dev->pdev->dev); - if (dev->regulator) - regulator_disable(dev->regulator); + + pm_runtime_put(&dev->pdev->dev); dev->busy = false; @@ -859,9 +853,9 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg) #ifdef CONFIG_PM -static int nmk_i2c_suspend(struct device *dev) + +static int nmk_i2c_suspend(struct platform_device *pdev, pm_message_t state) { - struct platform_device *pdev = to_platform_device(dev); struct nmk_i2c_dev *nmk_i2c = platform_get_drvdata(pdev); if (nmk_i2c->busy) @@ -870,23 +864,53 @@ static int nmk_i2c_suspend(struct device *dev) return 0; } -static int nmk_i2c_resume(struct device *dev) +static int nmk_i2c_suspend_noirq(struct device 
*dev) { + struct nmk_i2c_dev *nmk_i2c = + platform_get_drvdata(to_platform_device(dev)); + + if (nmk_i2c->busy) + return -EBUSY; + return 0; } + #else #define nmk_i2c_suspend NULL -#define nmk_i2c_resume NULL +#define nmk_i2c_suspend_noirq NULL #endif +static int nmk_i2c_runtime_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct nmk_i2c_dev *nmk_i2c = platform_get_drvdata(pdev); + + clk_disable(nmk_i2c->clk); + if (nmk_i2c->regulator) + regulator_disable(nmk_i2c->regulator); + return 0; +} + +static int nmk_i2c_runtime_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct nmk_i2c_dev *nmk_i2c = platform_get_drvdata(pdev); + + if (nmk_i2c->regulator) + regulator_enable(nmk_i2c->regulator); + clk_enable(nmk_i2c->clk); + return 0; +} + /* * We use noirq so that we suspend late and resume before the wakeup interrupt * to ensure that we do the !pm_runtime_suspended() check in resume before * there has been a regular pm runtime resume (via pm_runtime_get_sync()). */ static const struct dev_pm_ops nmk_i2c_pm = { - .suspend_noirq = nmk_i2c_suspend, - .resume_noirq = nmk_i2c_resume, + SET_RUNTIME_PM_OPS(nmk_i2c_runtime_suspend, nmk_i2c_runtime_resume, + NULL) + .suspend_noirq = nmk_i2c_suspend_noirq, }; static unsigned int nmk_i2c_functionality(struct i2c_adapter *adap) @@ -1047,6 +1071,7 @@ static struct platform_driver nmk_i2c_driver = { }, .probe = nmk_i2c_probe, .remove = __devexit_p(nmk_i2c_remove), + .suspend = nmk_i2c_suspend, }; static int __init nmk_i2c_init(void) diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index f354813a13e..8aba6cc2bac 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -151,6 +151,16 @@ config KEYBOARD_BFIN To compile this driver as a module, choose M here: the module will be called bf54x-keys. 
+config KEYBOARD_DB5500 + tristate "DB5500 keyboard" + depends on UX500_SOC_DB5500 + help + Say Y here to enable the on-chip keypad controller on the + ST-Ericsson U5500 platform. + + To compile this driver as a module, choose M here: the + module will be called db5500_keypad. + config KEYBOARD_LKKBD tristate "DECstation/VAXstation LK201/LK401 keyboard" select SERIO @@ -381,7 +391,7 @@ config KEYBOARD_NEWTON To compile this driver as a module, choose M here: the module will be called newtonkbd. -config KEYBOARD_NOMADIK +config KEYBOARD_NOMADIK_SKE tristate "ST-Ericsson Nomadik SKE keyboard" depends on PLAT_NOMADIK help diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile index df7061f1291..90a01405e51 100644 --- a/drivers/input/keyboard/Makefile +++ b/drivers/input/keyboard/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o +obj-$(CONFIG_KEYBOARD_DB5500) += db5500_keypad.o obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o obj-$(CONFIG_KEYBOARD_GPIO_POLLED) += gpio_keys_polled.o @@ -31,7 +32,7 @@ obj-$(CONFIG_KEYBOARD_MAX7359) += max7359_keypad.o obj-$(CONFIG_KEYBOARD_MCS) += mcs_touchkey.o obj-$(CONFIG_KEYBOARD_MPR121) += mpr121_touchkey.o obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o -obj-$(CONFIG_KEYBOARD_NOMADIK) += nomadik-ske-keypad.o +obj-$(CONFIG_KEYBOARD_NOMADIK_SKE) += nomadik-ske-keypad.o obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o obj-$(CONFIG_KEYBOARD_OMAP4) += omap4-keypad.o obj-$(CONFIG_KEYBOARD_OPENCORES) += opencores-kbd.o diff --git a/drivers/input/keyboard/db5500_keypad.c b/drivers/input/keyboard/db5500_keypad.c new file mode 100644 index 00000000000..729775d99e8 --- /dev/null +++ b/drivers/input/keyboard/db5500_keypad.c @@ -0,0 +1,799 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License terms: GNU General Public License, 
version 2 + * Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson + */ + +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/gpio.h> +#include <linux/io.h> +#include <linux/input.h> +#include <linux/input/matrix_keypad.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/clk.h> +#include <mach/db5500-keypad.h> +#include <linux/regulator/consumer.h> + +#define KEYPAD_CTR 0x0 +#define KEYPAD_IRQ_CLEAR 0x4 +#define KEYPAD_INT_ENABLE 0x8 +#define KEYPAD_INT_STATUS 0xC +#define KEYPAD_ARRAY_01 0x18 + +#define KEYPAD_NUM_ARRAY_REGS 5 + +#define KEYPAD_CTR_WRITE_IRQ_ENABLE (1 << 10) +#define KEYPAD_CTR_WRITE_CONTROL (1 << 8) +#define KEYPAD_CTR_SCAN_ENABLE (1 << 7) + +#define KEYPAD_ARRAY_CHANGEBIT (1 << 15) + +#define KEYPAD_DEBOUNCE_PERIOD_MIN 5 /* ms */ +#define KEYPAD_DEBOUNCE_PERIOD_MAX 80 /* ms */ + +#define KEYPAD_GND_ROW 8 + +#define KEYPAD_ROW_SHIFT 3 +#define KEYPAD_KEYMAP_SIZE \ + (KEYPAD_MAX_ROWS * KEYPAD_MAX_COLS) + +#define KEY_PRESSED_DELAY 10 +/** + * struct db5500_keypad - data structure used by keypad driver + * @irq: irq number + * @base: keypad registers base address + * @input: pointer to input device object + * @board: keypad platform data + * @keymap: matrix scan code table for keycodes + * @clk: clock structure pointer + * @regulator : regulator used by keypad + * @switch_work : delayed work variable for switching to gpio + * @gpio_work : delayed work variable for reporting key event in gpio mode + * @previous_set: previous set of registers + * @enable : flag to enable the driver event + * @enable_on_resume: set if keypad should be enabled on resume + * @valid_key : hold the state of valid key press + * @db5500_rows : rows gpio array for db5500 keypad + * @db5500_cols : cols gpio array for db5500 keypad + * @gpio_input_irq : array for gpio irqs + * @gpio_row : gpio row + * @gpio_col : gpio_col + */ +struct db5500_keypad { + int irq; + void __iomem *base; + struct input_dev *input; + 
const struct db5500_keypad_platform_data *board; + unsigned short keymap[KEYPAD_KEYMAP_SIZE]; + struct clk *clk; + struct regulator *regulator; + struct delayed_work switch_work; + struct delayed_work gpio_work; + u8 previous_set[KEYPAD_MAX_ROWS]; + bool enable; + bool enable_on_resume; + bool valid_key; + int db5500_rows[KEYPAD_MAX_ROWS]; + int db5500_cols[KEYPAD_MAX_COLS]; + int gpio_input_irq[KEYPAD_MAX_ROWS]; + int gpio_row; + int gpio_col; +}; + +/** + * db5500_keypad_report() - reports the keypad event + * @keypad: pointer to device structure + * @row: row value of keypad + * @curr: current event + * @previous: previous event + * + * This function reports the event of the keypad + * and returns NONE. + * + * By default all column reads are 1111 1111b. Any press will pull the column + * down, leading to a 0 in any of these locations. We invert these values so + * that a 1 means "column pressed". + * If curr changes from the previous from 0 to 1, we report it as a key press. + * If curr changes from the previous from 1 to 0, we report it as a key + * release. 
+ */ +static void db5500_keypad_report(struct db5500_keypad *keypad, int row, + u8 curr, u8 previous) +{ + struct input_dev *input = keypad->input; + u8 changed = curr ^ previous; + + while (changed) { + int col = __ffs(changed); + bool press = curr & BIT(col); + int code = MATRIX_SCAN_CODE(row, col, KEYPAD_ROW_SHIFT); + + input_event(input, EV_MSC, MSC_SCAN, code); + input_report_key(input, keypad->keymap[code], press); + input_sync(input); + + changed &= ~BIT(col); + } +} + +static void db5500_keypad_scan(struct db5500_keypad *keypad) +{ + u8 current_set[ARRAY_SIZE(keypad->previous_set)]; + int tries = 100; + bool changebit; + u32 data_reg; + u8 allrows; + u8 common; + int i; + + writel(0x1, keypad->base + KEYPAD_IRQ_CLEAR); + +again: + if (!tries--) { + dev_warn(&keypad->input->dev, "values failed to stabilize\n"); + return; + } + + changebit = readl(keypad->base + KEYPAD_ARRAY_01) + & KEYPAD_ARRAY_CHANGEBIT; + + for (i = 0; i < KEYPAD_NUM_ARRAY_REGS; i++) { + data_reg = readl(keypad->base + KEYPAD_ARRAY_01 + 4 * i); + + /* If the change bit changed, we need to reread the data */ + if (changebit != !!(data_reg & KEYPAD_ARRAY_CHANGEBIT)) + goto again; + + current_set[2 * i] = ~(data_reg & 0xff); + + /* Last array reg has only one valid set of columns */ + if (i != KEYPAD_NUM_ARRAY_REGS - 1) + current_set[2 * i + 1] = ~((data_reg & 0xff0000) >> 16); + } + + allrows = current_set[KEYPAD_GND_ROW]; + + /* + * Sometimes during a GND row release, an incorrect report is received + * where the ARRAY8 all rows setting does not match the other ARRAY* + * rows. Ignore this report; the correct one has been observed to + * follow it. + */ + common = 0xff; + for (i = 0; i < KEYPAD_GND_ROW; i++) + common &= current_set[i]; + + if ((allrows & common) != common) + return; + + for (i = 0; i < ARRAY_SIZE(current_set); i++) { + /* + * If there is an allrows press (GND row), we need to ignore + * the allrows values from the reset of the ARRAYs. 
+ */ + if (i < KEYPAD_GND_ROW && allrows) + current_set[i] &= ~allrows; + + if (keypad->previous_set[i] == current_set[i]) + continue; + + db5500_keypad_report(keypad, i, current_set[i], + keypad->previous_set[i]); + } + + /* update the reference set of array registers */ + memcpy(keypad->previous_set, current_set, sizeof(keypad->previous_set)); + + return; +} + +/** + * db5500_keypad_writel() - write into keypad registers + * @keypad: pointer to device structure + * @val: value to write into register + * @reg: register offset + * + * This function uses to write into the keypad registers + * and returns NONE. + */ +static void db5500_keypad_writel(struct db5500_keypad *keypad, u32 val, u32 reg) +{ + int timeout = 4; + int allowedbit; + + switch (reg) { + case KEYPAD_CTR: + allowedbit = KEYPAD_CTR_WRITE_CONTROL; + break; + case KEYPAD_INT_ENABLE: + allowedbit = KEYPAD_CTR_WRITE_IRQ_ENABLE; + break; + default: + BUG(); + } + + do { + u32 ctr = readl(keypad->base + KEYPAD_CTR); + + if (ctr & allowedbit) + break; + + udelay(50); + } while (--timeout); + + /* Five 32k clk cycles (~150us) required, we waited 200us */ + WARN_ON(!timeout); + + writel(val, keypad->base + reg); +} + +/** + * db5500_keypad_chip_init() - initialize the keypad chip + * @keypad: pointer to device structure + * + * This function uses to initializes the keypad controller + * and returns integer. 
+ */ +static int db5500_keypad_chip_init(struct db5500_keypad *keypad) +{ + int debounce = keypad->board->debounce_ms; + int debounce_hits = 0; + + if (debounce < KEYPAD_DEBOUNCE_PERIOD_MIN) + debounce = KEYPAD_DEBOUNCE_PERIOD_MIN; + + if (debounce > KEYPAD_DEBOUNCE_PERIOD_MAX) { + debounce_hits = DIV_ROUND_UP(debounce, + KEYPAD_DEBOUNCE_PERIOD_MAX) - 1; + debounce = KEYPAD_DEBOUNCE_PERIOD_MAX; + } + + /* Convert the milliseconds to the bit mask */ + debounce = DIV_ROUND_UP(debounce, KEYPAD_DEBOUNCE_PERIOD_MIN) - 1; + + clk_enable(keypad->clk); + + db5500_keypad_writel(keypad, + KEYPAD_CTR_SCAN_ENABLE + | ((debounce_hits & 0x7) << 4) + | debounce, + KEYPAD_CTR); + + db5500_keypad_writel(keypad, 0x1, KEYPAD_INT_ENABLE); + + return 0; +} + +static void db5500_mode_enable(struct db5500_keypad *keypad, bool enable) +{ + int i; + + if (!enable) { + db5500_keypad_writel(keypad, 0, KEYPAD_CTR); + db5500_keypad_writel(keypad, 0, KEYPAD_INT_ENABLE); + if (keypad->board->exit) + keypad->board->exit(); + for (i = 0; i < keypad->board->krow; i++) { + enable_irq(keypad->gpio_input_irq[i]); + enable_irq_wake(keypad->gpio_input_irq[i]); + } + clk_disable(keypad->clk); + regulator_disable(keypad->regulator); + } else { + regulator_enable(keypad->regulator); + clk_enable(keypad->clk); + for (i = 0; i < keypad->board->krow; i++) { + disable_irq_nosync(keypad->gpio_input_irq[i]); + disable_irq_wake(keypad->gpio_input_irq[i]); + } + if (keypad->board->init) + keypad->board->init(); + db5500_keypad_chip_init(keypad); + } +} + +static void db5500_gpio_switch_work(struct work_struct *work) +{ + struct db5500_keypad *keypad = container_of(work, + struct db5500_keypad, switch_work.work); + + db5500_mode_enable(keypad, false); + keypad->enable = false; +} + +static void db5500_gpio_release_work(struct work_struct *work) +{ + int code; + struct db5500_keypad *keypad = container_of(work, + struct db5500_keypad, gpio_work.work); + struct input_dev *input = keypad->input; + + code = 
MATRIX_SCAN_CODE(keypad->gpio_col, keypad->gpio_row, + KEYPAD_ROW_SHIFT); + input_event(input, EV_MSC, MSC_SCAN, code); + input_report_key(input, keypad->keymap[code], 1); + input_sync(input); + input_report_key(input, keypad->keymap[code], 0); + input_sync(input); +} + +static int db5500_read_get_gpio_row(struct db5500_keypad *keypad) +{ + int row; + int value = 0; + int ret; + + /* read all rows GPIO data register values */ + for (row = 0; row < keypad->board->krow; row++) { + ret = gpio_get_value(keypad->db5500_rows[row]); + value += (1 << row) * ret; + } + + /* get the exact row */ + for (row = 0; row < keypad->board->krow; row++) { + if (((1 << row) & value) == 0) + return row; + } + + return -1; +} + +static void db5500_set_cols(struct db5500_keypad *keypad, int col) +{ + int i, ret; + int value; + + /* + * Set all columns except the requested column + * output pin as high + */ + for (i = 0; i < keypad->board->kcol; i++) { + if (i == col) + value = 0; + else + value = 1; + ret = gpio_request(keypad->db5500_cols[i], "db5500-kpd"); + + if (ret < 0) { + pr_err("db5500_set_cols: gpio request failed\n"); + continue; + } + + gpio_direction_output(keypad->db5500_cols[i], value); + gpio_free(keypad->db5500_cols[i]); + } +} + +static void db5500_free_cols(struct db5500_keypad *keypad) +{ + int i, ret; + + for (i = 0; i < keypad->board->kcol; i++) { + ret = gpio_request(keypad->db5500_cols[i], "db5500-kpd"); + + if (ret < 0) { + pr_err("db5500_free_cols: gpio request failed\n"); + continue; + } + + gpio_direction_output(keypad->db5500_cols[i], 0); + gpio_free(keypad->db5500_cols[i]); + } +} + +static void db5500_manual_scan(struct db5500_keypad *keypad) +{ + int row; + int col; + + keypad->valid_key = false; + + for (col = 0; col < keypad->board->kcol; col++) { + db5500_set_cols(keypad, col); + row = db5500_read_get_gpio_row(keypad); + if (row >= 0) { + keypad->valid_key = true; + keypad->gpio_row = row; + keypad->gpio_col = col; + break; + } + } + 
db5500_free_cols(keypad); +} + +static irqreturn_t db5500_keypad_gpio_irq(int irq, void *dev_id) +{ + struct db5500_keypad *keypad = dev_id; + + if (!gpio_get_value(IRQ_TO_GPIO(irq))) { + db5500_manual_scan(keypad); + if (!keypad->enable) { + keypad->enable = true; + db5500_mode_enable(keypad, true); + } + + /* + * Schedule the work queue to change it to + * report the key pressed, if it is not detected in keypad mode. + */ + if (keypad->valid_key) { + schedule_delayed_work(&keypad->gpio_work, + KEY_PRESSED_DELAY); + } + } + + return IRQ_HANDLED; +} + +static irqreturn_t db5500_keypad_irq(int irq, void *dev_id) +{ + struct db5500_keypad *keypad = dev_id; + + cancel_delayed_work_sync(&keypad->gpio_work); + cancel_delayed_work_sync(&keypad->switch_work); + db5500_keypad_scan(keypad); + + /* + * Schedule the work queue to change it to + * GPIO mode, if there is no activity in keypad mode + */ + if (keypad->enable) + schedule_delayed_work(&keypad->switch_work, + keypad->board->switch_delay); + + return IRQ_HANDLED; +} + +/** + * db5500_keypad_probe() - Initialize the keypad driver + * @pdev: pointer to platform device structure + * + * This function will allocate and initialize the instance + * data and request the irq and register to input subsystem driver. 
+ */ +static int __devinit db5500_keypad_probe(struct platform_device *pdev) +{ + struct db5500_keypad_platform_data *plat; + struct db5500_keypad *keypad; + struct resource *res; + struct input_dev *input; + void __iomem *base; + struct clk *clk; + int ret; + int irq; + int i; + + plat = pdev->dev.platform_data; + if (!plat) { + dev_err(&pdev->dev, "invalid keypad platform data\n"); + ret = -EINVAL; + goto out_ret; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "failed to get keypad irq\n"); + ret = -EINVAL; + goto out_ret; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(&pdev->dev, "missing platform resources\n"); + ret = -EINVAL; + goto out_ret; + } + + res = request_mem_region(res->start, resource_size(res), pdev->name); + if (!res) { + dev_err(&pdev->dev, "failed to request I/O memory\n"); + ret = -EBUSY; + goto out_ret; + } + + base = ioremap(res->start, resource_size(res)); + if (!base) { + dev_err(&pdev->dev, "failed to remap I/O memory\n"); + ret = -ENXIO; + goto out_freerequest_memregions; + } + + clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "failed to clk_get\n"); + ret = PTR_ERR(clk); + goto out_iounmap; + } + + keypad = kzalloc(sizeof(struct db5500_keypad), GFP_KERNEL); + if (!keypad) { + dev_err(&pdev->dev, "failed to allocate keypad memory\n"); + ret = -ENOMEM; + goto out_freeclk; + } + + input = input_allocate_device(); + if (!input) { + dev_err(&pdev->dev, "failed to input_allocate_device\n"); + ret = -ENOMEM; + goto out_freekeypad; + } + + keypad->regulator = regulator_get(&pdev->dev, "v-ape"); + if (IS_ERR(keypad->regulator)) { + dev_err(&pdev->dev, "regulator_get failed\n"); + keypad->regulator = NULL; + ret = -EINVAL; + goto out_regulator_get; + } else { + ret = regulator_enable(keypad->regulator); + if (ret < 0) { + dev_err(&pdev->dev, "regulator_enable failed\n"); + goto out_regulator_enable; + } + } + + input->id.bustype = BUS_HOST; + 
input->name = "db5500-keypad"; + input->dev.parent = &pdev->dev; + + input->keycode = keypad->keymap; + input->keycodesize = sizeof(keypad->keymap[0]); + input->keycodemax = ARRAY_SIZE(keypad->keymap); + + input_set_capability(input, EV_MSC, MSC_SCAN); + + __set_bit(EV_KEY, input->evbit); + if (!plat->no_autorepeat) + __set_bit(EV_REP, input->evbit); + + matrix_keypad_build_keymap(plat->keymap_data, KEYPAD_ROW_SHIFT, + input->keycode, input->keybit); + + ret = input_register_device(input); + if (ret) { + dev_err(&pdev->dev, + "unable to register input device: %d\n", ret); + goto out_freeinput; + } + + keypad->irq = irq; + keypad->board = plat; + keypad->input = input; + keypad->base = base; + keypad->clk = clk; + + INIT_DELAYED_WORK(&keypad->switch_work, db5500_gpio_switch_work); + INIT_DELAYED_WORK(&keypad->gpio_work, db5500_gpio_release_work); + + clk_enable(keypad->clk); +if (!keypad->board->init) { + dev_err(&pdev->dev, "init funtion not defined\n"); + ret = -EINVAL; + goto out_unregisterinput; + } + + if (keypad->board->init() < 0) { + dev_err(&pdev->dev, "keyboard init config failed\n"); + ret = -EINVAL; + goto out_unregisterinput; + } + + if (!keypad->board->exit) { + dev_err(&pdev->dev, "exit funtion not defined\n"); + ret = -EINVAL; + goto out_unregisterinput; + } + + if (keypad->board->exit() < 0) { + dev_err(&pdev->dev, "keyboard exit config failed\n"); + ret = -EINVAL; + goto out_unregisterinput; + } + + for (i = 0; i < keypad->board->krow; i++) { + keypad->db5500_rows[i] = *plat->gpio_input_pins; + keypad->gpio_input_irq[i] = + GPIO_TO_IRQ(keypad->db5500_rows[i]); + plat->gpio_input_pins++; + } + + for (i = 0; i < keypad->board->kcol; i++) { + keypad->db5500_cols[i] = *plat->gpio_output_pins; + plat->gpio_output_pins++; + } + + for (i = 0; i < keypad->board->krow; i++) { + ret = request_threaded_irq(keypad->gpio_input_irq[i], + NULL, db5500_keypad_gpio_irq, + IRQF_TRIGGER_FALLING | IRQF_NO_SUSPEND, + "db5500-keypad-gpio", keypad); + if (ret) { + 
dev_err(&pdev->dev, "allocate gpio irq %d failed\n", + keypad->gpio_input_irq[i]); + goto out_unregisterinput; + } + enable_irq_wake(keypad->gpio_input_irq[i]); + } + + ret = request_threaded_irq(keypad->irq, NULL, db5500_keypad_irq, + IRQF_ONESHOT, "db5500-keypad", keypad); + if (ret) { + dev_err(&pdev->dev, "allocate irq %d failed\n", keypad->irq); + goto out_unregisterinput; + } + + platform_set_drvdata(pdev, keypad); + + clk_disable(keypad->clk); + regulator_disable(keypad->regulator); + return 0; + +out_unregisterinput: + input_unregister_device(input); + input = NULL; + clk_disable(keypad->clk); +out_freeinput: + input_free_device(input); +out_regulator_enable: + regulator_put(keypad->regulator); +out_regulator_get: + input_free_device(input); +out_freekeypad: + kfree(keypad); +out_freeclk: + clk_put(clk); +out_iounmap: + iounmap(base); +out_freerequest_memregions: + release_mem_region(res->start, resource_size(res)); +out_ret: + return ret; +} + +/** + * db5500_keypad_remove() - Removes the keypad driver + * @pdev: pointer to platform device structure + * + * This function uses to remove the keypad + * driver and returns integer. 
+ */ +static int __devexit db5500_keypad_remove(struct platform_device *pdev) +{ + struct db5500_keypad *keypad = platform_get_drvdata(pdev); + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + cancel_delayed_work_sync(&keypad->gpio_work); + cancel_delayed_work_sync(&keypad->switch_work); + free_irq(keypad->irq, keypad); + input_unregister_device(keypad->input); + + clk_disable(keypad->clk); + clk_put(keypad->clk); + + if (keypad->board->exit) + keypad->board->exit(); + + regulator_put(keypad->regulator); + + iounmap(keypad->base); + + if (res) + release_mem_region(res->start, resource_size(res)); + + kfree(keypad); + + return 0; +} + +#ifdef CONFIG_PM +/** + * db5500_keypad_suspend() - suspend the keypad controller + * @dev: pointer to device structure + * + * This function is used to suspend the + * keypad controller and returns integer + */ +static int db5500_keypad_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct db5500_keypad *keypad = platform_get_drvdata(pdev); + int irq = platform_get_irq(pdev, 0); + + if (device_may_wakeup(dev)) + enable_irq_wake(irq); + else { + cancel_delayed_work_sync(&keypad->gpio_work); + cancel_delayed_work_sync(&keypad->switch_work); + disable_irq(irq); + keypad->enable_on_resume = keypad->enable; + if (keypad->enable) { + db5500_mode_enable(keypad, false); + keypad->enable = false; + } + } + + return 0; +} + +/** + * db5500_keypad_resume() - resume the keypad controller + * @dev: pointer to device structure + * + * This function is used to resume the keypad + * controller and returns integer. 
+ */ +static int db5500_keypad_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct db5500_keypad *keypad = platform_get_drvdata(pdev); + int irq = platform_get_irq(pdev, 0); + + if (device_may_wakeup(dev)) + disable_irq_wake(irq); + else { + if (keypad->enable_on_resume && !keypad->enable) { + keypad->enable = true; + db5500_mode_enable(keypad, true); + /* + * Schedule the work queue to change it to GPIO mode + * if there is no activity keypad mode + */ + schedule_delayed_work(&keypad->switch_work, + keypad->board->switch_delay); + } + enable_irq(irq); + } + + return 0; +} + +static const struct dev_pm_ops db5500_keypad_dev_pm_ops = { + .suspend = db5500_keypad_suspend, + .resume = db5500_keypad_resume, +}; +#endif + +static struct platform_driver db5500_keypad_driver = { + .driver = { + .name = "db5500-keypad", + .owner = THIS_MODULE, +#ifdef CONFIG_PM + .pm = &db5500_keypad_dev_pm_ops, +#endif + }, + .probe = db5500_keypad_probe, + .remove = __devexit_p(db5500_keypad_remove), +}; + +/** + * db5500_keypad_init() - Initialize the keypad driver + * + * This function uses to initializes the db5500 + * keypad driver and returns integer. + */ +static int __init db5500_keypad_init(void) +{ + return platform_driver_register(&db5500_keypad_driver); +} +module_init(db5500_keypad_init); + +/** + * db5500_keypad_exit() - De-initialize the keypad driver + * + * This function uses to de-initialize the db5500 + * keypad driver and returns none. 
+ */ +static void __exit db5500_keypad_exit(void) +{ + platform_driver_unregister(&db5500_keypad_driver); +} +module_exit(db5500_keypad_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Sundar Iyer <sundar.iyer@stericsson.com>"); +MODULE_DESCRIPTION("DB5500 Keypad Driver"); +MODULE_ALIAS("platform:db5500-keypad"); diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index 62bfce468f9..1fdf54bb4f6 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c @@ -29,6 +29,7 @@ #include <linux/of_platform.h> #include <linux/of_gpio.h> #include <linux/spinlock.h> +#include <linux/pm_runtime.h> struct gpio_button_data { const struct gpio_keys_button *button; @@ -46,6 +47,8 @@ struct gpio_keys_drvdata { struct input_dev *input; struct mutex disable_lock; unsigned int n_buttons; + bool enabled; + bool enable_after_suspend; int (*enable)(struct device *dev); void (*disable)(struct device *dev); struct gpio_button_data data[0]; @@ -524,6 +527,8 @@ static int gpio_keys_open(struct input_dev *input) { struct gpio_keys_drvdata *ddata = input_get_drvdata(input); + pm_runtime_get_sync(input->dev.parent); + ddata->enabled = true; return ddata->enable ? 
ddata->enable(input->dev.parent) : 0; } @@ -533,6 +538,8 @@ static void gpio_keys_close(struct input_dev *input) if (ddata->disable) ddata->disable(input->dev.parent); + ddata->enabled = false; + pm_runtime_put(input->dev.parent); } /* @@ -675,6 +682,7 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev) ddata->n_buttons = pdata->nbuttons; ddata->enable = pdata->enable; ddata->disable = pdata->disable; + ddata->enabled = false; mutex_init(&ddata->disable_lock); platform_set_drvdata(pdev, ddata); @@ -691,6 +699,8 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev) input->id.product = 0x0001; input->id.version = 0x0100; + pm_runtime_enable(&pdev->dev); + /* Enable auto repeat feature of Linux input subsystem */ if (pdata->rep) __set_bit(EV_REP, input->evbit); @@ -756,6 +766,8 @@ static int __devexit gpio_keys_remove(struct platform_device *pdev) struct input_dev *input = ddata->input; int i; + pm_runtime_disable(&pdev->dev); + sysfs_remove_group(&pdev->dev.kobj, &gpio_keys_attr_group); device_init_wakeup(&pdev->dev, 0); @@ -790,6 +802,10 @@ static int gpio_keys_suspend(struct device *dev) if (bdata->button->wakeup) enable_irq_wake(bdata->irq); } + } else { + ddata->enable_after_suspend = ddata->enabled; + if (ddata->enabled && ddata->disable) + ddata->disable(dev); } return 0; @@ -808,6 +824,11 @@ static int gpio_keys_resume(struct device *dev) if (gpio_is_valid(bdata->button->gpio)) gpio_keys_gpio_report_event(bdata); } + + if (!device_may_wakeup(dev) && ddata->enable_after_suspend + && ddata->enable) + ddata->enable(dev); + input_sync(ddata->input); return 0; diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c index 101e245944e..6a0707f7f76 100644 --- a/drivers/input/keyboard/nomadik-ske-keypad.c +++ b/drivers/input/keyboard/nomadik-ske-keypad.c @@ -2,7 +2,7 @@ * Copyright (C) ST-Ericsson SA 2010 * * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson - * 
Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson + * co-Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson * * License terms:GNU General Public License (GPL) version 2 * @@ -12,6 +12,7 @@ #include <linux/platform_device.h> #include <linux/interrupt.h> +#include <linux/workqueue.h> #include <linux/spinlock.h> #include <linux/io.h> #include <linux/delay.h> @@ -19,8 +20,10 @@ #include <linux/slab.h> #include <linux/clk.h> #include <linux/module.h> +#include <linux/regulator/consumer.h> #include <plat/ske.h> +#include <linux/gpio/nomadik.h> /* SKE_CR bits */ #define SKE_KPMLT (0x1 << 6) @@ -48,17 +51,38 @@ #define SKE_ASR3 0x2C #define SKE_NUM_ASRX_REGISTERS (4) +#define KEY_PRESSED_DELAY 10 + + +#define KEY_REPORTED 1 +#define KEY_PRESSED 2 /** * struct ske_keypad - data structure used by keypad driver - * @irq: irq no - * @reg_base: ske regsiters base address - * @input: pointer to input device object - * @board: keypad platform device - * @keymap: matrix scan code table for keycodes - * @clk: clock structure pointer + * @dev: Pointer to the structure device + * @irq: irq no + * @reg_base: ske regsiters base address + * @input: pointer to input device object + * @board: keypad platform device + * @keymap: matrix scan code table for keycodes + * @clk: clock structure pointer + * @ske_keypad_lock: lock used while writting into registers + * @enable: flag to enable the driver event + * @enable_on_resume: set if keypad should be enabled on resume + * @regulator: pointer to the regulator used for ske kyepad + * @gpio_input_irq: array for gpio irqs + * @key_pressed: hold the key state + * @work: delayed work variable for gpio switch + * @ske_rows: rows gpio array for ske + * @ske_cols: columns gpio array for ske + * @gpio_row: gpio row + * @gpio_col: gpio column + * @gpio_work: delayed work variable for release gpio key + * @keys: matrix holding key status + * @scan_work: delayed work for scaning new key actions */ struct ske_keypad { + struct 
device *dev; int irq; void __iomem *reg_base; struct input_dev *input; @@ -66,6 +90,19 @@ struct ske_keypad { unsigned short keymap[SKE_KPD_KEYMAP_SIZE]; struct clk *clk; spinlock_t ske_keypad_lock; + bool enable; + bool enable_on_resume; + struct regulator *regulator; + int *gpio_input_irq; + int key_pressed; + struct delayed_work work; + int *ske_rows; + int *ske_cols; + int gpio_row; + int gpio_col; + struct delayed_work gpio_work; + u8 **keys; + struct delayed_work scan_work; }; static void ske_keypad_set_bits(struct ske_keypad *keypad, u16 addr, @@ -83,15 +120,15 @@ static void ske_keypad_set_bits(struct ske_keypad *keypad, u16 addr, spin_unlock(&keypad->ske_keypad_lock); } -/* +/** * ske_keypad_chip_init: init keypad controller configuration - * + * @keypad: pointer to device structure * Enable Multi key press detection, auto scan mode */ static int __init ske_keypad_chip_init(struct ske_keypad *keypad) { u32 value; - int timeout = 50; + int timeout = keypad->board->debounce_ms; /* check SKE_RIS to be 0 */ while ((readl(keypad->reg_base + SKE_RIS) != 0x00000000) && timeout--) @@ -100,7 +137,7 @@ static int __init ske_keypad_chip_init(struct ske_keypad *keypad) if (!timeout) return -EINVAL; - /* + /** * set debounce value * keypad dbounce is configured in DBCR[15:8] * dbounce value in steps of 32/32.768 ms @@ -115,7 +152,7 @@ static int __init ske_keypad_chip_init(struct ske_keypad *keypad) /* enable multi key detection */ ske_keypad_set_bits(keypad, SKE_CR, 0x0, SKE_KPMLT); - /* + /** * set up the number of columns * KPCN[5:3] defines no. 
of keypad columns to be auto scanned */ @@ -134,14 +171,125 @@ static int __init ske_keypad_chip_init(struct ske_keypad *keypad) return 0; } +static void ske_mode_enable(struct ske_keypad *keypad, bool enable) +{ + int i; + + if (!enable) { + dev_dbg(keypad->dev, "%s disable keypad\n", __func__); + writel(0, keypad->reg_base + SKE_CR); + if (keypad->board->exit) + keypad->board->exit(); + for (i = 0; i < keypad->board->kconnected_rows; i++) { + enable_irq(keypad->gpio_input_irq[i]); + enable_irq_wake(keypad->gpio_input_irq[i]); + } + clk_disable(keypad->clk); + regulator_disable(keypad->regulator); + } else { + dev_dbg(keypad->dev, "%s enable keypad\n", __func__); + regulator_enable(keypad->regulator); + clk_enable(keypad->clk); + for (i = 0; i < keypad->board->kconnected_rows; i++) { + disable_irq_nosync(keypad->gpio_input_irq[i]); + disable_irq_wake(keypad->gpio_input_irq[i]); + } + if (keypad->board->init) + keypad->board->init(); + ske_keypad_chip_init(keypad); + } +} +static void ske_enable(struct ske_keypad *keypad, bool enable) +{ + keypad->enable = enable; + if (keypad->enable) { + enable_irq(keypad->irq); + ske_mode_enable(keypad, true); + } else { + ske_mode_enable(keypad, false); + disable_irq(keypad->irq); + } +} + +static ssize_t ske_show_attr_enable(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + struct ske_keypad *keypad = platform_get_drvdata(pdev); + return sprintf(buf, "%d\n", keypad->enable); +} + +static ssize_t ske_store_attr_enable(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct platform_device *pdev = to_platform_device(dev); + struct ske_keypad *keypad = platform_get_drvdata(pdev); + unsigned long val; + + if (strict_strtoul(buf, 0, &val)) + return -EINVAL; + + if ((val != 0) && (val != 1)) + return -EINVAL; + + if (keypad->enable != val) { + keypad->enable = val ? 
true : false; + ske_enable(keypad, keypad->enable); + } + return count; +} + +static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO, + ske_show_attr_enable, ske_store_attr_enable); + +static struct attribute *ske_keypad_attrs[] = { + &dev_attr_enable.attr, + NULL, +}; + +static struct attribute_group ske_attr_group = { + .attrs = ske_keypad_attrs, +}; + +static void ske_keypad_report(struct ske_keypad *keypad, u8 status, int col) +{ + int row = 0, code, pos; + u32 ske_ris; + int num_of_rows; + + /* find out the row */ + num_of_rows = hweight8(status); + do { + pos = __ffs(status); + row = pos; + status &= ~(1 << pos); + + if (row >= keypad->board->krow) + /* no more rows supported by this keypad */ + break; + + code = MATRIX_SCAN_CODE(row, col, SKE_KEYPAD_ROW_SHIFT); + ske_ris = readl(keypad->reg_base + SKE_RIS); + keypad->key_pressed = ske_ris & SKE_KPRISA; + + dev_dbg(keypad->dev, + "%s key_pressed:%d code:%d row:%d col:%d\n", + __func__, keypad->key_pressed, code, row, col); + + if (keypad->key_pressed) + keypad->keys[row][col] |= KEY_PRESSED; + + num_of_rows--; + } while (num_of_rows); +} + static void ske_keypad_read_data(struct ske_keypad *keypad) { - struct input_dev *input = keypad->input; - u16 status; - int col = 0, row = 0, code; - int ske_asr, ske_ris, key_pressed, i; + u8 status; + int col = 0; + int ske_asr, i; - /* + /** * Read the auto scan registers * * Each SKE_ASRx (x=0 to x=3) contains two row values. 
@@ -153,59 +301,274 @@ static void ske_keypad_read_data(struct ske_keypad *keypad) if (!ske_asr) continue; - /* now that ASRx is zero, find out the column x and row y*/ - if (ske_asr & 0xff) { + /* now that ASRx is zero, find out the coloumn x and row y */ + status = ske_asr & 0xff; + if (status) { col = i * 2; - status = ske_asr & 0xff; - } else { + if (col >= keypad->board->kcol) + /* no more columns supported by this keypad */ + break; + ske_keypad_report(keypad, status, col); + } + status = (ske_asr & 0xff00) >> 8; + if (status) { col = (i * 2) + 1; - status = (ske_asr & 0xff00) >> 8; + if (col >= keypad->board->kcol) + /* no more columns supported by this keypad */ + break; + ske_keypad_report(keypad, status, col); } + } +} - /* find out the row */ - row = __ffs(status); +static void ske_keypad_scan_work(struct work_struct *work) +{ + int timeout = 10; + int i, j, code; + struct ske_keypad *keypad = container_of(work, + struct ske_keypad, scan_work.work); + struct input_dev *input = keypad->input; - code = MATRIX_SCAN_CODE(row, col, SKE_KEYPAD_ROW_SHIFT); - ske_ris = readl(keypad->reg_base + SKE_RIS); - key_pressed = ske_ris & SKE_KPRISA; + /* Wait for autoscan to complete */ + while (readl(keypad->reg_base + SKE_CR) & SKE_KPASON) + cpu_relax(); - input_event(input, EV_MSC, MSC_SCAN, code); - input_report_key(input, keypad->keymap[code], key_pressed); - input_sync(input); + /* SKEx registers are stable and can be read */ + ske_keypad_read_data(keypad); + + /* Check for key actions */ + for (i = 0; i < keypad->board->krow; i++) { + for (j = 0; j < keypad->board->kcol; j++) { + switch (keypad->keys[i][j]) { + case KEY_REPORTED: + /** + * Key was reported but is no longer pressed, + * report it as released. 
+ */ + code = MATRIX_SCAN_CODE(i, j, + SKE_KEYPAD_ROW_SHIFT); + input_event(input, EV_MSC, MSC_SCAN, code); + input_report_key(input, keypad->keymap[code], + 0); + input_sync(input); + keypad->keys[i][j] = 0; + dev_dbg(keypad->dev, + "%s Key release reported, code:%d " + "(key %d)\n", + __func__, code, keypad->keymap[code]); + break; + case KEY_PRESSED: + /* Key pressed but not yet reported, report */ + code = MATRIX_SCAN_CODE(i, j, + SKE_KEYPAD_ROW_SHIFT); + input_event(input, EV_MSC, MSC_SCAN, code); + input_report_key(input, keypad->keymap[code], + 1); + input_sync(input); + dev_dbg(keypad->dev, + "%s Key press reported, code:%d " + "(key %d)\n", + __func__, code, keypad->keymap[code]); + /* Intentional fall though */ + case (KEY_REPORTED | KEY_PRESSED): + /** + * Key pressed and reported, just reset + * KEY_PRESSED for next scan + */ + keypad->keys[i][j] = KEY_REPORTED; + break; + } + } + } + + if (keypad->key_pressed) { + /* + * Key still pressed, schedule work to poll changes in 100 ms + * After increasing the delay from 50 to 100 it is taking + * 2% to 3% load on average. 
+ */ + schedule_delayed_work(&keypad->scan_work, + msecs_to_jiffies(100)); + } else { + /* For safty measure, clear interrupt once more */ + ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA); + + /* Wait for raw interrupt to clear */ + while ((readl(keypad->reg_base + SKE_RIS) & SKE_KPRISA) && + --timeout) { + udelay(10); + } + + if (!timeout) + dev_err(keypad->dev, + "%s Timeed out waiting on irq to clear\n", + __func__); + + /* enable auto scan interrupts */ + ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA); + + /** + * Schedule the work queue to change it to GPIO mode + * if there is no activity in SKE mode + */ + if (!keypad->key_pressed && keypad->enable) + schedule_delayed_work(&keypad->work, + keypad->board->switch_delay); } } +static void ske_gpio_switch_work(struct work_struct *work) +{ + struct ske_keypad *keypad = container_of(work, + struct ske_keypad, work.work); + + ske_mode_enable(keypad, false); + keypad->enable = false; +} + +static void ske_gpio_release_work(struct work_struct *work) +{ + int code; + struct ske_keypad *keypad = container_of(work, + struct ske_keypad, gpio_work.work); + struct input_dev *input = keypad->input; + + code = MATRIX_SCAN_CODE(keypad->gpio_row, keypad->gpio_col, + SKE_KEYPAD_ROW_SHIFT); + + dev_dbg(keypad->dev, "%s Key press reported, code:%d (key %d)\n", + __func__, code, keypad->keymap[code]); + + input_event(input, EV_MSC, MSC_SCAN, code); + input_report_key(input, keypad->keymap[code], 1); + input_sync(input); + input_report_key(input, keypad->keymap[code], 0); + input_sync(input); +} + +static int ske_read_get_gpio_row(struct ske_keypad *keypad) +{ + int row; + int value = 0; + int ret; + + /* read all rows GPIO data register values */ + for (row = 0; row < keypad->board->kconnected_rows ; row++) { + ret = gpio_get_value(keypad->ske_rows[row]); + value += (1 << row) * ret; + } + + /* get the exact row */ + for (row = 0; row < keypad->board->kconnected_rows; row++) { + if (((1 << row) & value) == 0) + return 
row; + } + + return -1; +} + +static void ske_set_cols(struct ske_keypad *keypad, int col) +{ + int i ; + int value; + + /** + * Set all columns except the requested column + * output pin as high + */ + for (i = 0; i < keypad->board->kconnected_cols; i++) { + if (i == col) + value = 0; + else + value = 1; + gpio_request(keypad->ske_cols[i], "ske-kp"); + gpio_direction_output(keypad->ske_cols[i], value); + gpio_free(keypad->ske_cols[i]); + } +} + +static void ske_free_cols(struct ske_keypad *keypad) +{ + int i ; + + for (i = 0; i < keypad->board->kconnected_cols; i++) { + gpio_request(keypad->ske_cols[i], "ske-kp"); + gpio_direction_output(keypad->ske_cols[i], 0); + gpio_free(keypad->ske_cols[i]); + } +} + +static void ske_manual_scan(struct ske_keypad *keypad) +{ + int row; + int col; + + for (col = 0; col < keypad->board->kconnected_cols; col++) { + ske_set_cols(keypad, col); + row = ske_read_get_gpio_row(keypad); + if (row >= 0) { + keypad->key_pressed = 1; + keypad->gpio_row = row; + keypad->gpio_col = col; + break; + } + } + ske_free_cols(keypad); +} + +static irqreturn_t ske_keypad_gpio_irq(int irq, void *dev_id) +{ + struct ske_keypad *keypad = dev_id; + + if (!gpio_get_value(NOMADIK_IRQ_TO_GPIO(irq))) { + ske_manual_scan(keypad); + if (!keypad->enable) { + keypad->enable = true; + ske_mode_enable(keypad, true); + /** + * Schedule the work queue to change it back to GPIO + * mode if there is no activity in SKE mode + */ + schedule_delayed_work(&keypad->work, + keypad->board->switch_delay); + } + /** + * Schedule delayed work to report key press if it is not + * detected in SKE mode. 
+ */ + if (keypad->key_pressed) + schedule_delayed_work(&keypad->gpio_work, + KEY_PRESSED_DELAY); + } + + return IRQ_HANDLED; +} static irqreturn_t ske_keypad_irq(int irq, void *dev_id) { struct ske_keypad *keypad = dev_id; - int retries = 20; + cancel_delayed_work_sync(&keypad->gpio_work); + cancel_delayed_work_sync(&keypad->work); /* disable auto scan interrupt; mask the interrupt generated */ - ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0); + ske_keypad_set_bits(keypad, SKE_IMSC, SKE_KPIMA, 0x0); ske_keypad_set_bits(keypad, SKE_ICR, 0x0, SKE_KPICA); - while ((readl(keypad->reg_base + SKE_CR) & SKE_KPASON) && --retries) - msleep(5); - - if (retries) { - /* SKEx registers are stable and can be read */ - ske_keypad_read_data(keypad); - } - - /* enable auto scan interrupts */ - ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA); + schedule_delayed_work(&keypad->scan_work, 0); return IRQ_HANDLED; } static int __init ske_keypad_probe(struct platform_device *pdev) { - const struct ske_keypad_platform_data *plat = pdev->dev.platform_data; struct ske_keypad *keypad; + struct resource *res = NULL; struct input_dev *input; - struct resource *res; + struct clk *clk; + void __iomem *reg_base; + int ret = 0; int irq; - int error; + int i; + struct ske_keypad_platform_data *plat = pdev->dev.platform_data; if (!plat) { dev_err(&pdev->dev, "invalid keypad platform data\n"); @@ -219,42 +582,56 @@ static int __init ske_keypad_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { + if (res == NULL) { dev_err(&pdev->dev, "missing platform resources\n"); - return -EINVAL; + return -ENXIO; } - keypad = kzalloc(sizeof(struct ske_keypad), GFP_KERNEL); - input = input_allocate_device(); - if (!keypad || !input) { - dev_err(&pdev->dev, "failed to allocate keypad memory\n"); - error = -ENOMEM; - goto err_free_mem; + res = request_mem_region(res->start, resource_size(res), pdev->name); + if (!res) { + dev_err(&pdev->dev, 
"failed to request I/O memory\n"); + return -EBUSY; } - keypad->irq = irq; - keypad->board = plat; - keypad->input = input; - spin_lock_init(&keypad->ske_keypad_lock); + reg_base = ioremap(res->start, resource_size(res)); + if (!reg_base) { + dev_err(&pdev->dev, "failed to remap I/O memory\n"); + ret = -ENXIO; + goto out_freerequest_memregions; + } - if (!request_mem_region(res->start, resource_size(res), pdev->name)) { - dev_err(&pdev->dev, "failed to request I/O memory\n"); - error = -EBUSY; - goto err_free_mem; + clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "failed to clk_get\n"); + ret = PTR_ERR(clk); + goto out_freeioremap; } - keypad->reg_base = ioremap(res->start, resource_size(res)); - if (!keypad->reg_base) { - dev_err(&pdev->dev, "failed to remap I/O memory\n"); - error = -ENXIO; - goto err_free_mem_region; + /* resources are sane; we begin allocation */ + keypad = kzalloc(sizeof(struct ske_keypad), GFP_KERNEL); + if (!keypad) { + dev_err(&pdev->dev, "failed to allocate keypad memory\n"); + goto out_freeclk; } + keypad->dev = &pdev->dev; - keypad->clk = clk_get(&pdev->dev, NULL); - if (IS_ERR(keypad->clk)) { - dev_err(&pdev->dev, "failed to get clk\n"); - error = PTR_ERR(keypad->clk); - goto err_iounmap; + input = input_allocate_device(); + if (!input) { + dev_err(&pdev->dev, "failed to input_allocate_device\n"); + ret = -ENOMEM; + goto out_freekeypad; + } + keypad->regulator = regulator_get(&pdev->dev, "v-ape"); + if (IS_ERR(keypad->regulator)) { + dev_err(&pdev->dev, "regulator_get failed\n"); + keypad->regulator = NULL; + goto out_regulator_get; + } else { + ret = regulator_enable(keypad->regulator); + if (ret < 0) { + dev_err(&pdev->dev, "regulator_enable failed\n"); + goto out_regulator_enable; + } } input->id.bustype = BUS_HOST; @@ -266,38 +643,153 @@ static int __init ske_keypad_probe(struct platform_device *pdev) input->keycodemax = ARRAY_SIZE(keypad->keymap); input_set_capability(input, EV_MSC, MSC_SCAN); + 
input_set_drvdata(input, keypad); __set_bit(EV_KEY, input->evbit); if (!plat->no_autorepeat) __set_bit(EV_REP, input->evbit); matrix_keypad_build_keymap(plat->keymap_data, SKE_KEYPAD_ROW_SHIFT, - input->keycode, input->keybit); + input->keycode, input->keybit); + + ret = input_register_device(input); + if (ret) { + dev_err(&pdev->dev, + "unable to register input device: %d\n", ret); + goto out_freeinput; + } + + keypad->irq = irq; + keypad->board = plat; + keypad->input = input; + keypad->reg_base = reg_base; + keypad->clk = clk; + INIT_DELAYED_WORK(&keypad->work, ske_gpio_switch_work); + INIT_DELAYED_WORK(&keypad->gpio_work, ske_gpio_release_work); + INIT_DELAYED_WORK(&keypad->scan_work, ske_keypad_scan_work); + /* allocations are sane, we begin HW initialization */ clk_enable(keypad->clk); - /* go through board initialization helpers */ - if (keypad->board->init) - keypad->board->init(); + if (!keypad->board->init) { + dev_err(&pdev->dev, "init funtion not defined\n"); + ret = -EINVAL; + goto out_unregisterinput; + } - error = ske_keypad_chip_init(keypad); - if (error) { - dev_err(&pdev->dev, "unable to init keypad hardware\n"); - goto err_clk_disable; + if (keypad->board->init() < 0) { + dev_err(&pdev->dev, "keyboard init config failed\n"); + ret = -EINVAL; + goto out_unregisterinput; } - error = request_threaded_irq(keypad->irq, NULL, ske_keypad_irq, - IRQF_ONESHOT, "ske-keypad", keypad); - if (error) { - dev_err(&pdev->dev, "allocate irq %d failed\n", keypad->irq); - goto err_clk_disable; + if (!keypad->board->exit) { + dev_err(&pdev->dev, "exit funtion not defined\n"); + ret = -EINVAL; + goto out_unregisterinput; + } + + if (keypad->board->exit() < 0) { + dev_err(&pdev->dev, "keyboard exit config failed\n"); + ret = -EINVAL; + goto out_unregisterinput; + } + + if (plat->kconnected_rows == 0) { + /* + * Board config data does not specify the number of connected + * rows and columns; assume that it matches the specified max + * values. 
+ */ + plat->kconnected_rows = plat->krow; + plat->kconnected_cols = plat->kcol; + } + + /* this code doesn't currently support non-square keypad */ + if (plat->kconnected_rows != plat->kconnected_cols) { + dev_err(&pdev->dev, + "invalid keypad configuration (not square)\n"), + ret = -EINVAL; + goto out_unregisterinput; } - error = input_register_device(input); - if (error) { + if (plat->kconnected_rows > SKE_KPD_MAX_ROWS || + plat->kconnected_cols > SKE_KPD_MAX_COLS) { dev_err(&pdev->dev, - "unable to register input device: %d\n", error); - goto err_free_irq; + "invalid keypad configuration (too many rows/cols)\n"), + ret = -EINVAL; + goto out_unregisterinput; + } + + keypad->gpio_input_irq = kmalloc(sizeof(*keypad->gpio_input_irq) * + plat->kconnected_rows, + GFP_KERNEL); + if (!keypad->gpio_input_irq) { + dev_err(&pdev->dev, "failed to allocate input_irq memory\n"); + goto out_unregisterinput; + } + + keypad->ske_rows = kmalloc(sizeof(*keypad->ske_rows) * + plat->kconnected_rows, GFP_KERNEL); + if (!keypad->ske_rows) { + dev_err(&pdev->dev, "failed to allocate ske_rows memory\n"); + goto out_freemem_input_irq; + } + + keypad->ske_cols = kmalloc(sizeof(*keypad->ske_cols) * + plat->kconnected_cols, GFP_KERNEL); + if (!keypad->ske_cols) { + dev_err(&pdev->dev, "failed to allocate ske_cols memory\n"); + goto out_freemem_rows; + } + + keypad->keys = kzalloc(sizeof(*keypad->keys) * plat->krow, GFP_KERNEL); + if (!keypad->keys) { + dev_err(&pdev->dev, "failed to allocate keys:rows memory\n"); + goto out_freemem_cols; + } + for (i = 0; i < plat->krow; i++) { + keypad->keys[i] = kzalloc(sizeof(*keypad->keys[i]) * + plat->kcol, GFP_KERNEL); + if (!keypad->keys[i]) { + dev_err(&pdev->dev, + "failed to allocate keys:cols memory\n"); + goto out_freemem_keys; + } + } + + for (i = 0; i < plat->kconnected_rows; i++) { + keypad->ske_rows[i] = *plat->gpio_input_pins; + keypad->ske_cols[i] = *plat->gpio_output_pins; + keypad->gpio_input_irq[i] = + 
NOMADIK_GPIO_TO_IRQ(keypad->ske_rows[i]); + } + + for (i = 0; i < keypad->board->kconnected_rows; i++) { + ret = request_threaded_irq(keypad->gpio_input_irq[i], + NULL, ske_keypad_gpio_irq, + IRQF_TRIGGER_FALLING | IRQF_NO_SUSPEND, + "ske-keypad-gpio", keypad); + if (ret) { + dev_err(&pdev->dev, "allocate gpio irq %d failed\n", + keypad->gpio_input_irq[i]); + goto out_freemem_keys; + } + enable_irq_wake(keypad->gpio_input_irq[i]); + } + + ret = request_threaded_irq(keypad->irq, NULL, ske_keypad_irq, + IRQF_ONESHOT, "ske-keypad", keypad); + if (ret) { + dev_err(&pdev->dev, "allocate irq %d failed\n", keypad->irq); + goto out_freemem_keys; + } + + /* sysfs implementation for dynamic enable/disable the input event */ + ret = sysfs_create_group(&pdev->dev.kobj, &ske_attr_group); + if (ret) { + dev_err(&pdev->dev, "failed to create sysfs entries\n"); + goto out_free_irq; } if (plat->wakeup_enable) @@ -305,37 +797,80 @@ static int __init ske_keypad_probe(struct platform_device *pdev) platform_set_drvdata(pdev, keypad); + clk_disable(keypad->clk); + regulator_disable(keypad->regulator); + return 0; -err_free_irq: +out_free_irq: free_irq(keypad->irq, keypad); -err_clk_disable: +out_freemem_keys: + for (i = 0; i < plat->krow; i++) + kfree(keypad->keys[i]); + kfree(keypad->keys); +out_freemem_cols: + kfree(keypad->ske_cols); +out_freemem_rows: + kfree(keypad->ske_rows); +out_freemem_input_irq: + kfree(keypad->gpio_input_irq); +out_unregisterinput: + input_unregister_device(input); + input = NULL; clk_disable(keypad->clk); - clk_put(keypad->clk); -err_iounmap: - iounmap(keypad->reg_base); -err_free_mem_region: - release_mem_region(res->start, resource_size(res)); -err_free_mem: +out_freeinput: + regulator_disable(keypad->regulator); +out_regulator_enable: + regulator_put(keypad->regulator); +out_regulator_get: input_free_device(input); +out_freekeypad: kfree(keypad); - return error; +out_freeclk: + clk_put(clk); +out_freeioremap: + iounmap(reg_base); 
+out_freerequest_memregions: + release_mem_region(res->start, resource_size(res)); + return ret; } static int __devexit ske_keypad_remove(struct platform_device *pdev) { struct ske_keypad *keypad = platform_get_drvdata(pdev); struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + int i; - free_irq(keypad->irq, keypad); + cancel_delayed_work_sync(&keypad->gpio_work); + cancel_delayed_work_sync(&keypad->work); + cancel_delayed_work_sync(&keypad->scan_work); - input_unregister_device(keypad->input); + for (i = 0; i < keypad->board->krow; i++) + kfree(keypad->keys[i]); + kfree(keypad->keys); + kfree(keypad->ske_cols); + kfree(keypad->ske_rows); - clk_disable(keypad->clk); + input_unregister_device(keypad->input); + sysfs_remove_group(&pdev->dev.kobj, &ske_attr_group); + if (keypad->enable) + clk_disable(keypad->clk); clk_put(keypad->clk); - if (keypad->board->exit) + if (keypad->enable && keypad->board->exit) keypad->board->exit(); + else { + for (i = 0; i < keypad->board->krow; i++) { + disable_irq_nosync(keypad->gpio_input_irq[i]); + disable_irq_wake(keypad->gpio_input_irq[i]); + } + } + for (i = 0; i < keypad->board->krow; i++) + free_irq(keypad->gpio_input_irq[i], keypad); + + kfree(keypad->gpio_input_irq); + free_irq(keypad->irq, keypad); + regulator_put(keypad->regulator); iounmap(keypad->reg_base); release_mem_region(res->start, resource_size(res)); @@ -353,8 +888,19 @@ static int ske_keypad_suspend(struct device *dev) if (device_may_wakeup(dev)) enable_irq_wake(irq); - else - ske_keypad_set_bits(keypad, SKE_IMSC, ~SKE_KPIMA, 0x0); + else { + cancel_delayed_work_sync(&keypad->gpio_work); + cancel_delayed_work_sync(&keypad->work); + cancel_delayed_work_sync(&keypad->scan_work); + disable_irq(irq); + + keypad->enable_on_resume = keypad->enable; + + if (keypad->enable) { + ske_mode_enable(keypad, false); + keypad->enable = false; + } + } return 0; } @@ -367,8 +913,20 @@ static int ske_keypad_resume(struct device *dev) if (device_may_wakeup(dev)) 
disable_irq_wake(irq); - else - ske_keypad_set_bits(keypad, SKE_IMSC, 0x0, SKE_KPIMA); + else { + if (keypad->enable_on_resume && !keypad->enable) { + keypad->enable = true; + ske_mode_enable(keypad, true); + /* + * Schedule the work queue to change it to GPIO mode + * if there is no activity in SKE mode + */ + if (!keypad->key_pressed) + schedule_delayed_work(&keypad->work, + keypad->board->switch_delay); + } + enable_irq(irq); + } return 0; } @@ -399,6 +957,6 @@ static void __exit ske_keypad_exit(void) module_exit(ske_keypad_exit); MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Naveen Kumar <naveen.gaddipati@stericsson.com> / Sundar Iyer <sundar.iyer@stericsson.com>"); +MODULE_AUTHOR("Naveen Kumar <naveen.gaddipati@stericsson.com>"); MODULE_DESCRIPTION("Nomadik Scroll-Key-Encoder Keypad Driver"); MODULE_ALIAS("platform:nomadik-ske-keypad"); diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c index 9397cf9c625..892335275dd 100644 --- a/drivers/input/keyboard/stmpe-keypad.c +++ b/drivers/input/keyboard/stmpe-keypad.c @@ -108,10 +108,52 @@ struct stmpe_keypad { unsigned int rows; unsigned int cols; + bool enable; unsigned short keymap[STMPE_KEYPAD_KEYMAP_SIZE]; }; +static ssize_t stmpe_show_attr_enable(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + struct stmpe_keypad *keypad = platform_get_drvdata(pdev); + return sprintf(buf, "%d\n", keypad->enable); +} + +static ssize_t stmpe_store_attr_enable(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct platform_device *pdev = to_platform_device(dev); + struct stmpe_keypad *keypad = platform_get_drvdata(pdev); + struct stmpe *stmpe = keypad->stmpe; + unsigned long val; + + if (strict_strtoul(buf, 0, &val)) + return -EINVAL; + + if (keypad->enable != val) { + keypad->enable = val; + if (!val) + stmpe_disable(stmpe, STMPE_BLOCK_KEYPAD); + else + stmpe_enable(stmpe, 
STMPE_BLOCK_KEYPAD); + } + return count; +} + +static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO, + stmpe_show_attr_enable, stmpe_store_attr_enable); + +static struct attribute *stmpe_keypad_attrs[] = { + &dev_attr_enable.attr, + NULL, +}; + +static struct attribute_group stmpe_attr_group = { + .attrs = stmpe_keypad_attrs, +}; + static int stmpe_keypad_read_data(struct stmpe_keypad *keypad, u8 *data) { const struct stmpe_keypad_variant *variant = keypad->variant; @@ -285,7 +327,7 @@ static int __devinit stmpe_keypad_probe(struct platform_device *pdev) goto out_freekeypad; } - input->name = "STMPE keypad"; + input->name = "STMPE-keypad"; input->id.bustype = BUS_I2C; input->dev.parent = &pdev->dev; @@ -332,10 +374,20 @@ static int __devinit stmpe_keypad_probe(struct platform_device *pdev) goto out_unregisterinput; } + /* sysfs implementation for dynamic enable/disable the input event */ + ret = sysfs_create_group(&pdev->dev.kobj, &stmpe_attr_group); + if (ret) { + dev_err(&pdev->dev, "failed to create sysfs entries\n"); + goto out_free_irq; + } + + keypad->enable = true; platform_set_drvdata(pdev, keypad); return 0; +out_free_irq: + free_irq(irq, keypad); out_unregisterinput: input_unregister_device(input); input = NULL; @@ -354,6 +406,7 @@ static int __devexit stmpe_keypad_remove(struct platform_device *pdev) stmpe_disable(stmpe, STMPE_BLOCK_KEYPAD); + sysfs_remove_group(&pdev->dev.kobj, &stmpe_attr_group); free_irq(irq, keypad); input_unregister_device(keypad->input); platform_set_drvdata(pdev, NULL); @@ -362,9 +415,43 @@ static int __devexit stmpe_keypad_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM +static int stmpe_keypad_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct stmpe_keypad *keypad = platform_get_drvdata(pdev); + struct stmpe *stmpe = keypad->stmpe; + + if (!device_may_wakeup(stmpe->dev)) + stmpe_disable(stmpe, STMPE_BLOCK_KEYPAD); + + return 0; +} + +static int stmpe_keypad_resume(struct 
device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct stmpe_keypad *keypad = platform_get_drvdata(pdev); + struct stmpe *stmpe = keypad->stmpe; + + if (!device_may_wakeup(stmpe->dev)) + stmpe_enable(stmpe, STMPE_BLOCK_KEYPAD); + + return 0; +} + +static const struct dev_pm_ops stmpe_keypad_dev_pm_ops = { + .suspend = stmpe_keypad_suspend, + .resume = stmpe_keypad_resume, +}; +#endif + static struct platform_driver stmpe_keypad_driver = { .driver.name = "stmpe-keypad", .driver.owner = THIS_MODULE, +#ifdef CONFIG_PM + .driver.pm = &stmpe_keypad_dev_pm_ops, +#endif .probe = stmpe_keypad_probe, .remove = __devexit_p(stmpe_keypad_remove), }; diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 7faf4a7fcaa..dedd5d6cf7a 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -22,12 +22,26 @@ config INPUT_88PM860X_ONKEY To compile this driver as a module, choose M here: the module will be called 88pm860x_onkey. +config INPUT_AB8500_ACCDET + bool "AB8500 AV Accessory detection" + depends on AB8500_CORE && AB8500_GPADC && GPIO_AB8500 + help + Say Y here to enable AV accessory detection features for ST-Ericsson's + AB8500 Mix-Sig PMIC. + +config INPUT_AB5500_ACCDET + bool "AB5500 AV Accessory detection" + depends on AB5500_CORE && AB5500_GPADC + help + Say Y here to enable AV accessory detection features for ST-Ericsson's + AB5500 Mix-Sig PMIC. + config INPUT_AB8500_PONKEY - tristate "AB8500 Pon (PowerOn) Key" - depends on AB8500_CORE + tristate "AB5500/AB8500 Pon (PowerOn) Key" + depends on AB5500_CORE || AB8500_CORE help - Say Y here to use the PowerOn Key for ST-Ericsson's AB8500 - Mix-Sig PMIC. + Say Y here to use the PowerOn Key for ST-Ericsson's AB5500/AB8500 + Mix-Sig PMICs. To compile this driver as a module, choose M here: the module will be called ab8500-ponkey. 
@@ -590,4 +604,14 @@ config INPUT_XEN_KBDDEV_FRONTEND To compile this driver as a module, choose M here: the module will be called xen-kbdfront. +config INPUT_STE_FF_VIBRA + tristate "ST-Ericsson Force Feedback Vibrator" + depends on STE_AUDIO_IO_DEV + select INPUT_FF_MEMLESS + help + This option enables support for ST-Ericsson's Vibrator which + registers as an input force feedback driver. + + To compile this driver as a module, choose M here. The module will + be called ste_ff_vibra. endif diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index f55cdf4916f..e9aa2d854bc 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -5,6 +5,8 @@ # Each configuration option enables a list of files. obj-$(CONFIG_INPUT_88PM860X_ONKEY) += 88pm860x_onkey.o +obj-$(CONFIG_INPUT_AB8500_ACCDET) += abx500-accdet.o ab8500-accdet.o +obj-$(CONFIG_INPUT_AB5500_ACCDET) += abx500-accdet.o ab5500-accdet.o obj-$(CONFIG_INPUT_AB8500_PONKEY) += ab8500-ponkey.o obj-$(CONFIG_INPUT_AD714X) += ad714x.o obj-$(CONFIG_INPUT_AD714X_I2C) += ad714x-i2c.o @@ -55,3 +57,4 @@ obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o obj-$(CONFIG_INPUT_YEALINK) += yealink.o +obj-$(CONFIG_INPUT_STE_FF_VIBRA) += ste_ff_vibra.o diff --git a/drivers/input/misc/ab5500-accdet.c b/drivers/input/misc/ab5500-accdet.c new file mode 100644 index 00000000000..fa8e2523126 --- /dev/null +++ b/drivers/input/misc/ab5500-accdet.c @@ -0,0 +1,284 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * Author: Jarmo K. Kuronen <jarmo.kuronen@symbio.com> + * for ST-Ericsson. + * + * License terms: GPL V2 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/mfd/abx500/ab5500.h> +#include <linux/mfd/abx500/ab5500-gpadc.h> +#include <linux/mfd/abx500.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/input/abx500-accdet.h> + +/* + * Register definition for accessory detection. + */ +#define AB5500_REGU_CTRL1_SPARE_REG 0x84 +#define AB5500_ACC_DET_DB1_REG 0x20 +#define AB5500_ACC_DET_DB2_REG 0x21 +#define AB5500_ACC_DET_CTRL_REG 0x23 +#define AB5500_VDENC_CTRL0 0x80 + +/* REGISTER: AB8500_ACC_DET_CTRL_REG */ +#define BITS_ACCDETCTRL2_ENA (0x20 | 0x10 | 0x08) +#define BITS_ACCDETCTRL1_ENA (0x02 | 0x01) + +/* REGISTER: AB8500_REGU_CTRL1_SPARE_REG */ +#define BIT_REGUCTRL1SPARE_VAMIC1_GROUND 0x01 + +/* REGISTER: AB8500_IT_SOURCE5_REG */ +#define BIT_ITSOURCE5_ACCDET1 0x02 + +static struct accessory_irq_descriptor ab5500_irq_desc[] = { + { + .irq = PLUG_IRQ, + .name = "acc_detedt1db_falling", + .isr = plug_irq_handler, + }, + { + .irq = UNPLUG_IRQ, + .name = "acc_detedt1db_rising", + .isr = unplug_irq_handler, + }, + { + .irq = BUTTON_PRESS_IRQ, + .name = "acc_detedt21db_falling", + .isr = button_press_irq_handler, + }, + { + .irq = BUTTON_RELEASE_IRQ, + .name = "acc_detedt21db_rising", + .isr = button_release_irq_handler, + }, +}; + +static struct accessory_regu_descriptor ab5500_regu_desc[] = { + { + .id = REGULATOR_VAMIC1, + .name = "v-amic", + }, +}; + + +/* + * configures accdet2 input on/off + */ +static void ab5500_config_accdetect2_hw(struct abx500_ad *dd, int enable) +{ + int ret = 0; + + if (!dd->accdet2_th_set) { + /* Configure accdetect21+22 thresholds */ + ret = abx500_set_register_interruptible(&dd->pdev->dev, + AB5500_BANK_FG_BATTCOM_ACC, + AB5500_ACC_DET_DB2_REG, + dd->pdata->accdet2122_th); + if (ret < 0) { + dev_err(&dd->pdev->dev, + "%s: Failed to write reg (%d).\n", __func__, + ret); + goto out; + } else { + dd->accdet2_th_set = 1; + } + } + + /* Enable/Disable accdetect21 comparators + pullup 
*/ + ret = abx500_mask_and_set_register_interruptible( + &dd->pdev->dev, + AB5500_BANK_FG_BATTCOM_ACC, + AB5500_ACC_DET_CTRL_REG, + BITS_ACCDETCTRL2_ENA, + enable ? BITS_ACCDETCTRL2_ENA : 0); + + if (ret < 0) + dev_err(&dd->pdev->dev, "%s: Failed to update reg (%d).\n", + __func__, ret); +out: + return; +} + +/* + * configures accdet1 input on/off + */ +static void ab5500_config_accdetect1_hw(struct abx500_ad *dd, int enable) +{ + int ret; + + if (!dd->accdet1_th_set) { + ret = abx500_set_register_interruptible(&dd->pdev->dev, + AB5500_BANK_FG_BATTCOM_ACC, + AB5500_ACC_DET_DB1_REG, + dd->pdata->accdet1_dbth); + if (ret < 0) + dev_err(&dd->pdev->dev, + "%s: Failed to write reg (%d).\n", __func__, + ret); + else + dd->accdet1_th_set = 1; + } + + /* enable accdetect1 comparator */ + ret = abx500_mask_and_set_register_interruptible( + &dd->pdev->dev, + AB5500_BANK_FG_BATTCOM_ACC, + AB5500_ACC_DET_CTRL_REG, + BITS_ACCDETCTRL1_ENA, + enable ? BITS_ACCDETCTRL1_ENA : 0); + + if (ret < 0) + dev_err(&dd->pdev->dev, + "%s: Failed to update reg (%d).\n", __func__, ret); +} + +/* + * returns the high level status whether some accessory is connected (1|0). + */ +static int ab5500_detect_plugged_in(struct abx500_ad *dd) +{ + u8 value = 0; + + int status = abx500_get_register_interruptible( + &dd->pdev->dev, + AB5500_BANK_IT, + AB5500_IT_SOURCE3_REG, + &value); + if (status < 0) { + dev_err(&dd->pdev->dev, "%s: reg read failed (%d).\n", + __func__, status); + return 0; + } + + if (dd->pdata->is_detection_inverted) + return value & BIT_ITSOURCE5_ACCDET1 ? 1 : 0; + else + return value & BIT_ITSOURCE5_ACCDET1 ? 0 : 1; +} + +/* + * mic_line_voltage_stable - measures a relative stable voltage from spec. 
input + */ +static int ab5500_meas_voltage_stable(struct abx500_ad *dd) +{ + int iterations = 2; + int v1, v2, dv; + + v1 = ab5500_gpadc_convert((struct ab5500_gpadc *)dd->gpadc, + ACC_DETECT2); + do { + msleep(1); + --iterations; + v2 = ab5500_gpadc_convert((struct ab5500_gpadc *)dd->gpadc, + ACC_DETECT2); + dv = abs(v2 - v1); + v1 = v2; + } while (iterations > 0 && dv > MAX_VOLT_DIFF); + + return v1; +} + +/* + * not implemented + */ +static int ab5500_meas_alt_voltage_stable(struct abx500_ad *dd) +{ + return -1; +} + +/* + * configures HW so that it is possible to make decision whether + * accessory is connected or not. + */ +static void ab5500_config_hw_test_plug_connected(struct abx500_ad *dd, + int enable) +{ + dev_dbg(&dd->pdev->dev, "%s:%d\n", __func__, enable); + + /* enable mic BIAS2 */ + if (enable) + accessory_regulator_enable(dd, REGULATOR_VAMIC1); +} + +/* + * configures HW so that carkit/headset detection can be accomplished. + */ +static void ab5500_config_hw_test_basic_carkit(struct abx500_ad *dd, int enable) +{ + /* enable mic BIAS2 */ + if (enable) + accessory_regulator_disable(dd, REGULATOR_VAMIC1); +} + +static u8 acc_det_ctrl_suspend_val; + +static void ab5500_turn_off_accdet_comparator(struct platform_device *pdev) +{ + struct abx500_ad *dd = platform_get_drvdata(pdev); + + /* Turn off AccDetect comparators and pull-up */ + (void) abx500_get_register_interruptible( + &dd->pdev->dev, + AB5500_BANK_FG_BATTCOM_ACC, + AB5500_ACC_DET_CTRL_REG, + &acc_det_ctrl_suspend_val); + (void) abx500_set_register_interruptible( + &dd->pdev->dev, + AB5500_BANK_FG_BATTCOM_ACC, + AB5500_ACC_DET_CTRL_REG, + 0); +} + +static void ab5500_turn_on_accdet_comparator(struct platform_device *pdev) +{ + struct abx500_ad *dd = platform_get_drvdata(pdev); + + /* Turn on AccDetect comparators and pull-up */ + (void) abx500_set_register_interruptible( + &dd->pdev->dev, + AB5500_BANK_FG_BATTCOM_ACC, + AB5500_ACC_DET_CTRL_REG, + acc_det_ctrl_suspend_val); +} + +static void 
*ab5500_accdet_abx500_gpadc_get(void) +{ + return ab5500_gpadc_get("ab5500-adc.0"); +} + +struct abx500_accdet_platform_data * + ab5500_get_platform_data(struct platform_device *pdev) +{ + return pdev->dev.platform_data; +} + +struct abx500_ad ab5500_accessory_det_callbacks = { + .irq_desc_norm = ab5500_irq_desc, + .irq_desc_inverted = NULL, + .no_irqs = ARRAY_SIZE(ab5500_irq_desc), + .regu_desc = ab5500_regu_desc, + .no_of_regu_desc = ARRAY_SIZE(ab5500_regu_desc), + .config_accdetect2_hw = ab5500_config_accdetect2_hw, + .config_accdetect1_hw = ab5500_config_accdetect1_hw, + .detect_plugged_in = ab5500_detect_plugged_in, + .meas_voltage_stable = ab5500_meas_voltage_stable, + .meas_alt_voltage_stable = ab5500_meas_alt_voltage_stable, + .config_hw_test_basic_carkit = ab5500_config_hw_test_basic_carkit, + .turn_off_accdet_comparator = ab5500_turn_off_accdet_comparator, + .turn_on_accdet_comparator = ab5500_turn_on_accdet_comparator, + .accdet_abx500_gpadc_get = ab5500_accdet_abx500_gpadc_get, + .config_hw_test_plug_connected = ab5500_config_hw_test_plug_connected, + .set_av_switch = NULL, + .get_platform_data = ab5500_get_platform_data, +}; + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/input/misc/ab8500-accdet.c b/drivers/input/misc/ab8500-accdet.c new file mode 100644 index 00000000000..1b96b6b3fef --- /dev/null +++ b/drivers/input/misc/ab8500-accdet.c @@ -0,0 +1,464 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * Author: Jarmo K. Kuronen <jarmo.kuronen@symbio.com> + * for ST-Ericsson. + * + * License terms: GPL V2 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ */ + +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/mfd/abx500.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/mfd/abx500/ab8500-gpadc.h> +#include <linux/mfd/abx500/ab8500-gpio.h> +#include <linux/gpio.h> +#include <linux/err.h> +#include <linux/input/abx500-accdet.h> +#ifdef CONFIG_SND_SOC_UX500_AB8500 +#include <sound/ux500_ab8500_ext.h> +#endif + +#define MAX_DET_COUNT 10 +#define MAX_VOLT_DIFF 30 +#define MIN_MIC_POWER -100 + +/* Unique value used to identify Headset button input device */ +#define BTN_INPUT_UNIQUE_VALUE "AB8500HsBtn" +#define BTN_INPUT_DEV_NAME "AB8500 Hs Button" + +#define DEBOUNCE_PLUG_EVENT_MS 100 +#define DEBOUNCE_PLUG_RETEST_MS 25 +#define DEBOUNCE_UNPLUG_EVENT_MS 0 + +/* + * Register definition for accessory detection. + */ +#define AB8500_REGU_CTRL1_SPARE_REG 0x84 +#define AB8500_ACC_DET_DB1_REG 0x80 +#define AB8500_ACC_DET_DB2_REG 0x81 +#define AB8500_ACC_DET_CTRL_REG 0x82 +#define AB8500_IT_SOURCE5_REG 0x04 + +/* REGISTER: AB8500_ACC_DET_CTRL_REG */ +#define BITS_ACCDETCTRL2_ENA (0x20 | 0x10 | 0x08) +#define BITS_ACCDETCTRL1_ENA (0x02 | 0x01) + +/* REGISTER: AB8500_REGU_CTRL1_SPARE_REG */ +#define BIT_REGUCTRL1SPARE_VAMIC1_GROUND 0x01 + +/* REGISTER: AB8500_IT_SOURCE5_REG */ +#define BIT_ITSOURCE5_ACCDET1 0x04 + +/* After being loaded, how fast the first check is to be made */ +#define INIT_DELAY_MS 3000 + +/* Voltage limits (mV) for various types of AV Accessories */ +#define ACCESSORY_DET_VOL_DONTCARE -1 +#define ACCESSORY_HEADPHONE_DET_VOL_MIN 0 +#define ACCESSORY_HEADPHONE_DET_VOL_MAX 40 +#define ACCESSORY_CARKIT_DET_VOL_MIN 1100 +#define ACCESSORY_CARKIT_DET_VOL_MAX 1300 +#define ACCESSORY_HEADSET_DET_VOL_MIN 0 +#define ACCESSORY_HEADSET_DET_VOL_MAX 200 +#define ACCESSORY_OPENCABLE_DET_VOL_MIN 1730 +#define ACCESSORY_OPENCABLE_DET_VOL_MAX 2150 + +/* Static data initialization */ + +static struct accessory_regu_descriptor ab8500_regu_desc[3] = { + { + .id = 
REGULATOR_VAUDIO, + .name = "v-audio", + }, + { + .id = REGULATOR_VAMIC1, + .name = "v-amic1", + }, + { + .id = REGULATOR_AVSWITCH, + .name = "vcc-N2158", + }, +}; + +static struct accessory_irq_descriptor ab8500_irq_desc_norm[] = { + { + .irq = PLUG_IRQ, + .name = "ACC_DETECT_1DB_F", + .isr = plug_irq_handler, + }, + { + .irq = UNPLUG_IRQ, + .name = "ACC_DETECT_1DB_R", + .isr = unplug_irq_handler, + }, + { + .irq = BUTTON_PRESS_IRQ, + .name = "ACC_DETECT_22DB_F", + .isr = button_press_irq_handler, + }, + { + .irq = BUTTON_RELEASE_IRQ, + .name = "ACC_DETECT_22DB_R", + .isr = button_release_irq_handler, + }, +}; + +static struct accessory_irq_descriptor ab8500_irq_desc_inverted[] = { + { + .irq = PLUG_IRQ, + .name = "ACC_DETECT_1DB_R", + .isr = plug_irq_handler, + }, + { + .irq = UNPLUG_IRQ, + .name = "ACC_DETECT_1DB_F", + .isr = unplug_irq_handler, + }, + { + .irq = BUTTON_PRESS_IRQ, + .name = "ACC_DETECT_22DB_R", + .isr = button_press_irq_handler, + }, + { + .irq = BUTTON_RELEASE_IRQ, + .name = "ACC_DETECT_22DB_F", + .isr = button_release_irq_handler, + }, +}; + +/* + * configures accdet2 input on/off + */ +static void ab8500_config_accdetect2_hw(struct abx500_ad *dd, int enable) +{ + int ret = 0; + + if (!dd->accdet2_th_set) { + /* Configure accdetect21+22 thresholds */ + ret = abx500_set_register_interruptible(&dd->pdev->dev, + AB8500_ECI_AV_ACC, + AB8500_ACC_DET_DB2_REG, + dd->pdata->accdet2122_th); + if (ret < 0) { + dev_err(&dd->pdev->dev, + "%s: Failed to write reg (%d).\n", __func__, + ret); + goto out; + } else { + dd->accdet2_th_set = 1; + } + } + + /* Enable/Disable accdetect21 comparators + pullup */ + ret = abx500_mask_and_set_register_interruptible( + &dd->pdev->dev, + AB8500_ECI_AV_ACC, + AB8500_ACC_DET_CTRL_REG, + BITS_ACCDETCTRL2_ENA, + enable ? 
BITS_ACCDETCTRL2_ENA : 0); + + if (ret < 0) + dev_err(&dd->pdev->dev, "%s: Failed to update reg (%d).\n", + __func__, ret); + +out: + return; +} + +/* + * configures accdet1 input on/off + */ +static void ab8500_config_accdetect1_hw(struct abx500_ad *dd, int enable) +{ + int ret; + + if (!dd->accdet1_th_set) { + ret = abx500_set_register_interruptible(&dd->pdev->dev, + AB8500_ECI_AV_ACC, + AB8500_ACC_DET_DB1_REG, + dd->pdata->accdet1_dbth); + if (ret < 0) + dev_err(&dd->pdev->dev, + "%s: Failed to write reg (%d).\n", __func__, + ret); + else + dd->accdet1_th_set = 1; + } + + /* enable accdetect1 comparator */ + ret = abx500_mask_and_set_register_interruptible( + &dd->pdev->dev, + AB8500_ECI_AV_ACC, + AB8500_ACC_DET_CTRL_REG, + BITS_ACCDETCTRL1_ENA, + enable ? BITS_ACCDETCTRL1_ENA : 0); + + if (ret < 0) + dev_err(&dd->pdev->dev, + "%s: Failed to update reg (%d).\n", __func__, ret); +} + +/* + * returns the high level status whether some accessory is connected (1|0). + */ +static int ab8500_detect_plugged_in(struct abx500_ad *dd) +{ + u8 value = 0; + + int status = abx500_get_register_interruptible( + &dd->pdev->dev, + AB8500_INTERRUPT, + AB8500_IT_SOURCE5_REG, + &value); + if (status < 0) { + dev_err(&dd->pdev->dev, "%s: reg read failed (%d).\n", + __func__, status); + return 0; + } + + if (dd->pdata->is_detection_inverted) + return value & BIT_ITSOURCE5_ACCDET1 ? 1 : 0; + else + return value & BIT_ITSOURCE5_ACCDET1 ? 0 : 1; +} + +#ifdef CONFIG_SND_SOC_UX500_AB8500 + +/* + * meas_voltage_stable - measures relative stable voltage from spec. input + */ +static int ab8500_meas_voltage_stable(struct abx500_ad *dd) +{ + int ret, mv; + + ret = ux500_ab8500_audio_gpadc_measure((struct ab8500_gpadc *)dd->gpadc, + ACC_DETECT2, false, &mv); + + return (ret < 0) ? ret : mv; +} + +/* + * meas_alt_voltage_stable - measures relative stable voltage from spec. 
input + */ +static int ab8500_meas_alt_voltage_stable(struct abx500_ad *dd) +{ + int ret, mv; + + ret = ux500_ab8500_audio_gpadc_measure((struct ab8500_gpadc *)dd->gpadc, + ACC_DETECT2, true, &mv); + + return (ret < 0) ? ret : mv; +} + +#else + +/* + * meas_voltage_stable - measures relative stable voltage from spec. input + */ +static int ab8500_meas_voltage_stable(struct abx500_ad *dd) +{ + int iterations = 2; + int v1, v2, dv; + + v1 = ab8500_gpadc_convert((struct ab8500_gpadc *)dd->gpadc, + ACC_DETECT2); + do { + msleep(1); + --iterations; + v2 = ab8500_gpadc_convert((struct ab8500_gpadc *)dd->gpadc, + ACC_DETECT2); + dv = abs(v2 - v1); + v1 = v2; + } while (iterations > 0 && dv > MAX_VOLT_DIFF); + + return v1; +} + +/* + * not implemented for non soc setups + */ +static int ab8500_meas_alt_voltage_stable(struct abx500_ad *dd) +{ + return -1; +} + +#endif + +/* + * configures HW so that it is possible to make decision whether + * accessory is connected or not. + */ +static void ab8500_config_hw_test_plug_connected(struct abx500_ad *dd, + int enable) +{ + int ret; + + dev_dbg(&dd->pdev->dev, "%s:%d\n", __func__, enable); + + ret = ab8500_config_pulldown(&dd->pdev->dev, + dd->pdata->video_ctrl_gpio, !enable); + if (ret < 0) { + dev_err(&dd->pdev->dev, + "%s: Failed to update reg (%d).\n", __func__, ret); + return; + } + + if (enable) + accessory_regulator_enable(dd, REGULATOR_VAMIC1); +} + +/* + * configures HW so that carkit/headset detection can be accomplished. + */ +static void ab8500_config_hw_test_basic_carkit(struct abx500_ad *dd, int enable) +{ + int ret; + + dev_dbg(&dd->pdev->dev, "%s:%d\n", __func__, enable); + + if (enable) + accessory_regulator_disable(dd, REGULATOR_VAMIC1); + + /* Un-Ground the VAMic1 output when enabled */ + ret = abx500_mask_and_set_register_interruptible( + &dd->pdev->dev, + AB8500_REGU_CTRL1, + AB8500_REGU_CTRL1_SPARE_REG, + BIT_REGUCTRL1SPARE_VAMIC1_GROUND, + enable ? 
BIT_REGUCTRL1SPARE_VAMIC1_GROUND : 0); + if (ret < 0) + dev_err(&dd->pdev->dev, + "%s: Failed to update reg (%d).\n", __func__, ret); +} + +/* + * sets the av switch direction - audio-in vs video-out + */ +static void ab8500_set_av_switch(struct abx500_ad *dd, + enum accessory_avcontrol_dir dir) +{ + int ret; + + dev_dbg(&dd->pdev->dev, "%s: Enter (%d)\n", __func__, dir); + if (dir == NOT_SET) { + ret = gpio_direction_input(dd->pdata->video_ctrl_gpio); + dd->gpio35_dir_set = 0; + ret = gpio_direction_output(dd->pdata->video_ctrl_gpio, 0); + if (dd->pdata->mic_ctrl) + gpio_direction_output(dd->pdata->mic_ctrl, 0); + } else if (!dd->gpio35_dir_set) { + ret = gpio_direction_output(dd->pdata->video_ctrl_gpio, + dir == AUDIO_IN ? 1 : 0); + if (ret < 0) { + dev_err(&dd->pdev->dev, + "%s: video_ctrl pin output config failed (%d).\n", + __func__, ret); + return; + } + + if (dd->pdata->mic_ctrl) { + ret = gpio_direction_output(dd->pdata->mic_ctrl, + dir == AUDIO_IN ? 1 : 0); + if (ret < 0) { + dev_err(&dd->pdev->dev, + "%s: mic_ctrl pin output" + "config failed (%d).\n", + __func__, ret); + return; + } + } + + dd->gpio35_dir_set = 1; + dev_dbg(&dd->pdev->dev, "AV-SWITCH: %s\n", + dir == AUDIO_IN ? "AUDIO_IN" : "VIDEO_OUT"); + } else { + gpio_set_value(dd->pdata->video_ctrl_gpio, + dir == AUDIO_IN ? 
1 : 0); + } +} + +static u8 acc_det_ctrl_suspend_val; + +static void ab8500_turn_off_accdet_comparator(struct platform_device *pdev) +{ + struct abx500_ad *dd = platform_get_drvdata(pdev); + + /* Turn off AccDetect comparators and pull-up */ + (void) abx500_get_register_interruptible( + &dd->pdev->dev, + AB8500_ECI_AV_ACC, + AB8500_ACC_DET_CTRL_REG, + &acc_det_ctrl_suspend_val); + (void) abx500_set_register_interruptible( + &dd->pdev->dev, + AB8500_ECI_AV_ACC, + AB8500_ACC_DET_CTRL_REG, + 0); + +} + +static void ab8500_turn_on_accdet_comparator(struct platform_device *pdev) +{ + struct abx500_ad *dd = platform_get_drvdata(pdev); + + /* Turn on AccDetect comparators and pull-up */ + (void) abx500_set_register_interruptible( + &dd->pdev->dev, + AB8500_ECI_AV_ACC, + AB8500_ACC_DET_CTRL_REG, + acc_det_ctrl_suspend_val); + +} + +static void *ab8500_accdet_abx500_gpadc_get(void) +{ + return ab8500_gpadc_get("ab8500-gpadc.0"); +} + +struct abx500_accdet_platform_data * + ab8500_get_platform_data(struct platform_device *pdev) +{ + struct ab8500_platform_data *plat; + + plat = dev_get_platdata(pdev->dev.parent); + + if (!plat || !plat->accdet) { + dev_err(&pdev->dev, "%s: Failed to get accdet plat data.\n", + __func__); + return ERR_PTR(-ENODEV); + } + + return plat->accdet; +} + +struct abx500_ad ab8500_accessory_det_callbacks = { + .irq_desc_norm = ab8500_irq_desc_norm, + .irq_desc_inverted = ab8500_irq_desc_inverted, + .no_irqs = ARRAY_SIZE(ab8500_irq_desc_norm), + .regu_desc = ab8500_regu_desc, + .no_of_regu_desc = ARRAY_SIZE(ab8500_regu_desc), + .config_accdetect2_hw = ab8500_config_accdetect2_hw, + .config_accdetect1_hw = ab8500_config_accdetect1_hw, + .detect_plugged_in = ab8500_detect_plugged_in, + .meas_voltage_stable = ab8500_meas_voltage_stable, + .meas_alt_voltage_stable = ab8500_meas_alt_voltage_stable, + .config_hw_test_basic_carkit = ab8500_config_hw_test_basic_carkit, + .turn_off_accdet_comparator = ab8500_turn_off_accdet_comparator, + 
.turn_on_accdet_comparator = ab8500_turn_on_accdet_comparator, + .accdet_abx500_gpadc_get = ab8500_accdet_abx500_gpadc_get, + .config_hw_test_plug_connected = ab8500_config_hw_test_plug_connected, + .set_av_switch = ab8500_set_av_switch, + .get_platform_data = ab8500_get_platform_data, +}; + +MODULE_DESCRIPTION("AB8500 AV Accessory detection driver"); +MODULE_ALIAS("platform:ab8500-acc-det"); +MODULE_AUTHOR("ST-Ericsson"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/input/misc/ab8500-ponkey.c b/drivers/input/misc/ab8500-ponkey.c index 350fd0c385d..c3c3c51d302 100644 --- a/drivers/input/misc/ab8500-ponkey.c +++ b/drivers/input/misc/ab8500-ponkey.c @@ -6,7 +6,6 @@ * * AB8500 Power-On Key handler */ - #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> @@ -14,128 +13,208 @@ #include <linux/interrupt.h> #include <linux/mfd/abx500/ab8500.h> #include <linux/slab.h> +#include <linux/mfd/abx500.h> +#include <linux/mfd/abx500/ab5500.h> + +/* Ponkey time control bits */ +#define AB5500_MCB 0x2F +#define AB5500_PONKEY_10SEC 0x0 +#define AB5500_PONKEY_5SEC 0x1 +#define AB5500_PONKEY_DISABLE 0x2 +#define AB5500_PONKEY_TMR_MASK 0x1 +#define AB5500_PONKEY_TR_MASK 0x2 + +static int ab5500_ponkey_hw_init(struct platform_device *); + +struct ab8500_ponkey_variant { + const char *irq_falling; + const char *irq_rising; + int (*hw_init)(struct platform_device *); +}; + +static const struct ab8500_ponkey_variant ab5500_onswa = { + .irq_falling = "ONSWAn_falling", + .irq_rising = "ONSWAn_rising", + .hw_init = ab5500_ponkey_hw_init, +}; + +static const struct ab8500_ponkey_variant ab8500_ponkey = { + .irq_falling = "ONKEY_DBF", + .irq_rising = "ONKEY_DBR", +}; /** - * struct ab8500_ponkey - ab8500 ponkey information + * struct ab8500_ponkey_info - ab8500 ponkey information * @input_dev: pointer to input device - * @ab8500: ab8500 parent * @irq_dbf: irq number for falling transition * @irq_dbr: irq number for rising transition */ -struct ab8500_ponkey 
{ +struct ab8500_ponkey_info { struct input_dev *idev; - struct ab8500 *ab8500; int irq_dbf; int irq_dbr; }; +static int ab5500_ponkey_hw_init(struct platform_device *pdev) +{ + u8 val; + struct ab5500_ponkey_platform_data *pdata; + + pdata = pdev->dev.platform_data; + if (pdata) { + switch (pdata->shutdown_secs) { + case 0: + val = AB5500_PONKEY_DISABLE; + break; + case 5: + val = AB5500_PONKEY_5SEC; + break; + case 10: + val = AB5500_PONKEY_10SEC; + break; + default: + val = AB5500_PONKEY_10SEC; + } + } else { + val = AB5500_PONKEY_10SEC; + } + return abx500_mask_and_set( + &pdev->dev, + AB5500_BANK_STARTUP, + AB5500_MCB, + AB5500_PONKEY_TMR_MASK | AB5500_PONKEY_TR_MASK, + val); +} + /* AB8500 gives us an interrupt when ONKEY is held */ static irqreturn_t ab8500_ponkey_handler(int irq, void *data) { - struct ab8500_ponkey *ponkey = data; + struct ab8500_ponkey_info *info = data; - if (irq == ponkey->irq_dbf) - input_report_key(ponkey->idev, KEY_POWER, true); - else if (irq == ponkey->irq_dbr) - input_report_key(ponkey->idev, KEY_POWER, false); + if (irq == info->irq_dbf) + input_report_key(info->idev, KEY_POWER, true); + else if (irq == info->irq_dbr) + input_report_key(info->idev, KEY_POWER, false); - input_sync(ponkey->idev); + input_sync(info->idev); return IRQ_HANDLED; } static int __devinit ab8500_ponkey_probe(struct platform_device *pdev) { - struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); - struct ab8500_ponkey *ponkey; - struct input_dev *input; - int irq_dbf, irq_dbr; - int error; + const struct ab8500_ponkey_variant *variant; + struct ab8500_ponkey_info *info; + int irq_dbf, irq_dbr, ret; + + variant = (const struct ab8500_ponkey_variant *) + pdev->id_entry->driver_data; + + if (variant->hw_init) { + ret = variant->hw_init(pdev); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to init hw"); + return ret; + } + } - irq_dbf = platform_get_irq_byname(pdev, "ONKEY_DBF"); + irq_dbf = platform_get_irq_byname(pdev, variant->irq_falling); if (irq_dbf 
< 0) { - dev_err(&pdev->dev, "No IRQ for ONKEY_DBF, error=%d\n", irq_dbf); + dev_err(&pdev->dev, "No IRQ for %s: %d\n", + variant->irq_falling, irq_dbf); return irq_dbf; } - irq_dbr = platform_get_irq_byname(pdev, "ONKEY_DBR"); + irq_dbr = platform_get_irq_byname(pdev, variant->irq_rising); if (irq_dbr < 0) { - dev_err(&pdev->dev, "No IRQ for ONKEY_DBR, error=%d\n", irq_dbr); + dev_err(&pdev->dev, "No IRQ for %s: %d\n", + variant->irq_rising, irq_dbr); return irq_dbr; } - ponkey = kzalloc(sizeof(struct ab8500_ponkey), GFP_KERNEL); - input = input_allocate_device(); - if (!ponkey || !input) { - error = -ENOMEM; - goto err_free_mem; - } + info = kzalloc(sizeof(struct ab8500_ponkey_info), GFP_KERNEL); + if (!info) + return -ENOMEM; - ponkey->idev = input; - ponkey->ab8500 = ab8500; - ponkey->irq_dbf = irq_dbf; - ponkey->irq_dbr = irq_dbr; + info->irq_dbf = irq_dbf; + info->irq_dbr = irq_dbr; - input->name = "AB8500 POn(PowerOn) Key"; - input->dev.parent = &pdev->dev; + info->idev = input_allocate_device(); + if (!info->idev) { + dev_err(&pdev->dev, "Failed to allocate input dev\n"); + ret = -ENOMEM; + goto out; + } - input_set_capability(input, EV_KEY, KEY_POWER); + info->idev->name = "AB8500 POn(PowerOn) Key"; + info->idev->dev.parent = &pdev->dev; + info->idev->evbit[0] = BIT_MASK(EV_KEY); + info->idev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER); - error = request_any_context_irq(ponkey->irq_dbf, ab8500_ponkey_handler, - 0, "ab8500-ponkey-dbf", ponkey); - if (error < 0) { - dev_err(ab8500->dev, "Failed to request dbf IRQ#%d: %d\n", - ponkey->irq_dbf, error); - goto err_free_mem; + ret = input_register_device(info->idev); + if (ret) { + dev_err(&pdev->dev, "Can't register input device: %d\n", ret); + goto out_unfreedevice; } - error = request_any_context_irq(ponkey->irq_dbr, ab8500_ponkey_handler, - 0, "ab8500-ponkey-dbr", ponkey); - if (error < 0) { - dev_err(ab8500->dev, "Failed to request dbr IRQ#%d: %d\n", - ponkey->irq_dbr, error); - goto 
err_free_dbf_irq; + ret = request_threaded_irq(info->irq_dbf, NULL, ab8500_ponkey_handler, + IRQF_NO_SUSPEND, "ab8500-ponkey-dbf", + info); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n", + info->irq_dbf, ret); + goto out_unregisterdevice; } - error = input_register_device(ponkey->idev); - if (error) { - dev_err(ab8500->dev, "Can't register input device: %d\n", error); - goto err_free_dbr_irq; + ret = request_threaded_irq(info->irq_dbr, NULL, ab8500_ponkey_handler, + IRQF_NO_SUSPEND, "ab8500-ponkey-dbr", + info); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n", + info->irq_dbr, ret); + goto out_irq_dbf; } - platform_set_drvdata(pdev, ponkey); - return 0; + platform_set_drvdata(pdev, info); -err_free_dbr_irq: - free_irq(ponkey->irq_dbr, ponkey); -err_free_dbf_irq: - free_irq(ponkey->irq_dbf, ponkey); -err_free_mem: - input_free_device(input); - kfree(ponkey); + return 0; - return error; +out_irq_dbf: + free_irq(info->irq_dbf, info); +out_unregisterdevice: + input_unregister_device(info->idev); + info->idev = NULL; +out_unfreedevice: + input_free_device(info->idev); +out: + kfree(info); + return ret; } static int __devexit ab8500_ponkey_remove(struct platform_device *pdev) { - struct ab8500_ponkey *ponkey = platform_get_drvdata(pdev); - - free_irq(ponkey->irq_dbf, ponkey); - free_irq(ponkey->irq_dbr, ponkey); - input_unregister_device(ponkey->idev); - kfree(ponkey); - - platform_set_drvdata(pdev, NULL); + struct ab8500_ponkey_info *info = platform_get_drvdata(pdev); + free_irq(info->irq_dbf, info); + free_irq(info->irq_dbr, info); + input_unregister_device(info->idev); + kfree(info); return 0; } +static struct platform_device_id ab8500_ponkey_id_table[] = { + { "ab5500-onswa", (kernel_ulong_t)&ab5500_onswa, }, + { "ab8500-poweron-key", (kernel_ulong_t)&ab8500_ponkey, }, + { }, +}; +MODULE_DEVICE_TABLE(platform, ab8500_ponkey_id_table); + static struct platform_driver ab8500_ponkey_driver = { .driver = { .name = 
"ab8500-poweron-key", .owner = THIS_MODULE, }, + .id_table = ab8500_ponkey_id_table, .probe = ab8500_ponkey_probe, .remove = __devexit_p(ab8500_ponkey_remove), }; diff --git a/drivers/input/misc/abx500-accdet.c b/drivers/input/misc/abx500-accdet.c new file mode 100644 index 00000000000..4b5017a6e60 --- /dev/null +++ b/drivers/input/misc/abx500-accdet.c @@ -0,0 +1,1019 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * Author: Jarmo K. Kuronen <jarmo.kuronen@symbio.com> + * for ST-Ericsson. + * + * License terms: GPL V2 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/workqueue.h> +#include <linux/irq.h> +#include <linux/jiffies.h> +#include <linux/slab.h> +#include <linux/input.h> +#include <linux/interrupt.h> +#include <linux/input/abx500-accdet.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/gpio.h> +#include <linux/mfd/abx500.h> + +#include <sound/jack.h> +#include <sound/soc.h> +#include <sound/jack.h> + +#ifdef CONFIG_SND_SOC_UX500_AB8500 +#include <sound/ux500_ab8500.h> +#else +#define ux500_ab8500_jack_report(i) +#endif + +/* Unique value used to identify Headset button input device */ +#define BTN_INPUT_UNIQUE_VALUE "AB8500HsBtn" +#define BTN_INPUT_DEV_NAME "AB8500 Hs Button" + +#define DEBOUNCE_PLUG_EVENT_MS 100 +#define DEBOUNCE_PLUG_RETEST_MS 25 +#define DEBOUNCE_UNPLUG_EVENT_MS 0 + +/* After being loaded, how fast the first check is to be made */ +#define INIT_DELAY_MS 3000 + +/* Voltage limits (mV) for various types of AV Accessories */ +#define ACCESSORY_DET_VOL_DONTCARE -1 +#define ACCESSORY_HEADPHONE_DET_VOL_MIN 0 +#define ACCESSORY_HEADPHONE_DET_VOL_MAX 40 +#define ACCESSORY_U_HEADSET_DET_VOL_MIN 47 +#define ACCESSORY_U_HEADSET_DET_VOL_MAX 732 +#define 
ACCESSORY_U_HEADSET_ALT_DET_VOL_MIN 25 +#define ACCESSORY_U_HEADSET_ALT_DET_VOL_MAX 50 +#define ACCESSORY_CARKIT_DET_VOL_MIN 1100 +#define ACCESSORY_CARKIT_DET_VOL_MAX 1300 +#define ACCESSORY_HEADSET_DET_VOL_MIN 1301 +#define ACCESSORY_HEADSET_DET_VOL_MAX 2000 +#define ACCESSORY_OPENCABLE_DET_VOL_MIN 2001 +#define ACCESSORY_OPENCABLE_DET_VOL_MAX 2150 + + +/* Macros */ + +/* + * Conviniency macros to check jack characteristics. + */ +#define jack_supports_mic(type) \ + (type == JACK_TYPE_HEADSET || type == JACK_TYPE_CARKIT) +#define jack_supports_spkr(type) \ + ((type != JACK_TYPE_DISCONNECTED) && (type != JACK_TYPE_CONNECTED)) +#define jack_supports_buttons(type) \ + ((type == JACK_TYPE_HEADSET) ||\ + (type == JACK_TYPE_CARKIT) ||\ + (type == JACK_TYPE_OPENCABLE) ||\ + (type == JACK_TYPE_CONNECTED)) + + +/* Forward declarations */ +static void config_accdetect(struct abx500_ad *dd); +static enum accessory_jack_type detect(struct abx500_ad *dd, int *required_det); + +/* Static data initialization */ +static struct accessory_detect_task detect_ops[] = { + { + .type = JACK_TYPE_DISCONNECTED, + .typename = "DISCONNECTED", + .meas_mv = 1, + .req_det_count = 1, + .minvol = ACCESSORY_DET_VOL_DONTCARE, + .maxvol = ACCESSORY_DET_VOL_DONTCARE, + .alt_minvol = ACCESSORY_DET_VOL_DONTCARE, + .alt_maxvol = ACCESSORY_DET_VOL_DONTCARE + }, + { + .type = JACK_TYPE_HEADPHONE, + .typename = "HEADPHONE", + .meas_mv = 1, + .req_det_count = 1, + .minvol = ACCESSORY_HEADPHONE_DET_VOL_MIN, + .maxvol = ACCESSORY_HEADPHONE_DET_VOL_MAX, + .alt_minvol = ACCESSORY_DET_VOL_DONTCARE, + .alt_maxvol = ACCESSORY_DET_VOL_DONTCARE + }, + { + .type = JACK_TYPE_UNSUPPORTED_HEADSET, + .typename = "UNSUPPORTED HEADSET", + .meas_mv = 1, + .req_det_count = 2, + .minvol = ACCESSORY_U_HEADSET_DET_VOL_MIN, + .maxvol = ACCESSORY_U_HEADSET_DET_VOL_MAX, + .alt_minvol = ACCESSORY_U_HEADSET_ALT_DET_VOL_MIN, + .alt_maxvol = ACCESSORY_U_HEADSET_ALT_DET_VOL_MAX + }, + { + .type = JACK_TYPE_OPENCABLE, + .typename = 
"OPENCABLE", + .meas_mv = 0, + .req_det_count = 4, + .minvol = ACCESSORY_OPENCABLE_DET_VOL_MIN, + .maxvol = ACCESSORY_OPENCABLE_DET_VOL_MAX, + .alt_minvol = ACCESSORY_DET_VOL_DONTCARE, + .alt_maxvol = ACCESSORY_DET_VOL_DONTCARE + }, + { + .type = JACK_TYPE_CARKIT, + .typename = "CARKIT", + .meas_mv = 1, + .req_det_count = 1, + .minvol = ACCESSORY_CARKIT_DET_VOL_MIN, + .maxvol = ACCESSORY_CARKIT_DET_VOL_MAX, + .alt_minvol = ACCESSORY_DET_VOL_DONTCARE, + .alt_maxvol = ACCESSORY_DET_VOL_DONTCARE + }, + { + .type = JACK_TYPE_HEADSET, + .typename = "HEADSET", + .meas_mv = 0, + .req_det_count = 2, + .minvol = ACCESSORY_HEADSET_DET_VOL_MIN, + .maxvol = ACCESSORY_HEADSET_DET_VOL_MAX, + .alt_minvol = ACCESSORY_DET_VOL_DONTCARE, + .alt_maxvol = ACCESSORY_DET_VOL_DONTCARE + }, + { + .type = JACK_TYPE_CONNECTED, + .typename = "CONNECTED", + .meas_mv = 0, + .req_det_count = 4, + .minvol = ACCESSORY_DET_VOL_DONTCARE, + .maxvol = ACCESSORY_DET_VOL_DONTCARE, + .alt_minvol = ACCESSORY_DET_VOL_DONTCARE, + .alt_maxvol = ACCESSORY_DET_VOL_DONTCARE + } +}; + +static struct accessory_irq_descriptor *abx500_accdet_irq_desc; + +/* + * textual represenation of the accessory type + */ +static const char *accessory_str(enum accessory_jack_type type) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(detect_ops); i++) + if (type == detect_ops[i].type) + return detect_ops[i].typename; + + return "UNKNOWN?"; +} + +/* + * enables regulator but only if it has not been enabled earlier. + */ +void accessory_regulator_enable(struct abx500_ad *dd, + enum accessory_regulator reg) +{ + int i; + + for (i = 0; i < dd->no_of_regu_desc; i++) { + if (reg & dd->regu_desc[i].id) { + if (!dd->regu_desc[i].enabled) { + if (!regulator_enable(dd->regu_desc[i].handle)) + dd->regu_desc[i].enabled = 1; + } + } + } +} + +/* + * disables regulator but only if it has been previously enabled. 
+ */ +void accessory_regulator_disable(struct abx500_ad *dd, + enum accessory_regulator reg) +{ + int i; + + for (i = 0; i < dd->no_of_regu_desc; i++) { + if (reg & dd->regu_desc[i].id) { + if (dd->regu_desc[i].enabled) { + if (!regulator_disable(dd->regu_desc[i].handle)) + dd->regu_desc[i].enabled = 0; + } + } + } +} + +/* + * frees previously retrieved regulators. + */ +static void free_regulators(struct abx500_ad *dd) +{ + int i; + + for (i = 0; i < dd->no_of_regu_desc; i++) { + if (dd->regu_desc[i].handle) { + regulator_put(dd->regu_desc[i].handle); + dd->regu_desc[i].handle = NULL; + } + } +} + +/* + * gets required regulators. + */ +static int create_regulators(struct abx500_ad *dd) +{ + int i; + int status = 0; + + for (i = 0; i < dd->no_of_regu_desc; i++) { + struct regulator *regu = + regulator_get(&dd->pdev->dev, dd->regu_desc[i].name); + if (IS_ERR(regu)) { + status = PTR_ERR(regu); + dev_err(&dd->pdev->dev, + "%s: Failed to get supply '%s' (%d).\n", + __func__, dd->regu_desc[i].name, status); + free_regulators(dd); + goto out; + } else { + dd->regu_desc[i].handle = regu; + } + } + +out: + return status; +} + +/* + * create input device for button press reporting + */ +static int create_btn_input_dev(struct abx500_ad *dd) +{ + int err; + + dd->btn_input_dev = input_allocate_device(); + if (!dd->btn_input_dev) { + dev_err(&dd->pdev->dev, "%s: Failed to allocate input dev.\n", + __func__); + err = -ENOMEM; + goto out; + } + + input_set_capability(dd->btn_input_dev, + EV_KEY, + dd->pdata->btn_keycode); + + dd->btn_input_dev->name = BTN_INPUT_DEV_NAME; + dd->btn_input_dev->uniq = BTN_INPUT_UNIQUE_VALUE; + dd->btn_input_dev->dev.parent = &dd->pdev->dev; + + err = input_register_device(dd->btn_input_dev); + if (err) { + dev_err(&dd->pdev->dev, + "%s: register_input_device failed (%d).\n", __func__, + err); + input_free_device(dd->btn_input_dev); + dd->btn_input_dev = NULL; + goto out; + } +out: + return err; +} + +/* + * reports jack status + */ +void 
report_jack_status(struct abx500_ad *dd) +{ + int value = 0; + + /* Never report possible open cable */ + if (dd->jack_type == JACK_TYPE_OPENCABLE) + goto out; + + /* Never report same state twice in a row */ + if (dd->jack_type == dd->reported_jack_type) + goto out; + dd->reported_jack_type = dd->jack_type; + + dev_dbg(&dd->pdev->dev, "Accessory: %s\n", + accessory_str(dd->jack_type)); + + /* Never report unsupported headset */ + if (dd->jack_type == JACK_TYPE_UNSUPPORTED_HEADSET) + goto out; + + if (dd->jack_type != JACK_TYPE_DISCONNECTED && + dd->jack_type != JACK_TYPE_UNSPECIFIED) + value |= SND_JACK_MECHANICAL; + if (jack_supports_mic(dd->jack_type)) + value |= SND_JACK_MICROPHONE; + if (jack_supports_spkr(dd->jack_type)) + value |= (SND_JACK_HEADPHONE | SND_JACK_LINEOUT); + ux500_ab8500_jack_report(value); + +out: return; +} + +/* + * worker routine to handle accessory unplug case + */ +void unplug_irq_handler_work(struct work_struct *work) +{ + struct abx500_ad *dd = container_of(work, + struct abx500_ad, unplug_irq_work.work); + + dev_dbg(&dd->pdev->dev, "%s: Enter\n", __func__); + + dd->jack_type = dd->jack_type_temp = JACK_TYPE_DISCONNECTED; + dd->jack_det_count = dd->total_jack_det_count = 0; + dd->btn_state = BUTTON_UNK; + config_accdetect(dd); + + accessory_regulator_disable(dd, REGULATOR_ALL); + + report_jack_status(dd); +} + +/* + * interrupt service routine for accessory unplug. + */ +irqreturn_t unplug_irq_handler(int irq, void *_userdata) +{ + struct abx500_ad *dd = _userdata; + + dev_dbg(&dd->pdev->dev, "%s: Enter (irq=%d)\n", __func__, irq); + + queue_delayed_work(dd->irq_work_queue, &dd->unplug_irq_work, + msecs_to_jiffies(DEBOUNCE_UNPLUG_EVENT_MS)); + + return IRQ_HANDLED; +} + +/* + * interrupt service routine for accessory plug. 
+ */ +irqreturn_t plug_irq_handler(int irq, void *_userdata) +{ + struct abx500_ad *dd = _userdata; + + dev_dbg(&dd->pdev->dev, "%s: Enter (irq=%d)\n", + __func__, irq); + + switch (dd->jack_type) { + case JACK_TYPE_DISCONNECTED: + case JACK_TYPE_UNSPECIFIED: + queue_delayed_work(dd->irq_work_queue, &dd->detect_work, + msecs_to_jiffies(DEBOUNCE_PLUG_EVENT_MS)); + break; + + default: + dev_err(&dd->pdev->dev, "%s: Unexpected plug IRQ\n", __func__); + break; + } + + return IRQ_HANDLED; +} + +/* + * worker routine to perform detection. + */ +static void detect_work(struct work_struct *work) +{ + int req_det_count = 1; + enum accessory_jack_type new_type; + struct abx500_ad *dd = container_of(work, + struct abx500_ad, detect_work.work); + + dev_dbg(&dd->pdev->dev, "%s: Enter\n", __func__); + + if (dd->set_av_switch) + dd->set_av_switch(dd, AUDIO_IN); + + new_type = detect(dd, &req_det_count); + + dd->total_jack_det_count++; + if (dd->jack_type_temp == new_type) { + dd->jack_det_count++; + } else { + dd->jack_det_count = 1; + dd->jack_type_temp = new_type; + } + + if (dd->total_jack_det_count >= MAX_DET_COUNT) { + dev_err(&dd->pdev->dev, + "%s: MAX_DET_COUNT(=%d) reached. Bailing out.\n", + __func__, MAX_DET_COUNT); + queue_delayed_work(dd->irq_work_queue, &dd->unplug_irq_work, + msecs_to_jiffies(DEBOUNCE_UNPLUG_EVENT_MS)); + } else if (dd->jack_det_count >= req_det_count) { + dd->total_jack_det_count = dd->jack_det_count = 0; + dd->jack_type = new_type; + dd->detect_jiffies = jiffies; + report_jack_status(dd); + config_accdetect(dd); + } else { + queue_delayed_work(dd->irq_work_queue, + &dd->detect_work, + msecs_to_jiffies(DEBOUNCE_PLUG_RETEST_MS)); + } +} + +/* + * reports a button event (pressed, released). + */ +static void report_btn_event(struct abx500_ad *dd, int down) +{ + input_report_key(dd->btn_input_dev, dd->pdata->btn_keycode, down); + input_sync(dd->btn_input_dev); + + dev_dbg(&dd->pdev->dev, "HS-BTN: %s\n", down ? 
"PRESSED" : "RELEASED"); +} + +/* + * interrupt service routine invoked when hs button is pressed down. + */ +irqreturn_t button_press_irq_handler(int irq, void *_userdata) +{ + struct abx500_ad *dd = _userdata; + + unsigned long accept_jiffies = dd->detect_jiffies + + msecs_to_jiffies(1000); + if (time_before(jiffies, accept_jiffies)) { + dev_dbg(&dd->pdev->dev, "%s: Skipped spurious btn press.\n", + __func__); + return IRQ_HANDLED; + } + + dev_dbg(&dd->pdev->dev, "%s: Enter (irq=%d)\n", __func__, irq); + + if (dd->jack_type == JACK_TYPE_OPENCABLE) { + /* Someting got connected to open cable -> detect.. */ + dd->config_accdetect2_hw(dd, 0); + queue_delayed_work(dd->irq_work_queue, &dd->detect_work, + msecs_to_jiffies(DEBOUNCE_PLUG_EVENT_MS)); + return IRQ_HANDLED; + } + + if (dd->btn_state == BUTTON_PRESSED) + return IRQ_HANDLED; + + if (jack_supports_buttons(dd->jack_type)) { + dd->btn_state = BUTTON_PRESSED; + report_btn_event(dd, 1); + } else { + dd->btn_state = BUTTON_UNK; + } + + return IRQ_HANDLED; +} + +/* + * interrupts service routine invoked when hs button is released. + */ +irqreturn_t button_release_irq_handler(int irq, void *_userdata) +{ + struct abx500_ad *dd = _userdata; + + dev_dbg(&dd->pdev->dev, "%s: Enter (irq=%d)\n", __func__, irq); + + if (dd->jack_type == JACK_TYPE_OPENCABLE) + return IRQ_HANDLED; + + if (dd->btn_state != BUTTON_PRESSED) + return IRQ_HANDLED; + + if (jack_supports_buttons(dd->jack_type)) { + report_btn_event(dd, 0); + dd->btn_state = BUTTON_RELEASED; + } else { + dd->btn_state = BUTTON_UNK; + } + + return IRQ_HANDLED; +} + +/* + * checks whether measured voltage is in given range. depending on arguments, + * voltage might be re-measured or previously measured voltage is reused. 
+ */ +static int mic_vol_in_range(struct abx500_ad *dd, + int lo, int hi, int alt_lo, int alt_hi, int force_read) +{ + static int mv = MIN_MIC_POWER; + static int alt_mv = MIN_MIC_POWER; + + if (mv == MIN_MIC_POWER || force_read) + mv = dd->meas_voltage_stable(dd); + + if (mv < lo || mv > hi) + return 0; + + if (ACCESSORY_DET_VOL_DONTCARE == alt_lo && + ACCESSORY_DET_VOL_DONTCARE == alt_hi) + return 1; + + if (alt_mv == MIN_MIC_POWER || force_read) + alt_mv = dd->meas_alt_voltage_stable(dd); + + if (alt_mv < alt_lo || alt_mv > alt_hi) + return 0; + + return 1; +} + +/* + * checks whether the currently connected HW is of given type. + */ +static int detect_hw(struct abx500_ad *dd, + struct accessory_detect_task *task) +{ + int status; + + switch (task->type) { + case JACK_TYPE_DISCONNECTED: + dd->config_hw_test_plug_connected(dd, 1); + status = !dd->detect_plugged_in(dd); + break; + case JACK_TYPE_CONNECTED: + dd->config_hw_test_plug_connected(dd, 1); + status = dd->detect_plugged_in(dd); + break; + case JACK_TYPE_CARKIT: + case JACK_TYPE_HEADPHONE: + case JACK_TYPE_HEADSET: + case JACK_TYPE_UNSUPPORTED_HEADSET: + case JACK_TYPE_OPENCABLE: + status = mic_vol_in_range(dd, + task->minvol, + task->maxvol, + task->alt_minvol, + task->alt_maxvol, + task->meas_mv); + break; + default: + status = 0; + } + + return status; +} + +/* + * Tries to detect the currently attached accessory + */ +static enum accessory_jack_type detect(struct abx500_ad *dd, + int *req_det_count) +{ + enum accessory_jack_type type = JACK_TYPE_DISCONNECTED; + int i; + + accessory_regulator_enable(dd, REGULATOR_VAUDIO | REGULATOR_AVSWITCH); + /* enable the VAMIC1 regulator */ + dd->config_hw_test_basic_carkit(dd, 0); + + for (i = 0; i < ARRAY_SIZE(detect_ops); ++i) { + if (detect_hw(dd, &detect_ops[i])) { + type = detect_ops[i].type; + *req_det_count = detect_ops[i].req_det_count; + break; + } + } + + dd->config_hw_test_plug_connected(dd, 0); + + if (jack_supports_buttons(type)) + 
accessory_regulator_enable(dd, REGULATOR_VAMIC1); + else + accessory_regulator_disable(dd, REGULATOR_VAMIC1 | + REGULATOR_AVSWITCH); + + accessory_regulator_disable(dd, REGULATOR_VAUDIO); + + return type; +} + +/* + * registers to specific interrupt + */ +static void claim_irq(struct abx500_ad *dd, enum accessory_irq irq_id) +{ + int ret; + int irq; + + if (dd->pdata->is_detection_inverted) + abx500_accdet_irq_desc = dd->irq_desc_inverted; + else + abx500_accdet_irq_desc = dd->irq_desc_norm; + + if (abx500_accdet_irq_desc[irq_id].registered) + return; + + irq = platform_get_irq_byname( + dd->pdev, + abx500_accdet_irq_desc[irq_id].name); + if (irq < 0) { + dev_err(&dd->pdev->dev, + "%s: Failed to get irq %s\n", __func__, + abx500_accdet_irq_desc[irq_id].name); + return; + } + + ret = request_threaded_irq(irq, + NULL, + abx500_accdet_irq_desc[irq_id].isr, + IRQF_NO_SUSPEND | IRQF_SHARED, + abx500_accdet_irq_desc[irq_id].name, + dd); + if (ret != 0) { + dev_err(&dd->pdev->dev, + "%s: Failed to claim irq %s (%d)\n", + __func__, + abx500_accdet_irq_desc[irq_id].name, + ret); + } else { + abx500_accdet_irq_desc[irq_id].registered = 1; + dev_dbg(&dd->pdev->dev, "%s: %s\n", + __func__, abx500_accdet_irq_desc[irq_id].name); + } +} + +/* + * releases specific interrupt + */ +static void release_irq(struct abx500_ad *dd, enum accessory_irq irq_id) +{ + int irq; + + if (dd->pdata->is_detection_inverted) + abx500_accdet_irq_desc = dd->irq_desc_inverted; + else + abx500_accdet_irq_desc = dd->irq_desc_norm; + + if (!abx500_accdet_irq_desc[irq_id].registered) + return; + + irq = platform_get_irq_byname( + dd->pdev, + abx500_accdet_irq_desc[irq_id].name); + if (irq < 0) { + dev_err(&dd->pdev->dev, + "%s: Failed to get irq %s (%d)\n", + __func__, + abx500_accdet_irq_desc[irq_id].name, irq); + } else { + free_irq(irq, dd); + abx500_accdet_irq_desc[irq_id].registered = 0; + dev_dbg(&dd->pdev->dev, "%s: %s\n", + __func__, abx500_accdet_irq_desc[irq_id].name); + } +} + +/* + * 
configures interrupts + detection hardware to meet the requirements + * set by currently attached accessory type. + */ +static void config_accdetect(struct abx500_ad *dd) +{ + switch (dd->jack_type) { + case JACK_TYPE_UNSPECIFIED: + dd->config_accdetect1_hw(dd, 1); + dd->config_accdetect2_hw(dd, 0); + + release_irq(dd, PLUG_IRQ); + release_irq(dd, UNPLUG_IRQ); + release_irq(dd, BUTTON_PRESS_IRQ); + release_irq(dd, BUTTON_RELEASE_IRQ); + if (dd->set_av_switch) + dd->set_av_switch(dd, NOT_SET); + break; + + case JACK_TYPE_DISCONNECTED: + if (dd->set_av_switch) + dd->set_av_switch(dd, NOT_SET); + case JACK_TYPE_HEADPHONE: + dd->config_accdetect1_hw(dd, 1); + dd->config_accdetect2_hw(dd, 0); + + claim_irq(dd, PLUG_IRQ); + claim_irq(dd, UNPLUG_IRQ); + release_irq(dd, BUTTON_PRESS_IRQ); + release_irq(dd, BUTTON_RELEASE_IRQ); + break; + + case JACK_TYPE_UNSUPPORTED_HEADSET: + dd->config_accdetect1_hw(dd, 1); + dd->config_accdetect2_hw(dd, 1); + + release_irq(dd, PLUG_IRQ); + claim_irq(dd, UNPLUG_IRQ); + release_irq(dd, BUTTON_PRESS_IRQ); + release_irq(dd, BUTTON_RELEASE_IRQ); + if (dd->set_av_switch) + dd->set_av_switch(dd, NOT_SET); + break; + + case JACK_TYPE_CONNECTED: + case JACK_TYPE_HEADSET: + case JACK_TYPE_CARKIT: + case JACK_TYPE_OPENCABLE: + dd->config_accdetect1_hw(dd, 1); + dd->config_accdetect2_hw(dd, 1); + + release_irq(dd, PLUG_IRQ); + claim_irq(dd, UNPLUG_IRQ); + claim_irq(dd, BUTTON_PRESS_IRQ); + claim_irq(dd, BUTTON_RELEASE_IRQ); + break; + + default: + dev_err(&dd->pdev->dev, "%s: Unknown type: %d\n", + __func__, dd->jack_type); + } +} + +/* + * Deferred initialization of the work. 
+ */ +static void init_work(struct work_struct *work) +{ + struct abx500_ad *dd = container_of(work, + struct abx500_ad, init_work.work); + + dev_dbg(&dd->pdev->dev, "%s: Enter\n", __func__); + + dd->jack_type = dd->reported_jack_type = JACK_TYPE_UNSPECIFIED; + config_accdetect(dd); + queue_delayed_work(dd->irq_work_queue, + &dd->detect_work, + msecs_to_jiffies(0)); +} + +/* + * performs platform device initialization + */ +static int abx500_accessory_init(struct platform_device *pdev) +{ + int ret; + struct abx500_ad *dd = (struct abx500_ad *)pdev->id_entry->driver_data; + + dev_dbg(&pdev->dev, "Enter: %s\n", __func__); + + dd->pdev = pdev; + dd->pdata = dd->get_platform_data(pdev); + if (IS_ERR(dd->pdata)) + return PTR_ERR(dd->pdata); + + if (dd->pdata->video_ctrl_gpio) { + ret = gpio_is_valid(dd->pdata->video_ctrl_gpio); + if (!ret) { + dev_err(&pdev->dev, + "%s: Video ctrl GPIO invalid (%d).\n", __func__, + dd->pdata->video_ctrl_gpio); + + return ret; + } + ret = gpio_request(dd->pdata->video_ctrl_gpio, + "Video Control"); + if (ret) { + dev_err(&pdev->dev, "%s: Get video ctrl GPIO" + "failed.\n", __func__); + return ret; + } + } + + if (dd->pdata->mic_ctrl) { + ret = gpio_is_valid(dd->pdata->mic_ctrl); + if (!ret) { + dev_err(&pdev->dev, + "%s: Mic ctrl GPIO invalid (%d).\n", __func__, + dd->pdata->mic_ctrl); + + goto mic_ctrl_fail; + } + ret = gpio_request(dd->pdata->mic_ctrl, + "Mic Control"); + if (ret) { + dev_err(&pdev->dev, "%s: Get mic ctrl GPIO" + "failed.\n", __func__); + goto mic_ctrl_fail; + } + } + + ret = create_btn_input_dev(dd); + if (ret < 0) { + dev_err(&pdev->dev, "%s: create_button_input_dev failed.\n", + __func__); + goto fail_no_btn_input_dev; + } + + ret = create_regulators(dd); + if (ret < 0) { + dev_err(&pdev->dev, "%s: failed to create regulators\n", + __func__); + goto fail_no_regulators; + } + dd->btn_state = BUTTON_UNK; + + dd->irq_work_queue = create_singlethread_workqueue("abx500_accdet_wq"); + if (!dd->irq_work_queue) { + 
dev_err(&pdev->dev, "%s: Failed to create wq\n", __func__); + ret = -ENOMEM; + goto fail_no_mem_for_wq; + } + + dd->gpadc = dd->accdet_abx500_gpadc_get(); + + INIT_DELAYED_WORK(&dd->detect_work, detect_work); + INIT_DELAYED_WORK(&dd->unplug_irq_work, unplug_irq_handler_work); + INIT_DELAYED_WORK(&dd->init_work, init_work); + + /* Deferred init/detect since no use for the info early in boot */ + queue_delayed_work(dd->irq_work_queue, + &dd->init_work, + msecs_to_jiffies(INIT_DELAY_MS)); + + platform_set_drvdata(pdev, dd); + + return 0; +fail_no_mem_for_wq: + free_regulators(dd); +fail_no_regulators: + input_unregister_device(dd->btn_input_dev); +fail_no_btn_input_dev: + if (dd->pdata->mic_ctrl) + gpio_free(dd->pdata->mic_ctrl); +mic_ctrl_fail: + if (dd->pdata->video_ctrl_gpio) + gpio_free(dd->pdata->video_ctrl_gpio); + return ret; +} + +/* + * Performs platform device cleanup + */ +static void abx500_accessory_cleanup(struct abx500_ad *dd) +{ + dev_dbg(&dd->pdev->dev, "Enter: %s\n", __func__); + + dd->jack_type = JACK_TYPE_UNSPECIFIED; + config_accdetect(dd); + + if (dd->pdata->mic_ctrl) + gpio_free(dd->pdata->mic_ctrl); + + if (dd->pdata->video_ctrl_gpio) + gpio_free(dd->pdata->video_ctrl_gpio); + + input_unregister_device(dd->btn_input_dev); + free_regulators(dd); + + cancel_delayed_work(&dd->detect_work); + cancel_delayed_work(&dd->unplug_irq_work); + cancel_delayed_work(&dd->init_work); + flush_workqueue(dd->irq_work_queue); + destroy_workqueue(dd->irq_work_queue); + +} + +static int __devinit abx500_acc_detect_probe(struct platform_device *pdev) +{ + + return abx500_accessory_init(pdev); +} + +static int __devexit abx500_acc_detect_remove(struct platform_device *pdev) +{ + abx500_accessory_cleanup(platform_get_drvdata(pdev)); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +#if defined(CONFIG_PM) +static int abx500_acc_detect_suspend(struct device *dev) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct 
abx500_ad *dd = platform_get_drvdata(pdev); + int irq_id, irq; + + dev_dbg(&dd->pdev->dev, "%s: Enter\n", __func__); + + cancel_delayed_work_sync(&dd->unplug_irq_work); + cancel_delayed_work_sync(&dd->detect_work); + cancel_delayed_work_sync(&dd->init_work); + + if (dd->pdata->is_detection_inverted) + abx500_accdet_irq_desc = dd->irq_desc_inverted; + else + abx500_accdet_irq_desc = dd->irq_desc_norm; + + for (irq_id = 0; irq_id < dd->no_irqs; irq_id++) { + if (abx500_accdet_irq_desc[irq_id].registered == 1) { + irq = platform_get_irq_byname( + dd->pdev, + abx500_accdet_irq_desc[irq_id].name); + + disable_irq(irq); + } + } + + dd->turn_off_accdet_comparator(pdev); + + if (dd->jack_type == JACK_TYPE_HEADSET) + accessory_regulator_disable(dd, REGULATOR_VAMIC1); + + return 0; +} + +static int abx500_acc_detect_resume(struct device *dev) +{ + struct platform_device *pdev = container_of(dev, + struct platform_device, dev); + struct abx500_ad *dd = platform_get_drvdata(pdev); + int irq_id, irq; + + dev_dbg(&dd->pdev->dev, "%s: Enter\n", __func__); + + if (dd->jack_type == JACK_TYPE_HEADSET) + accessory_regulator_enable(dd, REGULATOR_VAMIC1); + + dd->turn_on_accdet_comparator(pdev); + + if (dd->pdata->is_detection_inverted) + abx500_accdet_irq_desc = dd->irq_desc_inverted; + else + abx500_accdet_irq_desc = dd->irq_desc_norm; + + for (irq_id = 0; irq_id < dd->no_irqs; irq_id++) { + if (abx500_accdet_irq_desc[irq_id].registered == 1) { + irq = platform_get_irq_byname( + dd->pdev, + abx500_accdet_irq_desc[irq_id].name); + + enable_irq(irq); + + } + } + + /* After resume, reinitialize */ + dd->gpio35_dir_set = dd->accdet1_th_set = dd->accdet2_th_set = 0; + queue_delayed_work(dd->irq_work_queue, &dd->init_work, 0); + + return 0; +} +#else +#define abx500_acc_detect_suspend NULL +#define abx500_acc_detect_resume NULL +#endif + +static struct platform_device_id abx500_accdet_ids[] = { +#ifdef CONFIG_INPUT_AB5500_ACCDET + { "ab5500-acc-det", 
(kernel_ulong_t)&ab5500_accessory_det_callbacks, }, +#endif +#ifdef CONFIG_INPUT_AB8500_ACCDET + { "ab8500-acc-det", (kernel_ulong_t)&ab8500_accessory_det_callbacks, }, +#endif + { }, +}; + +static const struct dev_pm_ops abx_ops = { + .suspend = abx500_acc_detect_suspend, + .resume = abx500_acc_detect_resume, +}; + +static struct platform_driver abx500_acc_detect_platform_driver = { + .driver = { + .name = "abx500-acc-det", + .owner = THIS_MODULE, + .pm = &abx_ops, + }, + .probe = abx500_acc_detect_probe, + .id_table = abx500_accdet_ids, + .remove = __devexit_p(abx500_acc_detect_remove), +}; + +static int __init abx500_acc_detect_init(void) +{ + return platform_driver_register(&abx500_acc_detect_platform_driver); +} + +static void __exit abx500_acc_detect_exit(void) +{ + platform_driver_unregister(&abx500_acc_detect_platform_driver); +} + +module_init(abx500_acc_detect_init); +module_exit(abx500_acc_detect_exit); + +MODULE_DESCRIPTION("ABx500 AV Accessory detection driver"); +MODULE_ALIAS("platform:abx500-acc-det"); +MODULE_AUTHOR("ST-Ericsson"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/input/misc/ste_ff_vibra.c b/drivers/input/misc/ste_ff_vibra.c new file mode 100644 index 00000000000..9038e6be046 --- /dev/null +++ b/drivers/input/misc/ste_ff_vibra.c @@ -0,0 +1,234 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Marcin Mielczarczyk <marcin.mielczarczyk@tieto.com> + * for ST-Ericsson + * License Terms: GNU General Public License v2 + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/input.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/workqueue.h> +#include <mach/ste_audio_io_vibrator.h> + +#define FF_VIBRA_DOWN 0x0000 /* 0 degrees */ +#define FF_VIBRA_LEFT 0x4000 /* 90 degrees */ +#define FF_VIBRA_UP 0x8000 /* 180 degrees */ +#define FF_VIBRA_RIGHT 0xC000 /* 270 degrees */ + +/** + * struct vibra_info - Vibrator information structure + * @idev: Pointer to input 
device structure + * @vibra_workqueue: Pointer to vibrator workqueue structure + * @vibra_work: Vibrator work + * @direction: Vibration direction + * @speed: Vibration speed + * + * Structure vibra_info holds vibrator informations + **/ +struct vibra_info { + struct input_dev *idev; + struct workqueue_struct *vibra_workqueue; + struct work_struct vibra_work; + int direction; + unsigned char speed; +}; + +/** + * vibra_play_work() - Vibrator work, sets speed and direction + * @work: Pointer to work structure + * + * This function is called from workqueue, turns on/off vibrator + **/ +static void vibra_play_work(struct work_struct *work) +{ + struct vibra_info *vinfo = container_of(work, + struct vibra_info, vibra_work); + struct ste_vibra_speed left_speed = { + .positive = 0, + .negative = 0, + }; + struct ste_vibra_speed right_speed = { + .positive = 0, + .negative = 0, + }; + + /* Divide by 2 because supported range by PWM is 0-100 */ + vinfo->speed /= 2; + + if ((vinfo->direction > FF_VIBRA_DOWN) && + (vinfo->direction < FF_VIBRA_UP)) { + /* 1 - 179 degrees, turn on left vibrator */ + left_speed.positive = vinfo->speed; + } else if (vinfo->direction > FF_VIBRA_UP) { + /* more than 180 degrees, turn on right vibrator */ + right_speed.positive = vinfo->speed; + } else { + /* 0 (down) or 180 (up) degrees, turn on 2 vibrators */ + left_speed.positive = vinfo->speed; + right_speed.positive = vinfo->speed; + } + + ste_audioio_vibrator_pwm_control(STE_AUDIOIO_CLIENT_FF_VIBRA, + left_speed, right_speed); +} + +/** + * vibra_play() - Memless device control function + * @idev: Pointer to input device structure + * @data: Pointer to private data (not used) + * @effect: Pointer to force feedback effect structure + * + * This function controls memless device + * + * Returns: + * 0 - success + **/ +static int vibra_play(struct input_dev *idev, void *data, + struct ff_effect *effect) +{ + struct vibra_info *vinfo = input_get_drvdata(idev); + + vinfo->direction = 
effect->direction; + vinfo->speed = effect->u.rumble.strong_magnitude >> 8; + if (!vinfo->speed) + /* Shift weak magnitude to make it feelable on vibrator */ + vinfo->speed = effect->u.rumble.weak_magnitude >> 9; + + queue_work(vinfo->vibra_workqueue, &vinfo->vibra_work); + + return 0; +} + +/** + * ste_ff_vibra_open() - Input device open function + * @idev: Pointer to input device structure + * + * This function is called on opening input device + * + * Returns: + * -ENOMEM - no memory left + * 0 - success + **/ +static int ste_ff_vibra_open(struct input_dev *idev) +{ + struct vibra_info *vinfo = input_get_drvdata(idev); + + vinfo->vibra_workqueue = + create_singlethread_workqueue("ste_ff-ff-vibra"); + if (!vinfo->vibra_workqueue) { + dev_err(&idev->dev, "couldn't create vibra workqueue\n"); + return -ENOMEM; + } + return 0; +} + +/** + * ste_ff_vibra_close() - Input device close function + * @idev: Pointer to input device structure + * + * This function is called on closing input device + **/ +static void ste_ff_vibra_close(struct input_dev *idev) +{ + struct vibra_info *vinfo = input_get_drvdata(idev); + + cancel_work_sync(&vinfo->vibra_work); + INIT_WORK(&vinfo->vibra_work, vibra_play_work); + destroy_workqueue(vinfo->vibra_workqueue); + vinfo->vibra_workqueue = NULL; +} + +static int __devinit ste_ff_vibra_probe(struct platform_device *pdev) +{ + struct vibra_info *vinfo; + int ret; + + vinfo = kmalloc(sizeof *vinfo, GFP_KERNEL); + if (!vinfo) { + dev_err(&pdev->dev, "failed to allocate memory\n"); + return -ENOMEM; + } + + vinfo->idev = input_allocate_device(); + if (!vinfo->idev) { + dev_err(&pdev->dev, "failed to allocate input device\n"); + ret = -ENOMEM; + goto exit_vinfo_free; + } + + vinfo->idev->name = "ste-ff-vibra"; + vinfo->idev->dev.parent = pdev->dev.parent; + vinfo->idev->open = ste_ff_vibra_open; + vinfo->idev->close = ste_ff_vibra_close; + INIT_WORK(&vinfo->vibra_work, vibra_play_work); + __set_bit(FF_RUMBLE, vinfo->idev->ffbit); + + ret = 
input_ff_create_memless(vinfo->idev, NULL, vibra_play); + if (ret) { + dev_err(&pdev->dev, "failed to create memless device\n"); + goto exit_idev_free; + } + + ret = input_register_device(vinfo->idev); + if (ret) { + dev_err(&pdev->dev, "failed to register input device\n"); + goto exit_destroy_memless; + } + + input_set_drvdata(vinfo->idev, vinfo); + platform_set_drvdata(pdev, vinfo); + return 0; + +exit_destroy_memless: + input_ff_destroy(vinfo->idev); +exit_idev_free: + input_free_device(vinfo->idev); +exit_vinfo_free: + kfree(vinfo); + return ret; +} + +static int __devexit ste_ff_vibra_remove(struct platform_device *pdev) +{ + struct vibra_info *vinfo = platform_get_drvdata(pdev); + + /* + * Function device_release() will call input_dev_release() + * which will free ff and input device. No need to call + * input_ff_destroy() and input_free_device() explicitly. + */ + input_unregister_device(vinfo->idev); + kfree(vinfo); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver ste_ff_vibra_driver = { + .driver = { + .name = "ste_ff_vibra", + .owner = THIS_MODULE, + }, + .probe = ste_ff_vibra_probe, + .remove = __devexit_p(ste_ff_vibra_remove) +}; + +static int __init ste_ff_vibra_init(void) +{ + return platform_driver_register(&ste_ff_vibra_driver); +} +module_init(ste_ff_vibra_init); + +static void __exit ste_ff_vibra_exit(void) +{ + platform_driver_unregister(&ste_ff_vibra_driver); +} +module_exit(ste_ff_vibra_exit); + +MODULE_AUTHOR("Marcin Mielczarczyk <marcin.mielczarczyk@tieto.com>"); +MODULE_DESCRIPTION("STE Force Feedback Vibrator Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c index f2d03c06c2d..d6ae75865e4 100644 --- a/drivers/input/touchscreen/bu21013_ts.c +++ b/drivers/input/touchscreen/bu21013_ts.c @@ -1,5 +1,5 @@ /* - * Copyright (C) ST-Ericsson SA 2010 + * Copyright (C) ST-Ericsson SA 2009 * Author: Naveen Kumar G 
<naveen.gaddipati@stericsson.com> for ST-Ericsson * License terms:GNU General Public License (GPL) version 2 */ @@ -12,13 +12,14 @@ #include <linux/input.h> #include <linux/input/bu21013.h> #include <linux/slab.h> +#include <linux/clk.h> #include <linux/regulator/consumer.h> #include <linux/module.h> #define PEN_DOWN_INTR 0 -#define MAX_FINGERS 2 #define RESET_DELAY 30 -#define PENUP_TIMEOUT (10) +#define PENUP_TIMEOUT 2 /* 2msecs */ +#define SCALE_FACTOR 1000 #define DELTA_MIN 16 #define MASK_BITS 0x03 #define SHIFT_8 8 @@ -131,7 +132,7 @@ #define BU21013_NUMBER_OF_X_SENSORS (6) #define BU21013_NUMBER_OF_Y_SENSORS (11) -#define DRIVER_TP "bu21013_tp" +#define DRIVER_TP "bu21013_ts" /** * struct bu21013_ts_data - touch panel data structure @@ -142,6 +143,12 @@ * @in_dev: pointer to the input device structure * @intr_pin: interrupt pin value * @regulator: pointer to the Regulator used for touch screen + * @enable: variable to indicate the enable/disable of touch screen + * @ext_clk_enable: true if running on ext clk + * @ext_clk_state: Saved state for suspend/resume of ext clk + * @factor_x: x scale factor + * @factor_y: y scale factor + * @tpclk: pointer to clock structure * * Touch panel device data structure */ @@ -149,12 +156,226 @@ struct bu21013_ts_data { struct i2c_client *client; wait_queue_head_t wait; bool touch_stopped; - const struct bu21013_platform_device *chip; + struct bu21013_platform_device *chip; struct input_dev *in_dev; unsigned int intr_pin; struct regulator *regulator; + bool enable; + bool ext_clk_enable; + bool ext_clk_state; + unsigned int factor_x; + unsigned int factor_y; + struct clk *tpclk; }; +static int bu21013_init_chip(struct bu21013_ts_data *data, bool on_ext_clk); + +/** + * bu21013_ext_clk() - enable/disable the external clock + * @pdata: touch screen data + * @enable: enable external clock + * @reconfig: reconfigure chip upon external clock off. 
+ * + * This function used to enable or disable the external clock and possible + * reconfigure hw. + */ +static int bu21013_ext_clk(struct bu21013_ts_data *pdata, bool enable, + bool reconfig) +{ + int retval = 0; + + if (!pdata->tpclk || pdata->ext_clk_enable == enable) + return retval; + + if (enable) { + pdata->ext_clk_enable = true; + clk_enable(pdata->tpclk); + retval = bu21013_init_chip(pdata, true); + } else { + pdata->ext_clk_enable = false; + if (reconfig) + retval = bu21013_init_chip(pdata, false); + clk_disable(pdata->tpclk); + } + return retval; +} + +/** + * bu21013_enable() - enable the touch driver event + * @pdata: touch screen data + * + * This function used to enable the driver and returns integer + */ +static int bu21013_enable(struct bu21013_ts_data *pdata) +{ + int retval; + + if (pdata->regulator) + regulator_enable(pdata->regulator); + + if (pdata->chip->cs_en) { + retval = pdata->chip->cs_en(pdata->chip->cs_pin); + if (retval < 0) { + dev_err(&pdata->client->dev, "enable hw failed\n"); + return retval; + } + } + + if (pdata->ext_clk_state) + retval = bu21013_ext_clk(pdata, true, true); + else + retval = bu21013_init_chip(pdata, false); + + if (retval < 0) { + dev_err(&pdata->client->dev, "enable hw failed\n"); + return retval; + } + pdata->touch_stopped = false; + enable_irq(pdata->chip->irq); + + return 0; +} + +/** + * bu21013_disable() - disable the touch driver event + * @pdata: touch screen data + * + * This function used to disable the driver and returns integer + */ +static void bu21013_disable(struct bu21013_ts_data *pdata) +{ + pdata->touch_stopped = true; + + pdata->ext_clk_state = pdata->ext_clk_enable; + (void) bu21013_ext_clk(pdata, false, false); + + disable_irq(pdata->chip->irq); + if (pdata->chip->cs_dis) + pdata->chip->cs_dis(pdata->chip->cs_pin); + if (pdata->regulator) + regulator_disable(pdata->regulator); +} + +/** + * bu21013_show_attr_enable() - show the touch screen controller status + * @dev: pointer to device 
structure + * @attr: pointer to device attribute + * @buf: parameter buffer + * + * This funtion is used to show whether the touch screen is enabled or + * disabled + */ +static ssize_t bu21013_show_attr_enable(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bu21013_ts_data *pdata = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", pdata->enable); +} + +/** + * bu21013_store_attr_enable() - Enable/Disable the touchscreen. + * @dev: pointer to device structure + * @attr: pointer to device attribute + * @buf: parameter buffer + * @count: number of parameters + * + * This funtion is used to enable or disable the touch screen controller. + */ +static ssize_t bu21013_store_attr_enable(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int ret = 0; + unsigned long val; + + struct bu21013_ts_data *pdata = dev_get_drvdata(dev); + + if (strict_strtoul(buf, 0, &val)) + return -EINVAL; + + if ((val != 0) && (val != 1)) + return -EINVAL; + + if (pdata->enable != val) { + pdata->enable = val ? true : false; + if (pdata->enable) { + ret = bu21013_enable(pdata); + if (ret < 0) + return ret; + } else + bu21013_disable(pdata); + } + return count; +} + +/** + * bu21013_show_attr_extclk() - shows the external clock status + * @dev: pointer to device structure + * @attr: pointer to device attribute + * @buf: parameter buffer + * + * This funtion is used to show whether the external clock for the touch + * screen is enabled or disabled. + */ +static ssize_t bu21013_show_attr_extclk(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bu21013_ts_data *pdata = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", pdata->ext_clk_enable); +} + +/** + * bu21013_store_attr_extclk() - Enable/Disable the external clock + * for the tocuh screen controller. 
+ * @dev: pointer to device structure + * @attr: pointer to device attribute + * @buf: parameter buffer + * @count: number of parameters + * + * This funtion is used enabled or disable the external clock for the touch + * screen controller. + */ +static ssize_t bu21013_store_attr_extclk(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int retval = 0; + struct bu21013_ts_data *pdata = dev_get_drvdata(dev); + unsigned long val; + + if (strict_strtoul(buf, 0, &val)) + return -EINVAL; + + if ((val != 0) && (val != 1)) + return -EINVAL; + + if (pdata->chip->has_ext_clk) { + if (pdata->enable) + retval = bu21013_ext_clk(pdata, val, true); + else + pdata->ext_clk_state = val; + if (retval < 0) + return retval; + } + return count; +} + +static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO, + bu21013_show_attr_enable, bu21013_store_attr_enable); + +static DEVICE_ATTR(ext_clk, S_IWUSR | S_IRUGO, + bu21013_show_attr_extclk, bu21013_store_attr_extclk); + + +static struct attribute *bu21013_attribute[] = { + &dev_attr_enable.attr, + &dev_attr_ext_clk.attr, + NULL, +}; + +static struct attribute_group bu21013_attr_group = { + .attrs = bu21013_attribute, +}; + + /** * bu21013_read_block_data(): read the touch co-ordinates * @data: bu21013_ts_data structure pointer @@ -204,12 +425,14 @@ static int bu21013_do_touch_report(struct bu21013_ts_data *data) if (!has_x_sensors || !has_y_sensors) return 0; - for (i = 0; i < MAX_FINGERS; i++) { + for (i = 0; i < 2; i++) { const u8 *p = &buf[4 * i + 3]; unsigned int x = p[0] << SHIFT_2 | (p[1] & MASK_BITS); unsigned int y = p[2] << SHIFT_2 | (p[3] & MASK_BITS); if (x == 0 || y == 0) continue; + x = x * data->factor_x / SCALE_FACTOR; + y = y * data->factor_y / SCALE_FACTOR; pos_x[finger_down_count] = x; pos_y[finger_down_count] = y; finger_down_count++; @@ -217,21 +440,21 @@ static int bu21013_do_touch_report(struct bu21013_ts_data *data) if (finger_down_count) { if (finger_down_count == 2 && - (abs(pos_x[0] - 
pos_x[1]) < DELTA_MIN || - abs(pos_y[0] - pos_y[1]) < DELTA_MIN)) { + (abs(pos_x[0] - pos_x[1]) < DELTA_MIN || + abs(pos_y[0] - pos_y[1]) < DELTA_MIN)) return 0; - } for (i = 0; i < finger_down_count; i++) { - if (data->chip->x_flip) - pos_x[i] = data->chip->touch_x_max - pos_x[i]; - if (data->chip->y_flip) - pos_y[i] = data->chip->touch_y_max - pos_y[i]; - - input_report_abs(data->in_dev, - ABS_MT_POSITION_X, pos_x[i]); - input_report_abs(data->in_dev, - ABS_MT_POSITION_Y, pos_y[i]); + if (data->chip->portrait && data->chip->x_flip) + pos_x[i] = data->chip->x_max_res - pos_x[i]; + if (data->chip->portrait && data->chip->y_flip) + pos_y[i] = data->chip->y_max_res - pos_y[i]; + input_report_abs(data->in_dev, ABS_MT_TOUCH_MAJOR, + max(pos_x[i], pos_y[i])); + input_report_abs(data->in_dev, ABS_MT_POSITION_X, + pos_x[i]); + input_report_abs(data->in_dev, ABS_MT_POSITION_Y, + pos_y[i]); input_mt_sync(data->in_dev); } } else @@ -261,24 +484,23 @@ static irqreturn_t bu21013_gpio_irq(int irq, void *device_data) dev_err(&i2c->dev, "bu21013_do_touch_report failed\n"); return IRQ_NONE; } - data->intr_pin = data->chip->irq_read_val(); if (data->intr_pin == PEN_DOWN_INTR) wait_event_timeout(data->wait, data->touch_stopped, - msecs_to_jiffies(2)); + msecs_to_jiffies(PENUP_TIMEOUT)); } while (!data->intr_pin && !data->touch_stopped); - return IRQ_HANDLED; } /** * bu21013_init_chip() - power on sequence for the bu21013 controller * @data: device structure pointer + * @on_ext_clk: Run on external clock * * This function is used to power on * the bu21013 controller and returns integer. 
*/ -static int bu21013_init_chip(struct bu21013_ts_data *data) +static int bu21013_init_chip(struct bu21013_ts_data *data, bool on_ext_clk) { int retval; struct i2c_client *i2c = data->client; @@ -297,28 +519,24 @@ static int bu21013_init_chip(struct bu21013_ts_data *data) dev_err(&i2c->dev, "BU21013_SENSOR_0_7 reg write failed\n"); return retval; } - retval = i2c_smbus_write_byte_data(i2c, BU21013_SENSOR_8_15_REG, BU21013_SENSORS_EN_8_15); if (retval < 0) { dev_err(&i2c->dev, "BU21013_SENSOR_8_15 reg write failed\n"); return retval; } - retval = i2c_smbus_write_byte_data(i2c, BU21013_SENSOR_16_23_REG, BU21013_SENSORS_EN_16_23); if (retval < 0) { dev_err(&i2c->dev, "BU21013_SENSOR_16_23 reg write failed\n"); return retval; } - retval = i2c_smbus_write_byte_data(i2c, BU21013_POS_MODE1_REG, (BU21013_POS_MODE1_0 | BU21013_POS_MODE1_1)); if (retval < 0) { dev_err(&i2c->dev, "BU21013_POS_MODE1 reg write failed\n"); return retval; } - retval = i2c_smbus_write_byte_data(i2c, BU21013_POS_MODE2_REG, (BU21013_POS_MODE2_ZERO | BU21013_POS_MODE2_AVG1 | BU21013_POS_MODE2_AVG2 | BU21013_POS_MODE2_EN_RAW | @@ -327,8 +545,7 @@ static int bu21013_init_chip(struct bu21013_ts_data *data) dev_err(&i2c->dev, "BU21013_POS_MODE2 reg write failed\n"); return retval; } - - if (data->chip->ext_clk) + if (on_ext_clk) retval = i2c_smbus_write_byte_data(i2c, BU21013_CLK_MODE_REG, (BU21013_CLK_MODE_EXT | BU21013_CLK_MODE_CALIB)); else @@ -338,21 +555,18 @@ static int bu21013_init_chip(struct bu21013_ts_data *data) dev_err(&i2c->dev, "BU21013_CLK_MODE reg write failed\n"); return retval; } - retval = i2c_smbus_write_byte_data(i2c, BU21013_IDLE_REG, (BU21013_IDLET_0 | BU21013_IDLE_INTERMIT_EN)); if (retval < 0) { dev_err(&i2c->dev, "BU21013_IDLE reg write failed\n"); return retval; } - retval = i2c_smbus_write_byte_data(i2c, BU21013_INT_MODE_REG, BU21013_INT_MODE_LEVEL); if (retval < 0) { dev_err(&i2c->dev, "BU21013_INT_MODE reg write failed\n"); return retval; } - retval = 
i2c_smbus_write_byte_data(i2c, BU21013_FILTER_REG, (BU21013_DELTA_0_6 | BU21013_FILTER_EN)); @@ -367,14 +581,12 @@ static int bu21013_init_chip(struct bu21013_ts_data *data) dev_err(&i2c->dev, "BU21013_TH_ON reg write failed\n"); return retval; } - retval = i2c_smbus_write_byte_data(i2c, BU21013_TH_OFF_REG, BU21013_TH_OFF_4 | BU21013_TH_OFF_3); if (retval < 0) { dev_err(&i2c->dev, "BU21013_TH_OFF reg write failed\n"); return retval; } - retval = i2c_smbus_write_byte_data(i2c, BU21013_GAIN_REG, (BU21013_GAIN_0 | BU21013_GAIN_1)); if (retval < 0) { @@ -388,7 +600,6 @@ static int bu21013_init_chip(struct bu21013_ts_data *data) dev_err(&i2c->dev, "BU21013_OFFSET_MODE reg write failed\n"); return retval; } - retval = i2c_smbus_write_byte_data(i2c, BU21013_XY_EDGE_REG, (BU21013_X_EDGE_0 | BU21013_X_EDGE_2 | BU21013_Y_EDGE_1 | BU21013_Y_EDGE_3)); @@ -396,7 +607,6 @@ static int bu21013_init_chip(struct bu21013_ts_data *data) dev_err(&i2c->dev, "BU21013_XY_EDGE reg write failed\n"); return retval; } - retval = i2c_smbus_write_byte_data(i2c, BU21013_DONE_REG, BU21013_DONE); if (retval < 0) { @@ -404,25 +614,15 @@ static int bu21013_init_chip(struct bu21013_ts_data *data) return retval; } - return 0; + data->factor_x = (data->chip->x_max_res * SCALE_FACTOR / + data->chip->touch_x_max); + data->factor_y = (data->chip->y_max_res * SCALE_FACTOR / + data->chip->touch_y_max); + return retval; } /** - * bu21013_free_irq() - frees IRQ registered for touchscreen - * @bu21013_data: device structure pointer - * - * This function signals interrupt thread to stop processing and - * frees interrupt. 
- */ -static void bu21013_free_irq(struct bu21013_ts_data *bu21013_data) -{ - bu21013_data->touch_stopped = true; - wake_up(&bu21013_data->wait); - free_irq(bu21013_data->chip->irq, bu21013_data); -} - -/** - * bu21013_probe() - initializes the i2c-client touchscreen driver + * bu21013_probe() - initialzes the i2c-client touchscreen driver * @client: i2c client structure pointer * @id: i2c device id pointer * @@ -432,11 +632,11 @@ static void bu21013_free_irq(struct bu21013_ts_data *bu21013_data) static int __devinit bu21013_probe(struct i2c_client *client, const struct i2c_device_id *id) { + int retval; struct bu21013_ts_data *bu21013_data; struct input_dev *in_dev; - const struct bu21013_platform_device *pdata = + struct bu21013_platform_device *pdata = client->dev.platform_data; - int error; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { @@ -446,53 +646,72 @@ static int __devinit bu21013_probe(struct i2c_client *client, if (!pdata) { dev_err(&client->dev, "platform data not defined\n"); - return -EINVAL; + retval = -EINVAL; + return retval; } bu21013_data = kzalloc(sizeof(struct bu21013_ts_data), GFP_KERNEL); - in_dev = input_allocate_device(); - if (!bu21013_data || !in_dev) { + if (!bu21013_data) { dev_err(&client->dev, "device memory alloc failed\n"); - error = -ENOMEM; - goto err_free_mem; + retval = -ENOMEM; + return retval; + } + /* allocate input device */ + in_dev = input_allocate_device(); + if (!in_dev) { + dev_err(&client->dev, "input device memory alloc failed\n"); + retval = -ENOMEM; + goto err_alloc; } bu21013_data->in_dev = in_dev; bu21013_data->chip = pdata; bu21013_data->client = client; - bu21013_data->regulator = regulator_get(&client->dev, "V-TOUCH"); + bu21013_data->regulator = regulator_get(&client->dev, "avdd"); if (IS_ERR(bu21013_data->regulator)) { - dev_err(&client->dev, "regulator_get failed\n"); - error = PTR_ERR(bu21013_data->regulator); - goto err_free_mem; + dev_warn(&client->dev, "regulator_get 
failed\n"); + bu21013_data->regulator = NULL; } - - error = regulator_enable(bu21013_data->regulator); - if (error < 0) { - dev_err(&client->dev, "regulator enable failed\n"); - goto err_put_regulator; - } - - bu21013_data->touch_stopped = false; - init_waitqueue_head(&bu21013_data->wait); + if (bu21013_data->regulator) + regulator_enable(bu21013_data->regulator); /* configure the gpio pins */ if (pdata->cs_en) { - error = pdata->cs_en(pdata->cs_pin); - if (error < 0) { + retval = pdata->cs_en(pdata->cs_pin); + if (retval < 0) { dev_err(&client->dev, "chip init failed\n"); - goto err_disable_regulator; + goto err_init_cs; + } + } + + if (pdata->has_ext_clk) { + bu21013_data->tpclk = clk_get(&client->dev, NULL); + if (IS_ERR(bu21013_data->tpclk)) { + dev_warn(&client->dev, "get extern clock failed\n"); + bu21013_data->tpclk = NULL; + } + } + + if (pdata->enable_ext_clk && bu21013_data->tpclk) { + retval = clk_enable(bu21013_data->tpclk); + if (retval < 0) { + dev_err(&client->dev, "clock enable failed\n"); + goto err_ext_clk; } + bu21013_data->ext_clk_enable = true; } /* configure the touch panel controller */ - error = bu21013_init_chip(bu21013_data); - if (error) { + retval = bu21013_init_chip(bu21013_data, bu21013_data->ext_clk_enable); + if (retval < 0) { dev_err(&client->dev, "error in bu21013 config\n"); - goto err_cs_disable; + goto err_init_config; } + init_waitqueue_head(&bu21013_data->wait); + bu21013_data->touch_stopped = false; + /* register the device to input subsystem */ in_dev->name = DRIVER_TP; in_dev->id.bustype = BUS_I2C; @@ -503,44 +722,63 @@ static int __devinit bu21013_probe(struct i2c_client *client, __set_bit(EV_ABS, in_dev->evbit); input_set_abs_params(in_dev, ABS_MT_POSITION_X, 0, - pdata->touch_x_max, 0, 0); + pdata->x_max_res, 0, 0); input_set_abs_params(in_dev, ABS_MT_POSITION_Y, 0, - pdata->touch_y_max, 0, 0); + pdata->y_max_res, 0, 0); + input_set_abs_params(in_dev, ABS_MT_TOUCH_MAJOR, 0, + max(pdata->x_max_res , pdata->y_max_res), 0, 
0); input_set_drvdata(in_dev, bu21013_data); - - error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq, - IRQF_TRIGGER_FALLING | IRQF_SHARED, - DRIVER_TP, bu21013_data); - if (error) { + retval = input_register_device(in_dev); + if (retval) + goto err_input_register; + + retval = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq, + (IRQF_TRIGGER_FALLING | IRQF_SHARED), + DRIVER_TP, bu21013_data); + if (retval) { dev_err(&client->dev, "request irq %d failed\n", pdata->irq); - goto err_cs_disable; + goto err_init_irq; } + bu21013_data->enable = true; + i2c_set_clientdata(client, bu21013_data); - error = input_register_device(in_dev); - if (error) { - dev_err(&client->dev, "failed to register input device\n"); - goto err_free_irq; + /* sysfs implementation for dynamic enable/disable the input event */ + retval = sysfs_create_group(&client->dev.kobj, &bu21013_attr_group); + if (retval) { + dev_err(&client->dev, "failed to create sysfs entries\n"); + goto err_sysfs_create; } - device_init_wakeup(&client->dev, pdata->wakeup); - i2c_set_clientdata(client, bu21013_data); - - return 0; + return retval; -err_free_irq: - bu21013_free_irq(bu21013_data); -err_cs_disable: - pdata->cs_dis(pdata->cs_pin); -err_disable_regulator: - regulator_disable(bu21013_data->regulator); -err_put_regulator: - regulator_put(bu21013_data->regulator); -err_free_mem: - input_free_device(in_dev); +err_sysfs_create: + free_irq(pdata->irq, bu21013_data); + i2c_set_clientdata(client, NULL); +err_init_irq: + input_unregister_device(bu21013_data->in_dev); +err_input_register: + wake_up(&bu21013_data->wait); +err_init_config: + if (bu21013_data->tpclk) { + if (bu21013_data->ext_clk_enable) + clk_disable(bu21013_data->tpclk); + clk_put(bu21013_data->tpclk); + } +err_ext_clk: + if (pdata->cs_dis) + pdata->cs_dis(pdata->cs_pin); +err_init_cs: + if (bu21013_data->regulator) { + regulator_disable(bu21013_data->regulator); + regulator_put(bu21013_data->regulator); + } + 
input_free_device(bu21013_data->in_dev); +err_alloc: kfree(bu21013_data); - return error; + return retval; } + /** * bu21013_remove() - removes the i2c-client touchscreen driver * @client: i2c client structure pointer @@ -552,19 +790,24 @@ static int __devexit bu21013_remove(struct i2c_client *client) { struct bu21013_ts_data *bu21013_data = i2c_get_clientdata(client); - bu21013_free_irq(bu21013_data); - + bu21013_data->touch_stopped = true; + sysfs_remove_group(&client->dev.kobj, &bu21013_attr_group); + wake_up(&bu21013_data->wait); + free_irq(bu21013_data->chip->irq, bu21013_data); bu21013_data->chip->cs_dis(bu21013_data->chip->cs_pin); - input_unregister_device(bu21013_data->in_dev); - regulator_disable(bu21013_data->regulator); - regulator_put(bu21013_data->regulator); - + if (bu21013_data->tpclk) { + if (bu21013_data->ext_clk_enable) + clk_disable(bu21013_data->tpclk); + clk_put(bu21013_data->tpclk); + } + if (bu21013_data->regulator) { + regulator_disable(bu21013_data->regulator); + regulator_put(bu21013_data->regulator); + } kfree(bu21013_data); - device_init_wakeup(&client->dev, false); - return 0; } @@ -579,15 +822,8 @@ static int __devexit bu21013_remove(struct i2c_client *client) static int bu21013_suspend(struct device *dev) { struct bu21013_ts_data *bu21013_data = dev_get_drvdata(dev); - struct i2c_client *client = bu21013_data->client; - bu21013_data->touch_stopped = true; - if (device_may_wakeup(&client->dev)) - enable_irq_wake(bu21013_data->chip->irq); - else - disable_irq(bu21013_data->chip->irq); - - regulator_disable(bu21013_data->regulator); + bu21013_disable(bu21013_data); return 0; } @@ -602,29 +838,8 @@ static int bu21013_suspend(struct device *dev) static int bu21013_resume(struct device *dev) { struct bu21013_ts_data *bu21013_data = dev_get_drvdata(dev); - struct i2c_client *client = bu21013_data->client; - int retval; - retval = regulator_enable(bu21013_data->regulator); - if (retval < 0) { - dev_err(&client->dev, "bu21013 regulator enable 
failed\n"); - return retval; - } - - retval = bu21013_init_chip(bu21013_data); - if (retval < 0) { - dev_err(&client->dev, "bu21013 controller config failed\n"); - return retval; - } - - bu21013_data->touch_stopped = false; - - if (device_may_wakeup(&client->dev)) - disable_irq_wake(bu21013_data->chip->irq); - else - enable_irq(bu21013_data->chip->irq); - - return 0; + return bu21013_enable(bu21013_data); } static const struct dev_pm_ops bu21013_dev_pm_ops = { diff --git a/drivers/input/touchscreen/synaptics_i2c_rmi.c b/drivers/input/touchscreen/synaptics_i2c_rmi.c new file mode 100644 index 00000000000..5729602cbb6 --- /dev/null +++ b/drivers/input/touchscreen/synaptics_i2c_rmi.c @@ -0,0 +1,675 @@ +/* drivers/input/keyboard/synaptics_i2c_rmi.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/earlysuspend.h> +#include <linux/hrtimer.h> +#include <linux/i2c.h> +#include <linux/input.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/synaptics_i2c_rmi.h> + +static struct workqueue_struct *synaptics_wq; + +struct synaptics_ts_data { + uint16_t addr; + struct i2c_client *client; + struct input_dev *input_dev; + int use_irq; + bool has_relative_report; + struct hrtimer timer; + struct work_struct work; + uint16_t max[2]; + int snap_state[2][2]; + int snap_down_on[2]; + int snap_down_off[2]; + int snap_up_on[2]; + int snap_up_off[2]; + int snap_down[2]; + int snap_up[2]; + uint32_t flags; + int reported_finger_count; + int8_t sensitivity_adjust; + int (*power)(int on); + struct early_suspend early_suspend; +}; + +#ifdef CONFIG_HAS_EARLYSUSPEND +static void synaptics_ts_early_suspend(struct early_suspend *h); +static void synaptics_ts_late_resume(struct early_suspend *h); +#endif + +static int synaptics_init_panel(struct synaptics_ts_data *ts) +{ + int ret; + + ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x10); /* page select = 0x10 */ + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n"); + goto err_page_select_failed; + } + ret = i2c_smbus_write_byte_data(ts->client, 0x41, 0x04); /* Set "No Clip Z" */ + if (ret < 0) + printk(KERN_ERR "i2c_smbus_write_byte_data failed for No Clip Z\n"); + + ret = i2c_smbus_write_byte_data(ts->client, 0x44, + ts->sensitivity_adjust); + if (ret < 0) + pr_err("synaptics_ts: failed to set Sensitivity Adjust\n"); + +err_page_select_failed: + ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x04); /* page select = 0x04 */ + if (ret < 0) + printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n"); + ret = i2c_smbus_write_byte_data(ts->client, 0xf0, 0x81); /* normal operation, 80 reports per second */ + if (ret < 0) + 
printk(KERN_ERR "synaptics_ts_resume: i2c_smbus_write_byte_data failed\n"); + return ret; +} + +static void synaptics_ts_work_func(struct work_struct *work) +{ + int i; + int ret; + int bad_data = 0; + struct i2c_msg msg[2]; + uint8_t start_reg; + uint8_t buf[15]; + struct synaptics_ts_data *ts = container_of(work, struct synaptics_ts_data, work); + int buf_len = ts->has_relative_report ? 15 : 13; + + msg[0].addr = ts->client->addr; + msg[0].flags = 0; + msg[0].len = 1; + msg[0].buf = &start_reg; + start_reg = 0x00; + msg[1].addr = ts->client->addr; + msg[1].flags = I2C_M_RD; + msg[1].len = buf_len; + msg[1].buf = buf; + + /* printk("synaptics_ts_work_func\n"); */ + for (i = 0; i < ((ts->use_irq && !bad_data) ? 1 : 10); i++) { + ret = i2c_transfer(ts->client->adapter, msg, 2); + if (ret < 0) { + printk(KERN_ERR "synaptics_ts_work_func: i2c_transfer failed\n"); + bad_data = 1; + } else { + /* printk("synaptics_ts_work_func: %x %x %x %x %x %x" */ + /* " %x %x %x %x %x %x %x %x %x, ret %d\n", */ + /* buf[0], buf[1], buf[2], buf[3], */ + /* buf[4], buf[5], buf[6], buf[7], */ + /* buf[8], buf[9], buf[10], buf[11], */ + /* buf[12], buf[13], buf[14], ret); */ + if ((buf[buf_len - 1] & 0xc0) != 0x40) { + printk(KERN_WARNING "synaptics_ts_work_func:" + " bad read %x %x %x %x %x %x %x %x %x" + " %x %x %x %x %x %x, ret %d\n", + buf[0], buf[1], buf[2], buf[3], + buf[4], buf[5], buf[6], buf[7], + buf[8], buf[9], buf[10], buf[11], + buf[12], buf[13], buf[14], ret); + if (bad_data) + synaptics_init_panel(ts); + bad_data = 1; + continue; + } + bad_data = 0; + if ((buf[buf_len - 1] & 1) == 0) { + /* printk("read %d coordinates\n", i); */ + break; + } else { + int pos[2][2]; + int f, a; + int base; + /* int x = buf[3] | (uint16_t)(buf[2] & 0x1f) << 8; */ + /* int y = buf[5] | (uint16_t)(buf[4] & 0x1f) << 8; */ + int z = buf[1]; + int w = buf[0] >> 4; + int finger = buf[0] & 7; + + /* int x2 = buf[3+6] | (uint16_t)(buf[2+6] & 0x1f) << 8; */ + /* int y2 = buf[5+6] | 
(uint16_t)(buf[4+6] & 0x1f) << 8; */ + /* int z2 = buf[1+6]; */ + /* int w2 = buf[0+6] >> 4; */ + /* int finger2 = buf[0+6] & 7; */ + + /* int dx = (int8_t)buf[12]; */ + /* int dy = (int8_t)buf[13]; */ + int finger2_pressed; + + /* printk("x %4d, y %4d, z %3d, w %2d, F %d, 2nd: x %4d, y %4d, z %3d, w %2d, F %d, dx %4d, dy %4d\n", */ + /* x, y, z, w, finger, */ + /* x2, y2, z2, w2, finger2, */ + /* dx, dy); */ + + base = 2; + for (f = 0; f < 2; f++) { + uint32_t flip_flag = SYNAPTICS_FLIP_X; + for (a = 0; a < 2; a++) { + int p = buf[base + 1]; + p |= (uint16_t)(buf[base] & 0x1f) << 8; + if (ts->flags & flip_flag) + p = ts->max[a] - p; + if (ts->flags & SYNAPTICS_SNAP_TO_INACTIVE_EDGE) { + if (ts->snap_state[f][a]) { + if (p <= ts->snap_down_off[a]) + p = ts->snap_down[a]; + else if (p >= ts->snap_up_off[a]) + p = ts->snap_up[a]; + else + ts->snap_state[f][a] = 0; + } else { + if (p <= ts->snap_down_on[a]) { + p = ts->snap_down[a]; + ts->snap_state[f][a] = 1; + } else if (p >= ts->snap_up_on[a]) { + p = ts->snap_up[a]; + ts->snap_state[f][a] = 1; + } + } + } + pos[f][a] = p; + base += 2; + flip_flag <<= 1; + } + base += 2; + if (ts->flags & SYNAPTICS_SWAP_XY) + swap(pos[f][0], pos[f][1]); + } + if (z) { + input_report_abs(ts->input_dev, ABS_X, pos[0][0]); + input_report_abs(ts->input_dev, ABS_Y, pos[0][1]); + } + input_report_abs(ts->input_dev, ABS_PRESSURE, z); + input_report_abs(ts->input_dev, ABS_TOOL_WIDTH, w); + input_report_key(ts->input_dev, BTN_TOUCH, finger); + finger2_pressed = finger > 1 && finger != 7; + input_report_key(ts->input_dev, BTN_2, finger2_pressed); + if (finger2_pressed) { + input_report_abs(ts->input_dev, ABS_HAT0X, pos[1][0]); + input_report_abs(ts->input_dev, ABS_HAT0Y, pos[1][1]); + } + + if (!finger) + z = 0; + input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, z); + input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w); + input_report_abs(ts->input_dev, ABS_MT_POSITION_X, pos[0][0]); + input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, 
pos[0][1]); + input_mt_sync(ts->input_dev); + if (finger2_pressed) { + input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, z); + input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w); + input_report_abs(ts->input_dev, ABS_MT_POSITION_X, pos[1][0]); + input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, pos[1][1]); + input_mt_sync(ts->input_dev); + } else if (ts->reported_finger_count > 1) { + input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0); + input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0); + input_mt_sync(ts->input_dev); + } + ts->reported_finger_count = finger; + input_sync(ts->input_dev); + } + } + } + if (ts->use_irq) + enable_irq(ts->client->irq); +} + +static enum hrtimer_restart synaptics_ts_timer_func(struct hrtimer *timer) +{ + struct synaptics_ts_data *ts = container_of(timer, struct synaptics_ts_data, timer); + /* printk("synaptics_ts_timer_func\n"); */ + + queue_work(synaptics_wq, &ts->work); + + hrtimer_start(&ts->timer, ktime_set(0, 12500000), HRTIMER_MODE_REL); + return HRTIMER_NORESTART; +} + +static irqreturn_t synaptics_ts_irq_handler(int irq, void *dev_id) +{ + struct synaptics_ts_data *ts = dev_id; + + /* printk("synaptics_ts_irq_handler\n"); */ + disable_irq_nosync(ts->client->irq); + queue_work(synaptics_wq, &ts->work); + return IRQ_HANDLED; +} + +static int synaptics_ts_probe( + struct i2c_client *client, const struct i2c_device_id *id) +{ + struct synaptics_ts_data *ts; + uint8_t buf0[4]; + uint8_t buf1[8]; + struct i2c_msg msg[2]; + int ret = 0; + uint16_t max_x, max_y; + int fuzz_x, fuzz_y, fuzz_p, fuzz_w; + struct synaptics_i2c_rmi_platform_data *pdata; + unsigned long irqflags; + int inactive_area_left; + int inactive_area_right; + int inactive_area_top; + int inactive_area_bottom; + int snap_left_on; + int snap_left_off; + int snap_right_on; + int snap_right_off; + int snap_top_on; + int snap_top_off; + int snap_bottom_on; + int snap_bottom_off; + uint32_t panel_version; + + if (!i2c_check_functionality(client->adapter, 
I2C_FUNC_I2C)) { + printk(KERN_ERR "synaptics_ts_probe: need I2C_FUNC_I2C\n"); + ret = -ENODEV; + goto err_check_functionality_failed; + } + + ts = kzalloc(sizeof(*ts), GFP_KERNEL); + if (ts == NULL) { + ret = -ENOMEM; + goto err_alloc_data_failed; + } + INIT_WORK(&ts->work, synaptics_ts_work_func); + ts->client = client; + i2c_set_clientdata(client, ts); + pdata = client->dev.platform_data; + if (pdata) + ts->power = pdata->power; + if (ts->power) { + ret = ts->power(1); + if (ret < 0) { + printk(KERN_ERR "synaptics_ts_probe power on failed\n"); + goto err_power_failed; + } + } + + ret = i2c_smbus_write_byte_data(ts->client, 0xf4, 0x01); /* device command = reset */ + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_write_byte_data failed\n"); + /* fail? */ + } + { + int retry = 10; + while (retry-- > 0) { + ret = i2c_smbus_read_byte_data(ts->client, 0xe4); + if (ret >= 0) + break; + msleep(100); + } + } + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_read_byte_data failed\n"); + goto err_detect_failed; + } + printk(KERN_INFO "synaptics_ts_probe: Product Major Version %x\n", ret); + panel_version = ret << 8; + ret = i2c_smbus_read_byte_data(ts->client, 0xe5); + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_read_byte_data failed\n"); + goto err_detect_failed; + } + printk(KERN_INFO "synaptics_ts_probe: Product Minor Version %x\n", ret); + panel_version |= ret; + + ret = i2c_smbus_read_byte_data(ts->client, 0xe3); + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_read_byte_data failed\n"); + goto err_detect_failed; + } + printk(KERN_INFO "synaptics_ts_probe: product property %x\n", ret); + + if (pdata) { + while (pdata->version > panel_version) + pdata++; + ts->flags = pdata->flags; + ts->sensitivity_adjust = pdata->sensitivity_adjust; + irqflags = pdata->irqflags; + inactive_area_left = pdata->inactive_left; + inactive_area_right = pdata->inactive_right; + inactive_area_top = pdata->inactive_top; + inactive_area_bottom = pdata->inactive_bottom; + snap_left_on = 
pdata->snap_left_on; + snap_left_off = pdata->snap_left_off; + snap_right_on = pdata->snap_right_on; + snap_right_off = pdata->snap_right_off; + snap_top_on = pdata->snap_top_on; + snap_top_off = pdata->snap_top_off; + snap_bottom_on = pdata->snap_bottom_on; + snap_bottom_off = pdata->snap_bottom_off; + fuzz_x = pdata->fuzz_x; + fuzz_y = pdata->fuzz_y; + fuzz_p = pdata->fuzz_p; + fuzz_w = pdata->fuzz_w; + } else { + irqflags = 0; + inactive_area_left = 0; + inactive_area_right = 0; + inactive_area_top = 0; + inactive_area_bottom = 0; + snap_left_on = 0; + snap_left_off = 0; + snap_right_on = 0; + snap_right_off = 0; + snap_top_on = 0; + snap_top_off = 0; + snap_bottom_on = 0; + snap_bottom_off = 0; + fuzz_x = 0; + fuzz_y = 0; + fuzz_p = 0; + fuzz_w = 0; + } + + ret = i2c_smbus_read_byte_data(ts->client, 0xf0); + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_read_byte_data failed\n"); + goto err_detect_failed; + } + printk(KERN_INFO "synaptics_ts_probe: device control %x\n", ret); + + ret = i2c_smbus_read_byte_data(ts->client, 0xf1); + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_read_byte_data failed\n"); + goto err_detect_failed; + } + printk(KERN_INFO "synaptics_ts_probe: interrupt enable %x\n", ret); + + ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0); /* disable interrupt */ + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_write_byte_data failed\n"); + goto err_detect_failed; + } + + msg[0].addr = ts->client->addr; + msg[0].flags = 0; + msg[0].len = 1; + msg[0].buf = buf0; + buf0[0] = 0xe0; + msg[1].addr = ts->client->addr; + msg[1].flags = I2C_M_RD; + msg[1].len = 8; + msg[1].buf = buf1; + ret = i2c_transfer(ts->client->adapter, msg, 2); + if (ret < 0) { + printk(KERN_ERR "i2c_transfer failed\n"); + goto err_detect_failed; + } + printk(KERN_INFO "synaptics_ts_probe: 0xe0: %x %x %x %x %x %x %x %x\n", + buf1[0], buf1[1], buf1[2], buf1[3], + buf1[4], buf1[5], buf1[6], buf1[7]); + + ret = i2c_smbus_write_byte_data(ts->client, 0xff, 0x10); /* page select = 0x10 */ + 
if (ret < 0) { + printk(KERN_ERR "i2c_smbus_write_byte_data failed for page select\n"); + goto err_detect_failed; + } + ret = i2c_smbus_read_word_data(ts->client, 0x02); + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_read_word_data failed\n"); + goto err_detect_failed; + } + ts->has_relative_report = !(ret & 0x100); + printk(KERN_INFO "synaptics_ts_probe: Sensor properties %x\n", ret); + ret = i2c_smbus_read_word_data(ts->client, 0x04); + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_read_word_data failed\n"); + goto err_detect_failed; + } + ts->max[0] = max_x = (ret >> 8 & 0xff) | ((ret & 0x1f) << 8); + ret = i2c_smbus_read_word_data(ts->client, 0x06); + if (ret < 0) { + printk(KERN_ERR "i2c_smbus_read_word_data failed\n"); + goto err_detect_failed; + } + ts->max[1] = max_y = (ret >> 8 & 0xff) | ((ret & 0x1f) << 8); + if (ts->flags & SYNAPTICS_SWAP_XY) + swap(max_x, max_y); + + ret = synaptics_init_panel(ts); /* will also switch back to page 0x04 */ + if (ret < 0) { + printk(KERN_ERR "synaptics_init_panel failed\n"); + goto err_detect_failed; + } + + ts->input_dev = input_allocate_device(); + if (ts->input_dev == NULL) { + ret = -ENOMEM; + printk(KERN_ERR "synaptics_ts_probe: Failed to allocate input device\n"); + goto err_input_dev_alloc_failed; + } + ts->input_dev->name = "synaptics-rmi-touchscreen"; + set_bit(EV_SYN, ts->input_dev->evbit); + set_bit(EV_KEY, ts->input_dev->evbit); + set_bit(BTN_TOUCH, ts->input_dev->keybit); + set_bit(BTN_2, ts->input_dev->keybit); + set_bit(EV_ABS, ts->input_dev->evbit); + inactive_area_left = inactive_area_left * max_x / 0x10000; + inactive_area_right = inactive_area_right * max_x / 0x10000; + inactive_area_top = inactive_area_top * max_y / 0x10000; + inactive_area_bottom = inactive_area_bottom * max_y / 0x10000; + snap_left_on = snap_left_on * max_x / 0x10000; + snap_left_off = snap_left_off * max_x / 0x10000; + snap_right_on = snap_right_on * max_x / 0x10000; + snap_right_off = snap_right_off * max_x / 0x10000; + snap_top_on = 
snap_top_on * max_y / 0x10000; + snap_top_off = snap_top_off * max_y / 0x10000; + snap_bottom_on = snap_bottom_on * max_y / 0x10000; + snap_bottom_off = snap_bottom_off * max_y / 0x10000; + fuzz_x = fuzz_x * max_x / 0x10000; + fuzz_y = fuzz_y * max_y / 0x10000; + ts->snap_down[!!(ts->flags & SYNAPTICS_SWAP_XY)] = -inactive_area_left; + ts->snap_up[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x + inactive_area_right; + ts->snap_down[!(ts->flags & SYNAPTICS_SWAP_XY)] = -inactive_area_top; + ts->snap_up[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y + inactive_area_bottom; + ts->snap_down_on[!!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_left_on; + ts->snap_down_off[!!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_left_off; + ts->snap_up_on[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x - snap_right_on; + ts->snap_up_off[!!(ts->flags & SYNAPTICS_SWAP_XY)] = max_x - snap_right_off; + ts->snap_down_on[!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_top_on; + ts->snap_down_off[!(ts->flags & SYNAPTICS_SWAP_XY)] = snap_top_off; + ts->snap_up_on[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y - snap_bottom_on; + ts->snap_up_off[!(ts->flags & SYNAPTICS_SWAP_XY)] = max_y - snap_bottom_off; + printk(KERN_INFO "synaptics_ts_probe: max_x %d, max_y %d\n", max_x, max_y); + printk(KERN_INFO "synaptics_ts_probe: inactive_x %d %d, inactive_y %d %d\n", + inactive_area_left, inactive_area_right, + inactive_area_top, inactive_area_bottom); + printk(KERN_INFO "synaptics_ts_probe: snap_x %d-%d %d-%d, snap_y %d-%d %d-%d\n", + snap_left_on, snap_left_off, snap_right_on, snap_right_off, + snap_top_on, snap_top_off, snap_bottom_on, snap_bottom_off); + input_set_abs_params(ts->input_dev, ABS_X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0); + input_set_abs_params(ts->input_dev, ABS_Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0); + input_set_abs_params(ts->input_dev, ABS_PRESSURE, 0, 255, fuzz_p, 0); + input_set_abs_params(ts->input_dev, ABS_TOOL_WIDTH, 0, 15, fuzz_w, 0); + 
input_set_abs_params(ts->input_dev, ABS_HAT0X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0); + input_set_abs_params(ts->input_dev, ABS_HAT0Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0); + input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X, -inactive_area_left, max_x + inactive_area_right, fuzz_x, 0); + input_set_abs_params(ts->input_dev, ABS_MT_POSITION_Y, -inactive_area_top, max_y + inactive_area_bottom, fuzz_y, 0); + input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, fuzz_p, 0); + input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 15, fuzz_w, 0); + /* ts->input_dev->name = ts->keypad_info->name; */ + ret = input_register_device(ts->input_dev); + if (ret) { + printk(KERN_ERR "synaptics_ts_probe: Unable to register %s input device\n", ts->input_dev->name); + goto err_input_register_device_failed; + } + if (client->irq) { + ret = request_irq(client->irq, synaptics_ts_irq_handler, irqflags, client->name, ts); + if (ret == 0) { + ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0x01); /* enable abs int */ + if (ret) + free_irq(client->irq, ts); + } + if (ret == 0) + ts->use_irq = 1; + else + dev_err(&client->dev, "request_irq failed\n"); + } + if (!ts->use_irq) { + hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + ts->timer.function = synaptics_ts_timer_func; + hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL); + } +#ifdef CONFIG_HAS_EARLYSUSPEND + ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1; + ts->early_suspend.suspend = synaptics_ts_early_suspend; + ts->early_suspend.resume = synaptics_ts_late_resume; + register_early_suspend(&ts->early_suspend); +#endif + + printk(KERN_INFO "synaptics_ts_probe: Start touchscreen %s in %s mode\n", ts->input_dev->name, ts->use_irq ? 
"interrupt" : "polling"); + + return 0; + +err_input_register_device_failed: + input_free_device(ts->input_dev); + +err_input_dev_alloc_failed: +err_detect_failed: +err_power_failed: + kfree(ts); +err_alloc_data_failed: +err_check_functionality_failed: + return ret; +} + +static int synaptics_ts_remove(struct i2c_client *client) +{ + struct synaptics_ts_data *ts = i2c_get_clientdata(client); + unregister_early_suspend(&ts->early_suspend); + if (ts->use_irq) + free_irq(client->irq, ts); + else + hrtimer_cancel(&ts->timer); + input_unregister_device(ts->input_dev); + kfree(ts); + return 0; +} + +static int synaptics_ts_suspend(struct i2c_client *client, pm_message_t mesg) +{ + int ret; + struct synaptics_ts_data *ts = i2c_get_clientdata(client); + + if (ts->use_irq) + disable_irq(client->irq); + else + hrtimer_cancel(&ts->timer); + ret = cancel_work_sync(&ts->work); + if (ret && ts->use_irq) /* if work was pending disable-count is now 2 */ + enable_irq(client->irq); + ret = i2c_smbus_write_byte_data(ts->client, 0xf1, 0); /* disable interrupt */ + if (ret < 0) + printk(KERN_ERR "synaptics_ts_suspend: i2c_smbus_write_byte_data failed\n"); + + ret = i2c_smbus_write_byte_data(client, 0xf0, 0x86); /* deep sleep */ + if (ret < 0) + printk(KERN_ERR "synaptics_ts_suspend: i2c_smbus_write_byte_data failed\n"); + if (ts->power) { + ret = ts->power(0); + if (ret < 0) + printk(KERN_ERR "synaptics_ts_resume power off failed\n"); + } + return 0; +} + +static int synaptics_ts_resume(struct i2c_client *client) +{ + int ret; + struct synaptics_ts_data *ts = i2c_get_clientdata(client); + + if (ts->power) { + ret = ts->power(1); + if (ret < 0) + printk(KERN_ERR "synaptics_ts_resume power on failed\n"); + } + + synaptics_init_panel(ts); + + if (ts->use_irq) + enable_irq(client->irq); + + if (!ts->use_irq) + hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL); + else + i2c_smbus_write_byte_data(ts->client, 0xf1, 0x01); /* enable abs int */ + + return 0; +} + +#ifdef 
CONFIG_HAS_EARLYSUSPEND +static void synaptics_ts_early_suspend(struct early_suspend *h) +{ + struct synaptics_ts_data *ts; + ts = container_of(h, struct synaptics_ts_data, early_suspend); + synaptics_ts_suspend(ts->client, PMSG_SUSPEND); +} + +static void synaptics_ts_late_resume(struct early_suspend *h) +{ + struct synaptics_ts_data *ts; + ts = container_of(h, struct synaptics_ts_data, early_suspend); + synaptics_ts_resume(ts->client); +} +#endif + +static const struct i2c_device_id synaptics_ts_id[] = { + { SYNAPTICS_I2C_RMI_NAME, 0 }, + { } +}; + +static struct i2c_driver synaptics_ts_driver = { + .probe = synaptics_ts_probe, + .remove = synaptics_ts_remove, +#ifndef CONFIG_HAS_EARLYSUSPEND + .suspend = synaptics_ts_suspend, + .resume = synaptics_ts_resume, +#endif + .id_table = synaptics_ts_id, + .driver = { + .name = SYNAPTICS_I2C_RMI_NAME, + }, +}; + +static int __devinit synaptics_ts_init(void) +{ + synaptics_wq = create_singlethread_workqueue("synaptics_wq"); + if (!synaptics_wq) + return -ENOMEM; + return i2c_add_driver(&synaptics_ts_driver); +} + +static void __exit synaptics_ts_exit(void) +{ + i2c_del_driver(&synaptics_ts_driver); + if (synaptics_wq) + destroy_workqueue(synaptics_wq); +} + +module_init(synaptics_ts_init); +module_exit(synaptics_ts_exit); + +MODULE_DESCRIPTION("Synaptics Touchscreen Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index ff4b8cfda58..5183a2d4fd5 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -50,6 +50,14 @@ config LEDS_LM3530 controlled manually or using PWM input or using ambient light automatically. +config LEDS_AB5500 + tristate "HVLED driver for AB5500" + depends on AB5500_CORE + help + This option enables support for the HVLED in AB5500 + multi function device. Currently Ab5500 v1.0 chip leds + are supported. 
+ config LEDS_LOCOMO tristate "LED Support for Locomo device" depends on LEDS_CLASS diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index 890481cb09f..59a569b376e 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_LEDS_ATMEL_PWM) += leds-atmel-pwm.o obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o +obj-$(CONFIG_LEDS_AB5500) += leds-ab5500.o obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o diff --git a/drivers/leds/leds-ab5500.c b/drivers/leds/leds-ab5500.c new file mode 100644 index 00000000000..294551b1962 --- /dev/null +++ b/drivers/leds/leds-ab5500.c @@ -0,0 +1,811 @@ +/* + * leds-ab5500.c - driver for High Voltage (HV) LED in ST-Ericsson AB5500 chip + * + * Copyright (C) 2011 ST-Ericsson SA. + * + * License Terms: GNU General Public License v2 + * + * Author: Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com> + */ + +/* + * Driver for HVLED in ST-Ericsson AB5500 analog baseband controller + * + * This chip can drive upto 3 leds, of upto 40mA of led sink current. + * These leds can be programmed to blink between two intensities with + * fading delay of half, one or two seconds. 
+ * + * Leds can be controlled via sysfs entries in + * "/sys/class/leds/< red | green | blue >" + * + * For each led, + * + * Modes of operation: + * - manual: echo 0 > fade_auto (default, no auto blinking) + * - auto: echo 1 > fade_auto + * + * Soft scaling delay between two intensities: + * - 1/2 sec: echo 1 > fade_delay + * - 1 sec: echo 2 > fade_delay + * - 2 sec: echo 3 > fade_delay + * + * Possible sequence of operation: + * - continuous glow: set brightness (brt) + * - blink between LED_OFF and LED_FULL: + * set fade delay -> set fade auto + * - blink between previous two brightness (only for LED-1): + * set brt1 -> set brt2 -> set fade auto + * + * Delay can be set in any step, its affect will be seen on switching mode. + * + * Note: Blink/Fade feature is supported in AB5500 v2 onwards + * + */ + +#include <linux/leds.h> +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/input.h> +#include <linux/mfd/abx500.h> +#include <linux/mfd/abx500/ab5500.h> +#include <linux/leds-ab5500.h> +#include <linux/types.h> + +#include <mach/hardware.h> + +#define AB5500LED_NAME "ab5500-leds" +#define AB5500_LED_MAX 0x03 + +/* Register offsets */ +#define AB5500_LED_REG_ENABLE 0x03 +#define AB5500_LED_FADE_CTRL 0x0D + +/* LED-0 Register Addr. Offsets */ +#define AB5500_LED0_PWM_DUTY 0x01 +#define AB5500_LED0_PWMFREQ 0x02 +#define AB5500_LED0_SINKCTL 0x0A +#define AB5500_LED0_FADE_HI 0x11 +#define AB5500_LED0_FADE_LO 0x17 + +/* LED-1 Register Addr. Offsets */ +#define AB5500_LED1_PWM_DUTY 0x05 +#define AB5500_LED1_PWMFREQ 0x06 +#define AB5500_LED1_SINKCTL 0x0B +#define AB5500_LED1_FADE_HI 0x13 +#define AB5500_LED1_FADE_LO 0x19 + +/* LED-2 Register Addr. 
Offsets */ +#define AB5500_LED2_PWM_DUTY 0x08 +#define AB5500_LED2_PWMFREQ 0x09 +#define AB5500_LED2_SINKCTL 0x0C +#define AB5500_LED2_FADE_HI 0x15 +#define AB5500_LED2_FADE_LO 0x1B + +/* led-0/1/2 enable bit */ +#define AB5500_LED_ENABLE_MASK 0x04 + +/* led intensity */ +#define AB5500_LED_INTENSITY_OFF 0x0 +#define AB5500_LED_INTENSITY_MAX 0x3FF +#define AB5500_LED_INTENSITY_STEP (AB5500_LED_INTENSITY_MAX/LED_FULL) + +/* pwm frequency */ +#define AB5500_LED_PWMFREQ_MAX 0x0F /* 373.39 @sysclk=26MHz */ +#define AB5500_LED_PWMFREQ_SHIFT 4 + +/* LED sink current control */ +#define AB5500_LED_SINKCURR_MAX 0x0F /* 40mA MAX */ +#define AB5500_LED_SINKCURR_SHIFT 4 + +/* fade Control shift and masks */ +#define AB5500_FADE_DELAY_SHIFT 0x00 +#define AB5500_FADE_MODE_MASK 0x80 +#define AB5500_FADE_DELAY_MASK 0x03 +#define AB5500_FADE_START_MASK 0x04 +#define AB5500_FADE_ON_MASK 0x70 +#define AB5500_LED_FADE_ENABLE(ledid) (0x40 >> (ledid)) + +struct ab5500_led { + u8 id; + u8 max_current; + u16 brt_val; + u16 fade_hi; + u16 fade_lo; + bool led_on; + struct led_classdev led_cdev; + struct work_struct led_work; +}; + +struct ab5500_hvleds { + struct mutex lock; + struct device *dev; + struct ab5500_hvleds_platform_data *pdata; + struct ab5500_led leds[AB5500_HVLEDS_MAX]; + bool hw_fade; + bool fade_auto; + enum ab5500_fade_delay fade_delay; +}; + +static u8 ab5500_led_pwmduty_reg[AB5500_LED_MAX] = { + AB5500_LED0_PWM_DUTY, + AB5500_LED1_PWM_DUTY, + AB5500_LED2_PWM_DUTY, +}; + +static u8 ab5500_led_pwmfreq_reg[AB5500_LED_MAX] = { + AB5500_LED0_PWMFREQ, + AB5500_LED1_PWMFREQ, + AB5500_LED2_PWMFREQ, +}; + +static u8 ab5500_led_sinkctl_reg[AB5500_LED_MAX] = { + AB5500_LED0_SINKCTL, + AB5500_LED1_SINKCTL, + AB5500_LED2_SINKCTL +}; + +static u8 ab5500_led_fade_hi_reg[AB5500_LED_MAX] = { + AB5500_LED0_FADE_HI, + AB5500_LED1_FADE_HI, + AB5500_LED2_FADE_HI, +}; + +static u8 ab5500_led_fade_lo_reg[AB5500_LED_MAX] = { + AB5500_LED0_FADE_LO, + AB5500_LED1_FADE_LO, + AB5500_LED2_FADE_LO, 
+}; + +#define to_led(_x) container_of(_x, struct ab5500_led, _x) + +static inline struct ab5500_hvleds *led_to_hvleds(struct ab5500_led *led) +{ + return container_of(led, struct ab5500_hvleds, leds[led->id]); +} + +static int ab5500_led_enable(struct ab5500_hvleds *hvleds, + unsigned int led_id) +{ + int ret; + + ret = abx500_mask_and_set_register_interruptible( + hvleds->dev, AB5500_BANK_LED, + ab5500_led_pwmduty_reg[led_id], + AB5500_LED_ENABLE_MASK, + AB5500_LED_ENABLE_MASK); + if (ret < 0) + dev_err(hvleds->dev, "reg[%d] w failed: %d\n", + ab5500_led_pwmduty_reg[led_id], ret); + + return ret; + +} + +static int ab5500_led_start_manual(struct ab5500_hvleds *hvleds) +{ + int ret; + + mutex_lock(&hvleds->lock); + + ret = abx500_mask_and_set_register_interruptible( + hvleds->dev, AB5500_BANK_LED, + AB5500_LED_FADE_CTRL, AB5500_FADE_START_MASK, + AB5500_FADE_START_MASK); + if (ret < 0) + dev_err(hvleds->dev, "update reg 0x%x failed - %d\n", + AB5500_LED_FADE_CTRL, ret); + + mutex_unlock(&hvleds->lock); + + return ret; +} + +static int ab5500_led_disable(struct ab5500_hvleds *hvleds, + unsigned int led_id) +{ + int ret; + + ret = abx500_set_register_interruptible( + hvleds->dev, AB5500_BANK_LED, + ab5500_led_pwmduty_reg[led_id] - 1, 0); + ret |= abx500_set_register_interruptible( + hvleds->dev, AB5500_BANK_LED, + ab5500_led_pwmduty_reg[led_id], 0); + if (ret < 0) + dev_err(hvleds->dev, "reg[%d] w failed: %d\n", + ab5500_led_pwmduty_reg[led_id], ret); + + return ret; +} + +static int ab5500_led_pwmduty_write(struct ab5500_hvleds *hvleds, + unsigned int led_id, u16 val) +{ + int ret; + u8 val_lsb = val & 0xFF; + u8 val_msb = (val & 0x300) >> 8; + + mutex_lock(&hvleds->lock); + + dev_dbg(hvleds->dev, "ab5500-leds: reg[%d] w val = %d\n" + "reg[%d] w val = %d\n", + ab5500_led_pwmduty_reg[led_id] - 1, val_lsb, + ab5500_led_pwmduty_reg[led_id], val_msb); + + ret = abx500_set_register_interruptible( + hvleds->dev, AB5500_BANK_LED, + ab5500_led_pwmduty_reg[led_id] - 1, 
val_lsb); + ret |= abx500_set_register_interruptible( + hvleds->dev, AB5500_BANK_LED, + ab5500_led_pwmduty_reg[led_id], val_msb); + if (ret < 0) + dev_err(hvleds->dev, "reg[%d] w failed: %d\n", + ab5500_led_pwmduty_reg[led_id], ret); + + mutex_unlock(&hvleds->lock); + + return ret; +} + +static int ab5500_led_pwmfreq_write(struct ab5500_hvleds *hvleds, + unsigned int led_id, u8 val) +{ + int ret; + + val = (val & 0x0F) << AB5500_LED_PWMFREQ_SHIFT; + + mutex_lock(&hvleds->lock); + + dev_dbg(hvleds->dev, "ab5500-leds: reg[%d] w val=%d\n", + ab5500_led_pwmfreq_reg[led_id], val); + + ret = abx500_set_register_interruptible( + hvleds->dev, AB5500_BANK_LED, + ab5500_led_pwmfreq_reg[led_id], val); + if (ret < 0) + dev_err(hvleds->dev, "reg[%d] w failed: %d\n", + ab5500_led_pwmfreq_reg[led_id], ret); + + mutex_unlock(&hvleds->lock); + + return ret; +} + +static int ab5500_led_sinkctl_write(struct ab5500_hvleds *hvleds, + unsigned int led_id, u8 val) +{ + int ret; + + if (val > AB5500_LED_SINKCURR_MAX) + val = AB5500_LED_SINKCURR_MAX; + + val = (val << AB5500_LED_SINKCURR_SHIFT); + + dev_dbg(hvleds->dev, "ab5500-leds: reg[%d] w val=%d\n", + ab5500_led_sinkctl_reg[led_id], val); + + mutex_lock(&hvleds->lock); + + ret = abx500_set_register_interruptible( + hvleds->dev, AB5500_BANK_LED, + ab5500_led_sinkctl_reg[led_id], val); + if (ret < 0) + dev_err(hvleds->dev, "reg[%d] w failed: %d\n", + ab5500_led_sinkctl_reg[led_id], ret); + + mutex_unlock(&hvleds->lock); + + return ret; +} + +static int ab5500_led_fade_write(struct ab5500_hvleds *hvleds, + unsigned int led_id, bool on, u16 val) +{ + int ret; + int val_lsb = val & 0xFF; + int val_msb = (val & 0x300) >> 8; + u8 *fade_reg; + + if (on) + fade_reg = ab5500_led_fade_hi_reg; + else + fade_reg = ab5500_led_fade_lo_reg; + + dev_dbg(hvleds->dev, "ab5500-leds: reg[%d] w val = %d\n" + "reg[%d] w val = %d\n", + fade_reg[led_id] - 1, val_lsb, + fade_reg[led_id], val_msb); + + mutex_lock(&hvleds->lock); + + ret = 
abx500_set_register_interruptible( + hvleds->dev, AB5500_BANK_LED, + fade_reg[led_id] - 1, val_lsb); + ret |= abx500_set_register_interruptible( + hvleds->dev, AB5500_BANK_LED, + fade_reg[led_id], val_msb); + if (ret < 0) + dev_err(hvleds->dev, "reg[%d] w failed: %d\n", + fade_reg[led_id], ret); + + mutex_unlock(&hvleds->lock); + + return ret; +} + +static int ab5500_led_sinkctl_read(struct ab5500_hvleds *hvleds, + unsigned int led_id) +{ + int ret; + u8 val; + + mutex_lock(&hvleds->lock); + + ret = abx500_get_register_interruptible( + hvleds->dev, AB5500_BANK_LED, + ab5500_led_sinkctl_reg[led_id], &val); + if (ret < 0) { + dev_err(hvleds->dev, "reg[%d] r failed: %d\n", + ab5500_led_sinkctl_reg[led_id], ret); + mutex_unlock(&hvleds->lock); + return ret; + } + + val = (val & 0xF0) >> AB5500_LED_SINKCURR_SHIFT; + + mutex_unlock(&hvleds->lock); + + return val; +} + +static void ab5500_led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brt_val) +{ + struct ab5500_led *led = to_led(led_cdev); + + /* adjust LED_FULL to 10bit range */ + brt_val &= LED_FULL; + led->brt_val = brt_val * AB5500_LED_INTENSITY_STEP; + + schedule_work(&led->led_work); +} + +static void ab5500_led_work(struct work_struct *led_work) +{ + struct ab5500_led *led = to_led(led_work); + struct ab5500_hvleds *hvleds = led_to_hvleds(led); + + if (led->led_on == true) { + ab5500_led_pwmduty_write(hvleds, led->id, led->brt_val); + if (hvleds->hw_fade && led->brt_val) { + ab5500_led_enable(hvleds, led->id); + ab5500_led_start_manual(hvleds); + } + } +} + +static ssize_t ab5500_led_show_current(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int led_curr = 0; + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct ab5500_led *led = to_led(led_cdev); + struct ab5500_hvleds *hvleds = led_to_hvleds(led); + + led_curr = ab5500_led_sinkctl_read(hvleds, led->id); + + if (led_curr < 0) + return led_curr; + + return sprintf(buf, "%d\n", led_curr); +} + +static 
ssize_t ab5500_led_store_current(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + int ret; + unsigned long led_curr; + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct ab5500_led *led = to_led(led_cdev); + struct ab5500_hvleds *hvleds = led_to_hvleds(led); + + if (strict_strtoul(buf, 0, &led_curr)) + return -EINVAL; + + if (led_curr > led->max_current) + led_curr = led->max_current; + + ret = ab5500_led_sinkctl_write(hvleds, led->id, led_curr); + if (ret < 0) + return ret; + + return len; +} + +static ssize_t ab5500_led_store_fade_auto(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + int ret; + u8 fade_ctrl = 0; + unsigned long fade_auto; + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct ab5500_led *led = to_led(led_cdev); + struct ab5500_hvleds *hvleds = led_to_hvleds(led); + + if (strict_strtoul(buf, 0, &fade_auto)) + return -EINVAL; + + if (fade_auto > 1) { + dev_err(hvleds->dev, "invalid mode\n"); + return -EINVAL; + } + + mutex_lock(&hvleds->lock); + + ret = abx500_get_register_interruptible( + hvleds->dev, AB5500_BANK_LED, + AB5500_LED_FADE_CTRL, &fade_ctrl); + if (ret < 0) { + dev_err(hvleds->dev, "reg[%d] w failed: %d\n", + AB5500_LED_FADE_CTRL, ret); + goto unlock_and_return; + } + + /* manual mode */ + if (fade_auto == false) { + fade_ctrl &= ~(AB5500_LED_FADE_ENABLE(led->id)); + if (!(fade_ctrl & AB5500_FADE_ON_MASK)) + fade_ctrl = 0; + + ret = ab5500_led_disable(hvleds, led->id); + if (ret < 0) + goto unlock_and_return; + } else { + /* set led auto enable bit */ + fade_ctrl |= AB5500_FADE_MODE_MASK; + fade_ctrl |= AB5500_LED_FADE_ENABLE(led->id); + + /* set fade delay */ + fade_ctrl &= ~AB5500_FADE_DELAY_MASK; + fade_ctrl |= hvleds->fade_delay << AB5500_FADE_DELAY_SHIFT; + + /* set fade start manual */ + fade_ctrl |= AB5500_FADE_START_MASK; + + /* enble corresponding led */ + ret = ab5500_led_enable(hvleds, led->id); + if (ret < 0) + goto 
unlock_and_return; + + } + + ret = abx500_set_register_interruptible( + hvleds->dev, AB5500_BANK_LED, + AB5500_LED_FADE_CTRL, fade_ctrl); + if (ret < 0) { + dev_err(hvleds->dev, "reg[%d] w failed: %d\n", + AB5500_LED_FADE_CTRL, ret); + goto unlock_and_return; + } + + hvleds->fade_auto = fade_auto; + + ret = len; + +unlock_and_return: + mutex_unlock(&hvleds->lock); + + return ret; +} + +static ssize_t ab5500_led_show_fade_auto(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct ab5500_led *led = to_led(led_cdev); + struct ab5500_hvleds *hvleds = led_to_hvleds(led); + + return sprintf(buf, "%d\n", hvleds->fade_auto); +} + +static ssize_t ab5500_led_store_fade_delay(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + unsigned long fade_delay; + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct ab5500_led *led = to_led(led_cdev); + struct ab5500_hvleds *hvleds = led_to_hvleds(led); + + if (strict_strtoul(buf, 0, &fade_delay)) + return -EINVAL; + + if (fade_delay > AB5500_FADE_DELAY_TWOSEC) { + dev_err(hvleds->dev, "invalid mode\n"); + return -EINVAL; + } + + hvleds->fade_delay = fade_delay; + + return len; +} + +/* led class device attributes */ +static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, + ab5500_led_show_current, ab5500_led_store_current); +static DEVICE_ATTR(fade_auto, S_IRUGO | S_IWUGO, + ab5500_led_show_fade_auto, ab5500_led_store_fade_auto); +static DEVICE_ATTR(fade_delay, S_IRUGO | S_IWUGO, + NULL, ab5500_led_store_fade_delay); + +static int ab5500_led_init_registers(struct ab5500_hvleds *hvleds) +{ + int ret = 0; + unsigned int led_id; + + /* fade - manual : dur mid : pwm duty mid */ + if (!hvleds->hw_fade) { + ret = abx500_set_register_interruptible( + hvleds->dev, AB5500_BANK_LED, + AB5500_LED_REG_ENABLE, true); + if (ret < 0) { + dev_err(hvleds->dev, "reg[%d] w failed: %d\n", + AB5500_LED_REG_ENABLE, ret); + return ret; + 
} + } + + for (led_id = 0; led_id < AB5500_HVLEDS_MAX; led_id++) { + if (hvleds->leds[led_id].led_on == false) + continue; + + ret = ab5500_led_sinkctl_write( + hvleds, led_id, + hvleds->leds[led_id].max_current); + if (ret < 0) + return ret; + + if (hvleds->hw_fade) { + ret = ab5500_led_pwmfreq_write( + hvleds, led_id, + AB5500_LED_PWMFREQ_MAX / 2); + if (ret < 0) + return ret; + + /* fade high intensity */ + ret = ab5500_led_fade_write( + hvleds, led_id, true, + hvleds->leds[led_id].fade_hi); + if (ret < 0) + return ret; + + /* fade low intensity */ + ret = ab5500_led_fade_write( + hvleds, led_id, false, + hvleds->leds[led_id].fade_lo); + if (ret < 0) + return ret; + } + + /* init led off */ + ret |= ab5500_led_pwmduty_write( + hvleds, led_id, AB5500_LED_INTENSITY_OFF); + if (ret < 0) + return ret; + } + + return ret; +} + +static int ab5500_led_register_leds(struct device *dev, + struct ab5500_hvleds_platform_data *pdata, + struct ab5500_hvleds *hvleds) +{ + int i_led; + int ret = 0; + struct ab5500_led_conf *pled; + struct ab5500_led *led; + + hvleds->dev = dev; + hvleds->pdata = pdata; + + if (abx500_get_chip_id(dev) == AB5500_2_0) + hvleds->hw_fade = true; + else + hvleds->hw_fade = false; + + for (i_led = 0; i_led < AB5500_HVLEDS_MAX; i_led++) { + pled = &pdata->leds[i_led]; + led = &hvleds->leds[i_led]; + + INIT_WORK(&led->led_work, ab5500_led_work); + + led->id = pled->led_id; + led->max_current = pled->max_current; + led->led_on = pled->led_on; + led->led_cdev.name = pled->name; + led->led_cdev.brightness_set = ab5500_led_brightness_set; + + /* Provide interface only for enabled LEDs */ + if (led->led_on == false) + continue; + + if (hvleds->hw_fade) { + led->fade_hi = (pled->fade_hi & LED_FULL); + led->fade_hi *= AB5500_LED_INTENSITY_STEP; + led->fade_lo = (pled->fade_lo & LED_FULL); + led->fade_lo *= AB5500_LED_INTENSITY_STEP; + } + + ret = led_classdev_register(dev, &led->led_cdev); + if (ret < 0) { + dev_err(dev, "Register led class failed: %d\n", 
ret); + goto bailout1; + } + + ret = device_create_file(led->led_cdev.dev, + &dev_attr_led_current); + if (ret < 0) { + dev_err(dev, "sysfs device creation failed: %d\n", ret); + goto bailout2; + } + + if (hvleds->hw_fade) { + ret = device_create_file(led->led_cdev.dev, + &dev_attr_fade_auto); + if (ret < 0) { + dev_err(dev, "sysfs device " + "creation failed: %d\n", ret); + goto bailout3; + } + + ret = device_create_file(led->led_cdev.dev, + &dev_attr_fade_delay); + if (ret < 0) { + dev_err(dev, "sysfs device " + "creation failed: %d\n", ret); + goto bailout4; + } + } + } + + return ret; + for (; i_led >= 0; i_led--) { + if (hvleds->leds[i_led].led_on == false) + continue; + + if (hvleds->hw_fade) { + device_remove_file(hvleds->leds[i_led].led_cdev.dev, + &dev_attr_fade_delay); +bailout4: + device_remove_file(hvleds->leds[i_led].led_cdev.dev, + &dev_attr_fade_auto); + } +bailout3: + device_remove_file(hvleds->leds[i_led].led_cdev.dev, + &dev_attr_led_current); +bailout2: + led_classdev_unregister(&hvleds->leds[i_led].led_cdev); +bailout1: + cancel_work_sync(&hvleds->leds[i_led].led_work); + } + return ret; +} + +static int __devinit ab5500_hvleds_probe(struct platform_device *pdev) +{ + struct ab5500_hvleds_platform_data *pdata = pdev->dev.platform_data; + struct ab5500_hvleds *hvleds = NULL; + int ret = 0, i; + + if (pdata == NULL) { + dev_err(&pdev->dev, "platform data required\n"); + ret = -ENODEV; + goto err_out; + } + + hvleds = kzalloc(sizeof(struct ab5500_hvleds), GFP_KERNEL); + if (hvleds == NULL) { + ret = -ENOMEM; + goto err_out; + } + + mutex_init(&hvleds->lock); + + /* init leds data and register led_classdev */ + ret = ab5500_led_register_leds(&pdev->dev, pdata, hvleds); + if (ret < 0) { + dev_err(&pdev->dev, "leds registration failed\n"); + goto err_out; + } + + /* init device registers and set initial led current */ + ret = ab5500_led_init_registers(hvleds); + if (ret < 0) { + dev_err(&pdev->dev, "reg init failed: %d\n", ret); + goto err_reg_init; + 
} + + if (hvleds->hw_fade) + dev_info(&pdev->dev, "v2 enabled\n"); + else + dev_info(&pdev->dev, "v1 enabled\n"); + + return ret; + +err_reg_init: + for (i = 0; i < AB5500_HVLEDS_MAX; i++) { + struct ab5500_led *led = &hvleds->leds[i]; + + if (led->led_on == false) + continue; + + device_remove_file(led->led_cdev.dev, &dev_attr_led_current); + if (hvleds->hw_fade) { + device_remove_file(led->led_cdev.dev, + &dev_attr_fade_auto); + device_remove_file(led->led_cdev.dev, + &dev_attr_fade_delay); + } + led_classdev_unregister(&led->led_cdev); + cancel_work_sync(&led->led_work); + } +err_out: + kfree(hvleds); + return ret; +} + +static int __devexit ab5500_hvleds_remove(struct platform_device *pdev) +{ + struct ab5500_hvleds *hvleds = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < AB5500_HVLEDS_MAX; i++) { + struct ab5500_led *led = &hvleds->leds[i]; + + if (led->led_on == false) + continue; + + device_remove_file(led->led_cdev.dev, &dev_attr_led_current); + if (hvleds->hw_fade) { + device_remove_file(led->led_cdev.dev, + &dev_attr_fade_auto); + device_remove_file(led->led_cdev.dev, + &dev_attr_fade_delay); + } + led_classdev_unregister(&led->led_cdev); + cancel_work_sync(&led->led_work); + } + kfree(hvleds); + return 0; +} + +static struct platform_driver ab5500_hvleds_driver = { + .driver = { + .name = AB5500LED_NAME, + .owner = THIS_MODULE, + }, + .probe = ab5500_hvleds_probe, + .remove = __devexit_p(ab5500_hvleds_remove), +}; + +static int __init ab5500_hvleds_module_init(void) +{ + return platform_driver_register(&ab5500_hvleds_driver); +} + +static void __exit ab5500_hvleds_module_exit(void) +{ + platform_driver_unregister(&ab5500_hvleds_driver); +} + +module_init(ab5500_hvleds_module_init); +module_exit(ab5500_hvleds_module_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>"); +MODULE_DESCRIPTION("Driver for AB5500 HVLED"); + diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c 
index 968fd5fef4f..41aed6c89ab 100644 --- a/drivers/leds/leds-lm3530.c +++ b/drivers/leds/leds-lm3530.c @@ -19,6 +19,7 @@ #include <linux/types.h> #include <linux/regulator/consumer.h> #include <linux/module.h> +#include <linux/gpio.h> #define LM3530_LED_DEV "lcd-backlight" #define LM3530_NAME "lm3530-led" @@ -101,6 +102,7 @@ static struct lm3530_mode_map mode_map[] = { * @mode: mode of operation - manual, ALS, PWM * @regulator: regulator * @brighness: previous brightness value + * @hw_en_gpio: GPIO line for LM3530 HWEN * @enable: regulator is enabled */ struct lm3530_data { @@ -110,6 +112,7 @@ struct lm3530_data { enum lm3530_mode mode; struct regulator *regulator; enum led_brightness brightness; + int hw_en_gpio; bool enable; }; @@ -151,7 +154,7 @@ static int lm3530_init_registers(struct lm3530_data *drvdata) u8 als_imp_sel = 0; u8 brightness; u8 reg_val[LM3530_REG_MAX]; - u8 zones[LM3530_ALS_ZB_MAX]; + u8 zones[LM3530_ALS_ZB_MAX] = {0}; u32 als_vmin, als_vmax, als_vstep; struct lm3530_platform_data *pdata = drvdata->pdata; struct i2c_client *client = drvdata->client; @@ -230,6 +233,8 @@ static int lm3530_init_registers(struct lm3530_data *drvdata) reg_val[13] = LM3530_DEF_ZT_4; /* LM3530_ALS_Z4T_REG */ if (!drvdata->enable) { + if (drvdata->hw_en_gpio != LM3530_NO_HWEN_GPIO) + gpio_set_value(drvdata->hw_en_gpio, 1); ret = regulator_enable(drvdata->regulator); if (ret) { dev_err(&drvdata->client->dev, @@ -294,6 +299,8 @@ static void lm3530_brightness_set(struct led_classdev *led_cdev, if (err) dev_err(&drvdata->client->dev, "Disable regulator failed\n"); + if (drvdata->hw_en_gpio != LM3530_NO_HWEN_GPIO) + gpio_set_value(drvdata->hw_en_gpio, 0); drvdata->enable = false; } break; @@ -397,6 +404,7 @@ static int __devinit lm3530_probe(struct i2c_client *client, drvdata->client = client; drvdata->pdata = pdata; drvdata->brightness = LED_OFF; + drvdata->hw_en_gpio = pdata->hw_en_gpio; drvdata->enable = false; drvdata->led_dev.name = LM3530_LED_DEV; 
drvdata->led_dev.brightness_set = lm3530_brightness_set; @@ -404,6 +412,15 @@ static int __devinit lm3530_probe(struct i2c_client *client, i2c_set_clientdata(client, drvdata); + if (gpio_is_valid(drvdata->hw_en_gpio)) { + err = gpio_request_one(drvdata->hw_en_gpio, GPIOF_OUT_INIT_HIGH, + "lm3530_hw_en"); + if (err < 0) { + dev_err(&client->dev, "lm3530 hw_en gpio failed: %d\n", err); + goto err_gpio_request; + } + } + drvdata->regulator = regulator_get(&client->dev, "vin"); if (IS_ERR(drvdata->regulator)) { dev_err(&client->dev, "regulator get failed\n"); @@ -443,6 +460,10 @@ err_class_register: err_reg_init: regulator_put(drvdata->regulator); err_regulator_get: + if (gpio_is_valid(drvdata->hw_en_gpio)) + gpio_free(drvdata->hw_en_gpio); +err_gpio_request: + i2c_set_clientdata(client, NULL); kfree(drvdata); err_out: return err; @@ -457,6 +478,8 @@ static int __devexit lm3530_remove(struct i2c_client *client) if (drvdata->enable) regulator_disable(drvdata->regulator); regulator_put(drvdata->regulator); + if (gpio_is_valid(drvdata->hw_en_gpio)) + gpio_free(drvdata->hw_en_gpio); led_classdev_unregister(&drvdata->led_dev); kfree(drvdata); return 0; diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c index 410a723b869..2f667071278 100644 --- a/drivers/leds/leds-lp5521.c +++ b/drivers/leds/leds-lp5521.c @@ -355,7 +355,12 @@ static int lp5521_do_store_load(struct lp5521_engine *engine, while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) { /* separate sscanfs because length is working only for %s */ ret = sscanf(buf + offset, "%2s%n ", c, &nrchars); - if (ret != 2) + /* + * Execution of a %n directive does not always + * increment the assignment count returned at + * completion of execution.so ret need not be 2 + */ + if ((ret != 1) && (ret != 2)) goto fail; ret = sscanf(c, "%2x", &cmd); if (ret != 1) @@ -787,6 +792,7 @@ static int __devinit lp5521_probe(struct i2c_client *client, ret = lp5521_read(client, LP5521_REG_R_CURRENT, &buf); if (buf != 
LP5521_REG_R_CURR_DEFAULT) { dev_err(&client->dev, "error in resetting chip\n"); + ret = -EIO; goto fail2; } usleep_range(10000, 20000); diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c index 3ed92f34bd4..9c5f7136b37 100644 --- a/drivers/leds/leds-pwm.c +++ b/drivers/leds/leds-pwm.c @@ -27,9 +27,60 @@ struct led_pwm_data { struct led_classdev cdev; struct pwm_device *pwm; unsigned int active_low; + unsigned int lth_brightness; unsigned int period; + unsigned int dutycycle_steps; + unsigned int period_steps; }; +static int led_pwm_blink_set(struct led_classdev *led_cdev, + unsigned long *delay_on, unsigned long *delay_off) +{ + struct led_pwm_data *led_dat = + container_of(led_cdev, struct led_pwm_data, cdev); + int dutycycle_ms, period_sec; + int dutycycle, period; + /* + * If both the delays are zero set some sensible delay + */ + if (*delay_on == 0 && *delay_off == 0) { + *delay_on = 500; + *delay_off = 500; + } + /* + * calculate the duty cycle from on and off time + */ + dutycycle_ms = ((*delay_on * 1000)/(*delay_on + *delay_off)); + /* + * convert calculated value to write into the PWM out register + */ + if (led_dat->dutycycle_steps) + dutycycle = ((dutycycle_ms * led_dat->dutycycle_steps)/1000); + else + dutycycle = (dutycycle_ms/1000); + /* + * calculate period from on and off time(msec) + */ + period_sec = ((*delay_on + *delay_off)/1000); + /* + * convert calculated value to write into the PWM out register + */ + if (led_dat->period_steps) { + if ((*delay_on + *delay_off) == 500) + period = led_dat->period_steps; + else + period = led_dat->period_steps - period_sec; + } + else + period = period_sec; + /* + * configure the PWM registers and enable blink functionality + */ + pwm_config_blink(led_dat->pwm, dutycycle, period); + pwm_blink_ctrl(led_dat->pwm, 1); + return 0; +} + static void led_pwm_set(struct led_classdev *led_cdev, enum led_brightness brightness) { @@ -42,7 +93,10 @@ static void led_pwm_set(struct led_classdev *led_cdev, 
pwm_config(led_dat->pwm, 0, period); pwm_disable(led_dat->pwm); } else { - pwm_config(led_dat->pwm, brightness * period / max, period); + brightness = led_dat->lth_brightness + (brightness * + (led_dat->period - led_dat->lth_brightness) / max); + pwm_config(led_dat->pwm, brightness, led_dat->period); + pwm_enable(led_dat->pwm); } } @@ -79,8 +133,13 @@ static int led_pwm_probe(struct platform_device *pdev) led_dat->cdev.default_trigger = cur_led->default_trigger; led_dat->active_low = cur_led->active_low; led_dat->period = cur_led->pwm_period_ns; + led_dat->lth_brightness = cur_led->lth_brightness * + (cur_led->pwm_period_ns / cur_led->max_brightness); + led_dat->dutycycle_steps = cur_led->dutycycle_steps; + led_dat->period_steps = cur_led->period_steps; led_dat->cdev.brightness_set = led_pwm_set; led_dat->cdev.brightness = LED_OFF; + led_dat->cdev.blink_set = led_pwm_blink_set; led_dat->cdev.max_brightness = cur_led->max_brightness; led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 11e44386fa9..825673af5f3 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -331,6 +331,17 @@ config MFD_TC3589X additional drivers must be enabled in order to use the functionality of the device. +config MFD_TC35892 + bool "Support Toshiba TC35892" + depends on I2C=y && GENERIC_HARDIRQS + select MFD_CORE + help + Support for the Toshiba TC35892 I/O Expander. + + This driver provides common support for accessing the device, + additional drivers must be enabled in order to use the + functionality of the device. + config MFD_TMIO bool default n @@ -359,6 +370,27 @@ config MFD_TC6393XB help Support for Toshiba Mobile IO Controller TC6393XB +config AB5500_CORE + bool "ST-Ericsson AB5500 Mixed Signal Circuit core functions" + select MFD_CORE + depends on GENERIC_HARDIRQS && ABX500_CORE + help + Select this to enable the AB5500 Mixed Signal IC core + functionality. 
This connects to a AB5500 chip on the I2C bus via + the Power and Reset Management Unit (PRCMU). It exposes a number + of symbols needed for dependent devices to read and write + registers and subscribe to events from this multi-functional IC. + This is needed to use other features of the AB5500 such as + battery-backed RTC, charging control, Regulators, LEDs, vibrator, + system power and temperature, power management and ALSA sound. + +config AB5500_GPADC + bool "AB5500 GPADC driver" + depends on AB5500_CORE + default y + help + AB5500 GPADC driver used to convert battery/usb voltage. + config PMIC_DA903X bool "Dialog Semiconductor DA9030/DA9034 PMIC Support" depends on I2C=y @@ -678,7 +710,7 @@ config AB8500_CORE config AB8500_I2C_CORE bool "AB8500 register access via PRCMU I2C" - depends on AB8500_CORE && MFD_DB8500_PRCMU + depends on AB8500_CORE default y help This enables register access to the AB8500 chip via PRCMU I2C. @@ -686,6 +718,14 @@ config AB8500_I2C_CORE the I2C bus is connected to the Power Reset and Mangagement Unit, PRCMU. +config AB8500_DENC + bool "AB8500_DENC driver support(CVBS)" + depends on AB8500_CORE + help + Select this option to add driver support for analog TV out through + AB8500. + + config AB8500_DEBUG bool "Enable debug info via debugfs" depends on AB8500_CORE && DEBUG_FS @@ -696,10 +736,10 @@ config AB8500_DEBUG config AB8500_GPADC bool "AB8500 GPADC driver" - depends on AB8500_CORE && REGULATOR_AB8500 + depends on AB8500_CORE default y help - AB8500 GPADC driver used to convert Acc and battery/ac/usb voltage + AB8500 GPADC driver used to convert Acc and battery/ac/usb voltage. 
config MFD_DB8500_PRCMU bool "ST-Ericsson DB8500 Power Reset Control Management Unit" diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 05fa538c5ef..cfed0402931 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -2,6 +2,7 @@ # Makefile for multifunction miscellaneous devices # +obj-$(CONFIG_AB5500_CORE) += ab5500-core.o ab5500-power.o 88pm860x-objs := 88pm860x-core.o 88pm860x-i2c.o obj-$(CONFIG_MFD_88PM860X) += 88pm860x.o obj-$(CONFIG_MFD_SM501) += sm501.o @@ -19,6 +20,7 @@ obj-$(CONFIG_MFD_STMPE) += stmpe.o obj-$(CONFIG_STMPE_I2C) += stmpe-i2c.o obj-$(CONFIG_STMPE_SPI) += stmpe-spi.o obj-$(CONFIG_MFD_TC3589X) += tc3589x.o +obj-$(CONFIG_MFD_TC35892) += tc35892.o obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o tmio_core.o obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o tmio_core.o obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o @@ -91,11 +93,13 @@ obj-$(CONFIG_AB5500_CORE) += ab5500-core.o obj-$(CONFIG_AB5500_DEBUG) += ab5500-debugfs.o obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o +obj-$(CONFIG_AB8500_DENC) += ab8500-denc.o obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o obj-$(CONFIG_MFD_DB8500_PRCMU) += db8500-prcmu.o # ab8500-i2c need to come after db8500-prcmu (which provides the channel) obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o obj-$(CONFIG_MFD_DB5500_PRCMU) += db5500-prcmu.o +obj-$(CONFIG_AB5500_GPADC) += ab5500-gpadc.o obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o obj-$(CONFIG_PMIC_ADP5520) += adp5520.o obj-$(CONFIG_LPC_SCH) += lpc_sch.o diff --git a/drivers/mfd/ab5500-core.c b/drivers/mfd/ab5500-core.c index 54d0fe40845..e8ae5d945e4 100644 --- a/drivers/mfd/ab5500-core.c +++ b/drivers/mfd/ab5500-core.c @@ -991,6 +991,74 @@ static struct mfd_cell ab5500_devs[AB5500_NUM_DEVICES] = { }, }, }, + [AB5500_DEVID_TEMPMON] = { + .name = "abx500-temp", + .id = AB5500_DEVID_TEMPMON, + .num_resources = 1, + .resources = (struct resource[]) { + { + .name = "ABX500_TEMP_WARM", + .flags = 
IORESOURCE_IRQ, + .start = AB5500_IRQ(2, 2), + .end = AB5500_IRQ(2, 2), + }, + }, + }, + [AB5500_DEVID_ACCDET] = { + .name = "ab5500-acc-det", + .id = AB5500_DEVID_ACCDET, + .num_resources = 8, + .resources = (struct resource[]) { + { + .name = "acc_detedt22db_rising", + .flags = IORESOURCE_IRQ, + .start = AB5500_IRQ(2, 7), + .end = AB5500_IRQ(2, 7), + }, + { + .name = "acc_detedt21db_falling", + .flags = IORESOURCE_IRQ, + .start = AB5500_IRQ(2, 6), + .end = AB5500_IRQ(2, 6), + }, + { + .name = "acc_detedt21db_rising", + .flags = IORESOURCE_IRQ, + .start = AB5500_IRQ(2, 5), + .end = AB5500_IRQ(2, 5), + }, + { + .name = "acc_detedt3db_falling", + .flags = IORESOURCE_IRQ, + .start = AB5500_IRQ(3, 4), + .end = AB5500_IRQ(3, 4), + }, + { + .name = "acc_detedt3db_rising", + .flags = IORESOURCE_IRQ, + .start = AB5500_IRQ(3, 3), + .end = AB5500_IRQ(3, 3), + }, + { + .name = "acc_detedt1db_falling", + .flags = IORESOURCE_IRQ, + .start = AB5500_IRQ(3, 2), + .end = AB5500_IRQ(3, 2), + }, + { + .name = "acc_detedt1db_rising", + .flags = IORESOURCE_IRQ, + .start = AB5500_IRQ(3, 1), + .end = AB5500_IRQ(3, 1), + }, + { + .name = "acc_detedt22db_falling", + .flags = IORESOURCE_IRQ, + .start = AB5500_IRQ(3, 0), + .end = AB5500_IRQ(3, 0), + }, + }, + }, }; /* @@ -1301,6 +1369,10 @@ static const struct ab_family_id ids[] __initdata = { .id = AB5500_1_1, .name = "1.1" }, + { + .id = AB5500_2_0, + .name = "2.0" + }, /* Terminator */ { .id = 0x00, diff --git a/drivers/mfd/ab5500-gpadc.c b/drivers/mfd/ab5500-gpadc.c new file mode 100644 index 00000000000..fe05ffbadfd --- /dev/null +++ b/drivers/mfd/ab5500-gpadc.c @@ -0,0 +1,1256 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * License Terms: GNU General Public License v2 + * Author: Vijaya Kumar K <vijay.kilari@stericsson.com> + */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/workqueue.h> 
+#include <linux/spinlock.h> +#include <linux/regulator/consumer.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <linux/mfd/abx500.h> +#include <linux/mfd/abx500/ab5500.h> +#include <linux/mfd/abx500/ab5500-gpadc.h> + +/* + * Manual mode ADC registers + */ +#define AB5500_GPADC_MANUAL_STAT_REG 0x1F +#define AB5500_GPADC_MANDATAL_REG 0x21 +#define AB5500_GPADC_MANDATAH_REG 0x20 +#define AB5500_GPADC_MANUAL_MUX_CTRL 0x22 +#define AB5500_GPADC_MANUAL_MODE_CTRL 0x23 +#define AB5500_GPADC_MANUAL_MODE_CTRL2 0x24 +/* + * Auto/Polling mode ADC registers + */ +#define AB5500_GPADC_AUTO_VBAT_MAX 0x26 +#define AB5500_GPADC_AUTO_VBAT_MIN_TXON 0x27 +#define AB5500_GPADC_AUTO_VBAT_MIN_NOTX 0x28 +#define AB5500_GPADC_AUTO_VBAT_AVGH 0x29 +#define AB5500_GPADC_AUTO_VBAT_AVGL 0x2A +#define AB5500_GPADC_AUTO_ICHAR_MAX 0x2B +#define AB5500_GPADC_AUTO_ICHAR_MIN 0x2C +#define AB5500_GPADC_AUTO_ICHAR_AVG 0x2D +#define AB5500_GPADC_AUTO_CTRL2 0x2F +#define AB5500_GPADC_AUTO_CTRL1 0x30 +#define AB5500_GPADC_AUTO_PWR_CTRL 0x31 +#define AB5500_GPADC_AUTO_TRIG_VBAT_MIN_TXON 0x32 +#define AB5500_GPADC_AUTO_TRIG_VBAT_MIN_NOTX 0x33 +#define AB5500_GPADC_AUTO_TRIG_ADOUT0_CTRL 0x34 +#define AB5500_GPADC_AUTO_TRIG_ADOUT1_CTRL 0x35 +#define AB5500_GPADC_AUTO_TRIG0_MUX_CTRL 0x37 +#define AB5500_GPADC_AUTO_XTALTEMP_CTRL 0x57 +#define AB5500_GPADC_KELVIN_CTRL 0xFE + +/* gpadc constants */ +#define AB5500_INT_ADC_TRIG0 0x0 +#define AB5500_INT_ADC_TRIG1 0x1 +#define AB5500_INT_ADC_TRIG2 0x2 +#define AB5500_INT_ADC_TRIG3 0x3 +#define AB5500_INT_ADC_TRIG4 0x4 +#define AB5500_INT_ADC_TRIG5 0x5 +#define AB5500_INT_ADC_TRIG6 0x6 +#define AB5500_INT_ADC_TRIG7 0x7 + +#define AB5500_GPADC_AUTO_TRIG_INDEX AB5500_GPADC_AUTO_TRIG0_MUX_CTRL +#define GPADC_MANUAL_READY 0x01 +#define GPADC_MANUAL_ADOUT0_MASK 0x30 +#define GPADC_MANUAL_ADOUT1_MASK 0xC0 +#define GPADC_MANUAL_ADOUT0_ON 0x10 +#define GPADC_MANUAL_ADOUT1_ON 0x40 +#define MUX_SCALE_GPADC0_MASK 0x08 +#define 
MUX_SCALE_VBAT_MASK 0x02 +#define MUX_SCALE_45 0x02 +#define MUX_SCALE_BDATA_MASK 0x01 +#define MUX_SCALE_BDATA27 0x00 +#define MUX_SCALE_BDATA18 0x01 +#define MUX_SCALE_ACCDET2_MASK 0x01 +#define MUX_SCALE_ACCDET3_MASK 0x02 +#define GPADC0_SCALE_VOL27 0x00 +#define GPADC0_SCALE_VOL18 0x01 +#define ACCDET2_SCALE_VOL27 0x00 +#define ACCDET3_SCALE_VOL27 0x00 +#define TRIGX_FREQ_MASK 0x07 +#define AUTO_VBAT_MASK 0x10 +#define AUTO_VBAT_ON 0x10 +#define TRIG_VBAT_TXON_ARM_MASK 0x08 +#define TRIG_VBAT_NOTX_ARM_MASK 0x04 +#define TRIGX_ARM_MASK 0x20 +#define TRIGX_ARM 0x20 +#define TRIGX_MUX_SELECT 0x1F +#define ADC_CAL_OFF_MASK 0x04 +#define ADC_ON_MODE_MASK 0x03 +#define ADC_CAL_ON 0x00 +#define ADC_FULLPWR 0x03 +#define ADC_XTAL_FORCE_MASK 0x80 +#define ADC_XTAL_FORCE_EN 0x80 +#define ADC_XTAL_FORCE_DI 0x00 +#define ADOUT0 0x01 +#define ADOUT1 0x02 +#define MIN_INDEX 0x02 +#define MAX_INDEX 0x03 +#define CTRL_INDEX 0x01 +#define KELVIN_SCALE_VOL45 0x00 + +/* GPADC constants from AB5500 spec */ +#define GPADC0_MIN 0 +#define GPADC0_MAX 1800 +#define BTEMP_MIN 0 +#define BTEMP_MAX 1800 +#define BDATA_MIN 0 +#define BDATA_MAX 2750 +#define PCBTEMP_MIN 0 +#define PCBTEMP_MAX 1800 +#define XTALTEMP_MIN 0 +#define XTALTEMP_MAX 1800 +#define DIETEMP_MIN 0 +#define DIETEMP_MAX 1800 +#define VBUS_I_MIN 0 +#define VBUS_I_MAX 1600 +#define VBUS_V_MIN 0 +#define VBUS_V_MAX 20000 +#define ACCDET2_MIN 0 +#define ACCDET2_MAX 2500 +#define ACCDET3_MIN 0 +#define ACCDET3_MAX 2500 +#define VBAT_MIN 2300 +#define VBAT_MAX 4500 +#define BKBAT_MIN 0 +#define BKBAT_MAX 2750 +#define USBID_MIN 0 +#define USBID_MAX 1800 +#define KELVIN_MIN 0 +#define KELVIN_MAX 4500 +#define VIBRA_MIN 0 +#define VIBRA_MAX 4500 + +/* This is used for calibration */ +#define ADC_RESOLUTION 1023 +#define AUTO_ADC_RESOLUTION 255 + +/* ADOUT prestart time */ +#define ADOUT0_TRIGX_PRESTART 0x18 + +enum adc_auto_channels { + ADC_INPUT_TRIG0 = 0, + ADC_INPUT_TRIG1, + ADC_INPUT_TRIG2, + ADC_INPUT_TRIG3, + 
ADC_INPUT_TRIG4, + ADC_INPUT_TRIG5, + ADC_INPUT_TRIG6, + ADC_INPUT_TRIG7, + ADC_INPUT_VBAT_TXOFF, + ADC_INPUT_VBAT_TXON, + N_AUTO_TRIGGER +}; + +/** + * struct adc_auto_trigger - AB5500 GPADC auto trigger + * @adc_mux Mux input + * @flag Status of trigger + * @freq Frequency of conversion + * @adout Adout to pull + * @trig_min trigger minimum value + * @trig_max trigger maximum value + * @auto_adc_callback notification callback + */ +struct adc_auto_trigger { + u8 auto_mux; + u8 flag; + u8 freq; + u8 adout; + u8 trig_min; + u8 trig_max; + int (*auto_callb)(int mux); +}; + +/** + * struct ab5500_btemp_interrupts - ab5500 interrupts + * @name: name of the interrupt + * @isr function pointer to the isr + */ +struct ab5500_adc_interrupts { + char *name; + irqreturn_t (*isr)(int irq, void *data); +}; + +/** + * struct ab5500_gpadc - AB5500 GPADC device information + * @chip_id ABB chip id + * @dev: pointer to the struct device + * @node: a list of AB5500 GPADCs, hence prepared for + reentrance + * @ab5500_gpadc_complete: pointer to the struct completion, to indicate + * the completion of gpadc conversion + * @ab5500_gpadc_lock: structure of type mutex + * @regu: pointer to the struct regulator + * @irq: interrupt number that is used by gpadc + * @cal_data array of ADC calibration data structs + * @auto_trig auto trigger channel + * @gpadc_trigX_work work items for trigger channels + */ +struct ab5500_gpadc { + u8 chip_id; + struct device *dev; + struct list_head node; + struct mutex ab5500_gpadc_lock; + struct regulator *regu; + int irq; + int prev_bdata; + spinlock_t gpadc_auto_lock; + struct adc_auto_trigger adc_trig[N_AUTO_TRIGGER]; + struct workqueue_struct *gpadc_wq; + struct work_struct gpadc_trig0_work; + struct work_struct gpadc_trig1_work; + struct work_struct gpadc_trig2_work; + struct work_struct gpadc_trig3_work; + struct work_struct gpadc_trig4_work; + struct work_struct gpadc_trig5_work; + struct work_struct gpadc_trig6_work; + struct work_struct 
gpadc_trig7_work; + struct work_struct gpadc_trig_vbat_txon_work; + struct work_struct gpadc_trig_vbat_txoff_work; +}; + +static LIST_HEAD(ab5500_gpadc_list); + +struct adc_data { + u8 mux; + int min; + int max; + int adout; +}; + +#define ADC_DATA(_id, _mux, _min, _max, _adout) \ + [_id] = { \ + .mux = _mux, \ + .min = _min, \ + .max = _max, \ + .adout = _adout \ + } + +struct adc_data adc_tab[] = { + ADC_DATA(GPADC0_V, 0x00, GPADC0_MIN, GPADC0_MAX, 0), + ADC_DATA(BTEMP_BALL, 0x0D, BTEMP_MIN, BTEMP_MAX, ADOUT0), + ADC_DATA(BAT_CTRL, 0x0D, BDATA_MIN, BDATA_MAX, 0), + ADC_DATA(MAIN_BAT_V, 0x0C, VBAT_MIN, VBAT_MAX, 0), + ADC_DATA(MAIN_BAT_V_TXON, 0x0C, VBAT_MIN, VBAT_MAX, 0), + ADC_DATA(VBUS_V, 0x10, VBUS_V_MIN, VBUS_V_MAX, 0), + ADC_DATA(USB_CHARGER_C, 0x0A, VBUS_I_MIN, VBUS_I_MAX, 0), + ADC_DATA(BK_BAT_V, 0x07, BKBAT_MIN, BKBAT_MAX, 0), + ADC_DATA(DIE_TEMP, 0x0F, DIETEMP_MIN, DIETEMP_MAX, ADOUT0), + ADC_DATA(PCB_TEMP, 0x13, PCBTEMP_MIN, PCBTEMP_MAX, ADOUT0), + ADC_DATA(XTAL_TEMP, 0x06, XTALTEMP_MIN, XTALTEMP_MAX, ADOUT0), + ADC_DATA(USB_ID, 0x1A, USBID_MIN, USBID_MAX, 0), + ADC_DATA(ACC_DETECT2, 0x18, ACCDET2_MIN, ACCDET2_MAX, 0), + ADC_DATA(ACC_DETECT3, 0x19, ACCDET3_MIN, ACCDET3_MAX, 0), + ADC_DATA(MAIN_BAT_V_TRIG_MIN, 0x0C, VBAT_MIN, VBAT_MAX, 0), + ADC_DATA(MAIN_BAT_V_TXON_TRIG_MIN, 0x0C, VBAT_MIN, VBAT_MAX, 0), + ADC_DATA(VIBRA_KELVIN, 0x16, VIBRA_MIN, VIBRA_MAX, 0), +}; + +/** + * ab5500_gpadc_get() - returns a reference to the primary AB5500 GPADC + * (i.e. 
the first GPADC in the instance list) + */ +struct ab5500_gpadc *ab5500_gpadc_get(const char *name) +{ + struct ab5500_gpadc *gpadc; + list_for_each_entry(gpadc, &ab5500_gpadc_list, node) { + if (!strcmp(name, dev_name(gpadc->dev))) + return gpadc; + } + + return ERR_PTR(-ENOENT); +} +EXPORT_SYMBOL(ab5500_gpadc_get); + +#define CONV(min, max, x)\ + ((min) + ((((max)-(min))*(x))/ADC_RESOLUTION)) + +static int ab5500_gpadc_ad_to_voltage(struct ab5500_gpadc *gpadc, + u8 in, u16 ad_val) +{ + int res; + + switch (in) { + case VIBRA_KELVIN: + case GPADC0_V: + case PCB_TEMP: + case BTEMP_BALL: + case MAIN_BAT_V: + case MAIN_BAT_V_TXON: + case ACC_DETECT2: + case ACC_DETECT3: + case VBUS_V: + case USB_CHARGER_C: + case BK_BAT_V: + case XTAL_TEMP: + case USB_ID: + case BAT_CTRL: + res = CONV(adc_tab[in].min, adc_tab[in].max, ad_val); + break; + case DIE_TEMP: + /* + * From the AB5500 product specification + * T(deg cel) = 27 - ((ADCode - 709)/2.4213) + * 27 + 709/2.4213 - ADCode/2.4213 + * 320 - (ADCode/2.4213) + */ + res = 320 - (((unsigned long)ad_val * 10000) / 24213); + break; + default: + dev_err(gpadc->dev, + "unknown channel, not possible to convert\n"); + res = -EINVAL; + break; + } + return res; +} + +/** + * ab5500_gpadc_convert() - gpadc conversion + * @input: analog input to be converted to digital data + * + * This function converts the selected analog i/p to digital + * data. + */ +int ab5500_gpadc_convert(struct ab5500_gpadc *gpadc, u8 input) +{ + int result, ret = -EINVAL; + u16 data = 0; + u8 looplimit = 0; + u8 status = 0; + u8 low_data, high_data, adout_mask, adout_val; + + if (!gpadc) + return -ENODEV; + + mutex_lock(&gpadc->ab5500_gpadc_lock); + + switch (input) { + case MAIN_BAT_V: + case MAIN_BAT_V_TXON: + /* + * The value of mux scale volatage depends + * on the type of battery + * for LI-ion use MUX_SCALE_35 => 2.3-3.5V + * for LiFePo4 use MUX_SCALE_45 => 2.3-4.5V + * Check type of battery from platform data TODO ??? 
+ */ + ret = abx500_mask_and_set_register_interruptible(gpadc->dev, + AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL, + MUX_SCALE_VBAT_MASK, MUX_SCALE_45); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: failed to read status\n"); + goto out; + } + break; + case BTEMP_BALL: + ret = abx500_mask_and_set_register_interruptible(gpadc->dev, + AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL, + MUX_SCALE_BDATA_MASK, MUX_SCALE_BDATA18); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: fail to set mux scale\n"); + goto out; + } + break; + case BAT_CTRL: + ret = abx500_mask_and_set_register_interruptible(gpadc->dev, + AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL, + MUX_SCALE_BDATA_MASK, MUX_SCALE_BDATA27); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: fail to set mux scale\n"); + goto out; + } + break; + case XTAL_TEMP: + ret = abx500_mask_and_set_register_interruptible(gpadc->dev, + AB5500_BANK_ADC, AB5500_GPADC_AUTO_XTALTEMP_CTRL, + ADC_XTAL_FORCE_MASK, ADC_XTAL_FORCE_EN); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: fail to set xtaltemp\n"); + goto out; + } + break; + case GPADC0_V: + ret = abx500_mask_and_set_register_interruptible(gpadc->dev, + AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL, + MUX_SCALE_GPADC0_MASK, GPADC0_SCALE_VOL18); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: fail to set gpadc0\n"); + goto out; + } + break; + case ACC_DETECT2: + ret = abx500_mask_and_set_register_interruptible(gpadc->dev, + AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL2, + MUX_SCALE_ACCDET2_MASK, ACCDET2_SCALE_VOL27); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: fail to set accdet2\n"); + goto out; + } + break; + case ACC_DETECT3: + ret = abx500_mask_and_set_register_interruptible(gpadc->dev, + AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL2, + MUX_SCALE_ACCDET3_MASK, ACCDET3_SCALE_VOL27); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: fail to set accdet3\n"); + goto out; + } + break; + case VIBRA_KELVIN: + ret = abx500_set_register_interruptible(gpadc->dev, + 
AB5500_BANK_ADC, AB5500_GPADC_KELVIN_CTRL, + KELVIN_SCALE_VOL45); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: fail to set kelv scale\n"); + goto out; + } + break; + case USB_CHARGER_C: + case VBUS_V: + case BK_BAT_V: + case USB_ID: + case PCB_TEMP: + case DIE_TEMP: + break; + default: + dev_err(gpadc->dev, "gpadc: Wrong adc\n"); + goto out; + break; + } + if (adc_tab[input].adout) { + adout_mask = adc_tab[input].adout == ADOUT0 ? + GPADC_MANUAL_ADOUT0_MASK : GPADC_MANUAL_ADOUT1_MASK; + adout_val = adc_tab[input].adout == ADOUT0 ? + GPADC_MANUAL_ADOUT0_ON : GPADC_MANUAL_ADOUT1_ON; + ret = abx500_mask_and_set_register_interruptible(gpadc->dev, + AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL, + adout_mask, adout_val); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: fail to set ADOUT\n"); + goto out; + } + /* delay required to ramp up voltage on BDATA node */ + usleep_range(10000, 12000); + } + ret = abx500_set_register_interruptible(gpadc->dev, AB5500_BANK_ADC, + AB5500_GPADC_MANUAL_MUX_CTRL, adc_tab[input].mux); + if (ret < 0) { + dev_err(gpadc->dev, + "gpadc: fail to trigger manual conv\n"); + goto out; + } + /* wait for completion of conversion */ + looplimit = 0; + do { + msleep(1); + ret = abx500_get_register_interruptible(gpadc->dev, + AB5500_BANK_ADC, AB5500_GPADC_MANUAL_STAT_REG, + &status); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: failed to read status\n"); + goto out; + } + if (status & GPADC_MANUAL_READY) + break; + } while (++looplimit < 2); + if (looplimit >= 2) { + dev_err(gpadc->dev, "timeout:failed to complete conversion\n"); + ret = -EINVAL; + goto out; + } + + /* + * Disable ADOUT for measurement + */ + if (adc_tab[input].adout) { + adout_mask = adc_tab[input].adout == ADOUT0 ? 
+ GPADC_MANUAL_ADOUT0_MASK : GPADC_MANUAL_ADOUT1_MASK; + ret = abx500_mask_and_set_register_interruptible(gpadc->dev, + AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL, + adout_mask, 0x0); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: fail to disable ADOUT\n"); + goto out; + } + } + /* + * Disable XTAL TEMP + */ + if (input == XTAL_TEMP) { + ret = abx500_mask_and_set_register_interruptible(gpadc->dev, + AB5500_BANK_ADC, AB5500_GPADC_AUTO_XTALTEMP_CTRL, + ADC_XTAL_FORCE_MASK, ADC_XTAL_FORCE_DI); + if (ret < 0) { + dev_err(gpadc->dev, + "gpadc: fail to disable xtaltemp\n"); + goto out; + } + } + /* Read the converted RAW data */ + ret = abx500_get_register_interruptible(gpadc->dev, AB5500_BANK_ADC, + AB5500_GPADC_MANDATAL_REG, &low_data); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: read low data failed\n"); + goto out; + } + + ret = abx500_get_register_interruptible(gpadc->dev, AB5500_BANK_ADC, + AB5500_GPADC_MANDATAH_REG, &high_data); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: read high data failed\n"); + goto out; + } + + data = (high_data << 2) | (low_data >> 6); + if (input == BAT_CTRL || input == BTEMP_BALL) { + /* + * TODO: Re-check with h/w team + * discard null or value < 5, as there is some error + * in conversion + */ + if (data < 5) + data = gpadc->prev_bdata; + else + gpadc->prev_bdata = data; + } + result = ab5500_gpadc_ad_to_voltage(gpadc, input, data); + + mutex_unlock(&gpadc->ab5500_gpadc_lock); + return result; + +out: + mutex_unlock(&gpadc->ab5500_gpadc_lock); + dev_err(gpadc->dev, + "gpadc: Failed to AD convert channel %d\n", input); + return ret; +} +EXPORT_SYMBOL(ab5500_gpadc_convert); + +/** + * ab5500_gpadc_program_auto() - gpadc conversion auto conversion + * @trig_index: Generic trigger channel for conversion + * + * This function program the auto trigger channel + */ +static int ab5500_gpadc_program_auto(struct ab5500_gpadc *gpadc, int trig) +{ + int ret; + u8 adout; +#define MIN_INDEX 0x02 +#define MAX_INDEX 0x03 +#define CTRL_INDEX 
0x01 + + ret = abx500_set_register_interruptible(gpadc->dev, AB5500_BANK_ADC, + AB5500_GPADC_AUTO_TRIG_INDEX + (trig << 2) + MIN_INDEX, + gpadc->adc_trig[trig].trig_min); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: fail to program min\n"); + return ret; + } + ret = abx500_set_register_interruptible(gpadc->dev, AB5500_BANK_ADC, + AB5500_GPADC_AUTO_TRIG_INDEX + (trig << 2) + MAX_INDEX, + gpadc->adc_trig[trig].trig_max); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: fail to program max\n"); + return ret; + } + ret = abx500_mask_and_set_register_interruptible(gpadc->dev, + AB5500_BANK_ADC, AB5500_GPADC_AUTO_TRIG_INDEX + (trig << 2), + TRIGX_MUX_SELECT, gpadc->adc_trig[trig].auto_mux); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: fail to select mux\n"); + return ret; + } + if (gpadc->adc_trig[trig].adout) { + adout = gpadc->adc_trig[trig].adout == ADOUT0 ? + gpadc->adc_trig[trig].adout << 6 : + gpadc->adc_trig[trig].adout << 5; + ret = abx500_mask_and_set_register_interruptible(gpadc->dev, + AB5500_BANK_ADC, + AB5500_GPADC_AUTO_TRIG_INDEX + (trig << 2) + CTRL_INDEX, + adout, adout); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: fail to program adout\n"); + return ret; + } + } + ret = abx500_mask_and_set_register_interruptible(gpadc->dev, + AB5500_BANK_ADC, + AB5500_GPADC_AUTO_TRIG_INDEX + (trig << 2) + CTRL_INDEX, + TRIGX_FREQ_MASK, gpadc->adc_trig[trig].freq); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: fail to program freq\n"); + return ret; + } + return ret; + +} + +#define TRIG_V(trigval, min, max) \ + ((((trigval) - (min)) * AUTO_ADC_RESOLUTION) / ((max) - (min))) + +static int ab5500_gpadc_vbat_auto_conf(struct ab5500_gpadc *gpadc, + struct adc_auto_input *in) +{ + int trig_min, ret; + u8 trig_reg, trig_arm; + + /* Scale mux voltage */ + ret = abx500_mask_and_set_register_interruptible(gpadc->dev, + AB5500_BANK_ADC, + AB5500_GPADC_MANUAL_MODE_CTRL, + MUX_SCALE_VBAT_MASK, MUX_SCALE_45); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: failed to set vbat 
scale\n"); + return ret; + } + + ret = abx500_mask_and_set_register_interruptible(gpadc->dev, + AB5500_BANK_ADC, + AB5500_GPADC_AUTO_CTRL1, + AUTO_VBAT_MASK, AUTO_VBAT_ON); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: failed to set vbat on\n"); + return ret; + } + + trig_min = TRIG_V(in->min, adc_tab[in->mux].min, adc_tab[in->mux].max); + + if (in->mux == MAIN_BAT_V_TRIG_MIN) { + trig_reg = AB5500_GPADC_AUTO_TRIG_VBAT_MIN_NOTX; + trig_arm = TRIG_VBAT_NOTX_ARM_MASK; + } else { + trig_reg = AB5500_GPADC_AUTO_TRIG_VBAT_MIN_TXON; + trig_arm = TRIG_VBAT_TXON_ARM_MASK; + } + ret = abx500_set_register_interruptible(gpadc->dev, AB5500_BANK_ADC, + trig_reg, trig_min); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: fail to program vbat min\n"); + return ret; + } + /* + * arm the trigger + */ + ret = abx500_mask_and_set_register_interruptible(gpadc->dev, + AB5500_BANK_ADC, AB5500_GPADC_AUTO_CTRL1, trig_arm, trig_arm); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: failed to trig vbat\n"); + return ret; + } + return ret; +} +/** + * ab5500_gpadc_convert_auto() - gpadc conversion + * @auto_input: input trigger for conversion + * + * This function converts the selected channel from + * analog to digital data in auto mode + */ + +int ab5500_gpadc_convert_auto(struct ab5500_gpadc *gpadc, + struct adc_auto_input *in) +{ + int ret, trig; + unsigned long flags; + + if (!gpadc) + return -ENODEV; + mutex_lock(&gpadc->ab5500_gpadc_lock); + + if (in->mux == MAIN_BAT_V_TXON_TRIG_MIN) { + spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags); + if (gpadc->adc_trig[ADC_INPUT_VBAT_TXON].flag == true) { + spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags); + ret = -EBUSY; + dev_err(gpadc->dev, "gpadc: Auto vbat txon busy"); + goto out; + } + spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags); + + ret = ab5500_gpadc_vbat_auto_conf(gpadc, in); + if (ret < 0) + goto out; + + gpadc->adc_trig[ADC_INPUT_VBAT_TXON].auto_mux = in->mux; + gpadc->adc_trig[ADC_INPUT_VBAT_TXON].auto_callb = + 
in->auto_adc_callback; + spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags); + gpadc->adc_trig[ADC_INPUT_VBAT_TXON].flag = true; + spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags); + } else if (in->mux == MAIN_BAT_V_TRIG_MIN) { + + spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags); + if (gpadc->adc_trig[ADC_INPUT_VBAT_TXOFF].flag == true) { + spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags); + ret = -EBUSY; + dev_err(gpadc->dev, "gpadc: Auto vbat busy"); + goto out; + } + spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags); + + ret = ab5500_gpadc_vbat_auto_conf(gpadc, in); + if (ret < 0) + goto out; + + gpadc->adc_trig[ADC_INPUT_VBAT_TXOFF].auto_mux = in->mux; + gpadc->adc_trig[ADC_INPUT_VBAT_TXOFF].auto_callb = + in->auto_adc_callback; + spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags); + gpadc->adc_trig[ADC_INPUT_VBAT_TXOFF].flag = true; + spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags); + } else { + /* + * check if free trigger is available + */ + trig = ADC_INPUT_TRIG0; + spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags); + while (gpadc->adc_trig[trig].flag == true && + trig <= ADC_INPUT_TRIG7) + trig++; + + spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags); + if (trig > ADC_INPUT_TRIG7) { + ret = -EBUSY; + dev_err(gpadc->dev, "gpadc: no free channel\n"); + goto out; + } + switch (in->mux) { + case MAIN_BAT_V: + /* + * The value of mux scale volatage depends + * on the type of battery + * for LI-ion use MUX_SCALE_35 => 2.3-3.5V + * for LiFePo4 use MUX_SCALE_45 => 2.3-4.5V + * Check type of battery from platform data TODO ??? 
+ */ + ret = abx500_mask_and_set_register_interruptible( + gpadc->dev, + AB5500_BANK_ADC, AB5500_GPADC_MANUAL_MODE_CTRL, + MUX_SCALE_VBAT_MASK, MUX_SCALE_45); + if (ret < 0) { + dev_err(gpadc->dev, + "gpadc: failed to read status\n"); + goto out; + } + + case BTEMP_BALL: + ret = abx500_set_register_interruptible( + gpadc->dev, AB5500_BANK_ADC, + AB5500_GPADC_AUTO_TRIG_ADOUT0_CTRL, + ADOUT0_TRIGX_PRESTART); + if (ret < 0) { + dev_err(gpadc->dev, + "gpadc: failed to set prestart\n"); + goto out; + } + + case ACC_DETECT2: + case ACC_DETECT3: + case VBUS_V: + case USB_CHARGER_C: + case BK_BAT_V: + case PCB_TEMP: + case USB_ID: + case BAT_CTRL: + gpadc->adc_trig[trig].trig_min = + (u8)TRIG_V(in->min, adc_tab[in->mux].min, + adc_tab[in->mux].max); + gpadc->adc_trig[trig].trig_max = + (u8)TRIG_V(in->max, adc_tab[in->mux].min, + adc_tab[in->mux].max); + gpadc->adc_trig[trig].adout = + adc_tab[in->mux].adout; + break; + case DIE_TEMP: + /* + * From the AB5500 product specification + * T(deg_cel) = 27 - (ADCode - 709)/2.4213) + * ADCode = 709 + (2.4213 * (27 - T)) + * Auto trigger min/max level is of 8bit precision. + * Hence use AB5500_GPADC_MANDATAH_REG value + * obtained by 2 bit right shift of ADCode. 
+ */ + gpadc->adc_trig[trig].trig_min = + (709 + ((24213 * (27 - in->min))/10000))>>2; + gpadc->adc_trig[trig].trig_max = + (709 + ((24213 * (27 - in->max))/10000))>>2; + gpadc->adc_trig[trig].adout = + adc_tab[in->mux].adout; + break; + default: + dev_err(gpadc->dev, "Unknow GPADC request\n"); + break; + } + gpadc->adc_trig[trig].freq = in->freq; + gpadc->adc_trig[trig].auto_mux = + adc_tab[in->mux].mux; + gpadc->adc_trig[trig].auto_callb = in->auto_adc_callback; + + ret = ab5500_gpadc_program_auto(gpadc, trig); + if (ret < 0) { + dev_err(gpadc->dev, + "gpadc: fail to program auto ch\n"); + goto out; + } + ret = abx500_mask_and_set_register_interruptible(gpadc->dev, + AB5500_BANK_ADC, + AB5500_GPADC_AUTO_TRIG_INDEX + (trig * 4), + TRIGX_ARM_MASK, TRIGX_ARM); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: fail to trigger\n"); + goto out; + } + spin_lock_irqsave(&gpadc->gpadc_auto_lock, flags); + gpadc->adc_trig[trig].flag = true; + spin_unlock_irqrestore(&gpadc->gpadc_auto_lock, flags); + } +out: + mutex_unlock(&gpadc->ab5500_gpadc_lock); + return ret; + +} +EXPORT_SYMBOL(ab5500_gpadc_convert_auto); + +/* sysfs interface for GPADC0 */ +static ssize_t ab5500_gpadc0_get(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int voltage; + struct ab5500_gpadc *gpadc = dev_get_drvdata(dev); + + voltage = ab5500_gpadc_convert(gpadc, GPADC0_V); + + return sprintf(buf, "%d\n", voltage); +} +static DEVICE_ATTR(adc0volt, 0644, ab5500_gpadc0_get, NULL); + +static void ab5500_gpadc_trigx_work(struct ab5500_gpadc *gp, int trig) +{ + unsigned long flags; + if (gp->adc_trig[trig].auto_callb != NULL) { + gp->adc_trig[trig].auto_callb(gp->adc_trig[trig].auto_mux); + spin_lock_irqsave(&gp->gpadc_auto_lock, flags); + gp->adc_trig[trig].flag = false; + spin_unlock_irqrestore(&gp->gpadc_auto_lock, flags); + } else { + dev_err(gp->dev, "Unknown trig for %d\n", trig); + } +} +/** + * ab5500_gpadc_trig0_work() - work item for trig0 auto adc + * @irq: irq number + * @work: 
work pointer + * + * This is a work handler for trig 0 auto conversion. + */ +static void ab5500_gpadc_trig0_work(struct work_struct *work) +{ + struct ab5500_gpadc *gpadc = container_of(work, + struct ab5500_gpadc, gpadc_trig0_work); + ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG0); +} + +/** + * ab5500_gpadc_trig1_work() - work item for trig1 auto adc + * @irq: irq number + * @work: work pointer + * + * This is a work handler for trig1 auto conversion. + */ +static void ab5500_gpadc_trig1_work(struct work_struct *work) +{ + struct ab5500_gpadc *gpadc = container_of(work, + struct ab5500_gpadc, gpadc_trig1_work); + ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG1); +} + +/** + * ab5500_gpadc_trig2_work() - work item for trig2 auto adc + * @irq: irq number + * @work: work pointer + * + * This is a work handler for trig 2 auto conversion. + */ +static void ab5500_gpadc_trig2_work(struct work_struct *work) +{ + struct ab5500_gpadc *gpadc = container_of(work, + struct ab5500_gpadc, gpadc_trig2_work); + ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG2); +} + +/** + * ab5500_gpadc_trig3_work() - work item for trig3 auto adc + * @irq: irq number + * @work: work pointer + * + * This is a work handler for trig 3 auto conversion. + */ +static void ab5500_gpadc_trig3_work(struct work_struct *work) +{ + struct ab5500_gpadc *gpadc = container_of(work, + struct ab5500_gpadc, gpadc_trig3_work); + ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG3); +} + +/** + * ab5500_gpadc_trig4_work() - work item for trig4 auto adc + * @irq: irq number + * @work: work pointer + * + * This is a work handler for trig 4 auto conversion. 
+ */ +static void ab5500_gpadc_trig4_work(struct work_struct *work) +{ + struct ab5500_gpadc *gpadc = container_of(work, + struct ab5500_gpadc, gpadc_trig4_work); + ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG4); +} + +/** + * ab5500_gpadc_trig5_work() - work item for trig5 auto adc + * @irq: irq number + * @work: work pointer + * + * This is a work handler for trig 5 auto conversion. + */ +static void ab5500_gpadc_trig5_work(struct work_struct *work) +{ + struct ab5500_gpadc *gpadc = container_of(work, + struct ab5500_gpadc, gpadc_trig5_work); + ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG5); +} + +/** + * ab5500_gpadc_trig6_work() - work item for trig6 auto adc + * @irq: irq number + * @work: work pointer + * + * This is a work handler for trig 6 auto conversion. + */ +static void ab5500_gpadc_trig6_work(struct work_struct *work) +{ + struct ab5500_gpadc *gpadc = container_of(work, + struct ab5500_gpadc, gpadc_trig6_work); + ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG6); +} + +/** + * ab5500_gpadc_trig7_work() - work item for trig7 auto adc + * @irq: irq number + * @work: work pointer + * + * This is a work handler for trig 7 auto conversion. + */ +static void ab5500_gpadc_trig7_work(struct work_struct *work) +{ + struct ab5500_gpadc *gpadc = container_of(work, + struct ab5500_gpadc, gpadc_trig7_work); + ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_TRIG7); +} + +/** + * ab5500_gpadc_vbat_txon_work() - work item for vbat_txon trigger auto adc + * @irq: irq number + * @work: work pointer + * + * This is a work handler for vbat_txon trigger auto adc. 
+ */ +static void ab5500_gpadc_vbat_txon_work(struct work_struct *work) +{ + struct ab5500_gpadc *gpadc = container_of(work, + struct ab5500_gpadc, gpadc_trig_vbat_txon_work); + ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_VBAT_TXON); +} + +/** + * ab5500_gpadc_vbat_txoff_work() - work item for vbat_txoff trigger auto adc + * @irq: irq number + * @work: work pointer + * + * This is a work handler for vbat_txoff trigger auto adc. + */ +static void ab5500_gpadc_vbat_txoff_work(struct work_struct *work) +{ + struct ab5500_gpadc *gpadc = container_of(work, + struct ab5500_gpadc, gpadc_trig_vbat_txoff_work); + ab5500_gpadc_trigx_work(gpadc, ADC_INPUT_VBAT_TXOFF); +} + +/** + * ab5500_adc_trigx_handler() - isr for auto gpadc conversion trigger + * @irq: irq number + * @data: pointer to the data passed during request irq + * + * This is a interrupt service routine for auto gpadc conversion. + * Returns IRQ status(IRQ_HANDLED) + */ +static irqreturn_t ab5500_adc_trigx_handler(int irq, void *_gpadc) +{ + struct ab5500_platform_data *plat; + struct ab5500_gpadc *gpadc = _gpadc; + int dev_irq; + + plat = dev_get_platdata(gpadc->dev->parent); + dev_irq = irq - plat->irq.base; + + switch (dev_irq) { + case AB5500_INT_ADC_TRIG0: + dev_dbg(gpadc->dev, "Trigger 0 received\n"); + queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig0_work); + break; + case AB5500_INT_ADC_TRIG1: + dev_dbg(gpadc->dev, "Trigger 1 received\n"); + queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig1_work); + break; + case AB5500_INT_ADC_TRIG2: + dev_dbg(gpadc->dev, "Trigger 2 received\n"); + queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig2_work); + break; + case AB5500_INT_ADC_TRIG3: + dev_dbg(gpadc->dev, "Trigger 3 received\n"); + queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig3_work); + break; + case AB5500_INT_ADC_TRIG4: + dev_dbg(gpadc->dev, "Trigger 4 received\n"); + queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig4_work); + break; + case AB5500_INT_ADC_TRIG5: + dev_dbg(gpadc->dev, "Trigger 5 received\n"); + 
queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig5_work); + break; + case AB5500_INT_ADC_TRIG6: + dev_dbg(gpadc->dev, "Trigger 6 received\n"); + queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig6_work); + break; + case AB5500_INT_ADC_TRIG7: + dev_dbg(gpadc->dev, "Trigger 7 received\n"); + queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig7_work); + break; + default: + dev_dbg(gpadc->dev, "unknown trigx handler input\n"); + break; + } + return IRQ_HANDLED; +} + +/** + * ab5500_adc_vbat_txon_handler() - isr for auto vbat_txon conversion trigger + * @irq: irq number + * @data: pointer to the data passed during request irq + * + * This is a interrupt service routine for auto vbat_txon conversion + * Returns IRQ status(IRQ_HANDLED) + */ +static irqreturn_t ab5500_adc_vbat_txon_handler(int irq, void *_gpadc) +{ + struct ab5500_gpadc *gpadc = _gpadc; + + queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig_vbat_txon_work); + return IRQ_HANDLED; +} + +/** + * ab5500_adc_vbat_txoff_handler() - isr for auto vbat_txoff conversion trigger + * @irq: irq number + * @data: pointer to the data passed during request irq + * + * This is a interrupt service routine for auto vbat_txoff conversion + * Returns IRQ status(IRQ_HANDLED) + */ +static irqreturn_t ab5500_adc_vbat_txoff_handler(int irq, void *_gpadc) +{ + struct ab5500_gpadc *gpadc = _gpadc; + + queue_work(gpadc->gpadc_wq, &gpadc->gpadc_trig_vbat_txoff_work); + return IRQ_HANDLED; +} + +/** + * ab5500_gpadc_configuration() - function for gpadc conversion + * @irq: irq number + * @data: pointer to the data passed during request irq + * + * This function configures the gpadc + */ +static int ab5500_gpadc_configuration(struct ab5500_gpadc *gpadc) +{ + int ret; + ret = abx500_mask_and_set_register_interruptible(gpadc->dev, + AB5500_BANK_ADC, AB5500_GPADC_AUTO_CTRL2, + ADC_CAL_OFF_MASK | ADC_ON_MODE_MASK, + ADC_CAL_ON | ADC_FULLPWR); + return ret; +} + +/* ab5500 btemp driver interrupts and their respective isr */ +static struct 
ab5500_adc_interrupts ab5500_adc_irq[] = { + {"TRIGGER-0", ab5500_adc_trigx_handler}, + {"TRIGGER-1", ab5500_adc_trigx_handler}, + {"TRIGGER-2", ab5500_adc_trigx_handler}, + {"TRIGGER-3", ab5500_adc_trigx_handler}, + {"TRIGGER-4", ab5500_adc_trigx_handler}, + {"TRIGGER-5", ab5500_adc_trigx_handler}, + {"TRIGGER-6", ab5500_adc_trigx_handler}, + {"TRIGGER-7", ab5500_adc_trigx_handler}, + {"TRIGGER-VBAT-TXON", ab5500_adc_vbat_txon_handler}, + {"TRIGGER-VBAT", ab5500_adc_vbat_txoff_handler}, +}; + +static int __devinit ab5500_gpadc_probe(struct platform_device *pdev) +{ + int ret, irq, i, j; + struct ab5500_gpadc *gpadc; + + gpadc = kzalloc(sizeof(struct ab5500_gpadc), GFP_KERNEL); + if (!gpadc) { + dev_err(&pdev->dev, "Error: No memory\n"); + return -ENOMEM; + } + gpadc->dev = &pdev->dev; + mutex_init(&gpadc->ab5500_gpadc_lock); + spin_lock_init(&gpadc->gpadc_auto_lock); + + /* Register interrupts */ + for (i = 0; i < ARRAY_SIZE(ab5500_adc_irq); i++) { + irq = platform_get_irq_byname(pdev, ab5500_adc_irq[i].name); + ret = request_threaded_irq(irq, NULL, ab5500_adc_irq[i].isr, + IRQF_NO_SUSPEND, + ab5500_adc_irq[i].name, gpadc); + + if (ret) { + dev_err(gpadc->dev, "failed to request %s IRQ %d: %d\n" + , ab5500_adc_irq[i].name, irq, ret); + goto fail_irq; + } + dev_dbg(gpadc->dev, "Requested %s IRQ %d: %d\n", + ab5500_adc_irq[i].name, irq, ret); + } + + /* Get Chip ID of the ABB ASIC */ + ret = abx500_get_chip_id(gpadc->dev); + if (ret < 0) { + dev_err(gpadc->dev, "failed to get chip ID\n"); + goto fail_irq; + } + gpadc->chip_id = (u8) ret; + + /* Create a work queue for gpadc auto */ + gpadc->gpadc_wq = + create_singlethread_workqueue("ab5500_gpadc_wq"); + if (gpadc->gpadc_wq == NULL) { + dev_err(gpadc->dev, "failed to create work queue\n"); + goto fail_irq; + } + + INIT_WORK(&gpadc->gpadc_trig0_work, ab5500_gpadc_trig0_work); + INIT_WORK(&gpadc->gpadc_trig1_work, ab5500_gpadc_trig1_work); + INIT_WORK(&gpadc->gpadc_trig2_work, ab5500_gpadc_trig2_work); + 
INIT_WORK(&gpadc->gpadc_trig3_work, ab5500_gpadc_trig3_work); + INIT_WORK(&gpadc->gpadc_trig4_work, ab5500_gpadc_trig4_work); + INIT_WORK(&gpadc->gpadc_trig5_work, ab5500_gpadc_trig5_work); + INIT_WORK(&gpadc->gpadc_trig6_work, ab5500_gpadc_trig6_work); + INIT_WORK(&gpadc->gpadc_trig7_work, ab5500_gpadc_trig7_work); + INIT_WORK(&gpadc->gpadc_trig_vbat_txon_work, + ab5500_gpadc_vbat_txon_work); + INIT_WORK(&gpadc->gpadc_trig_vbat_txoff_work, + ab5500_gpadc_vbat_txoff_work); + + for (j = 0; j < N_AUTO_TRIGGER; j++) + gpadc->adc_trig[j].flag = false; + + ret = ab5500_gpadc_configuration(gpadc); + if (ret < 0) { + dev_err(gpadc->dev, "gpadc: configuration failed\n"); + goto free_wq; + } + + ret = device_create_file(gpadc->dev, &dev_attr_adc0volt); + if (ret < 0) { + dev_err(gpadc->dev, "File device creation failed: %d\n", ret); + ret = -ENODEV; + goto fail_sysfs; + } + list_add_tail(&gpadc->node, &ab5500_gpadc_list); + + platform_set_drvdata(pdev, gpadc); + + return 0; +fail_sysfs: +free_wq: + destroy_workqueue(gpadc->gpadc_wq); +fail_irq: + for (i = i - 1; i >= 0; i--) { + irq = platform_get_irq_byname(pdev, ab5500_adc_irq[i].name); + free_irq(irq, gpadc); + } + kfree(gpadc); + gpadc = NULL; + return ret; +} + +static int __devexit ab5500_gpadc_remove(struct platform_device *pdev) +{ + int i, irq; + struct ab5500_gpadc *gpadc = platform_get_drvdata(pdev); + + device_remove_file(gpadc->dev, &dev_attr_adc0volt); + + /* remove this gpadc entry from the list */ + list_del(&gpadc->node); + /* Disable interrupts */ + for (i = 0; i < ARRAY_SIZE(ab5500_adc_irq); i++) { + irq = platform_get_irq_byname(pdev, ab5500_adc_irq[i].name); + free_irq(irq, gpadc); + } + /* Flush work */ + flush_workqueue(gpadc->gpadc_wq); + + /* Delete the work queue */ + destroy_workqueue(gpadc->gpadc_wq); + + kfree(gpadc); + gpadc = NULL; + return 0; +} + +static struct platform_driver ab5500_gpadc_driver = { + .probe = ab5500_gpadc_probe, + .remove = __devexit_p(ab5500_gpadc_remove), + .driver = { + 
.name = "ab5500-adc", + .owner = THIS_MODULE, + }, +}; + +static int __init ab5500_gpadc_init(void) +{ + return platform_driver_register(&ab5500_gpadc_driver); +} + +static void __exit ab5500_gpadc_exit(void) +{ + platform_driver_unregister(&ab5500_gpadc_driver); +} + +subsys_initcall_sync(ab5500_gpadc_init); +module_exit(ab5500_gpadc_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Vijaya Kumar K"); +MODULE_ALIAS("platform:ab5500_adc"); +MODULE_DESCRIPTION("AB5500 GPADC driver"); diff --git a/drivers/mfd/ab5500-power.c b/drivers/mfd/ab5500-power.c new file mode 100644 index 00000000000..9474c32809b --- /dev/null +++ b/drivers/mfd/ab5500-power.c @@ -0,0 +1,96 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * License terms: GNU General Public License (GPL) version 2 + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/signal.h> +#include <linux/init.h> +#include <linux/platform_device.h> + +#include <linux/mfd/abx500.h> +#include <linux/mfd/abx500/ab5500.h> + +static struct device *dev; + +/* STARTUP */ +#define AB5500_SYSPOR_CONTROL 0x30 + +/* VINT IO I2C CLOCK */ +#define AB5500_RTC_VINT 0x01 + +int ab5500_clock_rtc_enable(int num, bool enable) +{ + /* RTC_CLK{0,1,2} are bits {4,3,2}, active low */ + u8 mask = BIT(4 - num); + u8 value = enable ? 0 : mask; + + /* Don't allow RTC_CLK0 to be controlled. 
*/ + if (num < 1 || num > 2) + return -EINVAL; + + if (!dev) + return -EAGAIN; + + return abx500_mask_and_set(dev, AB5500_BANK_VIT_IO_I2C_CLK_TST_OTP, + AB5500_RTC_VINT, mask, value); +} + +static void ab5500_power_off(void) +{ + sigset_t old; + sigset_t all; + + sigfillset(&all); + + if (!sigprocmask(SIG_BLOCK, &all, &old)) { + /* Clear dbb_on */ + int ret = abx500_set(dev, AB5500_BANK_STARTUP, + AB5500_SYSPOR_CONTROL, 0); + WARN_ON(ret); + } +} + +static int __devinit ab5500_power_probe(struct platform_device *pdev) +{ + struct ab5500_platform_data *plat = dev_get_platdata(pdev->dev.parent); + + dev = &pdev->dev; + + if (plat->pm_power_off) + pm_power_off = ab5500_power_off; + + return 0; +} + +static int __devexit ab5500_power_remove(struct platform_device *pdev) +{ + struct ab5500_platform_data *plat = dev_get_platdata(pdev->dev.parent); + + if (plat->pm_power_off) + pm_power_off = NULL; + dev = NULL; + + return 0; +} + +static struct platform_driver ab5500_power_driver = { + .driver = { + .name = "ab5500-power", + .owner = THIS_MODULE, + }, + .probe = ab5500_power_probe, + .remove = __devexit_p(ab5500_power_remove), +}; + +static int __init ab8500_sysctrl_init(void) +{ + return platform_driver_register(&ab5500_power_driver); +} + +subsys_initcall(ab8500_sysctrl_init); + +MODULE_DESCRIPTION("AB5500 power driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c index 1f08704f7ae..8137ac22816 100644 --- a/drivers/mfd/ab8500-core.c +++ b/drivers/mfd/ab8500-core.c @@ -100,6 +100,9 @@ #define AB9540_MODEM_CTRL2_REG 0x23 #define AB9540_MODEM_CTRL2_SWDBBRSTN_BIT BIT(2) +static bool no_bm; /* No battery management */ +module_param(no_bm, bool, S_IRUGO); + /* * Map interrupt numbers to the LATCH and MASK register offsets, Interrupt * numbers are indexed into this array with (num / 8). 
The interupts are @@ -257,6 +260,7 @@ static struct abx500_ops ab8500_ops = { .mask_and_set_register = ab8500_mask_and_set_register, .event_registers_startup_state_get = NULL, .startup_irq_enabled = NULL, + .dump_all_banks = ab8500_dump_all_banks, }; static void ab8500_irq_lock(struct irq_data *data) @@ -354,6 +358,7 @@ static irqreturn_t ab8500_irq(int irq, void *dev) int line = i * 8 + bit; handle_nested_irq(ab8500->irq_base + line); + ab8500_debug_register_interrupt(line); value &= ~(1 << bit); } while (value); } @@ -746,7 +751,7 @@ static struct resource __devinitdata ab8500_usb_resources[] = { static struct resource __devinitdata ab8500_temp_resources[] = { { - .name = "AB8500_TEMP_WARM", + .name = "ABX500_TEMP_WARM", .start = AB8500_INT_TEMP_WARM, .end = AB8500_INT_TEMP_WARM, .flags = IORESOURCE_IRQ, @@ -768,6 +773,9 @@ static struct mfd_cell __devinitdata abx500_common_devs[] = { .name = "ab8500-regulator", }, { + .name = "ab8500-regulator-debug", + }, + { .name = "ab8500-gpadc", .num_resources = ARRAY_SIZE(ab8500_gpadc_resources), .resources = ab8500_gpadc_resources, @@ -778,26 +786,6 @@ static struct mfd_cell __devinitdata abx500_common_devs[] = { .resources = ab8500_rtc_resources, }, { - .name = "ab8500-charger", - .num_resources = ARRAY_SIZE(ab8500_charger_resources), - .resources = ab8500_charger_resources, - }, - { - .name = "ab8500-btemp", - .num_resources = ARRAY_SIZE(ab8500_btemp_resources), - .resources = ab8500_btemp_resources, - }, - { - .name = "ab8500-fg", - .num_resources = ARRAY_SIZE(ab8500_fg_resources), - .resources = ab8500_fg_resources, - }, - { - .name = "ab8500-chargalg", - .num_resources = ARRAY_SIZE(ab8500_chargalg_resources), - .resources = ab8500_chargalg_resources, - }, - { .name = "ab8500-acc-det", .num_resources = ARRAY_SIZE(ab8500_av_acc_detect_resources), .resources = ab8500_av_acc_detect_resources, @@ -815,20 +803,12 @@ static struct mfd_cell __devinitdata abx500_common_devs[] = { .name = "ab8500-pwm", .id = 1, }, - { - .name 
= "ab8500-pwm", - .id = 2, - }, - { - .name = "ab8500-pwm", - .id = 3, - }, { .name = "ab8500-leds", }, { .name = "ab8500-denc", }, { - .name = "ab8500-temp", + .name = "abx500-temp", .num_resources = ARRAY_SIZE(ab8500_temp_resources), .resources = ab8500_temp_resources, }, @@ -860,6 +840,29 @@ static struct mfd_cell __devinitdata ab9540_devs[] = { }, }; +static struct mfd_cell __devinitdata ab8500_bm_devs[] = { + { + .name = "ab8500-charger", + .num_resources = ARRAY_SIZE(ab8500_charger_resources), + .resources = ab8500_charger_resources, + }, + { + .name = "ab8500-btemp", + .num_resources = ARRAY_SIZE(ab8500_btemp_resources), + .resources = ab8500_btemp_resources, + }, + { + .name = "ab8500-fg", + .num_resources = ARRAY_SIZE(ab8500_fg_resources), + .resources = ab8500_fg_resources, + }, + { + .name = "ab8500-chargalg", + .num_resources = ARRAY_SIZE(ab8500_chargalg_resources), + .resources = ab8500_chargalg_resources, + }, +}; + static ssize_t show_chip_id(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1130,6 +1133,15 @@ int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version) if (ret) goto out_freeirq; + if (!no_bm) { + /* Add battery management devices */ + ret = mfd_add_devices(ab8500->dev, 0, ab8500_bm_devs, + ARRAY_SIZE(ab8500_bm_devs), NULL, + ab8500->irq_base); + if (ret) + dev_err(ab8500->dev, "error adding bm devices\n"); + } + if (is_ab9540(ab8500)) ret = sysfs_create_group(&ab8500->dev->kobj, &ab9540_attr_group); diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c index 9a0211aa889..7b912afd664 100644 --- a/drivers/mfd/ab8500-debugfs.c +++ b/drivers/mfd/ab8500-debugfs.c @@ -4,6 +4,72 @@ * Author: Mattias Wallin <mattias.wallin@stericsson.com> for ST-Ericsson. 
* License Terms: GNU General Public License v2 */ +/* + * AB8500 register access + * ====================== + * + * read: + * # echo BANK > <debugfs>/ab8500/register-bank + * # echo ADDR > <debugfs>/ab8500/register-address + * # cat <debugfs>/ab8500/register-value + * + * write: + * # echo BANK > <debugfs>/ab8500/register-bank + * # echo ADDR > <debugfs>/ab8500/register-address + * # echo VALUE > <debugfs>/ab8500/register-value + * + * read all registers from a bank: + * # echo BANK > <debugfs>/ab8500/register-bank + * # cat <debugfs>/ab8500/all-bank-register + * + * BANK target AB8500 register bank + * ADDR target AB8500 register address + * VALUE decimal or 0x-prefixed hexadecimal + * + * + * User Space notification on AB8500 IRQ + * ===================================== + * + * Allows user space entity to be notified when target AB8500 IRQ occurs. + * When subscribed, a sysfs entry is created in ab8500.i2c platform device. + * One can pool this file to get target IRQ occurence information. 
+ * + * subscribe to an AB8500 IRQ: + * # echo IRQ > <debugfs>/ab8500/irq-subscribe + * + * unsubscribe from an AB8500 IRQ: + * # echo IRQ > <debugfs>/ab8500/irq-unsubscribe + * + * + * AB8500 register formated read/write access + * ========================================== + * + * Read: read data, data>>SHIFT, data&=MASK, output data + * [0xABCDEF98] shift=12 mask=0xFFF => 0x00000CDE + * Write: read data, data &= ~(MASK<<SHIFT), data |= (VALUE<<SHIFT), write data + * [0xABCDEF98] shift=12 mask=0xFFF value=0x123 => [0xAB123F98] + * + * Usage: + * # echo "CMD [OPTIONS] BANK ADRESS [VALUE]" > $debugfs/ab8500/hwreg + * + * CMD read read access + * write write access + * + * BANK target reg bank + * ADDRESS target reg address + * VALUE (write) value to be updated + * + * OPTIONS + * -d|-dec (read) output in decimal + * -h|-hexa (read) output in 0x-hexa (default) + * -l|-w|-b 32bit (default), 16bit or 8bit reg access + * -m|-mask MASK 0x-hexa mask (default 0xFFFFFFFF) + * -s|-shift SHIFT bit shift value (read:left, write:right) + * -o|-offset OFFSET address offset to add to ADDRESS value + * + * Warning: bit shift operation is applied to bit-mask. + * Warning: bit shift direction depends on read or right command. 
+ */ #include <linux/seq_file.h> #include <linux/uaccess.h> @@ -11,13 +77,29 @@ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/kobject.h> +#include <linux/slab.h> #include <linux/mfd/abx500.h> -#include <linux/mfd/abx500/ab8500.h> +#include <linux/mfd/abx500/ab8500-gpadc.h> + +#ifdef CONFIG_DEBUG_FS +#include <linux/string.h> +#include <linux/ctype.h> +#endif static u32 debug_bank; static u32 debug_address; +static int irq_first; +static int irq_last; +static u32 *irq_count; +static int num_irqs; + +static struct device_attribute **dev_attr; +static char **event_name; + /** * struct ab8500_reg_range * @first: the first address of the range @@ -42,15 +124,35 @@ struct ab8500_i2c_ranges { const struct ab8500_reg_range *range; }; +/* hwreg- "mask" and "shift" entries ressources */ +struct hwreg_cfg { + u32 bank; /* target bank */ + u32 addr; /* target address */ + uint fmt; /* format */ + uint mask; /* read/write mask, applied before any bit shift */ + int shift; /* bit shift (read:right shift, write:left shift */ +}; +/* fmt bit #0: 0=hexa, 1=dec */ +#define REG_FMT_DEC(c) ((c)->fmt & 0x1) +#define REG_FMT_HEX(c) (!REG_FMT_DEC(c)) + +static struct hwreg_cfg hwreg_cfg = { + .addr = 0, /* default: invalid phys addr */ + .fmt = 0, /* default: 32bit access, hex output */ + .mask = 0xFFFFFFFF, /* default: no mask */ + .shift = 0, /* default: no bit shift */ +}; + #define AB8500_NAME_STRING "ab8500" -#define AB8500_NUM_BANKS 22 +#define AB8500_ADC_NAME_STRING "gpadc" +#define AB8500_NUM_BANKS 24 #define AB8500_REV_REG 0x80 static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = { [0x0] = { .num_ranges = 0, - .range = 0, + .range = NULL, }, [AB8500_SYS_CTRL1_BLOCK] = { .num_ranges = 3, @@ -215,7 +317,7 @@ static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = { }, }, [AB8500_CHARGER] = { - .num_ranges = 8, + .num_ranges = 9, .range = (struct ab8500_reg_range[]) { { 
.first = 0x00, @@ -249,6 +351,10 @@ static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = { .first = 0xC0, .last = 0xC2, }, + { + .first = 0xf5, + .last = 0xf6, + }, }, }, [AB8500_GAS_GAUGE] = { @@ -268,6 +374,24 @@ static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = { }, }, }, + [AB8500_DEVELOPMENT] = { + .num_ranges = 1, + .range = (struct ab8500_reg_range[]) { + { + .first = 0x00, + .last = 0x00, + }, + }, + }, + [AB8500_DEBUG] = { + .num_ranges = 1, + .range = (struct ab8500_reg_range[]) { + { + .first = 0x05, + .last = 0x07, + }, + }, + }, [AB8500_AUDIO] = { .num_ranges = 1, .range = (struct ab8500_reg_range[]) { @@ -354,15 +478,30 @@ static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = { }, }; -static int ab8500_registers_print(struct seq_file *s, void *p) +static irqreturn_t ab8500_debug_handler(int irq, void *data) { - struct device *dev = s->private; - unsigned int i; - u32 bank = debug_bank; + char buf[16]; + struct kobject *kobj = (struct kobject *)data; + unsigned int irq_abb = irq - irq_first; - seq_printf(s, AB8500_NAME_STRING " register values:\n"); + if (irq_abb < num_irqs) + irq_count[irq_abb]++; + /* + * This makes it possible to use poll for events (POLLPRI | POLLERR) + * from userspace on sysfs file named <irq-nr> + */ + sprintf(buf, "%d", irq); + sysfs_notify(kobj, NULL, buf); + + return IRQ_HANDLED; +} + +/* Prints to seq_file or log_buf */ +static int ab8500_registers_print(struct device *dev, u32 bank, + struct seq_file *s) +{ + unsigned int i; - seq_printf(s, " bank %u:\n", bank); for (i = 0; i < debug_ranges[bank].num_ranges; i++) { u32 reg; @@ -379,22 +518,42 @@ static int ab8500_registers_print(struct seq_file *s, void *p) return err; } - err = seq_printf(s, " [%u/0x%02X]: 0x%02X\n", bank, - reg, value); - if (err < 0) { - dev_err(dev, "seq_printf overflow\n"); - /* Error is not returned here since - * the output is wanted in any case */ - return 0; + if (s) { + err = seq_printf(s, " [%u/0x%02X]: 
0x%02X\n", + bank, reg, value); + if (err < 0) { + dev_err(dev, + "seq_printf overflow bank=%d reg=%d\n", + bank, reg); + /* Error is not returned here since + * the output is wanted in any case */ + return 0; + } + } else { + printk(KERN_INFO" [%u/0x%02X]: 0x%02X\n", bank, + reg, value); } } } return 0; } +static int ab8500_print_bank_registers(struct seq_file *s, void *p) +{ + struct device *dev = s->private; + u32 bank = debug_bank; + + seq_printf(s, AB8500_NAME_STRING " register values:\n"); + + seq_printf(s, " bank %u:\n", bank); + + ab8500_registers_print(dev, bank, s); + return 0; +} + static int ab8500_registers_open(struct inode *inode, struct file *file) { - return single_open(file, ab8500_registers_print, inode->i_private); + return single_open(file, ab8500_print_bank_registers, inode->i_private); } static const struct file_operations ab8500_registers_fops = { @@ -405,6 +564,64 @@ static const struct file_operations ab8500_registers_fops = { .owner = THIS_MODULE, }; +static int ab8500_print_all_banks(struct seq_file *s, void *p) +{ + struct device *dev = s->private; + unsigned int i; + int err; + + seq_printf(s, AB8500_NAME_STRING " register values:\n"); + + for (i = 1; i < AB8500_NUM_BANKS; i++) { + err = seq_printf(s, " bank %u:\n", i); + if (err < 0) + dev_err(dev, "seq_printf overflow, bank=%d\n", i); + + ab8500_registers_print(dev, i, s); + } + return 0; +} + +/* Dump registers to kernel log */ +void ab8500_dump_all_banks(struct device *dev) +{ + unsigned int i; + + printk(KERN_INFO"ab8500 register values:\n"); + + for (i = 1; i < AB8500_NUM_BANKS; i++) { + printk(KERN_INFO" bank %u:\n", i); + ab8500_registers_print(dev, i, NULL); + } +} + +static int ab8500_all_banks_open(struct inode *inode, struct file *file) +{ + struct seq_file *s; + int err; + + err = single_open(file, ab8500_print_all_banks, inode->i_private); + if (!err) { + /* Default buf size in seq_read is not enough */ + s = (struct seq_file *)file->private_data; + s->size = (PAGE_SIZE * 
2); + s->buf = kmalloc(s->size, GFP_KERNEL); + if (!s->buf) { + single_release(inode, file); + err = -ENOMEM; + } + } + return err; +} + +static const struct file_operations ab8500_all_banks_fops = { + .open = ab8500_all_banks_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + static int ab8500_bank_print(struct seq_file *s, void *p) { return seq_printf(s, "%d\n", debug_bank); @@ -515,10 +732,761 @@ static ssize_t ab8500_val_write(struct file *file, printk(KERN_ERR "abx500_set_reg failed %d, %d", err, __LINE__); return -EINVAL; } + return count; +} + +/* + * Interrupt status + */ +static u32 num_interrupts[AB8500_MAX_NR_IRQS]; +static int num_interrupt_lines; + +void ab8500_debug_register_interrupt(int line) +{ + if (line < num_interrupt_lines) + num_interrupts[line]++; +} + +static int ab8500_interrupts_print(struct seq_file *s, void *p) +{ + int line; + + seq_printf(s, "irq: number of\n"); + + for (line = 0; line < num_interrupt_lines; line++) + seq_printf(s, "%3i: %6i\n", line, num_interrupts[line]); + + return 0; +} + +static int ab8500_interrupts_open(struct inode *inode, struct file *file) +{ + return single_open(file, ab8500_interrupts_print, inode->i_private); +} + +/* + * - HWREG DB8500 formated routines + */ +static int ab8500_hwreg_print(struct seq_file *s, void *d) +{ + struct device *dev = s->private; + int ret; + u8 regvalue; + + ret = abx500_get_register_interruptible(dev, + (u8)hwreg_cfg.bank, (u8)hwreg_cfg.addr, ®value); + if (ret < 0) { + dev_err(dev, "abx500_get_reg fail %d, %d\n", + ret, __LINE__); + return -EINVAL; + } + + if (hwreg_cfg.shift >= 0) + regvalue >>= hwreg_cfg.shift; + else + regvalue <<= -hwreg_cfg.shift; + regvalue &= hwreg_cfg.mask; + + if (REG_FMT_DEC(&hwreg_cfg)) + seq_printf(s, "%d\n", regvalue); + else + seq_printf(s, "0x%02X\n", regvalue); + return 0; +} + +static int ab8500_hwreg_open(struct inode *inode, struct file *file) +{ + return single_open(file, 
ab8500_hwreg_print, inode->i_private); +} + +static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p) +{ + int bat_ctrl_raw; + int bat_ctrl_convert; + struct ab8500_gpadc *gpadc; + + gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); + bat_ctrl_raw = ab8500_gpadc_read_raw(gpadc, BAT_CTRL); + bat_ctrl_convert = ab8500_gpadc_ad_to_voltage(gpadc, + BAT_CTRL, bat_ctrl_raw); + + return seq_printf(s, "%d,0x%X\n", + bat_ctrl_convert, bat_ctrl_raw); +} + +static int ab8500_gpadc_bat_ctrl_open(struct inode *inode, struct file *file) +{ + return single_open(file, ab8500_gpadc_bat_ctrl_print, inode->i_private); +} + +static const struct file_operations ab8500_gpadc_bat_ctrl_fops = { + .open = ab8500_gpadc_bat_ctrl_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int ab8500_gpadc_btemp_ball_print(struct seq_file *s, void *p) +{ + int btemp_ball_raw; + int btemp_ball_convert; + struct ab8500_gpadc *gpadc; + + gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); + btemp_ball_raw = ab8500_gpadc_read_raw(gpadc, BTEMP_BALL); + btemp_ball_convert = ab8500_gpadc_ad_to_voltage(gpadc, BTEMP_BALL, + btemp_ball_raw); + + return seq_printf(s, + "%d,0x%X\n", btemp_ball_convert, btemp_ball_raw); +} + +static int ab8500_gpadc_btemp_ball_open(struct inode *inode, + struct file *file) +{ + return single_open(file, ab8500_gpadc_btemp_ball_print, inode->i_private); +} + +static const struct file_operations ab8500_gpadc_btemp_ball_fops = { + .open = ab8500_gpadc_btemp_ball_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int ab8500_gpadc_main_charger_v_print(struct seq_file *s, void *p) +{ + int main_charger_v_raw; + int main_charger_v_convert; + struct ab8500_gpadc *gpadc; + + gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); + main_charger_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_V); + main_charger_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, + MAIN_CHARGER_V, 
main_charger_v_raw); + + return seq_printf(s, "%d,0x%X\n", + main_charger_v_convert, main_charger_v_raw); +} + +static int ab8500_gpadc_main_charger_v_open(struct inode *inode, + struct file *file) +{ + return single_open(file, ab8500_gpadc_main_charger_v_print, + inode->i_private); +} + +static const struct file_operations ab8500_gpadc_main_charger_v_fops = { + .open = ab8500_gpadc_main_charger_v_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int ab8500_gpadc_acc_detect1_print(struct seq_file *s, void *p) +{ + int acc_detect1_raw; + int acc_detect1_convert; + struct ab8500_gpadc *gpadc; + + gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); + acc_detect1_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT1); + acc_detect1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ACC_DETECT1, + acc_detect1_raw); + + return seq_printf(s, "%d,0x%X\n", + acc_detect1_convert, acc_detect1_raw); +} + +static int ab8500_gpadc_acc_detect1_open(struct inode *inode, + struct file *file) +{ + return single_open(file, ab8500_gpadc_acc_detect1_print, + inode->i_private); +} + +static const struct file_operations ab8500_gpadc_acc_detect1_fops = { + .open = ab8500_gpadc_acc_detect1_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int ab8500_gpadc_acc_detect2_print(struct seq_file *s, void *p) +{ + int acc_detect2_raw; + int acc_detect2_convert; + struct ab8500_gpadc *gpadc; + + gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); + acc_detect2_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT2); + acc_detect2_convert = ab8500_gpadc_ad_to_voltage(gpadc, + ACC_DETECT2, acc_detect2_raw); + + return seq_printf(s, "%d,0x%X\n", + acc_detect2_convert, acc_detect2_raw); +} + +static int ab8500_gpadc_acc_detect2_open(struct inode *inode, + struct file *file) +{ + return single_open(file, ab8500_gpadc_acc_detect2_print, + inode->i_private); +} + +static const struct file_operations 
ab8500_gpadc_acc_detect2_fops = { + .open = ab8500_gpadc_acc_detect2_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int ab8500_gpadc_aux1_print(struct seq_file *s, void *p) +{ + int aux1_raw; + int aux1_convert; + struct ab8500_gpadc *gpadc; + + gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); + aux1_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX1); + aux1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX1, + aux1_raw); + + return seq_printf(s, "%d,0x%X\n", + aux1_convert, aux1_raw); +} + +static int ab8500_gpadc_aux1_open(struct inode *inode, struct file *file) +{ + return single_open(file, ab8500_gpadc_aux1_print, inode->i_private); +} + +static const struct file_operations ab8500_gpadc_aux1_fops = { + .open = ab8500_gpadc_aux1_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int ab8500_gpadc_aux2_print(struct seq_file *s, void *p) +{ + int aux2_raw; + int aux2_convert; + struct ab8500_gpadc *gpadc; + + gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); + aux2_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX2); + aux2_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX2, + aux2_raw); + + return seq_printf(s, "%d,0x%X\n", + aux2_convert, aux2_raw); +} + +static int ab8500_gpadc_aux2_open(struct inode *inode, struct file *file) +{ + return single_open(file, ab8500_gpadc_aux2_print, inode->i_private); +} + +static const struct file_operations ab8500_gpadc_aux2_fops = { + .open = ab8500_gpadc_aux2_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int ab8500_gpadc_main_bat_v_print(struct seq_file *s, void *p) +{ + int main_bat_v_raw; + int main_bat_v_convert; + struct ab8500_gpadc *gpadc; + + gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); + main_bat_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_BAT_V); + main_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, MAIN_BAT_V, + main_bat_v_raw); + + 
return seq_printf(s, "%d,0x%X\n", + main_bat_v_convert, main_bat_v_raw); +} + +static int ab8500_gpadc_main_bat_v_open(struct inode *inode, + struct file *file) +{ + return single_open(file, ab8500_gpadc_main_bat_v_print, inode->i_private); +} + +static const struct file_operations ab8500_gpadc_main_bat_v_fops = { + .open = ab8500_gpadc_main_bat_v_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int ab8500_gpadc_vbus_v_print(struct seq_file *s, void *p) +{ + int vbus_v_raw; + int vbus_v_convert; + struct ab8500_gpadc *gpadc; + + gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); + vbus_v_raw = ab8500_gpadc_read_raw(gpadc, VBUS_V); + vbus_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, VBUS_V, + vbus_v_raw); + + return seq_printf(s, "%d,0x%X\n", + vbus_v_convert, vbus_v_raw); +} + +static int ab8500_gpadc_vbus_v_open(struct inode *inode, struct file *file) +{ + return single_open(file, ab8500_gpadc_vbus_v_print, inode->i_private); +} + +static const struct file_operations ab8500_gpadc_vbus_v_fops = { + .open = ab8500_gpadc_vbus_v_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int ab8500_gpadc_main_charger_c_print(struct seq_file *s, void *p) +{ + int main_charger_c_raw; + int main_charger_c_convert; + struct ab8500_gpadc *gpadc; + + gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); + main_charger_c_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_C); + main_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc, + MAIN_CHARGER_C, main_charger_c_raw); + + return seq_printf(s, "%d,0x%X\n", + main_charger_c_convert, main_charger_c_raw); +} + +static int ab8500_gpadc_main_charger_c_open(struct inode *inode, + struct file *file) +{ + return single_open(file, ab8500_gpadc_main_charger_c_print, + inode->i_private); +} + +static const struct file_operations ab8500_gpadc_main_charger_c_fops = { + .open = ab8500_gpadc_main_charger_c_open, + .read = seq_read, + 
.llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int ab8500_gpadc_usb_charger_c_print(struct seq_file *s, void *p) +{ + int usb_charger_c_raw; + int usb_charger_c_convert; + struct ab8500_gpadc *gpadc; + + gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); + usb_charger_c_raw = ab8500_gpadc_read_raw(gpadc, USB_CHARGER_C); + usb_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc, + USB_CHARGER_C, usb_charger_c_raw); + + return seq_printf(s, "%d,0x%X\n", + usb_charger_c_convert, usb_charger_c_raw); +} + +static int ab8500_gpadc_usb_charger_c_open(struct inode *inode, + struct file *file) +{ + return single_open(file, ab8500_gpadc_usb_charger_c_print, + inode->i_private); +} + +static const struct file_operations ab8500_gpadc_usb_charger_c_fops = { + .open = ab8500_gpadc_usb_charger_c_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int ab8500_gpadc_bk_bat_v_print(struct seq_file *s, void *p) +{ + int bk_bat_v_raw; + int bk_bat_v_convert; + struct ab8500_gpadc *gpadc; + + gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); + bk_bat_v_raw = ab8500_gpadc_read_raw(gpadc, BK_BAT_V); + bk_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, + BK_BAT_V, bk_bat_v_raw); + + return seq_printf(s, "%d,0x%X\n", + bk_bat_v_convert, bk_bat_v_raw); +} + +static int ab8500_gpadc_bk_bat_v_open(struct inode *inode, struct file *file) +{ + return single_open(file, ab8500_gpadc_bk_bat_v_print, inode->i_private); +} + +static const struct file_operations ab8500_gpadc_bk_bat_v_fops = { + .open = ab8500_gpadc_bk_bat_v_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int ab8500_gpadc_die_temp_print(struct seq_file *s, void *p) +{ + int die_temp_raw; + int die_temp_convert; + struct ab8500_gpadc *gpadc; + + gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); + die_temp_raw = ab8500_gpadc_read_raw(gpadc, DIE_TEMP); + die_temp_convert = 
ab8500_gpadc_ad_to_voltage(gpadc, DIE_TEMP, + die_temp_raw); + + return seq_printf(s, "%d,0x%X\n", + die_temp_convert, die_temp_raw); +} + +static int ab8500_gpadc_die_temp_open(struct inode *inode, struct file *file) +{ + return single_open(file, ab8500_gpadc_die_temp_print, inode->i_private); +} + +static const struct file_operations ab8500_gpadc_die_temp_fops = { + .open = ab8500_gpadc_die_temp_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +/* + * return length of an ASCII numerical value, 0 is string is not a + * numerical value. + * string shall start at value 1st char. + * string can be tailed with \0 or space or newline chars only. + * value can be decimal or hexadecimal (prefixed 0x or 0X). + */ +static int strval_len(char *b) +{ + char *s = b; + if ((*s == '0') && ((*(s+1) == 'x') || (*(s+1) == 'X'))) { + s += 2; + for (; *s && (*s != ' ') && (*s != '\n'); s++) { + if (!isxdigit(*s)) + return 0; + } + } else { + if (*s == '-') + s++; + for (; *s && (*s != ' ') && (*s != '\n'); s++) { + if (!isdigit(*s)) + return 0; + } + } + return (int) (s-b); +} + +/* + * parse hwreg input data. + * update global hwreg_cfg only if input data syntax is ok. + */ +static ssize_t hwreg_common_write(char *b, struct hwreg_cfg *cfg, + struct device *dev) +{ + uint write, val = 0; + struct hwreg_cfg loc = { + .bank = 0, /* default: invalid phys addr */ + .addr = 0, /* default: invalid phys addr */ + .fmt = 0, /* default: 32bit access, hex output */ + .mask = 0xFFFFFFFF, /* default: no mask */ + .shift = 0, /* default: no bit shift */ + }; + + /* read or write ? */ + if (!strncmp(b, "read ", 5)) { + write = 0; + b += 5; + } else if (!strncmp(b, "write ", 6)) { + write = 1; + b += 6; + } else + return -EINVAL; + + /* OPTIONS -l|-w|-b -s -m -o */ + while ((*b == ' ') || (*b == '-')) { + if (*(b-1) != ' ') { + b++; + continue; + } + if ((!strncmp(b, "-d ", 3)) || + (!strncmp(b, "-dec ", 5))) { + b += (*(b+2) == ' ') ? 
3 : 5; + loc.fmt |= (1<<0); + } else if ((!strncmp(b, "-h ", 3)) || + (!strncmp(b, "-hex ", 5))) { + b += (*(b+2) == ' ') ? 3 : 5; + loc.fmt &= ~(1<<0); + } else if ((!strncmp(b, "-m ", 3)) || + (!strncmp(b, "-mask ", 6))) { + b += (*(b+2) == ' ') ? 3 : 6; + if (strval_len(b) == 0) + return -EINVAL; + loc.mask = simple_strtoul(b, &b, 0); + } else if ((!strncmp(b, "-s ", 3)) || + (!strncmp(b, "-shift ", 7))) { + b += (*(b+2) == ' ') ? 3 : 7; + if (strval_len(b) == 0) + return -EINVAL; + loc.shift = simple_strtol(b, &b, 0); + } else { + return -EINVAL; + } + } + /* get arg BANK and ADDRESS */ + if (strval_len(b) == 0) + return -EINVAL; + loc.bank = simple_strtoul(b, &b, 0); + while (*b == ' ') + b++; + if (strval_len(b) == 0) + return -EINVAL; + loc.addr = simple_strtoul(b, &b, 0); + + if (write) { + while (*b == ' ') + b++; + if (strval_len(b) == 0) + return -EINVAL; + val = simple_strtoul(b, &b, 0); + } + + /* args are ok, update target cfg (mainly for read) */ + *cfg = loc; + +#ifdef ABB_HWREG_DEBUG + pr_warn("HWREG request: %s, %s, addr=0x%08X, mask=0x%X, shift=%d" + "value=0x%X\n", (write) ? "write" : "read", + REG_FMT_DEC(cfg) ? 
"decimal" : "hexa", + cfg->addr, cfg->mask, cfg->shift, val); +#endif + + if (write) { + u8 regvalue; + int ret = abx500_get_register_interruptible(dev, + (u8)cfg->bank, (u8)cfg->addr, ®value); + if (ret < 0) { + dev_err(dev, "abx500_get_reg fail %d, %d\n", + ret, __LINE__); + return -EINVAL; + } + + if (cfg->shift >= 0) { + regvalue &= ~(cfg->mask << (cfg->shift)); + val = (val & cfg->mask) << (cfg->shift); + } else { + regvalue &= ~(cfg->mask >> (-cfg->shift)); + val = (val & cfg->mask) >> (-cfg->shift); + } + val = val | regvalue; + + ret = abx500_set_register_interruptible(dev, + (u8)cfg->bank, (u8)cfg->addr, (u8)val); + if (ret < 0) { + pr_err("abx500_set_reg failed %d, %d", ret, __LINE__); + return -EINVAL; + } + + } + return 0; +} + +static ssize_t ab8500_hwreg_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct device *dev = ((struct seq_file *)(file->private_data))->private; + char buf[128]; + int buf_size, ret; + + /* Get userspace string and assure termination */ + buf_size = min(count, (sizeof(buf)-1)); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + buf[buf_size] = 0; + + /* get args and process */ + ret = hwreg_common_write(buf, &hwreg_cfg, dev); + return (ret) ? ret : buf_size; +} + +/* + * - irq subscribe/unsubscribe stuff + */ +static int ab8500_subscribe_unsubscribe_print(struct seq_file *s, void *p) +{ + seq_printf(s, "%d\n", irq_first); + + return 0; +} + +static int ab8500_subscribe_unsubscribe_open(struct inode *inode, + struct file *file) +{ + return single_open(file, ab8500_subscribe_unsubscribe_print, + inode->i_private); +} + +/* + * Userspace should use poll() on this file. When an event occur + * the blocking poll will be released. 
+ */ +static ssize_t show_irq(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long name; + unsigned int irq_index; + int err; + + err = strict_strtoul(attr->attr.name, 0, &name); + if (err) + return err; + + irq_index = name - irq_first; + if (irq_index >= num_irqs) + return -EINVAL; + else + return sprintf(buf, "%u\n", irq_count[irq_index]); +} + +static ssize_t ab8500_subscribe_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct device *dev = ((struct seq_file *)(file->private_data))->private; + char buf[32]; + int buf_size; + unsigned long user_val; + int err; + unsigned int irq_index; + + /* Get userspace string and assure termination */ + buf_size = min(count, (sizeof(buf)-1)); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + buf[buf_size] = 0; + + err = strict_strtoul(buf, 0, &user_val); + if (err) + return -EINVAL; + if (user_val < irq_first) { + dev_err(dev, "debugfs error input < %d\n", irq_first); + return -EINVAL; + } + if (user_val > irq_last) { + dev_err(dev, "debugfs error input > %d\n", irq_last); + return -EINVAL; + } + + irq_index = user_val - irq_first; + if (irq_index >= num_irqs) + return -EINVAL; + + /* + * This will create a sysfs file named <irq-nr> which userspace can + * use to select or poll and get the AB8500 events + */ + dev_attr[irq_index] = kmalloc(sizeof(struct device_attribute), + GFP_KERNEL); + event_name[irq_index] = kmalloc(buf_size, GFP_KERNEL); + sprintf(event_name[irq_index], "%lu", user_val); + dev_attr[irq_index]->show = show_irq; + dev_attr[irq_index]->store = NULL; + dev_attr[irq_index]->attr.name = event_name[irq_index]; + dev_attr[irq_index]->attr.mode = S_IRUGO; + err = sysfs_create_file(&dev->kobj, &dev_attr[irq_index]->attr); + if (err < 0) { + printk(KERN_ERR "sysfs_create_file failed %d\n", err); + return err; + } + + err = request_threaded_irq(user_val, NULL, ab8500_debug_handler, + IRQF_SHARED | IRQF_NO_SUSPEND, 
"ab8500-debug", &dev->kobj); + if (err < 0) { + printk(KERN_ERR "request_threaded_irq failed %d, %lu\n", + err, user_val); + sysfs_remove_file(&dev->kobj, &dev_attr[irq_index]->attr); + return err; + } + + return buf_size; +} + +static ssize_t ab8500_unsubscribe_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct device *dev = ((struct seq_file *)(file->private_data))->private; + char buf[32]; + int buf_size; + unsigned long user_val; + int err; + unsigned int irq_index; + + /* Get userspace string and assure termination */ + buf_size = min(count, (sizeof(buf)-1)); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + buf[buf_size] = 0; + + err = strict_strtoul(buf, 0, &user_val); + if (err) + return -EINVAL; + if (user_val < irq_first) { + dev_err(dev, "debugfs error input < %d\n", irq_first); + return -EINVAL; + } + if (user_val > irq_last) { + dev_err(dev, "debugfs error input > %d\n", irq_last); + return -EINVAL; + } + + irq_index = user_val - irq_first; + if (irq_index >= num_irqs) + return -EINVAL; + + /* Set irq count to 0 when unsubscribe */ + irq_count[irq_index] = 0; + + if (dev_attr[irq_index]) + sysfs_remove_file(&dev->kobj, &dev_attr[irq_index]->attr); + + + free_irq(user_val, &dev->kobj); + kfree(event_name[irq_index]); + kfree(dev_attr[irq_index]); return count; } +/* + * - several deubgfs nodes fops + */ + static const struct file_operations ab8500_bank_fops = { .open = ab8500_bank_open, .write = ab8500_bank_write, @@ -546,64 +1514,231 @@ static const struct file_operations ab8500_val_fops = { .owner = THIS_MODULE, }; +static const struct file_operations ab8500_interrupts_fops = { + .open = ab8500_interrupts_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static const struct file_operations ab8500_subscribe_fops = { + .open = ab8500_subscribe_unsubscribe_open, + .write = ab8500_subscribe_write, + .read = seq_read, + .llseek = 
seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static const struct file_operations ab8500_unsubscribe_fops = { + .open = ab8500_subscribe_unsubscribe_open, + .write = ab8500_unsubscribe_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static const struct file_operations ab8500_hwreg_fops = { + .open = ab8500_hwreg_open, + .write = ab8500_hwreg_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + static struct dentry *ab8500_dir; -static struct dentry *ab8500_reg_file; -static struct dentry *ab8500_bank_file; -static struct dentry *ab8500_address_file; -static struct dentry *ab8500_val_file; +static struct dentry *ab8500_gpadc_dir; static int __devinit ab8500_debug_probe(struct platform_device *plf) { + struct dentry *file; + int ret = -ENOMEM; + struct ab8500 *ab8500; debug_bank = AB8500_MISC; debug_address = AB8500_REV_REG & 0x00FF; + ab8500 = dev_get_drvdata(plf->dev.parent); + num_irqs = ab8500->mask_size; + + irq_count = kzalloc(sizeof(*irq_count)*num_irqs, GFP_KERNEL); + if (!irq_count) + return -ENOMEM; + + dev_attr = kzalloc(sizeof(*dev_attr)*num_irqs,GFP_KERNEL); + if (!dev_attr) + goto out_freeirq_count; + + event_name = kzalloc(sizeof(*event_name)*num_irqs, GFP_KERNEL); + if (!event_name) + goto out_freedev_attr; + + irq_first = platform_get_irq_byname(plf, "IRQ_FIRST"); + if (irq_first < 0) { + dev_err(&plf->dev, "First irq not found, err %d\n", + irq_first); + ret = irq_first; + goto out_freeevent_name; + } + + irq_last = platform_get_irq_byname(plf, "IRQ_LAST"); + if (irq_last < 0) { + dev_err(&plf->dev, "Last irq not found, err %d\n", + irq_last); + ret = irq_last; + goto out_freeevent_name; + } + ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL); if (!ab8500_dir) - goto exit_no_debugfs; + goto err; + + ab8500_gpadc_dir = debugfs_create_dir(AB8500_ADC_NAME_STRING, + ab8500_dir); + if (!ab8500_gpadc_dir) + 
goto err; + + file = debugfs_create_file("all-bank-registers", S_IRUGO, + ab8500_dir, &plf->dev, &ab8500_registers_fops); + if (!file) + goto err; + + file = debugfs_create_file("all-banks", S_IRUGO, + ab8500_dir, &plf->dev, &ab8500_all_banks_fops); + if (!file) + goto err; + + file = debugfs_create_file("register-bank", (S_IRUGO | S_IWUGO), + ab8500_dir, &plf->dev, &ab8500_bank_fops); + if (!file) + goto err; + + file = debugfs_create_file("register-address", (S_IRUGO | S_IWUGO), + ab8500_dir, &plf->dev, &ab8500_address_fops); + if (!file) + goto err; + + file = debugfs_create_file("register-value", (S_IRUGO | S_IWUGO), + ab8500_dir, &plf->dev, &ab8500_val_fops); + if (!file) + goto err; - ab8500_reg_file = debugfs_create_file("all-bank-registers", - S_IRUGO, ab8500_dir, &plf->dev, &ab8500_registers_fops); - if (!ab8500_reg_file) - goto exit_destroy_dir; + if (is_ab8500(ab8500)) + num_interrupt_lines = AB8500_NR_IRQS; + else if (is_ab8505(ab8500)) + num_interrupt_lines = AB8505_NR_IRQS; + else if (is_ab9540(ab8500)) + num_interrupt_lines = AB9540_NR_IRQS; - ab8500_bank_file = debugfs_create_file("register-bank", - (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_bank_fops); - if (!ab8500_bank_file) - goto exit_destroy_reg; + file = debugfs_create_file("interrupts", (S_IRUGO), + ab8500_dir, &plf->dev, &ab8500_interrupts_fops); + if (!file) + goto err; - ab8500_address_file = debugfs_create_file("register-address", - (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, - &ab8500_address_fops); - if (!ab8500_address_file) - goto exit_destroy_bank; + file = debugfs_create_file("irq-subscribe", (S_IRUGO | S_IWUGO), + ab8500_dir, &plf->dev, &ab8500_subscribe_fops); + if (!file) + goto err; - ab8500_val_file = debugfs_create_file("register-value", - (S_IRUGO | S_IWUSR), ab8500_dir, &plf->dev, &ab8500_val_fops); - if (!ab8500_val_file) - goto exit_destroy_address; + file = debugfs_create_file("irq-unsubscribe", (S_IRUGO | S_IWUGO), + ab8500_dir, &plf->dev, 
&ab8500_unsubscribe_fops); + if (!file) + goto err; + + file = debugfs_create_file("hwreg", (S_IRUGO | S_IWUGO), + ab8500_dir, &plf->dev, &ab8500_hwreg_fops); + if (!file) + goto err; + + file = debugfs_create_file("bat_ctrl", (S_IRUGO | S_IWUGO), + ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_bat_ctrl_fops); + if (!file) + goto err; + + file = debugfs_create_file("btemp_ball", (S_IRUGO | S_IWUGO), + ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_btemp_ball_fops); + if (!file) + goto err; + + file = debugfs_create_file("main_charger_v", (S_IRUGO | S_IWUGO), + ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_charger_v_fops); + if (!file) + goto err; + + file = debugfs_create_file("acc_detect1", (S_IRUGO | S_IWUGO), + ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_acc_detect1_fops); + if (!file) + goto err; + + file = debugfs_create_file("acc_detect2", (S_IRUGO | S_IWUGO), + ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_acc_detect2_fops); + if (!file) + goto err; + + file = debugfs_create_file("adc_aux1", (S_IRUGO | S_IWUGO), + ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_aux1_fops); + if (!file) + goto err; + + file = debugfs_create_file("adc_aux2", (S_IRUGO | S_IWUGO), + ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_aux2_fops); + if (!file) + goto err; + + file = debugfs_create_file("main_bat_v", (S_IRUGO | S_IWUGO), + ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_bat_v_fops); + if (!file) + goto err; + + file = debugfs_create_file("vbus_v", (S_IRUGO | S_IWUGO), + ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_vbus_v_fops); + if (!file) + goto err; + + file = debugfs_create_file("main_charger_c", (S_IRUGO | S_IWUGO), + ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_charger_c_fops); + if (!file) + goto err; + + file = debugfs_create_file("usb_charger_c", (S_IRUGO | S_IWUGO), + ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_usb_charger_c_fops); + if (!file) + goto err; + + file = debugfs_create_file("bk_bat_v", (S_IRUGO | S_IWUGO), + ab8500_gpadc_dir, &plf->dev, 
&ab8500_gpadc_bk_bat_v_fops); + if (!file) + goto err; + + file = debugfs_create_file("die_temp", (S_IRUGO | S_IWUGO), + ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_die_temp_fops); + if (!file) + goto err; return 0; -exit_destroy_address: - debugfs_remove(ab8500_address_file); -exit_destroy_bank: - debugfs_remove(ab8500_bank_file); -exit_destroy_reg: - debugfs_remove(ab8500_reg_file); -exit_destroy_dir: - debugfs_remove(ab8500_dir); -exit_no_debugfs: +err: + if (ab8500_dir) + debugfs_remove_recursive(ab8500_dir); dev_err(&plf->dev, "failed to create debugfs entries.\n"); - return -ENOMEM; +out_freeevent_name: + kfree(event_name); +out_freedev_attr: + kfree(dev_attr); +out_freeirq_count: + kfree(irq_count); + + return ret; } static int __devexit ab8500_debug_remove(struct platform_device *plf) { - debugfs_remove(ab8500_val_file); - debugfs_remove(ab8500_address_file); - debugfs_remove(ab8500_bank_file); - debugfs_remove(ab8500_reg_file); - debugfs_remove(ab8500_dir); + debugfs_remove_recursive(ab8500_dir); + kfree(event_name); + kfree(dev_attr); + kfree(irq_count); return 0; } diff --git a/drivers/mfd/ab8500-denc.c b/drivers/mfd/ab8500-denc.c new file mode 100644 index 00000000000..17efee62110 --- /dev/null +++ b/drivers/mfd/ab8500-denc.c @@ -0,0 +1,539 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * + * ST-Ericsson AB8500 DENC base driver + * + * Author: Marcel Tunnissen <marcel.tuennissen@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/debugfs.h> +#include <linux/list.h> +#include <linux/platform_device.h> +#include <linux/io.h> +#include <linux/gpio.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/err.h> +#include <linux/uaccess.h> +#include <linux/mfd/abx500.h> +#include <linux/mfd/abx500/ab8500.h> +#include <linux/mfd/ab8500/denc-regs.h> +#include <linux/mfd/ab8500/denc.h> + +#define AB8500_NAME "ab8500" +#define AB8500_DENC_NAME "ab8500_denc" + +struct device_usage { + struct list_head list; + struct platform_device *pdev; + bool taken; +}; +static LIST_HEAD(device_list); + +/* To get rid of the extra bank parameter: */ +#define AB8500_REG_BANK_NR(__reg) ((0xff00 & (__reg)) >> 8) +static inline u8 ab8500_rreg(struct device *dev, u32 reg) +{ + u8 val; + if (abx500_get_register_interruptible(dev, AB8500_REG_BANK_NR(reg), + reg, &val) < 0) + return 0; + else + return val; +} + +static inline int ab8500_wreg(struct device *dev, u32 reg, u8 val) +{ + return abx500_set_register_interruptible(dev, AB8500_REG_BANK_NR(reg), + reg, val); +} + +/* Only use in the macro below: */ +static inline int _ab8500_wreg_fld(struct device *dev, u32 reg, u8 val, + u8 mask, u8 shift) +{ + int ret; + u8 org_val; + + ret = abx500_get_register_interruptible(dev, AB8500_REG_BANK_NR(reg), + reg, &org_val); + if (ret < 0) + return ret; + else + ab8500_wreg(dev, reg, + (org_val & ~mask) | ((val << shift) & mask)); + return 0; +} + +#define ab8500_wr_fld(__d, __reg, __fld, __val) \ + _ab8500_wreg_fld(__d, __reg, __val, __reg##_##__fld##_MASK, \ + __reg##_##__fld##_SHIFT) + +#define ab8500_set_fld(__cur_val, __reg, __fld, __val) \ + (((__cur_val) & ~__reg##_##__fld##_MASK) | \ + (((__val) << __reg##_##__fld##_SHIFT) & __reg##_##__fld##_MASK)) + +#define AB8500_DENC_TRACE(__pd) dev_dbg(&(__pd)->dev, "%s\n", __func__) + +#ifdef CONFIG_DEBUG_FS +static struct dentry 
*debugfs_ab8500_denc_dir; +static struct dentry *debugfs_ab8500_dump_regs_file; +static void ab8500_denc_conf_ddr(struct platform_device *pdev); +static int debugfs_ab8500_open_file(struct inode *inode, struct file *file); +static ssize_t debugfs_ab8500_dump_regs(struct file *file, char __user *buf, + size_t count, loff_t *f_pos); + +static const struct file_operations debugfs_ab8500_dump_regs_fops = { + .owner = THIS_MODULE, + .open = debugfs_ab8500_open_file, + .read = debugfs_ab8500_dump_regs, +}; +#endif /* CONFIG_DEBUG_FS */ + +static int __devinit ab8500_denc_probe(struct platform_device *pdev) +{ + int ret = 0; + struct ab8500_platform_data *ab8500_pdata = + dev_get_platdata(pdev->dev.parent); + struct ab8500_denc_platform_data *pdata; + struct device_usage *device_data; + + AB8500_DENC_TRACE(pdev); + + if (ab8500_pdata == NULL) { + dev_err(&pdev->dev, "AB8500 platform data missing\n"); + return -EINVAL; + } + + pdata = ab8500_pdata->denc; + if (pdata == NULL) { + dev_err(&pdev->dev, "Denc platform data missing\n"); + return -EINVAL; + } + + device_data = kzalloc(sizeof(struct device_usage), GFP_KERNEL); + if (!device_data) { + dev_err(&pdev->dev, "Failed to allocate device data\n"); + return -ENOMEM; + } + device_data->pdev = pdev; + list_add_tail(&device_data->list, &device_list); + +#ifdef CONFIG_DEBUG_FS + debugfs_ab8500_denc_dir = debugfs_create_dir(pdev->name, NULL); + debugfs_ab8500_dump_regs_file = debugfs_create_file( + "dumpregs", S_IRUGO, + debugfs_ab8500_denc_dir, &pdev->dev, + &debugfs_ab8500_dump_regs_fops + ); +#endif /* CONFIG_DEBUG_FS */ + return ret; +} + +static int __devexit ab8500_denc_remove(struct platform_device *pdev) +{ + struct list_head *element; + struct device_usage *device_data; + + AB8500_DENC_TRACE(pdev); + +#ifdef CONFIG_DEBUG_FS + debugfs_remove(debugfs_ab8500_dump_regs_file); + debugfs_remove(debugfs_ab8500_denc_dir); +#endif /* CONFIG_DEBUG_FS */ + + list_for_each(element, &device_list) { + device_data = 
list_entry(element, struct device_usage, list); + if (device_data->pdev == pdev) { + list_del(element); + kzfree(device_data); + } + } + + return 0; +} + +static struct platform_driver ab8500_denc_driver = { + .probe = ab8500_denc_probe, + .remove = ab8500_denc_remove, + .driver = { + .name = "ab8500-denc", + }, +}; + +static void setup_27mhz(struct platform_device *pdev, bool enable) +{ + u8 data = ab8500_rreg(&pdev->dev, AB8500_SYS_ULP_CLK_CONF); + + AB8500_DENC_TRACE(pdev); + /* TODO: check if this field needs to be set */ + data = ab8500_set_fld(data, AB8500_SYS_ULP_CLK_CONF, CLK_27MHZ_PD_ENA, + true); + data = ab8500_set_fld(data, AB8500_SYS_ULP_CLK_CONF, CLK_27MHZ_BUF_ENA, + enable); + data = ab8500_set_fld(data, AB8500_SYS_ULP_CLK_CONF, TVOUT_CLK_INV, + false); + data = ab8500_set_fld(data, AB8500_SYS_ULP_CLK_CONF, TVOUT_CLK_DE_IN, + false); + data = ab8500_set_fld(data, AB8500_SYS_ULP_CLK_CONF, CLK_27MHZ_STRE, + 1); + ab8500_wreg(&pdev->dev, AB8500_SYS_ULP_CLK_CONF, data); + + data = ab8500_rreg(&pdev->dev, AB8500_SYS_CLK_CTRL); + data = ab8500_set_fld(data, AB8500_SYS_CLK_CTRL, TVOUT_CLK_VALID, + enable); + data = ab8500_set_fld(data, AB8500_SYS_CLK_CTRL, TVOUT_PLL_ENA, + enable); + ab8500_wreg(&pdev->dev, AB8500_SYS_CLK_CTRL, data); +} + +static u32 map_tv_std(enum ab8500_denc_TV_std std) +{ + switch (std) { + case TV_STD_PAL_BDGHI: + return AB8500_DENC_CONF0_STD_PAL_BDGHI; + case TV_STD_PAL_N: + return AB8500_DENC_CONF0_STD_PAL_N; + case TV_STD_PAL_M: + return AB8500_DENC_CONF0_STD_PAL_M; + case TV_STD_NTSC_M: + return AB8500_DENC_CONF0_STD_NTSC_M; + default: + return 0; + } +} + +static u32 map_cr_filter(enum ab8500_denc_cr_filter_bandwidth bw) +{ + switch (bw) { + case TV_CR_NTSC_LOW_DEF_FILTER: + return AB8500_DENC_CONF1_FLT_1_1MHZ; + case TV_CR_PAL_LOW_DEF_FILTER: + return AB8500_DENC_CONF1_FLT_1_3MHZ; + case TV_CR_NTSC_HIGH_DEF_FILTER: + return AB8500_DENC_CONF1_FLT_1_6MHZ; + case TV_CR_PAL_HIGH_DEF_FILTER: + return AB8500_DENC_CONF1_FLT_1_9MHZ; + 
default: + return 0; + } +} + +static u32 map_phase_rst_mode(enum ab8500_denc_phase_reset_mode mode) +{ + switch (mode) { + case TV_PHASE_RST_MOD_DISABLE: + return AB8500_DENC_CONF8_PH_RST_MODE_DISABLED; + case TV_PHASE_RST_MOD_FROM_PHASE_BUF: + return AB8500_DENC_CONF8_PH_RST_MODE_UPDATE_FROM_PHASE_BUF; + case TV_PHASE_RST_MOD_FROM_INC_DFS: + return AB8500_DENC_CONF8_PH_RST_MODE_UPDATE_FROM_INC_DFS; + case TV_PHASE_RST_MOD_RST: + return AB8500_DENC_CONF8_PH_RST_MODE_RESET; + default: + return 0; + } +} + +static u32 map_plug_time(enum ab8500_denc_plug_time time) +{ + switch (time) { + case TV_PLUG_TIME_0_5S: + return AB8500_TVOUT_CTRL_PLUG_TV_TIME_0_5S; + case TV_PLUG_TIME_1S: + return AB8500_TVOUT_CTRL_PLUG_TV_TIME_1S; + case TV_PLUG_TIME_1_5S: + return AB8500_TVOUT_CTRL_PLUG_TV_TIME_1_5S; + case TV_PLUG_TIME_2S: + return AB8500_TVOUT_CTRL_PLUG_TV_TIME_2S; + case TV_PLUG_TIME_2_5S: + return AB8500_TVOUT_CTRL_PLUG_TV_TIME_2_5S; + case TV_PLUG_TIME_3S: + return AB8500_TVOUT_CTRL_PLUG_TV_TIME_3S; + default: + return 0; + } +} + +struct platform_device *ab8500_denc_get_device(void) +{ + struct list_head *element; + struct device_usage *device_data; + + pr_debug("%s\n", __func__); + list_for_each(element, &device_list) { + device_data = list_entry(element, struct device_usage, list); + if (!device_data->taken) { + device_data->taken = true; + return device_data->pdev; + } + } + return NULL; +} +EXPORT_SYMBOL(ab8500_denc_get_device); + +void ab8500_denc_put_device(struct platform_device *pdev) +{ + struct list_head *element; + struct device_usage *device_data; + + AB8500_DENC_TRACE(pdev); + list_for_each(element, &device_list) { + device_data = list_entry(element, struct device_usage, list); + if (device_data->pdev == pdev) + device_data->taken = false; + } +} +EXPORT_SYMBOL(ab8500_denc_put_device); + +void ab8500_denc_reset(struct platform_device *pdev, bool hard) +{ + AB8500_DENC_TRACE(pdev); + if (hard) { + u8 data = ab8500_rreg(&pdev->dev, AB8500_CTRL3); + /* reset 
start */ + ab8500_wreg(&pdev->dev, AB8500_CTRL3, + ab8500_set_fld(data, AB8500_CTRL3, RESET_DENC_N, 0) + ); + /* reset done */ + ab8500_wreg(&pdev->dev, AB8500_CTRL3, + ab8500_set_fld(data, AB8500_CTRL3, RESET_DENC_N, 1) + ); + } else { + ab8500_wr_fld(&pdev->dev, AB8500_DENC_CONF6, SOFT_RESET, 1); + mdelay(10); + } +} +EXPORT_SYMBOL(ab8500_denc_reset); + +void ab8500_denc_power_up(struct platform_device *pdev) +{ + setup_27mhz(pdev, true); +} +EXPORT_SYMBOL(ab8500_denc_power_up); + +void ab8500_denc_power_down(struct platform_device *pdev) +{ + setup_27mhz(pdev, false); +} +EXPORT_SYMBOL(ab8500_denc_power_down); + +void ab8500_denc_conf(struct platform_device *pdev, + struct ab8500_denc_conf *conf) +{ + u8 data; + + AB8500_DENC_TRACE(pdev); + + ab8500_wreg(&pdev->dev, AB8500_DENC_CONF0, + AB8500_VAL2REG(AB8500_DENC_CONF0, STD, map_tv_std(conf->TV_std)) + | + AB8500_VAL2REG(AB8500_DENC_CONF0, SYNC, + conf->test_pattern ? AB8500_DENC_CONF0_SYNC_AUTO_TEST : + AB8500_DENC_CONF0_SYNC_F_BASED_SLAVE + ) + ); + ab8500_wreg(&pdev->dev, AB8500_DENC_CONF1, + AB8500_VAL2REG(AB8500_DENC_CONF1, BLK_LI, + !conf->partial_blanking) + | + AB8500_VAL2REG(AB8500_DENC_CONF1, FLT, + map_cr_filter(conf->cr_filter)) + | + AB8500_VAL2REG(AB8500_DENC_CONF1, CO_KI, conf->suppress_col) + | + AB8500_VAL2REG(AB8500_DENC_CONF1, SETUP_MAIN, + conf->black_level_setup) + /* TODO: handle cc field: set to 0 now */ + ); + + data = ab8500_rreg(&pdev->dev, AB8500_DENC_CONF2); + data = ab8500_set_fld(data, AB8500_DENC_CONF2, N_INTRL, + conf->progressive); + ab8500_wreg(&pdev->dev, AB8500_DENC_CONF2, data); + + ab8500_wreg(&pdev->dev, AB8500_DENC_CONF8, + AB8500_VAL2REG(AB8500_DENC_CONF8, PH_RST_MODE, + map_phase_rst_mode(conf->phase_reset_mode)) + | + AB8500_VAL2REG(AB8500_DENC_CONF8, VAL_422_MUX, + conf->act_output) + | + AB8500_VAL2REG(AB8500_DENC_CONF8, BLK_ALL, + conf->blank_all) + ); + data = ab8500_rreg(&pdev->dev, AB8500_TVOUT_CTRL); + data = ab8500_set_fld(data, AB8500_TVOUT_CTRL, DAC_CTRL0, + 
conf->dac_enable); + data = ab8500_set_fld(data, AB8500_TVOUT_CTRL, DAC_CTRL1, + conf->act_dc_output); + ab8500_wreg(&pdev->dev, AB8500_TVOUT_CTRL, data); + + /* no support for DDR in early versions */ + if (AB8500_REG2VAL(AB8500_REV, FULL_MASK, + ab8500_rreg(&pdev->dev, AB8500_REV)) > 0) + ab8500_denc_conf_ddr(pdev); +} +EXPORT_SYMBOL(ab8500_denc_conf); + +void ab8500_denc_conf_plug_detect(struct platform_device *pdev, + bool enable, bool load_RC, + enum ab8500_denc_plug_time time) +{ + u8 data; + + AB8500_DENC_TRACE(pdev); + data = ab8500_rreg(&pdev->dev, AB8500_TVOUT_CTRL); + data = ab8500_set_fld(data, AB8500_TVOUT_CTRL, TV_PLUG_ON, enable); + data = ab8500_set_fld(data, AB8500_TVOUT_CTRL, TV_LOAD_RC, load_RC); + data = ab8500_set_fld(data, AB8500_TVOUT_CTRL, PLUG_TV_TIME, + map_plug_time(time)); + ab8500_wreg(&pdev->dev, AB8500_TVOUT_CTRL, data); +} +EXPORT_SYMBOL(ab8500_denc_conf_plug_detect); + +void ab8500_denc_mask_int_plug_det(struct platform_device *pdev, bool plug, + bool unplug) +{ + u8 data = ab8500_rreg(&pdev->dev, AB8500_IT_MASK1); + + AB8500_DENC_TRACE(pdev); + data = ab8500_set_fld(data, AB8500_IT_MASK1, PLUG_TV_DET, plug); + data = ab8500_set_fld(data, AB8500_IT_MASK1, UNPLUG_TV_DET, unplug); + ab8500_wreg(&pdev->dev, AB8500_IT_MASK1, data); +} +EXPORT_SYMBOL(ab8500_denc_mask_int_plug_det); + +static void ab8500_denc_conf_ddr(struct platform_device *pdev) +{ + struct ab8500_platform_data *core_pdata; + struct ab8500_denc_platform_data *denc_pdata; + + AB8500_DENC_TRACE(pdev); + core_pdata = dev_get_platdata(pdev->dev.parent); + denc_pdata = core_pdata->denc; + ab8500_wreg(&pdev->dev, AB8500_TVOUT_CTRL2, + AB8500_VAL2REG(AB8500_TVOUT_CTRL2, + DENC_DDR, denc_pdata->ddr_enable) | + AB8500_VAL2REG(AB8500_TVOUT_CTRL2, SWAP_DDR_DATA_IN, + denc_pdata->ddr_little_endian)); +} + +#ifdef CONFIG_DEBUG_FS +static int debugfs_ab8500_open_file(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +#define 
DEBUG_BUF_SIZE 900 + +#define AB8500_GPIO_DIR5 0x1014 +#define AB8500_GPIO_DIR5_35_SHIFT 2 +#define AB8500_GPIO_DIR5_35_MASK (1 << AB8500_GPIO_DIR5_35_SHIFT) +#define AB8500_GPIO_OUT5 0x1024 +#define AB8500_GPIO_OUT5_35_SHIFT 2 +#define AB8500_GPIO_OUT5_35_MASK (1 << AB8500_GPIO_OUT5_35_SHIFT) +#define AB8500_GPIO_OUT5_35_VIDEO 0 +#define AB8500_GPIO_OUT5_35_AUDIO 1 +#define AB8500_GPIO_NPUD5 0x1034 +#define AB8500_GPIO_NPUD5_35_SHIFT 2 +#define AB8500_GPIO_NPUD5_35_MASK (1 << AB8500_GPIO_NPUD5_35_SHIFT) +#define AB8500_GPIO_NPUD5_35_ACTIVE 0 +#define AB8500_GPIO_NPUD5_35_INACTIVE 1 + +static ssize_t debugfs_ab8500_dump_regs(struct file *file, char __user *buf, + size_t count, loff_t *f_pos) +{ + int ret = 0; + size_t data_size = 0; + char buffer[DEBUG_BUF_SIZE]; + struct device *dev = file->private_data; + + data_size += sprintf(buffer + data_size, + "AB8500 DENC registers:\n" + "------Regulators etc ----------\n" + "CTRL3 : 0x%04x = 0x%02x\n" + "SYSULPCLK_CONF: 0x%04x = 0x%02x\n" + "SYSCLK_CTRL : 0x%04x = 0x%02x\n" + "REGU_MISC1 : 0x%04x = 0x%02x\n" + "VAUX12_REGU : 0x%04x = 0x%02x\n" + "VAUX1_SEL1 : 0x%04x = 0x%02x\n" + "------TVout only --------------\n" + "DENC_CONF0 : 0x%04x = 0x%02x\n" + "DENC_CONF1 : 0x%04x = 0x%02x\n" + "DENC_CONF2 : 0x%04x = 0x%02x\n" + "DENC_CONF6 : 0x%04x = 0x%02x\n" + "DENC_CONF8 : 0x%04x = 0x%02x\n" + "TVOUT_CTRL : 0x%04x = 0x%02x\n" + "TVOUT_CTRL2 : 0x%04x = 0x%02x\n" + "IT_MASK1 : 0x%04x = 0x%02x\n" + "------AV connector-------------\n" + "GPIO_DIR5 : 0x%04x = 0x%02x\n" + "GPIO_OUT5 : 0x%04x = 0x%02x\n" + "GPIO_NPUD5 : 0x%04x = 0x%02x\n" + , + AB8500_CTRL3, ab8500_rreg(dev, AB8500_CTRL3), + AB8500_SYS_ULP_CLK_CONF, ab8500_rreg(dev, + AB8500_SYS_ULP_CLK_CONF), + AB8500_SYS_CLK_CTRL, ab8500_rreg(dev, AB8500_SYS_CLK_CTRL), + AB8500_REGU_MISC1, ab8500_rreg(dev, AB8500_REGU_MISC1), + AB8500_VAUX12_REGU, ab8500_rreg(dev, AB8500_VAUX12_REGU), + AB8500_VAUX1_SEL, ab8500_rreg(dev, AB8500_VAUX1_SEL), + AB8500_DENC_CONF0, ab8500_rreg(dev, 
AB8500_DENC_CONF0), + AB8500_DENC_CONF1, ab8500_rreg(dev, AB8500_DENC_CONF1), + AB8500_DENC_CONF2, ab8500_rreg(dev, AB8500_DENC_CONF2), + AB8500_DENC_CONF6, ab8500_rreg(dev, AB8500_DENC_CONF6), + AB8500_DENC_CONF8, ab8500_rreg(dev, AB8500_DENC_CONF8), + AB8500_TVOUT_CTRL, ab8500_rreg(dev, AB8500_TVOUT_CTRL), + AB8500_TVOUT_CTRL2, ab8500_rreg(dev, AB8500_TVOUT_CTRL2), + AB8500_IT_MASK1, ab8500_rreg(dev, AB8500_IT_MASK1), + AB8500_GPIO_DIR5, ab8500_rreg(dev, AB8500_GPIO_DIR5), + AB8500_GPIO_OUT5, ab8500_rreg(dev, AB8500_GPIO_OUT5), + AB8500_GPIO_NPUD5, ab8500_rreg(dev, AB8500_GPIO_NPUD5) + ); + if (data_size >= DEBUG_BUF_SIZE) { + printk(KERN_EMERG "AB8500 DENC: Buffer overrun\n"); + ret = -EINVAL; + goto out; + } + + /* check if read done */ + if (*f_pos > data_size) + goto out; + + if (*f_pos + count > data_size) + count = data_size - *f_pos; + + if (copy_to_user(buf, buffer + *f_pos, count)) + ret = -EINVAL; + *f_pos += count; + ret = count; +out: + return ret; +} +#endif /* CONFIG_DEBUG_FS */ + +/* Module init */ +static int __init ab8500_denc_init(void) +{ + return platform_driver_register(&ab8500_denc_driver); +} +module_init(ab8500_denc_init); + +static void __exit ab8500_denc_exit(void) +{ + platform_driver_unregister(&ab8500_denc_driver); +} +module_exit(ab8500_denc_exit); + +MODULE_AUTHOR("Marcel Tunnissen <marcel.tuennissen@stericsson.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ST-Ericsson AB8500 DENC driver"); diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c index c39fc716e1d..d06f4826619 100644 --- a/drivers/mfd/ab8500-gpadc.c +++ b/drivers/mfd/ab8500-gpadc.c @@ -12,6 +12,7 @@ #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/delay.h> +#include <linux/pm_runtime.h> #include <linux/platform_device.h> #include <linux/completion.h> #include <linux/regulator/consumer.h> @@ -82,6 +83,11 @@ /* This is used to not lose precision when dividing to get gain and offset */ #define CALIB_SCALE 1000 +/* Time in ms 
before disabling regulator */ +#define GPADC_AUDOSUSPEND_DELAY 1 + +#define CONVERSION_TIME 500 /* ms */ + enum cal_channels { ADC_INPUT_VMAIN = 0, ADC_INPUT_BTEMP, @@ -102,10 +108,10 @@ struct adc_cal_data { /** * struct ab8500_gpadc - AB8500 GPADC device information - * @chip_id ABB chip id * @dev: pointer to the struct device * @node: a list of AB8500 GPADCs, hence prepared for reentrance + * @parent: pointer to the struct ab8500 * @ab8500_gpadc_complete: pointer to the struct completion, to indicate * the completion of gpadc conversion * @ab8500_gpadc_lock: structure of type mutex @@ -114,9 +120,9 @@ struct adc_cal_data { * @cal_data array of ADC calibration data structs */ struct ab8500_gpadc { - u8 chip_id; struct device *dev; struct list_head node; + struct ab8500 *parent; struct completion ab8500_gpadc_complete; struct mutex ab8500_gpadc_lock; struct regulator *regu; @@ -282,8 +288,9 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel) return -ENODEV; mutex_lock(&gpadc->ab8500_gpadc_lock); + /* Enable VTVout LDO this is required for GPADC */ - regulator_enable(gpadc->regu); + pm_runtime_get_sync(gpadc->dev); /* Check if ADC is not busy, lock and proceed */ do { @@ -332,7 +339,7 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel) EN_BUF | EN_ICHAR); break; case BTEMP_BALL: - if (gpadc->chip_id >= AB8500_CUT3P0) { + if (!is_ab8500_2p0_or_earlier(gpadc->parent)) { /* Turn on btemp pull-up on ABB 3.0 */ ret = abx500_mask_and_set_register_interruptible( gpadc->dev, @@ -344,7 +351,7 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel) * Delay might be needed for ABB8500 cut 3.0, if not, remove * when hardware will be availible */ - msleep(1); + mdelay(1); break; } /* Intentional fallthrough */ @@ -367,7 +374,8 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel) goto out; } /* wait for completion of conversion */ - if (!wait_for_completion_timeout(&gpadc->ab8500_gpadc_complete, 2*HZ)) { + if 
(!wait_for_completion_timeout(&gpadc->ab8500_gpadc_complete, + msecs_to_jiffies(CONVERSION_TIME))) { dev_err(gpadc->dev, "timeout: didn't receive GPADC conversion interrupt\n"); ret = -EINVAL; @@ -397,8 +405,10 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel) dev_err(gpadc->dev, "gpadc_conversion: disable gpadc failed\n"); goto out; } - /* Disable VTVout LDO this is required for GPADC */ - regulator_disable(gpadc->regu); + + pm_runtime_mark_last_busy(gpadc->dev); + pm_runtime_put_autosuspend(gpadc->dev); + mutex_unlock(&gpadc->ab8500_gpadc_lock); return (high_data << 8) | low_data; @@ -412,7 +422,9 @@ out: */ (void) abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC, AB8500_GPADC_CTRL1_REG, DIS_GPADC); - regulator_disable(gpadc->regu); + + pm_runtime_put(gpadc->dev); + mutex_unlock(&gpadc->ab8500_gpadc_lock); dev_err(gpadc->dev, "gpadc_conversion: Failed to AD convert channel %d\n", channel); @@ -571,6 +583,28 @@ static void ab8500_gpadc_read_calibration_data(struct ab8500_gpadc *gpadc) gpadc->cal_data[ADC_INPUT_VBAT].offset); } +static int ab8500_gpadc_runtime_suspend(struct device *dev) +{ + struct ab8500_gpadc *gpadc = dev_get_drvdata(dev); + + regulator_disable(gpadc->regu); + return 0; +} + +static int ab8500_gpadc_runtime_resume(struct device *dev) +{ + struct ab8500_gpadc *gpadc = dev_get_drvdata(dev); + + regulator_enable(gpadc->regu); + return 0; +} + +static int ab8500_gpadc_runtime_idle(struct device *dev) +{ + pm_runtime_suspend(dev); + return 0; +} + static int __devinit ab8500_gpadc_probe(struct platform_device *pdev) { int ret = 0; @@ -591,6 +625,7 @@ static int __devinit ab8500_gpadc_probe(struct platform_device *pdev) } gpadc->dev = &pdev->dev; + gpadc->parent = dev_get_drvdata(pdev->dev.parent); mutex_init(&gpadc->ab8500_gpadc_lock); /* Initialize completion used to notify completion of conversion */ @@ -606,14 +641,6 @@ static int __devinit ab8500_gpadc_probe(struct platform_device *pdev) goto fail; } - /* Get Chip ID 
of the ABB ASIC */ - ret = abx500_get_chip_id(gpadc->dev); - if (ret < 0) { - dev_err(gpadc->dev, "failed to get chip ID\n"); - goto fail_irq; - } - gpadc->chip_id = (u8) ret; - /* VTVout LDO used to power up ab8500-GPADC */ gpadc->regu = regulator_get(&pdev->dev, "vddadc"); if (IS_ERR(gpadc->regu)) { @@ -621,6 +648,16 @@ static int __devinit ab8500_gpadc_probe(struct platform_device *pdev) dev_err(gpadc->dev, "failed to get vtvout LDO\n"); goto fail_irq; } + + platform_set_drvdata(pdev, gpadc); + + regulator_enable(gpadc->regu); + + pm_runtime_set_autosuspend_delay(gpadc->dev, GPADC_AUDOSUSPEND_DELAY); + pm_runtime_use_autosuspend(gpadc->dev); + pm_runtime_set_active(gpadc->dev); + pm_runtime_enable(gpadc->dev); + ab8500_gpadc_read_calibration_data(gpadc); list_add_tail(&gpadc->node, &ab8500_gpadc_list); dev_dbg(gpadc->dev, "probe success\n"); @@ -641,19 +678,34 @@ static int __devexit ab8500_gpadc_remove(struct platform_device *pdev) list_del(&gpadc->node); /* remove interrupt - completion of Sw ADC conversion */ free_irq(gpadc->irq, gpadc); - /* disable VTVout LDO that is being used by GPADC */ - regulator_put(gpadc->regu); + + pm_runtime_get_sync(gpadc->dev); + pm_runtime_disable(gpadc->dev); + + regulator_disable(gpadc->regu); + + pm_runtime_set_suspended(gpadc->dev); + + pm_runtime_put_noidle(gpadc->dev); + kfree(gpadc); gpadc = NULL; return 0; } +static const struct dev_pm_ops ab8500_gpadc_pm_ops = { + SET_RUNTIME_PM_OPS(ab8500_gpadc_runtime_suspend, + ab8500_gpadc_runtime_resume, + ab8500_gpadc_runtime_idle) +}; + static struct platform_driver ab8500_gpadc_driver = { .probe = ab8500_gpadc_probe, .remove = __devexit_p(ab8500_gpadc_remove), .driver = { .name = "ab8500-gpadc", .owner = THIS_MODULE, + .pm = &ab8500_gpadc_pm_ops, }, }; diff --git a/drivers/mfd/ab8500-i2c.c b/drivers/mfd/ab8500-i2c.c index b83045f102b..5ee90fd125e 100644 --- a/drivers/mfd/ab8500-i2c.c +++ b/drivers/mfd/ab8500-i2c.c @@ -13,6 +13,7 @@ #include <linux/mfd/abx500/ab8500.h> #include 
<linux/mfd/dbx500-prcmu.h> + static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data) { int ret; diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c index c28d4eb1eff..d5865d41514 100644 --- a/drivers/mfd/ab8500-sysctrl.c +++ b/drivers/mfd/ab8500-sysctrl.c @@ -7,12 +7,114 @@ #include <linux/err.h> #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/reboot.h> +#include <linux/signal.h> +#include <linux/power_supply.h> #include <linux/mfd/abx500.h> #include <linux/mfd/abx500/ab8500.h> #include <linux/mfd/abx500/ab8500-sysctrl.h> +#include <linux/time.h> +#include <linux/hwmon.h> static struct device *sysctrl_dev; +void ab8500_power_off(void) +{ + struct ab8500_platform_data *plat; + struct timespec ts; + sigset_t old; + sigset_t all; + static char *pss[] = {"ab8500_ac", "ab8500_usb"}; + int i; + bool charger_present = false; + union power_supply_propval val; + struct power_supply *psy; + int ret; + + /* + * If we have a charger connected and we're powering off, + * reboot into charge-only mode. + */ + + for (i = 0; i < ARRAY_SIZE(pss); i++) { + psy = power_supply_get_by_name(pss[i]); + if (!psy) + continue; + + ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val); + + if (!ret && val.intval) { + charger_present = true; + break; + } + } + + if (!charger_present) + goto shutdown; + + /* Check if battery is known */ + psy = power_supply_get_by_name("ab8500_btemp"); + if (psy) { + ret = psy->get_property(psy, POWER_SUPPLY_PROP_TECHNOLOGY, + &val); + if (!ret && val.intval != POWER_SUPPLY_TECHNOLOGY_UNKNOWN) { + printk(KERN_INFO + "Charger \"%s\" is connected with known battery." 
+ " Rebooting.\n", + pss[i]); + machine_restart("charging"); + } + } + +shutdown: + sigfillset(&all); + + plat = dev_get_platdata(sysctrl_dev->parent); + getnstimeofday(&ts); + if (!sigprocmask(SIG_BLOCK, &all, &old)) { + if (ts.tv_sec == 0 || + (ts.tv_sec - plat->thermal_set_time_sec > + plat->thermal_time_out)) + plat->thermal_power_off_pending = false; + if (!plat->thermal_power_off_pending) { + (void)ab8500_sysctrl_set(AB8500_STW4500CTRL1, + AB8500_STW4500CTRL1_SWOFF | + AB8500_STW4500CTRL1_SWRESET4500N); + (void)sigprocmask(SIG_SETMASK, &old, NULL); + } else { + (void)ab8500_sysctrl_set(AB8500_STW4500CTRL1, + AB8500_STW4500CTRL1_THDB8500SWOFF | + AB8500_STW4500CTRL1_SWRESET4500N); + (void)sigprocmask(SIG_SETMASK, &old, NULL); + } + } +} + +static int ab8500_notifier_call(struct notifier_block *this, + unsigned long val, void *data) +{ + struct ab8500_platform_data *plat; + static struct timespec ts; + if (sysctrl_dev == NULL) + return -EAGAIN; + + plat = dev_get_platdata(sysctrl_dev->parent); + if (val) { + getnstimeofday(&ts); + plat->thermal_set_time_sec = ts.tv_sec; + plat->thermal_power_off_pending = true; + } else { + plat->thermal_set_time_sec = 0; + plat->thermal_power_off_pending = false; + } + return 0; +} + +static struct notifier_block ab8500_notifier = { + .notifier_call = ab8500_notifier_call, +}; + static inline bool valid_bank(u8 bank) { return ((bank == AB8500_SYS_CTRL1_BLOCK) || @@ -33,6 +135,7 @@ int ab8500_sysctrl_read(u16 reg, u8 *value) return abx500_get_register_interruptible(sysctrl_dev, bank, (u8)(reg & 0xFF), value); } +EXPORT_SYMBOL(ab8500_sysctrl_read); int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value) { @@ -48,10 +151,42 @@ int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value) return abx500_mask_and_set_register_interruptible(sysctrl_dev, bank, (u8)(reg & 0xFF), mask, value); } +EXPORT_SYMBOL(ab8500_sysctrl_write); static int __devinit ab8500_sysctrl_probe(struct platform_device *pdev) { + struct ab8500_platform_data *plat; + 
struct ab8500_sysctrl_platform_data *pdata; + sysctrl_dev = &pdev->dev; + plat = dev_get_platdata(pdev->dev.parent); + if (plat->pm_power_off) + pm_power_off = ab8500_power_off; + hwmon_notifier_register(&ab8500_notifier); + + pdata = plat->sysctrl; + + if (pdata) { + int ret; + int i; + int j; + for (i = AB8500_SYSCLKREQ1RFCLKBUF; + i <= AB8500_SYSCLKREQ8RFCLKBUF; i++) { + j = i - AB8500_SYSCLKREQ1RFCLKBUF; + ret = ab8500_sysctrl_write(i, 0xff, + pdata->initial_req_buf_config[j]); + dev_dbg(&pdev->dev, + "Setting SysClkReq%dRfClkBuf 0x%X\n", + j + 1, + pdata->initial_req_buf_config[j]); + if (ret < 0) { + dev_err(&pdev->dev, + "unable to set sysClkReq%dRfClkBuf: " + "%d\n", j + 1, ret); + } + } + } + return 0; } diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c index 7ce65f49480..9818afba251 100644 --- a/drivers/mfd/abx500-core.c +++ b/drivers/mfd/abx500-core.c @@ -153,6 +153,22 @@ int abx500_startup_irq_enabled(struct device *dev, unsigned int irq) } EXPORT_SYMBOL(abx500_startup_irq_enabled); +void abx500_dump_all_banks(void) +{ + struct abx500_ops *ops; + struct device dummy_child = {0}; + struct abx500_device_entry *dev_entry; + + list_for_each_entry(dev_entry, &abx500_list, list) { + dummy_child.parent = dev_entry->dev; + ops = &dev_entry->ops; + + if ((ops != NULL) && (ops->dump_all_banks != NULL)) + ops->dump_all_banks(&dummy_child); + } +} +EXPORT_SYMBOL(abx500_dump_all_banks); + MODULE_AUTHOR("Mattias Wallin <mattias.wallin@stericsson.com>"); MODULE_DESCRIPTION("ABX500 core driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/db5500-prcmu-regs.h b/drivers/mfd/db5500-prcmu-regs.h new file mode 100644 index 00000000000..0428b5e95ae --- /dev/null +++ b/drivers/mfd/db5500-prcmu-regs.h @@ -0,0 +1,141 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * License Terms: GNU General Public License v2 + */ + +#ifndef __MACH_PRCMU_REGS_DB5500_H +#define __MACH_PRCMU_REGS_DB5500_H + +#define BITS(_start, _end) ((BIT(_end) - BIT(_start)) + 
BIT(_end)) + +#define PRCM_TCR 0x1C8 +#define PRCM_TCR_TENSEL_MASK BITS(0, 7) +#define PRCM_TCR_STOP_TIMERS BIT(16) +#define PRCM_TCR_DOZE_MODE BIT(17) + +/* PRCMU HW semaphore */ +#define PRCM_SEM 0x400 +#define PRCM_SEM_PRCM_SEM BIT(0) + +#define DB5500_PRCM_ACLK_MGT 0x004 +#define DB5500_PRCM_SVACLK_MGT 0x008 +#define DB5500_PRCM_SIACLK_MGT 0x00C +#define DB5500_PRCM_SGACLK_MGT 0x014 +#define DB5500_PRCM_UARTCLK_MGT 0x018 +#define DB5500_PRCM_MSP02CLK_MGT 0x01C +#define DB5500_PRCM_I2CCLK_MGT 0x020 +#define DB5500_PRCM_SDMMCCLK_MGT 0x024 +#define DB5500_PRCM_PER1CLK_MGT 0x02C +#define DB5500_PRCM_PER2CLK_MGT 0x030 +#define DB5500_PRCM_PER3CLK_MGT 0x034 +#define DB5500_PRCM_PER5CLK_MGT 0x038 +#define DB5500_PRCM_PER6CLK_MGT 0x03C +#define DB5500_PRCM_IRDACLK_MGT 0x040 +#define DB5500_PRCM_PWMCLK_MGT 0x044 +#define DB5500_PRCM_SPARE1CLK_MGT 0x048 +#define DB5500_PRCM_IRRCCLK_MGT 0x04C +#define DB5500_PRCM_HDMICLK_MGT 0x058 +#define DB5500_PRCM_APEATCLK_MGT 0x05C +#define DB5500_PRCM_APETRACECLK_MGT 0x060 +#define DB5500_PRCM_MCDECLK_MGT 0x064 +#define DB5500_PRCM_DSIALTCLK_MGT 0x06C +#define DB5500_PRCM_DMACLK_MGT 0x074 +#define DB5500_PRCM_B2R2CLK_MGT 0x078 +#define DB5500_PRCM_TVCLK_MGT 0x07C +#define DB5500_PRCM_RNGCLK_MGT 0x284 + +#define PRCM_CLK_MGT_CLKPLLDIV_MASK BITS(0, 4) +#define PRCM_CLK_MGT_CLKPLLDIV_SHIFT 0 +#define PRCM_CLK_MGT_CLKPLLSW_MASK BITS(5, 7) +#define PRCM_CLK_MGT_CLKEN BIT(8) + +#define PRCM_ARM_IT1_CLEAR 0x48C +#define PRCM_ARM_IT1_VAL 0x494 + +/* CPU mailbox registers */ +#define PRCM_MBOX_CPU_VAL 0x0FC +#define PRCM_MBOX_CPU_SET 0x100 + +/* System reset register */ +#define PRCM_APE_SOFTRST 0x228 + +/* PRCMU clock/PLL/reset registers */ +#define PRCM_PLLDSI_FREQ 0x500 +#define PRCM_PLLDSI_ENABLE 0x504 +#define PRCM_PLLDSI_LOCKP 0x508 +#define PRCM_DSI_PLLOUT_SEL 0x530 +#define PRCM_DSITVCLK_DIV 0x52C +#define PRCM_APE_RESETN_SET 0x1E4 +#define PRCM_APE_RESETN_CLR 0x1E8 + +/* CLKOUTx SEL0 settings */ +#define CLKOUT_SEL0_REF_CLK 0x01 /* 
0b 0001 */ +#define CLKOUT_SEL0_RTC_CLK0 0x02 /* 0b 0010 */ +#define CLKOUT_SEL0_ULP_CLK 0x04 /* 0b 0100 */ +#define CLKOUT_SEL0_SEL_CLK 0x08 /* 0b 1000 */ + +/* CLKOUTx SEL settings */ +#define CLKOUT_SEL_STATIC0 0x0001 /* 0b 00 0000 0001 */ +#define CLKOUT_SEL_REFCLK 0x0002 /* 0b 00 0000 0010 */ +#define CLKOUT_SEL_ULPCLK 0x0004 /* 0b 00 0000 0100 */ +#define CLKOUT_SEL_ARMCLK 0x0008 /* 0b 00 0000 1000 */ +#define CLKOUT_SEL_SYSACC0CLK 0x0010 /* 0b 00 0001 0000 */ +#define CLKOUT_SEL_SOC0PLLCLK 0x0020 /* 0b 00 0010 0000 */ +#define CLKOUT_SEL_SOC1PLLCLK 0x0040 /* 0b 00 0100 0000 */ +#define CLKOUT_SEL_DDRPLLCLK 0x0080 /* 0b 00 1000 0000 */ +#define CLKOUT_SEL_TVCLK 0x0100 /* 0b 01 0000 0000 */ +#define CLKOUT_SEL_IRDACLK 0x0200 /* 0b 10 0000 0000 */ + +/* CLKOUTx dividers */ +#define CLKOUT_DIV_2 0x00 /* 0b 000 */ +#define CLKOUT_DIV_4 0x01 /* 0b 001 */ +#define CLKOUT_DIV_8 0x02 /* 0b 010 */ +#define CLKOUT_DIV_16 0x03 /* 0b 011 */ +#define CLKOUT_DIV_32 0x04 /* 0b 100 */ +#define CLKOUT_DIV_64 0x05 /* 0b 101 */ +/* Values 0x06 and 0x07 will also set the CLKOUTx divider to 64. 
*/ + +/* PRCM_CLKOCR CLKOUTx Control registers */ +#define PRCM_CLKOCR 0x1CC +#define PRCM_CLKOCR_CLKOUT0_SEL0_SHIFT 0 +#define PRCM_CLKOCR_CLKOUT0_SEL0_MASK BITS(0, 3) +#define PRCM_CLKOCR_CLKOUT0_SEL_SHIFT 4 +#define PRCM_CLKOCR_CLKOUT0_SEL_MASK BITS(4, 13) +#define PRCM_CLKOCR_CLKOUT1_SEL0_SHIFT 16 +#define PRCM_CLKOCR_CLKOUT1_SEL0_MASK BITS(16, 19) +#define PRCM_CLKOCR_CLKOUT1_SEL_SHIFT 20 +#define PRCM_CLKOCR_CLKOUT1_SEL_MASK BITS(20, 29) + +/* PRCM_CLKODIV CLKOUTx Dividers */ +#define PRCM_CLKODIV 0x188 +#define PRCM_CLKODIV_CLKOUT0_DIV_SHIFT 0 +#define PRCM_CLKODIV_CLKOUT0_DIV_MASK BITS(0, 2) +#define PRCM_CLKODIV_CLKOUT1_DIV_SHIFT 16 +#define PRCM_CLKODIV_CLKOUT1_DIV_MASK BITS(16, 18) + +#define PRCM_MMIP_LS_CLAMP_SET 0x420 +#define PRCM_MMIP_LS_CLAMP_CLR 0x424 +#define PRCM_DDR_SUBSYS_APE_MINBW 0x438 + +/* Miscellaneous unit registers */ +#define PRCM_DSI_SW_RESET 0x324 +#define PRCM_RESOUTN_SET_OFFSET 0x214 +#define PRCM_RESOUTN_CLR_OFFSET 0x218 + +/* APE - Modem Registers */ +#define PRCM_HOSTACCESS_REQ 0x334 +/* APE - Modem register bit maipulation */ +#define PRCM_HOSTACCESS_REQ_BIT BIT(0) +#define PRCM_APE_ACK 0x49c +#define PRCM_APE_ACK_BIT 0x01 + +/* Watchdog - mtimer registers */ +#define PRCM_TIMER0_RTOS_COMP1_OFFSET 0x4C +#define PRCM_TIMER0_RTOS_COUNTER_OFFSET 0x40 +#define PRCM_TIMER0_IRQ_EN_SET_OFFSET 0x70 +#define PRCM_TIMER0_IRQ_EN_CLR_OFFSET 0x6C +#define PRCM_TIMER0_IRQ_RTOS1_SET 0x08 +#define PRCM_TIMER0_IRQ_RTOS1_CLR 0x08 + +#endif diff --git a/drivers/mfd/db5500-prcmu.c b/drivers/mfd/db5500-prcmu.c index bb115b2f04e..b106632d03c 100644 --- a/drivers/mfd/db5500-prcmu.c +++ b/drivers/mfd/db5500-prcmu.c @@ -19,12 +19,21 @@ #include <linux/irq.h> #include <linux/jiffies.h> #include <linux/bitops.h> +#include <linux/platform_device.h> +#include <linux/mfd/core.h> +#include <linux/regulator/db5500-prcmu.h> +#include <linux/regulator/machine.h> #include <linux/interrupt.h> #include <linux/mfd/dbx500-prcmu.h> #include <mach/hardware.h> #include 
<mach/irqs.h> #include <mach/db5500-regs.h> -#include "dbx500-prcmu-regs.h" +#include <mach/prcmu-debug.h> + +#include "db5500-prcmu-regs.h" + +#define PRCMU_FW_VERSION_OFFSET 0xA4 +#define PRCM_SW_RST_REASON (tcdm_base + 0xFF8) /* 2 bytes */ #define _PRCM_MB_HEADER (tcdm_base + 0xFE8) #define PRCM_REQ_MB0_HEADER (_PRCM_MB_HEADER + 0x0) @@ -64,6 +73,52 @@ #define PRCM_ACK_MB6 (tcdm_base + 0xF0C) #define PRCM_ACK_MB7 (tcdm_base + 0xF08) +/* Share info */ +#define PRCM_SHARE_INFO (tcdm_base + 0xEC8) + +#define PRCM_SHARE_INFO_HOTDOG (PRCM_SHARE_INFO + 62) + +/* Mailbox 0 REQs */ +#define PRCM_REQ_MB0_AP_POWER_STATE (PRCM_REQ_MB0 + 0x0) +#define PRCM_REQ_MB0_ULP_CLOCK_STATE (PRCM_REQ_MB0 + 0x1) +#define PRCM_REQ_MB0_AP_PLL_STATE (PRCM_REQ_MB0 + 0x2) +#define PRCM_REQ_MB0_DDR_STATE (PRCM_REQ_MB0 + 0x3) +#define PRCM_REQ_MB0_ESRAM0_STATE (PRCM_REQ_MB0 + 0x4) +#define PRCM_REQ_MB0_WAKEUP_DBB (PRCM_REQ_MB0 + 0x8) +#define PRCM_REQ_MB0_WAKEUP_ABB (PRCM_REQ_MB0 + 0xC) + +/* Mailbox 0 ACKs */ +#define PRCM_ACK_MB0_AP_PWRSTTR_STATUS (PRCM_ACK_MB0 + 0x0) +#define PRCM_ACK_MB0_READ_POINTER (PRCM_ACK_MB0 + 0x1) +#define PRCM_ACK_MB0_WAKEUP_0_DBB (PRCM_ACK_MB0 + 0x4) +#define PRCM_ACK_MB0_WAKEUP_0_ABB (PRCM_ACK_MB0 + 0x8) +#define PRCM_ACK_MB0_WAKEUP_1_DBB (PRCM_ACK_MB0 + 0x28) +#define PRCM_ACK_MB0_WAKEUP_1_ABB (PRCM_ACK_MB0 + 0x2C) +#define PRCM_ACK_MB0_EVENT_ABB_NUMBERS 20 + +/* Request mailbox 1 fields. 
*/ +#define PRCM_REQ_MB1_ARM_OPP (PRCM_REQ_MB1 + 0x0) +#define PRCM_REQ_MB1_APE_OPP (PRCM_REQ_MB1 + 0x1) + +/* Mailbox 1 ACKs */ +#define PRCM_ACK_MB1_CURRENT_ARM_OPP (PRCM_ACK_MB1 + 0x0) +#define PRCM_ACK_MB1_CURRENT_APE_OPP (PRCM_ACK_MB1 + 0x1) +#define PRCM_ACK_MB1_ARM_VOLT_STATUS (PRCM_ACK_MB1 + 0x2) +#define PRCM_ACK_MB1_APE_VOLT_STATUS (PRCM_ACK_MB1 + 0x3) + +/* Mailbox 2 REQs */ +#define PRCM_REQ_MB2_EPOD_CLIENT (PRCM_REQ_MB2 + 0x0) +#define PRCM_REQ_MB2_EPOD_STATE (PRCM_REQ_MB2 + 0x1) +#define PRCM_REQ_MB2_CLK_CLIENT (PRCM_REQ_MB2 + 0x2) +#define PRCM_REQ_MB2_CLK_STATE (PRCM_REQ_MB2 + 0x3) +#define PRCM_REQ_MB2_PLL_CLIENT (PRCM_REQ_MB2 + 0x4) +#define PRCM_REQ_MB2_PLL_STATE (PRCM_REQ_MB2 + 0x5) + +/* Mailbox 2 ACKs */ +#define PRCM_ACK_MB2_EPOD_STATUS (PRCM_ACK_MB2 + 0x2) +#define PRCM_ACK_MB2_CLK_STATUS (PRCM_ACK_MB2 + 0x6) +#define PRCM_ACK_MB2_PLL_STATUS (PRCM_ACK_MB2 + 0xA) + enum mb_return_code { RC_SUCCESS, RC_FAIL, @@ -71,12 +126,58 @@ enum mb_return_code { /* Mailbox 0 headers. */ enum mb0_header { - /* request */ - RMB0H_PWR_STATE_TRANS = 1, - RMB0H_WAKE_UP_CFG, - RMB0H_RD_WAKE_UP_ACK, /* acknowledge */ - AMB0H_WAKE_UP = 1, + MB0H_WAKE_UP = 0, + /* request */ + MB0H_PWR_STATE_TRANS, + MB0H_WAKE_UP_CFG, + MB0H_RD_WAKE_UP_ACK, +}; + +/* Mailbox 1 headers.*/ +enum mb1_header { + MB1H_ARM_OPP = 1, + MB1H_APE_OPP, + MB1H_ARM_APE_OPP, +}; + +/* Mailbox 2 headers. */ +enum mb2_header { + MB2H_EPOD_REQUEST = 1, + MB2H_CLK_REQUEST, + MB2H_PLL_REQUEST, +}; + +/* Mailbox 3 headers. 
*/ +enum mb3_header { + MB3H_REFCLK_REQUEST = 1, +}; + +enum sysclk_state { + SYSCLK_OFF, + SYSCLK_ON, +}; + +/* Mailbox 4 headers */ +enum mb4_header { + MB4H_CFG_HOTDOG = 7, + MB4H_CFG_HOTMON = 8, + MB4H_CFG_HOTPERIOD = 10, + MB4H_CGF_MODEM_RESET = 13, + MB4H_CGF_A9WDOG_EN_PREBARK = 14, + MB4H_CGF_A9WDOG_EN_NOPREBARK = 15, + MB4H_CGF_A9WDOG_DIS = 16, +}; + +/* Mailbox 4 ACK headers */ +enum mb4_ack_header { + MB4H_ACK_CFG_HOTDOG = 5, + MB4H_ACK_CFG_HOTMON = 6, + MB4H_ACK_CFG_HOTPERIOD = 8, + MB4H_ACK_CFG_MODEM_RESET = 11, + MB4H_ACK_CGF_A9WDOG_EN_PREBARK = 12, + MB4H_ACK_CGF_A9WDOG_EN_NOPREBARK = 13, + MB4H_ACK_CGF_A9WDOG_DIS = 14, }; /* Mailbox 5 headers. */ @@ -85,6 +186,69 @@ enum mb5_header { MB5H_I2C_READ, }; +enum db5500_arm_opp { + DB5500_ARM_100_OPP = 1, + DB5500_ARM_50_OPP, + DB5500_ARM_EXT_OPP, +}; + +enum db5500_ape_opp { + DB5500_APE_100_OPP = 1, + DB5500_APE_50_OPP +}; + +enum epod_state { + EPOD_OFF, + EPOD_ON, +}; +enum epod_onoffret_state { + EPOD_OOR_OFF, + EPOD_OOR_RET, + EPOD_OOR_ON, +}; +enum db5500_prcmu_pll { + DB5500_PLL_SOC0, + DB5500_PLL_SOC1, + DB5500_PLL_DDR, + DB5500_NUM_PLL_ID, +}; + +enum db5500_prcmu_clk { + DB5500_MSP1CLK, + DB5500_CDCLK, + DB5500_IRDACLK, + DB5500_TVCLK, + DB5500_NUM_CLK_CLIENTS, +}; + +enum on_off_ret { + OFF_ST, + RET_ST, + ON_ST, +}; + +enum db5500_ap_pwr_state { + DB5500_AP_SLEEP = 2, + DB5500_AP_DEEP_SLEEP, + DB5500_AP_IDLE, +}; + +/* Request mailbox 3 fields */ +#define PRCM_REQ_MB3_REFCLK_MGT (PRCM_REQ_MB3 + 0x0) + +/* Ack. mailbox 3 fields */ +#define PRCM_ACK_MB3_REFCLK_REQ (PRCM_ACK_MB3 + 0x0) + + +/* Request mailbox 4 fields */ +#define PRCM_REQ_MB4_HOTDOG_THRESHOLD (PRCM_REQ_MB4 + 32) +#define PRCM_REQ_MB4_HOT_PERIOD (PRCM_REQ_MB4 + 34) +#define PRCM_REQ_MB4_HOTMON_LOW (PRCM_REQ_MB4 + 36) +#define PRCM_REQ_MB4_HOTMON_HIGH (PRCM_REQ_MB4 + 38) + +/* Ack. mailbox 4 field */ +#define PRCM_ACK_MB4_REQUESTS (PRCM_ACK_MB4 + 0x0) + /* Request mailbox 5 fields. 
*/ #define PRCM_REQ_MB5_I2C_SLAVE (PRCM_REQ_MB5 + 0) #define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 1) @@ -105,11 +269,12 @@ enum mb5_header { #define PRCMU_RESET_DSIPLL 0x00004000 #define PRCMU_UNCLAMP_DSIPLL 0x00400800 -/* HDMI CLK MGT PLLSW=001 (PLLSOC0), PLLDIV=0x8, = 50 Mhz*/ -#define PRCMU_DSI_CLOCK_SETTING 0x00000128 +/* HDMI CLK MGT PLLSW=001 (PLLSOC0), PLLDIV=0xC, = 33.33 Mhz*/ +#define PRCMU_DSI_CLOCK_SETTING 0x0000012C /* TVCLK_MGT PLLSW=001 (PLLSOC0) PLLDIV=0x13, = 19.05 MHZ */ #define PRCMU_DSI_LP_CLOCK_SETTING 0x00000135 -#define PRCMU_PLLDSI_FREQ_SETTING 0x00020121 +/* PRCM_PLLDSI_FREQ R=4, N=1, D= 0x65 */ +#define PRCMU_PLLDSI_FREQ_SETTING 0x00040165 #define PRCMU_DSI_PLLOUT_SEL_SETTING 0x00000002 #define PRCMU_ENABLE_ESCAPE_CLOCK_DIV 0x03000201 #define PRCMU_DISABLE_ESCAPE_CLOCK_DIV 0x00000101 @@ -125,13 +290,176 @@ enum mb5_header { #define PRCMU_PLLDSI_LOCKP_LOCKED 0x3 /* + * Wakeups/IRQs + */ + +#define WAKEUP_BIT_RTC BIT(0) +#define WAKEUP_BIT_RTT0 BIT(1) +#define WAKEUP_BIT_RTT1 BIT(2) +#define WAKEUP_BIT_CD_IRQ BIT(3) +#define WAKEUP_BIT_SRP_TIM BIT(4) +#define WAKEUP_BIT_APE_REQ BIT(5) +#define WAKEUP_BIT_USB BIT(6) +#define WAKEUP_BIT_ABB BIT(7) +#define WAKEUP_BIT_LOW_POWER_AUDIO BIT(8) +#define WAKEUP_BIT_TEMP_SENSOR_LOW BIT(9) +#define WAKEUP_BIT_ARM BIT(10) +#define WAKEUP_BIT_AC_WAKE_ACK BIT(11) +#define WAKEUP_BIT_TEMP_SENSOR_HIGH BIT(12) +#define WAKEUP_BIT_MODEM_SW_RESET_REQ BIT(20) +#define WAKEUP_BIT_GPIO0 BIT(23) +#define WAKEUP_BIT_GPIO1 BIT(24) +#define WAKEUP_BIT_GPIO2 BIT(25) +#define WAKEUP_BIT_GPIO3 BIT(26) +#define WAKEUP_BIT_GPIO4 BIT(27) +#define WAKEUP_BIT_GPIO5 BIT(28) +#define WAKEUP_BIT_GPIO6 BIT(29) +#define WAKEUP_BIT_GPIO7 BIT(30) +#define WAKEUP_BIT_AC_REL_ACK BIT(30) + +/* + * This vector maps irq numbers to the bits in the bit field used in + * communication with the PRCMU firmware. + * + * The reason for having this is to keep the irq numbers contiguous even though + * the bits in the bit field are not. 
(The bits also have a tendency to move + * around, to further complicate matters.) + */ +#define IRQ_INDEX(_name) ((IRQ_DB5500_PRCMU_##_name) - IRQ_DB5500_PRCMU_BASE) +#define IRQ_ENTRY(_name)[IRQ_INDEX(_name)] = (WAKEUP_BIT_##_name) +static u32 prcmu_irq_bit[NUM_DB5500_PRCMU_WAKEUPS] = { + IRQ_ENTRY(RTC), + IRQ_ENTRY(RTT0), + IRQ_ENTRY(RTT1), + IRQ_ENTRY(CD_IRQ), + IRQ_ENTRY(SRP_TIM), + IRQ_ENTRY(APE_REQ), + IRQ_ENTRY(USB), + IRQ_ENTRY(ABB), + IRQ_ENTRY(LOW_POWER_AUDIO), + IRQ_ENTRY(TEMP_SENSOR_LOW), + IRQ_ENTRY(TEMP_SENSOR_HIGH), + IRQ_ENTRY(ARM), + IRQ_ENTRY(AC_WAKE_ACK), + IRQ_ENTRY(MODEM_SW_RESET_REQ), + IRQ_ENTRY(GPIO0), + IRQ_ENTRY(GPIO1), + IRQ_ENTRY(GPIO2), + IRQ_ENTRY(GPIO3), + IRQ_ENTRY(GPIO4), + IRQ_ENTRY(GPIO5), + IRQ_ENTRY(GPIO6), + IRQ_ENTRY(GPIO7), + IRQ_ENTRY(AC_REL_ACK), +}; + +#define VALID_WAKEUPS (BIT(NUM_PRCMU_WAKEUP_INDICES) - 1) +#define WAKEUP_ENTRY(_name)[PRCMU_WAKEUP_INDEX_##_name] = (WAKEUP_BIT_##_name) +static u32 prcmu_wakeup_bit[NUM_PRCMU_WAKEUP_INDICES] = { + WAKEUP_ENTRY(RTC), + WAKEUP_ENTRY(RTT0), + WAKEUP_ENTRY(RTT1), + WAKEUP_ENTRY(CD_IRQ), + WAKEUP_ENTRY(USB), + WAKEUP_ENTRY(ABB), + WAKEUP_ENTRY(ARM) +}; + +/* * mb0_transfer - state needed for mailbox 0 communication. - * @lock: The transaction lock. + * @lock The transaction lock. + * @dbb_irqs_lock lock used for (un)masking DBB wakeup interrupts + * @mask_work: Work structure used for (un)masking wakeup interrupts. + * @ac_wake_lock: mutex to lock modem_req and modem_rel + * @req: Request data that need to persist between requests. */ static struct { spinlock_t lock; + spinlock_t dbb_irqs_lock; + struct work_struct mask_work; + struct mutex ac_wake_lock; + struct { + u32 dbb_irqs; + u32 dbb_wakeups; + u32 abb_events; + } req; } mb0_transfer; + +/* + * mb1_transfer - state needed for mailbox 1 communication. + * @lock: The transaction lock. + * @work: The transaction completion structure. 
+ * @req_arm_opp Requested arm opp + * @req_ape_opp Requested ape opp + * @ack: Reply ("acknowledge") data. + */ +static struct { + struct mutex lock; + struct completion work; + u8 req_arm_opp; + u8 req_ape_opp; + struct { + u8 header; + u8 arm_opp; + u8 ape_opp; + u8 arm_voltage_st; + u8 ape_voltage_st; + } ack; +} mb1_transfer; + +/* + * mb2_transfer - state needed for mailbox 2 communication. + * @lock: The transaction lock. + * @work: The transaction completion structure. + * @req: Request data that need to persist between requests. + * @ack: Reply ("acknowledge") data. + */ +static struct { + struct mutex lock; + struct completion work; + struct { + u8 epod_st[DB5500_NUM_EPOD_ID]; + u8 pll_st[DB5500_NUM_PLL_ID]; + } req; + struct { + u8 header; + u8 status; + } ack; +} mb2_transfer; + +/* + * mb3_transfer - state needed for mailbox 3 communication. + * @sysclk_lock: A lock used to handle concurrent sysclk requests. + * @sysclk_work: Work structure used for sysclk requests. + * @req_st: Requested clock state. + * @ack: Acknowledgement data + */ +static struct { + struct mutex sysclk_lock; + struct completion sysclk_work; + enum sysclk_state req_st; + struct { + u8 header; + u8 status; + } ack; +} mb3_transfer; + +/* + * mb4_transfer - state needed for mailbox 4 communication. + * @lock: The transaction lock. + * @work: The transaction completion structure. + * @ack: Acknowledgement data + */ +static struct { + struct mutex lock; + struct completion work; + struct { + u8 header; + u8 status; + } ack; +} mb4_transfer; + /* * mb5_transfer - state needed for mailbox 5 communication. * @lock: The transaction lock. @@ -148,9 +476,825 @@ static struct { } ack; } mb5_transfer; -/* PRCMU TCDM base IO address. 
*/ +/* Spinlocks */ +static DEFINE_SPINLOCK(clkout_lock); + +/* PRCMU TCDM base IO address */ static __iomem void *tcdm_base; +/* PRCMU MTIMER base IO address */ +static __iomem void *mtimer_base; + +struct clk_mgt { + unsigned int offset; + u32 pllsw; + u32 div; + bool scalable; + bool force50; +}; + +/* PRCMU Firmware Details */ +static struct { + u16 board; + u8 fw_version; + u8 api_version; +} prcmu_version; + +static struct { + u32 timeout; + bool enabled; +} a9wdog_timer; + +static DEFINE_SPINLOCK(clk_mgt_lock); + +#define CLK_MGT_ENTRY(_name, _scalable)[PRCMU_##_name] = { \ + .offset = DB5500_PRCM_##_name##_MGT, \ + .scalable = _scalable, \ +} + +static struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = { + CLK_MGT_ENTRY(SGACLK, true), + CLK_MGT_ENTRY(UARTCLK, false), + CLK_MGT_ENTRY(MSP02CLK, false), + CLK_MGT_ENTRY(I2CCLK, false), + [PRCMU_SDMMCCLK] { + .offset = DB5500_PRCM_SDMMCCLK_MGT, + .force50 = true, + .scalable = false, + + }, + [PRCMU_SPARE1CLK] { + .offset = DB5500_PRCM_SPARE1CLK_MGT, + .force50 = true, + .scalable = false, + + }, + CLK_MGT_ENTRY(PER1CLK, false), + CLK_MGT_ENTRY(PER2CLK, true), + CLK_MGT_ENTRY(PER3CLK, true), + CLK_MGT_ENTRY(PER5CLK, false), /* used for SPI */ + CLK_MGT_ENTRY(PER6CLK, true), + CLK_MGT_ENTRY(PWMCLK, false), + CLK_MGT_ENTRY(IRDACLK, false), + CLK_MGT_ENTRY(IRRCCLK, false), + CLK_MGT_ENTRY(HDMICLK, false), + CLK_MGT_ENTRY(APEATCLK, false), + CLK_MGT_ENTRY(APETRACECLK, true), + CLK_MGT_ENTRY(MCDECLK, true), + CLK_MGT_ENTRY(DSIALTCLK, false), + CLK_MGT_ENTRY(DMACLK, true), + CLK_MGT_ENTRY(B2R2CLK, true), + CLK_MGT_ENTRY(TVCLK, false), + CLK_MGT_ENTRY(RNGCLK, false), + CLK_MGT_ENTRY(SIACLK, false), + CLK_MGT_ENTRY(SVACLK, false), + CLK_MGT_ENTRY(ACLK, true), +}; + +static atomic_t modem_req_state = ATOMIC_INIT(0); + +bool db5500_prcmu_is_modem_requested(void) +{ + return (atomic_read(&modem_req_state) != 0); +} + +/** + * prcmu_modem_req - APE requests Modem to wake up + * + * Whenever APE wants to send message to the 
modem, it will have to call this + * function to make sure that modem is awake. + */ +void prcmu_modem_req(void) +{ + u32 val; + + mutex_lock(&mb0_transfer.ac_wake_lock); + + val = readl(_PRCMU_BASE + PRCM_HOSTACCESS_REQ); + if (val & PRCM_HOSTACCESS_REQ_BIT) + goto unlock_and_return; + + writel((val | PRCM_HOSTACCESS_REQ_BIT), + (_PRCMU_BASE + PRCM_HOSTACCESS_REQ)); + atomic_set(&modem_req_state, 1); + +unlock_and_return: + mutex_unlock(&mb0_transfer.ac_wake_lock); + +} + +/** + * prcmu_modem_rel - APE has no more messages to send and hence releases modem. + * + * APE to Modem communication is initiated by modem_req and once the + * communication is completed, APE sends modem_rel to complete the protocol. + */ +void prcmu_modem_rel(void) +{ + u32 val; + + mutex_lock(&mb0_transfer.ac_wake_lock); + + val = readl(_PRCMU_BASE + PRCM_HOSTACCESS_REQ); + if (!(val & PRCM_HOSTACCESS_REQ_BIT)) + goto unlock_and_return; + + writel((val & ~PRCM_HOSTACCESS_REQ_BIT), + (_PRCMU_BASE + PRCM_HOSTACCESS_REQ)); + + atomic_set(&modem_req_state, 0); + +unlock_and_return: + mutex_unlock(&mb0_transfer.ac_wake_lock); +} + +/** + * prcm_ape_ack - send an acknowledgement to modem + * + * On ape receiving ape_req, APE will have to acknowledge for the interrupt + * received. This function will send the acknowledgement by writing to the + * prcmu register and an interrupt is trigerred to modem. + */ +void prcmu_ape_ack(void) +{ + writel(PRCM_APE_ACK_BIT, (_PRCMU_BASE + PRCM_APE_ACK)); +} + +/** + * db5500_prcmu_modem_reset - Assert a Reset on modem + * + * This function will assert a reset request to the modem. Prior to that + * PRCM_HOSTACCESS_REQ must be '0'. 
+ */ +void db5500_prcmu_modem_reset(void) +{ + mutex_lock(&mb4_transfer.lock); + + /* PRCM_HOSTACCESS_REQ = 0, before asserting a reset */ + prcmu_modem_rel(); + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4)) + cpu_relax(); + + writeb(MB4H_CGF_MODEM_RESET, PRCM_REQ_MB4_HEADER); + writel(MBOX_BIT(4), _PRCMU_BASE + PRCM_MBOX_CPU_SET); + wait_for_completion(&mb4_transfer.work); + if (mb4_transfer.ack.status != RC_SUCCESS || + mb4_transfer.ack.header != MB4H_CGF_MODEM_RESET) + printk(KERN_ERR, + "ACK not received for modem reset interrupt\n"); + mutex_unlock(&mb4_transfer.lock); +} + +/** + * prcmu_config_clkout - Configure one of the programmable clock outputs. + * @clkout: The CLKOUT number (0 or 1). + * @source: Clock source. + * @div: The divider to be applied. + * + * Configures one of the programmable clock outputs (CLKOUTs). + */ +int prcmu_config_clkout(u8 clkout, u8 source, u8 div) +{ + static bool configured[2] = {false, false}; + int r = 0; + unsigned long flags; + u32 sel_val; + u32 div_val; + u32 sel_bits; + u32 div_bits; + u32 sel_mask; + u32 div_mask; + u8 sel0 = CLKOUT_SEL0_SEL_CLK; + u16 sel = 0; + + BUG_ON(clkout > DB5500_CLKOUT1); + BUG_ON(source > DB5500_CLKOUT_IRDACLK); + BUG_ON(div > 7); + + switch (source) { + case DB5500_CLKOUT_REF_CLK_SEL0: + sel0 = CLKOUT_SEL0_REF_CLK; + break; + case DB5500_CLKOUT_RTC_CLK0_SEL0: + sel0 = CLKOUT_SEL0_RTC_CLK0; + break; + case DB5500_CLKOUT_ULP_CLK_SEL0: + sel0 = CLKOUT_SEL0_ULP_CLK; + break; + case DB5500_CLKOUT_STATIC0: + sel = CLKOUT_SEL_STATIC0; + break; + case DB5500_CLKOUT_REFCLK: + sel = CLKOUT_SEL_REFCLK; + break; + case DB5500_CLKOUT_ULPCLK: + sel = CLKOUT_SEL_ULPCLK; + break; + case DB5500_CLKOUT_ARMCLK: + sel = CLKOUT_SEL_ARMCLK; + break; + case DB5500_CLKOUT_SYSACC0CLK: + sel = CLKOUT_SEL_SYSACC0CLK; + break; + case DB5500_CLKOUT_SOC0PLLCLK: + sel = CLKOUT_SEL_SOC0PLLCLK; + break; + case DB5500_CLKOUT_SOC1PLLCLK: + sel = CLKOUT_SEL_SOC1PLLCLK; + break; + case DB5500_CLKOUT_DDRPLLCLK: + 
sel = CLKOUT_SEL_DDRPLLCLK; + break; + case DB5500_CLKOUT_TVCLK: + sel = CLKOUT_SEL_TVCLK; + break; + case DB5500_CLKOUT_IRDACLK: + sel = CLKOUT_SEL_IRDACLK; + break; + } + + switch (clkout) { + case DB5500_CLKOUT0: + sel_mask = PRCM_CLKOCR_CLKOUT0_SEL0_MASK | + PRCM_CLKOCR_CLKOUT0_SEL_MASK; + sel_bits = ((sel0 << PRCM_CLKOCR_CLKOUT0_SEL0_SHIFT) | + (sel << PRCM_CLKOCR_CLKOUT0_SEL_SHIFT)); + div_mask = PRCM_CLKODIV_CLKOUT0_DIV_MASK; + div_bits = div << PRCM_CLKODIV_CLKOUT0_DIV_SHIFT; + break; + case DB5500_CLKOUT1: + sel_mask = PRCM_CLKOCR_CLKOUT1_SEL0_MASK | + PRCM_CLKOCR_CLKOUT1_SEL_MASK; + sel_bits = ((sel0 << PRCM_CLKOCR_CLKOUT1_SEL0_SHIFT) | + (sel << PRCM_CLKOCR_CLKOUT1_SEL_SHIFT)); + div_mask = PRCM_CLKODIV_CLKOUT1_DIV_MASK; + div_bits = div << PRCM_CLKODIV_CLKOUT1_DIV_SHIFT; + break; + } + + spin_lock_irqsave(&clkout_lock, flags); + + if (configured[clkout]) { + r = -EINVAL; + goto unlock_and_return; + } + + sel_val = readl(_PRCMU_BASE + PRCM_CLKOCR); + writel((sel_bits | (sel_val & ~sel_mask)), + (_PRCMU_BASE + PRCM_CLKOCR)); + + div_val = readl(_PRCMU_BASE + PRCM_CLKODIV); + writel((div_bits | (div_val & ~div_mask)), + (_PRCMU_BASE + PRCM_CLKODIV)); + + configured[clkout] = true; + +unlock_and_return: + spin_unlock_irqrestore(&clkout_lock, flags); + + return r; +} + +static int request_sysclk(bool enable) +{ + int r; + + r = 0; + mutex_lock(&mb3_transfer.sysclk_lock); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(3)) + cpu_relax(); + + if (enable) + mb3_transfer.req_st = SYSCLK_ON; + else + mb3_transfer.req_st = SYSCLK_OFF; + + writeb(mb3_transfer.req_st, (PRCM_REQ_MB3_REFCLK_MGT)); + + writeb(MB3H_REFCLK_REQUEST, (PRCM_REQ_MB3_HEADER)); + writel(MBOX_BIT(3), _PRCMU_BASE + PRCM_MBOX_CPU_SET); + + /* + * The firmware only sends an ACK if we want to enable the + * SysClk, and it succeeds. 
+ */ + if (!wait_for_completion_timeout(&mb3_transfer.sysclk_work, + msecs_to_jiffies(20000))) { + pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n", + __func__); + r = -EIO; + WARN(1, "Failed to set sysclk"); + goto unlock_and_return; + } + + if ((mb3_transfer.ack.header != MB3H_REFCLK_REQUEST) || + (mb3_transfer.ack.status != mb3_transfer.req_st)) { + r = -EIO; + } + +unlock_and_return: + mutex_unlock(&mb3_transfer.sysclk_lock); + + return r; +} + +static int request_timclk(bool enable) +{ + u32 val = (PRCM_TCR_DOZE_MODE | PRCM_TCR_TENSEL_MASK); + + if (!enable) + val |= PRCM_TCR_STOP_TIMERS; + writel(val, _PRCMU_BASE + PRCM_TCR); + + return 0; +} + +static int request_clk(u8 clock, bool enable) +{ + int r = 0; + + BUG_ON(clock >= DB5500_NUM_CLK_CLIENTS); + + mutex_lock(&mb2_transfer.lock); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(2)) + cpu_relax(); + + /* fill in mailbox */ + writeb(clock, PRCM_REQ_MB2_CLK_CLIENT); + writeb(enable, PRCM_REQ_MB2_CLK_STATE); + + writeb(MB2H_CLK_REQUEST, PRCM_REQ_MB2_HEADER); + + writel(MBOX_BIT(2), _PRCMU_BASE + PRCM_MBOX_CPU_SET); + if (!wait_for_completion_timeout(&mb2_transfer.work, + msecs_to_jiffies(20000))) { + pr_err("prcmu: request_clk() failed.\n"); + r = -EIO; + WARN(1, "Failed in request_clk"); + goto unlock_and_return; + } + if (mb2_transfer.ack.status != RC_SUCCESS || + mb2_transfer.ack.header != MB2H_CLK_REQUEST) + r = -EIO; + +unlock_and_return: + mutex_unlock(&mb2_transfer.lock); + return r; +} + +static int request_reg_clock(u8 clock, bool enable) +{ + u32 val; + unsigned long flags; + + WARN_ON(!clk_mgt[clock].offset); + + spin_lock_irqsave(&clk_mgt_lock, flags); + + /* Grab the HW semaphore. 
*/ + while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0) + cpu_relax(); + + val = readl(_PRCMU_BASE + clk_mgt[clock].offset); + if (enable) { + val |= (PRCM_CLK_MGT_CLKEN | clk_mgt[clock].pllsw); + } else { + clk_mgt[clock].pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK); + val &= ~(PRCM_CLK_MGT_CLKEN | PRCM_CLK_MGT_CLKPLLSW_MASK); + } + writel(val, (_PRCMU_BASE + clk_mgt[clock].offset)); + + /* Release the HW semaphore. */ + writel(0, _PRCMU_BASE + PRCM_SEM); + + spin_unlock_irqrestore(&clk_mgt_lock, flags); + + return 0; +} + +/* + * request_pll() - Request for a pll to be enabled or disabled. + * @pll: The pll for which the request is made. + * @enable: Whether the clock should be enabled (true) or disabled (false). + * + * This function should only be used by the clock implementation. + * Do not use it from any other place! + */ +static int request_pll(u8 pll, bool enable) +{ + int r = 0; + + BUG_ON(pll >= DB5500_NUM_PLL_ID); + mutex_lock(&mb2_transfer.lock); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(2)) + cpu_relax(); + + mb2_transfer.req.pll_st[pll] = enable; + + /* fill in mailbox */ + writeb(pll, PRCM_REQ_MB2_PLL_CLIENT); + writeb(mb2_transfer.req.pll_st[pll], PRCM_REQ_MB2_PLL_STATE); + + writeb(MB2H_PLL_REQUEST, PRCM_REQ_MB2_HEADER); + + writel(MBOX_BIT(2), _PRCMU_BASE + PRCM_MBOX_CPU_SET); + if (!wait_for_completion_timeout(&mb2_transfer.work, + msecs_to_jiffies(20000))) { + pr_err("prcmu: set_pll() failed.\n"); + r = -EIO; + WARN(1, "Failed to set pll"); + goto unlock_and_return; + } + if (mb2_transfer.ack.status != RC_SUCCESS || + mb2_transfer.ack.header != MB2H_PLL_REQUEST) + r = -EIO; + +unlock_and_return: + mutex_unlock(&mb2_transfer.lock); + + return r; +} + +/** + * db5500_prcmu_request_clock() - Request for a clock to be enabled or disabled. + * @clock: The clock for which the request is made. + * @enable: Whether the clock should be enabled (true) or disabled (false). 
+ * + * This function should only be used by the clock implementation. + * Do not use it from any other place! + */ +int db5500_prcmu_request_clock(u8 clock, bool enable) +{ + /* MSP1 & CD clocks are handled by FW */ + if (clock == PRCMU_MSP1CLK) + return request_clk(DB5500_MSP1CLK, enable); + else if (clock == PRCMU_CDCLK) + return request_clk(DB5500_CDCLK, enable); + else if (clock == PRCMU_IRDACLK) + return request_clk(DB5500_IRDACLK, enable); + else if (clock < PRCMU_NUM_REG_CLOCKS) + return request_reg_clock(clock, enable); + else if (clock == PRCMU_TIMCLK) + return request_timclk(enable); + else if (clock == PRCMU_PLLSOC0) + return request_pll(DB5500_PLL_SOC0, enable); + else if (clock == PRCMU_PLLSOC1) + return request_pll(DB5500_PLL_SOC1, enable); + else if (clock == PRCMU_PLLDDR) + return request_pll(DB5500_PLL_DDR, enable); + else if (clock == PRCMU_SYSCLK) + return request_sysclk(enable); + else + return -EINVAL; +} + +/* This function should only be called while mb0_transfer.lock is held. 
*/ +static void config_wakeups(void) +{ + static u32 last_dbb_events; + static u32 last_abb_events; + u32 dbb_events; + u32 abb_events; + + dbb_events = mb0_transfer.req.dbb_irqs | mb0_transfer.req.dbb_wakeups; + + abb_events = mb0_transfer.req.abb_events; + + if ((dbb_events == last_dbb_events) && (abb_events == last_abb_events)) + return; + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0)) + cpu_relax(); + + writel(dbb_events, PRCM_REQ_MB0_WAKEUP_DBB); + writel(abb_events, PRCM_REQ_MB0_WAKEUP_ABB); + writeb(MB0H_WAKE_UP_CFG, PRCM_REQ_MB0_HEADER); + writel(MBOX_BIT(0), _PRCMU_BASE + PRCM_MBOX_CPU_SET); + + last_dbb_events = dbb_events; + last_abb_events = abb_events; +} + +int db5500_prcmu_config_esram0_deep_sleep(u8 state) +{ + unsigned long flags; + + if ((state > ESRAM0_DEEP_SLEEP_STATE_RET) || + (state < ESRAM0_DEEP_SLEEP_STATE_OFF)) + return -EINVAL; + + spin_lock_irqsave(&mb0_transfer.lock, flags); + + if (state == ESRAM0_DEEP_SLEEP_STATE_RET) + writeb(RET_ST, PRCM_REQ_MB0_ESRAM0_STATE); + else + writeb(OFF_ST, PRCM_REQ_MB0_ESRAM0_STATE); + + spin_unlock_irqrestore(&mb0_transfer.lock, flags); + + return 0; +} + +int db5500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll) +{ + int r = 0; + unsigned long flags; + + /* Deep Idle is not supported in DB5500 */ + BUG_ON((state < PRCMU_AP_SLEEP) || (state >= PRCMU_AP_DEEP_IDLE)); + + spin_lock_irqsave(&mb0_transfer.lock, flags); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0)) + cpu_relax(); + + switch (state) { + case PRCMU_AP_IDLE: + writeb(DB5500_AP_IDLE, PRCM_REQ_MB0_AP_POWER_STATE); + /* TODO: Can be high latency */ + writeb(DDR_PWR_STATE_UNCHANGED, PRCM_REQ_MB0_DDR_STATE); + break; + case PRCMU_AP_SLEEP: + writeb(DB5500_AP_SLEEP, PRCM_REQ_MB0_AP_POWER_STATE); + break; + case PRCMU_AP_DEEP_SLEEP: + writeb(DB5500_AP_DEEP_SLEEP, PRCM_REQ_MB0_AP_POWER_STATE); + break; + default: + r = -EINVAL; + goto unlock_return; + } + writeb((keep_ap_pll ? 
1 : 0), PRCM_REQ_MB0_AP_PLL_STATE); + writeb((keep_ulp_clk ? 1 : 0), PRCM_REQ_MB0_ULP_CLOCK_STATE); + + writeb(MB0H_PWR_STATE_TRANS, PRCM_REQ_MB0_HEADER); + writel(MBOX_BIT(0), _PRCMU_BASE + PRCM_MBOX_CPU_SET); + +unlock_return: + spin_unlock_irqrestore(&mb0_transfer.lock, flags); + + return r; +} + +u8 db5500_prcmu_get_power_state_result(void) +{ + u8 status = readb_relaxed(PRCM_ACK_MB0_AP_PWRSTTR_STATUS); + + /* + * Callers expect all the status values to match 8500. Adjust for + * PendingReq_Er (0x2b). + */ + if (status == 0x2b) + status = PRCMU_PRCMU2ARMPENDINGIT_ER; + + return status; +} + +void db5500_prcmu_enable_wakeups(u32 wakeups) +{ + unsigned long flags; + u32 bits; + int i; + + BUG_ON(wakeups != (wakeups & VALID_WAKEUPS)); + + for (i = 0, bits = 0; i < NUM_PRCMU_WAKEUP_INDICES; i++) { + if (wakeups & BIT(i)) { + if (prcmu_wakeup_bit[i] == 0) + WARN(1, "WAKEUP NOT SUPPORTED"); + else + bits |= prcmu_wakeup_bit[i]; + } + } + + spin_lock_irqsave(&mb0_transfer.lock, flags); + + mb0_transfer.req.dbb_wakeups = bits; + config_wakeups(); + + spin_unlock_irqrestore(&mb0_transfer.lock, flags); +} + +void db5500_prcmu_config_abb_event_readout(u32 abb_events) +{ + unsigned long flags; + + spin_lock_irqsave(&mb0_transfer.lock, flags); + + mb0_transfer.req.abb_events = abb_events; + config_wakeups(); + + spin_unlock_irqrestore(&mb0_transfer.lock, flags); +} + +void db5500_prcmu_get_abb_event_buffer(void __iomem **buf) +{ + if (readb(PRCM_ACK_MB0_READ_POINTER) & 1) + *buf = (PRCM_ACK_MB0_WAKEUP_1_ABB); + else + *buf = (PRCM_ACK_MB0_WAKEUP_0_ABB); +} + +/* This function should be called with lock */ +static int mailbox4_request(u8 mb4_request, u8 ack_request) +{ + int ret = 0; + + writeb(mb4_request, PRCM_REQ_MB4_HEADER); + writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET)); + + if (!wait_for_completion_timeout(&mb4_transfer.work, + msecs_to_jiffies(20000))) { + pr_err("prcmu: MB4 request %d failed", mb4_request); + ret = -EIO; + WARN(1, "prcmu: failed mb4 
request"); + goto failed; + } + + if (mb4_transfer.ack.header != ack_request || + mb4_transfer.ack.status != RC_SUCCESS) + ret = -EIO; +failed: + return ret; +} + +int db5500_prcmu_get_hotdog(void) +{ + return readw(PRCM_SHARE_INFO_HOTDOG); +} + +int db5500_prcmu_config_hotdog(u8 threshold) +{ + int r = 0; + + mutex_lock(&mb4_transfer.lock); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4)) + cpu_relax(); + + writew(threshold, PRCM_REQ_MB4_HOTDOG_THRESHOLD); + r = mailbox4_request(MB4H_CFG_HOTDOG, MB4H_ACK_CFG_HOTDOG); + + mutex_unlock(&mb4_transfer.lock); + + return r; +} + +int db5500_prcmu_config_hotmon(u8 low, u8 high) +{ + int r = 0; + + mutex_lock(&mb4_transfer.lock); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4)) + cpu_relax(); + + writew(low, PRCM_REQ_MB4_HOTMON_LOW); + writew(high, PRCM_REQ_MB4_HOTMON_HIGH); + + r = mailbox4_request(MB4H_CFG_HOTMON, MB4H_ACK_CFG_HOTMON); + + mutex_unlock(&mb4_transfer.lock); + + return r; +} + +static int config_hot_period(u16 val) +{ + int r = 0; + + mutex_lock(&mb4_transfer.lock); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4)) + cpu_relax(); + + writew(val, PRCM_REQ_MB4_HOT_PERIOD); + r = mailbox4_request(MB4H_CFG_HOTPERIOD, MB4H_ACK_CFG_HOTPERIOD); + + mutex_unlock(&mb4_transfer.lock); + + return r; +} + +/* + * period in milli seconds + */ +int db5500_prcmu_start_temp_sense(u16 period) +{ + if (period == 0xFFFF) + return -EINVAL; + + return config_hot_period(period); +} + +int db5500_prcmu_stop_temp_sense(void) +{ + return config_hot_period(0xFFFF); +} + +static int prcmu_a9wdog(u8 req, u8 ack) +{ + int r = 0; + + mutex_lock(&mb4_transfer.lock); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4)) + cpu_relax(); + + r = mailbox4_request(req, ack); + + mutex_unlock(&mb4_transfer.lock); + + return r; +} + +static void prcmu_a9wdog_set_interrupt(bool enable) +{ + if (enable) { + writel(PRCM_TIMER0_IRQ_RTOS1_SET, + (mtimer_base + 
PRCM_TIMER0_IRQ_EN_SET_OFFSET)); + } else { + writel(PRCM_TIMER0_IRQ_RTOS1_CLR, + (mtimer_base + PRCM_TIMER0_IRQ_EN_CLR_OFFSET)); + } +} + +static void prcmu_a9wdog_set_timeout(u32 timeout) +{ + u32 comp_timeout; + + comp_timeout = readl(mtimer_base + PRCM_TIMER0_RTOS_COUNTER_OFFSET) + + timeout; + writel(comp_timeout, mtimer_base + PRCM_TIMER0_RTOS_COMP1_OFFSET); +} + +int db5500_prcmu_config_a9wdog(u8 num, bool sleep_auto_off) +{ + /* + * Sleep auto off feature is not supported. Resume and + * suspend will be handled by watchdog driver. + */ + return 0; +} + +int db5500_prcmu_enable_a9wdog(u8 id) +{ + int r = 0; + + if (a9wdog_timer.enabled) + return -EPERM; + + prcmu_a9wdog_set_interrupt(true); + + r = prcmu_a9wdog(MB4H_CGF_A9WDOG_EN_PREBARK, + MB4H_ACK_CGF_A9WDOG_EN_PREBARK); + if (!r) + a9wdog_timer.enabled = true; + else + prcmu_a9wdog_set_interrupt(false); + + return r; +} + +int db5500_prcmu_disable_a9wdog(u8 id) +{ + if (!a9wdog_timer.enabled) + return -EPERM; + + prcmu_a9wdog_set_interrupt(false); + + a9wdog_timer.enabled = false; + + return prcmu_a9wdog(MB4H_CGF_A9WDOG_DIS, + MB4H_ACK_CGF_A9WDOG_DIS); +} + +int db5500_prcmu_kick_a9wdog(u8 id) +{ + int r = 0; + + if (a9wdog_timer.enabled) + prcmu_a9wdog_set_timeout(a9wdog_timer.timeout); + else + r = -EPERM; + + return r; +} + +int db5500_prcmu_load_a9wdog(u8 id, u32 timeout) +{ + if (a9wdog_timer.enabled) + return -EPERM; + + prcmu_a9wdog_set_timeout(timeout); + a9wdog_timer.timeout = timeout; + + return 0; +} + /** * db5500_prcmu_abb_read() - Read register value(s) from the ABB. * @slave: The I2C slave address. 
@@ -170,14 +1314,14 @@ int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size) mutex_lock(&mb5_transfer.lock); - while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5)) + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(5)) cpu_relax(); writeb(slave, PRCM_REQ_MB5_I2C_SLAVE); writeb(reg, PRCM_REQ_MB5_I2C_REG); writeb(size, PRCM_REQ_MB5_I2C_SIZE); writeb(MB5H_I2C_READ, PRCM_REQ_MB5_HEADER); - writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET); + writel(MBOX_BIT(5), _PRCMU_BASE + PRCM_MBOX_CPU_SET); wait_for_completion(&mb5_transfer.work); r = 0; @@ -211,7 +1355,7 @@ int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size) mutex_lock(&mb5_transfer.lock); - while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5)) + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(5)) cpu_relax(); writeb(slave, PRCM_REQ_MB5_I2C_SLAVE); writeb(reg, PRCM_REQ_MB5_I2C_REG); @@ -219,7 +1363,7 @@ int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size) memcpy_toio(PRCM_REQ_MB5_I2C_DATA, value, size); writeb(MB5H_I2C_WRITE, PRCM_REQ_MB5_HEADER); - writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET); + writel(MBOX_BIT(5), _PRCMU_BASE + PRCM_MBOX_CPU_SET); wait_for_completion(&mb5_transfer.work); if ((mb5_transfer.ack.header == MB5H_I2C_WRITE) && @@ -233,42 +1377,385 @@ int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size) return r; } +/** + * db5500_prcmu_set_arm_opp - set the appropriate ARM OPP + * @opp: The new ARM operating point to which transition is to be made + * Returns: 0 on success, non-zero on failure + * + * This function sets the the operating point of the ARM. 
+ */ +int db5500_prcmu_set_arm_opp(u8 opp) +{ + int r; + u8 db5500_opp; + + r = 0; + + switch (opp) { + case ARM_EXTCLK: + db5500_opp = DB5500_ARM_EXT_OPP; + break; + case ARM_50_OPP: + db5500_opp = DB5500_ARM_50_OPP; + break; + case ARM_100_OPP: + db5500_opp = DB5500_ARM_100_OPP; + break; + default: + pr_err("prcmu: %s() received wrong opp value: %d\n", + __func__, opp); + r = -EINVAL; + goto bailout; + } + + mutex_lock(&mb1_transfer.lock); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1)) + cpu_relax(); + + writeb(MB1H_ARM_OPP, PRCM_REQ_MB1_HEADER); + + writeb(db5500_opp, PRCM_REQ_MB1_ARM_OPP); + writel(MBOX_BIT(1), _PRCMU_BASE + PRCM_MBOX_CPU_SET); + + if (!wait_for_completion_timeout(&mb1_transfer.work, + msecs_to_jiffies(20000))) { + r = -EIO; + WARN(1, "prcmu: failed to set arm opp"); + goto unlock_and_return; + } + + if (mb1_transfer.ack.header != MB1H_ARM_OPP || + (mb1_transfer.ack.arm_opp != db5500_opp) || + (mb1_transfer.ack.arm_voltage_st != RC_SUCCESS)) + r = -EIO; + +unlock_and_return: + mutex_unlock(&mb1_transfer.lock); +bailout: + if (!r) + prcmu_debug_arm_opp_log(opp); + return r; +} + +static void __init prcmu_ape_clocks_init(void) +{ + u8 opp = db5500_prcmu_get_ape_opp(); + unsigned long flags; + int i; + + WARN(opp != APE_100_OPP, "%s: Initial APE OPP (%u) not 100%%?\n", + __func__, opp); + + for (i = 0; i < PRCMU_NUM_REG_CLOCKS; i++) { + struct clk_mgt *clkmgt = &clk_mgt[i]; + u32 clkval; + u32 div; + + if (!clkmgt->scalable && !clkmgt->force50) + continue; + + spin_lock_irqsave(&clk_mgt_lock, flags); + + clkval = readl(_PRCMU_BASE + clkmgt->offset); + div = clkval & PRCM_CLK_MGT_CLKPLLDIV_MASK; + div >>= PRCM_CLK_MGT_CLKPLLDIV_SHIFT; + + if (clkmgt->force50) { + div *= 2; + + clkval &= ~PRCM_CLK_MGT_CLKPLLDIV_MASK; + clkval |= div << PRCM_CLK_MGT_CLKPLLDIV_SHIFT; + writel(clkval, _PRCMU_BASE + clkmgt->offset); + + spin_unlock_irqrestore(&clk_mgt_lock, flags); + continue; + } + + spin_unlock_irqrestore(&clk_mgt_lock, flags); + + 
clkmgt->div = div; + if (!div) + pr_err("%s: scalable clock at offset %#x has zero divisor\n", + __func__, clkmgt->offset); + } +} + +static void prcmu_ape_clocks_scale(u8 opp) +{ + unsigned long irqflags; + unsigned int i; + u32 clkval; + + /* + * Note: calling printk() under the following lock can cause lock + * recursion via clk_enable() for the console UART! + */ + spin_lock_irqsave(&clk_mgt_lock, irqflags); + + /* take a lock on HW (HWSEM)*/ + while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0) + cpu_relax(); + + for (i = 0; i < PRCMU_NUM_REG_CLOCKS; i++) { + u32 divval; + + if (!clk_mgt[i].scalable) + continue; + + clkval = readl(_PRCMU_BASE + clk_mgt[i].offset); + divval = clk_mgt[i].div; + + pr_debug("PRCMU: reg %#x prev clk = 0x%x stored div = 0x%x\n", + clk_mgt[i].offset, clkval, divval); + + if (opp == DB5500_APE_50_OPP) + divval *= 2; + + clkval &= ~PRCM_CLK_MGT_CLKPLLDIV_MASK; + clkval |= divval << PRCM_CLK_MGT_CLKPLLDIV_SHIFT; + + pr_debug("PRCMU: wr 0x%x in reg 0x%x\n", + clkval, clk_mgt[i].offset); + + writel(clkval, _PRCMU_BASE + clk_mgt[i].offset); + } + + /* release lock */ + writel(0, (_PRCMU_BASE + PRCM_SEM)); + + spin_unlock_irqrestore(&clk_mgt_lock, irqflags); +} +/* Divide the frequency of certain clocks by 2 for APE_50_PARTLY_25_OPP. */ +static void request_even_slower_clocks(bool enable) +{ + void __iomem *clock_reg[] = { + (_PRCMU_BASE + DB5500_PRCM_ACLK_MGT), + (_PRCMU_BASE + DB5500_PRCM_DMACLK_MGT) + }; + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&clk_mgt_lock, flags); + + /* Grab the HW semaphore. 
*/ + while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0) + cpu_relax(); + + for (i = 0; i < ARRAY_SIZE(clock_reg); i++) { + u32 val; + u32 div; + + val = readl(clock_reg[i]); + div = (val & PRCM_CLK_MGT_CLKPLLDIV_MASK); + if (enable) { + if ((div <= 1) || (div > 15)) { + pr_err("prcmu: Bad clock divider %d in %s\n", + div, __func__); + goto unlock_and_return; + } + div <<= 1; + } else { + if (div <= 2) + goto unlock_and_return; + div >>= 1; + } + val = ((val & ~PRCM_CLK_MGT_CLKPLLDIV_MASK) | + (div & PRCM_CLK_MGT_CLKPLLDIV_MASK)); + writel(val, clock_reg[i]); + } + +unlock_and_return: + /* Release the HW semaphore. */ + writel(0, _PRCMU_BASE + PRCM_SEM); + + spin_unlock_irqrestore(&clk_mgt_lock, flags); +} +int db5500_prcmu_set_ape_opp(u8 opp) +{ + int ret = 0; + u8 db5500_opp; + if (opp == mb1_transfer.req_ape_opp) + return 0; + + switch (opp) { + case APE_100_OPP: + db5500_opp = DB5500_APE_100_OPP; + break; + case APE_50_OPP: + case APE_50_PARTLY_25_OPP: + db5500_opp = DB5500_APE_50_OPP; + break; + default: + pr_err("prcmu: %s() received wrong opp value: %d\n", + __func__, opp); + ret = -EINVAL; + goto bailout; + } + + mutex_lock(&mb1_transfer.lock); + if (mb1_transfer.req_ape_opp == APE_50_PARTLY_25_OPP) + request_even_slower_clocks(false); + if ((opp != APE_100_OPP) && (mb1_transfer.req_ape_opp != APE_100_OPP)) + goto skip_message; + + prcmu_ape_clocks_scale(db5500_opp); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1)) + cpu_relax(); + + writeb(MB1H_APE_OPP, PRCM_REQ_MB1_HEADER); + writeb(db5500_opp, PRCM_REQ_MB1_APE_OPP); + writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET)); + + if (!wait_for_completion_timeout(&mb1_transfer.work, + msecs_to_jiffies(20000))) { + ret = -EIO; + WARN(1, "prcmu: failed to set ape opp to %u", opp); + goto unlock_and_return; + } + + if (mb1_transfer.ack.header != MB1H_APE_OPP || + (mb1_transfer.ack.ape_opp != db5500_opp) || + (mb1_transfer.ack.arm_voltage_st != RC_SUCCESS)) + ret = -EIO; + 
+skip_message: + if ((!ret && (opp == APE_50_PARTLY_25_OPP)) || + (ret && (mb1_transfer.req_ape_opp == APE_50_PARTLY_25_OPP))) + request_even_slower_clocks(true); + if (!ret) + mb1_transfer.req_ape_opp = opp; +unlock_and_return: + mutex_unlock(&mb1_transfer.lock); +bailout: + return ret; +} + +int db5500_prcmu_get_ape_opp(void) +{ + u8 opp = readb(PRCM_ACK_MB1_CURRENT_APE_OPP); + + switch (opp) { + case DB5500_APE_100_OPP: + return APE_100_OPP; + case DB5500_APE_50_OPP: + return APE_50_OPP; + default: + pr_err("prcmu: %s() read unknown opp value: %d\n", + __func__, opp); + return APE_100_OPP; + } +} + +int db5500_prcmu_get_ddr_opp(void) +{ + return readb(_PRCMU_BASE + PRCM_DDR_SUBSYS_APE_MINBW); +} + +int db5500_prcmu_set_ddr_opp(u8 opp) +{ + if (opp != DDR_100_OPP && opp != DDR_50_OPP) + return -EINVAL; + + writeb(opp, _PRCMU_BASE + PRCM_DDR_SUBSYS_APE_MINBW); + + return 0; +} + +/** + * db5500_prcmu_get_arm_opp - get the current ARM OPP + * + * Returns: the current ARM OPP + */ +int db5500_prcmu_get_arm_opp(void) +{ + u8 opp = readb(PRCM_ACK_MB1_CURRENT_ARM_OPP); + + switch (opp) { + case DB5500_ARM_EXT_OPP: + return ARM_EXTCLK; + case DB5500_ARM_50_OPP: + return ARM_50_OPP; + case DB5500_ARM_100_OPP: + return ARM_100_OPP; + default: + pr_err("prcmu: %s() read unknown opp value: %d\n", + __func__, opp); + return ARM_100_OPP; + } +} + +int prcmu_resetout(u8 resoutn, u8 state) +{ + int offset; + int pin = -1; + + offset = state > 0 ? 
PRCM_RESOUTN_SET_OFFSET : PRCM_RESOUTN_CLR_OFFSET; + + switch (resoutn) { + case 0: + pin = PRCMU_RESOUTN0_PIN; + break; + case 1: + pin = PRCMU_RESOUTN1_PIN; + break; + case 2: + pin = PRCMU_RESOUTN2_PIN; + default: + break; + } + + if (pin > 0) + writel(pin, _PRCMU_BASE + offset); + else + return -EINVAL; + + return 0; +} + int db5500_prcmu_enable_dsipll(void) { int i; + int ret = 0; /* Enable DSIPLL_RESETN resets */ - writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_CLR); + writel(PRCMU_RESET_DSIPLL, _PRCMU_BASE + PRCM_APE_RESETN_CLR); /* Unclamp DSIPLL in/out */ - writel(PRCMU_UNCLAMP_DSIPLL, PRCM_MMIP_LS_CLAMP_CLR); + writel(PRCMU_UNCLAMP_DSIPLL, _PRCMU_BASE + PRCM_MMIP_LS_CLAMP_CLR); /* Set DSI PLL FREQ */ - writel(PRCMU_PLLDSI_FREQ_SETTING, PRCM_PLLDSI_FREQ); + writel(PRCMU_PLLDSI_FREQ_SETTING, _PRCMU_BASE + PRCM_PLLDSI_FREQ); writel(PRCMU_DSI_PLLOUT_SEL_SETTING, - PRCM_DSI_PLLOUT_SEL); + _PRCMU_BASE + PRCM_DSI_PLLOUT_SEL); /* Enable Escape clocks */ - writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV); + writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, _PRCMU_BASE + PRCM_DSITVCLK_DIV); /* Start DSI PLL */ - writel(PRCMU_ENABLE_PLLDSI, PRCM_PLLDSI_ENABLE); + writel(PRCMU_ENABLE_PLLDSI, _PRCMU_BASE + PRCM_PLLDSI_ENABLE); /* Reset DSI PLL */ - writel(PRCMU_DSI_RESET_SW, PRCM_DSI_SW_RESET); + writel(PRCMU_DSI_RESET_SW, _PRCMU_BASE + PRCM_DSI_SW_RESET); for (i = 0; i < 10; i++) { - if ((readl(PRCM_PLLDSI_LOCKP) & + if ((readl(_PRCMU_BASE + PRCM_PLLDSI_LOCKP) & PRCMU_PLLDSI_LOCKP_LOCKED) == PRCMU_PLLDSI_LOCKP_LOCKED) break; udelay(100); } + + if ((readl(_PRCMU_BASE + PRCM_PLLDSI_LOCKP) & + PRCMU_PLLDSI_LOCKP_LOCKED) + != PRCMU_PLLDSI_LOCKP_LOCKED) + ret = -EIO; /* Release DSIPLL_RESETN */ - writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_SET); - return 0; + writel(PRCMU_RESET_DSIPLL, _PRCMU_BASE + PRCM_APE_RESETN_SET); + return ret; } int db5500_prcmu_disable_dsipll(void) { /* Disable dsi pll */ - writel(PRCMU_DISABLE_PLLDSI, PRCM_PLLDSI_ENABLE); + writel(PRCMU_DISABLE_PLLDSI, 
_PRCMU_BASE + PRCM_PLLDSI_ENABLE); /* Disable escapeclock */ - writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV); + writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, _PRCMU_BASE + PRCM_DSITVCLK_DIV); return 0; } @@ -276,27 +1763,150 @@ int db5500_prcmu_set_display_clocks(void) { /* HDMI and TVCLK Should be handled somewhere else */ /* PLLDIV=8, PLLSW=2, CLKEN=1 */ - writel(PRCMU_DSI_CLOCK_SETTING, PRCM_HDMICLK_MGT); + writel(PRCMU_DSI_CLOCK_SETTING, _PRCMU_BASE + DB5500_PRCM_HDMICLK_MGT); /* PLLDIV=14, PLLSW=2, CLKEN=1 */ - writel(PRCMU_DSI_LP_CLOCK_SETTING, PRCM_TVCLK_MGT); + writel(PRCMU_DSI_LP_CLOCK_SETTING, _PRCMU_BASE + DB5500_PRCM_TVCLK_MGT); return 0; } +u32 db5500_prcmu_read(unsigned int reg) +{ + return readl_relaxed(_PRCMU_BASE + reg); +} + +void db5500_prcmu_write(unsigned int reg, u32 value) +{ + writel_relaxed(value, _PRCMU_BASE + reg); +} + +void db5500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value) +{ + u32 val; + + val = readl_relaxed(_PRCMU_BASE + reg); + val = (val & ~mask) | (value & mask); + writel_relaxed(val, _PRCMU_BASE + reg); +} + +/** + * db5500_prcmu_system_reset - System reset + * + * Saves the reset reason code and then sets the APE_SOFTRST register which + * fires an interrupt to fw + */ +void db5500_prcmu_system_reset(u16 reset_code) +{ + writew(reset_code, PRCM_SW_RST_REASON); + writel(1, _PRCMU_BASE + PRCM_APE_SOFTRST); +} + +/** + * db5500_prcmu_get_reset_code - Retrieve SW reset reason code + * + * Retrieves the reset reason code stored by prcmu_system_reset() before + * last restart. 
+ */ +u16 db5500_prcmu_get_reset_code(void) +{ + return readw(PRCM_SW_RST_REASON); +} + static void ack_dbb_wakeup(void) { unsigned long flags; spin_lock_irqsave(&mb0_transfer.lock, flags); - while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0)) + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0)) cpu_relax(); - writeb(RMB0H_RD_WAKE_UP_ACK, PRCM_REQ_MB0_HEADER); - writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET); + writeb(MB0H_RD_WAKE_UP_ACK, PRCM_REQ_MB0_HEADER); + writel(MBOX_BIT(0), _PRCMU_BASE + PRCM_MBOX_CPU_SET); spin_unlock_irqrestore(&mb0_transfer.lock, flags); } +int db5500_prcmu_set_epod(u16 epod, u8 epod_state) +{ + int r = 0; + bool ram_retention = false; + + /* check argument */ + BUG_ON(epod < DB5500_EPOD_ID_BASE); + BUG_ON(epod_state > EPOD_STATE_ON); + BUG_ON((epod - DB5500_EPOD_ID_BASE) >= DB5500_NUM_EPOD_ID); + + if (epod == DB5500_EPOD_ID_ESRAM12) + ram_retention = true; + + /* check argument */ + BUG_ON(epod_state == EPOD_STATE_RAMRET && !ram_retention); + + /* get lock */ + mutex_lock(&mb2_transfer.lock); + + /* wait for mailbox */ + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(2)) + cpu_relax(); + + /* Retention is allowed only for ESRAM12 */ + if (epod == DB5500_EPOD_ID_ESRAM12) { + switch (epod_state) { + case EPOD_STATE_ON: + mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE] = + EPOD_OOR_ON; + break; + case EPOD_STATE_OFF: + mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE] = + EPOD_OOR_OFF; + break; + case EPOD_STATE_RAMRET: + mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE] = + EPOD_OOR_RET; + break; + default: + r = -EINVAL; + goto unlock_and_return; + break; + } + } else { + if (epod_state == EPOD_STATE_ON) + mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE] = + EPOD_ON; + else if (epod_state == EPOD_STATE_OFF) + mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE] = + EPOD_OFF; + else { + r = -EINVAL; + goto unlock_and_return; + } + } + /* fill in mailbox */ + writeb((epod - DB5500_EPOD_ID_BASE), 
PRCM_REQ_MB2_EPOD_CLIENT); + writeb(mb2_transfer.req.epod_st[epod - DB5500_EPOD_ID_BASE], + PRCM_REQ_MB2_EPOD_STATE); + + writeb(MB2H_EPOD_REQUEST, PRCM_REQ_MB2_HEADER); + + writel(MBOX_BIT(2), _PRCMU_BASE + PRCM_MBOX_CPU_SET); + + if (!wait_for_completion_timeout(&mb2_transfer.work, + msecs_to_jiffies(20000))) { + pr_err("prcmu: set_epod() failed.\n"); + r = -EIO; + WARN(1, "Failed to set epod"); + goto unlock_and_return; + } + + if (mb2_transfer.ack.status != RC_SUCCESS || + mb2_transfer.ack.header != MB2H_EPOD_REQUEST) + r = -EIO; + +unlock_and_return: + mutex_unlock(&mb2_transfer.lock); + return r; +} + static inline void print_unknown_header_warning(u8 n, u8 header) { pr_warning("prcmu: Unknown message header (%d) in mailbox %d.\n", @@ -306,11 +1916,31 @@ static inline void print_unknown_header_warning(u8 n, u8 header) static bool read_mailbox_0(void) { bool r; + u32 ev; + unsigned int n; + u8 header; header = readb(PRCM_ACK_MB0_HEADER); switch (header) { - case AMB0H_WAKE_UP: + case MB0H_WAKE_UP: + if (readb(PRCM_ACK_MB0_READ_POINTER) & 1) + ev = readl(PRCM_ACK_MB0_WAKEUP_1_DBB); + else + ev = readl(PRCM_ACK_MB0_WAKEUP_0_DBB); + + prcmu_debug_register_mbox0_event(ev, + (mb0_transfer.req.dbb_irqs | + mb0_transfer.req.dbb_wakeups)); + + ev &= mb0_transfer.req.dbb_irqs; + + for (n = 0; n < NUM_DB5500_PRCMU_WAKEUPS; n++) { + if (ev & prcmu_irq_bit[n]) { + if (n != IRQ_INDEX(ABB)) + generic_handle_irq(IRQ_DB5500_PRCMU_BASE + n); + } + } r = true; break; default: @@ -318,31 +1948,123 @@ static bool read_mailbox_0(void) r = false; break; } - writel(MBOX_BIT(0), PRCM_ARM_IT1_CLR); + writel(MBOX_BIT(0), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR); return r; } static bool read_mailbox_1(void) { - writel(MBOX_BIT(1), PRCM_ARM_IT1_CLR); + u8 header; + bool do_complete = true; + + header = mb1_transfer.ack.header = readb(PRCM_ACK_MB1_HEADER); + + switch (header) { + case MB1H_ARM_OPP: + mb1_transfer.ack.arm_opp = readb(PRCM_ACK_MB1_CURRENT_ARM_OPP); + 
mb1_transfer.ack.arm_voltage_st = + readb(PRCM_ACK_MB1_ARM_VOLT_STATUS); + break; + case MB1H_APE_OPP: + mb1_transfer.ack.ape_opp = readb(PRCM_ACK_MB1_CURRENT_APE_OPP); + mb1_transfer.ack.ape_voltage_st = + readb(PRCM_ACK_MB1_APE_VOLT_STATUS); + break; + case MB1H_ARM_APE_OPP: + mb1_transfer.ack.ape_opp = readb(PRCM_ACK_MB1_CURRENT_APE_OPP); + mb1_transfer.ack.ape_voltage_st = + readb(PRCM_ACK_MB1_APE_VOLT_STATUS); + break; + default: + print_unknown_header_warning(1, header); + do_complete = false; + break; + } + + writel(MBOX_BIT(1), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR); + + if (do_complete) + complete(&mb1_transfer.work); + return false; } static bool read_mailbox_2(void) { - writel(MBOX_BIT(2), PRCM_ARM_IT1_CLR); + u8 header; + + header = readb(PRCM_ACK_MB2_HEADER); + mb2_transfer.ack.header = header; + switch (header) { + case MB2H_EPOD_REQUEST: + mb2_transfer.ack.status = readb(PRCM_ACK_MB2_EPOD_STATUS); + break; + case MB2H_CLK_REQUEST: + mb2_transfer.ack.status = readb(PRCM_ACK_MB2_CLK_STATUS); + break; + case MB2H_PLL_REQUEST: + mb2_transfer.ack.status = readb(PRCM_ACK_MB2_PLL_STATUS); + break; + default: + writel(MBOX_BIT(2), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR); + pr_err("prcmu: Wrong ACK received for MB2 request \n"); + return false; + break; + } + writel(MBOX_BIT(2), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR); + complete(&mb2_transfer.work); return false; } static bool read_mailbox_3(void) { - writel(MBOX_BIT(3), PRCM_ARM_IT1_CLR); + u8 header; + + header = readb(PRCM_ACK_MB3_HEADER); + mb3_transfer.ack.header = header; + switch (header) { + case MB3H_REFCLK_REQUEST: + mb3_transfer.ack.status = readb(PRCM_ACK_MB3_REFCLK_REQ); + writel(MBOX_BIT(3), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR); + complete(&mb3_transfer.sysclk_work); + break; + default: + writel(MBOX_BIT(3), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR); + pr_err("prcmu: wrong MB3 header\n"); + break; + } + return false; } static bool read_mailbox_4(void) { - writel(MBOX_BIT(4), PRCM_ARM_IT1_CLR); + u8 header; + bool 
do_complete = true; + + header = readb(PRCM_ACK_MB4_HEADER); + mb4_transfer.ack.header = header; + switch (header) { + case MB4H_ACK_CFG_HOTDOG: + case MB4H_ACK_CFG_HOTMON: + case MB4H_ACK_CFG_HOTPERIOD: + case MB4H_ACK_CFG_MODEM_RESET: + case MB4H_ACK_CGF_A9WDOG_EN_PREBARK: + case MB4H_ACK_CGF_A9WDOG_EN_NOPREBARK: + case MB4H_ACK_CGF_A9WDOG_DIS: + mb4_transfer.ack.status = readb(PRCM_ACK_MB4_REQUESTS); + break; + default: + print_unknown_header_warning(4, header); + do_complete = false; + break; + } + + writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_ARM_IT1_CLEAR)); + + if (do_complete) + complete(&mb4_transfer.work); + return false; } @@ -363,19 +2085,19 @@ static bool read_mailbox_5(void) print_unknown_header_warning(5, header); break; } - writel(MBOX_BIT(5), PRCM_ARM_IT1_CLR); + writel(MBOX_BIT(5), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR); return false; } static bool read_mailbox_6(void) { - writel(MBOX_BIT(6), PRCM_ARM_IT1_CLR); + writel(MBOX_BIT(6), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR); return false; } static bool read_mailbox_7(void) { - writel(MBOX_BIT(7), PRCM_ARM_IT1_CLR); + writel(MBOX_BIT(7), _PRCMU_BASE + PRCM_ARM_IT1_CLEAR); return false; } @@ -396,7 +2118,7 @@ static irqreturn_t prcmu_irq_handler(int irq, void *data) u8 n; irqreturn_t r; - bits = (readl(PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS); + bits = (readl(_PRCMU_BASE + PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS); if (unlikely(!bits)) return IRQ_NONE; @@ -406,6 +2128,7 @@ static irqreturn_t prcmu_irq_handler(int irq, void *data) bits -= MBOX_BIT(n); if (read_mailbox[n]()) r = IRQ_WAKE_THREAD; + prcmu_debug_register_interrupt(n); } } return r; @@ -413,39 +2136,271 @@ static irqreturn_t prcmu_irq_handler(int irq, void *data) static irqreturn_t prcmu_irq_thread_fn(int irq, void *data) { + u32 ev; + + /* + * ABB needs to be handled before the wakeup because + * the ping/pong buffers for ABB events could change + * after we acknowledge the wakeup. 
+ */ + if (readb(PRCM_ACK_MB0_READ_POINTER) & 1) + ev = readl(PRCM_ACK_MB0_WAKEUP_1_DBB); + else + ev = readl(PRCM_ACK_MB0_WAKEUP_0_DBB); + + ev &= mb0_transfer.req.dbb_irqs; + if (ev & WAKEUP_BIT_ABB) + handle_nested_irq(IRQ_DB5500_PRCMU_ABB); + ack_dbb_wakeup(); + return IRQ_HANDLED; } +static void prcmu_mask_work(struct work_struct *work) +{ + unsigned long flags; + + spin_lock_irqsave(&mb0_transfer.lock, flags); + + config_wakeups(); + + spin_unlock_irqrestore(&mb0_transfer.lock, flags); +} + +static void prcmu_irq_mask(struct irq_data *d) +{ + unsigned long flags; + + spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags); + + mb0_transfer.req.dbb_irqs &= ~prcmu_irq_bit[d->irq - IRQ_DB5500_PRCMU_BASE]; + + spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags); + schedule_work(&mb0_transfer.mask_work); +} + +static void prcmu_irq_unmask(struct irq_data *d) +{ + unsigned long flags; + + spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags); + + mb0_transfer.req.dbb_irqs |= prcmu_irq_bit[d->irq - IRQ_DB5500_PRCMU_BASE]; + + spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags); + schedule_work(&mb0_transfer.mask_work); +} + +static void noop(struct irq_data *d) +{ +} + +static struct irq_chip prcmu_irq_chip = { + .name = "prcmu", + .irq_disable = prcmu_irq_mask, + .irq_ack = noop, + .irq_mask = prcmu_irq_mask, + .irq_unmask = prcmu_irq_unmask, +}; + void __init db5500_prcmu_early_init(void) { + unsigned int i; + void *tcpm_base = ioremap_nocache(U5500_PRCMU_TCPM_BASE, SZ_4K); + + if (tcpm_base != NULL) { + int version_high, version_low; + + version_high = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET); + version_low = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET + 4); + prcmu_version.board = (version_high >> 24) & 0xFF; + prcmu_version.fw_version = version_high & 0xFF; + prcmu_version.api_version = version_low & 0xFF; + + pr_info("PRCMU Firmware Version: 0x%x\n", + prcmu_version.fw_version); + pr_info("PRCMU API Version: 0x%x\n", + prcmu_version.api_version); + 
+ iounmap(tcpm_base); + } + tcdm_base = __io_address(U5500_PRCMU_TCDM_BASE); + mtimer_base = __io_address(U5500_MTIMER_BASE); spin_lock_init(&mb0_transfer.lock); + spin_lock_init(&mb0_transfer.dbb_irqs_lock); + mutex_init(&mb0_transfer.ac_wake_lock); + mutex_init(&mb1_transfer.lock); + init_completion(&mb1_transfer.work); + mutex_init(&mb2_transfer.lock); + init_completion(&mb2_transfer.work); + mutex_init(&mb3_transfer.sysclk_lock); + init_completion(&mb3_transfer.sysclk_work); + mutex_init(&mb4_transfer.lock); + init_completion(&mb4_transfer.work); mutex_init(&mb5_transfer.lock); init_completion(&mb5_transfer.work); + + INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work); + + /* Initalize irqs. */ + for (i = 0; i < NUM_DB5500_PRCMU_WAKEUPS; i++) { + unsigned int irq; + + irq = IRQ_DB5500_PRCMU_BASE + i; + irq_set_chip_and_handler(irq, &prcmu_irq_chip, + handle_simple_irq); + if (irq == IRQ_DB5500_PRCMU_ABB) + irq_set_nested_thread(irq, true); + set_irq_flags(irq, IRQF_VALID); + } + prcmu_ape_clocks_init(); +} + +/* + * Power domain switches (ePODs) modeled as regulators for the DB5500 SoC + */ +static struct regulator_consumer_supply db5500_vape_consumers[] = { + REGULATOR_SUPPLY("v-ape", NULL), + REGULATOR_SUPPLY("v-i2c", "nmk-i2c.0"), + REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"), + REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"), + REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"), + REGULATOR_SUPPLY("vcore", "sdi0"), + REGULATOR_SUPPLY("vcore", "sdi1"), + REGULATOR_SUPPLY("vcore", "sdi2"), + REGULATOR_SUPPLY("vcore", "sdi3"), + REGULATOR_SUPPLY("vcore", "sdi4"), + REGULATOR_SUPPLY("v-uart", "uart0"), + REGULATOR_SUPPLY("v-uart", "uart1"), + REGULATOR_SUPPLY("v-uart", "uart2"), + REGULATOR_SUPPLY("v-uart", "uart3"), + REGULATOR_SUPPLY("v-ape", "db5500-keypad"), +}; + +static struct regulator_consumer_supply db5500_sga_consumers[] = { + REGULATOR_SUPPLY("debug", "reg-virt-consumer.0"), + REGULATOR_SUPPLY("v-mali", NULL), +}; + +static struct regulator_consumer_supply 
db5500_hva_consumers[] = { + REGULATOR_SUPPLY("debug", "reg-virt-consumer.1"), + REGULATOR_SUPPLY("v-hva", NULL), +}; + +static struct regulator_consumer_supply db5500_sia_consumers[] = { + REGULATOR_SUPPLY("debug", "reg-virt-consumer.2"), + REGULATOR_SUPPLY("v-sia", "mmio_camera"), +}; + +static struct regulator_consumer_supply db5500_disp_consumers[] = { + REGULATOR_SUPPLY("debug", "reg-virt-consumer.3"), + REGULATOR_SUPPLY("vsupply", "b2r2_bus"), + REGULATOR_SUPPLY("vsupply", "mcde"), +}; + +static struct regulator_consumer_supply db5500_esram12_consumers[] = { + REGULATOR_SUPPLY("debug", "reg-virt-consumer.4"), + REGULATOR_SUPPLY("v-esram12", "mcde"), + REGULATOR_SUPPLY("esram12", "hva"), +}; + +#define DB5500_REGULATOR_SWITCH(lower, upper) \ +[DB5500_REGULATOR_SWITCH_##upper] = { \ + .constraints = { \ + .valid_ops_mask = REGULATOR_CHANGE_STATUS, \ + }, \ + .consumer_supplies = db5500_##lower##_consumers, \ + .num_consumer_supplies = ARRAY_SIZE(db5500_##lower##_consumers),\ } +#define DB5500_REGULATOR_SWITCH_VAPE(lower, upper) \ +[DB5500_REGULATOR_SWITCH_##upper] = { \ + .supply_regulator = "db5500-vape", \ + .constraints = { \ + .valid_ops_mask = REGULATOR_CHANGE_STATUS, \ + }, \ + .consumer_supplies = db5500_##lower##_consumers, \ + .num_consumer_supplies = ARRAY_SIZE(db5500_##lower##_consumers),\ +} \ + +static struct regulator_init_data db5500_regulators[DB5500_NUM_REGULATORS] = { + [DB5500_REGULATOR_VAPE] = { + .constraints = { + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .consumer_supplies = db5500_vape_consumers, + .num_consumer_supplies = ARRAY_SIZE(db5500_vape_consumers), + }, + DB5500_REGULATOR_SWITCH_VAPE(sga, SGA), + DB5500_REGULATOR_SWITCH_VAPE(hva, HVA), + DB5500_REGULATOR_SWITCH_VAPE(sia, SIA), + DB5500_REGULATOR_SWITCH_VAPE(disp, DISP), + /* + * ESRAM12 is put in retention by the firmware when VAPE is + * turned off so there's no need to hold VAPE. 
+ */ + DB5500_REGULATOR_SWITCH(esram12, ESRAM12), +}; + +static struct mfd_cell db5500_prcmu_devs[] = { + { + .name = "db5500-prcmu-regulators", + .platform_data = &db5500_regulators, + .pdata_size = sizeof(db5500_regulators), + }, + { + .name = "cpufreq-u5500", + }, +}; + /** * prcmu_fw_init - arch init call for the Linux PRCMU fw init logic * */ -int __init db5500_prcmu_init(void) +static int __init db5500_prcmu_probe(struct platform_device *pdev) { - int r = 0; + int err = 0; if (ux500_is_svp() || !cpu_is_u5500()) return -ENODEV; /* Clean up the mailbox interrupts after pre-kernel code. */ - writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR); + writel(ALL_MBOX_BITS, _PRCMU_BASE + PRCM_ARM_IT1_CLEAR); - r = request_threaded_irq(IRQ_DB5500_PRCMU1, prcmu_irq_handler, - prcmu_irq_thread_fn, 0, "prcmu", NULL); - if (r < 0) { + err = request_threaded_irq(IRQ_DB5500_PRCMU1, prcmu_irq_handler, + prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL); + if (err < 0) { pr_err("prcmu: Failed to allocate IRQ_DB5500_PRCMU1.\n"); - return -EBUSY; + err = -EBUSY; + goto no_irq_return; } - return 0; + + err = mfd_add_devices(&pdev->dev, 0, db5500_prcmu_devs, + ARRAY_SIZE(db5500_prcmu_devs), NULL, + 0); + + if (err) + pr_err("prcmu: Failed to add subdevices\n"); + else + pr_info("DB5500 PRCMU initialized\n"); + +no_irq_return: + return err; + +} + +static struct platform_driver db5500_prcmu_driver = { + .driver = { + .name = "db5500-prcmu", + .owner = THIS_MODULE, + }, +}; + +static int __init db5500_prcmu_init(void) +{ + return platform_driver_probe(&db5500_prcmu_driver, db5500_prcmu_probe); } arch_initcall(db5500_prcmu_init); diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c index 5be32489714..7c26c41a7ef 100644 --- a/drivers/mfd/db8500-prcmu.c +++ b/drivers/mfd/db8500-prcmu.c @@ -30,11 +30,13 @@ #include <linux/mfd/dbx500-prcmu.h> #include <linux/regulator/db8500-prcmu.h> #include <linux/regulator/machine.h> +#include <linux/mfd/abx500.h> #include <asm/hardware/gic.h> 
#include <mach/hardware.h> #include <mach/irqs.h> #include <mach/db8500-regs.h> #include <mach/id.h> +#include <mach/prcmu-debug.h> #include "dbx500-prcmu-regs.h" /* Offset for the firmware version within the TCPM */ @@ -70,6 +72,8 @@ #define PRCM_SW_RST_REASON 0xFF8 /* 2 bytes */ +#define PRCM_TCDM_VOICE_CALL_FLAG 0xDD4 /* 4 bytes */ + #define _PRCM_MBOX_HEADER 0xFE8 /* 16 bytes */ #define PRCM_MBOX_HEADER_REQ_MB0 (_PRCM_MBOX_HEADER + 0x0) #define PRCM_MBOX_HEADER_REQ_MB1 (_PRCM_MBOX_HEADER + 0x1) @@ -214,10 +218,8 @@ #define PRCM_REQ_MB5_I2C_HW_BITS (PRCM_REQ_MB5 + 0x1) #define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 0x2) #define PRCM_REQ_MB5_I2C_VAL (PRCM_REQ_MB5 + 0x3) -#define PRCMU_I2C_WRITE(slave) \ - (((slave) << 1) | (cpu_is_u8500v2() ? BIT(6) : 0)) -#define PRCMU_I2C_READ(slave) \ - (((slave) << 1) | BIT(0) | (cpu_is_u8500v2() ? BIT(6) : 0)) +#define PRCMU_I2C_WRITE(slave) (((slave) << 1) | BIT(6)) +#define PRCMU_I2C_READ(slave) (((slave) << 1) | BIT(0) | BIT(6)) #define PRCMU_I2C_STOP_EN BIT(3) /* Mailbox 5 ACKs */ @@ -424,6 +426,13 @@ static DEFINE_SPINLOCK(clkout_lock); /* Global var to runtime determine TCDM base for v2 or v1 */ static __iomem void *tcdm_base; +/* + * Copies of the startup values of the reset status register and the SW reset + * code. + */ +static u32 reset_status_copy; +static u16 reset_code_copy; + struct clk_mgt { void __iomem *reg; u32 pllsw; @@ -637,6 +646,26 @@ void db8500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value) spin_unlock_irqrestore(&prcmu_lock, flags); } +/* + * Dump AB8500 registers, PRCMU registers and PRCMU data memory + * on critical errors. 
+ */ +static void db8500_prcmu_debug_dump(const char *func, + bool dump_prcmu, bool dump_abb) +{ + printk(KERN_DEBUG"%s: timeout\n", func); + + /* Dump AB8500 registers */ + if (dump_abb) + abx500_dump_all_banks(); + + /* Dump prcmu registers and data memory */ + if (dump_prcmu) { + prcmu_debug_dump_regs(); + prcmu_debug_dump_data_mem(); + } +} + struct prcmu_fw_version *prcmu_get_fw_version(void) { return fw_info.valid ? &fw_info.version : NULL; @@ -648,6 +677,11 @@ bool prcmu_has_arm_maxopp(void) PRCM_AVS_ISMODEENABLE_MASK) == PRCM_AVS_ISMODEENABLE_MASK; } +void db8500_prcmu_vc(bool enable) +{ + writel((enable ? 0xF : 0), (tcdm_base + PRCM_TCDM_VOICE_CALL_FLAG)); +} + /** * prcmu_get_boot_status - PRCMU boot status checking * Returns: the current PRCMU boot status @@ -1049,7 +1083,7 @@ int db8500_prcmu_set_ddr_opp(u8 opp) if (opp < DDR_100_OPP || opp > DDR_25_OPP) return -EINVAL; /* Changing the DDR OPP can hang the hardware pre-v21 */ - if (cpu_is_u8500v20_or_later() && !cpu_is_u8500v20()) + if (!cpu_is_u8500v20()) writeb(opp, PRCM_DDR_SUBSYS_APE_MINBW); return 0; @@ -1111,12 +1145,14 @@ unlock_and_return: int db8500_prcmu_set_ape_opp(u8 opp) { int r = 0; + u8 prcmu_opp_req; if (opp == mb1_transfer.ape_opp) return 0; mutex_lock(&mb1_transfer.lock); + /* Exit APE_50_PARTLY_25_OPP */ if (mb1_transfer.ape_opp == APE_50_PARTLY_25_OPP) request_even_slower_clocks(false); @@ -1126,20 +1162,22 @@ int db8500_prcmu_set_ape_opp(u8 opp) while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1)) cpu_relax(); + prcmu_opp_req = (opp == APE_50_PARTLY_25_OPP) ? APE_50_OPP : opp; + writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1)); writeb(ARM_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_ARM_OPP)); - writeb(((opp == APE_50_PARTLY_25_OPP) ? 
APE_50_OPP : opp), - (tcdm_base + PRCM_REQ_MB1_APE_OPP)); + writeb(prcmu_opp_req, (tcdm_base + PRCM_REQ_MB1_APE_OPP)); writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET); wait_for_completion(&mb1_transfer.work); if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) || - (mb1_transfer.ack.ape_opp != opp)) + (mb1_transfer.ack.ape_opp != prcmu_opp_req)) r = -EIO; skip_message: if ((!r && (opp == APE_50_PARTLY_25_OPP)) || + /* Set APE_50_PARTLY_25_OPP back in case new opp failed */ (r && (mb1_transfer.ape_opp == APE_50_PARTLY_25_OPP))) request_even_slower_clocks(true); if (!r) @@ -1322,6 +1360,7 @@ int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state) pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n", __func__); r = -EIO; + db8500_prcmu_debug_dump(__func__, true, true); goto unlock_and_return; } @@ -1416,6 +1455,7 @@ static int request_sysclk(bool enable) pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n", __func__); r = -EIO; + db8500_prcmu_debug_dump(__func__, true, true); } mutex_unlock(&mb3_transfer.sysclk_lock); @@ -2190,6 +2230,7 @@ int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size) pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n", __func__); r = -EIO; + db8500_prcmu_debug_dump(__func__, true, false); } else { r = ((mb5_transfer.ack.status == I2C_RD_OK) ? 0 : -EIO); } @@ -2240,6 +2281,7 @@ int prcmu_abb_write_masked(u8 slave, u8 reg, u8 *value, u8 *mask, u8 size) pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n", __func__); r = -EIO; + db8500_prcmu_debug_dump(__func__, true, false); } else { r = ((mb5_transfer.ack.status == I2C_WR_OK) ? 
0 : -EIO); } @@ -2287,6 +2329,7 @@ retry: if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work, msecs_to_jiffies(5000))) { + db8500_prcmu_debug_dump(__func__, true, true); pr_crit("prcmu: %s timed out (5 s) waiting for a reply.\n", __func__); goto unlock_and_return; @@ -2309,6 +2352,7 @@ retry: if (wait_for_completion_timeout(&mb0_transfer.ac_wake_work, msecs_to_jiffies(5000))) goto retry; + db8500_prcmu_debug_dump(__func__, true, true); pr_crit("prcmu: %s timed out (5 s) waiting for AC_SLEEP_ACK.\n", __func__); } @@ -2335,6 +2379,7 @@ void prcmu_ac_sleep_req() if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work, msecs_to_jiffies(5000))) { + db8500_prcmu_debug_dump(__func__, true, true); pr_crit("prcmu: %s timed out (5 s) waiting for a reply.\n", __func__); } @@ -2370,7 +2415,17 @@ void db8500_prcmu_system_reset(u16 reset_code) */ u16 db8500_prcmu_get_reset_code(void) { - return readw(tcdm_base + PRCM_SW_RST_REASON); + return reset_code_copy; +} + +/** + * db8500_prcmu_get_reset_status - Retrieve reset status + * + * Retrieves the value of the reset status register as read at startup. 
+ */ +u32 db8500_prcmu_get_reset_status(void) +{ + return reset_status_copy; } /** @@ -2437,6 +2492,13 @@ static bool read_mailbox_0(void) if (ev & WAKEUP_BIT_SYSCLK_OK) complete(&mb3_transfer.sysclk_work); + prcmu_debug_register_mbox0_event(ev, + (mb0_transfer.req.dbb_irqs | + mb0_transfer.req.dbb_wakeups | + WAKEUP_BIT_AC_WAKE_ACK | + WAKEUP_BIT_AC_SLEEP_ACK | + WAKEUP_BIT_SYSCLK_OK)); + ev &= mb0_transfer.req.dbb_irqs; for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) { @@ -2561,6 +2623,7 @@ static irqreturn_t prcmu_irq_handler(int irq, void *data) bits -= MBOX_BIT(n); if (read_mailbox[n]()) r = IRQ_WAKE_THREAD; + prcmu_debug_register_interrupt(n); } } return r; @@ -2646,29 +2709,38 @@ static char *fw_project_name(u8 project) void __init db8500_prcmu_early_init(void) { unsigned int i; - if (cpu_is_u8500v2()) { - void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K); - - if (tcpm_base != NULL) { - u32 version; - version = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET); - fw_info.version.project = version & 0xFF; - fw_info.version.api_version = (version >> 8) & 0xFF; - fw_info.version.func_version = (version >> 16) & 0xFF; - fw_info.version.errata = (version >> 24) & 0xFF; - fw_info.valid = true; - pr_info("PRCMU firmware: %s, version %d.%d.%d\n", - fw_project_name(fw_info.version.project), - (version >> 8) & 0xFF, (version >> 16) & 0xFF, - (version >> 24) & 0xFF); - iounmap(tcpm_base); - } + void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K); + void __iomem *sec_base; + + if (tcpm_base != NULL) { + u32 version; + version = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET); + fw_info.version.project = version & 0xFF; + fw_info.version.api_version = (version >> 8) & 0xFF; + fw_info.version.func_version = (version >> 16) & 0xFF; + fw_info.version.errata = (version >> 24) & 0xFF; + fw_info.valid = true; + pr_info("PRCMU firmware: %s, version %d.%d.%d\n", + fw_project_name(fw_info.version.project), + (version >> 8) & 0xFF, (version >> 16) & 0xFF, + (version >> 24) & 
0xFF); + iounmap(tcpm_base); + } - tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE); - } else { - pr_err("prcmu: Unsupported chip version\n"); - BUG(); + tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE); + + /* + * Copy the value of the reset status register and if needed also + * the software reset code. + */ + sec_base = ioremap_nocache(U8500_PRCMU_SEC_BASE, SZ_4K); + if (sec_base != NULL) { + reset_status_copy = readl(sec_base + + DB8500_SEC_PRCM_RESET_STATUS); + iounmap(sec_base); } + if (reset_status_copy & DB8500_SEC_PRCM_RESET_STATUS_APE_SOFTWARE_RESET) + reset_code_copy = readw(tcdm_base + PRCM_SW_RST_REASON); spin_lock_init(&mb0_transfer.lock); spin_lock_init(&mb0_transfer.dbb_irqs_lock); @@ -2734,6 +2806,7 @@ static struct regulator_consumer_supply db8500_vape_consumers[] = { REGULATOR_SUPPLY("vcore", "uart2"), REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"), REGULATOR_SUPPLY("v-hsi", "ste_hsi.0"), + REGULATOR_SUPPLY("vddvario", "smsc911x.0"), }; static struct regulator_consumer_supply db8500_vsmps2_consumers[] = { @@ -2743,7 +2816,7 @@ static struct regulator_consumer_supply db8500_vsmps2_consumers[] = { }; static struct regulator_consumer_supply db8500_b2r2_mcde_consumers[] = { - REGULATOR_SUPPLY("vsupply", "b2r2_bus"), + REGULATOR_SUPPLY("vsupply", "b2r2_core"), REGULATOR_SUPPLY("vsupply", "mcde"), }; @@ -2962,9 +3035,6 @@ static int __init db8500_prcmu_probe(struct platform_device *pdev) { int err = 0; - if (ux500_is_svp()) - return -ENODEV; - init_prcm_registers(); /* Clean up the mailbox interrupts after pre-kernel code. 
*/ @@ -2978,8 +3048,7 @@ static int __init db8500_prcmu_probe(struct platform_device *pdev) goto no_irq_return; } - if (cpu_is_u8500v20_or_later()) - prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET); + prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET); err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs, ARRAY_SIZE(db8500_prcmu_devs), NULL, diff --git a/drivers/mfd/dbx500-prcmu-regs.h b/drivers/mfd/dbx500-prcmu-regs.h index 3a0bf91d778..0835a57dac1 100644 --- a/drivers/mfd/dbx500-prcmu-regs.h +++ b/drivers/mfd/dbx500-prcmu-regs.h @@ -34,6 +34,9 @@ #define PRCM_PER5CLK_MGT PRCM_CLK_MGT(0x038) #define PRCM_PER6CLK_MGT PRCM_CLK_MGT(0x03C) #define PRCM_PER7CLK_MGT PRCM_CLK_MGT(0x040) +#define PRCM_PWMCLK_MGT PRCM_CLK_MGT(0x044) /* for DB5500 */ +#define PRCM_IRDACLK_MGT PRCM_CLK_MGT(0x048) /* for DB5500 */ +#define PRCM_IRRCCLK_MGT PRCM_CLK_MGT(0x04C) /* for DB5500 */ #define PRCM_LCDCLK_MGT PRCM_CLK_MGT(0x044) #define PRCM_BMLCLK_MGT PRCM_CLK_MGT(0x04C) #define PRCM_HSITXCLK_MGT PRCM_CLK_MGT(0x050) @@ -124,7 +127,6 @@ #define PRCM_ITSTATUS4 (_PRCMU_BASE + 0x168) #define PRCM_ITSTATUS5 (_PRCMU_BASE + 0x484) #define PRCM_ITCLEAR5 (_PRCMU_BASE + 0x488) -#define PRCM_ARMIT_MASKXP70_IT (_PRCMU_BASE + 0x1018) /* System reset register */ #define PRCM_APE_SOFTRST (_PRCMU_BASE + 0x228) @@ -247,4 +249,17 @@ /* System reset register */ #define PRCM_APE_SOFTRST (_PRCMU_BASE + 0x228) +/* Secure read-only registers */ +#define DB8500_SEC_PRCM_RESET_STATUS 0x03C +#define DB8500_SEC_PRCM_RESET_STATUS_A9_CPU0_WATCHDOG_RESET BIT(0) +#define DB8500_SEC_PRCM_RESET_STATUS_A9_CPU1_WATCHDOG_RESET BIT(1) +#define DB8500_SEC_PRCM_RESET_STATUS_APE_SOFTWARE_RESET BIT(2) +#define DB8500_SEC_PRCM_RESET_STATUS_APE_RESET BIT(3) +#define DB8500_SEC_PRCM_RESET_STATUS_SECURE_WATCHDOG BIT(4) +#define DB8500_SEC_PRCM_RESET_STATUS_POWER_ON_RESET BIT(5) +#define DB8500_SEC_PRCM_RESET_STATUS_A9_RESTART BIT(6) +#define DB8500_SEC_PRCM_RESET_STATUS_APE_RESTART BIT(7) +#define 
DB8500_SEC_PRCM_RESET_STATUS_MODEM_SOFTWARE_RESET BIT(8) +#define DB8500_SEC_PRCM_RESET_STATUS_BOOT_ENGI BIT(16) + #endif /* __DB8500_PRCMU_REGS_H */ diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c index 2dd8d49cb30..6b8f9417c00 100644 --- a/drivers/mfd/stmpe.c +++ b/drivers/mfd/stmpe.c @@ -772,7 +772,7 @@ static irqreturn_t stmpe_irq(int irq, void *data) ret = stmpe_block_read(stmpe, israddr, num, isr); if (ret < 0) return IRQ_NONE; - +back: for (i = 0; i < num; i++) { int bank = num - i - 1; u8 status = isr[i]; @@ -794,6 +794,22 @@ static irqreturn_t stmpe_irq(int irq, void *data) stmpe_reg_write(stmpe, israddr + i, clear); } + /* + It may happen that on the first status read interrupt + sources may not showup, so read one more time. + */ + ret = stmpe_block_read(stmpe, israddr, num, isr); + if (ret >= 0) { + for (i = 0; i < num; i++) { + int bank = num - i - 1; + u8 status = isr[i]; + + status &= stmpe->ier[bank]; + if (status) + goto back; + } + } + return IRQ_HANDLED; } diff --git a/drivers/mfd/tc35892.c b/drivers/mfd/tc35892.c new file mode 100644 index 00000000000..91211f29623 --- /dev/null +++ b/drivers/mfd/tc35892.c @@ -0,0 +1,503 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License, version 2 + * Author: Hanumath Prasad <hanumath.prasad@stericsson.com> for ST-Ericsson + * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson + */ + +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/mfd/core.h> +#include <linux/mfd/tc35892.h> + +#define TC35892_CLKMODE_MODCTL_SLEEP 0x0 +#define TC35892_CLKMODE_MODCTL_OPERATION (1 << 0) + +/** + * tc35892_reg_read() - read a single TC35892 register + * @tc35892: Device to read from + * @reg: Register to read + */ +int tc35892_reg_read(struct tc35892 *tc35892, u8 reg) +{ + int ret; + + ret = i2c_smbus_read_byte_data(tc35892->i2c, reg); + if (ret < 0) + 
dev_err(tc35892->dev, "failed to read reg %#x: %d\n", + reg, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(tc35892_reg_read); + +/** + * tc35892_reg_read() - write a single TC35892 register + * @tc35892: Device to write to + * @reg: Register to read + * @data: Value to write + */ +int tc35892_reg_write(struct tc35892 *tc35892, u8 reg, u8 data) +{ + int ret; + + ret = i2c_smbus_write_byte_data(tc35892->i2c, reg, data); + if (ret < 0) + dev_err(tc35892->dev, "failed to write reg %#x: %d\n", + reg, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(tc35892_reg_write); + +/** + * tc35892_block_read() - read multiple TC35892 registers + * @tc35892: Device to read from + * @reg: First register + * @length: Number of registers + * @values: Buffer to write to + */ +int tc35892_block_read(struct tc35892 *tc35892, u8 reg, u8 length, u8 *values) +{ + int ret; + + ret = i2c_smbus_read_i2c_block_data(tc35892->i2c, reg, length, values); + if (ret < 0) + dev_err(tc35892->dev, "failed to read regs %#x: %d\n", + reg, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(tc35892_block_read); + +/** + * tc35892_block_write() - write multiple TC35892 registers + * @tc35892: Device to write to + * @reg: First register + * @length: Number of registers + * @values: Values to write + */ +int tc35892_block_write(struct tc35892 *tc35892, u8 reg, u8 length, + const u8 *values) +{ + int ret; + + ret = i2c_smbus_write_i2c_block_data(tc35892->i2c, reg, length, + values); + if (ret < 0) + dev_err(tc35892->dev, "failed to write regs %#x: %d\n", + reg, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(tc35892_block_write); + +/** + * tc35892_set_bits() - set the value of a bitfield in a TC35892 register + * @tc35892: Device to write to + * @reg: Register to write + * @mask: Mask of bits to set + * @values: Value to set + */ +int tc35892_set_bits(struct tc35892 *tc35892, u8 reg, u8 mask, u8 val) +{ + int ret; + + mutex_lock(&tc35892->lock); + + ret = tc35892_reg_read(tc35892, reg); + if (ret < 0) + goto out; + + ret &= 
~mask; + ret |= val; + + ret = tc35892_reg_write(tc35892, reg, ret); + +out: + mutex_unlock(&tc35892->lock); + return ret; +} +EXPORT_SYMBOL_GPL(tc35892_set_bits); + +static struct resource gpio_resources[] = { + { + .start = TC35892_INT_GPIIRQ, + .end = TC35892_INT_GPIIRQ, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct mfd_cell tc35892_devs[] = { + { + .name = "tc35892-gpio", + .num_resources = ARRAY_SIZE(gpio_resources), + .resources = &gpio_resources[0], + }, +}; + +static irqreturn_t tc35892_irq(int irq, void *data) +{ + struct tc35892 *tc35892 = data; + int status; + +again: + status = tc35892_reg_read(tc35892, TC35892_IRQST); + if (status < 0) + return IRQ_NONE; + + while (status) { + int bit = __ffs(status); + + handle_nested_irq(tc35892->irq_base + bit); + status &= ~(1 << bit); + } + + /* + * A dummy read or write (to any register) appears to be necessary to + * have the last interrupt clear (for example, GPIO IC write) take + * effect. In such a case, recheck for any interrupt which is still + * pending. 
+ */ + status = tc35892_reg_read(tc35892, TC35892_IRQST); + if (status) + goto again; + + return IRQ_HANDLED; +} + +static void tc35892_irq_dummy(unsigned int irq) +{ + /* No mask/unmask at this level */ +} + +static struct irq_chip tc35892_irq_chip = { + .name = "tc35892", + .irq_mask = tc35892_irq_dummy, + .irq_unmask = tc35892_irq_dummy, +}; + +static int tc35892_irq_init(struct tc35892 *tc35892) +{ + int base = tc35892->irq_base; + int irq; + + for (irq = base; irq < base + TC35892_NR_INTERNAL_IRQS; irq++) { + irq_set_chip_data(irq, tc35892); + irq_set_chip_and_handler(irq, &tc35892_irq_chip, + handle_edge_irq); + irq_set_nested_thread(irq, 1); +#ifdef CONFIG_ARM + set_irq_flags(irq, IRQF_VALID); +#else + set_irq_noprobe(irq); +#endif + } + + return 0; +} + +static void tc35892_irq_remove(struct tc35892 *tc35892) +{ + int base = tc35892->irq_base; + int irq; + + for (irq = base; irq < base + TC35892_NR_INTERNAL_IRQS; irq++) { +#ifdef CONFIG_ARM + set_irq_flags(irq, 0); +#endif + irq_set_chip_and_handler(irq, NULL, NULL); + irq_set_chip_data(irq, NULL); + } +} + +static int tc35892_chip_init(struct tc35892 *tc35892) +{ + int manf, ver, ret; + + manf = tc35892_reg_read(tc35892, TC35892_MANFCODE); + if (manf < 0) + return manf; + + ver = tc35892_reg_read(tc35892, TC35892_VERSION); + if (ver < 0) + return ver; + + if (manf != TC35892_MANFCODE_MAGIC) { + dev_err(tc35892->dev, "unknown manufacturer: %#x\n", manf); + return -EINVAL; + } + + dev_info(tc35892->dev, "manufacturer: %#x, version: %#x\n", manf, ver); + + /* + * Put everything except the IRQ module into reset; + * also spare the GPIO module for any pin initialization + * done during pre-kernel boot + */ + ret = tc35892_reg_write(tc35892, TC35892_RSTCTRL, + TC35892_RSTCTRL_TIMRST + | TC35892_RSTCTRL_ROTRST + | TC35892_RSTCTRL_KBDRST); + if (ret < 0) + return ret; + + /* Clear the reset interrupt. 
*/ + return tc35892_reg_write(tc35892, TC35892_RSTINTCLR, 0x1); +} + +static int __devinit tc35892_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + struct tc35892_platform_data *pdata = i2c->dev.platform_data; + struct tc35892 *tc35892; + int ret; + + if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA + | I2C_FUNC_SMBUS_I2C_BLOCK)) + return -EIO; + + tc35892 = kzalloc(sizeof(struct tc35892), GFP_KERNEL); + if (!tc35892) + return -ENOMEM; + + mutex_init(&tc35892->lock); + + tc35892->dev = &i2c->dev; + tc35892->i2c = i2c; + tc35892->pdata = pdata; + tc35892->irq_base = pdata->irq_base; + tc35892->num_gpio = id->driver_data; + + i2c_set_clientdata(i2c, tc35892); + + ret = tc35892_chip_init(tc35892); + if (ret) + goto out_free; + + ret = tc35892_irq_init(tc35892); + if (ret) + goto out_free; + + ret = request_threaded_irq(tc35892->i2c->irq, NULL, tc35892_irq, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + "tc35892", tc35892); + if (ret) { + dev_err(tc35892->dev, "failed to request IRQ: %d\n", ret); + goto out_removeirq; + } + + ret = mfd_add_devices(tc35892->dev, -1, tc35892_devs, + ARRAY_SIZE(tc35892_devs), NULL, + tc35892->irq_base); + if (ret) { + dev_err(tc35892->dev, "failed to add children\n"); + goto out_freeirq; + } + + return 0; + +out_freeirq: + free_irq(tc35892->i2c->irq, tc35892); +out_removeirq: + tc35892_irq_remove(tc35892); +out_free: + kfree(tc35892); + return ret; +} + +static int __devexit tc35892_remove(struct i2c_client *client) +{ + struct tc35892 *tc35892 = i2c_get_clientdata(client); + + mfd_remove_devices(tc35892->dev); + + free_irq(tc35892->i2c->irq, tc35892); + tc35892_irq_remove(tc35892); + + kfree(tc35892); + + return 0; +} + +#ifdef CONFIG_PM + +static u32 sleep_regs[] = { + TC35892_IOPC0_L, + TC35892_IOPC0_H, + TC35892_IOPC1_L, + TC35892_IOPC1_H, + TC35892_IOPC2_L, + TC35892_IOPC2_H, + TC35892_DRIVE0_L, + TC35892_DRIVE0_H, + TC35892_DRIVE1_L, + TC35892_DRIVE1_H, + TC35892_DRIVE2_L, + TC35892_DRIVE2_H, + 
TC35892_DRIVE3, + TC35892_GPIODATA0, + TC35892_GPIOMASK0, + TC35892_GPIODATA1, + TC35892_GPIOMASK1, + TC35892_GPIODATA2, + TC35892_GPIOMASK2, + TC35892_GPIODIR0, + TC35892_GPIODIR1, + TC35892_GPIODIR2, + TC35892_GPIOIE0, + TC35892_GPIOIE1, + TC35892_GPIOIE2, + TC35892_RSTCTRL, + TC35892_CLKCFG, +}; + +static u8 sleep_regs_val[] = { + 0x00, /* TC35892_IOPC0_L */ + 0x00, /* TC35892_IOPC0_H */ + 0x00, /* TC35892_IOPC1_L */ + 0x00, /* TC35892_IOPC1_H */ + 0x00, /* TC35892_IOPC2_L */ + 0x00, /* TC35892_IOPC2_H */ + 0xff, /* TC35892_DRIVE0_L */ + 0xff, /* TC35892_DRIVE0_H */ + 0xff, /* TC35892_DRIVE1_L */ + 0xff, /* TC35892_DRIVE1_H */ + 0xff, /* TC35892_DRIVE2_L */ + 0xff, /* TC35892_DRIVE2_H */ + 0x0f, /* TC35892_DRIVE3 */ + 0x80, /* TC35892_GPIODATA0 */ + 0x80, /* TC35892_GPIOMASK0 */ + 0x80, /* TC35892_GPIODATA1 */ + 0x80, /* TC35892_GPIOMASK1 */ + 0x06, /* TC35892_GPIODATA2 */ + 0x06, /* TC35892_GPIOMASK2 */ + 0xf0, /* TC35892_GPIODIR0 */ + 0xe0, /* TC35892_GPIODIR1 */ + 0xee, /* TC35892_GPIODIR2 */ + 0x0f, /* TC35892_GPIOIE0 */ + 0x1f, /* TC35892_GPIOIE1 */ + 0x11, /* TC35892_GPIOIE2 */ + 0x0f, /* TC35892_RSTCTRL */ + 0xb0 /* TC35892_CLKCFG */ + +}; + +static u8 sleep_regs_backup[ARRAY_SIZE(sleep_regs)]; + +static int tc35892_suspend(struct device *dev) +{ + struct tc35892 *tc35892 = dev_get_drvdata(dev); + struct i2c_client *client = tc35892->i2c; + int ret = 0; + int i, j; + int val; + + /* Put the system to sleep mode */ + if (!device_may_wakeup(&client->dev)) { + for (i = 0; i < ARRAY_SIZE(sleep_regs); i++) { + val = tc35892_reg_read(tc35892, + sleep_regs[i]); + if (val < 0) + goto out; + + sleep_regs_backup[i] = (u8) (val & 0xff); + } + + for (i = 0; i < ARRAY_SIZE(sleep_regs); i++) { + ret = tc35892_reg_write(tc35892, + sleep_regs[i], + sleep_regs_val[i]); + if (ret < 0) + goto fail; + + } + + ret = tc35892_reg_write(tc35892, + TC35892_CLKMODE, + TC35892_CLKMODE_MODCTL_SLEEP); + } +out: + return ret; +fail: + for (j = 0; j <= i; j++) { + ret = 
tc35892_reg_write(tc35892, + sleep_regs[i], + sleep_regs_backup[i]); + if (ret < 0) + break; + } + return ret; +} + +static int tc35892_resume(struct device *dev) +{ + struct tc35892 *tc35892 = dev_get_drvdata(dev); + struct i2c_client *client = tc35892->i2c; + int ret = 0; + int i; + + /* Enable the system into operation */ + if (!device_may_wakeup(&client->dev)) + { + ret = tc35892_reg_write(tc35892, + TC35892_CLKMODE, + TC35892_CLKMODE_MODCTL_OPERATION); + if (ret < 0) + goto out; + + for (i = ARRAY_SIZE(sleep_regs) - 1; i >= 0; i--) { + ret = tc35892_reg_write(tc35892, + sleep_regs[i], + sleep_regs_backup[i]); + /* Not much to do here if we fail */ + if (ret < 0) + break; + } + } +out: + return ret; +} + +static const struct dev_pm_ops tc35892_dev_pm_ops = { + .suspend = tc35892_suspend, + .resume = tc35892_resume, +}; +#endif + +static const struct i2c_device_id tc35892_id[] = { + { "tc35892", 24 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, tc35892_id); + +static struct i2c_driver tc35892_driver = { + .driver.name = "tc35892", + .driver.owner = THIS_MODULE, +#ifdef CONFIG_PM + .driver.pm = &tc35892_dev_pm_ops, +#endif + .probe = tc35892_probe, + .remove = __devexit_p(tc35892_remove), + .id_table = tc35892_id, +}; + +static int __init tc35892_init(void) +{ + return i2c_add_driver(&tc35892_driver); +} +subsys_initcall(tc35892_init); + +static void __exit tc35892_exit(void) +{ + i2c_del_driver(&tc35892_driver); +} +module_exit(tc35892_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("TC35892 MFD core driver"); +MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent"); diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c index de979742c6f..0e79fe2d214 100644 --- a/drivers/mfd/tc3589x.c +++ b/drivers/mfd/tc3589x.c @@ -358,16 +358,114 @@ static int __devexit tc3589x_remove(struct i2c_client *client) } #ifdef CONFIG_PM + +static u32 sleep_regs[] = { + TC3589x_IOPC0_L, + TC3589x_IOPC0_H, + TC3589x_IOPC1_L, + TC3589x_IOPC1_H, + TC3589x_IOPC2_L, + TC3589x_IOPC2_H, + 
TC3589x_DRIVE0_L, + TC3589x_DRIVE0_H, + TC3589x_DRIVE1_L, + TC3589x_DRIVE1_H, + TC3589x_DRIVE2_L, + TC3589x_DRIVE2_H, + TC3589x_DRIVE3, + TC3589x_GPIODATA0, + TC3589x_GPIOMASK0, + TC3589x_GPIODATA1, + TC3589x_GPIOMASK1, + TC3589x_GPIODATA2, + TC3589x_GPIOMASK2, + TC3589x_GPIODIR0, + TC3589x_GPIODIR1, + TC3589x_GPIODIR2, + TC3589x_GPIOIE0, + TC3589x_GPIOIE1, + TC3589x_GPIOIE2, + TC3589x_RSTCTRL, + TC3589x_CLKCFG, +}; + +static u8 sleep_regs_val[] = { + 0x00, /* TC3589x_IOPC0_L */ + 0x00, /* TC3589x_IOPC0_H */ + 0x00, /* TC3589x_IOPC1_L */ + 0x00, /* TC3589x_IOPC1_H */ + 0x00, /* TC3589x_IOPC2_L */ + 0x00, /* TC3589x_IOPC2_H */ + 0xff, /* TC3589x_DRIVE0_L */ + 0xff, /* TC3589x_DRIVE0_H */ + 0xff, /* TC3589x_DRIVE1_L */ + 0xff, /* TC3589x_DRIVE1_H */ + 0xff, /* TC3589x_DRIVE2_L */ + 0xff, /* TC3589x_DRIVE2_H */ + 0x0f, /* TC3589x_DRIVE3 */ + 0x80, /* TC3589x_GPIODATA0 */ + 0x80, /* TC3589x_GPIOMASK0 */ + 0x80, /* TC3589x_GPIODATA1 */ + 0x80, /* TC3589x_GPIOMASK1 */ + 0x06, /* TC3589x_GPIODATA2 */ + 0x06, /* TC3589x_GPIOMASK2 */ + 0xf0, /* TC3589x_GPIODIR0 */ + 0xe0, /* TC3589x_GPIODIR1 */ + 0xee, /* TC3589x_GPIODIR2 */ + 0x0f, /* TC3589x_GPIOIE0 */ + 0x1f, /* TC3589x_GPIOIE1 */ + 0x11, /* TC3589x_GPIOIE2 */ + 0x0f, /* TC3589x_RSTCTRL */ + 0xb0 /* TC3589x_CLKCFG */ + +}; + +static u8 sleep_regs_backup[ARRAY_SIZE(sleep_regs)]; + static int tc3589x_suspend(struct device *dev) { struct tc3589x *tc3589x = dev_get_drvdata(dev); struct i2c_client *client = tc3589x->i2c; int ret = 0; + int i, j; + int val; + + /* Put the system to sleep mode */ + if (!device_may_wakeup(&client->dev)) { + for (i = 0; i < ARRAY_SIZE(sleep_regs); i++) { + val = tc3589x_reg_read(tc3589x, + sleep_regs[i]); + if (val < 0) + goto out; + + sleep_regs_backup[i] = (u8) (val & 0xff); + } - /* put the system to sleep mode */ - if (!device_may_wakeup(&client->dev)) - ret = tc3589x_reg_write(tc3589x, TC3589x_CLKMODE, - TC3589x_CLKMODE_MODCTL_SLEEP); + for (i = 0; i < ARRAY_SIZE(sleep_regs); i++) { + ret = 
tc3589x_reg_write(tc3589x, + sleep_regs[i], + sleep_regs_val[i]); + if (ret < 0) + goto fail; + + } + + ret = tc3589x_reg_write(tc3589x, + TC3589x_CLKMODE, + TC3589x_CLKMODE_MODCTL_SLEEP); + } else { + enable_irq_wake(client->irq); + } +out: + return ret; +fail: + for (j = 0; j <= i; j++) { + ret = tc3589x_reg_write(tc3589x, + sleep_regs[i], + sleep_regs_backup[i]); + if (ret < 0) + break; + } return ret; } @@ -377,12 +475,29 @@ static int tc3589x_resume(struct device *dev) struct tc3589x *tc3589x = dev_get_drvdata(dev); struct i2c_client *client = tc3589x->i2c; int ret = 0; + int i; - /* enable the system into operation */ + /* Enable the system into operation */ if (!device_may_wakeup(&client->dev)) - ret = tc3589x_reg_write(tc3589x, TC3589x_CLKMODE, - TC3589x_CLKMODE_MODCTL_OPERATION); - + { + ret = tc3589x_reg_write(tc3589x, + TC3589x_CLKMODE, + TC3589x_CLKMODE_MODCTL_OPERATION); + if (ret < 0) + goto out; + + for (i = ARRAY_SIZE(sleep_regs) - 1; i >= 0; i--) { + ret = tc3589x_reg_write(tc3589x, + sleep_regs[i], + sleep_regs_backup[i]); + /* Not much to do here if we fail */ + if (ret < 0) + break; + } + } else { + disable_irq_wake(client->irq); + } +out: return ret; } diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c index a293b978e27..d7b9e0c60ea 100644 --- a/drivers/mfd/tps6105x.c +++ b/drivers/mfd/tps6105x.c @@ -195,6 +195,7 @@ static int __devinit tps6105x_probe(struct i2c_client *client, return 0; fail: + i2c_set_clientdata(client, NULL); kfree(tps6105x); return ret; } diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index c7795096d43..f92560ef750 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -461,6 +461,40 @@ config BMP085 To compile this driver as a module, choose M here: the module will be called bmp085. +config DISPDEV + bool "Display overlay device" + depends on FB_MCDE + default n + help + This driver provides a way to use a second overlay for a display (in + addition to the framebuffer). 
The device allows for registration of + userspace buffers to be used with the overlay. + +config COMPDEV + bool "Display composition device" + depends on FB_MCDE && HWMEM + default n + help + This driver provides a way to use several overlays for a display. + This driver replaces the use of the framebuffer The device allows + for posting userspace buffers to be used with the overlays. + +config CLONEDEV + bool "Display cloning device" + depends on FB_MCDE && HWMEM && COMPDEV + default n + help + This driver provides a way to clone content between two compdev + devices. + +config CLONEDEV_DEBUG + bool "Display cloning device debug" + depends on CLONEDEV + default n + help + This driver provides a way to clone content between two compdev + devices. + config PCH_PHUB tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB" depends on PCI @@ -481,6 +515,14 @@ config PCH_PHUB To compile this driver as a module, choose M here: the module will be called pch_phub. +config HWMEM + bool "Hardware memory driver" + default n + help + This driver provides a way to allocate contiguous system memory which + can be used by hardware. It also enables accessing hwmem allocated + memory buffers through a secure id which can be shared across processes. + config USB_SWITCH_FSA9480 tristate "FSA9480 USB Switch" depends on I2C @@ -498,6 +540,7 @@ config MAX8997_MUIC Maxim MAX8997 PMIC. The MAX8997 MUIC is a USB port accessory detector and switch. +source "drivers/misc/Kconfig.stm" source "drivers/misc/c2port/Kconfig" source "drivers/misc/eeprom/Kconfig" source "drivers/misc/cb710/Kconfig" diff --git a/drivers/misc/Kconfig.stm b/drivers/misc/Kconfig.stm new file mode 100644 index 00000000000..d509c85c79f --- /dev/null +++ b/drivers/misc/Kconfig.stm @@ -0,0 +1,120 @@ +menuconfig STM_TRACE + bool "STM MIPI Trace driver" + depends on ARCH_U8500 + help + Simple System Trace Module driver. It allows to use and configure the + STM, either from kernel space, or from user space. 
+ +if STM_TRACE + +config STM_NUMBER_OF_CHANNEL + int + default 512 if ARCH_U8500 + default 256 + help + Number Max of channels always a multiple of 256 + +config STM_DEFAULT_MASTERS_MODES + hex "channel mode" + default 0xffffffff + help + Default config for enabling hardware mode tracing + +config STM_PRINTK + bool "printk support" + depends on STM_TRACE + help + Duplicate printk output on STM printk channel & activate stm_printk + +config STM_PRINTK_CHANNEL + int "printk channel" + range 0 255 + depends on STM_PRINTK + default 255 + help + STM printk channel number + +config STM_FTRACE + bool "functions tracing" + depends on FTRACE + default y + help + Output function tracing on STM dedicated channel + +config STM_FTRACE_CHANNEL + int "ftrace channel" + range 0 255 + depends on STM_FTRACE + default 254 + help + STM ftrace channel number + +config STM_CTX_SWITCH + bool "Context switch tracing" + depends on CONTEXT_SWITCH_TRACER + default y + help + Output scheduler context switch on STM dedicated channel + +config STM_CTX_SWITCH_CHANNEL + int "Context switch channel" + range 0 255 + depends on STM_CTX_SWITCH + default 253 + help + STM Context switch channel number + +config STM_WAKEUP + bool "Scheduler wakeup tracing" + depends on CONTEXT_SWITCH_TRACER + default y + help + Output scheduler wakeup on STM dedicated channel + +config STM_WAKEUP_CHANNEL + int "Wakeup channel" + range 0 255 + depends on STM_WAKEUP + default 252 + help + STM scheduler wakeup channel number + +config STM_STACK_TRACE + bool "Stack tracing" + depends on STACKTRACE + default y + help + Output stack tracing on STM dedicated channel + +config STM_STACK_TRACE_CHANNEL + int "Stack trace channel" + range 0 255 + depends on STM_STACK_TRACE + default 251 + help + STM stack trace channel number + +config STM_TRACE_PRINTK + bool "trace printk & binary printk support" + depends on TRACING + default y + help + Duplicate trace printk output on STM printk channel + +config STM_TRACE_PRINTK_CHANNEL + int 
"trace_printk channel" + range 0 255 + depends on TRACING + default 250 + help + STM trace_printk channel number + +config STM_TRACE_BPRINTK_CHANNEL + int "trace_bprintk channel" + range 0 255 + depends on TRACING + default 249 + help + STM trace binary printk channel number + +endif diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 3e1d80106f0..2741a01610f 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -46,6 +46,11 @@ obj-y += ti-st/ obj-$(CONFIG_AB8500_PWM) += ab8500-pwm.o obj-y += lis3lv02d/ obj-y += carma/ +obj-$(CONFIG_STM_TRACE) += stm.o +obj-$(CONFIG_HWMEM) += hwmem/ +obj-$(CONFIG_DISPDEV) += dispdev/ +obj-$(CONFIG_COMPDEV) += compdev/ +obj-$(CONFIG_CLONEDEV) += clonedev/ obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/ obj-$(CONFIG_MAX8997_MUIC) += max8997-muic.o diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c index d7a9aa14e5d..9d864e4db5a 100644 --- a/drivers/misc/ab8500-pwm.c +++ b/drivers/misc/ab8500-pwm.c @@ -8,10 +8,11 @@ #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/pwm.h> +#include <linux/clk.h> #include <linux/mfd/abx500.h> #include <linux/mfd/abx500/ab8500.h> #include <linux/module.h> - +#include <linux/mfd/ab8500/pwmleds.h> /* * PWM Out generators * Bank: 0x10 @@ -19,6 +20,11 @@ #define AB8500_PWM_OUT_CTRL1_REG 0x60 #define AB8500_PWM_OUT_CTRL2_REG 0x61 #define AB8500_PWM_OUT_CTRL7_REG 0x66 +#define AB8505_PWM_OUT_BLINK_CTRL1_REG 0x68 +#define AB8505_PWM_OUT_BLINK_CTRL4_REG 0x6B +#define AB8505_PWM_OUT_BLINK_CTRL_DUTYBIT 4 +#define AB8505_PWM_OUT_BLINK_DUTYMASK (0x0F << AB8505_PWM_OUT_BLINK_CTRL_DUTYBIT) + /* backlight driver constants */ #define ENABLE_PWM 1 @@ -27,12 +33,73 @@ struct pwm_device { struct device *dev; struct list_head node; + struct clk *clk; const char *label; unsigned int pwm_id; + unsigned int num_pwm; + unsigned int blink_en; + struct ab8500 *parent; + bool clk_enabled; }; static LIST_HEAD(pwm_list); +int 
pwm_config_blink(struct pwm_device *pwm, int duty_ns, int period_ns) +{ + int ret; + unsigned int value; + u8 reg; + if ((!is_ab8505(pwm->parent)) || (!pwm->blink_en)) { + dev_err(pwm->dev, "setting blinking for this " + "device not supported\n"); + return -EINVAL; + } + /* + * get the period value that is to be written to + * AB8500_PWM_OUT_BLINK_CTRL1 REGS[0:2] + */ + value = period_ns & 0x07; + /* + * get blink duty value to be written to + * AB8500_PWM_OUT_BLINK_CTRL REGS[7:4] + */ + value |= ((duty_ns << AB8505_PWM_OUT_BLINK_CTRL_DUTYBIT) & + AB8505_PWM_OUT_BLINK_DUTYMASK); + + reg = AB8505_PWM_OUT_BLINK_CTRL1_REG + (pwm->pwm_id - 1); + + ret = abx500_set_register_interruptible(pwm->dev, AB8500_MISC, + reg, (u8)value); + if (ret < 0) + dev_err(pwm->dev, "%s: Failed to config PWM blink,Error %d\n", + pwm->label, ret); + return ret; +} +EXPORT_SYMBOL(pwm_config_blink); + +int pwm_blink_ctrl(struct pwm_device *pwm , int enable) +{ + int ret; + + if ((!is_ab8505(pwm->parent)) || (!pwm->blink_en)) { + dev_err(pwm->dev, "setting blinking for this " + "device not supported\n"); + return -EINVAL; + } + /* + * Enable/disable blinking feature for corresponding PWMOUT + * channel depending on value of enable. 
+ */ + ret = abx500_mask_and_set_register_interruptible(pwm->dev, + AB8500_MISC, AB8505_PWM_OUT_BLINK_CTRL4_REG, + 1 << (pwm->pwm_id-1), enable << (pwm->pwm_id-1)); + if (ret < 0) + dev_err(pwm->dev, "%s: Failed to control PWM blink,Error %d\n", + pwm->label, ret); + return ret; +} +EXPORT_SYMBOL(pwm_blink_ctrl); + int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) { int ret = 0; @@ -67,11 +134,19 @@ int pwm_enable(struct pwm_device *pwm) { int ret; + if (!pwm->clk_enabled) { + ret = clk_enable(pwm->clk); + if (ret < 0) { + dev_err(pwm->dev, "failed to enable clock\n"); + return ret; + } + pwm->clk_enabled = true; + } ret = abx500_mask_and_set_register_interruptible(pwm->dev, AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG, - 1 << (pwm->pwm_id-1), ENABLE_PWM); + 1 << (pwm->pwm_id-1), 1 << (pwm->pwm_id-1)); if (ret < 0) - dev_err(pwm->dev, "%s: Failed to disable PWM, Error %d\n", + dev_err(pwm->dev, "%s: Failed to enable PWM, Error %d\n", pwm->label, ret); return ret; } @@ -84,9 +159,27 @@ void pwm_disable(struct pwm_device *pwm) ret = abx500_mask_and_set_register_interruptible(pwm->dev, AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG, 1 << (pwm->pwm_id-1), DISABLE_PWM); + /* + * Workaround to set PWM in disable. 
+ * If enable bit is not toggled the PWM might output 50/50 duty cycle + * even though it should be disabled + */ + ret &= abx500_mask_and_set_register_interruptible(pwm->dev, + AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG, + 1 << (pwm->pwm_id-1), + ENABLE_PWM << (pwm->pwm_id-1)); + ret &= abx500_mask_and_set_register_interruptible(pwm->dev, + AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG, + 1 << (pwm->pwm_id-1), DISABLE_PWM); + if (ret < 0) dev_err(pwm->dev, "%s: Failed to disable PWM, Error %d\n", pwm->label, ret); + if (pwm->clk_enabled) { + clk_disable(pwm->clk); + pwm->clk_enabled = false; + } + return; } EXPORT_SYMBOL(pwm_disable); @@ -94,7 +187,6 @@ EXPORT_SYMBOL(pwm_disable); struct pwm_device *pwm_request(int pwm_id, const char *label) { struct pwm_device *pwm; - list_for_each_entry(pwm, &pwm_list, node) { if (pwm->pwm_id == pwm_id) { pwm->label = label; @@ -113,30 +205,131 @@ void pwm_free(struct pwm_device *pwm) } EXPORT_SYMBOL(pwm_free); +static ssize_t store_blink_status(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct pwm_device *pwm; + unsigned long val; + + if (strict_strtoul(buf, 0, &val)) + return -EINVAL; + list_for_each_entry(pwm, &pwm_list, node) { + if (pwm->pwm_id == val) + break; + else { + /* check if PWM ID is valid*/ + if (val > pwm->num_pwm) { + dev_err(pwm->dev, "Invalid PWM ID\n"); + return -EINVAL; + } + } + } + if ((!is_ab8505(pwm->parent)) || (!pwm->blink_en)) { + dev_err(pwm->dev, "setting blinking for this " + "device not supported\n"); + return -EINVAL; + } + /*Disable blink functionlity */ + pwm_blink_ctrl(pwm, 0); + return count; +} + +static DEVICE_ATTR(disable_blink, S_IWUGO, NULL, store_blink_status); + +static struct attribute *pwmled_attributes[] = { + &dev_attr_disable_blink.attr, + NULL +}; + +static const struct attribute_group pwmled_attr_group = { + .attrs = pwmled_attributes, +}; + static int __devinit ab8500_pwm_probe(struct platform_device *pdev) { + struct ab8500 *parent = 
dev_get_drvdata(pdev->dev.parent); + struct ab8500_platform_data *plat = dev_get_platdata(parent->dev); + struct ab8500_pwmled_platform_data *pdata; struct pwm_device *pwm; + int ret = 0 , i; + + /* get pwmled specific platform data */ + if (!plat->pwmled) { + dev_err(&pdev->dev, "no pwm platform data supplied\n"); + return -EINVAL; + } + pdata = plat->pwmled; /* * Nothing to be done in probe, this is required to get the * device which is required for ab8500 read and write */ - pwm = kzalloc(sizeof(struct pwm_device), GFP_KERNEL); + pwm = kzalloc(((sizeof(struct pwm_device)) * pdata->num_pwm), + GFP_KERNEL); if (pwm == NULL) { dev_err(&pdev->dev, "failed to allocate memory\n"); return -ENOMEM; } - pwm->dev = &pdev->dev; - pwm->pwm_id = pdev->id; - list_add_tail(&pwm->node, &pwm_list); + for (i = 0; i < pdata->num_pwm; i++) { + pwm[i].dev = &pdev->dev; + pwm[i].parent = parent; + pwm[i].blink_en = pdata->leds[i].blink_en; + pwm[i].pwm_id = pdata->leds[i].pwm_id; + pwm[i].num_pwm = pdata->num_pwm; + list_add_tail(&pwm[i].node, &pwm_list); + } + for (i = 0; i < pdata->num_pwm; i++) { + /*Implement sysfs only if blink is enabled*/ + if ((is_ab8505(pwm[i].parent)) && (pwm[i].blink_en)) { + /* sysfs implementation to disable the blink */ + ret = sysfs_create_group(&pdev->dev.kobj, + &pwmled_attr_group); + if (ret) { + dev_err(&pdev->dev, "failed to create" + " sysfs entries\n"); + goto fail; + } + break; + } + } + pwm->clk = clk_get(pwm->dev, NULL); + if (IS_ERR(pwm->clk)) { + dev_err(pwm->dev, "clock request failed\n"); + ret = PTR_ERR(pwm->clk); + goto err_clk; + } platform_set_drvdata(pdev, pwm); + pwm->clk_enabled = false; dev_dbg(pwm->dev, "pwm probe successful\n"); - return 0; + return ret; + +err_clk: + for (i = 0; i < pdata->num_pwm; i++) { + if ((is_ab8505(pwm[i].parent)) && (pwm[i].blink_en)) { + sysfs_remove_group(&pdev->dev.kobj, + &pwmled_attr_group); + break; + } + } +fail: + list_del(&pwm->node); + kfree(pwm); + return ret; } static int __devexit 
ab8500_pwm_remove(struct platform_device *pdev) { struct pwm_device *pwm = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < pwm->num_pwm; i++) { + if ((is_ab8505(pwm[i].parent)) && (pwm[i].blink_en)) { + sysfs_remove_group(&pdev->dev.kobj, + &pwmled_attr_group); + break; + } + } list_del(&pwm->node); + clk_put(pwm->clk); dev_dbg(&pdev->dev, "pwm driver removed\n"); kfree(pwm); return 0; diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c index 54f6f39f990..1035cb37695 100644 --- a/drivers/misc/bh1780gli.c +++ b/drivers/misc/bh1780gli.c @@ -18,11 +18,17 @@ * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/i2c.h> +#include <linux/err.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/module.h> +#include <linux/regulator/consumer.h> + +#ifdef CONFIG_HAS_EARLYSUSPEND +#include <linux/earlysuspend.h> +#endif #define BH1780_REG_CONTROL 0x80 #define BH1780_REG_PARTID 0x8A @@ -40,11 +46,20 @@ struct bh1780_data { struct i2c_client *client; + struct regulator *regulator; +#ifdef CONFIG_HAS_EARLYSUSPEND + struct early_suspend early_suspend; +#endif int power_state; /* lock for sysfs operations */ struct mutex lock; }; +#ifdef CONFIG_HAS_EARLYSUSPEND +static void bh1780_early_suspend(struct early_suspend *ddata); +static void bh1780_late_resume(struct early_suspend *ddata); +#endif + static int bh1780_write(struct bh1780_data *ddata, u8 reg, u8 val, char *msg) { int ret = i2c_smbus_write_byte_data(ddata->client, reg, val); @@ -72,6 +87,9 @@ static ssize_t bh1780_show_lux(struct device *dev, struct bh1780_data *ddata = platform_get_drvdata(pdev); int lsb, msb; + if (ddata->power_state == BH1780_POFF) + return -EINVAL; + lsb = bh1780_read(ddata, BH1780_REG_DLOW, "DLOW"); if (lsb < 0) return lsb; @@ -89,13 +107,9 @@ static ssize_t bh1780_show_power_state(struct device *dev, { struct platform_device *pdev = to_platform_device(dev); struct bh1780_data 
*ddata = platform_get_drvdata(pdev); - int state; - - state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL"); - if (state < 0) - return state; - return sprintf(buf, "%d\n", state & BH1780_POWMASK); + /* we already maintain a sw state */ + return sprintf(buf, "%d\n", ddata->power_state); } static ssize_t bh1780_store_power_state(struct device *dev, @@ -104,7 +118,7 @@ static ssize_t bh1780_store_power_state(struct device *dev, { struct platform_device *pdev = to_platform_device(dev); struct bh1780_data *ddata = platform_get_drvdata(pdev); - unsigned long val; + long val; int error; error = strict_strtoul(buf, 0, &val); @@ -114,15 +128,25 @@ static ssize_t bh1780_store_power_state(struct device *dev, if (val < BH1780_POFF || val > BH1780_PON) return -EINVAL; + if (ddata->power_state == val) + return count; + mutex_lock(&ddata->lock); + if (ddata->power_state == BH1780_POFF) + regulator_enable(ddata->regulator); + error = bh1780_write(ddata, BH1780_REG_CONTROL, val, "CONTROL"); if (error < 0) { mutex_unlock(&ddata->lock); + regulator_disable(ddata->regulator); return error; } - msleep(BH1780_PON_DELAY); + if (val == BH1780_POFF) + regulator_disable(ddata->regulator); + + mdelay(BH1780_PON_DELAY); ddata->power_state = val; mutex_unlock(&ddata->lock); @@ -131,7 +155,7 @@ static ssize_t bh1780_store_power_state(struct device *dev, static DEVICE_ATTR(lux, S_IRUGO, bh1780_show_lux, NULL); -static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO, +static DEVICE_ATTR(power_state, S_IWUGO | S_IRUGO, bh1780_show_power_state, bh1780_store_power_state); static struct attribute *bh1780_attributes[] = { @@ -153,21 +177,42 @@ static int __devinit bh1780_probe(struct i2c_client *client, if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) { ret = -EIO; - goto err_op_failed; + return ret; } ddata = kzalloc(sizeof(struct bh1780_data), GFP_KERNEL); if (ddata == NULL) { + dev_err(&client->dev, "failed to alloc ddata\n"); ret = -ENOMEM; - goto err_op_failed; + return ret; } 
ddata->client = client; i2c_set_clientdata(client, ddata); + ddata->regulator = regulator_get(&client->dev, "vcc"); + if (IS_ERR(ddata->regulator)) { + dev_err(&client->dev, "failed to get regulator\n"); + ret = PTR_ERR(ddata->regulator); + goto free_ddata; + } + + regulator_enable(ddata->regulator); + ret = bh1780_read(ddata, BH1780_REG_PARTID, "PART ID"); - if (ret < 0) - goto err_op_failed; + if (ret < 0) { + dev_err(&client->dev, "failed to read part ID\n"); + goto disable_regulator; + } +#ifdef CONFIG_HAS_EARLYSUSPEND + ddata->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1; + ddata->early_suspend.suspend = bh1780_early_suspend; + ddata->early_suspend.resume = bh1780_late_resume; + register_early_suspend(&ddata->early_suspend); +#endif + + regulator_disable(ddata->regulator); + ddata->power_state = BH1780_POFF; dev_info(&client->dev, "Ambient Light Sensor, Rev : %d\n", (ret & BH1780_REVMASK)); @@ -175,12 +220,17 @@ static int __devinit bh1780_probe(struct i2c_client *client, mutex_init(&ddata->lock); ret = sysfs_create_group(&client->dev.kobj, &bh1780_attr_group); - if (ret) - goto err_op_failed; + if (ret) { + dev_err(&client->dev, "failed to create sysfs group\n"); + goto put_regulator; + } return 0; - -err_op_failed: +disable_regulator: + regulator_disable(ddata->regulator); +put_regulator: + regulator_put(ddata->regulator); +free_ddata: kfree(ddata); return ret; } @@ -196,50 +246,106 @@ static int __devexit bh1780_remove(struct i2c_client *client) return 0; } -#ifdef CONFIG_PM -static int bh1780_suspend(struct device *dev) +#if defined(CONFIG_HAS_EARLYSUSPEND) || defined(CONFIG_PM) +static int bh1780_do_suspend(struct bh1780_data *ddata) { - struct bh1780_data *ddata; - int state, ret; - struct i2c_client *client = to_i2c_client(dev); + int ret = 0; - ddata = i2c_get_clientdata(client); - state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL"); - if (state < 0) - return state; + mutex_lock(&ddata->lock); - ddata->power_state = state & 
BH1780_POWMASK; + if (ddata->power_state == BH1780_POFF) + goto unlock; - ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF, - "CONTROL"); + ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF, "CONTROL"); if (ret < 0) - return ret; + goto unlock; - return 0; + if (ddata->regulator) + regulator_disable(ddata->regulator); +unlock: + mutex_unlock(&ddata->lock); + return ret; } -static int bh1780_resume(struct device *dev) +static int bh1780_do_resume(struct bh1780_data *ddata) { - struct bh1780_data *ddata; - int state, ret; - struct i2c_client *client = to_i2c_client(dev); + int ret = 0; - ddata = i2c_get_clientdata(client); - state = ddata->power_state; - ret = bh1780_write(ddata, BH1780_REG_CONTROL, state, - "CONTROL"); + mutex_lock(&ddata->lock); + + if (ddata->power_state == BH1780_POFF) + goto unlock; + if (ddata->regulator) + regulator_enable(ddata->regulator); + + ret = bh1780_write(ddata, BH1780_REG_CONTROL, + ddata->power_state, "CONTROL"); + +unlock: + mutex_unlock(&ddata->lock); + return ret; +} +#endif + +#ifndef CONFIG_HAS_EARLYSUSPEND +#ifdef CONFIG_PM +static int bh1780_suspend(struct device *dev) +{ + struct bh1780_data *ddata = dev_get_drvdata(dev); + int ret = 0; + + ret = bh1780_do_suspend(ddata); if (ret < 0) - return ret; + dev_err(&ddata->client->dev, + "Error while suspending the device\n"); - return 0; + return ret; } + +static int bh1780_resume(struct device *dev) +{ + struct bh1780_data *ddata = dev_get_drvdata(dev); + int ret = 0; + + ret = bh1780_do_resume(ddata); + if (ret < 0) + dev_err(&ddata->client->dev, + "Error while resuming the device\n"); + + return ret; +} + static SIMPLE_DEV_PM_OPS(bh1780_pm, bh1780_suspend, bh1780_resume); #define BH1780_PMOPS (&bh1780_pm) +#endif /* CONFIG_PM */ #else #define BH1780_PMOPS NULL -#endif /* CONFIG_PM */ +static void bh1780_early_suspend(struct early_suspend *data) +{ + struct bh1780_data *ddata = + container_of(data, struct bh1780_data, early_suspend); + int ret; + + ret = 
bh1780_do_suspend(ddata); + if (ret < 0) + dev_err(&ddata->client->dev, + "Error while suspending the device\n"); +} + +static void bh1780_late_resume(struct early_suspend *data) +{ + struct bh1780_data *ddata = + container_of(data, struct bh1780_data, early_suspend); + int ret; + + ret = bh1780_do_resume(ddata); + if (ret < 0) + dev_err(&ddata->client->dev, + "Error while resuming the device\n"); +} +#endif /*!CONFIG_HAS_EARLYSUSPEND */ static const struct i2c_device_id bh1780_id[] = { { "bh1780", 0 }, @@ -252,7 +358,9 @@ static struct i2c_driver bh1780_driver = { .id_table = bh1780_id, .driver = { .name = "bh1780", +#if (!defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM)) .pm = BH1780_PMOPS, +#endif }, }; diff --git a/drivers/misc/clonedev/Makefile b/drivers/misc/clonedev/Makefile new file mode 100644 index 00000000000..f84859dd3ee --- /dev/null +++ b/drivers/misc/clonedev/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_CLONEDEV) += clonedev.o + +ifdef CONFIG_CLONEDEV_DEBUG +EXTRA_CFLAGS += -DDEBUG +endif diff --git a/drivers/misc/clonedev/clonedev.c b/drivers/misc/clonedev/clonedev.c new file mode 100644 index 00000000000..d3b770fd324 --- /dev/null +++ b/drivers/misc/clonedev/clonedev.c @@ -0,0 +1,312 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Device for display cloning on external output. + * + * Author: Per-Daniel Olsson <per-daniel.olsson@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/miscdevice.h> +#include <linux/uaccess.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/ioctl.h> + +#include <linux/clonedev.h> + +#include <linux/compdev.h> +#include <linux/mm.h> +#include <video/mcde.h> + +static LIST_HEAD(dev_list); +static DEFINE_MUTEX(dev_list_lock); + +struct clonedev { + struct mutex lock; + struct miscdevice mdev; + struct list_head list; + bool open; + struct compdev *src_compdev; + struct compdev *dst_compdev; + bool overlay_case; + struct compdev_size dst_size; + struct compdev_scene_info s_info; +}; + +static void best_fit(struct compdev_rect *src_rect, + struct compdev_size *dst_size, + struct compdev_img *img) +{ + /* aspect ratio in 26.6 fixed point */ + int aspect = 1; + int dst_w; + int dst_h; + + if (img->rotation == COMPDEV_ROT_90_CCW || + img->rotation == COMPDEV_ROT_270_CCW) + aspect = (src_rect->height << 6) / src_rect->width; + else + aspect = (src_rect->width << 6) / src_rect->height; + + dst_w = aspect * dst_size->height >> 6; + dst_h = dst_size->height; + img->dst_rect.y = 0; + + if (dst_w > dst_size->width) { + /* + * Destination rectangle too wide. + * Clamp to image width. Keep aspect ratio. 
+ */ + dst_h = (dst_size->width << 6) / aspect; + dst_w = dst_size->width; + } + + /* center the image */ + if (dst_w < dst_size->width) { + int offset = (dst_size->width - dst_w) / 2; + img->dst_rect.x = offset; + } + + if (dst_h < dst_size->height) { + int offset = (dst_size->height - dst_h) / 2; + img->dst_rect.y = offset; + } + + img->dst_rect.width = dst_w; + img->dst_rect.height = dst_h; +} + +static int clonedev_open(struct inode *inode, struct file *file) +{ + struct clonedev *cd = NULL; + + mutex_lock(&dev_list_lock); + list_for_each_entry(cd, &dev_list, list) + if (cd->mdev.minor == iminor(inode)) + break; + + if (&cd->list == &dev_list) { + mutex_unlock(&dev_list_lock); + return -ENODEV; + } + + if (cd->open) { + mutex_unlock(&dev_list_lock); + return -EBUSY; + } + + cd->open = true; + + mutex_unlock(&dev_list_lock); + + file->private_data = cd; + + return 0; +} + +static int clonedev_release(struct inode *inode, struct file *file) +{ + struct clonedev *cd = NULL; + + mutex_lock(&dev_list_lock); + list_for_each_entry(cd, &dev_list, list) + if (cd->mdev.minor == iminor(inode)) + break; + mutex_unlock(&dev_list_lock); + + if (&cd->list == &dev_list) + return -ENODEV; + + cd->open = false; + return 0; +} + +static long clonedev_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + int ret; + struct clonedev *cd = (struct clonedev *)file->private_data; + + mutex_lock(&cd->lock); + + switch (cmd) { + case CLONEDEV_SET_MODE_IOC: + /* TODO: Get the user data */ + + break; + + default: + ret = -ENOSYS; + } + + mutex_unlock(&cd->lock); + + return ret; +} + +static const struct file_operations clonedev_fops = { + .open = clonedev_open, + .release = clonedev_release, + .unlocked_ioctl = clonedev_ioctl, +}; + +static void init_clonedev(struct clonedev *cd, const char *name) +{ + mutex_init(&cd->lock); + INIT_LIST_HEAD(&cd->list); + + cd->mdev.minor = MISC_DYNAMIC_MINOR; + cd->mdev.name = name; + cd->mdev.fops = &clonedev_fops; +} + +static void 
clonedev_post_buffer_callback(void *data, + struct compdev_img *cb_img) +{ + struct clonedev *cd = (struct clonedev *)data; + + mutex_lock(&cd->lock); + + if (!cd->overlay_case || (cd->overlay_case && + (cb_img->flags & COMPDEV_OVERLAY_FLAG))) { + struct compdev_img img; + + img = *cb_img; + + if (img.flags & COMPDEV_BYPASS_FLAG) + img.flags &= ~COMPDEV_BYPASS_FLAG; + + if (cd->overlay_case) + img.rotation = cd->s_info.ovly_rotation; + else + img.rotation = cd->s_info.fb_rotation; + + best_fit(&img.src_rect, &cd->dst_size, &img); + + compdev_post_buffer(cd->dst_compdev, &img); + } + mutex_unlock(&cd->lock); +} + +static void clonedev_post_scene_info_callback(void *data, + struct compdev_scene_info *s_info) +{ + struct clonedev *cd = (struct clonedev *)data; + + mutex_lock(&cd->lock); + if (s_info->img_count > 1) + cd->overlay_case = true; + else + cd->overlay_case = false; + + cd->s_info = *s_info; + cd->s_info.img_count = 1; + compdev_post_scene_info(cd->dst_compdev, &cd->s_info); + mutex_unlock(&cd->lock); +} + +int clonedev_create(void) +{ + int ret; + struct clonedev *cd; + + static int counter; + char name[10]; + + cd = kzalloc(sizeof(struct clonedev), GFP_KERNEL); + if (!cd) + return -ENOMEM; + + snprintf(name, sizeof(name), "%s%d", CLONEDEV_DEFAULT_DEVICE_PREFIX, + counter++); + init_clonedev(cd, name); + + ret = misc_register(&cd->mdev); + if (ret) + goto fail_register_misc; + mutex_lock(&dev_list_lock); + list_add_tail(&cd->list, &dev_list); + mutex_unlock(&dev_list_lock); + + mutex_lock(&cd->lock); + + compdev_get(0, &cd->src_compdev); + compdev_get(1, &cd->dst_compdev); + compdev_get_size(cd->dst_compdev, &cd->dst_size); + + compdev_register_listener_callbacks(cd->src_compdev, (void *)cd, + &clonedev_post_buffer_callback, + &clonedev_post_scene_info_callback); + + mutex_unlock(&cd->lock); + goto out; + +fail_register_misc: + kfree(cd); +out: + return ret; +} + +void clonedev_destroy(void) +{ + struct clonedev *cd; + struct clonedev *tmp; + + 
mutex_lock(&dev_list_lock); + list_for_each_entry_safe(cd, tmp, &dev_list, list) { + compdev_put(cd->src_compdev); + compdev_put(cd->dst_compdev); + compdev_deregister_callbacks(cd->src_compdev); + list_del(&cd->list); + misc_deregister(&cd->mdev); + kfree(cd); + break; + } + mutex_unlock(&dev_list_lock); +} + +static void clonedev_destroy_all(void) +{ + struct clonedev *cd; + struct clonedev *tmp; + + mutex_lock(&dev_list_lock); + list_for_each_entry_safe(cd, tmp, &dev_list, list) { + list_del(&cd->list); + misc_deregister(&cd->mdev); + kfree(cd); + } + mutex_unlock(&dev_list_lock); + + mutex_destroy(&dev_list_lock); +} + +static int __init clonedev_init(void) +{ + pr_info("%s\n", __func__); + + mutex_init(&dev_list_lock); + + return 0; +} +module_init(clonedev_init); + +static void __exit clonedev_exit(void) +{ + clonedev_destroy_all(); + pr_info("%s\n", __func__); +} +module_exit(clonedev_exit); + +MODULE_AUTHOR("Per-Daniel Olsson <per-daniel.olsson@stericsson.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Device for display cloning on external output"); + diff --git a/drivers/misc/compdev/Makefile b/drivers/misc/compdev/Makefile new file mode 100644 index 00000000000..b8385848712 --- /dev/null +++ b/drivers/misc/compdev/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_COMPDEV) += compdev.o + +ifdef CONFIG_COMPDEV_DEBUG +EXTRA_CFLAGS += -DDEBUG +endif + diff --git a/drivers/misc/compdev/compdev.c b/drivers/misc/compdev/compdev.c new file mode 100644 index 00000000000..d929a02c565 --- /dev/null +++ b/drivers/misc/compdev/compdev.c @@ -0,0 +1,1381 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Display overlay compositer device driver + * + * Author: Anders Bauer <anders.bauer@stericsson.com> + * for ST-Ericsson. + * + * Modified: Per-Daniel Olsson <per-daniel.olsson@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/miscdevice.h> +#include <linux/uaccess.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/ioctl.h> +#include <linux/sched.h> + +#include <linux/compdev.h> +#include <linux/hwmem.h> +#include <linux/mm.h> +#include <video/mcde_dss.h> +#include <video/b2r2_blt.h> +#include <linux/workqueue.h> +#include <linux/completion.h> + +#define BUFFER_CACHE_DEPTH 2 +#define NUM_COMPDEV_BUFS 2 + +static LIST_HEAD(dev_list); +static DEFINE_MUTEX(dev_list_lock); +static int dev_counter; + +struct compdev_buffer { + struct hwmem_alloc *alloc; + enum compdev_ptr_type type; + u32 size; + u32 paddr; /* if pinned */ +}; + +struct compdev_img_internal { + struct compdev_img img; + u32 ref_count; +}; + +struct compdev_blt_work { + struct work_struct work; + struct compdev_img *src_img; + struct compdev_img_internal *dst_img; + int blt_handle; + bool mcde_rotation; + struct device *dev; +}; + +struct compdev_post_callback_work { + struct work_struct work; + struct compdev_img *img; + post_buffer_callback pb_cb; + void *cb_data; + struct device *dev; +}; + +struct buffer_cache_context { + struct compdev_img_internal + *img[BUFFER_CACHE_DEPTH]; + u8 index; + u8 unused_counter; + struct device *dev; +}; + +struct dss_context { + struct device *dev; + struct mcde_display_device *ddev; + struct mcde_overlay *ovly[NUM_COMPDEV_BUFS]; + struct compdev_buffer ovly_buffer[NUM_COMPDEV_BUFS]; + struct compdev_size phy_size; + enum mcde_display_rotation display_rotation; + enum compdev_rotation current_buffer_rotation; + int blt_handle; + u8 temp_img_count; + struct compdev_img_internal *temp_img[NUM_COMPDEV_BUFS]; + struct buffer_cache_context cache_ctx; +}; + +struct compdev { + struct mutex lock; + struct miscdevice mdev; + struct device *dev; + struct list_head list; + struct dss_context dss_ctx; + u16 ref_count; + struct 
workqueue_struct *worker_thread; + int dev_index; + post_buffer_callback pb_cb; + post_scene_info_callback si_cb; + struct compdev_scene_info s_info; + u8 sync_count; + u8 image_count; + struct compdev_img *images[NUM_COMPDEV_BUFS]; + struct completion fence; + void *cb_data; + bool mcde_rotation; +}; + +static struct compdev *compdevs[MAX_NBR_OF_COMPDEVS]; + +static int compdev_post_buffers_dss(struct dss_context *dss_ctx, + struct compdev_img *img1, struct compdev_img *img2); + + +static int compdev_open(struct inode *inode, struct file *file) +{ + struct compdev *cd = NULL; + + mutex_lock(&dev_list_lock); + list_for_each_entry(cd, &dev_list, list) + if (cd->mdev.minor == iminor(inode)) + break; + + if (&cd->list == &dev_list) { + mutex_unlock(&dev_list_lock); + return -ENODEV; + } + mutex_unlock(&dev_list_lock); + file->private_data = cd; + return 0; +} + +static int disable_overlay(struct mcde_overlay *ovly) +{ + struct mcde_overlay_info info; + + mcde_dss_get_overlay_info(ovly, &info); + if (info.paddr != 0) { + /* Set the pointer to zero to disable the overlay */ + info.paddr = 0; + mcde_dss_apply_overlay(ovly, &info); + } + return 0; +} + +static int compdev_release(struct inode *inode, struct file *file) +{ + struct compdev *cd = NULL; + int i; + + mutex_lock(&dev_list_lock); + list_for_each_entry(cd, &dev_list, list) + if (cd->mdev.minor == iminor(inode)) + break; + mutex_unlock(&dev_list_lock); + + if (&cd->list == &dev_list) + return -ENODEV; + + for (i = 0; i < NUM_COMPDEV_BUFS; i++) { + disable_overlay(cd->dss_ctx.ovly[i]); + if (cd->dss_ctx.ovly_buffer[i].paddr && + cd->dss_ctx.ovly_buffer[i].type == + COMPDEV_PTR_HWMEM_BUF_NAME_OFFSET) + hwmem_unpin(cd->dss_ctx.ovly_buffer[i].alloc); + + cd->dss_ctx.ovly_buffer[i].alloc = NULL; + cd->dss_ctx.ovly_buffer[i].size = 0; + cd->dss_ctx.ovly_buffer[i].paddr = 0; + } + + return 0; +} + +static enum mcde_ovly_pix_fmt get_ovly_fmt(enum compdev_fmt fmt) +{ + switch (fmt) { + default: + case COMPDEV_FMT_RGB565: 
+ return MCDE_OVLYPIXFMT_RGB565; + case COMPDEV_FMT_RGB888: + return MCDE_OVLYPIXFMT_RGB888; + case COMPDEV_FMT_RGBA8888: + return MCDE_OVLYPIXFMT_RGBA8888; + case COMPDEV_FMT_RGBX8888: + return MCDE_OVLYPIXFMT_RGBX8888; + case COMPDEV_FMT_YUV422: + return MCDE_OVLYPIXFMT_YCbCr422; + } +} + +static int compdev_setup_ovly(struct compdev_img *img, + struct compdev_buffer *buffer, + struct mcde_overlay *ovly, + int z_order, + struct dss_context *dss_ctx) +{ + int ret = 0; + enum hwmem_mem_type memtype; + enum hwmem_access access; + struct hwmem_mem_chunk mem_chunk; + size_t mem_chunk_length = 1; + struct hwmem_region rgn = { .offset = 0, .count = 1, .start = 0 }; + struct mcde_overlay_info info; + + if (img->buf.type == COMPDEV_PTR_HWMEM_BUF_NAME_OFFSET) { + buffer->type = COMPDEV_PTR_HWMEM_BUF_NAME_OFFSET; + buffer->alloc = hwmem_resolve_by_name(img->buf.hwmem_buf_name); + if (IS_ERR(buffer->alloc)) { + ret = PTR_ERR(buffer->alloc); + dev_warn(dss_ctx->dev, + "HWMEM resolve failed, %d\n", ret); + goto resolve_failed; + } + + hwmem_get_info(buffer->alloc, &buffer->size, &memtype, + &access); + + if (!(access & HWMEM_ACCESS_READ) || + memtype != HWMEM_MEM_CONTIGUOUS_SYS) { + ret = -EACCES; + dev_warn(dss_ctx->dev, + "Invalid_mem overlay, %d\n", ret); + goto invalid_mem; + } + ret = hwmem_pin(buffer->alloc, &mem_chunk, &mem_chunk_length); + if (ret) { + dev_warn(dss_ctx->dev, + "Pin failed, %d\n", ret); + goto pin_failed; + } + + rgn.size = rgn.end = buffer->size; + ret = hwmem_set_domain(buffer->alloc, HWMEM_ACCESS_READ, + HWMEM_DOMAIN_SYNC, &rgn); + if (ret) + dev_warn(dss_ctx->dev, + "Set domain failed, %d\n", ret); + + buffer->paddr = mem_chunk.paddr; + } else if (img->buf.type == COMPDEV_PTR_PHYSICAL) { + buffer->type = COMPDEV_PTR_PHYSICAL; + buffer->alloc = NULL; + buffer->size = img->buf.len; + buffer->paddr = img->buf.offset; + } + + info.stride = img->pitch; + info.fmt = get_ovly_fmt(img->fmt); + info.src_x = 0; + info.src_y = 0; + info.dst_x = 
img->dst_rect.x; + info.dst_y = img->dst_rect.y; + info.dst_z = z_order; + info.w = img->dst_rect.width; + info.h = img->dst_rect.height; + info.dirty.x = 0; + info.dirty.y = 0; + info.dirty.w = img->dst_rect.width; + info.dirty.h = img->dst_rect.height; + info.paddr = buffer->paddr; + + mcde_dss_apply_overlay(ovly, &info); + return ret; + +pin_failed: +invalid_mem: + buffer->alloc = NULL; + buffer->size = 0; + buffer->paddr = 0; + +resolve_failed: + return ret; +} + +static int compdev_update_rotation(struct dss_context *dss_ctx, + enum compdev_rotation rotation) +{ + /* Set video mode */ + struct mcde_video_mode vmode; + int ret = 0; + + memset(&vmode, 0, sizeof(struct mcde_video_mode)); + mcde_dss_get_video_mode(dss_ctx->ddev, &vmode); + if ((dss_ctx->display_rotation + rotation) % 180) { + vmode.xres = dss_ctx->phy_size.height; + vmode.yres = dss_ctx->phy_size.width; + } else { + vmode.xres = dss_ctx->phy_size.width; + vmode.yres = dss_ctx->phy_size.height; + } + + /* Set rotation */ + ret = mcde_dss_set_rotation(dss_ctx->ddev, + (dss_ctx->display_rotation + rotation) % 360); + if (ret != 0) + goto exit; + + ret = mcde_dss_set_video_mode(dss_ctx->ddev, &vmode); + if (ret != 0) + goto exit; + + + /* Apply */ + ret = mcde_dss_apply_channel(dss_ctx->ddev); +exit: + return ret; +} + +static int release_prev_frame(struct dss_context *dss_ctx) +{ + int ret = 0; + int i; + + /* Handle unpin of previous buffers */ + for (i = 0; i < NUM_COMPDEV_BUFS; i++) { + if (dss_ctx->ovly_buffer[i].type == + COMPDEV_PTR_HWMEM_BUF_NAME_OFFSET && + dss_ctx->ovly_buffer[i].paddr != 0) { + hwmem_unpin(dss_ctx->ovly_buffer[i].alloc); + hwmem_release(dss_ctx->ovly_buffer[i].alloc); + } + dss_ctx->ovly_buffer[i].alloc = NULL; + dss_ctx->ovly_buffer[i].size = 0; + dss_ctx->ovly_buffer[i].paddr = 0; + } + return ret; + +} + +static enum b2r2_blt_fmt compdev_to_blt_format(enum compdev_fmt fmt) +{ + switch (fmt) { + case COMPDEV_FMT_RGBA8888: + return B2R2_BLT_FMT_32_BIT_ABGR8888; + case 
COMPDEV_FMT_RGB888: + return B2R2_BLT_FMT_24_BIT_RGB888; + case COMPDEV_FMT_RGB565: + return B2R2_BLT_FMT_16_BIT_RGB565; + case COMPDEV_FMT_YUV422: + return B2R2_BLT_FMT_CB_Y_CR_Y; + case COMPDEV_FMT_YCBCR42XMBN: + return B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE; + case COMPDEV_FMT_YUV420_SP: + return B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR; + case COMPDEV_FMT_YVU420_SP: + return B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR; + case COMPDEV_FMT_YUV420_P: + return B2R2_BLT_FMT_YUV420_PACKED_PLANAR; + default: + return B2R2_BLT_FMT_UNUSED; + } +} + +static enum b2r2_blt_transform to_blt_transform + (enum compdev_rotation compdev_rot) +{ + switch (compdev_rot) { + case COMPDEV_ROT_0: + return B2R2_BLT_TRANSFORM_NONE; + case COMPDEV_ROT_90_CCW: + return B2R2_BLT_TRANSFORM_CCW_ROT_90; + case COMPDEV_ROT_180: + return B2R2_BLT_TRANSFORM_CCW_ROT_180; + case COMPDEV_ROT_270_CCW: + return B2R2_BLT_TRANSFORM_CCW_ROT_90; + default: + return B2R2_BLT_TRANSFORM_NONE; + } +} + +static u32 get_stride(u32 width, enum compdev_fmt fmt) +{ + u32 stride = 0; + switch (fmt) { + case COMPDEV_FMT_RGB565: + stride = width * 2; + break; + case COMPDEV_FMT_RGB888: + stride = width * 3; + break; + case COMPDEV_FMT_RGBX8888: + stride = width * 4; + break; + case COMPDEV_FMT_RGBA8888: + stride = width * 4; + break; + case COMPDEV_FMT_YUV422: + stride = width * 2; + break; + case COMPDEV_FMT_YCBCR42XMBN: + case COMPDEV_FMT_YUV420_SP: + case COMPDEV_FMT_YVU420_SP: + case COMPDEV_FMT_YUV420_P: + stride = width; + break; + } + + /* The display controller requires 8 byte aligned strides */ + if (stride % 8) + stride += 8 - (stride % 8); + + return stride; +} + +static int alloc_comp_internal_img(enum compdev_fmt fmt, + u16 width, u16 height, struct compdev_img_internal **img_pp) +{ + struct hwmem_alloc *alloc; + int name; + u32 size; + u32 stride; + struct compdev_img_internal *img; + + stride = get_stride(width, fmt); + size = stride * height; + size = PAGE_ALIGN(size); + + img = kzalloc(sizeof(struct 
compdev_img_internal), GFP_KERNEL); + + if (!img) + return -ENOMEM; + + alloc = hwmem_alloc(size, HWMEM_ALLOC_HINT_WRITE_COMBINE | + HWMEM_ALLOC_HINT_UNCACHED, + (HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE | + HWMEM_ACCESS_IMPORT), + HWMEM_MEM_CONTIGUOUS_SYS); + + if (IS_ERR(alloc)) { + kfree(img); + img = NULL; + return PTR_ERR(alloc); + } + + name = hwmem_get_name(alloc); + if (name < 0) { + kfree(img); + img = NULL; + hwmem_release(alloc); + return name; + } + + img->img.height = height; + img->img.width = width; + img->img.fmt = fmt; + img->img.pitch = stride; + img->img.buf.hwmem_buf_name = name; + img->img.buf.type = COMPDEV_PTR_HWMEM_BUF_NAME_OFFSET; + img->img.buf.offset = 0; + img->img.buf.len = size; + + img->ref_count = 1; + + *img_pp = img; + + return 0; +} + +static void free_comp_img_buf(struct compdev_img_internal *img, + struct device *dev) +{ + dev_dbg(dev, "%s\n", __func__); + + if (img != NULL && img->ref_count) { + img->ref_count--; + if (img->ref_count == 0) { + struct hwmem_alloc *alloc; + if (img->img.buf.hwmem_buf_name > 0) { + alloc = hwmem_resolve_by_name( + img->img.buf.hwmem_buf_name); + if (IS_ERR(alloc)) { + dev_err(dev, "%s: Error getting Alloc " + "from HWMEM\n", __func__); + return; + } + /* Double release needed */ + hwmem_release(alloc); + hwmem_release(alloc); + } + kfree(img); + } + } +} + +struct compdev_img_internal *compdev_buffer_cache_get_image( + struct buffer_cache_context *cache_ctx, enum compdev_fmt fmt, + u16 width, u16 height) +{ + int i; + struct compdev_img_internal *img = NULL; + + dev_dbg(cache_ctx->dev, "%s\n", __func__); + + /* First check for a cache hit */ + if (cache_ctx->unused_counter > 0) { + u8 active_index = cache_ctx->index; + struct compdev_img_internal *temp = + cache_ctx->img[active_index]; + if (temp != NULL && temp->img.fmt == fmt && + temp->img.width == width && + temp->img.height == height) { + img = temp; + cache_ctx->unused_counter = 0; + } + } + /* Check if there was a cache hit */ + if (img == 
NULL) { + /* Create new buffers and release old */ + for (i = 0; i < BUFFER_CACHE_DEPTH; i++) { + if (cache_ctx->img[i]) { + free_comp_img_buf(cache_ctx->img[i], + cache_ctx->dev); + cache_ctx->img[i] = NULL; + } + cache_ctx->index = 0; + if (alloc_comp_internal_img(fmt, width, height, + &cache_ctx->img[i])) + dev_err(cache_ctx->dev, + "%s: Allocation error\n", + __func__); + } + img = cache_ctx->img[0]; + } + + if (img != NULL) { + img->ref_count++; + cache_ctx->unused_counter = 0; + cache_ctx->index++; + if (cache_ctx->index >= BUFFER_CACHE_DEPTH) + cache_ctx->index = 0; + } + + return img; +} + +static void compdev_buffer_cache_mark_frame + (struct buffer_cache_context *cache_ctx) +{ + if (cache_ctx->unused_counter < 2) + cache_ctx->unused_counter++; + if (cache_ctx->unused_counter == 2) { + int i; + for (i = 0; i < BUFFER_CACHE_DEPTH; i++) { + if (cache_ctx->img[i]) { + free_comp_img_buf(cache_ctx->img[i], + cache_ctx->dev); + cache_ctx->img[i] = NULL; + } + } + } +} + +static bool check_hw_format(enum compdev_fmt fmt) +{ + if (fmt == COMPDEV_FMT_RGB565 || + fmt == COMPDEV_FMT_RGB888 || + fmt == COMPDEV_FMT_RGBA8888 || + fmt == COMPDEV_FMT_RGBX8888 || + fmt == COMPDEV_FMT_YUV422) + return true; + else + return false; +} + +static enum compdev_fmt find_compatible_fmt(enum compdev_fmt fmt, bool rotation) +{ + if (!rotation) { + switch (fmt) { + case COMPDEV_FMT_RGB565: + case COMPDEV_FMT_RGB888: + case COMPDEV_FMT_RGBA8888: + case COMPDEV_FMT_RGBX8888: + return fmt; + case COMPDEV_FMT_YUV422: + case COMPDEV_FMT_YCBCR42XMBN: + case COMPDEV_FMT_YUV420_SP: + case COMPDEV_FMT_YVU420_SP: + case COMPDEV_FMT_YUV420_P: + return COMPDEV_FMT_YUV422; + default: + return COMPDEV_FMT_RGBA8888; + } + } else { + switch (fmt) { + case COMPDEV_FMT_RGB565: + case COMPDEV_FMT_RGB888: + case COMPDEV_FMT_RGBA8888: + case COMPDEV_FMT_RGBX8888: + return fmt; + case COMPDEV_FMT_YUV422: + case COMPDEV_FMT_YCBCR42XMBN: + case COMPDEV_FMT_YUV420_SP: + case COMPDEV_FMT_YVU420_SP: + case 
COMPDEV_FMT_YUV420_P: + return COMPDEV_FMT_RGB888; + default: + return COMPDEV_FMT_RGBA8888; + } + } +} + +static void compdev_callback_worker_function(struct work_struct *work) +{ + struct compdev_post_callback_work *cb_work = + (struct compdev_post_callback_work *)work; + + if (cb_work->pb_cb != NULL) + cb_work->pb_cb(cb_work->cb_data, cb_work->img); +} +static void compdev_blt_worker_function(struct work_struct *work) +{ + struct compdev_blt_work *blt_work = (struct compdev_blt_work *)work; + struct compdev_img *src_img; + struct compdev_img *dst_img; + struct b2r2_blt_req req; + int req_id; + + dev_dbg(blt_work->dev, "%s\n", __func__); + + src_img = blt_work->src_img; + dst_img = &blt_work->dst_img->img; + + memset(&req, 0, sizeof(req)); + req.size = sizeof(req); + + if (src_img->buf.type == COMPDEV_PTR_PHYSICAL) { + req.src_img.buf.type = B2R2_BLT_PTR_PHYSICAL; + req.src_img.buf.fd = src_img->buf.fd; + } else { + struct hwmem_alloc *alloc; + + req.src_img.buf.type = B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET; + req.src_img.buf.hwmem_buf_name = src_img->buf.hwmem_buf_name; + + alloc = hwmem_resolve_by_name(src_img->buf.hwmem_buf_name); + if (IS_ERR(alloc)) { + dev_warn(blt_work->dev, + "HWMEM resolve failed\n"); + } + hwmem_set_access(alloc, + HWMEM_ACCESS_READ | HWMEM_ACCESS_IMPORT, + task_tgid_nr(current)); + hwmem_release(alloc); + } + req.src_img.pitch = src_img->pitch; + req.src_img.buf.offset = src_img->buf.offset; + req.src_img.buf.len = src_img->buf.len; + req.src_img.fmt = compdev_to_blt_format(src_img->fmt); + req.src_img.width = src_img->width; + req.src_img.height = src_img->height; + + req.src_rect.x = src_img->src_rect.x; + req.src_rect.y = src_img->src_rect.y; + req.src_rect.width = src_img->src_rect.width; + req.src_rect.height = src_img->src_rect.height; + + if (dst_img->buf.type == COMPDEV_PTR_PHYSICAL) { + req.dst_img.buf.type = B2R2_BLT_PTR_PHYSICAL; + req.dst_img.buf.fd = dst_img->buf.fd; + } else { + req.dst_img.buf.type = 
B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET; + req.dst_img.buf.hwmem_buf_name = dst_img->buf.hwmem_buf_name; + } + req.dst_img.pitch = dst_img->pitch; + req.dst_img.buf.offset = dst_img->buf.offset; + req.dst_img.buf.len = dst_img->buf.len; + req.dst_img.fmt = compdev_to_blt_format(dst_img->fmt); + req.dst_img.width = dst_img->width; + req.dst_img.height = dst_img->height; + + if (blt_work->mcde_rotation) + req.transform = B2R2_BLT_TRANSFORM_NONE; + else + req.transform = to_blt_transform(src_img->rotation); + req.dst_rect.x = 0; + req.dst_rect.y = 0; + req.dst_rect.width = src_img->dst_rect.width; + req.dst_rect.height = src_img->dst_rect.height; + + req.global_alpha = 0xff; + req.flags = B2R2_BLT_FLAG_DITHER; + + req_id = b2r2_blt_request(blt_work->blt_handle, &req); + + if (b2r2_blt_synch(blt_work->blt_handle, req_id) < 0) { + dev_err(blt_work->dev, + "%s: Could not perform b2r2_blt_synch", + __func__); + } + + dst_img->src_rect.x = 0; + dst_img->src_rect.x = 0; + dst_img->src_rect.width = dst_img->width; + dst_img->src_rect.height = dst_img->height; + + dst_img->dst_rect.x = src_img->dst_rect.x; + dst_img->dst_rect.y = src_img->dst_rect.y; + dst_img->dst_rect.width = src_img->dst_rect.width; + dst_img->dst_rect.height = src_img->dst_rect.height; + + dst_img->rotation = src_img->rotation; +} + +static int compdev_post_buffer_locked(struct compdev *cd, + struct compdev_img *src_img) +{ + int ret = 0; + int i; + bool transform_needed = false; + struct compdev_img *resulting_img; + struct compdev_blt_work blt_work; + struct compdev_post_callback_work cb_work; + bool callback_work = false; + bool bypass_case = false; + + dev_dbg(cd->dev, "%s\n", __func__); + + /* Free potential temp buffers */ + for (i = 0; i < cd->dss_ctx.temp_img_count; i++) + free_comp_img_buf(cd->dss_ctx.temp_img[i], cd->dev); + cd->dss_ctx.temp_img_count = 0; + + /* Check for bypass images */ + if (src_img->flags & COMPDEV_BYPASS_FLAG) + bypass_case = true; + + /* Handle callback */ + if (cd->pb_cb != 
NULL) { + callback_work = true; + INIT_WORK((struct work_struct *)&cb_work, + compdev_callback_worker_function); + cb_work.img = src_img; + cb_work.pb_cb = cd->pb_cb; + cb_work.cb_data = cd->cb_data; + cb_work.dev = cd->dev; + queue_work(cd->worker_thread, (struct work_struct *)&cb_work); + } + + if (!bypass_case) { + /* Determine if transform is needed */ + /* First check scaling */ + if ((src_img->rotation == COMPDEV_ROT_0 || + src_img->rotation == COMPDEV_ROT_180) && + (src_img->src_rect.width != src_img->dst_rect.width || + src_img->src_rect.height != src_img->dst_rect.height)) + transform_needed = true; + else if ((src_img->rotation == COMPDEV_ROT_90_CCW || + src_img->rotation == COMPDEV_ROT_270_CCW) && + (src_img->src_rect.width != src_img->dst_rect.height || + src_img->src_rect.height != src_img->dst_rect.width)) + transform_needed = true; + + if (!transform_needed && check_hw_format(src_img->fmt) == false) + transform_needed = true; + + if (transform_needed) { + u16 width = 0; + u16 height = 0; + enum compdev_fmt fmt; + + INIT_WORK((struct work_struct *)&blt_work, + compdev_blt_worker_function); + + if (cd->dss_ctx.blt_handle == 0) { + dev_dbg(cd->dev, "%s: B2R2 opened\n", __func__); + cd->dss_ctx.blt_handle = b2r2_blt_open(); + if (cd->dss_ctx.blt_handle < 0) { + dev_warn(cd->dev, + "%s(%d): Failed to " + "open b2r2 device\n", + __func__, __LINE__); + } + } + blt_work.blt_handle = cd->dss_ctx.blt_handle; + blt_work.src_img = src_img; + blt_work.mcde_rotation = cd->mcde_rotation; + + width = src_img->dst_rect.width; + height = src_img->dst_rect.height; + + fmt = find_compatible_fmt(src_img->fmt, + (!cd->mcde_rotation) && + (src_img->rotation != COMPDEV_ROT_0)); + + blt_work.dst_img = compdev_buffer_cache_get_image + (&cd->dss_ctx.cache_ctx, + fmt, width, height); + + blt_work.dst_img->img.flags = src_img->flags; + blt_work.dev = cd->dev; + + queue_work(cd->worker_thread, + (struct work_struct *)&blt_work); + flush_work_sync((struct work_struct *)&blt_work); 
+ + resulting_img = &blt_work.dst_img->img; + + cd->dss_ctx.temp_img[cd->dss_ctx.temp_img_count] = + blt_work.dst_img; + cd->dss_ctx.temp_img_count++; + + } else { + resulting_img = src_img; + } + + if (!cd->mcde_rotation) + resulting_img->rotation = COMPDEV_ROT_0; + + cd->images[cd->image_count] = resulting_img; + cd->image_count++; + + /* make sure that a potential callback has returned */ + if (callback_work) + flush_work_sync((struct work_struct *)&cb_work); + + if (cd->sync_count > 1) { + cd->sync_count--; + mutex_unlock(&cd->lock); + /* Wait for fence */ + wait_for_completion(&cd->fence); + mutex_lock(&cd->lock); + } else { + struct compdev_img *img1 = NULL; + struct compdev_img *img2 = NULL; + + if (cd->sync_count) + cd->sync_count--; + + img1 = cd->images[0]; + if (cd->image_count) + img2 = cd->images[1]; + + /* Do the refresh */ + compdev_post_buffers_dss(&cd->dss_ctx, img1, img2); + compdev_buffer_cache_mark_frame + (&cd->dss_ctx.cache_ctx); + + if (cd->s_info.img_count > 1) { + /* Releasing fence */ + complete(&cd->fence); + } + + cd->sync_count = 0; + cd->image_count = 0; + cd->images[0] = NULL; + cd->images[1] = NULL; + } + } else { + /* make sure that a potential callback has returned */ + if (callback_work) + flush_work_sync((struct work_struct *)&cb_work); + } + + return ret; +} + +static int compdev_post_buffers_dss(struct dss_context *dss_ctx, + struct compdev_img *img1, struct compdev_img *img2) +{ + int ret = 0; + int i = 0; + + struct compdev_img *fb_img = NULL; + struct compdev_img *ovly_img = NULL; + + /* Unpin the previous frame */ + release_prev_frame(dss_ctx); + + /* Set channel rotation */ + if (img1 != NULL && + (dss_ctx->current_buffer_rotation != img1->rotation)) { + if (compdev_update_rotation(dss_ctx, img1->rotation) != 0) + dev_warn(dss_ctx->dev, + "Failed to update MCDE rotation " + "(img1->rotation = %d), %d\n", + img1->rotation, ret); + else + dss_ctx->current_buffer_rotation = img1->rotation; + } + + if ((img1 != NULL) && 
(img1->flags & COMPDEV_OVERLAY_FLAG)) + ovly_img = img1; + else if (img1 != NULL) + fb_img = img1; + + + if ((img2 != NULL) && (img2->flags & COMPDEV_OVERLAY_FLAG)) + ovly_img = img2; + else if (img2 != NULL) + fb_img = img2; + + /* Handle buffers */ + if (fb_img != NULL) { + ret = compdev_setup_ovly(fb_img, + &dss_ctx->ovly_buffer[i], dss_ctx->ovly[0], 1, dss_ctx); + if (ret) + dev_warn(dss_ctx->dev, + "Failed to setup overlay[%d], %d\n", 0, ret); + i++; + } else { + disable_overlay(dss_ctx->ovly[0]); + } + + + if (ovly_img != NULL) { + ret = compdev_setup_ovly(ovly_img, + &dss_ctx->ovly_buffer[i], dss_ctx->ovly[1], 0, dss_ctx); + if (ret) + dev_warn(dss_ctx->dev, + "Failed to setup overlay[%d], %d\n", 1, ret); + } else { + disable_overlay(dss_ctx->ovly[1]); + } + + /* Do the display update */ + mcde_dss_update_overlay(dss_ctx->ovly[0], true); + + return ret; +} + +static int compdev_post_scene_info_locked(struct compdev *cd, + struct compdev_scene_info *s_info) +{ + int ret = 0; + + dev_dbg(cd->dev, "%s\n", __func__); + + cd->s_info = *s_info; + cd->sync_count = cd->s_info.img_count; + + /* always complete the fence in case someone is hanging incorrectly. 
*/ + complete(&cd->fence); + init_completion(&cd->fence); + + /* Handle callback */ + if (cd->si_cb != NULL) { + mutex_unlock(&cd->lock); + cd->si_cb(cd->cb_data, s_info); + mutex_lock(&cd->lock); + } + return ret; +} + + +static int compdev_get_size_locked(struct dss_context *dss_ctx, + struct compdev_size *size) +{ + int ret = 0; + if ((dss_ctx->display_rotation) % 180) { + size->height = dss_ctx->phy_size.width; + size->width = dss_ctx->phy_size.height; + } else { + size->height = dss_ctx->phy_size.height; + size->width = dss_ctx->phy_size.width; + } + + return ret; +} + +static int compdev_get_listener_state_locked(struct compdev *cd, + enum compdev_listener_state *state) +{ + int ret = 0; + + *state = COMPDEV_LISTENER_OFF; + if (cd->pb_cb != NULL) + *state = COMPDEV_LISTENER_ON; + return ret; +} + +static long compdev_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + int ret; + struct compdev *cd = (struct compdev *)file->private_data; + struct compdev_img img; + struct compdev_scene_info s_info; + + mutex_lock(&cd->lock); + + switch (cmd) { + case COMPDEV_GET_SIZE_IOC: + { + struct compdev_size tmp; + compdev_get_size_locked(&cd->dss_ctx, &tmp); + ret = copy_to_user((void __user *)arg, &tmp, + sizeof(tmp)); + if (ret) + ret = -EFAULT; + } + break; + case COMPDEV_GET_LISTENER_STATE_IOC: + { + enum compdev_listener_state state; + compdev_get_listener_state_locked(cd, &state); + ret = copy_to_user((void __user *)arg, &state, + sizeof(state)); + if (ret) + ret = -EFAULT; + } + break; + case COMPDEV_POST_BUFFER_IOC: + memset(&img, 0, sizeof(img)); + /* Get the user data */ + if (copy_from_user(&img, (void *)arg, sizeof(img))) { + dev_warn(cd->dev, + "%s: copy_from_user failed\n", + __func__); + mutex_unlock(&cd->lock); + return -EFAULT; + } + ret = compdev_post_buffer_locked(cd, &img); + + break; + case COMPDEV_POST_SCENE_INFO_IOC: + memset(&s_info, 0, sizeof(s_info)); + /* Get the user data */ + if (copy_from_user(&s_info, (void *)arg, 
sizeof(s_info))) { + dev_warn(cd->dev, + "%s: copy_from_user failed\n", + __func__); + mutex_unlock(&cd->lock); + return -EFAULT; + } + ret = compdev_post_scene_info_locked(cd, &s_info); + + break; + + default: + ret = -ENOSYS; + } + + mutex_unlock(&cd->lock); + + return ret; +} + +static const struct file_operations compdev_fops = { + .open = compdev_open, + .release = compdev_release, + .unlocked_ioctl = compdev_ioctl, +}; + +static void init_compdev(struct compdev *cd, const char *name) +{ + mutex_init(&cd->lock); + INIT_LIST_HEAD(&cd->list); + init_completion(&cd->fence); + + cd->mdev.minor = MISC_DYNAMIC_MINOR; + cd->mdev.name = name; + cd->mdev.fops = &compdev_fops; + cd->dev = cd->mdev.this_device; +} + +static void init_dss_context(struct dss_context *dss_ctx, + struct mcde_display_device *ddev, struct compdev *cd) +{ + dss_ctx->ddev = ddev; + dss_ctx->dev = cd->dev; + memset(&dss_ctx->cache_ctx, 0, sizeof(struct buffer_cache_context)); + dss_ctx->cache_ctx.dev = dss_ctx->dev; +} + +int compdev_create(struct mcde_display_device *ddev, + struct mcde_overlay *parent_ovly, bool mcde_rotation) +{ + int ret = 0; + int i; + struct compdev *cd; + struct mcde_video_mode vmode; + struct mcde_overlay_info info; + + char name[10]; + + if (dev_counter == 0) { + for (i = 0; i < MAX_NBR_OF_COMPDEVS; i++) + compdevs[i] = NULL; + } + + if (dev_counter > MAX_NBR_OF_COMPDEVS) + return -ENOMEM; + + cd = kzalloc(sizeof(struct compdev), GFP_KERNEL); + if (!cd) + return -ENOMEM; + + compdevs[dev_counter] = cd; + cd->dev_index = dev_counter; + + snprintf(name, sizeof(name), "%s%d", COMPDEV_DEFAULT_DEVICE_PREFIX, + dev_counter++); + init_compdev(cd, name); + + init_dss_context(&cd->dss_ctx, ddev, cd); + + mcde_dss_get_video_mode(ddev, &vmode); + + cd->worker_thread = create_workqueue(name); + if (!cd->worker_thread) { + ret = -ENOMEM; + goto fail_workqueue; + } + + cd->dss_ctx.ovly[0] = parent_ovly; + if (!cd->dss_ctx.ovly[0]) { + ret = -ENOMEM; + goto fail_create_ovly; + } + + 
for (i = 1; i < NUM_COMPDEV_BUFS; i++) { + cd->dss_ctx.ovly[i] = mcde_dss_create_overlay(ddev, &info); + if (!cd->dss_ctx.ovly[i]) { + ret = -ENOMEM; + goto fail_create_ovly; + } + if (mcde_dss_enable_overlay(cd->dss_ctx.ovly[i])) + goto fail_create_ovly; + if (disable_overlay(cd->dss_ctx.ovly[i])) + goto fail_create_ovly; + } + + mcde_dss_get_native_resolution(ddev, &cd->dss_ctx.phy_size.width, + &cd->dss_ctx.phy_size.height); + cd->dss_ctx.display_rotation = mcde_dss_get_rotation(ddev); + cd->dss_ctx.current_buffer_rotation = 0; + + cd->mcde_rotation = mcde_rotation; + + ret = misc_register(&cd->mdev); + if (ret) + goto fail_register_misc; + mutex_lock(&dev_list_lock); + list_add_tail(&cd->list, &dev_list); + mutex_unlock(&dev_list_lock); + + goto out; + +fail_register_misc: +fail_create_ovly: + for (i = 0; i < NUM_COMPDEV_BUFS; i++) { + if (cd->dss_ctx.ovly[i]) + mcde_dss_destroy_overlay(cd->dss_ctx.ovly[i]); + } +fail_workqueue: + kfree(cd); +out: + return ret; +} + + +int compdev_get(int dev_idx, struct compdev **cd_pp) +{ + struct compdev *cd; + cd = NULL; + + if (dev_idx >= MAX_NBR_OF_COMPDEVS) + return -ENOMEM; + + cd = compdevs[dev_idx]; + if (cd != NULL) { + mutex_lock(&cd->lock); + cd->ref_count++; + mutex_unlock(&cd->lock); + *cd_pp = cd; + return 0; + } else { + return -ENOMEM; + } +} +EXPORT_SYMBOL(compdev_get); + +int compdev_put(struct compdev *cd) +{ + int ret = 0; + if (cd == NULL) + return -ENOMEM; + + mutex_lock(&cd->lock); + cd->ref_count--; + if (cd->ref_count < 0) + dev_warn(cd->dev, + "%s: Incorrect ref count\n", __func__); + mutex_unlock(&cd->lock); + return ret; +} +EXPORT_SYMBOL(compdev_put); + +int compdev_get_size(struct compdev *cd, struct compdev_size *size) +{ + int ret = 0; + if (cd == NULL) + return -ENOMEM; + + mutex_lock(&cd->lock); + + ret = compdev_get_size_locked(&cd->dss_ctx, size); + + mutex_unlock(&cd->lock); + return ret; +} +EXPORT_SYMBOL(compdev_get_size); + +int compdev_get_listener_state(struct compdev *cd, + enum 
compdev_listener_state *listener_state) +{ + int ret = 0; + if (cd == NULL) + return -ENOMEM; + + mutex_lock(&cd->lock); + + ret = compdev_get_listener_state_locked(cd, listener_state); + + mutex_unlock(&cd->lock); + return ret; +} +EXPORT_SYMBOL(compdev_get_listener_state); + + +int compdev_post_buffer(struct compdev *cd, struct compdev_img *img) +{ + int ret = 0; + if (cd == NULL) + return -ENOMEM; + + mutex_lock(&cd->lock); + + ret = compdev_post_buffer_locked(cd, img); + + mutex_unlock(&cd->lock); + return ret; +} +EXPORT_SYMBOL(compdev_post_buffer); + +int compdev_post_scene_info(struct compdev *cd, + struct compdev_scene_info *s_info) +{ + int ret = 0; + if (cd == NULL) + return -ENOMEM; + + mutex_lock(&cd->lock); + + ret = compdev_post_scene_info_locked(cd, s_info); + + mutex_unlock(&cd->lock); + return ret; +} +EXPORT_SYMBOL(compdev_post_scene_info); + +int compdev_register_listener_callbacks(struct compdev *cd, void *data, + post_buffer_callback pb_cb, post_scene_info_callback si_cb) +{ + int ret = 0; + if (cd == NULL) + return -ENOMEM; + mutex_lock(&cd->lock); + cd->cb_data = data; + cd->pb_cb = pb_cb; + cd->si_cb = si_cb; + mutex_unlock(&cd->lock); + return ret; +} +EXPORT_SYMBOL(compdev_register_listener_callbacks); + +int compdev_deregister_callbacks(struct compdev *cd) +{ + int ret = 0; + if (cd == NULL) + return -ENOMEM; + mutex_lock(&cd->lock); + cd->cb_data = NULL; + cd->pb_cb = NULL; + cd->si_cb = NULL; + mutex_unlock(&cd->lock); + return ret; +} +EXPORT_SYMBOL(compdev_deregister_callbacks); + +void compdev_destroy(struct mcde_display_device *ddev) +{ + struct compdev *cd; + struct compdev *tmp; + int i; + + mutex_lock(&dev_list_lock); + list_for_each_entry_safe(cd, tmp, &dev_list, list) { + if (cd->dss_ctx.ddev == ddev) { + list_del(&cd->list); + misc_deregister(&cd->mdev); + for (i = 1; i < NUM_COMPDEV_BUFS; i++) + mcde_dss_destroy_overlay(cd->dss_ctx.ovly[i]); + b2r2_blt_close(cd->dss_ctx.blt_handle); + + release_prev_frame(&cd->dss_ctx); + + 
/* Free potential temp buffers */ + for (i = 0; i < cd->dss_ctx.temp_img_count; i++) + free_comp_img_buf(cd->dss_ctx.temp_img[i], + cd->dev); + + for (i = 0; i < BUFFER_CACHE_DEPTH; i++) { + if (cd->dss_ctx.cache_ctx.img[i]) { + free_comp_img_buf + (cd->dss_ctx.cache_ctx.img[i], + cd->dev); + cd->dss_ctx.cache_ctx.img[i] = NULL; + } + } + + destroy_workqueue(cd->worker_thread); + kfree(cd); + break; + } + } + dev_counter--; + mutex_unlock(&dev_list_lock); +} + +static void compdev_destroy_all(void) +{ + struct compdev *cd; + struct compdev *tmp; + int i; + + mutex_lock(&dev_list_lock); + list_for_each_entry_safe(cd, tmp, &dev_list, list) { + list_del(&cd->list); + misc_deregister(&cd->mdev); + for (i = 0; i < NUM_COMPDEV_BUFS; i++) + mcde_dss_destroy_overlay(cd->dss_ctx.ovly[i]); + + release_prev_frame(&cd->dss_ctx); + /* Free potential temp buffers */ + for (i = 0; i < cd->dss_ctx.temp_img_count; i++) + free_comp_img_buf(cd->dss_ctx.temp_img[i], cd->dev); + + for (i = 0; i < BUFFER_CACHE_DEPTH; i++) { + if (cd->dss_ctx.cache_ctx.img[i]) { + free_comp_img_buf + (cd->dss_ctx.cache_ctx.img[i], + cd->dev); + cd->dss_ctx.cache_ctx.img[i] = NULL; + } + } + + kfree(cd); + } + mutex_unlock(&dev_list_lock); + + mutex_destroy(&dev_list_lock); +} + +static int __init compdev_init(void) +{ + pr_info("%s\n", __func__); + + mutex_init(&dev_list_lock); + + return 0; +} +module_init(compdev_init); + +static void __exit compdev_exit(void) +{ + compdev_destroy_all(); + pr_info("%s\n", __func__); +} +module_exit(compdev_exit); + +MODULE_AUTHOR("Anders Bauer <anders.bauer@stericsson.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Display overlay device driver"); + diff --git a/drivers/misc/dispdev/Makefile b/drivers/misc/dispdev/Makefile new file mode 100644 index 00000000000..11dc7611d26 --- /dev/null +++ b/drivers/misc/dispdev/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_DISPDEV) += dispdev.o diff --git a/drivers/misc/dispdev/dispdev.c b/drivers/misc/dispdev/dispdev.c new file mode 
100644 index 00000000000..5413a252d35 --- /dev/null +++ b/drivers/misc/dispdev/dispdev.c @@ -0,0 +1,659 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Display output device driver + * + * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/idr.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/miscdevice.h> +#include <linux/uaccess.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <linux/ioctl.h> + +#include <linux/dispdev.h> +#include <linux/hwmem.h> +#include <video/mcde_dss.h> + +#define DENSITY_CHECK (16) +#define MAX_BUFFERS 4 + +static LIST_HEAD(dev_list); +static DEFINE_MUTEX(dev_list_lock); + +enum buffer_state { + BUF_UNUSED = 0, + BUF_QUEUED, + BUF_ACTIVATED, +/*TODO:waitfordone BUF_DEACTIVATED,*/ + BUF_FREE, + BUF_DEQUEUED, +}; + +struct dispdev_buffer { + struct hwmem_alloc *alloc; + u32 size; + enum buffer_state state; + u32 paddr; /* if pinned */ +}; + +struct dispdev { + bool open; + struct mutex lock; + struct miscdevice mdev; + struct list_head list; + struct mcde_display_device *ddev; + struct mcde_overlay *ovly; + struct mcde_overlay *parent_ovly; + struct dispdev_config config; + bool overlay; + struct dispdev_buffer buffers[MAX_BUFFERS]; + wait_queue_head_t waitq_dq; + /* + * For the rotation use case + * buffers_need_update is used to ensure that a set_config that + * changes width or height is followed by a unregister_buffer. + */ + bool buffers_need_update; + /* + * For the overlay startup use case. + * first_update is used to handle the first update after a set_config. + * In this case a queue_buffer will arrive after set_config and not a + * unregister_buffer as in the rotation use case. 
+ */ + bool first_update; + char name[sizeof(DISPDEV_DEFAULT_DEVICE_PREFIX) + 3]; +}; + +static int find_buf(struct dispdev *dd, enum buffer_state state) +{ + int i; + for (i = 0; i < MAX_BUFFERS; i++) + if (dd->buffers[i].state == state) + return i; + return -1; +} + +int dispdev_open(struct inode *inode, struct file *file) +{ + int ret; + struct dispdev *dd = NULL; + + mutex_lock(&dev_list_lock); + list_for_each_entry(dd, &dev_list, list) + if (dd->mdev.minor == iminor(inode)) + break; + + if (&dd->list == &dev_list) { + mutex_unlock(&dev_list_lock); + return -ENODEV; + } + + if (dd->open) { + mutex_unlock(&dev_list_lock); + return -EBUSY; + } + + dd->open = true; + + mutex_unlock(&dev_list_lock); + + ret = mcde_dss_enable_overlay(dd->ovly); + if (ret) + return ret; + + file->private_data = dd; + + return 0; +} + +int dispdev_release(struct inode *inode, struct file *file) +{ + int i; + struct dispdev *dd = NULL; + + mutex_lock(&dev_list_lock); + list_for_each_entry(dd, &dev_list, list) + if (dd->mdev.minor == iminor(inode)) + break; + mutex_unlock(&dev_list_lock); + + if (&dd->list == &dev_list) + return -ENODEV; + + /* TODO: Make sure it waits for completion */ + mcde_dss_disable_overlay(dd->ovly); + for (i = 0; i < MAX_BUFFERS; i++) { + if (dd->buffers[i].paddr) + hwmem_unpin(dd->buffers[i].alloc); + if (dd->buffers[i].alloc) + hwmem_release(dd->buffers[i].alloc); + dd->buffers[i].alloc = NULL; + dd->buffers[i].state = BUF_UNUSED; + dd->buffers[i].size = 0; + dd->buffers[i].paddr = 0; + } + dd->open = false; + wake_up(&dd->waitq_dq); + return 0; +} + +static enum mcde_ovly_pix_fmt get_ovly_fmt(enum dispdev_fmt fmt) +{ + switch (fmt) { + default: + case DISPDEV_FMT_RGB565: + return MCDE_OVLYPIXFMT_RGB565; + case DISPDEV_FMT_RGB888: + return MCDE_OVLYPIXFMT_RGB888; + case DISPDEV_FMT_RGBA8888: + return MCDE_OVLYPIXFMT_RGBA8888; + case DISPDEV_FMT_RGBX8888: + return MCDE_OVLYPIXFMT_RGBX8888; + case DISPDEV_FMT_YUV422: + return MCDE_OVLYPIXFMT_YCbCr422; + } +} + 
+static void get_ovly_info(struct dispdev_config *cfg, + struct mcde_video_mode *vmode, + struct mcde_overlay_info *info, bool overlay) +{ + info->paddr = 0; + info->stride = cfg->stride; + info->fmt = get_ovly_fmt(cfg->format); + info->src_x = 0; + info->src_y = 0; + info->dst_x = cfg->x; + info->dst_y = cfg->y; + info->dst_z = cfg->z; + info->w = cfg->width; + info->h = cfg->height; + info->dirty.x = 0; + info->dirty.y = 0; + info->dirty.w = vmode->xres; + info->dirty.h = vmode->yres; +} + +static int dispdev_set_config(struct dispdev *dd, struct dispdev_config *cfg) +{ + int ret = 0; + if (memcmp(&dd->config, cfg, sizeof(struct dispdev_config)) == 0) + return 0; + + /* + * Only update MCDE if format, stride, width and height + * is the same. Otherwise just store the new config and update + * MCDE in the next queue buffer. This because the buffer that is + * active can be have the wrong format, width ... + */ + if (cfg->format == dd->config.format && + cfg->stride == dd->config.stride && + cfg->width == dd->config.width && + cfg->height == dd->config.height) { + + int buf_index; + if (!dd->buffers_need_update) { + buf_index = find_buf(dd, BUF_ACTIVATED); + if (buf_index >= 0) { + struct mcde_overlay_info info; + struct dispdev_buffer *buf; + struct mcde_video_mode vmode; + + buf = &dd->buffers[buf_index]; + mcde_dss_get_video_mode(dd->ddev, &vmode); + get_ovly_info(cfg, &vmode, &info, dd->overlay); + info.paddr = buf->paddr; + ret = mcde_dss_apply_overlay(dd->ovly, &info); + if (!ret) + mcde_dss_update_overlay(dd->ovly, + false); + } + } + } else { + dd->buffers_need_update = true; + } + + dd->config = *cfg; + + return ret; +} + +static int dispdev_register_buffer(struct dispdev *dd, s32 hwmem_name) +{ + int ret; + struct dispdev_buffer *buf; + enum hwmem_mem_type memtype; + enum hwmem_access access; + + ret = find_buf(dd, BUF_UNUSED); + if (ret < 0) + return -ENOMEM; + buf = &dd->buffers[ret]; + buf->alloc = hwmem_resolve_by_name(hwmem_name); + if 
(IS_ERR(buf->alloc)) { + ret = PTR_ERR(buf->alloc); + goto resolve_failed; + } + + hwmem_get_info(buf->alloc, &buf->size, &memtype, &access); + + if (!(access & HWMEM_ACCESS_READ) || + memtype != HWMEM_MEM_CONTIGUOUS_SYS) { + ret = -EACCES; + goto invalid_mem; + } + + buf->state = BUF_FREE; + goto out; +invalid_mem: + hwmem_release(buf->alloc); +resolve_failed: +out: + return ret; +} + +static int dispdev_unregister_buffer(struct dispdev *dd, u32 buf_idx) +{ + struct dispdev_buffer *buf = &dd->buffers[buf_idx]; + + if (buf_idx >= ARRAY_SIZE(dd->buffers)) + return -EINVAL; + + if (buf->state == BUF_UNUSED) + return -EINVAL; + + if (dd->buffers_need_update) + dd->buffers_need_update = false; + + if (buf->state == BUF_ACTIVATED) { + /* Disable the overlay */ + struct mcde_overlay_info info; + struct mcde_video_mode vmode; + /* TODO Wait for frame done */ + mcde_dss_get_video_mode(dd->ddev, &vmode); + get_ovly_info(&dd->config, &vmode, &info, dd->overlay); + mcde_dss_apply_overlay(dd->ovly, &info); + mcde_dss_update_overlay(dd->ovly, false); + hwmem_unpin(dd->buffers[buf_idx].alloc); + } + + hwmem_release(buf->alloc); + buf->state = BUF_UNUSED; + buf->alloc = NULL; + buf->size = 0; + buf->paddr = 0; + dd->first_update = false; + + return 0; +} + + +/** + * @brief Check if the buffer is transparent or black (ARGB = X000) + * Note: Only for ARGB32. 
+ * Worst case: a ~full transparent buffer + * Results: ~2200us @800Mhz for a WVGA screen, with DENSITY_CHECK=8 + * ~520us @800Mhz for a WVGA screen, with DENSITY_CHECK=16 + * + * @param w witdh + * @param h height + * @param addr buffer addr + * + * @return 1 if the buffer is transparent, else 0 + */ +static int is_transparent(int w, int h, u32 *addr) +{ + int i, j; + u32 *c, *next_line; + u32 sum; + + next_line = addr; + sum = 0; + + /* TODO Optimize me */ + for (j = 0; j < h; j += DENSITY_CHECK) { + c = next_line; + for (i = 0; i < w; i += DENSITY_CHECK) { + sum += ((*c) & 0x00FFFFFF); + c += DENSITY_CHECK; + } + if (sum) + return 0; /* Not "transparent" */ + next_line += (w * DENSITY_CHECK); + } + + return 1; /* "Transparent" */ +} + +static int dispdev_queue_buffer(struct dispdev *dd, + struct dispdev_buffer_info *buffer) +{ + int ret, i; + struct mcde_overlay_info info; + struct hwmem_mem_chunk mem_chunk; + size_t mem_chunk_length = 1; + struct hwmem_region rgn = { .offset = 0, .count = 1, .start = 0 }; + struct hwmem_alloc *alloc; + struct mcde_video_mode vmode; + u32 buf_idx = buffer->buf_idx; + + if (buf_idx >= ARRAY_SIZE(dd->buffers) || + dd->buffers[buf_idx].state != BUF_DEQUEUED) + return -EINVAL; + + alloc = dd->buffers[buf_idx].alloc; + mcde_dss_get_video_mode(dd->ddev, &vmode); + get_ovly_info(&dd->config, &vmode, &info, dd->overlay); + ret = hwmem_pin(alloc, &mem_chunk, &mem_chunk_length); + if (ret) { + dev_warn(dd->mdev.this_device, "Pin failed, %d\n", ret); + return -EINVAL; + } + + rgn.size = rgn.end = dd->buffers[buf_idx].size; + ret = hwmem_set_domain(alloc, HWMEM_ACCESS_READ, + HWMEM_DOMAIN_SYNC, &rgn); + if (ret) + dev_warn(dd->mdev.this_device, "Set domain failed, %d\n", ret); + + i = find_buf(dd, BUF_ACTIVATED); + if (i >= 0) { + dd->buffers[i].state = BUF_FREE; + wake_up(&dd->waitq_dq); + } + + if (!dd->first_update) { + dd->first_update = true; + dd->buffers_need_update = false; + } + + dd->buffers[buf_idx].paddr = mem_chunk.paddr; + + 
if (buffer->display_update && !dd->buffers_need_update && + dd->config.width == buffer->buf_cfg.width && + dd->config.height == buffer->buf_cfg.height && + dd->config.format == buffer->buf_cfg.format && + dd->config.stride == buffer->buf_cfg.stride) { + info.paddr = mem_chunk.paddr; + mcde_dss_apply_overlay(dd->ovly, &info); + mcde_dss_update_overlay(dd->ovly, false); + } else if (buffer->display_update) { + dd->buffers_need_update = true; + } + + /* Disable the MCDE FB overlay */ + if ((dd->parent_ovly->state != NULL) && + (dd->ddev->check_transparency)) { + dd->ddev->check_transparency--; + mcde_dss_get_overlay_info(dd->parent_ovly, &info); + if (dd->ddev->check_transparency == 0) { + if (is_transparent(info.w, info.h, info.vaddr)) { + mcde_dss_disable_overlay(dd->parent_ovly); + printk(KERN_INFO "%s Disable overlay\n", + __func__); + } + } + } + + dd->buffers[buf_idx].state = BUF_ACTIVATED; + + return 0; +} + +static int dispdev_dequeue_buffer(struct dispdev *dd) +{ + int i; + + i = find_buf(dd, BUF_FREE); + if (i < 0) { + if (find_buf(dd, BUF_ACTIVATED) < 0) + return -EINVAL; + mutex_unlock(&dd->lock); + wait_event(dd->waitq_dq, (i = find_buf(dd, BUF_FREE)) >= 0); + mutex_lock(&dd->lock); + } + hwmem_unpin(dd->buffers[i].alloc); + dd->buffers[i].state = BUF_DEQUEUED; + dd->buffers[i].paddr = 0; + + return i; +} + +long dispdev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret; + struct dispdev *dd = (struct dispdev *)file->private_data; + + mutex_lock(&dd->lock); + + switch (cmd) { + case DISPDEV_SET_CONFIG_IOC: + { + struct dispdev_config cfg; + if (copy_from_user(&cfg, (void __user *)arg, + sizeof(cfg))) + ret = -EFAULT; + else + ret = dispdev_set_config(dd, &cfg); + } + break; + case DISPDEV_GET_CONFIG_IOC: + ret = copy_to_user((void __user *)arg, &dd->config, + sizeof(dd->config)); + if (ret) + ret = -EFAULT; + break; + case DISPDEV_REGISTER_BUFFER_IOC: + ret = dispdev_register_buffer(dd, (s32)arg); + break; + case 
DISPDEV_UNREGISTER_BUFFER_IOC: + ret = dispdev_unregister_buffer(dd, (u32)arg); + break; + case DISPDEV_QUEUE_BUFFER_IOC: + { + struct dispdev_buffer_info buffer; + if (copy_from_user(&buffer, (void __user *)arg, + sizeof(buffer))) + ret = -EFAULT; + else + ret = dispdev_queue_buffer(dd, &buffer); + break; + } + case DISPDEV_DEQUEUE_BUFFER_IOC: + ret = dispdev_dequeue_buffer(dd); + break; + default: + ret = -ENOSYS; + } + + mutex_unlock(&dd->lock); + + return ret; +} + +static const struct file_operations dispdev_fops = { + .open = dispdev_open, + .release = dispdev_release, + .unlocked_ioctl = dispdev_ioctl, +}; + +static void init_dispdev(struct dispdev *dd, struct mcde_display_device *ddev, + const char *name, bool overlay) +{ + u16 w, h; + int rotation; + + mutex_init(&dd->lock); + INIT_LIST_HEAD(&dd->list); + dd->ddev = ddev; + dd->overlay = overlay; + mcde_dss_get_native_resolution(ddev, &w, &h); + rotation = mcde_dss_get_rotation(ddev); + + if ((rotation == MCDE_DISPLAY_ROT_90_CCW) || + (rotation == MCDE_DISPLAY_ROT_90_CW)) { + dd->config.width = h; + dd->config.height = w; + } else { + dd->config.width = w; + dd->config.height = h; + } + dd->config.format = DISPDEV_FMT_RGB565; + dd->config.stride = sizeof(u16) * w; + dd->config.x = 0; + dd->config.y = 0; + dd->config.z = 0; + dd->buffers_need_update = false; + dd->first_update = false; + init_waitqueue_head(&dd->waitq_dq); + dd->mdev.minor = MISC_DYNAMIC_MINOR; + dd->mdev.name = name; + dd->mdev.fops = &dispdev_fops; + pr_info("%s: name=%s w=%d, h=%d, fmt=%d, stride=%d\n", __func__, name, + dd->config.width, dd->config.height, dd->config.format, + dd->config.stride); +} + +int dispdev_create(struct mcde_display_device *ddev, bool overlay, + struct mcde_overlay *parent_ovly) +{ + int ret = 0; + struct dispdev *dd; + struct mcde_video_mode vmode; + struct mcde_overlay_info info = {0}; + + static int counter; + + dd = kzalloc(sizeof(struct dispdev), GFP_KERNEL); + if (!dd) + return -ENOMEM; + + 
snprintf(dd->name, sizeof(dd->name), "%s%d", + DISPDEV_DEFAULT_DEVICE_PREFIX, counter++); + init_dispdev(dd, ddev, dd->name, overlay); + + if (!overlay) { + ret = mcde_dss_enable_display(ddev); + if (ret) + goto fail_enable_display; + mcde_dss_get_video_mode(ddev, &vmode); + mcde_dss_try_video_mode(ddev, &vmode); + ret = mcde_dss_set_video_mode(ddev, &vmode); + if (ret) + goto fail_set_video_mode; + mcde_dss_set_pixel_format(ddev, info.fmt); + mcde_dss_apply_channel(ddev); + } else + mcde_dss_get_video_mode(ddev, &vmode); + get_ovly_info(&dd->config, &vmode, &info, overlay); + + /* Save the MCDE FB overlay */ + dd->parent_ovly = parent_ovly; + + dd->ovly = mcde_dss_create_overlay(ddev, &info); + if (!dd->ovly) { + ret = -ENOMEM; + goto fail_create_ovly; + } + + ret = misc_register(&dd->mdev); + if (ret) + goto fail_register_misc; + mutex_lock(&dev_list_lock); + list_add_tail(&dd->list, &dev_list); + mutex_unlock(&dev_list_lock); + + goto out; + +fail_register_misc: + mcde_dss_destroy_overlay(dd->ovly); +fail_create_ovly: + if (!overlay) + mcde_dss_disable_display(ddev); +fail_set_video_mode: +fail_enable_display: + kfree(dd); +out: + return ret; +} + +void dispdev_destroy(struct mcde_display_device *ddev) +{ + struct dispdev *dd; + struct dispdev *tmp; + + mutex_lock(&dev_list_lock); + list_for_each_entry_safe(dd, tmp, &dev_list, list) { + if (dd->ddev == ddev) { + list_del(&dd->list); + misc_deregister(&dd->mdev); + mcde_dss_destroy_overlay(dd->ovly); + /* + * TODO: Uncomment when DSS has reference + * counting of enable/disable + */ + /* mcde_dss_disable_display(dd->ddev); */ + kfree(dd); + break; + } + } + mutex_unlock(&dev_list_lock); +} + +static void dispdev_destroy_all(void) +{ + struct dispdev *dd; + struct dispdev *tmp; + + mutex_lock(&dev_list_lock); + list_for_each_entry_safe(dd, tmp, &dev_list, list) { + list_del(&dd->list); + misc_deregister(&dd->mdev); + mcde_dss_destroy_overlay(dd->ovly); + /* + * TODO: Uncomment when DSS has reference + * counting 
of enable/disable + */ + /* mcde_dss_disable_display(dd->ddev); */ + kfree(dd); + } + mutex_unlock(&dev_list_lock); + + mutex_destroy(&dev_list_lock); +} + +static int __init dispdev_init(void) +{ + pr_info("%s\n", __func__); + + mutex_init(&dev_list_lock); + + return 0; +} +module_init(dispdev_init); + +static void __exit dispdev_exit(void) +{ + dispdev_destroy_all(); + pr_info("%s\n", __func__); +} +module_exit(dispdev_exit); + +MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Display output device driver"); + diff --git a/drivers/misc/hwmem/Makefile b/drivers/misc/hwmem/Makefile new file mode 100644 index 00000000000..c307616a181 --- /dev/null +++ b/drivers/misc/hwmem/Makefile @@ -0,0 +1,3 @@ +hwmem-objs := hwmem-main.o hwmem-ioctl.o cache_handler.o contig_alloc.o + +obj-$(CONFIG_HWMEM) += hwmem.o diff --git a/drivers/misc/hwmem/cache_handler.c b/drivers/misc/hwmem/cache_handler.c new file mode 100644 index 00000000000..e0ab4ee6cf8 --- /dev/null +++ b/drivers/misc/hwmem/cache_handler.c @@ -0,0 +1,510 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * Cache handler + * + * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/hwmem.h> + +#include <asm/pgtable.h> + +#include <mach/dcache.h> + +#include "cache_handler.h" + +#define U32_MAX (~(u32)0) + +enum hwmem_alloc_flags cachi_get_cache_settings( + enum hwmem_alloc_flags requested_cache_settings); +void cachi_set_pgprot_cache_options(enum hwmem_alloc_flags cache_settings, + pgprot_t *pgprot); + +static void sync_buf_pre_cpu(struct cach_buf *buf, enum hwmem_access access, + struct hwmem_region *region); +static void sync_buf_post_cpu(struct cach_buf *buf, + enum hwmem_access next_access, struct hwmem_region *next_region); + +static void invalidate_cpu_cache(struct cach_buf *buf, + struct cach_range *range_2b_used); +static void clean_cpu_cache(struct cach_buf *buf, + struct cach_range *range_2b_used); +static void flush_cpu_cache(struct cach_buf *buf, + struct cach_range *range_2b_used); + +static void null_range(struct cach_range *range); +static void expand_range(struct cach_range *range, + struct cach_range *range_2_add); +/* + * Expands range to one of enclosing_range's two edges. The function will + * choose which of enclosing_range's edges to expand range to in such a + * way that the size of range is minimized. range must be located inside + * enclosing_range. 
+ */ +static void expand_range_2_edge(struct cach_range *range, + struct cach_range *enclosing_range); +static void shrink_range(struct cach_range *range, + struct cach_range *range_2_remove); +static bool is_non_empty_range(struct cach_range *range); +static void intersect_range(struct cach_range *range_1, + struct cach_range *range_2, struct cach_range *intersection); +/* Align_up restrictions apply here to */ +static void align_range_up(struct cach_range *range, u32 alignment); +static u32 range_length(struct cach_range *range); +static void region_2_range(struct hwmem_region *region, u32 buffer_size, + struct cach_range *range); + +static void *offset_2_vaddr(struct cach_buf *buf, u32 offset); +static u32 offset_2_paddr(struct cach_buf *buf, u32 offset); + +/* Saturates, might return unaligned values when that happens */ +static u32 align_up(u32 value, u32 alignment); +static u32 align_down(u32 value, u32 alignment); + +/* + * Exported functions + */ + +void cach_init_buf(struct cach_buf *buf, enum hwmem_alloc_flags cache_settings, + u32 size) +{ + buf->vstart = NULL; + buf->pstart = 0; + buf->size = size; + + buf->cache_settings = cachi_get_cache_settings(cache_settings); +} + +void cach_set_buf_addrs(struct cach_buf *buf, void* vaddr, u32 paddr) +{ + bool tmp; + + buf->vstart = vaddr; + buf->pstart = paddr; + + if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHED) { + /* + * Keep whatever is in the cache. This way we avoid an + * unnecessary synch if CPU is the first user. 
+ */ + buf->range_in_cpu_cache.start = 0; + buf->range_in_cpu_cache.end = buf->size; + align_range_up(&buf->range_in_cpu_cache, + get_dcache_granularity()); + buf->range_dirty_in_cpu_cache.start = 0; + buf->range_dirty_in_cpu_cache.end = buf->size; + align_range_up(&buf->range_dirty_in_cpu_cache, + get_dcache_granularity()); + } else { + flush_cpu_dcache(buf->vstart, buf->pstart, buf->size, false, + &tmp); + drain_cpu_write_buf(); + + null_range(&buf->range_in_cpu_cache); + null_range(&buf->range_dirty_in_cpu_cache); + } + null_range(&buf->range_invalid_in_cpu_cache); +} + +void cach_set_pgprot_cache_options(struct cach_buf *buf, pgprot_t *pgprot) +{ + cachi_set_pgprot_cache_options(buf->cache_settings, pgprot); +} + +void cach_set_domain(struct cach_buf *buf, enum hwmem_access access, + enum hwmem_domain domain, struct hwmem_region *region) +{ + struct hwmem_region *__region; + struct hwmem_region full_region; + + if (region != NULL) { + __region = region; + } else { + full_region.offset = 0; + full_region.count = 1; + full_region.start = 0; + full_region.end = buf->size; + full_region.size = buf->size; + + __region = &full_region; + } + + switch (domain) { + case HWMEM_DOMAIN_SYNC: + sync_buf_post_cpu(buf, access, __region); + + break; + + case HWMEM_DOMAIN_CPU: + sync_buf_pre_cpu(buf, access, __region); + + break; + } +} + +/* + * Local functions + */ + +enum hwmem_alloc_flags __attribute__((weak)) cachi_get_cache_settings( + enum hwmem_alloc_flags requested_cache_settings) +{ + static const u32 CACHE_ON_FLAGS_MASK = HWMEM_ALLOC_HINT_CACHED | + HWMEM_ALLOC_HINT_CACHE_WB | HWMEM_ALLOC_HINT_CACHE_WT | + HWMEM_ALLOC_HINT_CACHE_NAOW | HWMEM_ALLOC_HINT_CACHE_AOW | + HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE | + HWMEM_ALLOC_HINT_INNER_CACHE_ONLY; + /* We don't know the cache setting so we assume worst case. 
*/ + static const u32 CACHE_SETTING = HWMEM_ALLOC_HINT_WRITE_COMBINE | + HWMEM_ALLOC_HINT_CACHED | HWMEM_ALLOC_HINT_CACHE_WB | + HWMEM_ALLOC_HINT_CACHE_AOW | + HWMEM_ALLOC_HINT_INNER_AND_OUTER_CACHE; + + if (requested_cache_settings & CACHE_ON_FLAGS_MASK) + return CACHE_SETTING; + else if (requested_cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE || + (requested_cache_settings & HWMEM_ALLOC_HINT_UNCACHED && + !(requested_cache_settings & + HWMEM_ALLOC_HINT_NO_WRITE_COMBINE))) + return HWMEM_ALLOC_HINT_WRITE_COMBINE; + else if (requested_cache_settings & + (HWMEM_ALLOC_HINT_NO_WRITE_COMBINE | + HWMEM_ALLOC_HINT_UNCACHED)) + return 0; + else + /* Nothing specified, use cached */ + return CACHE_SETTING; +} + +void __attribute__((weak)) cachi_set_pgprot_cache_options( + enum hwmem_alloc_flags cache_settings, pgprot_t *pgprot) +{ + if (cache_settings & HWMEM_ALLOC_HINT_CACHED) + *pgprot = *pgprot; /* To silence compiler and checkpatch */ + else if (cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE) + *pgprot = pgprot_writecombine(*pgprot); + else + *pgprot = pgprot_noncached(*pgprot); +} + +bool __attribute__((weak)) speculative_data_prefetch(void) +{ + /* We don't know so we go with the safe alternative */ + return true; +} + +static void sync_buf_pre_cpu(struct cach_buf *buf, enum hwmem_access access, + struct hwmem_region *region) +{ + bool write = access & HWMEM_ACCESS_WRITE; + bool read = access & HWMEM_ACCESS_READ; + + if (!write && !read) + return; + + if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHED) { + struct cach_range region_range; + + region_2_range(region, buf->size, ®ion_range); + + if (read || (write && buf->cache_settings & + HWMEM_ALLOC_HINT_CACHE_WB)) + /* Perform defered invalidates */ + invalidate_cpu_cache(buf, ®ion_range); + if (read || (write && buf->cache_settings & + HWMEM_ALLOC_HINT_CACHE_AOW)) + expand_range(&buf->range_in_cpu_cache, ®ion_range); + if (write && buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_WB) { + struct cach_range 
dirty_range_addition; + + if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_AOW) + dirty_range_addition = region_range; + else + intersect_range(&buf->range_in_cpu_cache, + ®ion_range, &dirty_range_addition); + + expand_range(&buf->range_dirty_in_cpu_cache, + &dirty_range_addition); + } + } + if (buf->cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE) { + if (write) + buf->in_cpu_write_buf = true; + } +} + +static void sync_buf_post_cpu(struct cach_buf *buf, + enum hwmem_access next_access, struct hwmem_region *next_region) +{ + bool write = next_access & HWMEM_ACCESS_WRITE; + bool read = next_access & HWMEM_ACCESS_READ; + struct cach_range region_range; + + if (!write && !read) + return; + + region_2_range(next_region, buf->size, ®ion_range); + + if (write) { + if (speculative_data_prefetch()) { + /* Defer invalidate */ + struct cach_range intersection; + + intersect_range(&buf->range_in_cpu_cache, + ®ion_range, &intersection); + + expand_range(&buf->range_invalid_in_cpu_cache, + &intersection); + + clean_cpu_cache(buf, ®ion_range); + } else { + flush_cpu_cache(buf, ®ion_range); + } + } + if (read) + clean_cpu_cache(buf, ®ion_range); + + if (buf->in_cpu_write_buf) { + drain_cpu_write_buf(); + + buf->in_cpu_write_buf = false; + } +} + +static void invalidate_cpu_cache(struct cach_buf *buf, struct cach_range *range) +{ + struct cach_range intersection; + + intersect_range(&buf->range_invalid_in_cpu_cache, range, + &intersection); + if (is_non_empty_range(&intersection)) { + bool flushed_everything; + + expand_range_2_edge(&intersection, + &buf->range_invalid_in_cpu_cache); + + /* + * Cache handler never uses invalidate to discard data in the + * cache so we can use flush instead which is considerably + * faster for large buffers. 
+ */ + flush_cpu_dcache( + offset_2_vaddr(buf, intersection.start), + offset_2_paddr(buf, intersection.start), + range_length(&intersection), + buf->cache_settings & + HWMEM_ALLOC_HINT_INNER_CACHE_ONLY, + &flushed_everything); + + if (flushed_everything) { + null_range(&buf->range_invalid_in_cpu_cache); + null_range(&buf->range_dirty_in_cpu_cache); + } else { + /* + * No need to shrink range_in_cpu_cache as invalidate + * is only used when we can't keep track of what's in + * the CPU cache. + */ + shrink_range(&buf->range_invalid_in_cpu_cache, + &intersection); + } + } +} + +static void clean_cpu_cache(struct cach_buf *buf, struct cach_range *range) +{ + struct cach_range intersection; + + intersect_range(&buf->range_dirty_in_cpu_cache, range, &intersection); + if (is_non_empty_range(&intersection)) { + bool cleaned_everything; + + expand_range_2_edge(&intersection, + &buf->range_dirty_in_cpu_cache); + + clean_cpu_dcache( + offset_2_vaddr(buf, intersection.start), + offset_2_paddr(buf, intersection.start), + range_length(&intersection), + buf->cache_settings & + HWMEM_ALLOC_HINT_INNER_CACHE_ONLY, + &cleaned_everything); + + if (cleaned_everything) + null_range(&buf->range_dirty_in_cpu_cache); + else + shrink_range(&buf->range_dirty_in_cpu_cache, + &intersection); + } +} + +static void flush_cpu_cache(struct cach_buf *buf, struct cach_range *range) +{ + struct cach_range intersection; + + intersect_range(&buf->range_in_cpu_cache, range, &intersection); + if (is_non_empty_range(&intersection)) { + bool flushed_everything; + + expand_range_2_edge(&intersection, &buf->range_in_cpu_cache); + + flush_cpu_dcache( + offset_2_vaddr(buf, intersection.start), + offset_2_paddr(buf, intersection.start), + range_length(&intersection), + buf->cache_settings & + HWMEM_ALLOC_HINT_INNER_CACHE_ONLY, + &flushed_everything); + + if (flushed_everything) { + if (!speculative_data_prefetch()) + null_range(&buf->range_in_cpu_cache); + null_range(&buf->range_dirty_in_cpu_cache); + 
null_range(&buf->range_invalid_in_cpu_cache); + } else { + if (!speculative_data_prefetch()) + shrink_range(&buf->range_in_cpu_cache, + &intersection); + shrink_range(&buf->range_dirty_in_cpu_cache, + &intersection); + shrink_range(&buf->range_invalid_in_cpu_cache, + &intersection); + } + } +} + +static void null_range(struct cach_range *range) +{ + range->start = U32_MAX; + range->end = 0; +} + +static void expand_range(struct cach_range *range, + struct cach_range *range_2_add) +{ + range->start = min(range->start, range_2_add->start); + range->end = max(range->end, range_2_add->end); +} + +/* + * Expands range to one of enclosing_range's two edges. The function will + * choose which of enclosing_range's edges to expand range to in such a + * way that the size of range is minimized. range must be located inside + * enclosing_range. + */ +static void expand_range_2_edge(struct cach_range *range, + struct cach_range *enclosing_range) +{ + u32 space_on_low_side = range->start - enclosing_range->start; + u32 space_on_high_side = enclosing_range->end - range->end; + + if (space_on_low_side < space_on_high_side) + range->start = enclosing_range->start; + else + range->end = enclosing_range->end; +} + +static void shrink_range(struct cach_range *range, + struct cach_range *range_2_remove) +{ + if (range_2_remove->start > range->start) + range->end = min(range->end, range_2_remove->start); + else + range->start = max(range->start, range_2_remove->end); + + if (range->start >= range->end) + null_range(range); +} + +static bool is_non_empty_range(struct cach_range *range) +{ + return range->end > range->start; +} + +static void intersect_range(struct cach_range *range_1, + struct cach_range *range_2, struct cach_range *intersection) +{ + intersection->start = max(range_1->start, range_2->start); + intersection->end = min(range_1->end, range_2->end); + + if (intersection->start >= intersection->end) + null_range(intersection); +} + +/* Align_up restrictions apply here to */ 
+static void align_range_up(struct cach_range *range, u32 alignment) +{ + if (!is_non_empty_range(range)) + return; + + range->start = align_down(range->start, alignment); + range->end = align_up(range->end, alignment); +} + +static u32 range_length(struct cach_range *range) +{ + if (is_non_empty_range(range)) + return range->end - range->start; + else + return 0; +} + +static void region_2_range(struct hwmem_region *region, u32 buffer_size, + struct cach_range *range) +{ + /* + * We don't care about invalid regions, instead we limit the region's + * range to the buffer's range. This should work good enough, worst + * case we synch the entire buffer when we get an invalid region which + * is acceptable. + */ + range->start = region->offset + region->start; + range->end = min(region->offset + (region->count * region->size) - + (region->size - region->end), buffer_size); + if (range->start >= range->end) { + null_range(range); + return; + } + + align_range_up(range, get_dcache_granularity()); +} + +static void *offset_2_vaddr(struct cach_buf *buf, u32 offset) +{ + return (void *)((u32)buf->vstart + offset); +} + +static u32 offset_2_paddr(struct cach_buf *buf, u32 offset) +{ + return buf->pstart + offset; +} + +/* Saturates, might return unaligned values when that happens */ +static u32 align_up(u32 value, u32 alignment) +{ + u32 remainder = value % alignment; + u32 value_2_add; + + if (remainder == 0) + return value; + + value_2_add = alignment - remainder; + + if (value_2_add > U32_MAX - value) /* Will overflow */ + return U32_MAX; + + return value + value_2_add; +} + +static u32 align_down(u32 value, u32 alignment) +{ + u32 remainder = value % alignment; + if (remainder == 0) + return value; + + return value - remainder; +} diff --git a/drivers/misc/hwmem/cache_handler.h b/drivers/misc/hwmem/cache_handler.h new file mode 100644 index 00000000000..792105196fa --- /dev/null +++ b/drivers/misc/hwmem/cache_handler.h @@ -0,0 +1,61 @@ +/* + * Copyright (C) ST-Ericsson 
SA 2010 + * + * Cache handler + * + * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +/* + * Cache handler can not handle simultaneous execution! The caller has to + * ensure such a situation does not occur. + */ + +#ifndef _CACHE_HANDLER_H_ +#define _CACHE_HANDLER_H_ + +#include <linux/types.h> +#include <linux/hwmem.h> + +/* + * To not have to double all datatypes we've used hwmem datatypes. If someone + * want's to use cache handler but not hwmem then we'll have to define our own + * datatypes. + */ + +struct cach_range { + u32 start; /* Inclusive */ + u32 end; /* Exclusive */ +}; + +/* + * Internal, do not touch! + */ +struct cach_buf { + void *vstart; + u32 pstart; + u32 size; + + /* Remaining hints are active */ + enum hwmem_alloc_flags cache_settings; + + bool in_cpu_write_buf; + struct cach_range range_in_cpu_cache; + struct cach_range range_dirty_in_cpu_cache; + struct cach_range range_invalid_in_cpu_cache; +}; + +void cach_init_buf(struct cach_buf *buf, + enum hwmem_alloc_flags cache_settings, u32 size); + +void cach_set_buf_addrs(struct cach_buf *buf, void* vaddr, u32 paddr); + +void cach_set_pgprot_cache_options(struct cach_buf *buf, pgprot_t *pgprot); + +void cach_set_domain(struct cach_buf *buf, enum hwmem_access access, + enum hwmem_domain domain, struct hwmem_region *region); + +#endif /* _CACHE_HANDLER_H_ */ diff --git a/drivers/misc/hwmem/contig_alloc.c b/drivers/misc/hwmem/contig_alloc.c new file mode 100644 index 00000000000..31533ed5988 --- /dev/null +++ b/drivers/misc/hwmem/contig_alloc.c @@ -0,0 +1,571 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Contiguous memory allocator + * + * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>, + * Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/debugfs.h> +#include <linux/uaccess.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <asm/sizes.h> + +#define MAX_INSTANCE_NAME_LENGTH 31 + +struct alloc { + struct list_head list; + + bool in_use; + phys_addr_t paddr; + size_t size; +}; + +struct instance { + struct list_head list; + + char name[MAX_INSTANCE_NAME_LENGTH + 1]; + + phys_addr_t region_paddr; + void *region_kaddr; + size_t region_size; + + struct list_head alloc_list; + +#ifdef CONFIG_DEBUG_FS + struct inode *debugfs_inode; + int cona_status_free; + int cona_status_used; + int cona_status_max_cont; + int cona_status_max_check; + int cona_status_biggest_free; + int cona_status_printed; +#endif /* #ifdef CONFIG_DEBUG_FS */ +}; + +static LIST_HEAD(instance_list); + +static DEFINE_MUTEX(lock); + +void *cona_create(const char *name, phys_addr_t region_paddr, + size_t region_size); +void *cona_alloc(void *instance, size_t size); +void cona_free(void *instance, void *alloc); +phys_addr_t cona_get_alloc_paddr(void *alloc); +void *cona_get_alloc_kaddr(void *instance, void *alloc); +size_t cona_get_alloc_size(void *alloc); + +static int init_alloc_list(struct instance *instance); +static void clean_alloc_list(struct instance *instance); +static struct alloc *find_free_alloc_bestfit(struct instance *instance, + size_t size); +static struct alloc *split_allocation(struct alloc *alloc, + size_t new_alloc_size); +static phys_addr_t get_alloc_offset(struct instance *instance, + struct alloc *alloc); + +void *cona_create(const char *name, phys_addr_t region_paddr, + size_t region_size) +{ + int ret; + struct instance *instance; + struct vm_struct *vm_area; + + if (region_size == 0) + return ERR_PTR(-EINVAL); + + instance = kzalloc(sizeof(*instance), GFP_KERNEL); + if (instance == NULL) + return ERR_PTR(-ENOMEM); + + memcpy(instance->name, name, 
MAX_INSTANCE_NAME_LENGTH + 1); + /* Truncate name if necessary */ + instance->name[MAX_INSTANCE_NAME_LENGTH] = '\0'; + instance->region_paddr = region_paddr; + instance->region_size = region_size; + + vm_area = get_vm_area(region_size, VM_IOREMAP); + if (vm_area == NULL) { + printk(KERN_WARNING "CONA: Failed to allocate %u bytes" + " kernel virtual memory", region_size); + ret = -ENOMSG; + goto vmem_alloc_failed; + } + instance->region_kaddr = vm_area->addr; + + INIT_LIST_HEAD(&instance->alloc_list); + ret = init_alloc_list(instance); + if (ret < 0) + goto init_alloc_list_failed; + + mutex_lock(&lock); + list_add_tail(&instance->list, &instance_list); + mutex_unlock(&lock); + + return instance; + +init_alloc_list_failed: + vm_area = remove_vm_area(instance->region_kaddr); + if (vm_area == NULL) + printk(KERN_ERR "CONA: Failed to free kernel virtual memory," + " resource leak!\n"); + + kfree(vm_area); +vmem_alloc_failed: + kfree(instance); + + return ERR_PTR(ret); +} + +void *cona_alloc(void *instance, size_t size) +{ + struct instance *instance_l = (struct instance *)instance; + struct alloc *alloc; + + if (size == 0) + return ERR_PTR(-EINVAL); + + mutex_lock(&lock); + + alloc = find_free_alloc_bestfit(instance_l, size); + if (IS_ERR(alloc)) + goto out; + if (size < alloc->size) { + alloc = split_allocation(alloc, size); + if (IS_ERR(alloc)) + goto out; + } else { + alloc->in_use = true; + } +#ifdef CONFIG_DEBUG_FS + instance_l->cona_status_max_cont += alloc->size; + instance_l->cona_status_max_check = + max(instance_l->cona_status_max_check, + instance_l->cona_status_max_cont); +#endif /* #ifdef CONFIG_DEBUG_FS */ + +out: + mutex_unlock(&lock); + + return alloc; +} + +void cona_free(void *instance, void *alloc) +{ + struct instance *instance_l = (struct instance *)instance; + struct alloc *alloc_l = (struct alloc *)alloc; + struct alloc *other; + + mutex_lock(&lock); + + alloc_l->in_use = false; + +#ifdef CONFIG_DEBUG_FS + instance_l->cona_status_max_cont -= 
alloc_l->size; +#endif /* #ifdef CONFIG_DEBUG_FS */ + + other = list_entry(alloc_l->list.prev, struct alloc, list); + if ((alloc_l->list.prev != &instance_l->alloc_list) && + !other->in_use) { + other->size += alloc_l->size; + list_del(&alloc_l->list); + kfree(alloc_l); + alloc_l = other; + } + other = list_entry(alloc_l->list.next, struct alloc, list); + if ((alloc_l->list.next != &instance_l->alloc_list) && + !other->in_use) { + alloc_l->size += other->size; + list_del(&other->list); + kfree(other); + } + + mutex_unlock(&lock); +} + +phys_addr_t cona_get_alloc_paddr(void *alloc) +{ + return ((struct alloc *)alloc)->paddr; +} + +void *cona_get_alloc_kaddr(void *instance, void *alloc) +{ + struct instance *instance_l = (struct instance *)instance; + + return instance_l->region_kaddr + get_alloc_offset(instance_l, + (struct alloc *)alloc); +} + +size_t cona_get_alloc_size(void *alloc) +{ + return ((struct alloc *)alloc)->size; +} + +static int init_alloc_list(struct instance *instance) +{ + /* + * Hack to not get any allocs that cross a 64MiB boundary as B2R2 can't + * handle that. 
+ */ + int ret; + u32 curr_pos = instance->region_paddr; + u32 region_end = instance->region_paddr + instance->region_size; + u32 next_64mib_boundary = (curr_pos + SZ_64M) & ~(SZ_64M - 1); + struct alloc *alloc; + + if (PAGE_SIZE >= SZ_64M) { + printk(KERN_WARNING "CONA: PAGE_SIZE >= 64MiB\n"); + return -ENOMSG; + } + + while (next_64mib_boundary < region_end) { + if (next_64mib_boundary - curr_pos > PAGE_SIZE) { + alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL); + if (alloc == NULL) { + ret = -ENOMEM; + goto error; + } + alloc->paddr = curr_pos; + alloc->size = next_64mib_boundary - curr_pos - + PAGE_SIZE; + alloc->in_use = false; + list_add_tail(&alloc->list, &instance->alloc_list); + curr_pos = alloc->paddr + alloc->size; + } + + alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL); + if (alloc == NULL) { + ret = -ENOMEM; + goto error; + } + alloc->paddr = curr_pos; + alloc->size = PAGE_SIZE; + alloc->in_use = true; + list_add_tail(&alloc->list, &instance->alloc_list); + curr_pos = alloc->paddr + alloc->size; + +#ifdef CONFIG_DEBUG_FS + instance->cona_status_max_cont += alloc->size; +#endif /* #ifdef CONFIG_DEBUG_FS */ + + next_64mib_boundary += SZ_64M; + } + + alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL); + if (alloc == NULL) { + ret = -ENOMEM; + goto error; + } + alloc->paddr = curr_pos; + alloc->size = region_end - curr_pos; + alloc->in_use = false; + list_add_tail(&alloc->list, &instance->alloc_list); + + return 0; + +error: + clean_alloc_list(instance); + + return ret; +} + +static void clean_alloc_list(struct instance *instance) +{ + while (list_empty(&instance->alloc_list) == 0) { + struct alloc *i = list_first_entry(&instance->alloc_list, + struct alloc, list); + + list_del(&i->list); + + kfree(i); + } +} + +static struct alloc *find_free_alloc_bestfit(struct instance *instance, + size_t size) +{ + size_t best_diff = ~(size_t)0; + struct alloc *alloc = NULL, *i; + + list_for_each_entry(i, &instance->alloc_list, list) { + size_t diff = i->size - size; 
+ if (i->in_use || i->size < size) + continue; + if (diff < best_diff) { + alloc = i; + best_diff = diff; + } + } + + return alloc != NULL ? alloc : ERR_PTR(-ENOMEM); +} + +static struct alloc *split_allocation(struct alloc *alloc, + size_t new_alloc_size) +{ + struct alloc *new_alloc; + + new_alloc = kzalloc(sizeof(struct alloc), GFP_KERNEL); + if (new_alloc == NULL) + return ERR_PTR(-ENOMEM); + + new_alloc->in_use = true; + new_alloc->paddr = alloc->paddr; + new_alloc->size = new_alloc_size; + alloc->size -= new_alloc_size; + alloc->paddr += new_alloc_size; + + list_add_tail(&new_alloc->list, &alloc->list); + + return new_alloc; +} + +static phys_addr_t get_alloc_offset(struct instance *instance, + struct alloc *alloc) +{ + return alloc->paddr - instance->region_paddr; +} + +/* Debug */ + +#ifdef CONFIG_DEBUG_FS + +static int print_alloc(struct instance *instance, struct alloc *alloc, + char **buf, size_t buf_size); +static int print_alloc_status(struct instance *instance, char **buf, + size_t buf_size); +static struct instance *get_instance_from_file(struct file *file); +static int debugfs_allocs_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos); + +static const struct file_operations debugfs_allocs_fops = { + .owner = THIS_MODULE, + .read = debugfs_allocs_read, +}; + +static int print_alloc(struct instance *instance, struct alloc *alloc, + char **buf, size_t buf_size) +{ + int ret; + int i; + + for (i = 0; i < 2; i++) { + size_t buf_size_l; + if (i == 0) + buf_size_l = 0; + else + buf_size_l = buf_size; + + if (i == 1) { + if (alloc->in_use) + instance->cona_status_used += alloc->size; + else + instance->cona_status_free += alloc->size; + } + + if (!alloc->in_use) { + instance->cona_status_biggest_free = + max((size_t)alloc->size, + (size_t)instance->cona_status_biggest_free); + } + + ret = snprintf(*buf, buf_size_l, "paddr: %10x\tsize: %10u\t" + "in use: %1u\t used: %10u (%dMB)" + " \t free: %10u (%dMB)\n", + alloc->paddr, + alloc->size, 
+ alloc->in_use, + instance->cona_status_used, + instance->cona_status_used/1024/1024, + instance->cona_status_free, + instance->cona_status_free/1024/1024); + + if (ret < 0) + return -ENOMSG; + else if (ret + 1 > buf_size) + return -EINVAL; + } + + *buf += ret; + + return 0; +} + +static int print_alloc_status(struct instance *instance, char **buf, + size_t buf_size) +{ + int ret; + int i; + + for (i = 0; i < 2; i++) { + size_t buf_size_l; + if (i == 0) + buf_size_l = 0; + else + buf_size_l = buf_size; + + ret = snprintf(*buf, buf_size_l, "Overall peak usage:\t%10u " + "(%dMB)\nCurrent max usage:\t%10u (%dMB)\n" + "Current biggest free:\t%10d (%dMB)\n", + instance->cona_status_max_check, + instance->cona_status_max_check/1024/1024, + instance->cona_status_max_cont, + instance->cona_status_max_cont/1024/1024, + instance->cona_status_biggest_free, + instance->cona_status_biggest_free/1024/1024); + + if (ret < 0) + return -ENOMSG; + else if (ret + 1 > buf_size) + return -EINVAL; + } + + *buf += ret; + + return 0; +} + +static struct instance *get_instance_from_file(struct file *file) +{ + struct instance *curr_instance; + + list_for_each_entry(curr_instance, &instance_list, list) { + if (file->f_dentry->d_inode == curr_instance->debugfs_inode) + return curr_instance; + } + + return ERR_PTR(-ENOENT); +} + +static int debugfs_allocs_read(struct file *file, char __user *buf, + size_t count, loff_t *f_pos) +{ + /* + * We assume the supplied buffer and PAGE_SIZE is large enough to hold + * information about at least one alloc, if not no data will be + * returned. + */ + + int ret; + struct instance *instance; + struct alloc *curr_alloc; + char *local_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + char *local_buf_pos = local_buf; + size_t available_space = min((size_t)PAGE_SIZE, count); + /* private_data is initialized to NULL in open which I assume is 0. 
*/ + void **curr_pos = &file->private_data; + size_t bytes_read; + bool readout_aborted = false; + + if (local_buf == NULL) + return -ENOMEM; + + mutex_lock(&lock); + instance = get_instance_from_file(file); + if (IS_ERR(instance)) { + ret = PTR_ERR(instance); + goto out; + } + + list_for_each_entry(curr_alloc, &instance->alloc_list, list) { + phys_addr_t alloc_offset = get_alloc_offset(instance, + curr_alloc); + if (alloc_offset < (phys_addr_t)*curr_pos) + continue; + + ret = print_alloc(instance, curr_alloc, &local_buf_pos, + available_space - (size_t)(local_buf_pos - + local_buf)); + + if (ret == -EINVAL) { /* No more room */ + readout_aborted = true; + break; + } else if (ret < 0) { + goto out; + } + /* + * There could be an overflow issue here in the unlikely case + * where the region is placed at the end of the address range + * and the last alloc is 1 byte large. Since this is debug code + * and that case most likely never will happen I've chosen to + * defer fixing it till it happens. 
+ */ + *curr_pos = (void *)(alloc_offset + 1); + + /* Make sure to also print status if there were any prints */ + instance->cona_status_printed = false; + } + + if (!readout_aborted && !instance->cona_status_printed) { + ret = print_alloc_status(instance, &local_buf_pos, + available_space - + (size_t)(local_buf_pos - local_buf)); + + if (ret == -EINVAL) /* No more room */ + readout_aborted = true; + else if (ret < 0) + goto out; + else + instance->cona_status_printed = true; + } + + if (!readout_aborted) { + instance->cona_status_free = 0; + instance->cona_status_used = 0; + instance->cona_status_biggest_free = 0; + } + + bytes_read = (size_t)(local_buf_pos - local_buf); + + ret = copy_to_user(buf, local_buf, bytes_read); + if (ret < 0) + goto out; + + ret = bytes_read; + +out: + kfree(local_buf); + mutex_unlock(&lock); + + return ret; +} + +static int __init init_debugfs(void) +{ + struct instance *curr_instance; + struct dentry *debugfs_root_dir = debugfs_create_dir("cona", NULL); + + mutex_lock(&lock); + + list_for_each_entry(curr_instance, &instance_list, list) { + struct dentry *file_dentry; + char tmp_str[MAX_INSTANCE_NAME_LENGTH + 7 + 1]; + tmp_str[0] = '\0'; + strcat(tmp_str, curr_instance->name); + strcat(tmp_str, "_allocs"); + file_dentry = debugfs_create_file(tmp_str, 0444, + debugfs_root_dir, 0, &debugfs_allocs_fops); + if (file_dentry != NULL) + curr_instance->debugfs_inode = file_dentry->d_inode; + } + + mutex_unlock(&lock); + + return 0; +} +/* + * Must be executed after all instances have been created, hence the + * late_initcall. 
+ */ +late_initcall(init_debugfs); + +#endif /* #ifdef CONFIG_DEBUG_FS */ diff --git a/drivers/misc/hwmem/hwmem-ioctl.c b/drivers/misc/hwmem/hwmem-ioctl.c new file mode 100644 index 00000000000..e9e50de78bd --- /dev/null +++ b/drivers/misc/hwmem/hwmem-ioctl.c @@ -0,0 +1,532 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * Hardware memory driver, hwmem + * + * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/idr.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/miscdevice.h> +#include <linux/uaccess.h> +#include <linux/mm_types.h> +#include <linux/hwmem.h> +#include <linux/device.h> +#include <linux/sched.h> + +static int hwmem_open(struct inode *inode, struct file *file); +static int hwmem_ioctl_mmap(struct file *file, struct vm_area_struct *vma); +static int hwmem_release_fop(struct inode *inode, struct file *file); +static long hwmem_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); +static unsigned long hwmem_get_unmapped_area(struct file *file, + unsigned long addr, unsigned long len, unsigned long pgoff, + unsigned long flags); + +static const struct file_operations hwmem_fops = { + .open = hwmem_open, + .mmap = hwmem_ioctl_mmap, + .unlocked_ioctl = hwmem_ioctl, + .release = hwmem_release_fop, + .get_unmapped_area = hwmem_get_unmapped_area, +}; + +static struct miscdevice hwmem_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "hwmem", + .fops = &hwmem_fops, +}; + +struct hwmem_file { + struct mutex lock; + struct idr idr; /* id -> struct hwmem_alloc*, ref counted */ + struct hwmem_alloc *fd_alloc; /* Ref counted */ +}; + +static s32 create_id(struct hwmem_file *hwfile, struct hwmem_alloc *alloc) +{ + int id, ret; + + while (true) { + if (idr_pre_get(&hwfile->idr, GFP_KERNEL) == 0) + return -ENOMEM; + + ret = idr_get_new_above(&hwfile->idr, alloc, 
1, &id); + if (ret == 0) + break; + else if (ret != -EAGAIN) + return -ENOMEM; + } + + /* + * IDR always returns the lowest free id so there is no wrapping issue + * because of this. + */ + if (id >= (s32)1 << (31 - PAGE_SHIFT)) { + dev_err(hwmem_device.this_device, "Out of IDs!\n"); + idr_remove(&hwfile->idr, id); + return -ENOMSG; + } + + return (s32)id << PAGE_SHIFT; +} + +static void remove_id(struct hwmem_file *hwfile, s32 id) +{ + idr_remove(&hwfile->idr, id >> PAGE_SHIFT); +} + +static struct hwmem_alloc *resolve_id(struct hwmem_file *hwfile, s32 id) +{ + struct hwmem_alloc *alloc; + + alloc = id ? idr_find(&hwfile->idr, id >> PAGE_SHIFT) : + hwfile->fd_alloc; + if (alloc == NULL) + alloc = ERR_PTR(-EINVAL); + + return alloc; +} + +static s32 alloc(struct hwmem_file *hwfile, struct hwmem_alloc_request *req) +{ + s32 ret = 0; + struct hwmem_alloc *alloc; + + alloc = hwmem_alloc(req->size, req->flags, req->default_access, + req->mem_type); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + ret = create_id(hwfile, alloc); + if (ret < 0) + hwmem_release(alloc); + + return ret; +} + +static int alloc_fd(struct hwmem_file *hwfile, struct hwmem_alloc_request *req) +{ + struct hwmem_alloc *alloc; + + if (hwfile->fd_alloc) + return -EINVAL; + + alloc = hwmem_alloc(req->size, req->flags, req->default_access, + req->mem_type); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + hwfile->fd_alloc = alloc; + + return 0; +} + +static int release(struct hwmem_file *hwfile, s32 id) +{ + struct hwmem_alloc *alloc; + + if (id == 0) + return -EINVAL; + + alloc = resolve_id(hwfile, id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + remove_id(hwfile, id); + hwmem_release(alloc); + + return 0; +} + +static int set_cpu_domain(struct hwmem_file *hwfile, + struct hwmem_set_domain_request *req) +{ + struct hwmem_alloc *alloc; + + alloc = resolve_id(hwfile, req->id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + return hwmem_set_domain(alloc, req->access, HWMEM_DOMAIN_CPU, + 
(struct hwmem_region *)&req->region); +} + +static int set_sync_domain(struct hwmem_file *hwfile, + struct hwmem_set_domain_request *req) +{ + struct hwmem_alloc *alloc; + + alloc = resolve_id(hwfile, req->id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + return hwmem_set_domain(alloc, req->access, HWMEM_DOMAIN_SYNC, + (struct hwmem_region *)&req->region); +} + +static int pin(struct hwmem_file *hwfile, struct hwmem_pin_request *req) +{ + int ret; + struct hwmem_alloc *alloc; + enum hwmem_mem_type mem_type; + struct hwmem_mem_chunk mem_chunk; + size_t mem_chunk_length = 1; + + alloc = resolve_id(hwfile, req->id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + hwmem_get_info(alloc, NULL, &mem_type, NULL); + if (mem_type != HWMEM_MEM_CONTIGUOUS_SYS) + return -EINVAL; + + ret = hwmem_pin(alloc, &mem_chunk, &mem_chunk_length); + if (ret < 0) + return ret; + + req->phys_addr = mem_chunk.paddr; + + return 0; +} + +static int unpin(struct hwmem_file *hwfile, s32 id) +{ + struct hwmem_alloc *alloc; + + alloc = resolve_id(hwfile, id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + hwmem_unpin(alloc); + + return 0; +} + +static int set_access(struct hwmem_file *hwfile, + struct hwmem_set_access_request *req) +{ + struct hwmem_alloc *alloc; + + alloc = resolve_id(hwfile, req->id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + return hwmem_set_access(alloc, req->access, req->pid); +} + +static int get_info(struct hwmem_file *hwfile, + struct hwmem_get_info_request *req) +{ + struct hwmem_alloc *alloc; + + alloc = resolve_id(hwfile, req->id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + hwmem_get_info(alloc, &req->size, &req->mem_type, &req->access); + + return 0; +} + +static s32 export(struct hwmem_file *hwfile, s32 id) +{ + s32 ret; + struct hwmem_alloc *alloc; + enum hwmem_access access; + + alloc = resolve_id(hwfile, id); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + /* + * The user could be about to send the buffer to a driver but + * there is a 
chance the current thread group doesn't have import rights + * if it gained access to the buffer via an inter-process fd transfer + * (fork, Android binder), if this is the case the driver will not be + * able to resolve the buffer name. To avoid this situation we give the + * current thread group import rights. This will not breach the + * security as the process already has access to the buffer (otherwise + * it would not be able to get here). + */ + hwmem_get_info(alloc, NULL, NULL, &access); + + ret = hwmem_set_access(alloc, (access | HWMEM_ACCESS_IMPORT), + task_tgid_nr(current)); + if (ret < 0) + return ret; + + return hwmem_get_name(alloc); +} + +static s32 import(struct hwmem_file *hwfile, s32 name) +{ + s32 ret = 0; + struct hwmem_alloc *alloc; + enum hwmem_access access; + + alloc = hwmem_resolve_by_name(name); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + /* Check access permissions for process */ + hwmem_get_info(alloc, NULL, NULL, &access); + if (!(access & HWMEM_ACCESS_IMPORT)) { + ret = -EPERM; + goto error; + } + + ret = create_id(hwfile, alloc); + if (ret < 0) + goto error; + + return ret; + +error: + hwmem_release(alloc); + + return ret; +} + +static int import_fd(struct hwmem_file *hwfile, s32 name) +{ + int ret; + struct hwmem_alloc *alloc; + enum hwmem_access access; + + if (hwfile->fd_alloc) + return -EINVAL; + + alloc = hwmem_resolve_by_name(name); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + /* Check access permissions for process */ + hwmem_get_info(alloc, NULL, NULL, &access); + if (!(access & HWMEM_ACCESS_IMPORT)) { + ret = -EPERM; + goto error; + } + + hwfile->fd_alloc = alloc; + + return 0; + +error: + hwmem_release(alloc); + + return ret; +} + +static int hwmem_open(struct inode *inode, struct file *file) +{ + struct hwmem_file *hwfile; + + hwfile = kzalloc(sizeof(struct hwmem_file), GFP_KERNEL); + if (hwfile == NULL) + return -ENOMEM; + + idr_init(&hwfile->idr); + mutex_init(&hwfile->lock); + file->private_data = hwfile; + + 
return 0; +} + +static int hwmem_ioctl_mmap(struct file *file, struct vm_area_struct *vma) +{ + int ret; + struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data; + struct hwmem_alloc *alloc; + + mutex_lock(&hwfile->lock); + + alloc = resolve_id(hwfile, (s32)vma->vm_pgoff << PAGE_SHIFT); + if (IS_ERR(alloc)) { + ret = PTR_ERR(alloc); + goto out; + } + + ret = hwmem_mmap(alloc, vma); + +out: + mutex_unlock(&hwfile->lock); + + return ret; +} + +static int hwmem_release_idr_for_each_wrapper(int id, void *ptr, void *data) +{ + hwmem_release((struct hwmem_alloc *)ptr); + + return 0; +} + +static int hwmem_release_fop(struct inode *inode, struct file *file) +{ + struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data; + + idr_for_each(&hwfile->idr, hwmem_release_idr_for_each_wrapper, NULL); + idr_remove_all(&hwfile->idr); + idr_destroy(&hwfile->idr); + + if (hwfile->fd_alloc) + hwmem_release(hwfile->fd_alloc); + + mutex_destroy(&hwfile->lock); + + kfree(hwfile); + + return 0; +} + +static long hwmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int ret = -ENOSYS; + struct hwmem_file *hwfile = (struct hwmem_file *)file->private_data; + + mutex_lock(&hwfile->lock); + + switch (cmd) { + case HWMEM_ALLOC_IOC: + { + struct hwmem_alloc_request req; + if (copy_from_user(&req, (void __user *)arg, + sizeof(struct hwmem_alloc_request))) + ret = -EFAULT; + else + ret = alloc(hwfile, &req); + } + break; + case HWMEM_ALLOC_FD_IOC: + { + struct hwmem_alloc_request req; + if (copy_from_user(&req, (void __user *)arg, + sizeof(struct hwmem_alloc_request))) + ret = -EFAULT; + else + ret = alloc_fd(hwfile, &req); + } + break; + case HWMEM_RELEASE_IOC: + ret = release(hwfile, (s32)arg); + break; + case HWMEM_SET_CPU_DOMAIN_IOC: + { + struct hwmem_set_domain_request req; + if (copy_from_user(&req, (void __user *)arg, + sizeof(struct hwmem_set_domain_request))) + ret = -EFAULT; + else + ret = set_cpu_domain(hwfile, &req); + } + break; + case 
HWMEM_SET_SYNC_DOMAIN_IOC: + { + struct hwmem_set_domain_request req; + if (copy_from_user(&req, (void __user *)arg, + sizeof(struct hwmem_set_domain_request))) + ret = -EFAULT; + else + ret = set_sync_domain(hwfile, &req); + } + break; + case HWMEM_PIN_IOC: + { + struct hwmem_pin_request req; + if (copy_from_user(&req, (void __user *)arg, + sizeof(struct hwmem_pin_request))) + ret = -EFAULT; + else + ret = pin(hwfile, &req); + if (ret == 0 && copy_to_user((void __user *)arg, &req, + sizeof(struct hwmem_pin_request))) + ret = -EFAULT; + } + break; + case HWMEM_UNPIN_IOC: + ret = unpin(hwfile, (s32)arg); + break; + case HWMEM_SET_ACCESS_IOC: + { + struct hwmem_set_access_request req; + if (copy_from_user(&req, (void __user *)arg, + sizeof(struct hwmem_set_access_request))) + ret = -EFAULT; + else + ret = set_access(hwfile, &req); + } + break; + case HWMEM_GET_INFO_IOC: + { + struct hwmem_get_info_request req; + if (copy_from_user(&req, (void __user *)arg, + sizeof(struct hwmem_get_info_request))) + ret = -EFAULT; + else + ret = get_info(hwfile, &req); + if (ret == 0 && copy_to_user((void __user *)arg, &req, + sizeof(struct hwmem_get_info_request))) + ret = -EFAULT; + } + break; + case HWMEM_EXPORT_IOC: + ret = export(hwfile, (s32)arg); + break; + case HWMEM_IMPORT_IOC: + ret = import(hwfile, (s32)arg); + break; + case HWMEM_IMPORT_FD_IOC: + ret = import_fd(hwfile, (s32)arg); + break; + } + + mutex_unlock(&hwfile->lock); + + return ret; +} + +static unsigned long hwmem_get_unmapped_area(struct file *file, + unsigned long addr, unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + /* + * pgoff will not be valid as it contains a buffer id (right shifted + * PAGE_SHIFT bits). To not confuse get_unmapped_area we'll not pass + * on file or pgoff. 
+ */ + return current->mm->get_unmapped_area(NULL, addr, len, 0, flags); +} + +int __init hwmem_ioctl_init(void) +{ + if (PAGE_SHIFT < 1 || PAGE_SHIFT > 30 || sizeof(size_t) != 4 || + sizeof(int) > 4 || sizeof(enum hwmem_alloc_flags) != 4 || + sizeof(enum hwmem_access) != 4 || + sizeof(enum hwmem_mem_type) != 4) { + dev_err(hwmem_device.this_device, "PAGE_SHIFT < 1 || PAGE_SHIFT" + " > 30 || sizeof(size_t) != 4 || sizeof(int) > 4 ||" + " sizeof(enum hwmem_alloc_flags) != 4 || sizeof(enum" + " hwmem_access) != 4 || sizeof(enum hwmem_mem_type)" + " != 4\n"); + return -ENOMSG; + } + if (PAGE_SHIFT > 15) + dev_warn(hwmem_device.this_device, "Due to the page size only" + " %u id:s per file instance are available\n", + ((u32)1 << (31 - PAGE_SHIFT)) - 1); + + return misc_register(&hwmem_device); +} + +void __exit hwmem_ioctl_exit(void) +{ + misc_deregister(&hwmem_device); +} diff --git a/drivers/misc/hwmem/hwmem-main.c b/drivers/misc/hwmem/hwmem-main.c new file mode 100644 index 00000000000..b91d99bc2be --- /dev/null +++ b/drivers/misc/hwmem/hwmem-main.c @@ -0,0 +1,726 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * Hardware memory driver, hwmem + * + * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>, + * Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/idr.h> +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/pid.h> +#include <linux/list.h> +#include <linux/hwmem.h> +#include <linux/debugfs.h> +#include <linux/uaccess.h> +#include <linux/io.h> +#include <linux/kallsyms.h> +#include <linux/vmalloc.h> +#include "cache_handler.h" + +#define S32_MAX 2147483647 + +struct hwmem_alloc_threadg_info { + struct list_head list; + + struct pid *threadg_pid; /* Ref counted */ + + enum hwmem_access access; +}; + +struct hwmem_alloc { + struct list_head list; + + atomic_t ref_cnt; + + enum hwmem_alloc_flags flags; + struct hwmem_mem_type_struct *mem_type; + + void *allocator_hndl; + phys_addr_t paddr; + void *kaddr; + size_t size; + s32 name; + + /* Access control */ + enum hwmem_access default_access; + struct list_head threadg_info_list; + + /* Cache handling */ + struct cach_buf cach_buf; + +#ifdef CONFIG_DEBUG_FS + /* Debug */ + void *creator; + pid_t creator_tgid; +#endif /* #ifdef CONFIG_DEBUG_FS */ +}; + +static struct platform_device *hwdev; + +static LIST_HEAD(alloc_list); +static DEFINE_IDR(global_idr); +static DEFINE_MUTEX(lock); + +static void vm_open(struct vm_area_struct *vma); +static void vm_close(struct vm_area_struct *vma); +static struct vm_operations_struct vm_ops = { + .open = vm_open, + .close = vm_close, +}; + +static void kunmap_alloc(struct hwmem_alloc *alloc); + +/* Helpers */ + +static void destroy_alloc_threadg_info( + struct hwmem_alloc_threadg_info *info) +{ + if (info->threadg_pid) + put_pid(info->threadg_pid); + + kfree(info); +} + +static void clean_alloc_threadg_info_list(struct hwmem_alloc *alloc) +{ + struct hwmem_alloc_threadg_info *info; + struct hwmem_alloc_threadg_info *tmp; + + list_for_each_entry_safe(info, tmp, 
&(alloc->threadg_info_list), + list) { + list_del(&info->list); + destroy_alloc_threadg_info(info); + } +} + +static enum hwmem_access get_access(struct hwmem_alloc *alloc) +{ + struct hwmem_alloc_threadg_info *info; + struct pid *my_pid; + bool found = false; + + my_pid = find_get_pid(task_tgid_nr(current)); + if (!my_pid) + return 0; + + list_for_each_entry(info, &(alloc->threadg_info_list), list) { + if (info->threadg_pid == my_pid) { + found = true; + break; + } + } + + put_pid(my_pid); + + if (found) + return info->access; + else + return alloc->default_access; +} + +static void clear_alloc_mem(struct hwmem_alloc *alloc) +{ + cach_set_domain(&alloc->cach_buf, HWMEM_ACCESS_WRITE, + HWMEM_DOMAIN_CPU, NULL); + + memset(alloc->kaddr, 0, alloc->size); +} + +static void destroy_alloc(struct hwmem_alloc *alloc) +{ + list_del(&alloc->list); + + if (alloc->name != 0) { + idr_remove(&global_idr, alloc->name); + alloc->name = 0; + } + + clean_alloc_threadg_info_list(alloc); + + kunmap_alloc(alloc); + + if (!IS_ERR_OR_NULL(alloc->allocator_hndl)) + alloc->mem_type->allocator_api.free( + alloc->mem_type->allocator_instance, + alloc->allocator_hndl); + + kfree(alloc); +} + +static int kmap_alloc(struct hwmem_alloc *alloc) +{ + int ret; + pgprot_t pgprot; + void *alloc_kaddr; + + alloc_kaddr = alloc->mem_type->allocator_api.get_alloc_kaddr( + alloc->mem_type->allocator_instance, alloc->allocator_hndl); + if (IS_ERR(alloc_kaddr)) + return PTR_ERR(alloc_kaddr); + + pgprot = PAGE_KERNEL; + cach_set_pgprot_cache_options(&alloc->cach_buf, &pgprot); + + ret = ioremap_page_range((unsigned long)alloc_kaddr, + (unsigned long)alloc_kaddr + alloc->size, alloc->paddr, pgprot); + if (ret < 0) { + dev_warn(&hwdev->dev, "Failed to map %#x - %#x", alloc->paddr, + alloc->paddr + alloc->size); + return ret; + } + + alloc->kaddr = alloc_kaddr; + + return 0; +} + +static void kunmap_alloc(struct hwmem_alloc *alloc) +{ + if (alloc->kaddr == NULL) + return; + + unmap_kernel_range((unsigned 
long)alloc->kaddr, alloc->size); + + alloc->kaddr = NULL; +} + +static struct hwmem_mem_type_struct *resolve_mem_type( + enum hwmem_mem_type mem_type) +{ + unsigned int i; + for (i = 0; i < hwmem_num_mem_types; i++) { + if (hwmem_mem_types[i].id == mem_type) + return &hwmem_mem_types[i]; + } + + return ERR_PTR(-ENOENT); +} + +/* HWMEM API */ + +struct hwmem_alloc *hwmem_alloc(size_t size, enum hwmem_alloc_flags flags, + enum hwmem_access def_access, enum hwmem_mem_type mem_type) +{ + int ret; + struct hwmem_alloc *alloc; + + if (hwdev == NULL) { + printk(KERN_ERR "HWMEM: Badly configured\n"); + return ERR_PTR(-ENOMSG); + } + + if (size == 0) + return ERR_PTR(-EINVAL); + + mutex_lock(&lock); + + size = PAGE_ALIGN(size); + + alloc = kzalloc(sizeof(struct hwmem_alloc), GFP_KERNEL); + if (alloc == NULL) { + ret = -ENOMEM; + goto alloc_alloc_failed; + } + + INIT_LIST_HEAD(&alloc->list); + atomic_inc(&alloc->ref_cnt); + alloc->flags = flags; + alloc->default_access = def_access; + INIT_LIST_HEAD(&alloc->threadg_info_list); +#ifdef CONFIG_DEBUG_FS + alloc->creator = __builtin_return_address(0); + alloc->creator_tgid = task_tgid_nr(current); +#endif + alloc->mem_type = resolve_mem_type(mem_type); + if (IS_ERR(alloc->mem_type)) { + ret = PTR_ERR(alloc->mem_type); + goto resolve_mem_type_failed; + } + + alloc->allocator_hndl = alloc->mem_type->allocator_api.alloc( + alloc->mem_type->allocator_instance, size); + if (IS_ERR(alloc->allocator_hndl)) { + ret = PTR_ERR(alloc->allocator_hndl); + goto allocator_failed; + } + + alloc->paddr = alloc->mem_type->allocator_api.get_alloc_paddr( + alloc->allocator_hndl); + alloc->size = alloc->mem_type->allocator_api.get_alloc_size( + alloc->allocator_hndl); + + cach_init_buf(&alloc->cach_buf, alloc->flags, alloc->size); + ret = kmap_alloc(alloc); + if (ret < 0) + goto kmap_alloc_failed; + cach_set_buf_addrs(&alloc->cach_buf, alloc->kaddr, alloc->paddr); + + list_add_tail(&alloc->list, &alloc_list); + + clear_alloc_mem(alloc); + + goto 
out; + +kmap_alloc_failed: +allocator_failed: +resolve_mem_type_failed: + destroy_alloc(alloc); +alloc_alloc_failed: + alloc = ERR_PTR(ret); + +out: + mutex_unlock(&lock); + + return alloc; +} +EXPORT_SYMBOL(hwmem_alloc); + +void hwmem_release(struct hwmem_alloc *alloc) +{ + mutex_lock(&lock); + + if (atomic_dec_and_test(&alloc->ref_cnt)) + destroy_alloc(alloc); + + mutex_unlock(&lock); +} +EXPORT_SYMBOL(hwmem_release); + +int hwmem_set_domain(struct hwmem_alloc *alloc, enum hwmem_access access, + enum hwmem_domain domain, struct hwmem_region *region) +{ + mutex_lock(&lock); + + cach_set_domain(&alloc->cach_buf, access, domain, region); + + mutex_unlock(&lock); + + return 0; +} +EXPORT_SYMBOL(hwmem_set_domain); + +int hwmem_pin(struct hwmem_alloc *alloc, struct hwmem_mem_chunk *mem_chunks, + u32 *mem_chunks_length) +{ + if (*mem_chunks_length < 1) { + *mem_chunks_length = 1; + return -ENOSPC; + } + + mutex_lock(&lock); + + mem_chunks[0].paddr = alloc->paddr; + mem_chunks[0].size = alloc->size; + *mem_chunks_length = 1; + + mutex_unlock(&lock); + + return 0; +} +EXPORT_SYMBOL(hwmem_pin); + +void hwmem_unpin(struct hwmem_alloc *alloc) +{ +} +EXPORT_SYMBOL(hwmem_unpin); + +static void vm_open(struct vm_area_struct *vma) +{ + atomic_inc(&((struct hwmem_alloc *)vma->vm_private_data)->ref_cnt); +} + +static void vm_close(struct vm_area_struct *vma) +{ + hwmem_release((struct hwmem_alloc *)vma->vm_private_data); +} + +int hwmem_mmap(struct hwmem_alloc *alloc, struct vm_area_struct *vma) +{ + int ret = 0; + unsigned long vma_size = vma->vm_end - vma->vm_start; + enum hwmem_access access; + mutex_lock(&lock); + + access = get_access(alloc); + + /* Check permissions */ + if ((!(access & HWMEM_ACCESS_WRITE) && + (vma->vm_flags & VM_WRITE)) || + (!(access & HWMEM_ACCESS_READ) && + (vma->vm_flags & VM_READ))) { + ret = -EPERM; + goto illegal_access; + } + + if (vma_size > alloc->size) { + ret = -EINVAL; + goto illegal_size; + } + + /* + * We don't want Linux to do anything 
(merging etc) with our VMAs as + * the offset is not necessarily valid + */ + vma->vm_flags |= VM_SPECIAL; + cach_set_pgprot_cache_options(&alloc->cach_buf, &vma->vm_page_prot); + vma->vm_private_data = (void *)alloc; + atomic_inc(&alloc->ref_cnt); + vma->vm_ops = &vm_ops; + + ret = remap_pfn_range(vma, vma->vm_start, alloc->paddr >> PAGE_SHIFT, + min(vma_size, (unsigned long)alloc->size), vma->vm_page_prot); + if (ret < 0) + goto map_failed; + + goto out; + +map_failed: + atomic_dec(&alloc->ref_cnt); +illegal_size: +illegal_access: + +out: + mutex_unlock(&lock); + + return ret; +} +EXPORT_SYMBOL(hwmem_mmap); + +void *hwmem_kmap(struct hwmem_alloc *alloc) +{ + void *ret; + + mutex_lock(&lock); + + ret = alloc->kaddr; + + mutex_unlock(&lock); + + return ret; +} +EXPORT_SYMBOL(hwmem_kmap); + +void hwmem_kunmap(struct hwmem_alloc *alloc) +{ +} +EXPORT_SYMBOL(hwmem_kunmap); + +int hwmem_set_access(struct hwmem_alloc *alloc, + enum hwmem_access access, pid_t pid_nr) +{ + int ret; + struct hwmem_alloc_threadg_info *info; + struct pid *pid; + bool found = false; + + pid = find_get_pid(pid_nr); + if (!pid) { + ret = -EINVAL; + goto error_get_pid; + } + + list_for_each_entry(info, &(alloc->threadg_info_list), list) { + if (info->threadg_pid == pid) { + found = true; + break; + } + } + + if (!found) { + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + ret = -ENOMEM; + goto error_alloc_info; + } + + info->threadg_pid = pid; + info->access = access; + + list_add_tail(&(info->list), &(alloc->threadg_info_list)); + } else { + info->access = access; + } + + return 0; + +error_alloc_info: + put_pid(pid); +error_get_pid: + return ret; +} +EXPORT_SYMBOL(hwmem_set_access); + +void hwmem_get_info(struct hwmem_alloc *alloc, u32 *size, + enum hwmem_mem_type *mem_type, enum hwmem_access *access) +{ + mutex_lock(&lock); + + if (size != NULL) + *size = alloc->size; + if (mem_type != NULL) + *mem_type = alloc->mem_type->id; + if (access != NULL) + *access = get_access(alloc); + + 
mutex_unlock(&lock); +} +EXPORT_SYMBOL(hwmem_get_info); + +s32 hwmem_get_name(struct hwmem_alloc *alloc) +{ + int ret = 0, name; + + mutex_lock(&lock); + + if (alloc->name != 0) { + ret = alloc->name; + goto out; + } + + while (true) { + if (idr_pre_get(&global_idr, GFP_KERNEL) == 0) { + ret = -ENOMEM; + goto pre_get_id_failed; + } + + ret = idr_get_new_above(&global_idr, alloc, 1, &name); + if (ret == 0) + break; + else if (ret != -EAGAIN) + goto get_id_failed; + } + + if (name > S32_MAX) { + ret = -ENOMSG; + goto overflow; + } + + alloc->name = name; + + ret = name; + goto out; + +overflow: + idr_remove(&global_idr, name); +get_id_failed: +pre_get_id_failed: + +out: + mutex_unlock(&lock); + + return ret; +} +EXPORT_SYMBOL(hwmem_get_name); + +struct hwmem_alloc *hwmem_resolve_by_name(s32 name) +{ + struct hwmem_alloc *alloc; + + mutex_lock(&lock); + + alloc = idr_find(&global_idr, name); + if (alloc == NULL) { + alloc = ERR_PTR(-EINVAL); + goto find_failed; + } + atomic_inc(&alloc->ref_cnt); + + goto out; + +find_failed: + +out: + mutex_unlock(&lock); + + return alloc; +} +EXPORT_SYMBOL(hwmem_resolve_by_name); + +/* Debug */ + +#ifdef CONFIG_DEBUG_FS + +static int debugfs_allocs_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos); + +static const struct file_operations debugfs_allocs_fops = { + .owner = THIS_MODULE, + .read = debugfs_allocs_read, +}; + +static int print_alloc(struct hwmem_alloc *alloc, char **buf, size_t buf_size) +{ + int ret; + char creator[KSYM_SYMBOL_LEN]; + int i; + + if (sprint_symbol(creator, (unsigned long)alloc->creator) < 0) + creator[0] = '\0'; + + for (i = 0; i < 2; i++) { + size_t buf_size_l; + if (i == 0) + buf_size_l = 0; + else + buf_size_l = buf_size; + + ret = snprintf(*buf, buf_size_l, + "%#x\n" + "\tSize: %u\n" + "\tMemory type: %u\n" + "\tName: %#x\n" + "\tReference count: %i\n" + "\tAllocation flags: %#x\n" + "\t$ settings: %#x\n" + "\tDefault access: %#x\n" + "\tPhysical address: %#x\n" + "\tKernel 
virtual address: %#x\n" + "\tCreator: %s\n" + "\tCreator thread group id: %u\n", + (unsigned int)alloc, alloc->size, alloc->mem_type->id, + alloc->name, atomic_read(&alloc->ref_cnt), + alloc->flags, alloc->cach_buf.cache_settings, + alloc->default_access, alloc->paddr, + (unsigned int)alloc->kaddr, creator, + alloc->creator_tgid); + if (ret < 0) + return -ENOMSG; + else if (ret + 1 > buf_size) + return -EINVAL; + } + + *buf += ret; + + return 0; +} + +static int debugfs_allocs_read(struct file *file, char __user *buf, + size_t count, loff_t *f_pos) +{ + /* + * We assume the supplied buffer and PAGE_SIZE is large enough to hold + * information about at least one alloc, if not no data will be + * returned. + */ + + int ret; + size_t i = 0; + struct hwmem_alloc *curr_alloc; + char *local_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + char *local_buf_pos = local_buf; + size_t available_space = min((size_t)PAGE_SIZE, count); + /* private_data is initialized to NULL in open which I assume is 0. */ + void **curr_pos = &file->private_data; + size_t bytes_read; + + if (local_buf == NULL) + return -ENOMEM; + + mutex_lock(&lock); + + list_for_each_entry(curr_alloc, &alloc_list, list) { + if (i++ < (size_t)*curr_pos) + continue; + + ret = print_alloc(curr_alloc, &local_buf_pos, available_space - + (size_t)(local_buf_pos - local_buf)); + if (ret == -EINVAL) /* No more room */ + break; + else if (ret < 0) + goto out; + + *curr_pos = (void *)i; + } + + bytes_read = (size_t)(local_buf_pos - local_buf); + + ret = copy_to_user(buf, local_buf, bytes_read); + if (ret < 0) + goto out; + + ret = bytes_read; + +out: + kfree(local_buf); + + mutex_unlock(&lock); + + return ret; +} + +static void init_debugfs(void) +{ + /* Hwmem is never unloaded so dropping the dentries is ok. 
*/ + struct dentry *debugfs_root_dir = debugfs_create_dir("hwmem", NULL); + (void)debugfs_create_file("allocs", 0444, debugfs_root_dir, 0, + &debugfs_allocs_fops); +} + +#endif /* #ifdef CONFIG_DEBUG_FS */ + +/* Module */ + +extern int hwmem_ioctl_init(void); + +static int __devinit hwmem_probe(struct platform_device *pdev) +{ + int ret; + + if (hwdev) { + dev_err(&pdev->dev, "Probed multiple times\n"); + return -EINVAL; + } + + hwdev = pdev; + + /* + * No need to flush the caches here. If we can keep track of the cache + * content then none of our memory will be in the caches, if we can't + * keep track of the cache content we always assume all our memory is + * in the caches. + */ + + ret = hwmem_ioctl_init(); + if (ret < 0) + dev_warn(&pdev->dev, "Failed to start hwmem-ioctl, continuing" + " anyway\n"); + +#ifdef CONFIG_DEBUG_FS + init_debugfs(); +#endif + + dev_info(&pdev->dev, "Probed OK\n"); + + return 0; +} + +static struct platform_driver hwmem_driver = { + .probe = hwmem_probe, + .driver = { + .name = "hwmem", + }, +}; + +static int __init hwmem_init(void) +{ + return platform_driver_register(&hwmem_driver); +} +subsys_initcall(hwmem_init); + +MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Hardware memory driver"); + diff --git a/drivers/misc/stm.c b/drivers/misc/stm.c new file mode 100644 index 00000000000..33bb26c27ca --- /dev/null +++ b/drivers/misc/stm.c @@ -0,0 +1,850 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Pierre Peiffer <pierre.peiffer@stericsson.com> for ST-Ericsson. + * Philippe Langlais <philippe.Langlais@stericsson.com> for ST-Ericsson. + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/io.h> +#include <linux/mm.h> +#include <linux/cdev.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/debugfs.h> +#include <trace/stm.h> + +/* STM Registers */ +#define STM_CR (stm.virtbase) +#define STM_MMC (stm.virtbase + 0x008) +#define STM_TER (stm.virtbase + 0x010) +#define STMPERIPHID0 (stm.virtbase + 0xFC0) +#define STMPERIPHID1 (stm.virtbase + 0xFC8) +#define STMPERIPHID2 (stm.virtbase + 0xFD0) +#define STMPERIPHID3 (stm.virtbase + 0xFD8) +#define STMPCELLID0 (stm.virtbase + 0xFE0) +#define STMPCELLID1 (stm.virtbase + 0xFE8) +#define STMPCELLID2 (stm.virtbase + 0xFF0) +#define STMPCELLID3 (stm.virtbase + 0xFF8) + +#define STM_CLOCK_SHIFT 6 +#define STM_CLOCK_MASK 0x1C0 + +/* Hardware mode for all sources */ +#define STM_MMC_DEFAULT CONFIG_STM_DEFAULT_MASTERS_MODES + +/* Max number of channels (multiple of 256) */ +#define STM_NUMBER_OF_CHANNEL CONFIG_STM_NUMBER_OF_CHANNEL + +/* # dynamically allocated channel with stm_trace_buffer */ +#define NB_KERNEL_DYNAMIC_CHANNEL 128 + +static struct stm_device { + const struct stm_platform_data *pdata; + void __iomem *virtbase; + /* Used to register the allocated channels */ + DECLARE_BITMAP(ch_bitmap, STM_NUMBER_OF_CHANNEL); +} stm; + +volatile struct stm_channel __iomem *stm_channels; + +static struct cdev cdev; +static struct class *stm_class; +static int stm_major; + +static DEFINE_SPINLOCK(lock); + +/* Middle value for clock divisor */ +static enum clock_div stm_clockdiv = STM_CLOCK_DIV8; + +/* Default value for STM output connection */ +static enum stm_connection_type stm_connection = STM_DEFAULT_CONNECTION; + +#define STM_BUFSIZE 256 +struct channel_data { + DECLARE_BITMAP(bitmap, STM_NUMBER_OF_CHANNEL); + int numero; + spinlock_t lock; + u8 data_buffer[STM_BUFSIZE]; +}; + +static u64 stm_printk_buf[1024/sizeof(u64)]; +static arch_spinlock_t stm_buf_lock = + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; + 
+static char *mipi60 = "none"; +module_param(mipi60, charp, S_IRUGO); +MODULE_PARM_DESC(mipi60, "STM Trace to output on probe2 of mipi60 " + "('none' or 'ape' or 'modem')"); + +static char *mipi34 = "none"; +module_param(mipi34, charp, S_IRUGO); +MODULE_PARM_DESC(mipi34, "STM Trace to output on mipi34 " + "('none' or 'ape' or 'modem')"); + +static char *microsd = "none"; +module_param(microsd, charp, S_IRUGO); +MODULE_PARM_DESC(microsd, "STM Trace to output on SD card connector " + "('none' or 'ape' or 'modem')"); + +static unsigned int stm_ter; +module_param(stm_ter, uint, 0); +MODULE_PARM_DESC(stm_ter, "Value for STM_TER (trace control register). " + "Should be set by user as environment variable stm.stm_ter"); + +#define IS_APE_ON_MIPI34 (mipi34 && !strcmp(mipi34, "ape")) +#define IS_APE_ON_MIPI60 (mipi60 && !strcmp(mipi60, "ape")) +#define IS_APE_ON_MICROSD (microsd && !strcmp(microsd, "ape")) +#define IS_MODEM_ON_MICROSD (microsd && !strcmp(microsd, "modem")) + +static int stm_connection_set(void *data, u64 val); + +int stm_alloc_channel(int offset) +{ + int channel; + + /* Look for a free channel from offset */ + do { + channel = find_next_zero_bit(stm.ch_bitmap, + STM_NUMBER_OF_CHANNEL, offset); + } while ((channel < STM_NUMBER_OF_CHANNEL) + && test_and_set_bit(channel, stm.ch_bitmap)); + return channel; +} +EXPORT_SYMBOL(stm_alloc_channel); + +void stm_free_channel(int channel) +{ + clear_bit(channel, stm.ch_bitmap); +} +EXPORT_SYMBOL(stm_free_channel); + +static int stm_get_channel(struct channel_data *ch_data, int __user *arg) +{ + int channel, err; + + channel = stm_alloc_channel(0); + if (channel < STM_NUMBER_OF_CHANNEL) { + /* One free found ! 
*/ + err = put_user(channel, arg); + if (err) + stm_free_channel(channel); + else + /* Register it in the context of the file */ + set_bit(channel, ch_data->bitmap); + } else + err = -ENOMEM; + return err; +} + +static int stm_release_channel(struct channel_data *ch_data, int channel) +{ + if ((channel < 0) || (channel >= STM_NUMBER_OF_CHANNEL)) + return -EINVAL; + stm_free_channel(channel); + clear_bit(channel, ch_data->bitmap); + return 0; +} + +/* + * Trace a buffer on a given channel + * with auto time stamping on last byte(s) only + */ +int stm_trace_buffer_onchannel(int channel, + const void *data, size_t length) +{ + int i, mod64; + volatile struct stm_channel __iomem *pch; + + if (channel >= STM_NUMBER_OF_CHANNEL || !stm_channels) + return 0; + + pch = &stm_channels[channel]; + + /* Align data pointer to u64 & time stamp last byte(s) */ + mod64 = (int)data & 7; + i = length - 8 + mod64; + switch (mod64) { + case 0: + if (i) + pch->no_stamp64 = *(u64 *)data; + else { + pch->stamp64 = *(u64 *)data; + return length; + } + data += 8; + break; + case 1: + pch->no_stamp8 = *(u8 *)data; + pch->no_stamp16 = *(u16 *)(data+1); + if (i) + pch->no_stamp32 = *(u32 *)(data+3); + else { + pch->stamp32 = *(u32 *)(data+3); + return length; + } + data += 7; + break; + case 2: + pch->no_stamp16 = *(u16 *)data; + if (i) + pch->no_stamp32 = *(u32 *)(data+2); + else { + pch->stamp32 = *(u32 *)(data+2); + return length; + } + data += 6; + break; + case 3: + pch->no_stamp8 = *(u8 *)data; + if (i) + pch->no_stamp32 = *(u32 *)(data+1); + else { + pch->stamp32 = *(u32 *)(data+1); + return length; + } + data += 5; + break; + case 4: + if (i) + pch->no_stamp32 = *(u32 *)data; + else { + pch->stamp32 = *(u32 *)data; + return length; + } + data += 4; + break; + case 5: + pch->no_stamp8 = *(u8 *)data; + if (i) + pch->no_stamp16 = *(u16 *)(data+1); + else { + pch->stamp16 = *(u16 *)(data+1); + return length; + } + data += 3; + break; + case 6: + if (i) + pch->no_stamp16 = *(u16 *)data; + 
else { + pch->stamp16 = *(u16 *)data; + return length; + } + data += 2; + break; + case 7: + if (i) + pch->no_stamp8 = *(u8 *)data; + else { + pch->stamp8 = *(u8 *)data; + return length; + } + data++; + break; + } + for (;;) { + if (i > 8) { + pch->no_stamp64 = *(u64 *)data; + data += 8; + i -= 8; + } else if (i == 8) { + pch->stamp64 = *(u64 *)data; + break; + } else if (i > 4) { + pch->no_stamp32 = *(u32 *)data; + data += 4; + i -= 4; + } else if (i == 4) { + pch->stamp32 = *(u32 *)data; + break; + } else if (i > 2) { + pch->no_stamp16 = *(u16 *)data; + data += 2; + i -= 2; + } else if (i == 2) { + pch->stamp16 = *(u16 *)data; + break; + } else { + pch->stamp8 = *(u8 *)data; + break; + } + } + return length; +} +EXPORT_SYMBOL(stm_trace_buffer_onchannel); + +static int stm_open(struct inode *inode, struct file *file) +{ + struct channel_data *channel_data; + int retval = 0; + + channel_data = kzalloc(sizeof(struct channel_data), GFP_KERNEL); + if (channel_data == NULL) + return -ENOMEM; + + spin_lock_init(&channel_data->lock); + channel_data->numero = -1; /* Channel not yet allocated */ + file->private_data = channel_data; + + /* + * Check if microsd is selected as trace interface + * and enable corresponding pins muxing. 
+ */ + if (IS_MODEM_ON_MICROSD) + retval = stm_connection_set(NULL, STM_STE_MODEM_ON_MICROSD); + else if (IS_APE_ON_MICROSD) + retval = stm_connection_set(NULL, STM_STE_APE_ON_MICROSD); + + if (retval) + pr_alert("stm_open: failed to connect STM output\n"); + + return retval; +} + +static int stm_release(struct inode *inode, struct file *file) +{ + struct channel_data *channel; + + channel = (struct channel_data *)file->private_data; + + /* Free allocated channel if necessary */ + if (channel->numero != -1) + stm_free_channel(channel->numero); + + bitmap_andnot(stm.ch_bitmap, stm.ch_bitmap, + channel->bitmap, STM_NUMBER_OF_CHANNEL); + + kfree(channel); + return 0; +} + +static ssize_t stm_write(struct file *file, const char __user *buf, + size_t size, loff_t *off) +{ + struct channel_data *channel = file->private_data; + + /* Alloc channel at first write */ + if (channel->numero == -1) { + channel->numero = stm_alloc_channel(0); + if (channel->numero > STM_NUMBER_OF_CHANNEL) + return -ENOMEM; + } + + if (size > STM_BUFSIZE) + size = STM_BUFSIZE; + + spin_lock(&channel->lock); + + if (copy_from_user + (channel->data_buffer, (void __user *) buf, size)) { + spin_unlock(&channel->lock); + return -EFAULT; + } + size = stm_trace_buffer_onchannel(channel->numero, + channel->data_buffer, size); + + spin_unlock(&channel->lock); + + return size; +} + +static int stm_mmap(struct file *file, struct vm_area_struct *vma) +{ + /* + * Don't allow a mapping that covers more than the STM channels + */ + if ((vma->vm_end - vma->vm_start) > + STM_NUMBER_OF_CHANNEL*sizeof(struct stm_channel)) + return -EINVAL; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + if (io_remap_pfn_range(vma, vma->vm_start, + stm.pdata->channels_phys_base>>PAGE_SHIFT, + STM_NUMBER_OF_CHANNEL*sizeof(struct stm_channel), + vma->vm_page_prot)) + return -EAGAIN; + + return 0; +} + +/* Enable the trace for given sources (bitfield) */ +static void stm_enable_src(unsigned int v) +{ + unsigned int 
cr_val; + spin_lock(&lock); + cr_val = readl(STM_CR); + cr_val &= ~STM_CLOCK_MASK; + writel(cr_val|(stm_clockdiv<<STM_CLOCK_SHIFT), STM_CR); + /* + * If the kernel argument stm_ter has been set by the boot loader + * all calls to stm_enable_src will be ignored + */ + v = stm_ter ? stm_ter : v; + writel(v, STM_TER); + spin_unlock(&lock); +} + +/* Disable all sources */ +static void stm_disable_src(void) +{ + writel(0x0, STM_CR); /* stop clock */ + writel(0x0, STM_TER); /* Disable cores */ +} + +/* Set clock speed */ +static int stm_set_ckdiv(enum clock_div v) +{ + unsigned int val; + + spin_lock(&lock); + val = readl(STM_CR); + val &= ~STM_CLOCK_MASK; + writel(val | ((v << STM_CLOCK_SHIFT) & STM_CLOCK_MASK), STM_CR); + spin_unlock(&lock); + stm_clockdiv = v; + + return 0; +} + +/* Return the control register */ +static inline unsigned int stm_get_cr(void) +{ + return readl(STM_CR); +} + +/* + * Set Trace MODE lossless/lossy (Software/Hardware) + * each bit represent the corresponding mode of this source + */ +static inline void stm_set_modes(unsigned int modes) +{ + writel(modes, STM_MMC); +} + +/* Get Trace MODE lossless/lossy (Software/Hardware) + * each bit represent the corresponding mode of this source */ +static inline unsigned int stm_get_modes(void) +{ + return readl(STM_MMC); +} + +/* Count # of free channels */ +static int stm_nb_free_channels(void) +{ + int nb_channels, offset; + + nb_channels = 0; + offset = 0; + for (;;) { + offset = find_next_zero_bit(stm.ch_bitmap, + STM_NUMBER_OF_CHANNEL, offset); + if (offset == STM_NUMBER_OF_CHANNEL) + break; + offset++; + nb_channels++; + } + return nb_channels; +} + +static long stm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int err = 0; + struct channel_data *channel = file->private_data; + + switch (cmd) { + + case STM_CONNECTION: + if (stm.pdata->stm_connection) + stm.pdata->stm_connection(arg); + stm_connection = arg; + break; + + case STM_DISABLE: + stm_disable_src(); + break; + + 
case STM_GET_NB_MAX_CHANNELS: + err = put_user(STM_NUMBER_OF_CHANNEL, (unsigned int *)arg); + break; + + case STM_GET_NB_FREE_CHANNELS: + err = put_user(stm_nb_free_channels(), (unsigned int *)arg); + break; + + case STM_GET_CHANNEL_NO: + err = put_user(channel->numero, (unsigned int *)arg); + break; + + case STM_SET_CLOCK_DIV: + err = stm_set_ckdiv((enum clock_div) arg); + break; + + case STM_SET_MODE: + stm_set_modes(arg); + break; + + case STM_GET_MODE: + err = put_user(stm_get_modes(), (unsigned int *)arg); + break; + + case STM_GET_CTRL_REG: + err = put_user(stm_get_cr(), (unsigned int *)arg); + break; + + case STM_ENABLE_SRC: + stm_enable_src(arg); + break; + + case STM_GET_FREE_CHANNEL: + err = stm_get_channel(channel, (int *)arg); + break; + + case STM_RELEASE_CHANNEL: + err = stm_release_channel(channel, arg); + break; + + default: + err = -EINVAL; + break; + } + + return err; +} + +/* + * Trace a buffer on a dynamically allocated channel + * with auto time stamping on the first byte(s) only + * Dynamic channel number >= + * STM_NUMBER_OF_CHANNEL - NB_KERNEL_DYNAMIC_CHANNEL + */ +int stm_trace_buffer(const void *data, size_t length) +{ + int channel; + + channel = stm_alloc_channel(STM_NUMBER_OF_CHANNEL + - NB_KERNEL_DYNAMIC_CHANNEL); + if (channel < STM_NUMBER_OF_CHANNEL) { + length = stm_trace_buffer_onchannel(channel, data, length); + stm_free_channel(channel); + return length; + } + return 0; +} +EXPORT_SYMBOL(stm_trace_buffer); + +static const struct file_operations stm_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = stm_ioctl, + .open = stm_open, + .llseek = no_llseek, + .write = stm_write, + .release = stm_release, + .mmap = stm_mmap, +}; + +/* + * Init and deinit driver + */ + +static int __devinit stm_probe(struct platform_device *pdev) +{ + int retval = 0; + + if (!pdev || !pdev->dev.platform_data) { + pr_alert("No device/platform_data found on STM driver\n"); + return -ENODEV; + } + + stm.pdata = pdev->dev.platform_data; + + 
cdev_init(&cdev, &stm_fops); + cdev.owner = THIS_MODULE; + + stm_channels = + ioremap_nocache(stm.pdata->channels_phys_base, + STM_NUMBER_OF_CHANNEL*sizeof(*stm_channels)); + if (stm_channels == NULL) { + dev_err(&pdev->dev, "could not remap STM Msg register\n"); + return -ENODEV; + } + + stm.virtbase = ioremap_nocache(stm.pdata->regs_phys_base, SZ_4K); + if (stm.virtbase == NULL) { + retval = -EIO; + dev_err(&pdev->dev, "could not remap STM Register\n"); + goto err_channels; + } + + retval = cdev_add(&cdev, MKDEV(stm_major, 0), 1); + if (retval) { + dev_err(&pdev->dev, "chardev registration failed\n"); + goto err_channels; + } + + if (IS_ERR(device_create(stm_class, &pdev->dev, + MKDEV(stm_major, 0), NULL, STM_DEV_NAME))) + dev_err(&pdev->dev, "can't create device\n"); + + /* Check chip IDs if necessary */ + if (stm.pdata->id_mask) { + u32 periph_id, cell_id; + + periph_id = (readb(STMPERIPHID3)<<24) + + (readb(STMPERIPHID2)<<16) + + (readb(STMPERIPHID1)<<8) + + readb(STMPERIPHID0); + cell_id = (readb(STMPCELLID3)<<24) + + (readb(STMPCELLID2)<<16) + + (readb(STMPCELLID1)<<8) + + readb(STMPCELLID0); + /* Only warns if it isn't a ST-Ericsson supported one */ + if ((periph_id & stm.pdata->id_mask) != 0x00080dec || + cell_id != 0xb105f00d) { + dev_warn(&pdev->dev, "STM-Trace IC not compatible\n"); + dev_warn(&pdev->dev, "periph_id=%x\n", periph_id); + dev_warn(&pdev->dev, "pcell_id=%x\n", cell_id); + } + } + + /* Reserve channels if necessary */ + if (stm.pdata->channels_reserved_sz) { + int i; + + for (i = 0; i < stm.pdata->channels_reserved_sz; i++) { + set_bit(stm.pdata->channels_reserved[i], + stm.ch_bitmap); + } + } + /* Reserve kernel trace channels on demand */ +#ifdef CONFIG_STM_PRINTK + set_bit(CONFIG_STM_PRINTK_CHANNEL, stm.ch_bitmap); +#endif +#ifdef CONFIG_STM_FTRACE + set_bit(CONFIG_STM_FTRACE_CHANNEL, stm.ch_bitmap); +#endif +#ifdef CONFIG_STM_CTX_SWITCH + set_bit(CONFIG_STM_CTX_SWITCH_CHANNEL, stm.ch_bitmap); +#endif +#ifdef CONFIG_STM_WAKEUP + 
set_bit(CONFIG_STM_WAKEUP_CHANNEL, stm.ch_bitmap); +#endif +#ifdef CONFIG_STM_STACK_TRACE + set_bit(CONFIG_STM_STACK_TRACE_CHANNEL, stm.ch_bitmap); +#endif +#ifdef CONFIG_STM_TRACE_PRINTK + set_bit(CONFIG_STM_TRACE_PRINTK_CHANNEL, stm.ch_bitmap); + set_bit(CONFIG_STM_TRACE_BPRINTK_CHANNEL, stm.ch_bitmap); +#endif + + /* Check kernel's environment parameters first */ + if (IS_APE_ON_MIPI34) + stm_connection = STM_STE_APE_ON_MIPI34_NONE_ON_MIPI60; + else if (IS_APE_ON_MIPI60) + stm_connection = STM_STE_MODEM_ON_MIPI34_APE_ON_MIPI60; + + /* Apply parameters to driver */ + if (stm.pdata->stm_connection) { + retval = stm.pdata->stm_connection(stm_connection); + if (retval) { + dev_err(&pdev->dev, "failed to connect STM output\n"); + goto err_channels; + } + } + + /* Enable STM Masters given in pdata */ + if (stm.pdata->masters_enabled) + stm_enable_src(stm.pdata->masters_enabled); + stm_set_modes(STM_MMC_DEFAULT); /* Set all sources in HW mode */ + + dev_info(&pdev->dev, "STM-Trace driver probed successfully\n"); + stm_printk("STM-Trace driver initialized\n"); + return 0; + +err_channels: + iounmap(stm_channels); + return retval; +} + +static int __devexit stm_remove(struct platform_device *pdev) +{ + device_destroy(stm_class, MKDEV(stm_major, 0)); + cdev_del(&cdev); + + if (stm.pdata->stm_connection) + (void) stm.pdata->stm_connection(STM_DISCONNECT); + + stm_disable_src(); + iounmap(stm.virtbase); + iounmap(stm_channels); + + return 0; +} + +int stm_printk(const char *fmt, ...) 
+{ + int ret; + size_t size; + va_list args; + + va_start(args, fmt); + arch_spin_lock(&stm_buf_lock); + size = vscnprintf((char *)stm_printk_buf, + sizeof(stm_printk_buf), fmt, args); + ret = stm_trace_buffer(stm_printk_buf, size); + arch_spin_unlock(&stm_buf_lock); + va_end(args); + return ret; +} +EXPORT_SYMBOL(stm_printk); + +/* + * Debugfs interface + */ + +static int stm_connection_show(void *data, u64 *val) +{ + *val = stm_connection; + return 0; +} + +static int stm_connection_set(void *data, u64 val) +{ + int retval = 0; + + if (stm.pdata->stm_connection) { + stm_connection = val; + retval = stm.pdata->stm_connection(val); + } + return retval; +} + +DEFINE_SIMPLE_ATTRIBUTE(stm_connection_fops, stm_connection_show, + stm_connection_set, "%llu\n"); + +static int stm_clockdiv_show(void *data, u64 *val) +{ + *val = stm_clockdiv; + return 0; +} + +static int stm_clockdiv_set(void *data, u64 val) +{ + stm_set_ckdiv(val); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(stm_clockdiv_fops, stm_clockdiv_show, + stm_clockdiv_set, "%llu\n"); + +static int stm_masters_enable_show(void *data, u64 *val) +{ + *val = readl(STM_TER); + return 0; +} + +static int stm_masters_enable_set(void *data, u64 val) +{ + stm_enable_src(val); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(stm_masters_enable_fops, stm_masters_enable_show, + stm_masters_enable_set, "%08llx\n"); + +static int stm_masters_modes_show(void *data, u64 *val) +{ + *val = stm_get_modes(); + return 0; +} + +static int stm_masters_modes_set(void *data, u64 val) +{ + stm_set_modes(val); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(stm_masters_modes_fops, stm_masters_modes_show, + stm_masters_modes_set, "%08llx\n"); + +/* Count # of free channels */ +static int stm_free_channels_show(void *data, u64 *val) +{ + *val = stm_nb_free_channels(); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(stm_free_channels_fops, stm_free_channels_show, + NULL, "%lld\n"); + +static __init int stm_init_debugfs(void) +{ + struct dentry *d_stm; + + d_stm = 
debugfs_create_dir(STM_DEV_NAME, NULL); + if (!d_stm) + return -ENOMEM; + + (void) debugfs_create_file("connection", S_IRUGO | S_IWUGO, d_stm, + NULL, &stm_connection_fops); + (void) debugfs_create_file("clockdiv", S_IRUGO | S_IWUGO, d_stm, + NULL, &stm_clockdiv_fops); + (void) debugfs_create_file("masters_enable", S_IRUGO | S_IWUGO, d_stm, + NULL, &stm_masters_enable_fops); + (void) debugfs_create_file("masters_modes", S_IRUGO | S_IWUGO, d_stm, + NULL, &stm_masters_modes_fops); + (void) debugfs_create_file("free_channels", S_IRUGO, d_stm, + NULL, &stm_free_channels_fops); + return 0; +} +fs_initcall(stm_init_debugfs); + +static struct platform_driver stm_driver = { + .probe = stm_probe, + .remove = __devexit_p(stm_remove), + .driver = { + .name = STM_DEV_NAME, + .owner = THIS_MODULE, + } +}; + +static int __init stm_init(void) +{ + int retval; + dev_t dev; + + stm_class = class_create(THIS_MODULE, STM_DEV_NAME); + if (IS_ERR(stm_class)) { + pr_err("stm: can't register stm class\n"); + return PTR_ERR(stm_class); + } + + retval = alloc_chrdev_region(&dev, 0, 1, STM_DEV_NAME); + if (retval) { + pr_err("stm: can't register character device\n"); + class_destroy(stm_class); + return retval; + } + stm_major = MAJOR(dev); + return platform_driver_register(&stm_driver); +} + +static void __exit stm_exit(void) +{ + platform_driver_unregister(&stm_driver); + unregister_chrdev_region(MKDEV(stm_major, 0), 1); + class_destroy(stm_class); +} + +arch_initcall(stm_init); /* STM init ASAP need to wait GPIO init */ +module_exit(stm_exit); + +MODULE_AUTHOR("Paul Ghaleb - ST Microelectronics"); +MODULE_AUTHOR("Pierre Peiffer - ST-Ericsson"); +MODULE_AUTHOR("Philippe Langlais - ST-Ericsson"); +MODULE_DESCRIPTION("System Trace Module driver"); +MODULE_ALIAS("stm"); +MODULE_ALIAS("stm-trace"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index dabec556ebb..4c1b59aff9d 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ 
-107,6 +107,7 @@ struct mmc_blk_data { unsigned int part_curr; struct device_attribute force_ro; struct device_attribute power_ro_lock; + struct device_attribute power_ro_lock_legacy; int area_type; }; @@ -167,6 +168,87 @@ static void mmc_blk_put(struct mmc_blk_data *md) mutex_unlock(&open_lock); } +#define EXT_CSD_BOOT_WP_PWR_WP_TEXT "pwr_ro" +#define EXT_CSD_BOOT_WP_PERM_WP_TEXT "perm_ro" +#define EXT_CSD_BOOT_WP_WP_DISABLED_TEXT "rw" +static ssize_t boot_partition_ro_lock_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret; + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); + struct mmc_card *card = md->queue.card; + const char *out_text; + + if (card->ext_csd.boot_ro_lock + & EXT_CSD_BOOT_WP_B_PERM_WP_EN) + out_text = EXT_CSD_BOOT_WP_PERM_WP_TEXT; + else if (card->ext_csd.boot_ro_lock + & EXT_CSD_BOOT_WP_B_PWR_WP_EN) + out_text = EXT_CSD_BOOT_WP_PWR_WP_TEXT; + else + out_text = EXT_CSD_BOOT_WP_WP_DISABLED_TEXT; + + ret = snprintf(buf, PAGE_SIZE, "%s\n", out_text); + + return ret; +} + +static ssize_t boot_partition_ro_lock_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int ret; + struct mmc_blk_data *md, *part_md; + struct mmc_card *card; + u8 set = 0; + + md = mmc_blk_get(dev_to_disk(dev)); + card = md->queue.card; + + if (!strncmp(buf, EXT_CSD_BOOT_WP_PWR_WP_TEXT, + strlen(EXT_CSD_BOOT_WP_PWR_WP_TEXT))) + set = EXT_CSD_BOOT_WP_B_PWR_WP_EN; + else if (!strncmp(buf, EXT_CSD_BOOT_WP_PERM_WP_TEXT, + strlen(EXT_CSD_BOOT_WP_PERM_WP_TEXT))) + set = EXT_CSD_BOOT_WP_B_PERM_WP_EN; + + if (set) { + mmc_claim_host(card->host); + + ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_BOOT_WP, + set, + card->ext_csd.part_time); + if (ret) + pr_err("Boot Partition Lock failed: %d", ret); + else + card->ext_csd.boot_ro_lock = set; + + mmc_release_host(card->host); + + if (!ret) { + pr_info("%s: Locking boot partition " + "%s", + md->disk->disk_name, + buf); + set_disk_ro(md->disk, 1); + 
+ list_for_each_entry(part_md, &md->part, part) + if (part_md->area_type == + MMC_BLK_DATA_AREA_BOOT) { + pr_info("%s: Locking boot partition " + "%s", + part_md->disk->disk_name, + buf); + set_disk_ro(part_md->disk, 1); + } + } + } + ret = count; + + mmc_blk_put(md); + return ret; +} + static ssize_t power_ro_lock_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1411,6 +1493,14 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; + /* + * We must make sure we have not claimed the host before + * doing a flush to prevent deadlock, thus we check if + * the host needs a resume first. + */ + if (mmc_host_needs_resume(card->host)) + mmc_resume_host_sync(card->host); + if (req && !mq->mqrq_prev->req) /* claim host only for the first request */ mmc_claim_host(card->host); @@ -1654,9 +1744,12 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md) if (md->disk->flags & GENHD_FL_UP) { device_remove_file(disk_to_dev(md->disk), &md->force_ro); if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && - card->ext_csd.boot_ro_lockable) + card->ext_csd.boot_ro_lockable) { device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock); + device_remove_file(disk_to_dev(md->disk), + &md->power_ro_lock_legacy); + } /* Stop new requests from getting into the queue */ del_gendisk(md->disk); @@ -1716,9 +1809,24 @@ static int mmc_add_disk(struct mmc_blk_data *md) &md->power_ro_lock); if (ret) goto power_ro_lock_fail; + + /* Legacy mode */ + mode = S_IRUGO | S_IWUSR; + + md->power_ro_lock_legacy.show = boot_partition_ro_lock_show; + md->power_ro_lock_legacy.store = boot_partition_ro_lock_store; + sysfs_attr_init(&md->power_ro_lock_legacy.attr); + md->power_ro_lock_legacy.attr.mode = mode; + md->power_ro_lock_legacy.attr.name = "ro_lock"; + ret = device_create_file(disk_to_dev(md->disk), + &md->power_ro_lock_legacy); + if (ret) + goto power_ro_lock_fail_legacy; } return ret; 
+power_ro_lock_fail_legacy: + device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock); power_ro_lock_fail: device_remove_file(disk_to_dev(md->disk), &md->force_ro); force_ro_fail: diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index 759714ed6be..6622f2e6e05 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c @@ -1253,6 +1253,130 @@ static int mmc_test_align_multi_read(struct mmc_test_card *test) return 0; } + +/* helper function for various address alignment and sg length alignment */ +static int mmc_test_align_multi(struct mmc_test_card *test, bool do_write, + struct scatterlist *sg, + u32 *sizes, int sg_len, int offset) +{ + int ret, i; + unsigned int size; + u32 buf_off; + u32 sg_size; + + if (test->card->host->max_blk_count == 1) + return RESULT_UNSUP_HOST; + + size = PAGE_SIZE * 2; + size = min(size, test->card->host->max_req_size); + size = min(size, test->card->host->max_seg_size); + size = min(size, test->card->host->max_blk_count * 512); + size -= offset; + size -= size % 512; + + if (size < 1024) + return RESULT_UNSUP_HOST; + + for (i = 0, sg_size = 0; + i < sg_len && sg_size + sizes[i] < size; i++) + sg_size += sizes[i]; + + if (sg_size < size) + sizes[i-1] += size - sg_size; + sg_len = i; + + sg_init_table(sg, sg_len); + for (i = 0, buf_off = offset; i < sg_len; i++) { + sg_set_buf(&sg[i], test->buffer + buf_off, sizes[i]); + buf_off += sizes[i]; + } + + ret = mmc_test_transfer(test, sg, sg_len, 0, size/512, 512, do_write); + if (ret) + return ret; + + return 0; +} + +static int mmc_test_align_length_32(struct mmc_test_card *test, bool do_write) +{ + u32 sizes[] = {512, 32*1, 32*2, 32*3, 32*4, 32*5, 32*6, 32*7, + 32*8, 32*9, 32*10, 32*11, 32*12, 32*13, 2048}; + struct scatterlist sg[ARRAY_SIZE(sizes)]; + + return mmc_test_align_multi(test, do_write, sg, sizes, + ARRAY_SIZE(sg), 0); +} + +static int mmc_test_align_length_4(struct mmc_test_card *test, bool do_write) +{ + u32 sizes[] = {512, 4*1, 4*2, 
4*3, 4*4, 4*5, 4*6, 4*7, + 4*8, 4*9, 520, 1040, 2080}; + struct scatterlist sg[ARRAY_SIZE(sizes)]; + + return mmc_test_align_multi(test, do_write, sg, sizes, + ARRAY_SIZE(sg), 0); +} + +static int mmc_test_align_length_4_write(struct mmc_test_card *test) +{ + bool do_write = true; + return mmc_test_align_length_4(test, do_write); +} + +static int mmc_test_align_length_4_read(struct mmc_test_card *test) +{ + bool do_write = false; + return mmc_test_align_length_4(test, do_write); +} + +static int mmc_test_align_length_32_write(struct mmc_test_card *test) +{ + bool do_write = true; + return mmc_test_align_length_32(test, do_write); +} + +static int mmc_test_align_length_32_read(struct mmc_test_card *test) +{ + bool do_write = false; + return mmc_test_align_length_32(test, do_write); +} + +/* helper function for testing address alignment */ +static int mmc_test_align_address(struct mmc_test_card *test, bool do_write, + u32 offset) +{ + u32 sizes[] = {512, 512, 1024, 1024, 2048}; + struct scatterlist sg[ARRAY_SIZE(sizes)]; + + return mmc_test_align_multi(test, do_write, sg, + sizes, ARRAY_SIZE(sg), offset); +} + +static int mmc_test_align_address_4_write(struct mmc_test_card *test) +{ + bool do_write = true; + return mmc_test_align_address(test, do_write, 4); +} + +static int mmc_test_align_address_4_read(struct mmc_test_card *test) +{ + bool do_write = false; + return mmc_test_align_address(test, do_write, 4); +} + +static int mmc_test_align_address_32_write(struct mmc_test_card *test) +{ + bool do_write = true; + return mmc_test_align_address(test, do_write, 32); +} + +static int mmc_test_align_address_32_read(struct mmc_test_card *test) +{ + bool do_write = false; + return mmc_test_align_address(test, do_write, 32); +} + static int mmc_test_xfersize_write(struct mmc_test_card *test) { int ret; @@ -2451,6 +2575,62 @@ static const struct mmc_test_case mmc_test_cases[] = { }, { + .name = "4 bytes aligned sg-element length write", + .prepare = mmc_test_prepare_write, + 
.run = mmc_test_align_length_4_write, + .cleanup = mmc_test_cleanup, + }, + + { + .name = "4 bytes aligned sg-element length read", + .prepare = mmc_test_prepare_read, + .run = mmc_test_align_length_4_read, + .cleanup = mmc_test_cleanup, + }, + + { + .name = "32 bytes aligned sg-element length write", + .prepare = mmc_test_prepare_write, + .run = mmc_test_align_length_32_write, + .cleanup = mmc_test_cleanup, + }, + + { + .name = "32 bytes aligned sg-element length read", + .prepare = mmc_test_prepare_read, + .run = mmc_test_align_length_32_read, + .cleanup = mmc_test_cleanup, + }, + + { + .name = "4 bytes aligned sg-element address write", + .prepare = mmc_test_prepare_write, + .run = mmc_test_align_address_4_write, + .cleanup = mmc_test_cleanup, + }, + + { + .name = "4 bytes aligned sg-element address read", + .prepare = mmc_test_prepare_read, + .run = mmc_test_align_address_4_read, + .cleanup = mmc_test_cleanup, + }, + + { + .name = "32 bytes aligned sg-element address write", + .prepare = mmc_test_prepare_write, + .run = mmc_test_align_address_32_write, + .cleanup = mmc_test_cleanup, + }, + + { + .name = "32 bytes aligned sg-element address read", + .prepare = mmc_test_prepare_read, + .run = mmc_test_align_address_32_read, + .cleanup = mmc_test_cleanup, + }, + + { .name = "Correct xfer_size at write (start failure)", .run = mmc_test_xfersize_write, }, diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index ba821fe70bc..9cce415f1ff 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -2010,7 +2010,7 @@ void mmc_rescan(struct work_struct *work) container_of(work, struct mmc_host, detect.work); int i; - if (host->rescan_disable) + if (host->rescan_disable || mmc_host_needs_resume(host)) return; mmc_bus_get(host); @@ -2273,8 +2273,13 @@ int mmc_suspend_host(struct mmc_host *host) int err = 0; cancel_delayed_work(&host->detect); + cancel_delayed_work_sync(&host->resume); mmc_flush_scheduled_work(); + /* Skip suspend, if deferred resume 
were scheduled but not completed. */ + if (mmc_host_needs_resume(host)) + return 0; + err = mmc_cache_ctrl(host, 0); if (err) goto out; @@ -2300,6 +2305,10 @@ int mmc_suspend_host(struct mmc_host *host) mmc_release_host(host); host->pm_flags = 0; err = 0; + } else if (mmc_card_mmc(host->card) || + mmc_card_sd(host->card)) { + host->pm_state |= MMC_HOST_DEFERRED_RESUME | + MMC_HOST_NEEDS_RESUME; } } mmc_bus_put(host); @@ -2321,6 +2330,12 @@ int mmc_resume_host(struct mmc_host *host) { int err = 0; + if (mmc_host_deferred_resume(host)) { + mmc_schedule_delayed_work(&host->resume, + msecs_to_jiffies(3000)); + return 0; + } + mmc_bus_get(host); if (host->bus_ops && !host->bus_dead) { if (!mmc_card_keep_power(host)) { @@ -2355,6 +2370,24 @@ int mmc_resume_host(struct mmc_host *host) } EXPORT_SYMBOL(mmc_resume_host); +void mmc_resume_work(struct work_struct *work) +{ + struct mmc_host *host = + container_of(work, struct mmc_host, resume.work); + + host->pm_state &= ~MMC_HOST_DEFERRED_RESUME; + mmc_resume_host(host); + host->pm_state &= ~MMC_HOST_NEEDS_RESUME; + + mmc_detect_change(host, 0); +} + +void mmc_resume_host_sync(struct mmc_host *host) +{ + flush_delayed_work_sync(&host->resume); +} +EXPORT_SYMBOL(mmc_resume_host_sync); + /* Do the card removal on suspend if card is assumed removeable * Do that in pm notifier while userspace isn't yet frozen, so we will be able to sync the card. 
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 3bdafbca354..5796d2d85f4 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -59,6 +59,7 @@ static inline void mmc_delay(unsigned int ms) void mmc_rescan(struct work_struct *work); void mmc_start_host(struct mmc_host *host); void mmc_stop_host(struct mmc_host *host); +void mmc_resume_work(struct work_struct *work); int _mmc_detect_card_removed(struct mmc_host *host); diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 91c84c7a182..d87d1152ea2 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -330,6 +330,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) spin_lock_init(&host->lock); init_waitqueue_head(&host->wq); INIT_DELAYED_WORK(&host->detect, mmc_rescan); + INIT_DELAYED_WORK(&host->resume, mmc_resume_work); #ifdef CONFIG_PM host->pm_notify.notifier_call = mmc_pm_notify; #endif diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 032b84791a1..7df4119aaa2 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -19,6 +19,7 @@ #include <linux/err.h> #include <linux/highmem.h> #include <linux/log2.h> +#include <linux/mmc/pm.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> #include <linux/amba/bus.h> @@ -46,6 +47,7 @@ static unsigned int fmax = 515633; * struct variant_data - MMCI variant-specific quirks * @clkreg: default value for MCICLOCK register * @clkreg_enable: enable value for MMCICLOCK register + * @dma_sdio_req_ctrl: enable value for DMAREQCTL register for SDIO write * @datalength_bits: number of bits in the MMCIDATALENGTH register * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY * is asserted (likewise for RX) @@ -56,10 +58,13 @@ static unsigned int fmax = 515633; * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register * @pwrreg_powerup: power up value for MMCIPOWER register * @signal_direction: input/out direction of bus signals 
can be indicated + * @non_power_of_2_blksize: true if block sizes can be other than power of two + * @pwrreg_ctrl_power: bits in MMCIPOWER register controls ext. power supply */ struct variant_data { unsigned int clkreg; unsigned int clkreg_enable; + unsigned int dma_sdio_req_ctrl; unsigned int datalength_bits; unsigned int fifosize; unsigned int fifohalfsize; @@ -68,6 +73,8 @@ struct variant_data { bool blksz_datactrl16; u32 pwrreg_powerup; bool signal_direction; + bool non_power_of_2_blksize; + bool pwrreg_ctrl_power; }; static struct variant_data variant_arm = { @@ -75,6 +82,7 @@ static struct variant_data variant_arm = { .fifohalfsize = 8 * 4, .datalength_bits = 16, .pwrreg_powerup = MCI_PWR_UP, + .pwrreg_ctrl_power = true, }; static struct variant_data variant_arm_extended_fifo = { @@ -82,6 +90,7 @@ static struct variant_data variant_arm_extended_fifo = { .fifohalfsize = 64 * 4, .datalength_bits = 16, .pwrreg_powerup = MCI_PWR_UP, + .pwrreg_ctrl_power = true, }; static struct variant_data variant_u300 = { @@ -99,6 +108,7 @@ static struct variant_data variant_ux500 = { .fifohalfsize = 8 * 4, .clkreg = MCI_CLK_ENABLE, .clkreg_enable = MCI_ST_UX500_HWFCEN, + .dma_sdio_req_ctrl = MCI_ST_DPSM_DMAREQCTL, .datalength_bits = 24, .sdio = true, .st_clkdiv = true, @@ -111,15 +121,42 @@ static struct variant_data variant_ux500v2 = { .fifohalfsize = 8 * 4, .clkreg = MCI_CLK_ENABLE, .clkreg_enable = MCI_ST_UX500_HWFCEN, + .dma_sdio_req_ctrl = MCI_ST_DPSM_DMAREQCTL, .datalength_bits = 24, .sdio = true, .st_clkdiv = true, .blksz_datactrl16 = true, .pwrreg_powerup = MCI_PWR_ON, .signal_direction = true, + .non_power_of_2_blksize = true, }; /* + * Validate mmc prerequisites + */ +static int mmci_validate_data(struct mmci_host *host, + struct mmc_data *data) +{ + if (!data) + return 0; + + if (!host->variant->non_power_of_2_blksize && + !is_power_of_2(data->blksz)) { + dev_err(mmc_dev(host->mmc), + "unsupported block size (%d bytes)\n", data->blksz); + return -EINVAL; + } + + if 
(data->sg->offset & 3) { + dev_err(mmc_dev(host->mmc), + "unsupported alginment (0x%x)\n", data->sg->offset); + return -EINVAL; + } + + return 0; +} + +/* * This must be called with host->lock held */ static void mmci_write_clkreg(struct mmci_host *host, u32 clk) @@ -228,6 +265,7 @@ static void mmci_stop_data(struct mmci_host *host) writel(0, host->base + MMCIDATACTRL); mmci_set_mask1(host, 0); host->data = NULL; + host->datactrl_reg = 0; } static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) @@ -338,10 +376,33 @@ static inline void mmci_dma_release(struct mmci_host *host) host->dma_rx_channel = host->dma_tx_channel = NULL; } +static void mmci_dma_data_error(struct mmci_host *host, struct mmc_data *data) +{ + dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); + dmaengine_terminate_all(host->dma_current); + host->dma_current = NULL; + host->dma_desc_current = NULL; + data->host_cookie = 0; +} + static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) { - struct dma_chan *chan = host->dma_current; + struct dma_chan *chan; enum dma_data_direction dir; + + if (data->flags & MMC_DATA_READ) { + dir = DMA_FROM_DEVICE; + chan = host->dma_rx_channel; + } else { + dir = DMA_TO_DEVICE; + chan = host->dma_tx_channel; + } + + dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); +} + +static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data) +{ u32 status; int i; @@ -360,19 +421,12 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) * contiguous buffers. On TX, we'll get a FIFO underrun error. 
*/ if (status & MCI_RXDATAAVLBLMASK) { - dmaengine_terminate_all(chan); - if (!data->error) - data->error = -EIO; - } - - if (data->flags & MMC_DATA_WRITE) { - dir = DMA_TO_DEVICE; - } else { - dir = DMA_FROM_DEVICE; + data->error = -EIO; + mmci_dma_data_error(host, data); } if (!data->host_cookie) - dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); + mmci_dma_unmap(host, data); /* * Use of DMA with scatter-gather is impossible. @@ -382,16 +436,15 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n"); mmci_dma_release(host); } -} -static void mmci_dma_data_error(struct mmci_host *host) -{ - dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); - dmaengine_terminate_all(host->dma_current); + host->dma_current = NULL; + host->dma_desc_current = NULL; } -static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, - struct mmci_host_next *next) +/* prepares DMA channel and DMA descriptor, returns non-zero on failure */ +static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, + struct dma_chan **dma_chan, + struct dma_async_tx_descriptor **dma_desc) { struct variant_data *variant = host->variant; struct dma_slave_config conf = { @@ -409,16 +462,6 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, enum dma_data_direction buffer_dirn; int nr_sg; - /* Check if next job is already prepared */ - if (data->host_cookie && !next && - host->dma_current && host->dma_desc_current) - return 0; - - if (!next) { - host->dma_current = NULL; - host->dma_desc_current = NULL; - } - if (data->flags & MMC_DATA_READ) { conf.direction = DMA_DEV_TO_MEM; buffer_dirn = DMA_FROM_DEVICE; @@ -433,8 +476,12 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, if (!chan) return -EINVAL; - /* If less than or equal to the fifo size, don't bother with DMA */ - if (data->blksz * data->blocks 
<= variant->fifosize) + /* + * If less than or equal to the fifo size, don't bother with DMA + * SDIO transfers may not be 4 bytes aligned, fall back to PIO + */ + if (data->blksz * data->blocks <= variant->fifosize || + (data->blksz * data->blocks) & 3) return -EINVAL; device = chan->device; @@ -448,29 +495,42 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, if (!desc) goto unmap_exit; - if (next) { - next->dma_chan = chan; - next->dma_desc = desc; - } else { - host->dma_current = chan; - host->dma_desc_current = desc; - } + *dma_chan = chan; + *dma_desc = desc; return 0; unmap_exit: - if (!next) - dmaengine_terminate_all(chan); dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn); return -ENOMEM; } -static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) +static int inline mmci_dma_prep_data(struct mmci_host *host, + struct mmc_data *data) +{ + /* Check if next job is already prepared. */ + if (host->dma_current && host->dma_desc_current) + return 0; + + /* No job were prepared thus do it now. 
*/ + return __mmci_dma_prep_data(host, data, &host->dma_current, + &host->dma_desc_current); +} + +static inline int mmci_dma_prep_next(struct mmci_host *host, + struct mmc_data *data) +{ + struct mmci_host_next *nd = &host->next_data; + return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc); +} + +static int mmci_dma_start_data(struct mmci_host *host) { int ret; struct mmc_data *data = host->data; + struct variant_data *variant = host->variant; - ret = mmci_dma_prep_data(host, host->data, NULL); + ret = mmci_dma_prep_data(host, host->data); if (ret) return ret; @@ -481,10 +541,15 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) dmaengine_submit(host->dma_desc_current); dma_async_issue_pending(host->dma_current); - datactrl |= MCI_DPSM_DMAENABLE; + host->datactrl_reg |= MCI_DPSM_DMAENABLE; + + /* Some hardware versions need special flags for SDIO DMA write */ + if (variant->sdio && host->mmc->card && mmc_card_sdio(host->mmc->card) + && (data->flags & MMC_DATA_WRITE)) + host->datactrl_reg |= variant->dma_sdio_req_ctrl; /* Trigger the DMA transfer */ - writel(datactrl, host->base + MMCIDATACTRL); + writel(host->datactrl_reg, host->base + MMCIDATACTRL); /* * Let the MMCI say when the data is ended and it's time @@ -501,18 +566,14 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) struct mmci_host_next *next = &host->next_data; if (data->host_cookie && data->host_cookie != next->cookie) { - pr_warning("[%s] invalid cookie: data->host_cookie %d" + pr_err("[%s] invalid cookie: data->host_cookie %d" " host->next_data.cookie %d\n", __func__, data->host_cookie, host->next_data.cookie); - data->host_cookie = 0; + BUG(); } - if (!data->host_cookie) - return; - host->dma_desc_current = next->dma_desc; host->dma_current = next->dma_chan; - next->dma_desc = NULL; next->dma_chan = NULL; } @@ -527,19 +588,18 @@ static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq, if (!data) return; 
- if (data->host_cookie) { - data->host_cookie = 0; + BUG_ON(data->host_cookie); + + if (mmci_validate_data(host, data)) return; - } - /* if config for dma */ - if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) || - ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) { - if (mmci_dma_prep_data(host, data, nd)) - data->host_cookie = 0; - else - data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie; - } + /* + * Don't prepare DMA if there is no previous request, + * is_first_req is set. Instead, prepare DMA while + * start command is being issued. + */ + if (!is_first_req && !mmci_dma_prep_next(host, data)) + data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie; } static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq, @@ -547,29 +607,23 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq, { struct mmci_host *host = mmc_priv(mmc); struct mmc_data *data = mrq->data; - struct dma_chan *chan; - enum dma_data_direction dir; - if (!data) + if (!data || !data->host_cookie) return; - if (data->flags & MMC_DATA_READ) { - dir = DMA_FROM_DEVICE; - chan = host->dma_rx_channel; - } else { - dir = DMA_TO_DEVICE; - chan = host->dma_tx_channel; - } + mmci_dma_unmap(host, data); + if (err) { + struct mmci_host_next *next = &host->next_data; + struct dma_chan *chan; + if (data->flags & MMC_DATA_READ) + chan = host->dma_rx_channel; + else + chan = host->dma_tx_channel; + dmaengine_terminate_all(chan); - /* if config for dma */ - if (chan) { - if (err) - dmaengine_terminate_all(chan); - if (data->host_cookie) - dma_unmap_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, dir); - mrq->data->host_cookie = 0; + next->dma_desc = NULL; + next->dma_chan = NULL; } } @@ -590,11 +644,20 @@ static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) { } -static inline void mmci_dma_data_error(struct mmci_host *host) +static inline void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data) +{ +} + 
+static inline void mmci_dma_data_error(struct mmci_host *host, struct mmc_data *data) +{ +} + +static inline int mmci_dma_start_data(struct mmci_host *host) { + return -ENOSYS; } -static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) +static inline int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data) { return -ENOSYS; } @@ -604,10 +667,10 @@ static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datac #endif -static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) +static void mmci_setup_datactrl(struct mmci_host *host, struct mmc_data *data) { struct variant_data *variant = host->variant; - unsigned int datactrl, timeout, irqmask; + unsigned int datactrl, timeout; unsigned long long clks; void __iomem *base; int blksz_bits; @@ -629,7 +692,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) writel(host->size, base + MMCIDATALENGTH); blksz_bits = ffs(data->blksz) - 1; - BUG_ON(1 << blksz_bits != data->blksz); if (variant->blksz_datactrl16) datactrl = MCI_DPSM_ENABLE | (data->blksz << 16); @@ -641,14 +703,43 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) /* The ST Micro variants has a special bit to enable SDIO */ if (variant->sdio && host->mmc->card) - if (mmc_card_sdio(host->mmc->card)) + if (mmc_card_sdio(host->mmc->card)) { + /* + * The ST Micro variants has a special bit + * to enable SDIO. + */ + u32 clk; + datactrl |= MCI_ST_DPSM_SDIOEN; + /* + * The ST Micro variant for SDIO write transfer sizes + * less then 8 bytes needs to have clock H/W flow + * control disabled. 
+ */ + if ((host->size < 8) && + (data->flags & MMC_DATA_WRITE)) + clk = host->clk_reg & ~variant->clkreg_enable; + else + clk = host->clk_reg | variant->clkreg_enable; + + mmci_write_clkreg(host, clk); + } + host->datactrl_reg = datactrl; + writel(datactrl, base + MMCIDATACTRL); +} + +static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) +{ + unsigned int irqmask; + struct variant_data *variant = host->variant; + void __iomem *base = host->base; + /* * Attempt to use DMA operation mode, if this * should fail, fall back to PIO mode */ - if (!mmci_dma_start_data(host, datactrl)) + if (!mmci_dma_start_data(host)) return; /* IRQ mode, map the SG list for CPU reading/writing */ @@ -672,7 +763,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) irqmask = MCI_TXFIFOHALFEMPTYMASK; } - writel(datactrl, base + MMCIDATACTRL); writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0); mmci_set_mask1(host, irqmask); } @@ -699,6 +789,14 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) if (/*interrupt*/0) c |= MCI_CPSM_INTERRUPT; + /* + * For levelshifters we must not use more than 25MHz when + * sending commands. + */ + host->cclk_desired = host->cclk; + if (host->plat->ios_handler && (host->cclk_desired > 25000000)) + mmci_set_clkreg(host, 25000000); + host->cmd = cmd; writel(cmd->arg, base + MMCIARGUMENT); @@ -715,8 +813,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, u32 remain, success; /* Terminate the DMA transfer */ - if (dma_inprogress(host)) - mmci_dma_data_error(host); + if (dma_inprogress(host)) { + mmci_dma_data_error(host, data); + mmci_dma_unmap(host, data); + } /* * Calculate how far we are into the transfer. 
Note that @@ -755,7 +855,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, if (status & MCI_DATAEND || data->error) { if (dma_inprogress(host)) - mmci_dma_unmap(host, data); + mmci_dma_finalize(host, data); mmci_stop_data(host); if (!data->error) @@ -789,15 +889,24 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, cmd->resp[3] = readl(base + MMCIRESPONSE3); } + /* + * For levelshifters we might have decreased cclk to 25MHz when + * sending commands, then we restore the frequency here. + */ + if (host->plat->ios_handler && (host->cclk_desired > host->cclk)) + mmci_set_clkreg(host, host->cclk_desired); + if (!cmd->data || cmd->error) { - if (host->data) { - /* Terminate the DMA transfer */ - if (dma_inprogress(host)) - mmci_dma_data_error(host); - mmci_stop_data(host); + /* Terminate the DMA transfer */ + if (dma_inprogress(host)) { + mmci_dma_data_error(host, host->mrq->data); + mmci_dma_unmap(host, host->mrq->data); } + if (host->data) + mmci_stop_data(host); mmci_request_end(host, cmd->mrq); } else if (!(cmd->data->flags & MMC_DATA_READ)) { + mmci_setup_datactrl(host, cmd->data); mmci_start_data(host, cmd->data); } } @@ -864,22 +973,6 @@ static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int rem count = min(remain, maxcnt); /* - * The ST Micro variant for SDIO transfer sizes - * less then 8 bytes should have clock H/W flow - * control disabled. - */ - if (variant->sdio && - mmc_card_sdio(host->mmc->card)) { - u32 clk; - if (count < 8) - clk = host->clk_reg & ~variant->clkreg_enable; - else - clk = host->clk_reg | variant->clkreg_enable; - - mmci_write_clkreg(host, clk); - } - - /* * SDIO especially may want to send something that is * not divisible by 4 (as opposed to card sectors * etc), and the FIFO only accept full 32-bit writes. 
@@ -1032,13 +1125,12 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct mmci_host *host = mmc_priv(mmc); unsigned long flags; + bool dmaprep_after_cmd = false; WARN_ON(host->mrq != NULL); - if (mrq->data && !is_power_of_2(mrq->data->blksz)) { - dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n", - mrq->data->blksz); - mrq->cmd->error = -EINVAL; + mrq->cmd->error = mmci_validate_data(host, mrq->data); + if (mrq->cmd->error) { mmc_request_done(mmc, mrq); return; } @@ -1049,14 +1141,28 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) host->mrq = mrq; - if (mrq->data) + if (mrq->data) { + dmaprep_after_cmd = + (host->variant->clkreg_enable && + (mrq->data->flags & MMC_DATA_READ)) || + !(mrq->data->flags & MMC_DATA_READ); mmci_get_next_data(host, mrq->data); - - if (mrq->data && mrq->data->flags & MMC_DATA_READ) - mmci_start_data(host, mrq->data); + if (mrq->data->flags & MMC_DATA_READ) { + mmci_setup_datactrl(host, mrq->data); + if (!dmaprep_after_cmd) + mmci_start_data(host, mrq->data); + } + } mmci_start_command(host, mrq->cmd, 0); + if (mrq->data && dmaprep_after_cmd) { + mmci_dma_prep_data(host, mrq->data); + + if (mrq->data->flags & MMC_DATA_READ) + mmci_start_data(host, mrq->data); + } + spin_unlock_irqrestore(&host->lock, flags); } @@ -1178,6 +1284,21 @@ static int mmci_get_cd(struct mmc_host *mmc) return status; } +static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct mmci_host *host = mmc_priv(mmc); + int ret = 0; + + if (host->plat->ios_handler) { + pm_runtime_get_sync(mmc_dev(mmc)); + ret = host->plat->ios_handler(mmc_dev(mmc), ios); + pm_runtime_mark_last_busy(mmc_dev(mmc)); + pm_runtime_put_autosuspend(mmc_dev(mmc)); + } + + return ret; +} + static irqreturn_t mmci_cd_irq(int irq, void *dev_id) { struct mmci_host *host = dev_id; @@ -1194,6 +1315,7 @@ static const struct mmc_host_ops mmci_ops = { .set_ios = mmci_set_ios, .get_ro = mmci_get_ro, .get_cd = 
mmci_get_cd, + .start_signal_voltage_switch = mmci_sig_volt_switch, }; static int __devinit mmci_probe(struct amba_device *dev, @@ -1321,6 +1443,9 @@ static int __devinit mmci_probe(struct amba_device *dev, mmc->caps = plat->capabilities; mmc->caps2 = plat->capabilities2; + /* We support these PM capabilities. */ + mmc->pm_caps = MMC_PM_KEEP_POWER; + /* * We can do SGIO */ @@ -1390,7 +1515,8 @@ static int __devinit mmci_probe(struct amba_device *dev, } if ((host->plat->status || host->gpio_cd != -ENOSYS) - && host->gpio_cd_irq < 0) + && host->gpio_cd_irq < 0 + && !(mmc->caps & MMC_CAP_NONREMOVABLE)) mmc->caps |= MMC_CAP_NEEDS_POLL; ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); @@ -1503,6 +1629,75 @@ static int __devexit mmci_remove(struct amba_device *dev) return 0; } +#if defined(CONFIG_SUSPEND) || defined(CONFIG_PM_RUNTIME) +static int mmci_save(struct amba_device *dev) +{ + struct mmc_host *mmc = amba_get_drvdata(dev); + unsigned long flags; + struct mmc_ios ios; + int ret = 0; + + if (mmc) { + struct mmci_host *host = mmc_priv(mmc); + + /* Let the ios_handler act on a POWER_OFF to save power. */ + if (host->plat->ios_handler) { + memcpy(&ios, &mmc->ios, sizeof(struct mmc_ios)); + ios.power_mode = MMC_POWER_OFF; + ret = host->plat->ios_handler(mmc_dev(mmc), + &ios); + if (ret) + return ret; + } + + spin_lock_irqsave(&host->lock, flags); + + /* + * Make sure we do not get any interrupts when we disabled the + * clock and the regulator and as well make sure to clear the + * registers for clock and power. 
+ */ + writel(0, host->base + MMCIMASK0); + writel(0, host->base + MMCIPOWER); + writel(0, host->base + MMCICLOCK); + + spin_unlock_irqrestore(&host->lock, flags); + + clk_disable(host->clk); + } + + return ret; +} + +static int mmci_restore(struct amba_device *dev) +{ + struct mmc_host *mmc = amba_get_drvdata(dev); + unsigned long flags; + + if (mmc) { + struct mmci_host *host = mmc_priv(mmc); + + clk_enable(host->clk); + + spin_lock_irqsave(&host->lock, flags); + + /* Restore registers and re-enable interrupts. */ + writel(host->clk_reg, host->base + MMCICLOCK); + writel(host->pwr_reg, host->base + MMCIPOWER); + writel(MCI_IRQENABLE, host->base + MMCIMASK0); + + spin_unlock_irqrestore(&host->lock, flags); + + /* Restore settings done by the ios_handler. */ + if (host->plat->ios_handler) + host->plat->ios_handler(mmc_dev(mmc), + &mmc->ios); + } + + return 0; +} +#endif + #ifdef CONFIG_SUSPEND static int mmci_suspend(struct device *dev) { @@ -1511,12 +1706,11 @@ static int mmci_suspend(struct device *dev) int ret = 0; if (mmc) { - struct mmci_host *host = mmc_priv(mmc); - ret = mmc_suspend_host(mmc); if (ret == 0) { pm_runtime_get_sync(dev); - writel(0, host->base + MMCIMASK0); + mmci_save(adev); + amba_pclk_disable(adev); } } @@ -1530,9 +1724,8 @@ static int mmci_resume(struct device *dev) int ret = 0; if (mmc) { - struct mmci_host *host = mmc_priv(mmc); - - writel(MCI_IRQENABLE, host->base + MMCIMASK0); + amba_pclk_enable(adev); + mmci_restore(adev); pm_runtime_put(dev); ret = mmc_resume_host(mmc); @@ -1542,8 +1735,43 @@ static int mmci_resume(struct device *dev) } #endif +#ifdef CONFIG_PM_RUNTIME +static int mmci_runtime_suspend(struct device *dev) +{ + struct amba_device *adev = to_amba_device(dev); + struct mmc_host *mmc = amba_get_drvdata(adev); + int ret = 0; + + if (mmc) { + struct mmci_host *host = mmc_priv(mmc); + struct variant_data *variant = host->variant; + if (!variant->pwrreg_ctrl_power) + ret = mmci_save(adev); + } + + return ret; +} + +static int 
mmci_runtime_resume(struct device *dev) +{ + struct amba_device *adev = to_amba_device(dev); + struct mmc_host *mmc = amba_get_drvdata(adev); + int ret = 0; + + if (mmc) { + struct mmci_host *host = mmc_priv(mmc); + struct variant_data *variant = host->variant; + if (!variant->pwrreg_ctrl_power) + ret = mmci_restore(adev); + } + + return ret; +} +#endif + static const struct dev_pm_ops mmci_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume) + SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL) }; static struct amba_id mmci_ids[] = { diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index d437ccf62d6..5a17beafd05 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -60,6 +60,13 @@ #define MCI_ST_DPSM_RWMOD (1 << 10) #define MCI_ST_DPSM_SDIOEN (1 << 11) /* Control register extensions in the ST Micro Ux500 versions */ +/* + * DMA request control is required for write + * if transfer size is not 32 byte aligned. + * DMA request control is also needed if the total + * transfer size is 32 byte aligned but any of the + * sg element lengths are not aligned with 32 byte. 
+ */ #define MCI_ST_DPSM_DMAREQCTL (1 << 12) #define MCI_ST_DPSM_DBOOTMODEEN (1 << 13) #define MCI_ST_DPSM_BUSYMODE (1 << 14) @@ -179,8 +186,10 @@ struct mmci_host { unsigned int mclk; unsigned int cclk; + unsigned int cclk_desired; u32 pwr_reg; u32 clk_reg; + u32 datactrl_reg; struct mmci_platform_data *plat; struct variant_data *variant; diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index cd3defb11ff..edbf830a23f 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -33,6 +33,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/crc32.h> +#include <linux/clk.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/etherdevice.h> @@ -144,6 +145,9 @@ struct smsc911x_data { /* regulators */ struct regulator_bulk_data supplies[SMSC911X_NUM_SUPPLIES]; + + /* clock */ + struct clk *fsmc_clk; }; /* Easy access to information */ @@ -369,7 +373,7 @@ out: } /* - * enable resources, currently just regulators. + * enable resources, regulators & clocks. 
*/ static int smsc911x_enable_resources(struct platform_device *pdev) { @@ -379,9 +383,17 @@ static int smsc911x_enable_resources(struct platform_device *pdev) ret = regulator_bulk_enable(ARRAY_SIZE(pdata->supplies), pdata->supplies); - if (ret) + if (ret) { netdev_err(ndev, "failed to enable regulators %d\n", ret); + return ret; + } + + if (pdata->fsmc_clk) { + ret = clk_enable(pdata->fsmc_clk); + if (ret < 0) + netdev_err(ndev, "failed to enable clock %d\n", ret); + } return ret; } @@ -396,6 +408,8 @@ static int smsc911x_disable_resources(struct platform_device *pdev) ret = regulator_bulk_disable(ARRAY_SIZE(pdata->supplies), pdata->supplies); + if (pdata->fsmc_clk) + clk_disable(pdata->fsmc_clk); return ret; } @@ -418,9 +432,17 @@ static int smsc911x_request_resources(struct platform_device *pdev) ret = regulator_bulk_get(&pdev->dev, ARRAY_SIZE(pdata->supplies), pdata->supplies); - if (ret) - netdev_err(ndev, "couldn't get regulators %d\n", - ret); + if (ret) { + netdev_err(ndev, "couldn't get regulators %d\n", ret); + return ret; + } + + /* Request clock, ignore if not here */ + pdata->fsmc_clk = clk_get(NULL, "fsmc"); + if (IS_ERR(pdata->fsmc_clk)) { + netdev_warn(ndev, "couldn't get clock %d\n", ret); + pdata->fsmc_clk = NULL; + } return ret; } @@ -436,6 +458,12 @@ static void smsc911x_free_resources(struct platform_device *pdev) /* Free regulators */ regulator_bulk_free(ARRAY_SIZE(pdata->supplies), pdata->supplies); + + /* Free clock */ + if (pdata->fsmc_clk) { + clk_put(pdata->fsmc_clk); + pdata->fsmc_clk = NULL; + } } /* waits for MAC not busy, with timeout. 
Only called by smsc911x_mac_read @@ -2343,6 +2371,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) unsigned int intcfg = 0; int res_size, irq_flags; int retval; + int to = 100; pr_info("Driver version %s\n", SMSC_DRV_VERSION); @@ -2419,6 +2448,18 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) if (pdata->config.shift) pdata->ops = &shifted_smsc911x_ops; + /* poll the READY bit in PMT_CTRL. Any other access to the device is + * forbidden while this bit isn't set. Try for 100ms + */ + while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to) + udelay(1000); + + if (to == 0) { + pr_err("Device not READY in 100ms aborting\n"); + goto out_0; + } + + retval = smsc911x_init(dev); if (retval < 0) goto out_disable_resources; diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index 99dc29f2f2f..56dfe432b74 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig @@ -307,4 +307,23 @@ config AB8500_BATTERY_THERM_ON_BATCTRL help Say Y to enable battery temperature measurements using thermistor connected on BATCTRL ADC. + +config AB8500_9100_LI_ION_BATTERY + bool "Enable support of the 9100 Li-ion battery charging" + depends on AB8500_BM + help + Say Y to enable support of the 9100 Li-ion battery charging. + +config AB5500_BM + bool "AB5500 Battery Management Driver" + depends on AB5500_CORE && AB5500_GPADC && MACH_U5500 + help + Say Y to include support for AB5500 battery management. + +config AB5500_BATTERY_THERM_ON_BATCTRL + bool "Thermistor connected on BATCTRL ADC" + depends on AB5500_BM + help + Say Y to enable battery temperature measurements using + thermistor connected on BATCTRL ADC. 
endif # POWER_SUPPLY diff --git a/drivers/power/Makefile b/drivers/power/Makefile index b6b243416c0..af27f1d08aa 100644 --- a/drivers/power/Makefile +++ b/drivers/power/Makefile @@ -41,6 +41,7 @@ obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o obj-$(CONFIG_CHARGER_LP8727) += lp8727_charger.o obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o +obj-$(CONFIG_AB5500_BM) += ab5500_charger.o ab5500_btemp.o ab5500_fg.o abx500_chargalg.o obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o diff --git a/drivers/power/ab5500_btemp.c b/drivers/power/ab5500_btemp.c new file mode 100644 index 00000000000..3709299d6cb --- /dev/null +++ b/drivers/power/ab5500_btemp.c @@ -0,0 +1,994 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Battery temperature driver for ab5500 + * + * License Terms: GNU General Public License v2 + * Authors: + * Johan Palsson <johan.palsson@stericsson.com> + * Karl Komierowski <karl.komierowski@stericsson.com> + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/platform_device.h> +#include <linux/power_supply.h> +#include <linux/completion.h> +#include <linux/workqueue.h> +#include <linux/mfd/abx500.h> +#include <linux/mfd/abx500/ab5500.h> +#include <linux/mfd/abx500/ab5500-bm.h> +#include <linux/mfd/abx500/ab5500-gpadc.h> + +#define BTEMP_THERMAL_LOW_LIMIT -10 +#define BTEMP_THERMAL_MED_LIMIT 0 +#define BTEMP_THERMAL_HIGH_LIMIT_62 62 + +#define BTEMP_BATCTRL_CURR_SRC_7UA 7 +#define BTEMP_BATCTRL_CURR_SRC_15UA 15 +#define BTEMP_BATCTRL_CURR_SRC_20UA 20 + +#define UART_MODE 0x0F +#define BAT_CUR_SRC 0x1F +#define RESIS_ID_MODE 0x03 +#define RESET 0x00 +#define ADOUT_10K_PULL_UP 0x07 + +#define to_ab5500_btemp_device_info(x) container_of((x), \ + struct ab5500_btemp, btemp_psy); + 
+/** + * struct ab5500_btemp_interrupts - ab5500 interrupts + * @name: name of the interrupt + * @isr function pointer to the isr + */ +struct ab5500_btemp_interrupts { + char *name; + irqreturn_t (*isr)(int irq, void *data); +}; + +struct ab5500_btemp_events { + bool batt_rem; + bool usb_conn; +}; + +/** + * struct ab5500_btemp - ab5500 BTEMP device information + * @dev: Pointer to the structure device + * @chip_id: Chip-Id of the AB5500 + * @curr_source: What current source we use, in uA + * @bat_temp: Battery temperature in degree Celcius + * @prev_bat_temp Last dispatched battery temperature + * @node: struct of type list_head + * @parent: Pointer to the struct ab5500 + * @gpadc: Pointer to the struct gpadc + * @gpadc-auto: Pointer to the struct adc_auto_input + * @pdata: Pointer to the ab5500_btemp platform data + * @bat: Pointer to the ab5500_bm platform data + * @btemp_psy: Structure for BTEMP specific battery properties + * @events: Structure for information about events triggered + * @btemp_wq: Work queue for measuring the temperature periodically + * @btemp_periodic_work: Work for measuring the temperature periodically + */ +struct ab5500_btemp { + struct device *dev; + u8 chip_id; + int curr_source; + int bat_temp; + int prev_bat_temp; + struct list_head node; + struct ab5500 *parent; + struct ab5500_gpadc *gpadc; + struct adc_auto_input *gpadc_auto; + struct abx500_btemp_platform_data *pdata; + struct abx500_bm_data *bat; + struct power_supply btemp_psy; + struct ab5500_btemp_events events; + struct workqueue_struct *btemp_wq; + struct delayed_work btemp_periodic_work; +}; + +/* BTEMP power supply properties */ +static enum power_supply_property ab5500_btemp_props[] = { + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_TEMP, +}; + +static LIST_HEAD(ab5500_btemp_list); + +static int ab5500_btemp_bat_temp_trig(int mux); + +struct ab5500_btemp *ab5500_btemp_get(void) +{ + struct ab5500_btemp *di; + 
di = list_first_entry(&ab5500_btemp_list, struct ab5500_btemp, node); + + return di; +} + +/** + * ab5500_btemp_get_batctrl_temp() - get the temperature + * @di: pointer to the ab5500_btemp structure + * + * Returns the batctrl temperature in millidegrees + */ +int ab5500_btemp_get_batctrl_temp(struct ab5500_btemp *di) +{ + return di->bat_temp * 1000; +} + +/** + * ab5500_btemp_volt_to_res() - convert batctrl voltage to resistance + * @di: pointer to the ab5500_btemp structure + * @volt: measured batctrl/btemp_ball voltage + * @batcrtl: batctrl/btemp_ball node + * + * This function returns the battery resistance that is + * derived from the BATCTRL/BTEMP_BALL voltage. + * Returns value in Ohms. + */ +static int ab5500_btemp_volt_to_res(struct ab5500_btemp *di, + int volt, bool batctrl) +{ + int rbs; + + if (batctrl) { + /* + * If the battery has internal NTC, we use the current + * source to calculate the resistance, 7uA or 20uA + */ + rbs = volt * 1000 / di->curr_source; + } else { + /* + * BTEMP_BALL is internally + * connected to 1.8V through a 10k resistor + */ + rbs = (10000 * volt) / (1800 - volt); + } + return rbs; +} + +/** + * ab5500_btemp_read_batctrl_voltage() - measure batctrl voltage + * @di: pointer to the ab5500_btemp structure + * + * This function returns the voltage on BATCTRL. Returns value in mV. 
+ */ +static int ab5500_btemp_read_batctrl_voltage(struct ab5500_btemp *di) +{ + int vbtemp; + static int prev; + + vbtemp = ab5500_gpadc_convert(di->gpadc, BAT_CTRL); + if (vbtemp < 0) { + dev_err(di->dev, + "%s gpadc conversion failed, using previous value", + __func__); + return prev; + } + prev = vbtemp; + return vbtemp; +} + +/** + * ab5500_btemp_curr_source_enable() - enable/disable batctrl current source + * @di: pointer to the ab5500_btemp structure + * @enable: enable or disable the current source + * + * Enable or disable the current sources for the BatCtrl AD channel + */ +static int ab5500_btemp_curr_source_enable(struct ab5500_btemp *di, + bool enable) +{ + int ret = 0; + + /* Only do this for batteries with internal NTC */ + if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && enable) { + + dev_dbg(di->dev, "Set BATCTRL %duA\n", di->curr_source); + + ret = abx500_mask_and_set_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, AB5500_UART, + UART_MODE, RESIS_ID_MODE); + if (ret) { + dev_err(di->dev, + "%s failed setting resistance identification mode\n", + __func__); + return ret; + } + + ret = abx500_mask_and_set_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, AB5500_URI, + BAT_CUR_SRC, BAT_CTRL_15U_ENA); + if (ret) { + dev_err(di->dev, "%s failed enabling current source\n", + __func__); + goto disable_curr_source; + } + } else if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && !enable) { + dev_dbg(di->dev, "Disable BATCTRL curr source\n"); + + /* Write 0 to the curr bits */ + ret = abx500_mask_and_set_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, AB5500_URI, + BAT_CUR_SRC, RESET); + if (ret) { + dev_err(di->dev, "%s failed disabling current source\n", + __func__); + goto disable_curr_source; + } + + ret = abx500_mask_and_set_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, AB5500_UART, + UART_MODE, RESET); + if (ret) { + dev_err(di->dev, "%s failed disabling force comp\n", + __func__); + } + } 
+ return ret; +disable_curr_source: + /* Write 0 to the curr bits */ + ret = abx500_mask_and_set_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, AB5500_URI, + BAT_CUR_SRC, RESET); + if (ret) { + dev_err(di->dev, "%s failed disabling current source\n", + __func__); + } + return ret; +} + +/** + * ab5500_btemp_get_batctrl_res() - get battery resistance + * @di: pointer to the ab5500_btemp structure + * + * This function returns the battery pack identification resistance. + * Returns value in Ohms. + */ +static int ab5500_btemp_get_batctrl_res(struct ab5500_btemp *di) +{ + int ret; + int batctrl; + int res; + + ret = ab5500_btemp_curr_source_enable(di, true); + /* TODO: This delay has to be optimised */ + msleep(100); + if (ret) { + dev_err(di->dev, "%s curr source enable failed\n", __func__); + return ret; + } + + batctrl = ab5500_btemp_read_batctrl_voltage(di); + res = ab5500_btemp_volt_to_res(di, batctrl, true); + + ret = ab5500_btemp_curr_source_enable(di, false); + if (ret) { + dev_err(di->dev, "%s curr source disable failed\n", __func__); + return ret; + } + + dev_dbg(di->dev, "%s batctrl: %d res: %d ", + __func__, batctrl, res); + + return res; +} + +/** + * ab5500_btemp_get_btemp_ball_res() - get battery resistance + * @di: pointer to the ab5500_btemp structure + * + * This function returns the battery pack identification + * resistance using resistor pull-up mode. Returns value in Ohms. 
+ */ +static int ab5500_btemp_get_btemp_ball_res(struct ab5500_btemp *di) +{ + int ret, vntc; + + ret = abx500_mask_and_set_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, AB5500_UART, + UART_MODE, ADOUT_10K_PULL_UP); + if (ret) { + dev_err(di->dev, + "failed to enable 10k pull up to Vadout\n"); + return ret; + } + + vntc = ab5500_gpadc_convert(di->gpadc, BTEMP_BALL); + if (vntc < 0) { + dev_err(di->dev, "%s gpadc conversion failed," + " using previous value\n", __func__); + return vntc; + } + + return ab5500_btemp_volt_to_res(di, vntc, false); +} + +/** + * ab5500_btemp_temp_to_res() - temperature to resistance + * @di: pointer to the ab5500_btemp structure + * @tbl: pointer to the resiatance to temperature table + * @tbl_size: size of the resistance to temperature table + * @temp: temperature to calculate the resistance from + * + * This function returns the battery resistance in ohms + * based on temperature. + */ +static int ab5500_btemp_temp_to_res(struct ab5500_btemp *di, + const struct abx500_res_to_temp *tbl, int tbl_size, int temp) +{ + int i, res; + /* + * Calculate the formula for the straight line + * Simple interpolation if we are within + * the resistance table limits, extrapolate + * if resistance is outside the limits. + */ + if (temp < tbl[0].temp) + i = 0; + else if (temp >= tbl[tbl_size - 1].temp) + i = tbl_size - 2; + else { + i = 0; + while (!(temp >= tbl[i].temp && + temp < tbl[i + 1].temp)) + i++; + } + + res = tbl[i].resist + ((tbl[i + 1].resist - tbl[i].resist) * + (temp - tbl[i].temp)) / (tbl[i + 1].temp - tbl[i].temp); + return res; +} + +/** + * ab5500_btemp_temp_to_volt() - temperature to adc voltage + * @di: pointer to the ab5500_btemp structure + * @temp: temperature to calculate the voltage from + * + * This function returns the adc voltage in millivolts + * based on temperature. 
+ */ +static int ab5500_btemp_temp_to_volt(struct ab5500_btemp *di, int temp) +{ + int res, id; + + id = di->bat->batt_id; + res = ab5500_btemp_temp_to_res(di, + di->bat->bat_type[id].r_to_t_tbl, + di->bat->bat_type[id].n_temp_tbl_elements, + temp); + /* + * BTEMP_BALL is internally connected to 1.8V + * through a 10k resistor + */ + return((1800 * res) / (10000 + res)); +} + +/** + * ab5500_btemp_res_to_temp() - resistance to temperature + * @di: pointer to the ab5500_btemp structure + * @tbl: pointer to the resiatance to temperature table + * @tbl_size: size of the resistance to temperature table + * @res: resistance to calculate the temperature from + * + * This function returns the battery temperature in degrees Celcius + * based on the NTC resistance. + */ +static int ab5500_btemp_res_to_temp(struct ab5500_btemp *di, + const struct abx500_res_to_temp *tbl, int tbl_size, int res) +{ + int i, temp; + /* + * Calculate the formula for the straight line + * Simple interpolation if we are within + * the resistance table limits, extrapolate + * if resistance is outside the limits. 
+ */ + if (res > tbl[0].resist) + i = 0; + else if (res <= tbl[tbl_size - 1].resist) + i = tbl_size - 2; + else { + i = 0; + while (!(res <= tbl[i].resist && + res > tbl[i + 1].resist)) + i++; + } + + temp = tbl[i].temp + ((tbl[i + 1].temp - tbl[i].temp) * + (res - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist); + return temp; +} + +/** + * ab5500_btemp_measure_temp() - measure battery temperature + * @di: pointer to the ab5500_btemp structure + * + * Returns battery temperature (on success) else the previous temperature + */ +static int ab5500_btemp_measure_temp(struct ab5500_btemp *di) +{ + int temp, rbat; + u8 id; + + id = di->bat->batt_id; + if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && + id != BATTERY_UNKNOWN && !di->bat->auto_trig) + rbat = ab5500_btemp_get_batctrl_res(di); + else + rbat = ab5500_btemp_get_btemp_ball_res(di); + + if (rbat < 0) { + dev_err(di->dev, "%s failed to get resistance\n", __func__); + /* + * Return out-of-range temperature so that + * charging is stopped + */ + return BTEMP_THERMAL_LOW_LIMIT; + } + + temp = ab5500_btemp_res_to_temp(di, + di->bat->bat_type[id].r_to_t_tbl, + di->bat->bat_type[id].n_temp_tbl_elements, rbat); + dev_dbg(di->dev, "Battery temperature is %d\n", temp); + + return temp; +} + +/** + * ab5500_btemp_id() - Identify the connected battery + * @di: pointer to the ab5500_btemp structure + * + * This function will try to identify the battery by reading the ID + * resistor. Some brands use a combined ID resistor with a NTC resistor to + * both be able to identify and to read the temperature of it. + */ +static int ab5500_btemp_id(struct ab5500_btemp *di) +{ + int res; + u8 i; + + di->curr_source = BTEMP_BATCTRL_CURR_SRC_7UA; + di->bat->batt_id = BATTERY_UNKNOWN; + + res = ab5500_btemp_get_batctrl_res(di); + if (res < 0) { + dev_err(di->dev, "%s get batctrl res failed\n", __func__); + return -ENXIO; + } + + /* BATTERY_UNKNOWN is defined on position 0, skip it! 
*/ + for (i = BATTERY_UNKNOWN + 1; i < di->bat->n_btypes; i++) { + if ((res <= di->bat->bat_type[i].resis_high) && + (res >= di->bat->bat_type[i].resis_low)) { + dev_dbg(di->dev, "Battery detected on %s" + " low %d < res %d < high: %d" + " index: %d\n", + di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL ? + "BATCTRL" : "BATTEMP", + di->bat->bat_type[i].resis_low, res, + di->bat->bat_type[i].resis_high, i); + + di->bat->batt_id = i; + break; + } + } + + if (di->bat->batt_id == BATTERY_UNKNOWN) { + dev_warn(di->dev, "Battery identified as unknown" + ", resistance %d Ohm\n", res); + return -ENXIO; + } + + /* + * We only have to change current source if the + * detected type is Type 1, else we use the 7uA source + */ + if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && + di->bat->batt_id == 1) { + dev_dbg(di->dev, "Set BATCTRL current source to 15uA\n"); + di->curr_source = BTEMP_BATCTRL_CURR_SRC_15UA; + } + + return di->bat->batt_id; +} + +/** + * ab5500_btemp_periodic_work() - Measuring the temperature periodically + * @work: pointer to the work_struct structure + * + * Work function for measuring the temperature periodically + */ +static void ab5500_btemp_periodic_work(struct work_struct *work) +{ + struct ab5500_btemp *di = container_of(work, + struct ab5500_btemp, btemp_periodic_work.work); + + di->bat_temp = ab5500_btemp_measure_temp(di); + + if (di->bat_temp != di->prev_bat_temp) { + di->prev_bat_temp = di->bat_temp; + power_supply_changed(&di->btemp_psy); + } + di->bat->temp_now = di->bat_temp; + + if (!di->bat->auto_trig) { + /* Check for temperature limits */ + if (di->bat_temp <= BTEMP_THERMAL_LOW_LIMIT) { + dev_err(di->dev, + "battery temp less than lower threshold\n"); + power_supply_changed(&di->btemp_psy); + } else if (di->bat_temp >= BTEMP_THERMAL_HIGH_LIMIT_62) { + dev_err(di->dev, + "battery temp greater them max threshold\n"); + power_supply_changed(&di->btemp_psy); + } + + /* Schedule a new measurement */ + if (di->events.usb_conn) + 
queue_delayed_work(di->btemp_wq, + &di->btemp_periodic_work, + round_jiffies(di->bat->interval_charging * HZ)); + else + queue_delayed_work(di->btemp_wq, + &di->btemp_periodic_work, + round_jiffies(di->bat->interval_not_charging * HZ)); + } else { + /* Schedule a new measurement */ + queue_delayed_work(di->btemp_wq, + &di->btemp_periodic_work, + round_jiffies(di->bat->interval_charging * HZ)); + } +} + +/** + * ab5500_btemp_batt_removal_handler() - battery removal detected + * @irq: interrupt number + * @_di: void pointer that has to address of ab5500_btemp + * + * Returns IRQ status(IRQ_HANDLED) + */ +static irqreturn_t ab5500_btemp_batt_removal_handler(int irq, void *_di) +{ + struct ab5500_btemp *di = _di; + dev_err(di->dev, "Battery removal detected!\n"); + + di->events.batt_rem = true; + power_supply_changed(&di->btemp_psy); + + return IRQ_HANDLED; +} + +/** + * ab5500_btemp_batt_attach_handler() - battery insertion detected + * @irq: interrupt number + * @_di: void pointer that has to address of ab5500_btemp + * + * Returns IRQ status(IRQ_HANDLED) + */ +static irqreturn_t ab5500_btemp_batt_attach_handler(int irq, void *_di) +{ + struct ab5500_btemp *di = _di; + dev_err(di->dev, "Battery attached!\n"); + + di->events.batt_rem = false; + power_supply_changed(&di->btemp_psy); + + return IRQ_HANDLED; +} + +/** + * ab5500_btemp_periodic() - Periodic temperature measurements + * @di: pointer to the ab5500_btemp structure + * @enable: enable or disable periodic temperature measurements + * + * Starts of stops periodic temperature measurements. Periodic measurements + * should only be done when a charger is connected. 
+ */ +static void ab5500_btemp_periodic(struct ab5500_btemp *di, + bool enable) +{ + dev_dbg(di->dev, "Enable periodic temperature measurements: %d\n", + enable); + + if (enable) + queue_delayed_work(di->btemp_wq, &di->btemp_periodic_work, 0); + else + cancel_delayed_work_sync(&di->btemp_periodic_work); +} + +/** + * ab5500_btemp_get_property() - get the btemp properties + * @psy: pointer to the power_supply structure + * @psp: pointer to the power_supply_property structure + * @val: pointer to the power_supply_propval union + * + * This function gets called when an application tries to get the btemp + * properties by reading the sysfs files. + * online: presence of the battery + * present: presence of the battery + * technology: battery technology + * temp: battery temperature + * Returns error code in case of failure else 0(on success) + */ +static int ab5500_btemp_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct ab5500_btemp *di; + + di = to_ab5500_btemp_device_info(psy); + + switch (psp) { + case POWER_SUPPLY_PROP_PRESENT: + case POWER_SUPPLY_PROP_ONLINE: + if (di->events.batt_rem) + val->intval = 0; + else + val->intval = 1; + break; + case POWER_SUPPLY_PROP_TECHNOLOGY: + val->intval = di->bat->bat_type[di->bat->batt_id].name; + break; + case POWER_SUPPLY_PROP_TEMP: + if (di->bat->batt_id == BATTERY_UNKNOWN) + /* + * In case the battery is not identified, its assumed that + * we are using the power supply and since no monitoring is + * done for the same, a nominal temp is hardocded. 
+ */ + val->intval = 250; + else + val->intval = di->bat_temp * 10; + break; + default: + return -EINVAL; + } + return 0; +} + +static int ab5500_btemp_get_ext_psy_data(struct device *dev, void *data) +{ + struct power_supply *psy; + struct power_supply *ext; + struct ab5500_btemp *di; + union power_supply_propval ret; + int i, j; + bool psy_found = false; + + psy = (struct power_supply *)data; + ext = dev_get_drvdata(dev); + di = to_ab5500_btemp_device_info(psy); + + /* + * For all psy where the name of your driver + * appears in any supplied_to + */ + for (i = 0; i < ext->num_supplicants; i++) { + if (!strcmp(ext->supplied_to[i], psy->name)) + psy_found = true; + } + + if (!psy_found) + return 0; + + /* Go through all properties for the psy */ + for (j = 0; j < ext->num_properties; j++) { + enum power_supply_property prop; + prop = ext->properties[j]; + + if (ext->get_property(ext, prop, &ret)) + continue; + + switch (prop) { + case POWER_SUPPLY_PROP_PRESENT: + switch (ext->type) { + case POWER_SUPPLY_TYPE_USB: + /* USB disconnected */ + if (!ret.intval && di->events.usb_conn) { + di->events.usb_conn = false; + if (di->bat->auto_trig) + ab5500_btemp_periodic(di, + false); + } + /* USB connected */ + else if (ret.intval && !di->events.usb_conn) { + di->events.usb_conn = true; + if (di->bat->auto_trig) + ab5500_btemp_periodic(di, true); + } + break; + default: + break; + } + break; + default: + break; + } + } + return 0; +} + +/** + * ab5500_btemp_external_power_changed() - callback for power supply changes + * @psy: pointer to the structure power_supply + * + * This function is pointing to the function pointer external_power_changed + * of the structure power_supply. + * This function gets executed when there is a change in the external power + * supply to the btemp. 
+ */ +static void ab5500_btemp_external_power_changed(struct power_supply *psy) +{ + struct ab5500_btemp *di = to_ab5500_btemp_device_info(psy); + + class_for_each_device(power_supply_class, NULL, + &di->btemp_psy, ab5500_btemp_get_ext_psy_data); +} + +/* ab5500 btemp driver interrupts and their respective isr */ +static struct ab5500_btemp_interrupts ab5500_btemp_irq[] = { + {"BATT_REMOVAL", ab5500_btemp_batt_removal_handler}, + {"BATT_ATTACH", ab5500_btemp_batt_attach_handler}, +}; + +static int ab5500_btemp_bat_temp_trig(int mux) +{ + struct ab5500_btemp *di = ab5500_btemp_get(); + int temp = ab5500_btemp_measure_temp(di); + + if (temp < (BTEMP_THERMAL_LOW_LIMIT+1)) { + dev_err(di->dev, + "battery temp less than lower threshold (-10 deg cel)\n"); + power_supply_changed(&di->btemp_psy); + } else if (temp > (BTEMP_THERMAL_HIGH_LIMIT_62-1)) { + dev_err(di->dev, "battery temp greater them max threshold\n"); + power_supply_changed(&di->btemp_psy); + } + + return 0; +} + +static int ab5500_btemp_auto_temp(struct ab5500_btemp *di) +{ + struct adc_auto_input *auto_ip; + int ret = 0; + + auto_ip = kzalloc(sizeof(struct adc_auto_input), GFP_KERNEL); + if (!auto_ip) { + dev_err(di->dev, "failed to allocate memory\n"); + return -ENOMEM; + } + + auto_ip->mux = BTEMP_BALL; + auto_ip->freq = MS500; + auto_ip->min = ab5500_btemp_temp_to_volt(di, + BTEMP_THERMAL_HIGH_LIMIT_62); + auto_ip->max = ab5500_btemp_temp_to_volt(di, + BTEMP_THERMAL_LOW_LIMIT); + auto_ip->auto_adc_callback = ab5500_btemp_bat_temp_trig; + di->gpadc_auto = auto_ip; + ret = ab5500_gpadc_convert_auto(di->gpadc, di->gpadc_auto); + if (ret) + dev_err(di->dev, + "failed to set auto trigger for battery temp\n"); + return ret; +} + +#if defined(CONFIG_PM) +static int ab5500_btemp_resume(struct platform_device *pdev) +{ + struct ab5500_btemp *di = platform_get_drvdata(pdev); + + if (di->events.usb_conn) + ab5500_btemp_periodic(di, true); + + return 0; +} + +static int ab5500_btemp_suspend(struct platform_device 
*pdev, + pm_message_t state) +{ + struct ab5500_btemp *di = platform_get_drvdata(pdev); + + if (di->events.usb_conn) + ab5500_btemp_periodic(di, false); + + return 0; +} +#else +#define ab5500_btemp_suspend NULL +#define ab5500_btemp_resume NULL +#endif + +static int __devexit ab5500_btemp_remove(struct platform_device *pdev) +{ + struct ab5500_btemp *di = platform_get_drvdata(pdev); + int i, irq; + + /* Disable interrupts */ + for (i = 0; i < ARRAY_SIZE(ab5500_btemp_irq); i++) { + irq = platform_get_irq_byname(pdev, ab5500_btemp_irq[i].name); + free_irq(irq, di); + } + + /* Delete the work queue */ + destroy_workqueue(di->btemp_wq); + + flush_scheduled_work(); + power_supply_unregister(&di->btemp_psy); + platform_set_drvdata(pdev, NULL); + kfree(di->gpadc_auto); + kfree(di); + + return 0; +} + +static int __devinit ab5500_btemp_probe(struct platform_device *pdev) +{ + int irq, i, ret = 0; + struct abx500_bm_plat_data *plat_data; + + struct ab5500_btemp *di = + kzalloc(sizeof(struct ab5500_btemp), GFP_KERNEL); + if (!di) + return -ENOMEM; + + /* get parent data */ + di->dev = &pdev->dev; + di->parent = dev_get_drvdata(pdev->dev.parent); + di->gpadc = ab5500_gpadc_get("ab5500-adc.0"); + + plat_data = pdev->dev.platform_data; + di->pdata = plat_data->btemp; + di->bat = plat_data->battery; + + /* get btemp specific platform data */ + if (!di->pdata) { + dev_err(di->dev, "no btemp platform data supplied\n"); + ret = -EINVAL; + goto free_device_info; + } + + /* get battery specific platform data */ + if (!di->bat) { + dev_err(di->dev, "no battery platform data supplied\n"); + ret = -EINVAL; + goto free_device_info; + } + + /* BTEMP supply */ + di->btemp_psy.name = "ab5500_btemp"; + di->btemp_psy.type = POWER_SUPPLY_TYPE_BATTERY; + di->btemp_psy.properties = ab5500_btemp_props; + di->btemp_psy.num_properties = ARRAY_SIZE(ab5500_btemp_props); + di->btemp_psy.get_property = ab5500_btemp_get_property; + di->btemp_psy.supplied_to = di->pdata->supplied_to; + 
di->btemp_psy.num_supplicants = di->pdata->num_supplicants; + di->btemp_psy.external_power_changed = + ab5500_btemp_external_power_changed; + + + /* Create a work queue for the btemp */ + di->btemp_wq = + create_singlethread_workqueue("ab5500_btemp_wq"); + if (di->btemp_wq == NULL) { + dev_err(di->dev, "failed to create work queue\n"); + goto free_device_info; + } + + /* Init work for measuring temperature periodically */ + INIT_DELAYED_WORK_DEFERRABLE(&di->btemp_periodic_work, + ab5500_btemp_periodic_work); + + /* Get Chip ID of the ABB ASIC */ + ret = abx500_get_chip_id(di->dev); + if (ret < 0) { + dev_err(di->dev, "failed to get chip ID\n"); + goto free_btemp_wq; + } + di->chip_id = ret; + dev_dbg(di->dev, "ab5500 CID is: 0x%02x\n", + di->chip_id); + + /* Identify the battery */ + if (ab5500_btemp_id(di) < 0) + dev_warn(di->dev, "failed to identify the battery\n"); + + /* Measure temperature once initially */ + di->bat_temp = ab5500_btemp_measure_temp(di); + di->bat->temp_now = di->bat_temp; + + /* Register BTEMP power supply class */ + ret = power_supply_register(di->dev, &di->btemp_psy); + if (ret) { + dev_err(di->dev, "failed to register BTEMP psy\n"); + goto free_btemp_wq; + } + + /* Register interrupts */ + for (i = 0; i < ARRAY_SIZE(ab5500_btemp_irq); i++) { + irq = platform_get_irq_byname(pdev, ab5500_btemp_irq[i].name); + ret = request_threaded_irq(irq, NULL, ab5500_btemp_irq[i].isr, + IRQF_SHARED | IRQF_NO_SUSPEND, + ab5500_btemp_irq[i].name, di); + + if (ret) { + dev_err(di->dev, "failed to request %s IRQ %d: %d\n" + , ab5500_btemp_irq[i].name, irq, ret); + goto free_irq; + } + dev_dbg(di->dev, "Requested %s IRQ %d: %d\n", + ab5500_btemp_irq[i].name, irq, ret); + } + + if (!di->bat->auto_trig) { + /* Schedule monitoring work only if battery type is known */ + if (di->bat->batt_id != BATTERY_UNKNOWN) + queue_delayed_work(di->btemp_wq, &di->btemp_periodic_work, 0); + } else { + ret = ab5500_btemp_auto_temp(di); + if (ret) { + dev_err(di->dev, + "failed 
to register auto trigger for battery temp\n"); + goto free_irq; + } + } + + platform_set_drvdata(pdev, di); + list_add_tail(&di->node, &ab5500_btemp_list); + + dev_info(di->dev, "probe success\n"); + return ret; + +free_irq: + power_supply_unregister(&di->btemp_psy); + + /* We also have to free all successfully registered irqs */ + for (i = i - 1; i >= 0; i--) { + irq = platform_get_irq_byname(pdev, ab5500_btemp_irq[i].name); + free_irq(irq, di); + } +free_btemp_wq: + destroy_workqueue(di->btemp_wq); +free_device_info: + kfree(di); + + return ret; +} + +static struct platform_driver ab5500_btemp_driver = { + .probe = ab5500_btemp_probe, + .remove = __devexit_p(ab5500_btemp_remove), + .suspend = ab5500_btemp_suspend, + .resume = ab5500_btemp_resume, + .driver = { + .name = "ab5500-btemp", + .owner = THIS_MODULE, + }, +}; + +static int __init ab5500_btemp_init(void) +{ + return platform_driver_register(&ab5500_btemp_driver); +} + +static void __exit ab5500_btemp_exit(void) +{ + platform_driver_unregister(&ab5500_btemp_driver); +} + +subsys_initcall_sync(ab5500_btemp_init); +module_exit(ab5500_btemp_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Johan Palsson, Karl Komierowski"); +MODULE_ALIAS("platform:ab5500-btemp"); +MODULE_DESCRIPTION("AB5500 battery temperature driver"); diff --git a/drivers/power/ab5500_charger.c b/drivers/power/ab5500_charger.c new file mode 100644 index 00000000000..b90c51a4f31 --- /dev/null +++ b/drivers/power/ab5500_charger.c @@ -0,0 +1,1820 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Charger driver for AB5500 + * + * License Terms: GNU General Public License v2 + * Authors: + * Johan Palsson <johan.palsson@stericsson.com> + * Karl Komierowski <karl.komierowski@stericsson.com> + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/platform_device.h> +#include <linux/power_supply.h> +#include 
<linux/completion.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab5500.h>
#include <linux/mfd/abx500/ab5500-bm.h>
#include <linux/mfd/abx500/ab5500-gpadc.h>
#include <linux/mfd/abx500/ux500_chargalg.h>
#include <linux/usb/otg.h>

/*
 * Charger connection states reported by ab5500_charger_detect_chargers().
 * NOTE(review): value 1 is skipped — presumably reserved for an AC charger
 * bit on other abx500 variants; confirm against the chargalg users.
 */
#define NO_PW_CONN			0
#define USB_PW_CONN			2

/* HW failure constants */
#define VBUS_CH_NOK			0x0A
#define VBUS_OVV_TH			0x06

/* AB5500 Charger constants */
#define AB5500_USB_LINK_STATUS		0x78
#define CHARGER_REV_SUP			0x10
#define SW_EOC				0x40
#define USB_CHAR_DET			0x02
#define VBUS_RISING			0x20
#define VBUS_FALLING			0x40
#define USB_LINK_UPDATE			0x02
#define USB_CH_TH_PROT_LOW		0x02
#define USB_CH_TH_PROT_HIGH		0x01
#define USB_ID_HOST_DET_ENA_MASK	0x02
#define USB_ID_HOST_DET_ENA		0x02
#define USB_ID_DEVICE_DET_ENA_MASK	0x01
#define USB_ID_DEVICE_DET_ENA		0x01
#define CHARGER_ISET_IN_1_1A		0x0C
#define LED_ENABLE			0x01
#define RESET				0x00
#define SSW_ENABLE_REBOOT		0x80
#define SSW_REBOOT_EN			0x40
#define SSW_CONTROL_AUTOC		0x04
#define SSW_PSEL_480S			0x00

/* UsbLineStatus register - usb types */
enum ab5500_charger_link_status {
	USB_STAT_NOT_CONFIGURED,
	USB_STAT_STD_HOST_NC,
	USB_STAT_STD_HOST_C_NS,
	USB_STAT_STD_HOST_C_S,
	USB_STAT_HOST_CHG_NM,
	USB_STAT_HOST_CHG_HS,
	USB_STAT_HOST_CHG_HS_CHIRP,
	USB_STAT_DEDICATED_CHG,
	USB_STAT_ACA_RID_A,
	USB_STAT_ACA_RID_B,
	USB_STAT_ACA_RID_C_NM,
	USB_STAT_ACA_RID_C_HS,
	USB_STAT_ACA_RID_C_HS_CHIRP,
	USB_STAT_HM_IDGND,
	USB_STAT_RESERVED,
	USB_STAT_NOT_VALID_LINK,
};

/* USB bus states, fabricated from the OTG notifier events */
enum ab5500_usb_state {
	AB5500_BM_USB_STATE_RESET_HS,	/* HighSpeed Reset */
	AB5500_BM_USB_STATE_RESET_FS,	/* FullSpeed/LowSpeed Reset */
	AB5500_BM_USB_STATE_CONFIGURED,
	AB5500_BM_USB_STATE_SUSPEND,
	AB5500_BM_USB_STATE_RESUME,
	AB5500_BM_USB_STATE_MAX,
};

/* VBUS input current limits supported in AB5500 in mA */
#define USB_CH_IP_CUR_LVL_0P05		50
#define USB_CH_IP_CUR_LVL_0P09		98
#define USB_CH_IP_CUR_LVL_0P19		193
#define USB_CH_IP_CUR_LVL_0P29		290
#define USB_CH_IP_CUR_LVL_0P38		380
#define USB_CH_IP_CUR_LVL_0P45		450
#define USB_CH_IP_CUR_LVL_0P5		500
#define USB_CH_IP_CUR_LVL_0P6		600
#define USB_CH_IP_CUR_LVL_0P7		700
#define USB_CH_IP_CUR_LVL_0P8		800
#define USB_CH_IP_CUR_LVL_0P9		900
#define USB_CH_IP_CUR_LVL_1P0		1000
#define USB_CH_IP_CUR_LVL_1P1		1100
#define USB_CH_IP_CUR_LVL_1P3		1300
#define USB_CH_IP_CUR_LVL_1P4		1400
#define USB_CH_IP_CUR_LVL_1P5		1500

/* The charger embeds its ux500_charger as the member usb_chg */
#define to_ab5500_charger_usb_device_info(x) container_of((x), \
	struct ab5500_charger, usb_chg)

/**
 * struct ab5500_charger_interrupts - ab5500 interrupts
 * @name:	name of the interrupt
 * @isr:	function pointer to the isr
 */
struct ab5500_charger_interrupts {
	char *name;
	irqreturn_t (*isr)(int irq, void *data);
};

/* Per-charger status, mirrored out through the power_supply properties */
struct ab5500_charger_info {
	int charger_connected;
	int charger_online;
	int charger_voltage;
	int cv_active;
	bool wd_expired;
};

/* Latched HW failure events, reported via POWER_SUPPLY_PROP_HEALTH */
struct ab5500_charger_event_flags {
	bool usb_thermal_prot;
	bool vbus_ovv;
	bool usbchargernotok;
	bool vbus_collapse;
};

/* State pushed in from the USB stack via the OTG notifier */
struct ab5500_charger_usb_state {
	bool usb_changed;
	int usb_current;
	enum ab5500_usb_state state;
	spinlock_t usb_lock;
};

/**
 * struct ab5500_charger - ab5500 Charger device information
 * @dev:		Pointer to the structure device
 * @chip_id:		Chip-Id of the ab5500
 * @max_usb_in_curr:	Max USB charger input current
 * @vbus_detected:	VBUS detected
 * @vbus_detected_start:
 *			VBUS detected during startup
 * @parent:		Pointer to the struct ab5500
 * @gpadc:		Pointer to the struct gpadc
 * @pdata:		Pointer to the ab5500_charger platform data
 * @bat:		Pointer to the ab5500_bm platform data
 * @flags:		Structure for information about events triggered
 * @usb_state:		Structure for usb stack information
 * @usb_chg:		USB charger power supply
 * @usb:		Structure that holds the USB charger properties
 * @charger_wq:		Work queue for the IRQs and checking HW state
 * @check_hw_failure_work:	Work for checking HW state
 * @check_usbchgnotok_work:	Work for checking USB charger not ok status
 * @detect_usb_type_work:	Work for detecting the USB type connected
 * @usb_link_status_work:	Work for checking the new USB link status
 * @usb_state_changed_work:	Work for checking USB state
 * @check_usb_thermal_prot_work:
 *			Work for checking USB thermal status
 * @otg:		pointer to struct otg_transceiver, used to
 *			notify the current during a standard host
 *			charger.
 * @nb:			structure of type notifier_block, which has
 *			a function pointer referenced by usb driver.
 */
struct ab5500_charger {
	struct device *dev;
	u8 chip_id;
	int max_usb_in_curr;
	bool vbus_detected;
	bool vbus_detected_start;
	struct ab5500 *parent;
	struct ab5500_gpadc *gpadc;
	struct abx500_charger_platform_data *pdata;
	struct abx500_bm_data *bat;
	struct ab5500_charger_event_flags flags;
	struct ab5500_charger_usb_state usb_state;
	struct ux500_charger usb_chg;
	struct ab5500_charger_info usb;
	struct workqueue_struct *charger_wq;
	struct delayed_work check_hw_failure_work;
	struct delayed_work check_usbchgnotok_work;
	struct work_struct detect_usb_type_work;
	struct work_struct usb_link_status_work;
	struct work_struct usb_state_changed_work;
	struct work_struct check_usb_thermal_prot_work;
	struct otg_transceiver *otg;
	struct notifier_block nb;
};

/* USB properties */
static enum power_supply_property ab5500_charger_usb_props[] = {
	POWER_SUPPLY_PROP_HEALTH,
	POWER_SUPPLY_PROP_CURRENT_AVG,
	POWER_SUPPLY_PROP_PRESENT,
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_CURRENT_NOW,
};

/**
 * ab5500_charger_get_vbus_voltage() - get vbus
voltage + * @di: pointer to the ab5500_charger structure + * + * This function returns the vbus voltage. + * Returns vbus voltage (on success) + */ +static int ab5500_charger_get_vbus_voltage(struct ab5500_charger *di) +{ + int vch; + + /* Only measure voltage if the charger is connected */ + if (di->usb.charger_connected) { + vch = ab5500_gpadc_convert(di->gpadc, VBUS_V); + if (vch < 0) + dev_err(di->dev, "%s gpadc conv failed\n", __func__); + } else { + vch = 0; + } + return vch; +} + +/** + * ab5500_charger_get_usb_current() - get usb charger current + * @di: pointer to the ab5500_charger structure + * + * This function returns the usb charger current. + * Returns usb current (on success) and error code on failure + */ +static int ab5500_charger_get_usb_current(struct ab5500_charger *di) +{ + int ich; + + /* Only measure current if the charger is online */ + if (di->usb.charger_online) { + ich = ab5500_gpadc_convert(di->gpadc, USB_CHARGER_C); + if (ich < 0) + dev_err(di->dev, "%s gpadc conv failed\n", __func__); + } else { + ich = 0; + } + return ich; +} + +/** + * ab5500_charger_detect_chargers() - Detect the connected chargers + * @di: pointer to the ab5500_charger structure + * + * Returns the type of charger connected. + * For USB it will not mean we can actually charge from it + * but that there is a USB cable connected that we have to + * identify. 
This is used during startup when we don't get
 * interrupts of the charger detection
 *
 * Returns an integer value, that means,
 * NO_PW_CONN  no power supply is connected
 * USB_PW_CONN  if the USB power supply is connected
 */
static int ab5500_charger_detect_chargers(struct ab5500_charger *di)
{
	int result = NO_PW_CONN;
	int ret;
	u8 val;
	/* Check for USB charger */
	/*
	 * TODO: Since there are no status register validating by
	 * reading the IT source registers
	 */
	ret = abx500_get_register_interruptible(di->dev, AB5500_BANK_IT,
			AB5500_IT_SOURCE8, &val);
	if (ret < 0) {
		dev_err(di->dev, "%s ab5500 read failed\n", __func__);
		return ret;
	}

	/* VBUS rising => cable inserted; falling => removed */
	if (val & VBUS_RISING)
		result |= USB_PW_CONN;
	else if (val & VBUS_FALLING)
		result = NO_PW_CONN;

	return result;
}

/**
 * ab5500_charger_max_usb_curr() - get the max curr for the USB type
 * @di:		pointer to the ab5500_charger structure
 * @link_status:	the identified USB type
 *
 * Get the maximum current that is allowed to be drawn from the host
 * based on the USB type.
 * Returns error code in case of failure else 0 on success
 */
static int ab5500_charger_max_usb_curr(struct ab5500_charger *di,
	enum ab5500_charger_link_status link_status)
{
	int ret = 0;

	switch (link_status) {
	case USB_STAT_STD_HOST_NC:
	case USB_STAT_STD_HOST_C_NS:
	case USB_STAT_STD_HOST_C_S:
		/* Standard host: wait for the USB stack to negotiate more */
		dev_dbg(di->dev, "USB Type - Standard host is "
			"detected through USB driver\n");
		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P09;
		break;
	case USB_STAT_HOST_CHG_HS_CHIRP:
		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
		break;
	case USB_STAT_HOST_CHG_HS:
	case USB_STAT_ACA_RID_C_HS:
		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P9;
		break;
	case USB_STAT_ACA_RID_A:
		/*
		 * Dedicated charger level minus maximum current accessory
		 * can consume (300mA). Closest level is 1100mA
		 */
		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P1;
		break;
	case USB_STAT_ACA_RID_B:
		/*
		 * Dedicated charger level minus 120mA (20mA for ACA and
		 * 100mA for potential accessory). Closest level is 1300mA
		 */
		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P3;
		break;
	case USB_STAT_DEDICATED_CHG:
	case USB_STAT_HOST_CHG_NM:
	case USB_STAT_ACA_RID_C_HS_CHIRP:
	case USB_STAT_ACA_RID_C_NM:
		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5;
		break;
	case USB_STAT_RESERVED:
		/*
		 * This state is used to indicate that VBUS has dropped below
		 * the detection level 4 times in a row. This is due to the
		 * charger output current is set to high making the charger
		 * voltage collapse. This have to be propagated through to
		 * chargalg. This is done using the property
		 * POWER_SUPPLY_PROP_CURRENT_AVG = 1
		 */
		di->flags.vbus_collapse = true;
		dev_dbg(di->dev, "USB Type - USB_STAT_RESERVED "
			"VBUS has collapsed\n");
		ret = -1;
		break;
	case USB_STAT_HM_IDGND:
	case USB_STAT_NOT_CONFIGURED:
	case USB_STAT_NOT_VALID_LINK:
		dev_err(di->dev, "USB Type - Charging not allowed\n");
		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
		ret = -ENXIO;
		break;
	default:
		dev_err(di->dev, "USB Type - Unknown\n");
		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
		ret = -ENXIO;
		break;
	};

	dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d",
		link_status, di->max_usb_in_curr);

	return ret;
}

/**
 * ab5500_charger_read_usb_type() - read the type of usb connected
 * @di:		pointer to the ab5500_charger structure
 *
 * Detect the type of the plugged USB
 * Returns error code in case of failure else 0 on success
 */
static int ab5500_charger_read_usb_type(struct ab5500_charger *di)
{
	int ret;
	u8 val;

	ret = abx500_get_register_interruptible(di->dev, AB5500_BANK_USB,
		AB5500_USB_LINE_STATUS, &val);
	if (ret < 0) {
		dev_err(di->dev, "%s ab5500 read failed\n", __func__);
		return ret;
	}

	/* get the USB type: link status lives in bits [6:3] of the register */
	val = (val & AB5500_USB_LINK_STATUS) >> 3;
	ret = ab5500_charger_max_usb_curr(di,
		(enum ab5500_charger_link_status) val);

	return ret;
}

/*
 * Maps the VSRC register index to charge voltage in mV.
 * NOTE(review): the step changes from 25mV to 10mV after 4050 — confirm
 * against the AB5500 product specification.
 */
static int ab5500_charger_voltage_map[] = {
	3500,
	3525,
	3550,
	3575,
	3600,
	3625,
	3650,
	3675,
	3700,
	3725,
	3750,
	3775,
	3800,
	3825,
	3850,
	3875,
	3900,
	3925,
	3950,
	3975,
	4000,
	4025,
	4050,
	4060,
	4070,
	4080,
	4090,
	4100,
	4110,
	4120,
	4130,
	4140,
	4150,
	4160,
	4170,
	4180,
	4190,
	4200,
	4210,
	4220,
	4230,
	4240,
	4250,
	4260,
	4270,
	4280,
	4290,
	4300,
	4310,
	4320,
	4330,
	4340,
	4350,
	4360,
	4370,
	4380,
	4390,
	4400,
	4410,
	4420,
	4430,
	4440,
	4450,
	4460,
	4470,
	4480,
	4490,
	4500,
	4510,
	4520,
	4530,
	4540,
	4550,
	4560,
	4570,
	4580,
	4590,
	4600,
};

/*
 * This array maps the raw hex value to charger current used by the ab5500
 * Values taken from the AB5500 product specification manual
 */
static int ab5500_charger_current_map[] = {
	100,
	200,
	300,
	400,
	500,
	600,
	700,
	800,
	900,
	1000,
	1100,
	1200,
	1300,
	1400,
	1500,
	1500,
};

/*
 * Maps the ICSR register index to VBUS input current limit in mA.
 * NOTE(review): entry 93 differs from USB_CH_IP_CUR_LVL_0P09 (98) —
 * confirm which value matches the hardware.
 */
static int ab5500_icsr_current_map[] = {
	50,
	93,
	193,
	290,
	380,
	450,
	500,
	600,
	700,
	800,
	900,
	1000,
	1100,
	1300,
	1400,
	1500,
};

/*
 * Maps the CVREC register index to recharge voltage in mV.
 * NOTE(review): the table jumps from 3925 to 4000; 3950/3975 appear to be
 * missing — confirm against the AB5500 spec.
 */
static int ab5500_cvrec_voltage_map[] = {
	3300,
	3325,
	3350,
	3375,
	3400,
	3425,
	3450,
	3475,
	3500,
	3525,
	3550,
	3575,
	3600,
	3625,
	3650,
	3675,
	3700,
	3725,
	3750,
	3775,
	3800,
	3825,
	3850,
	3875,
	3900,
	3925,
	4000,
	4025,
	4050,
	4075,
	4100,
	4125,
	4150,
	4175,
	4200,
	4225,
	4250,
	4275,
	4300,
	4325,
	4350,
	4375,
	4400,
	4425,
	4450,
	4475,
	4500,
	4525,
	4550,
	4575,
	4600,
};

/*
 * Convert a recharge voltage (mV) to a CVREC register index.
 * Returns the index of the largest table entry <= voltage, clamping
 * below-range requests to 0; -1 if voltage is above the table range.
 */
static int ab5500_cvrec_voltage_to_regval(int voltage)
{
	int i;

	/* Special case for voltage below 3.3V */
	if (voltage < ab5500_cvrec_voltage_map[0])
		return 0;

	for (i = 1; i < ARRAY_SIZE(ab5500_cvrec_voltage_map); i++) {
		if (voltage < ab5500_cvrec_voltage_map[i])
			return i - 1;
	}

	/* If not last element, return error */
	i = ARRAY_SIZE(ab5500_cvrec_voltage_map) - 1;
	if (voltage == ab5500_cvrec_voltage_map[i])
		return i;
	else
		return -1;
}

/*
 * Convert a charge voltage (mV) to a VSRC register index; same clamping
 * and error convention as ab5500_cvrec_voltage_to_regval().
 */
static int ab5500_voltage_to_regval(int voltage)
{
	int i;

	/* Special case for voltage below 3.3V */
	if (voltage < ab5500_charger_voltage_map[0])
		return 0;

	for (i = 1; i < ARRAY_SIZE(ab5500_charger_voltage_map); i++) {
		if (voltage < ab5500_charger_voltage_map[i])
			return i - 1;
	}

	/* If not last element, return error */
	i = ARRAY_SIZE(ab5500_charger_voltage_map) - 1;
	if (voltage == ab5500_charger_voltage_map[i])
		return i;
	else
		return -1;
}

/*
 * Convert a VBUS input current limit (mA) to an ICSR register index.
 * NOTE(review): the loop starts at 0 unlike the voltage converters (i = 1);
 * behavior is the same because curr < map[0] is handled above, but the
 * inconsistency is worth unifying.
 */
static int ab5500_icsr_curr_to_regval(int curr)
{
	int i;

	if (curr < ab5500_icsr_current_map[0])
		return 0;

	for (i = 0; i < ARRAY_SIZE(ab5500_icsr_current_map); i++) {
		if (curr < ab5500_icsr_current_map[i])
			return i - 1;
	}

	/* If not last element, return error */
	i = ARRAY_SIZE(ab5500_icsr_current_map) - 1;
	if (curr == ab5500_icsr_current_map[i])
		return i;
	else
		return -1;
}

/*
 * Convert a charger output current (mA) to an OCSRV register index;
 * same convention as ab5500_icsr_curr_to_regval().
 */
static int ab5500_current_to_regval(int curr)
{
	int i;

	if (curr < ab5500_charger_current_map[0])
		return 0;

	for (i = 0; i < ARRAY_SIZE(ab5500_charger_current_map); i++) {
		if (curr < ab5500_charger_current_map[i])
			return i - 1;
	}

	/* If not last element, return error */
	i = ARRAY_SIZE(ab5500_charger_current_map) - 1;
	if (curr == ab5500_charger_current_map[i])
		return i;
	else
		return -1;
}

/**
 * ab5500_charger_get_usb_cur() - get usb current
 * @di:		pointer to the ab5500_charger structure
 *
 * The usb stack provides the maximum current that can be drawn from
 * the standard usb host. This will be in mA.
 * This function converts current in mA to a value that can be written
 * to the register.
Returns -1 if charging is not allowed + */ +static int ab5500_charger_get_usb_cur(struct ab5500_charger *di) +{ + switch (di->usb_state.usb_current) { + case 50: + di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05; + break; + case 100: + di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P09; + break; + case 200: + di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P19; + break; + case 300: + di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P29; + break; + case 400: + di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P38; + break; + case 500: + di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5; + break; + default: + di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05; + return -1; + break; + }; + return 0; +} + +/** + * ab5500_charger_set_vbus_in_curr() - set VBUS input current limit + * @di: pointer to the ab5500_charger structure + * @ich_in: charger input current limit + * + * Sets the current that can be drawn from the USB host + * Returns error code in case of failure else 0(on success) + */ +static int ab5500_charger_set_vbus_in_curr(struct ab5500_charger *di, + int ich_in) +{ + int ret; + int input_curr_index; + int min_value; + + /* We should always use to lowest current limit */ + min_value = min(di->bat->chg_params->usb_curr_max, ich_in); + + input_curr_index = ab5500_icsr_curr_to_regval(min_value); + if (input_curr_index < 0) { + dev_err(di->dev, "VBUS input current limit too high\n"); + return -ENXIO; + } + + ret = abx500_set_register_interruptible(di->dev, AB5500_BANK_CHG, + AB5500_ICSR, input_curr_index); + if (ret) + dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__); + + return ret; +} + +/** + * ab5500_charger_usb_en() - enable usb charging + * @di: pointer to the ab5500_charger structure + * @enable: enable/disable flag + * @vset: charging voltage + * @ich_out: charger output current + * + * Enable/Disable USB charging and turns on/off the charging led respectively. 
+ * Returns error code in case of failure else 0(on success) + */ +static int ab5500_charger_usb_en(struct ux500_charger *charger, + int enable, int vset, int ich_out) +{ + int ret; + int volt_index; + int curr_index; + + struct ab5500_charger *di = to_ab5500_charger_usb_device_info(charger); + + if (enable) { + /* Check if USB is connected */ + if (!di->usb.charger_connected) { + dev_err(di->dev, "USB charger not connected\n"); + return -ENXIO; + } + + /* Enable USB charging */ + dev_dbg(di->dev, "Enable USB: %dmV %dmA\n", vset, ich_out); + + volt_index = ab5500_voltage_to_regval(vset); + curr_index = ab5500_current_to_regval(ich_out) ; + + /* ChVoltLevel: max voltage upto which battery can be charged */ + ret = abx500_set_register_interruptible(di->dev, + AB5500_BANK_CHG, AB5500_VSRC, (u8) volt_index); + if (ret) { + dev_err(di->dev, "%s write failed %d\n", + __func__, __LINE__); + return ret; + } + + /* current that can be drawn from the usb */ + ret = ab5500_charger_set_vbus_in_curr(di, ich_out); + if (ret) { + dev_err(di->dev, "%s setting icsr failed %d\n", + __func__, __LINE__); + return ret; + } + + /* ChOutputCurentLevel: protected output current */ + ret = abx500_set_register_interruptible(di->dev, + AB5500_BANK_CHG, AB5500_OCSRV, (u8) curr_index); + if (ret) { + dev_err(di->dev, "%s write failed %d\n", + __func__, __LINE__); + return ret; + } + + /* + * Battery voltage when charging should be resumed after + * completion of charging + */ + ret = abx500_set_register_interruptible(di->dev, + AB5500_BANK_CHG, AB5500_CVREC, + ab5500_cvrec_voltage_to_regval( + di->bat->bat_type[di->bat->batt_id].recharge_vol)); + if (ret) { + dev_err(di->dev, "%s write failed %d\n", + __func__, __LINE__); + return ret; + } + /* + * Battery temperature: + * Input to the TBDATA register corresponds to the battery + * temperature(temp being multiples of 2) + * In order to obatain the value to be written to this reg + * divide the temperature obtained from gpadc by 2 + */ + ret = 
abx500_set_register_interruptible(di->dev, + AB5500_BANK_CHG, AB5500_TBDATA, + di->bat->temp_now / 2); + if (ret) { + dev_err(di->dev, "%s write failed %d\n", + __func__, __LINE__); + return ret; + } + + /* If success power on charging LED indication */ + ret = abx500_set_register_interruptible(di->dev, + AB5500_BANK_CHG, AB5500_LEDT, LED_ENABLE); + if (ret) { + dev_err(di->dev, "%s write failed %d\n", + __func__, __LINE__); + return ret; + } + + /* + * Register DCIOCURRENT is one among the charging watchdog + * rekick sequence, hence irrespective of usb charging this + * register will have to be written. + */ + ret = abx500_set_register_interruptible(di->dev, + AB5500_BANK_CHG, AB5500_DCIOCURRENT, + RESET); + if (ret) { + dev_err(di->dev, "%s write failed %d\n", + __func__, __LINE__); + return ret; + } + + di->usb.charger_online = 1; + } else { + /* ChVoltLevel: max voltage upto which battery can be charged */ + ret = abx500_set_register_interruptible(di->dev, + AB5500_BANK_CHG, AB5500_VSRC, RESET); + if (ret) { + dev_err(di->dev, "%s write failed %d\n", + __func__, __LINE__); + return ret; + } + /* USBChInputCurr: current that can be drawn from the usb */ + ret = ab5500_charger_set_vbus_in_curr(di, RESET); + if (ret) { + dev_err(di->dev, "%s resetting icsr failed %d\n", + __func__, __LINE__); + return ret; + } + /* If success power off charging LED indication */ + ret = abx500_set_register_interruptible(di->dev, + AB5500_BANK_CHG, AB5500_LEDT, RESET); + if (ret) { + dev_err(di->dev, "%s write failed %d\n", + __func__, __LINE__); + return ret; + } + di->usb.charger_online = 0; + di->usb.wd_expired = false; + dev_dbg(di->dev, "%s Disabled USB charging\n", __func__); + } + power_supply_changed(&di->usb_chg.psy); + + return ret; +} + +/** + * ab5500_charger_watchdog_kick() - kick charger watchdog + * @di: pointer to the ab5500_charger structure + * + * Kick charger watchdog + * Returns error code in case of failure else 0(on success) + */ +static int 
ab5500_charger_watchdog_kick(struct ux500_charger *charger) +{ + int ret; + struct ab5500_charger *di; + int volt_index, curr_index; + u8 value = 0; + + /* TODO: update */ + if (charger->psy.type == POWER_SUPPLY_TYPE_USB) + di = to_ab5500_charger_usb_device_info(charger); + else + return -ENXIO; + + ret = abx500_get_register_interruptible(di->dev, AB5500_BANK_STARTUP, + AB5500_MCB, &value); + if (ret) + dev_err(di->dev, "Failed to read!\n"); + + value = value | (SSW_ENABLE_REBOOT | SSW_REBOOT_EN | + SSW_CONTROL_AUTOC | SSW_PSEL_480S); + ret = abx500_set_register_interruptible(di->dev, AB5500_BANK_STARTUP, + AB5500_MCB, value); + if (ret) + dev_err(di->dev, "Failed to kick WD!\n"); + + volt_index = ab5500_voltage_to_regval( + di->bat->bat_type[di->bat->batt_id].normal_vol_lvl); + curr_index = ab5500_current_to_regval(di->max_usb_in_curr); + + /* ChVoltLevel: max voltage upto which battery can be charged */ + ret = abx500_set_register_interruptible(di->dev, + AB5500_BANK_CHG, AB5500_VSRC, (u8) volt_index); + if (ret) { + dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__); + return ret; + } + + /* current that can be drawn from the usb */ + ret = ab5500_charger_set_vbus_in_curr(di, di->max_usb_in_curr); + if (ret) { + dev_err(di->dev, "%s setting icsr failed %d\n", + __func__, __LINE__); + return ret; + } + + /* ChOutputCurentLevel: protected output current */ + ret = abx500_set_register_interruptible(di->dev, + AB5500_BANK_CHG, AB5500_OCSRV, (u8) curr_index); + if (ret) { + dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__); + return ret; + } + + /* + * Battery voltage when charging should be resumed after + * completion of charging + */ + /* Charger_Vrechar[5:0] = '4.025 V' */ + ret = abx500_set_register_interruptible(di->dev, + AB5500_BANK_CHG, AB5500_CVREC, + ab5500_cvrec_voltage_to_regval( + di->bat->bat_type[di->bat->batt_id].recharge_vol)); + if (ret) { + dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__); + return ret; + } + /* + 
* Battery temperature: + * Input to the TBDATA register corresponds to the battery + * temperature(temp being multiples of 2) + * In order to obatain the value to be written to this reg + * divide the temperature obtained from gpadc by 2 + */ + ret = abx500_set_register_interruptible(di->dev, + AB5500_BANK_CHG, AB5500_TBDATA, + di->bat->temp_now / 2); + if (ret) { + dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__); + return ret; + } + /* + * Register DCIOCURRENT is one among the charging watchdog + * rekick sequence, hence irrespective of usb charging this + * register will have to be written. + */ + ret = abx500_set_register_interruptible(di->dev, + AB5500_BANK_CHG, AB5500_DCIOCURRENT, + RESET); + if (ret) { + dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__); + return ret; + } + + return ret; +} + +/** + * ab5500_charger_update_charger_current() - update charger current + * @di: pointer to the ab5500_charger structure + * + * Update the charger output current for the specified charger + * Returns error code in case of failure else 0(on success) + */ +static int ab5500_charger_update_charger_current(struct ux500_charger *charger, + int ich_out) +{ + int ret = 0; + int curr_index; + struct ab5500_charger *di; + + if (charger->psy.type == POWER_SUPPLY_TYPE_USB) + di = to_ab5500_charger_usb_device_info(charger); + else + return -ENXIO; + + curr_index = ab5500_current_to_regval(ich_out); + if (curr_index < 0) { + dev_err(di->dev, + "Charger current too high, " + "charging not started\n"); + return -ENXIO; + } + + ret = abx500_set_register_interruptible(di->dev, AB5500_BANK_CHG, + AB5500_OCSRV, (u8) curr_index); + if (ret) { + dev_err(di->dev, "%s write failed %d\n", __func__, __LINE__); + return ret; + } + + return ret; +} + +/** + * ab5500_charger_check_hw_failure_work() - check main charger failure + * @work: pointer to the work_struct structure + * + * Work queue function for checking the main charger status + */ +static void 
ab5500_charger_check_hw_failure_work(struct work_struct *work) +{ + int ret; + u8 reg_value; + + struct ab5500_charger *di = container_of(work, + struct ab5500_charger, check_hw_failure_work.work); + + /* Check if the status bits for HW failure is still active */ + if (di->flags.vbus_ovv) { + ret = abx500_get_register_interruptible(di->dev, + AB5500_BANK_USB, AB5500_USB_PHY_STATUS, + ®_value); + if (ret < 0) { + dev_err(di->dev, "%s ab5500 read failed\n", __func__); + return; + } + if (!(reg_value & VBUS_OVV_TH)) { + di->flags.vbus_ovv = false; + power_supply_changed(&di->usb_chg.psy); + } + } + /* If we still have a failure, schedule a new check */ + if (di->flags.vbus_ovv) { + queue_delayed_work(di->charger_wq, + &di->check_hw_failure_work, round_jiffies(HZ)); + } +} + +/** + * ab5500_charger_detect_usb_type_work() - work to detect USB type + * @work: Pointer to the work_struct structure + * + * Detect the type of USB plugged + */ +void ab5500_charger_detect_usb_type_work(struct work_struct *work) +{ + int ret; + + struct ab5500_charger *di = container_of(work, + struct ab5500_charger, detect_usb_type_work); + + /* + * Since we can't be sure that the events are received + * synchronously, we have the check if is + * connected by reading the status register + */ + ret = ab5500_charger_detect_chargers(di); + if (ret < 0) + return; + + if (!(ret & USB_PW_CONN)) { + di->vbus_detected = 0; + di->usb.charger_connected = 0; + power_supply_changed(&di->usb_chg.psy); + } else { + di->vbus_detected = 1; + } +} + +/** + * ab5500_charger_usb_link_status_work() - work to detect USB type + * @work: pointer to the work_struct structure + * + * Detect the type of USB plugged + */ +static void ab5500_charger_usb_link_status_work(struct work_struct *work) +{ + int ret; + + struct ab5500_charger *di = container_of(work, + struct ab5500_charger, usb_link_status_work); + + /* + * Since we can't be sure that the events are received + * synchronously, we have the check if is + * 
connected by reading the status register + */ + ret = ab5500_charger_detect_chargers(di); + if (ret < 0) + return; + + if (!(ret & USB_PW_CONN)) { + di->vbus_detected = 0; + di->usb.charger_connected = 0; + power_supply_changed(&di->usb_chg.psy); + } else { + di->vbus_detected = 1; + ret = ab5500_charger_read_usb_type(di); + if (!ret) { + /* Update maximum input current */ + ret = ab5500_charger_set_vbus_in_curr(di, + di->max_usb_in_curr); + if (ret) + return; + + di->usb.charger_connected = 1; + power_supply_changed(&di->usb_chg.psy); + } else if (ret == -ENXIO) { + /* No valid charger type detected */ + di->usb.charger_connected = 0; + power_supply_changed(&di->usb_chg.psy); + } + } +} + +static void ab5500_charger_usb_state_changed_work(struct work_struct *work) +{ + int ret; + unsigned long flags; + struct ab5500_charger *di = container_of(work, + struct ab5500_charger, usb_state_changed_work); + + if (!di->vbus_detected) + return; + + spin_lock_irqsave(&di->usb_state.usb_lock, flags); + di->usb_state.usb_changed = false; + spin_unlock_irqrestore(&di->usb_state.usb_lock, flags); + + /* + * wait for some time until you get updates from the usb stack + * and negotiations are completed + */ + msleep(250); + + if (di->usb_state.usb_changed) + return; + + dev_dbg(di->dev, "%s USB state: 0x%02x mA: %d\n", + __func__, di->usb_state.state, di->usb_state.usb_current); + + switch (di->usb_state.state) { + case AB5500_BM_USB_STATE_RESET_HS: + case AB5500_BM_USB_STATE_RESET_FS: + case AB5500_BM_USB_STATE_SUSPEND: + case AB5500_BM_USB_STATE_MAX: + di->usb.charger_connected = 0; + power_supply_changed(&di->usb_chg.psy); + break; + + case AB5500_BM_USB_STATE_RESUME: + /* + * when suspend->resume there should be delay + * of 1sec for enabling charging + */ + msleep(1000); + /* Intentional fall through */ + case AB5500_BM_USB_STATE_CONFIGURED: + /* + * USB is configured, enable charging with the charging + * input current obtained from USB driver + */ + if 
(!ab5500_charger_get_usb_cur(di)) { + /* Update maximum input current */ + ret = ab5500_charger_set_vbus_in_curr(di, + di->max_usb_in_curr); + if (ret) + return; + + di->usb.charger_connected = 1; + power_supply_changed(&di->usb_chg.psy); + } + break; + + default: + break; + }; +} + +/** + * ab5500_charger_check_usbchargernotok_work() - check USB chg not ok status + * @work: pointer to the work_struct structure + * + * Work queue function for checking the USB charger Not OK status + */ +static void ab5500_charger_check_usbchargernotok_work(struct work_struct *work) +{ + int ret; + u8 reg_value; + bool prev_status; + + struct ab5500_charger *di = container_of(work, + struct ab5500_charger, check_usbchgnotok_work.work); + + /* Check if the status bit for usbchargernotok is still active */ + ret = abx500_get_register_interruptible(di->dev, + AB5500_BANK_USB, AB5500_CHGFSM_CHARGER_DETECT, ®_value); + if (ret < 0) { + dev_err(di->dev, "%s ab5500 read failed\n", __func__); + return; + } + prev_status = di->flags.usbchargernotok; + + if (reg_value & VBUS_CH_NOK) { + di->flags.usbchargernotok = true; + /* Check again in 1sec */ + queue_delayed_work(di->charger_wq, + &di->check_usbchgnotok_work, HZ); + } else { + di->flags.usbchargernotok = false; + di->flags.vbus_collapse = false; + } + + if (prev_status != di->flags.usbchargernotok) + power_supply_changed(&di->usb_chg.psy); +} + +/** + * ab5500_charger_check_usb_thermal_prot_work() - check usb thermal status + * @work: pointer to the work_struct structure + * + * Work queue function for checking the USB thermal prot status + */ +static void ab5500_charger_check_usb_thermal_prot_work( + struct work_struct *work) +{ + int ret; + u8 reg_value; + + struct ab5500_charger *di = container_of(work, + struct ab5500_charger, check_usb_thermal_prot_work); + + /* Check if the status bit for usb_thermal_prot is still active */ + /* TODO: Interrupt source reg 15 bit 4 */ + ret = abx500_get_register_interruptible(di->dev, + 
AB5500_BANK_USB, AB5500_CHGFSM_USB_BTEMP_CURR_LIM, ®_value); + if (ret < 0) { + dev_err(di->dev, "%s ab5500 read failed\n", __func__); + return; + } + if (reg_value & USB_CH_TH_PROT_LOW || reg_value & USB_CH_TH_PROT_HIGH) + di->flags.usb_thermal_prot = true; + else + di->flags.usb_thermal_prot = false; + + power_supply_changed(&di->usb_chg.psy); +} + +/** + * ab5500_charger_vbusdetf_handler() - VBUS falling detected + * @irq: interrupt number + * @_di: pointer to the ab5500_charger structure + * + * Returns IRQ status(IRQ_HANDLED) + */ +static irqreturn_t ab5500_charger_vbusdetf_handler(int irq, void *_di) +{ + struct ab5500_charger *di = _di; + + dev_dbg(di->dev, "VBUS falling detected\n"); + queue_work(di->charger_wq, &di->detect_usb_type_work); + + return IRQ_HANDLED; +} + +/** + * ab5500_charger_vbusdetr_handler() - VBUS rising detected + * @irq: interrupt number + * @_di: pointer to the ab5500_charger structure + * + * Returns IRQ status(IRQ_HANDLED) + */ +static irqreturn_t ab5500_charger_vbusdetr_handler(int irq, void *_di) +{ + struct ab5500_charger *di = _di; + + di->vbus_detected = true; + dev_dbg(di->dev, "VBUS rising detected\n"); + queue_work(di->charger_wq, &di->detect_usb_type_work); + + return IRQ_HANDLED; +} + +/** + * ab5500_charger_usblinkstatus_handler() - USB link status has changed + * @irq: interrupt number + * @_di: pointer to the ab5500_charger structure + * + * Returns IRQ status(IRQ_HANDLED) + */ +static irqreturn_t ab5500_charger_usblinkstatus_handler(int irq, void *_di) +{ + struct ab5500_charger *di = _di; + + dev_dbg(di->dev, "USB link status changed\n"); + + if (!di->usb.charger_online) + queue_work(di->charger_wq, &di->usb_link_status_work); + + return IRQ_HANDLED; +} + +/** + * ab5500_charger_usbchthprotr_handler() - Die temp is above usb charger + * thermal protection threshold + * @irq: interrupt number + * @_di: pointer to the ab5500_charger structure + * + * Returns IRQ status(IRQ_HANDLED) + */ +static irqreturn_t 
ab5500_charger_usbchthprotr_handler(int irq, void *_di)
{
	struct ab5500_charger *di = _di;

	dev_dbg(di->dev,
		"Die temp above USB charger thermal protection threshold\n");
	queue_work(di->charger_wq, &di->check_usb_thermal_prot_work);

	return IRQ_HANDLED;
}

/**
 * ab5500_charger_usbchargernotokr_handler() - USB charger not ok detected
 * @irq:	interrupt number
 * @_di:	pointer to the ab5500_charger structure
 *
 * Returns IRQ status(IRQ_HANDLED)
 */
static irqreturn_t ab5500_charger_usbchargernotokr_handler(int irq, void *_di)
{
	struct ab5500_charger *di = _di;

	dev_dbg(di->dev, "Not allowed USB charger detected\n");
	queue_delayed_work(di->charger_wq, &di->check_usbchgnotok_work, 0);

	return IRQ_HANDLED;
}

/**
 * ab5500_charger_chwdexp_handler() - Charger watchdog expired
 * @irq:	interrupt number
 * @_di:	pointer to the ab5500_charger structure
 *
 * Returns IRQ status(IRQ_HANDLED)
 *
 * NOTE(review): currently unused — its entry in ab5500_charger_irq[] is
 * commented out pending the cut 2 interrupt (see table below).
 */
static irqreturn_t ab5500_charger_chwdexp_handler(int irq, void *_di)
{
	struct ab5500_charger *di = _di;

	dev_dbg(di->dev, "Charger watchdog expired\n");

	/*
	 * The charger that was online when the watchdog expired
	 * needs to be restarted for charging to start again
	 */
	if (di->usb.charger_online) {
		di->usb.wd_expired = true;
		power_supply_changed(&di->usb_chg.psy);
	}

	return IRQ_HANDLED;
}

/**
 * ab5500_charger_vbusovv_handler() - VBUS overvoltage detected
 * @irq:	interrupt number
 * @_di:	pointer to the ab5500_charger structure
 *
 * Returns IRQ status(IRQ_HANDLED)
 */
static irqreturn_t ab5500_charger_vbusovv_handler(int irq, void *_di)
{
	struct ab5500_charger *di = _di;

	dev_dbg(di->dev, "VBUS overvoltage detected\n");
	di->flags.vbus_ovv = true;
	power_supply_changed(&di->usb_chg.psy);

	/* Schedule a new HW failure check */
	queue_delayed_work(di->charger_wq, &di->check_hw_failure_work, 0);

	return IRQ_HANDLED;
}

/**
 * ab5500_charger_usb_get_property() - get the usb properties
 * @psy:	pointer to the power_supply structure
 * @psp:	pointer to the power_supply_property structure
 * @val:	pointer to the power_supply_propval union
 *
 * This function gets called when an application tries to get the usb
 * properties by reading the sysfs files.
 * USB properties are online, present and voltage.
 * online:	usb charging is in progress or not
 * present:	presence of the usb
 * voltage:	vbus voltage
 * Returns error code in case of failure else 0(on success)
 */
static int ab5500_charger_usb_get_property(struct power_supply *psy,
	enum power_supply_property psp,
	union power_supply_propval *val)
{
	struct ab5500_charger *di;

	di = to_ab5500_charger_usb_device_info(psy_to_ux500_charger(psy));

	switch (psp) {
	case POWER_SUPPLY_PROP_HEALTH:
		/* Failure flags in decreasing order of severity */
		if (di->flags.usbchargernotok)
			val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
		else if (di->usb.wd_expired)
			val->intval = POWER_SUPPLY_HEALTH_DEAD;
		else if (di->flags.usb_thermal_prot)
			val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
		else if (di->flags.vbus_ovv)
			val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
		else
			val->intval = POWER_SUPPLY_HEALTH_GOOD;
		break;
	case POWER_SUPPLY_PROP_ONLINE:
		val->intval = di->usb.charger_online;
		break;
	case POWER_SUPPLY_PROP_PRESENT:
		val->intval = di->usb.charger_connected;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		/* GPADC reports mV; the property is expressed in uV */
		di->usb.charger_voltage = ab5500_charger_get_vbus_voltage(di);
		val->intval = di->usb.charger_voltage * 1000;
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		/* GPADC reports mA; the property is expressed in uA */
		val->intval = ab5500_charger_get_usb_current(di) * 1000;
		break;
	case POWER_SUPPLY_PROP_CURRENT_AVG:
		/*
		 * This property is used to indicate when VBUS has collapsed
		 * due to too high output current from the USB charger
		 */
		if (di->flags.vbus_collapse)
			val->intval = 1;
		else
			val->intval = 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/**
 * ab5500_charger_init_hw_registers() - Set up charger related registers
 * @di:		pointer to the ab5500_charger structure
 *
 * Set up charger OVV, watchdog and maximum voltage registers as well as
 * charging of the backup battery
 */
static int ab5500_charger_init_hw_registers(struct ab5500_charger *di)
{
	int ret = 0;

	/* Enable ID Host and Device detection */
	ret = abx500_mask_and_set_register_interruptible(di->dev,
		AB5500_BANK_USB, AB5500_USB_OTG_CTRL,
		USB_ID_HOST_DET_ENA_MASK, USB_ID_HOST_DET_ENA);
	if (ret) {
		dev_err(di->dev, "failed to enable usb charger detection\n");
		goto out;
	}
	ret = abx500_mask_and_set_register_interruptible(di->dev,
		AB5500_BANK_USB, AB5500_USB_OTG_CTRL,
		USB_ID_DEVICE_DET_ENA_MASK, USB_ID_DEVICE_DET_ENA);
	if (ret) {
		dev_err(di->dev, "failed to enable usb charger detection\n");
		goto out;
	}

	/* Over current protection for reverse supply */
	ret = abx500_mask_and_set_register_interruptible(di->dev,
		AB5500_BANK_CHG, AB5500_CREVS, CHARGER_REV_SUP,
		CHARGER_REV_SUP);
	if (ret) {
		dev_err(di->dev,
			"failed to enable over current protection for reverse supply\n");
		goto out;
	}

	/* Enable SW EOC at flatcurrent detection */
	ret = abx500_mask_and_set_register_interruptible(di->dev,
		AB5500_BANK_CHG, AB5500_CCTRL, SW_EOC, SW_EOC);
	if (ret) {
		dev_err(di->dev,
			"failed to enable end of charge at flatcurrent detection\n");
		goto out;
	}
out:
	return ret;
}

/*
 * ab5500 charger driver interrupts and their respective isr
 */
static struct ab5500_charger_interrupts ab5500_charger_irq[] = {
	{"VBUS_FALLING", ab5500_charger_vbusdetf_handler},
	{"VBUS_RISING", ab5500_charger_vbusdetr_handler},
	{"USB_LINK_UPDATE", ab5500_charger_usblinkstatus_handler},
	{"USB_CH_TH_PROTECTION", ab5500_charger_usbchthprotr_handler},
	{"USB_CH_NOT_OK", ab5500_charger_usbchargernotokr_handler},
	{"OVV", ab5500_charger_vbusovv_handler},
	/* TODO: Interrupt missing, will be available in cut 2 */
	/*{"CHG_SW_TIMER_OUT", ab5500_charger_chwdexp_handler},*/
};

/*
 * OTG notifier callback: translates the current (mA) advertised by the USB
 * stack into an ab5500_usb_state, records it under the usb_lock, and defers
 * the actual charger reconfiguration to usb_state_changed_work.
 */
static int ab5500_charger_usb_notifier_call(struct notifier_block *nb,
		unsigned long event, void *power)
{
	struct ab5500_charger *di =
		container_of(nb, struct ab5500_charger, nb);
	enum ab5500_usb_state bm_usb_state;
	unsigned mA = *((unsigned *)power);

	if (event != USB_EVENT_VBUS) {
		dev_dbg(di->dev, "not a standard host, returning\n");
		return NOTIFY_DONE;
	}

	/* TODO: State is fabricate here. See if charger really needs USB
	 * state or if mA is enough
	 */
	if ((di->usb_state.usb_current == 2) && (mA > 2))
		bm_usb_state = AB5500_BM_USB_STATE_RESUME;
	else if (mA == 0)
		bm_usb_state = AB5500_BM_USB_STATE_RESET_HS;
	else if (mA == 2)
		bm_usb_state = AB5500_BM_USB_STATE_SUSPEND;
	else if (mA >= 8) /* 8, 100, 500 */
		bm_usb_state = AB5500_BM_USB_STATE_CONFIGURED;
	else /* Should never occur */
		bm_usb_state = AB5500_BM_USB_STATE_RESET_FS;

	dev_dbg(di->dev, "%s usb_state: 0x%02x mA: %d\n",
		__func__, bm_usb_state, mA);

	spin_lock(&di->usb_state.usb_lock);
	di->usb_state.usb_changed = true;
	di->usb_state.state = bm_usb_state;
	di->usb_state.usb_current = mA;
	spin_unlock(&di->usb_state.usb_lock);

	queue_work(di->charger_wq, &di->usb_state_changed_work);

	return NOTIFY_OK;
}

#if defined(CONFIG_PM)
static int ab5500_charger_resume(struct platform_device *pdev)
{
	struct ab5500_charger *di = platform_get_drvdata(pdev);

	/* If we still have a HW failure, schedule a new check */
	if (di->flags.usbchargernotok || di->flags.vbus_ovv) {
		queue_delayed_work(di->charger_wq,
			&di->check_hw_failure_work, 0);
	}

	return 0;
}

static int ab5500_charger_suspend(struct platform_device *pdev,
	pm_message_t state)
{
	struct ab5500_charger *di = platform_get_drvdata(pdev);

	/* Cancel any pending HW failure check */
	if (delayed_work_pending(&di->check_hw_failure_work))
		cancel_delayed_work(&di->check_hw_failure_work);

	return 0;
}
#else
#define ab5500_charger_suspend NULL
#define ab5500_charger_resume NULL
#endif

+static int __devexit ab5500_charger_remove(struct platform_device *pdev) +{ + struct ab5500_charger *di = platform_get_drvdata(pdev); + int i, irq; + + /* Disable USB charging */ + ab5500_charger_usb_en(&di->usb_chg, false, 0, 0); + + /* Disable interrupts */ + for (i = 0; i < ARRAY_SIZE(ab5500_charger_irq); i++) { + irq = platform_get_irq_byname(pdev, ab5500_charger_irq[i].name); + free_irq(irq, di); + } + + otg_unregister_notifier(di->otg, &di->nb); + otg_put_transceiver(di->otg); + + /* Delete the work queue */ + destroy_workqueue(di->charger_wq); + + flush_scheduled_work(); + power_supply_unregister(&di->usb_chg.psy); + platform_set_drvdata(pdev, NULL); + kfree(di); + + return 0; +} + +static int __devinit ab5500_charger_probe(struct platform_device *pdev) +{ + int irq, i, charger_status, ret = 0; + struct abx500_bm_plat_data *plat_data; + + struct ab5500_charger *di = + kzalloc(sizeof(struct ab5500_charger), GFP_KERNEL); + if (!di) + return -ENOMEM; + + /* get parent data */ + di->dev = &pdev->dev; + di->parent = dev_get_drvdata(pdev->dev.parent); + di->gpadc = ab5500_gpadc_get("ab5500-adc.0"); + + /* initialize lock */ + spin_lock_init(&di->usb_state.usb_lock); + + plat_data = pdev->dev.platform_data; + di->pdata = plat_data->charger; + di->bat = plat_data->battery; + + /* get charger specific platform data */ + if (!di->pdata) { + dev_err(di->dev, "no charger platform data supplied\n"); + ret = -EINVAL; + goto free_device_info; + } + + /* get battery specific platform data */ + if (!di->bat) { + dev_err(di->dev, "no battery platform data supplied\n"); + ret = -EINVAL; + goto free_device_info; + } + /* USB supply */ + /* power_supply base class */ + di->usb_chg.psy.name = "ab5500_usb"; + di->usb_chg.psy.type = POWER_SUPPLY_TYPE_USB; + di->usb_chg.psy.properties = ab5500_charger_usb_props; + di->usb_chg.psy.num_properties = ARRAY_SIZE(ab5500_charger_usb_props); + di->usb_chg.psy.get_property = ab5500_charger_usb_get_property; + di->usb_chg.psy.supplied_to = 
di->pdata->supplied_to; + di->usb_chg.psy.num_supplicants = di->pdata->num_supplicants; + /* ux500_charger sub-class */ + di->usb_chg.ops.enable = &ab5500_charger_usb_en; + di->usb_chg.ops.kick_wd = &ab5500_charger_watchdog_kick; + di->usb_chg.ops.update_curr = &ab5500_charger_update_charger_current; + di->usb_chg.max_out_volt = ab5500_charger_voltage_map[ + ARRAY_SIZE(ab5500_charger_voltage_map) - 1]; + di->usb_chg.max_out_curr = ab5500_charger_current_map[ + ARRAY_SIZE(ab5500_charger_current_map) - 1]; + + + /* Create a work queue for the charger */ + di->charger_wq = + create_singlethread_workqueue("ab5500_charger_wq"); + if (di->charger_wq == NULL) { + dev_err(di->dev, "failed to create work queue\n"); + goto free_device_info; + } + + /* Init work for HW failure check */ + INIT_DELAYED_WORK_DEFERRABLE(&di->check_hw_failure_work, + ab5500_charger_check_hw_failure_work); + INIT_DELAYED_WORK_DEFERRABLE(&di->check_usbchgnotok_work, + ab5500_charger_check_usbchargernotok_work); + + /* Init work for charger detection */ + INIT_WORK(&di->usb_link_status_work, + ab5500_charger_usb_link_status_work); + INIT_WORK(&di->detect_usb_type_work, + ab5500_charger_detect_usb_type_work); + + INIT_WORK(&di->usb_state_changed_work, + ab5500_charger_usb_state_changed_work); + + /* Init work for checking HW status */ + INIT_WORK(&di->check_usb_thermal_prot_work, + ab5500_charger_check_usb_thermal_prot_work); + + /* Get Chip ID of the ABB ASIC */ + ret = abx500_get_chip_id(di->dev); + if (ret < 0) { + dev_err(di->dev, "failed to get chip ID\n"); + goto free_charger_wq; + } + di->chip_id = ret; + dev_dbg(di->dev, "AB5500 CID is: 0x%02x\n", di->chip_id); + + /* Initialize OVV, and other registers */ + ret = ab5500_charger_init_hw_registers(di); + if (ret) { + dev_err(di->dev, "failed to initialize ABB registers\n"); + goto free_device_info; + } + + /* Register USB charger class */ + ret = power_supply_register(di->dev, &di->usb_chg.psy); + if (ret) { + dev_err(di->dev, "failed to 
register USB charger\n"); + goto free_device_info; + } + + di->otg = otg_get_transceiver(); + if (!di->otg) { + dev_err(di->dev, "failed to get otg transceiver\n"); + goto free_usb; + } + di->nb.notifier_call = ab5500_charger_usb_notifier_call; + ret = otg_register_notifier(di->otg, &di->nb); + if (ret) { + dev_err(di->dev, "failed to register otg notifier\n"); + goto put_otg_transceiver; + } + + /* Identify the connected charger types during startup */ + charger_status = ab5500_charger_detect_chargers(di); + if (charger_status & USB_PW_CONN) { + dev_dbg(di->dev, "VBUS Detect during startup\n"); + di->vbus_detected = true; + di->vbus_detected_start = true; + queue_work(di->charger_wq, + &di->usb_link_status_work); + } + + /* Register interrupts */ + for (i = 0; i < ARRAY_SIZE(ab5500_charger_irq); i++) { + irq = platform_get_irq_byname(pdev, ab5500_charger_irq[i].name); + ret = request_threaded_irq(irq, NULL, ab5500_charger_irq[i].isr, + IRQF_SHARED | IRQF_NO_SUSPEND, + ab5500_charger_irq[i].name, di); + + if (ret != 0) { + dev_err(di->dev, "failed to request %s IRQ %d: %d\n" + , ab5500_charger_irq[i].name, irq, ret); + goto free_irq; + } + dev_dbg(di->dev, "Requested %s IRQ %d: %d\n", + ab5500_charger_irq[i].name, irq, ret); + } + + platform_set_drvdata(pdev, di); + + dev_info(di->dev, "probe success\n"); + return ret; + +free_irq: + otg_unregister_notifier(di->otg, &di->nb); + + /* We also have to free all successfully registered irqs */ + for (i = i - 1; i >= 0; i--) { + irq = platform_get_irq_byname(pdev, ab5500_charger_irq[i].name); + free_irq(irq, di); + } +put_otg_transceiver: + otg_put_transceiver(di->otg); +free_usb: + power_supply_unregister(&di->usb_chg.psy); +free_charger_wq: + destroy_workqueue(di->charger_wq); +free_device_info: + kfree(di); + + return ret; +} + +static struct platform_driver ab5500_charger_driver = { + .probe = ab5500_charger_probe, + .remove = __devexit_p(ab5500_charger_remove), + .suspend = ab5500_charger_suspend, + .resume = 
ab5500_charger_resume, + .driver = { + .name = "ab5500-charger", + .owner = THIS_MODULE, + }, +}; + +static int __init ab5500_charger_init(void) +{ + return platform_driver_register(&ab5500_charger_driver); +} + +static void __exit ab5500_charger_exit(void) +{ + platform_driver_unregister(&ab5500_charger_driver); +} + +subsys_initcall_sync(ab5500_charger_init); +module_exit(ab5500_charger_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Johan Palsson, Karl Komierowski"); +MODULE_ALIAS("platform:ab5500-charger"); +MODULE_DESCRIPTION("AB5500 charger management driver"); diff --git a/drivers/power/ab5500_fg.c b/drivers/power/ab5500_fg.c new file mode 100644 index 00000000000..c74d351bd8b --- /dev/null +++ b/drivers/power/ab5500_fg.c @@ -0,0 +1,1954 @@ +/* + * Copyright (C) ST-Ericsson AB 2011 + * + * Main and Back-up battery management driver. + * + * Note: Backup battery management is required in case of Li-Ion battery and not + * for capacitive battery. HREF boards have capacitive battery and hence backup + * battery management is not used and the supported code is available in this + * driver. 
+ * + * License Terms: GNU General Public License v2 + * Authors: + * Johan Palsson <johan.palsson@stericsson.com> + * Karl Komierowski <karl.komierowski@stericsson.com> + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/power_supply.h> +#include <linux/slab.h> +#include <linux/time.h> +#include <linux/delay.h> +#include <linux/mfd/abx500/ab5500-gpadc.h> +#include <linux/mfd/abx500/ab5500-bm.h> +#include <linux/mfd/abx500.h> +#include <linux/mfd/abx500/ab5500.h> + +static LIST_HEAD(ab5500_fg_list); + +/* U5500 Constants */ +#define FG_ON_MASK 0x04 +#define FG_ON 0x04 +#define FG_ACC_RESET_ON_READ_MASK 0x08 +#define FG_ACC_RESET_ON_READ 0x08 +#define EN_READOUT_MASK 0x01 +#define EN_READOUT 0x01 +#define EN_ACC_RESET_ON_READ 0x08 +#define ACC_RESET_ON_READ 0x08 +#define RESET 0x00 +#define EOC_52_mA 0x04 +#define MILLI_TO_MICRO 1000 +#define FG_LSB_IN_MA 770 +#define QLSB_NANO_AMP_HOURS_X100 5353 +#define SEC_TO_SAMPLE(S) (S * 4) +#define NBR_AVG_SAMPLES 20 +#define LOW_BAT_CHECK_INTERVAL (2 * HZ) +#define FG_PERIODIC_START_INTERVAL (250 * HZ)/1000 /* 250 msec */ + +#define VALID_CAPACITY_SEC (45 * 60) /* 45 minutes */ + +#define interpolate(x, x1, y1, x2, y2) \ + ((y1) + ((((y2) - (y1)) * ((x) - (x1))) / ((x2) - (x1)))); + +#define to_ab5500_fg_device_info(x) container_of((x), \ + struct ab5500_fg, fg_psy); + +/** + * struct ab5500_fg_interrupts - ab5500 fg interupts + * @name: name of the interrupt + * @isr function pointer to the isr + */ +struct ab5500_fg_interrupts { + char *name; + irqreturn_t (*isr)(int irq, void *data); +}; + +enum ab5500_fg_discharge_state { + AB5500_FG_DISCHARGE_INIT, + AB5500_FG_DISCHARGE_INITMEASURING, + AB5500_FG_DISCHARGE_INIT_RECOVERY, + AB5500_FG_DISCHARGE_RECOVERY, + AB5500_FG_DISCHARGE_READOUT, + AB5500_FG_DISCHARGE_WAKEUP, +}; + +static char *discharge_state[] = { + "DISCHARGE_INIT", + "DISCHARGE_INITMEASURING", + 
"DISCHARGE_INIT_RECOVERY", + "DISCHARGE_RECOVERY", + "DISCHARGE_READOUT", + "DISCHARGE_WAKEUP", +}; + +enum ab5500_fg_charge_state { + AB5500_FG_CHARGE_INIT, + AB5500_FG_CHARGE_READOUT, +}; + +static char *charge_state[] = { + "CHARGE_INIT", + "CHARGE_READOUT", +}; + +enum ab5500_fg_calibration_state { + AB5500_FG_CALIB_INIT, + AB5500_FG_CALIB_WAIT, + AB5500_FG_CALIB_END, +}; + +struct ab5500_fg_avg_cap { + int avg; + int samples[NBR_AVG_SAMPLES]; + __kernel_time_t time_stamps[NBR_AVG_SAMPLES]; + int pos; + int nbr_samples; + int sum; +}; + +struct ab5500_fg_battery_capacity { + int max_mah_design; + int max_mah; + int mah; + int permille; + int level; + int prev_mah; + int prev_percent; + int prev_level; +}; + +struct ab5500_fg_flags { + bool fg_enabled; + bool conv_done; + bool charging; + bool fully_charged; + bool low_bat_delay; + bool low_bat; + bool bat_ovv; + bool batt_unknown; + bool calibrate; +}; + +/** + * struct ab5500_fg - ab5500 FG device information + * @dev: Pointer to the structure device + * @vbat: Battery voltage in mV + * @vbat_nom: Nominal battery voltage in mV + * @inst_curr: Instantenous battery current in mA + * @avg_curr: Average battery current in mA + * @fg_samples: Number of samples used in the FG accumulation + * @accu_charge: Accumulated charge from the last conversion + * @recovery_cnt: Counter for recovery mode + * @high_curr_cnt: Counter for high current mode + * @init_cnt: Counter for init mode + * @v_to_cap: capacity based on battery voltage + * @recovery_needed: Indicate if recovery is needed + * @high_curr_mode: Indicate if we're in high current mode + * @init_capacity: Indicate if initial capacity measuring should be done + * @calib_state State during offset calibration + * @discharge_state: Current discharge state + * @charge_state: Current charge state + * @flags: Structure for information about events triggered + * @bat_cap: Structure for battery capacity specific parameters + * @avg_cap: Average capacity filter + * @parent: 
Pointer to the struct ab5500 + * @gpadc: Pointer to the struct gpadc + * @gpadc_auto: Pointer tot he struct adc_auto_input + * @pdata: Pointer to the ab5500_fg platform data + * @bat: Pointer to the ab5500_bm platform data + * @fg_psy: Structure that holds the FG specific battery properties + * @fg_wq: Work queue for running the FG algorithm + * @fg_periodic_work: Work to run the FG algorithm periodically + * @fg_low_bat_work: Work to check low bat condition + * @fg_reinit_work: Work to reset and re-initialize fuel gauge + * @fg_work: Work to run the FG algorithm instantly + * @fg_acc_cur_work: Work to read the FG accumulator + * @cc_lock: Mutex for locking the CC + * @node: struct of type list_head + */ +struct ab5500_fg { + struct device *dev; + int vbat; + int vbat_nom; + int inst_curr; + int avg_curr; + int fg_samples; + int accu_charge; + int recovery_cnt; + int high_curr_cnt; + int init_cnt; + int v_to_cap; + bool recovery_needed; + bool high_curr_mode; + bool init_capacity; + enum ab5500_fg_calibration_state calib_state; + enum ab5500_fg_discharge_state discharge_state; + enum ab5500_fg_charge_state charge_state; + struct ab5500_fg_flags flags; + struct ab5500_fg_battery_capacity bat_cap; + struct ab5500_fg_avg_cap avg_cap; + struct ab5500 *parent; + struct ab5500_gpadc *gpadc; + struct adc_auto_input *gpadc_auto; + struct abx500_fg_platform_data *pdata; + struct abx500_bm_data *bat; + struct power_supply fg_psy; + struct workqueue_struct *fg_wq; + struct delayed_work fg_periodic_work; + struct delayed_work fg_low_bat_work; + struct delayed_work fg_reinit_work; + struct work_struct fg_work; + struct delayed_work fg_acc_cur_work; + struct mutex cc_lock; + struct list_head node; + struct timer_list avg_current_timer; +}; + +/* Main battery properties */ +static enum power_supply_property ab5500_fg_props[] = { + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CURRENT_AVG, + POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, + 
POWER_SUPPLY_PROP_ENERGY_FULL, + POWER_SUPPLY_PROP_ENERGY_NOW, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, + POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_CHARGE_NOW, + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_CAPACITY_LEVEL, +}; + +/* Function Prototype */ +static int ab5500_fg_bat_v_trig(int mux); + +static int prev_samples, prev_val; + +struct ab5500_fg *ab5500_fg_get(void) +{ + struct ab5500_fg *di; + di = list_first_entry(&ab5500_fg_list, struct ab5500_fg, node); + + return di; +} + +/** + * ab5500_fg_is_low_curr() - Low or high current mode + * @di: pointer to the ab5500_fg structure + * @curr: the current to base or our decision on + * + * Low current mode if the current consumption is below a certain threshold + */ +static int ab5500_fg_is_low_curr(struct ab5500_fg *di, int curr) +{ + /* + * We want to know if we're in low current mode + */ + if (curr > -di->bat->fg_params->high_curr_threshold) + return true; + else + return false; +} + +/** + * ab5500_fg_add_cap_sample() - Add capacity to average filter + * @di: pointer to the ab5500_fg structure + * @sample: the capacity in mAh to add to the filter + * + * A capacity is added to the filter and a new mean capacity is calculated and + * returned + */ +static int ab5500_fg_add_cap_sample(struct ab5500_fg *di, int sample) +{ + struct timespec ts; + struct ab5500_fg_avg_cap *avg = &di->avg_cap; + + getnstimeofday(&ts); + + do { + avg->sum += sample - avg->samples[avg->pos]; + avg->samples[avg->pos] = sample; + avg->time_stamps[avg->pos] = ts.tv_sec; + avg->pos++; + + if (avg->pos == NBR_AVG_SAMPLES) + avg->pos = 0; + + if (avg->nbr_samples < NBR_AVG_SAMPLES) + avg->nbr_samples++; + + /* + * Check the time stamp for each sample. 
If too old, + * replace with latest sample + */ + } while (ts.tv_sec - VALID_CAPACITY_SEC > avg->time_stamps[avg->pos]); + + avg->avg = avg->sum / avg->nbr_samples; + + return avg->avg; +} + +/** + * ab5500_fg_clear_cap_samples() - Clear average filter + * @di: pointer to the ab5500_fg structure + * + * The capacity filter is is reset to zero. + */ +static void ab5500_fg_clear_cap_samples(struct ab5500_fg *di) +{ + int i; + struct ab5500_fg_avg_cap *avg = &di->avg_cap; + + avg->pos = 0; + avg->nbr_samples = 0; + avg->sum = 0; + avg->avg = 0; + + for (i = 0; i < NBR_AVG_SAMPLES; i++) { + avg->samples[i] = 0; + avg->time_stamps[i] = 0; + } +} + + +/** + * ab5500_fg_fill_cap_sample() - Fill average filter + * @di: pointer to the ab5500_fg structure + * @sample: the capacity in mAh to fill the filter with + * + * The capacity filter is filled with a capacity in mAh + */ +static void ab5500_fg_fill_cap_sample(struct ab5500_fg *di, int sample) +{ + int i; + struct timespec ts; + struct ab5500_fg_avg_cap *avg = &di->avg_cap; + + getnstimeofday(&ts); + + for (i = 0; i < NBR_AVG_SAMPLES; i++) { + avg->samples[i] = sample; + avg->time_stamps[i] = ts.tv_sec; + } + + avg->pos = 0; + avg->nbr_samples = NBR_AVG_SAMPLES; + avg->sum = sample * NBR_AVG_SAMPLES; + avg->avg = sample; +} + +/** + * ab5500_fg_coulomb_counter() - enable coulomb counter + * @di: pointer to the ab5500_fg structure + * @enable: enable/disable + * + * Enable/Disable coulomb counter. + * On failure returns negative value. 
+ */ +static int ab5500_fg_coulomb_counter(struct ab5500_fg *di, bool enable) +{ + int ret = 0; + mutex_lock(&di->cc_lock); + if (enable) { + /* Power-up the CC */ + ret = abx500_set_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A, + (FG_ON | FG_ACC_RESET_ON_READ)); + if (ret) + goto cc_err; + + di->flags.fg_enabled = true; + } else { + /* Stop the CC */ + ret = abx500_mask_and_set_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A, + FG_ON_MASK, RESET); + if (ret) + goto cc_err; + + di->flags.fg_enabled = false; + + } + dev_dbg(di->dev, " CC enabled: %d Samples: %d\n", + enable, di->fg_samples); + + mutex_unlock(&di->cc_lock); + + return ret; +cc_err: + dev_err(di->dev, "%s Enabling coulomb counter failed\n", __func__); + mutex_unlock(&di->cc_lock); + return ret; +} + +/** + * ab5500_fg_inst_curr() - battery instantaneous current + * @di: pointer to the ab5500_fg structure + * + * Returns battery instantenous current(on success) else error code + */ +static int ab5500_fg_inst_curr(struct ab5500_fg *di) +{ + u8 low, high; + static int val; + int ret = 0; + bool fg_off = false; + + if (!di->flags.fg_enabled) { + fg_off = true; + /* Power-up the CC */ + ab5500_fg_coulomb_counter(di, true); + msleep(250); + } + + mutex_lock(&di->cc_lock); + + /* Enable read request */ + ret = abx500_mask_and_set_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_B, + EN_READOUT_MASK, EN_READOUT); + if (ret) + goto inst_curr_err; + + /* Read CC Sample conversion value Low and high */ + ret = abx500_get_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, + AB5500_FGDIR_READ0, &low); + if (ret < 0) + goto inst_curr_err; + + ret = abx500_get_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, + AB5500_FGDIR_READ1, &high); + if (ret < 0) + goto inst_curr_err; + + /* + * negative value for Discharging + * convert 2's compliment into decimal + */ + if (high & 0x10) + val = (low | 
(high << 8) | 0xFFFFE000); + else + val = (low | (high << 8)); + + /* + * Convert to unit value in mA + * R(FGSENSE) = 20 mOhm + * Scaling of LSB: This corresponds fro R(FGSENSE) to a current of + * I = Q/t = 192.7 uC * 4 Hz = 0.77mA + */ + val = (val * 770) / 1000; + + mutex_unlock(&di->cc_lock); + + if (fg_off) { + dev_dbg(di->dev, "%s Disable FG\n", __func__); + /* Power-off the CC */ + ab5500_fg_coulomb_counter(di, false); + } + + return val; + +inst_curr_err: + dev_err(di->dev, "%s Get instanst current failed\n", __func__); + mutex_unlock(&di->cc_lock); + return ret; +} + +static void ab5500_fg_acc_cur_timer_expired(unsigned long data) +{ + struct ab5500_fg *di = (struct ab5500_fg *) data; + dev_dbg(di->dev, "Avg current timer expired\n"); + + /* Trigger execution of the algorithm instantly */ + queue_delayed_work(di->fg_wq, &di->fg_acc_cur_work, 0); +} + +/** + * ab5500_fg_acc_cur_work() - average battery current + * @work: pointer to the work_struct structure + * + * Updated the average battery current obtained from the + * coulomb counter. 
+ */ +static void ab5500_fg_acc_cur_work(struct work_struct *work) +{ + int val, raw_val, sample; + int ret; + u8 low, med, high, cnt_low, cnt_high; + + struct ab5500_fg *di = container_of(work, + struct ab5500_fg, fg_acc_cur_work.work); + + if (!di->flags.fg_enabled) { + /* Power-up the CC */ + ab5500_fg_coulomb_counter(di, true); + msleep(250); + } + mutex_lock(&di->cc_lock); + ret = abx500_mask_and_set_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_C, + EN_READOUT_MASK, EN_READOUT); + if (ret < 0) + goto exit; + /* If charging read charging registers for accumulated values */ + if (di->flags.charging) { + ret = abx500_mask_and_set_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A, + ACC_RESET_ON_READ, EN_ACC_RESET_ON_READ); + if (ret < 0) + goto exit; + /* Read CC Sample conversion value Low and high */ + ret = abx500_get_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, + AB5500_FG_CH0, &low); + if (ret < 0) + goto exit; + + ret = abx500_get_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, + AB5500_FG_CH1, &med); + if (ret < 0) + goto exit; + ret = abx500_get_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, + AB5500_FG_CH2, &high); + if (ret < 0) + goto exit; + ret = abx500_get_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, + AB5500_FG_VAL_COUNT0, &cnt_low); + if (ret < 0) + goto exit; + ret = abx500_get_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, + AB5500_FG_VAL_COUNT1, &cnt_high); + if (ret < 0) + goto exit; + ret = abx500_mask_and_set_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A, + ACC_RESET_ON_READ, RESET); + if (ret < 0) + goto exit; + queue_delayed_work(di->fg_wq, &di->fg_acc_cur_work, + di->bat->interval_charging * HZ); + } else { /* discharging */ + ret = abx500_mask_and_set_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A, + ACC_RESET_ON_READ, 
EN_ACC_RESET_ON_READ); + if (ret < 0) + goto exit; + /* Read CC Sample conversion value Low and high */ + ret = abx500_get_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, + AB5500_FG_DIS_CH0, &low); + if (ret < 0) + goto exit; + + ret = abx500_get_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, + AB5500_FG_DIS_CH1, &med); + if (ret < 0) + goto exit; + ret = abx500_get_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, + AB5500_FG_DIS_CH2, &high); + if (ret < 0) + goto exit; + ret = abx500_get_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, + AB5500_FG_VAL_COUNT0, &cnt_low); + if (ret < 0) + goto exit; + ret = abx500_get_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, + AB5500_FG_VAL_COUNT1, &cnt_high); + if (ret < 0) + goto exit; + ret = abx500_mask_and_set_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A, + ACC_RESET_ON_READ, RESET); + if (ret < 0) + goto exit; + queue_delayed_work(di->fg_wq, &di->fg_acc_cur_work, + di->bat->interval_not_charging * HZ); + } + di->fg_samples = (cnt_low | (cnt_high << 8)); + /* + * TODO: Workaround due to the hardware issue that accumulator is not + * reset after setting reset_on_read bit and reading the accumulator + * Registers. + */ + if (prev_samples > di->fg_samples) { + /* overflow has occured */ + sample = (0xFFFF - prev_samples) + di->fg_samples; + } else + sample = di->fg_samples - prev_samples; + prev_samples = di->fg_samples; + di->fg_samples = sample; + val = (low | (med << 8) | (high << 16)); + /* + * TODO: Workaround due to the hardware issue that accumulator is not + * reset after setting reset_on_read bit and reading the accumulator + * Registers. 
+ */ + if (prev_val > val) + raw_val = (0xFFFFFF - prev_val) + val; + else + raw_val = val - prev_val; + prev_val = val; + val = raw_val; + + if (di->fg_samples) { + di->accu_charge = (val * QLSB_NANO_AMP_HOURS_X100)/100000; + di->avg_curr = (val * FG_LSB_IN_MA) / (di->fg_samples * 1000); + } else + dev_err(di->dev, + "samples is zero, using previous calculated average current\n"); + di->flags.conv_done = true; + di->calib_state = AB5500_FG_CALIB_END; + + mutex_unlock(&di->cc_lock); + + queue_work(di->fg_wq, &di->fg_work); + + return; +exit: + dev_err(di->dev, + "Failed to read or write gas gauge registers\n"); + mutex_unlock(&di->cc_lock); + queue_work(di->fg_wq, &di->fg_work); +} + +/** + * ab5500_fg_bat_voltage() - get battery voltage + * @di: pointer to the ab5500_fg structure + * + * Returns battery voltage(on success) else error code + */ +static int ab5500_fg_bat_voltage(struct ab5500_fg *di) +{ + int vbat; + static int prev; + + vbat = ab5500_gpadc_convert(di->gpadc, MAIN_BAT_V); + if (vbat < 0) { + dev_err(di->dev, + "%s gpadc conversion failed, using previous value\n", + __func__); + return prev; + } + + prev = vbat; + return vbat; +} + +/** + * ab5500_fg_volt_to_capacity() - Voltage based capacity + * @di: pointer to the ab5500_fg structure + * @voltage: The voltage to convert to a capacity + * + * Returns battery capacity in per mille based on voltage + */ +static int ab5500_fg_volt_to_capacity(struct ab5500_fg *di, int voltage) +{ + int i, tbl_size; + struct abx500_v_to_cap *tbl; + int cap = 0; + + tbl = di->bat->bat_type[di->bat->batt_id].v_to_cap_tbl, + tbl_size = di->bat->bat_type[di->bat->batt_id].n_v_cap_tbl_elements; + + for (i = 0; i < tbl_size; ++i) { + if (di->vbat < tbl[i].voltage && di->vbat > tbl[i+1].voltage) + di->v_to_cap = tbl[i].capacity; + } + + for (i = 0; i < tbl_size; ++i) { + if (voltage > tbl[i].voltage) + break; + } + + if ((i > 0) && (i < tbl_size)) { + cap = interpolate(voltage, + tbl[i].voltage, + tbl[i].capacity * 10, + 
tbl[i-1].voltage, + tbl[i-1].capacity * 10); + } else if (i == 0) { + cap = 1000; + } else { + cap = 0; + } + + dev_dbg(di->dev, "%s Vbat: %d, Cap: %d per mille", + __func__, voltage, cap); + + return cap; +} + +/** + * ab5500_fg_uncomp_volt_to_capacity() - Uncompensated voltage based capacity + * @di: pointer to the ab5500_fg structure + * + * Returns battery capacity based on battery voltage that is not compensated + * for the voltage drop due to the load + */ +static int ab5500_fg_uncomp_volt_to_capacity(struct ab5500_fg *di) +{ + di->vbat = ab5500_fg_bat_voltage(di); + return ab5500_fg_volt_to_capacity(di, di->vbat); +} + +/** + * ab5500_fg_load_comp_volt_to_capacity() - Load compensated voltage based capacity + * @di: pointer to the ab5500_fg structure + * + * Returns battery capacity based on battery voltage that is load compensated + * for the voltage drop + */ +static int ab5500_fg_load_comp_volt_to_capacity(struct ab5500_fg *di) +{ + int vbat_comp; + + di->inst_curr = ab5500_fg_inst_curr(di); + di->vbat = ab5500_fg_bat_voltage(di); + + /* Use Ohms law to get the load compensated voltage */ + vbat_comp = di->vbat - (di->inst_curr * + di->bat->bat_type[di->bat->batt_id].battery_resistance) / 1000; + + dev_dbg(di->dev, "%s Measured Vbat: %dmV,Compensated Vbat %dmV, " + "R: %dmOhm, Current: %dmA\n", + __func__, + di->vbat, + vbat_comp, + di->bat->bat_type[di->bat->batt_id].battery_resistance, + di->inst_curr); + + return ab5500_fg_volt_to_capacity(di, vbat_comp); +} + +/** + * ab5500_fg_convert_mah_to_permille() - Capacity in mAh to permille + * @di: pointer to the ab5500_fg structure + * @cap_mah: capacity in mAh + * + * Converts capacity in mAh to capacity in permille + */ +static int ab5500_fg_convert_mah_to_permille(struct ab5500_fg *di, int cap_mah) +{ + return (cap_mah * 1000) / di->bat_cap.max_mah_design; +} + +/** + * ab5500_fg_convert_permille_to_mah() - Capacity in permille to mAh + * @di: pointer to the ab5500_fg structure + * @cap_pm: capacity in 
permille + * + * Converts capacity in permille to capacity in mAh + */ +static int ab5500_fg_convert_permille_to_mah(struct ab5500_fg *di, int cap_pm) +{ + return cap_pm * di->bat_cap.max_mah_design / 1000; +} + +/** + * ab5500_fg_convert_mah_to_uwh() - Capacity in mAh to uWh + * @di: pointer to the ab5500_fg structure + * @cap_mah: capacity in mAh + * + * Converts capacity in mAh to capacity in uWh + */ +static int ab5500_fg_convert_mah_to_uwh(struct ab5500_fg *di, int cap_mah) +{ + u64 div_res; + u32 div_rem; + + div_res = ((u64) cap_mah) * ((u64) di->vbat_nom); + div_rem = do_div(div_res, 1000); + + /* Make sure to round upwards if necessary */ + if (div_rem >= 1000 / 2) + div_res++; + + return (int) div_res; +} + +/** + * ab5500_fg_calc_cap_charging() - Calculate remaining capacity while charging + * @di: pointer to the ab5500_fg structure + * + * Return the capacity in mAh based on previous calculated capcity and the FG + * accumulator register value. The filter is filled with this capacity + */ +static int ab5500_fg_calc_cap_charging(struct ab5500_fg *di) +{ + dev_dbg(di->dev, "%s cap_mah %d accu_charge %d\n", + __func__, + di->bat_cap.mah, + di->accu_charge); + + /* Capacity should not be less than 0 */ + if (di->bat_cap.mah + di->accu_charge > 0) + di->bat_cap.mah += di->accu_charge; + else + di->bat_cap.mah = 0; + + /* + * We force capacity to 100% as long as the algorithm + * reports that it's full. 
+ */ + if (di->bat_cap.mah >= di->bat_cap.max_mah_design || + di->flags.fully_charged) + di->bat_cap.mah = di->bat_cap.max_mah_design; + + ab5500_fg_fill_cap_sample(di, di->bat_cap.mah); + di->bat_cap.permille = + ab5500_fg_convert_mah_to_permille(di, di->bat_cap.mah); + + /* We need to update battery voltage and inst current when charging */ + di->vbat = ab5500_fg_bat_voltage(di); + di->inst_curr = ab5500_fg_inst_curr(di); + + return di->bat_cap.mah; +} + +/** + * ab5500_fg_calc_cap_discharge_voltage() - Capacity in discharge with voltage + * @di: pointer to the ab5500_fg structure + * @comp: if voltage should be load compensated before capacity calc + * + * Return the capacity in mAh based on the battery voltage. The voltage can + * either be load compensated or not. This value is added to the filter and a + * new mean value is calculated and returned. + */ +static int ab5500_fg_calc_cap_discharge_voltage(struct ab5500_fg *di, bool comp) +{ + int permille, mah; + + if (comp) + permille = ab5500_fg_load_comp_volt_to_capacity(di); + else + permille = ab5500_fg_uncomp_volt_to_capacity(di); + + mah = ab5500_fg_convert_permille_to_mah(di, permille); + + di->bat_cap.mah = ab5500_fg_add_cap_sample(di, mah); + di->bat_cap.permille = + ab5500_fg_convert_mah_to_permille(di, di->bat_cap.mah); + + return di->bat_cap.mah; +} + +/** + * ab5500_fg_calc_cap_discharge_fg() - Capacity in discharge with FG + * @di: pointer to the ab5500_fg structure + * + * Return the capacity in mAh based on previous calculated capcity and the FG + * accumulator register value. This value is added to the filter and a + * new mean value is calculated and returned. 
+ */ +static int ab5500_fg_calc_cap_discharge_fg(struct ab5500_fg *di) +{ + int permille_volt, permille; + + dev_dbg(di->dev, "%s cap_mah %d accu_charge %d\n", + __func__, + di->bat_cap.mah, + di->accu_charge); + + /* Capacity should not be less than 0 */ + if (di->bat_cap.mah + di->accu_charge > 0) + di->bat_cap.mah += di->accu_charge; + else + di->bat_cap.mah = 0; + + if (di->bat_cap.mah >= di->bat_cap.max_mah_design) + di->bat_cap.mah = di->bat_cap.max_mah_design; + + /* + * Check against voltage based capacity. It can not be lower + * than what the uncompensated voltage says + */ + permille = ab5500_fg_convert_mah_to_permille(di, di->bat_cap.mah); + permille_volt = ab5500_fg_uncomp_volt_to_capacity(di); + + if (permille < permille_volt) { + di->bat_cap.permille = permille_volt; + di->bat_cap.mah = ab5500_fg_convert_permille_to_mah(di, + di->bat_cap.permille); + + dev_dbg(di->dev, "%s voltage based: perm %d perm_volt %d\n", + __func__, + permille, + permille_volt); + + ab5500_fg_fill_cap_sample(di, di->bat_cap.mah); + } else { + ab5500_fg_fill_cap_sample(di, di->bat_cap.mah); + di->bat_cap.permille = + ab5500_fg_convert_mah_to_permille(di, di->bat_cap.mah); + } + + return di->bat_cap.mah; +} + +/** + * ab5500_fg_capacity_level() - Get the battery capacity level + * @di: pointer to the ab5500_fg structure + * + * Get the battery capacity level based on the capacity in percent + */ +static int ab5500_fg_capacity_level(struct ab5500_fg *di) +{ + int ret, percent; + + percent = di->bat_cap.permille / 10; + + if (percent <= di->bat->cap_levels->critical || + di->flags.low_bat) + ret = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; + else if (percent <= di->bat->cap_levels->low) + ret = POWER_SUPPLY_CAPACITY_LEVEL_LOW; + else if (percent <= di->bat->cap_levels->normal) + ret = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; + else if (percent <= di->bat->cap_levels->high) + ret = POWER_SUPPLY_CAPACITY_LEVEL_HIGH; + else + ret = POWER_SUPPLY_CAPACITY_LEVEL_FULL; + + return ret; +} + +/** + 
* ab5500_fg_check_capacity_limits() - Check if capacity has changed + * @di: pointer to the ab5500_fg structure + * @init: capacity is allowed to go up in init mode + * + * Check if capacity or capacity limit has changed and notify the system + * about it using the power_supply framework + */ +static void ab5500_fg_check_capacity_limits(struct ab5500_fg *di, bool init) +{ + bool changed = false; + + di->bat_cap.level = ab5500_fg_capacity_level(di); + + if (di->bat_cap.level != di->bat_cap.prev_level) { + /* + * We do not allow reported capacity level to go up + * unless we're charging or if we're in init + */ + if (!(!di->flags.charging && di->bat_cap.level > + di->bat_cap.prev_level) || init) { + dev_dbg(di->dev, "level changed from %d to %d\n", + di->bat_cap.prev_level, + di->bat_cap.level); + di->bat_cap.prev_level = di->bat_cap.level; + changed = true; + } else { + dev_dbg(di->dev, "level not allowed to go up " + "since no charger is connected: %d to %d\n", + di->bat_cap.prev_level, + di->bat_cap.level); + } + } + + /* + * If we have received the LOW_BAT IRQ, set capacity to 0 to initiate + * shutdown + */ + if (di->flags.low_bat) { + dev_dbg(di->dev, "Battery low, set capacity to 0\n"); + di->bat_cap.prev_percent = 0; + di->bat_cap.permille = 0; + di->bat_cap.prev_mah = 0; + di->bat_cap.mah = 0; + changed = true; + } else if (di->bat_cap.prev_percent != di->bat_cap.permille / 10) { + if (di->bat_cap.permille / 10 == 0) { + /* + * We will not report 0% unless we've got + * the LOW_BAT IRQ, no matter what the FG + * algorithm says. 
+ */ + di->bat_cap.prev_percent = 1; + di->bat_cap.permille = 1; + di->bat_cap.prev_mah = 1; + di->bat_cap.mah = 1; + + changed = true; + } else if (!(!di->flags.charging && + (di->bat_cap.permille / 10) > + di->bat_cap.prev_percent) || init) { + /* + * We do not allow reported capacity to go up + * unless we're charging or if we're in init + */ + dev_dbg(di->dev, + "capacity changed from %d to %d (%d)\n", + di->bat_cap.prev_percent, + di->bat_cap.permille / 10, + di->bat_cap.permille); + di->bat_cap.prev_percent = di->bat_cap.permille / 10; + di->bat_cap.prev_mah = di->bat_cap.mah; + + changed = true; + } else { + dev_dbg(di->dev, "capacity not allowed to go up since " + "no charger is connected: %d to %d (%d)\n", + di->bat_cap.prev_percent, + di->bat_cap.permille / 10, + di->bat_cap.permille); + } + } + + if (changed) + power_supply_changed(&di->fg_psy); + +} + +static void ab5500_fg_charge_state_to(struct ab5500_fg *di, + enum ab5500_fg_charge_state new_state) +{ + dev_dbg(di->dev, "Charge state from %d [%s] to %d [%s]\n", + di->charge_state, + charge_state[di->charge_state], + new_state, + charge_state[new_state]); + + di->charge_state = new_state; +} + +static void ab5500_fg_discharge_state_to(struct ab5500_fg *di, + enum ab5500_fg_charge_state new_state) +{ + dev_dbg(di->dev, "Disharge state from %d [%s] to %d [%s]\n", + di->discharge_state, + discharge_state[di->discharge_state], + new_state, + discharge_state[new_state]); + + di->discharge_state = new_state; +} + +/** + * ab5500_fg_algorithm_charging() - FG algorithm for when charging + * @di: pointer to the ab5500_fg structure + * + * Battery capacity calculation state machine for when we're charging + */ +static void ab5500_fg_algorithm_charging(struct ab5500_fg *di) +{ + /* + * If we change to discharge mode + * we should start with recovery + */ + if (di->discharge_state != AB5500_FG_DISCHARGE_INIT_RECOVERY) + ab5500_fg_discharge_state_to(di, + AB5500_FG_DISCHARGE_INIT_RECOVERY); + + switch 
(di->charge_state) { + case AB5500_FG_CHARGE_INIT: + di->fg_samples = SEC_TO_SAMPLE( + di->bat->fg_params->accu_charging); + + ab5500_fg_coulomb_counter(di, true); + ab5500_fg_charge_state_to(di, AB5500_FG_CHARGE_READOUT); + + break; + + case AB5500_FG_CHARGE_READOUT: + /* + * Read the FG and calculate the new capacity + */ + mutex_lock(&di->cc_lock); + if (!di->flags.conv_done) { + /* Wasn't the CC IRQ that got us here */ + mutex_unlock(&di->cc_lock); + dev_dbg(di->dev, "%s CC conv not done\n", + __func__); + + break; + } + di->flags.conv_done = false; + mutex_unlock(&di->cc_lock); + + ab5500_fg_calc_cap_charging(di); + + break; + + default: + break; + } + + /* Check capacity limits */ + ab5500_fg_check_capacity_limits(di, false); +} + +/** + * ab5500_fg_algorithm_discharging() - FG algorithm for when discharging + * @di: pointer to the ab5500_fg structure + * + * Battery capacity calculation state machine for when we're discharging + */ +static void ab5500_fg_algorithm_discharging(struct ab5500_fg *di) +{ + int sleep_time; + + /* If we change to charge mode we should start with init */ + if (di->charge_state != AB5500_FG_CHARGE_INIT) + ab5500_fg_charge_state_to(di, AB5500_FG_CHARGE_INIT); + + switch (di->discharge_state) { + case AB5500_FG_DISCHARGE_INIT: + /* We use the FG IRQ to work on */ + di->init_cnt = 0; + di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer); + ab5500_fg_coulomb_counter(di, true); + ab5500_fg_discharge_state_to(di, + AB5500_FG_DISCHARGE_INITMEASURING); + + /* Intentional fallthrough */ + case AB5500_FG_DISCHARGE_INITMEASURING: + /* + * Discard a number of samples during startup. + * After that, use compensated voltage for a few + * samples to get an initial capacity. 
+ * Then go to READOUT + */ + sleep_time = di->bat->fg_params->init_timer; + + /* Discard the first [x] seconds */ + if (di->init_cnt > + di->bat->fg_params->init_discard_time) { + + ab5500_fg_calc_cap_discharge_voltage(di, true); + + ab5500_fg_check_capacity_limits(di, true); + } + + di->init_cnt += sleep_time; + if (di->init_cnt > + di->bat->fg_params->init_total_time) { + di->fg_samples = SEC_TO_SAMPLE( + di->bat->fg_params->accu_high_curr); + + ab5500_fg_coulomb_counter(di, true); + ab5500_fg_discharge_state_to(di, + AB5500_FG_DISCHARGE_READOUT); + } + + break; + + case AB5500_FG_DISCHARGE_INIT_RECOVERY: + di->recovery_cnt = 0; + di->recovery_needed = true; + ab5500_fg_discharge_state_to(di, + AB5500_FG_DISCHARGE_RECOVERY); + + /* Intentional fallthrough */ + + case AB5500_FG_DISCHARGE_RECOVERY: + sleep_time = di->bat->fg_params->recovery_sleep_timer; + + /* + * We should check the power consumption + * If low, go to READOUT (after x min) or + * RECOVERY_SLEEP if time left. + * If high, go to READOUT + */ + di->inst_curr = ab5500_fg_inst_curr(di); + + if (ab5500_fg_is_low_curr(di, di->inst_curr)) { + if (di->recovery_cnt > + di->bat->fg_params->recovery_total_time) { + di->fg_samples = SEC_TO_SAMPLE( + di->bat->fg_params->accu_high_curr); + ab5500_fg_coulomb_counter(di, true); + ab5500_fg_discharge_state_to(di, + AB5500_FG_DISCHARGE_READOUT); + di->recovery_needed = false; + } else { + queue_delayed_work(di->fg_wq, + &di->fg_periodic_work, + sleep_time * HZ); + } + di->recovery_cnt += sleep_time; + } else { + di->fg_samples = SEC_TO_SAMPLE( + di->bat->fg_params->accu_high_curr); + ab5500_fg_coulomb_counter(di, true); + ab5500_fg_discharge_state_to(di, + AB5500_FG_DISCHARGE_READOUT); + } + + break; + + case AB5500_FG_DISCHARGE_READOUT: + di->inst_curr = ab5500_fg_inst_curr(di); + + if (ab5500_fg_is_low_curr(di, di->inst_curr)) { + /* Detect mode change */ + if (di->high_curr_mode) { + di->high_curr_mode = false; + di->high_curr_cnt = 0; + } + + if 
(di->recovery_needed) { + ab5500_fg_discharge_state_to(di, + AB5500_FG_DISCHARGE_RECOVERY); + + queue_delayed_work(di->fg_wq, + &di->fg_periodic_work, + 0); + + break; + } + + ab5500_fg_calc_cap_discharge_voltage(di, true); + } else { + mutex_lock(&di->cc_lock); + if (!di->flags.conv_done) { + /* Wasn't the CC IRQ that got us here */ + mutex_unlock(&di->cc_lock); + dev_dbg(di->dev, "%s CC conv not done\n", + __func__); + + break; + } + di->flags.conv_done = false; + mutex_unlock(&di->cc_lock); + + /* Detect mode change */ + if (!di->high_curr_mode) { + di->high_curr_mode = true; + di->high_curr_cnt = 0; + } + + di->high_curr_cnt += + di->bat->fg_params->accu_high_curr; + if (di->high_curr_cnt > + di->bat->fg_params->high_curr_time) + di->recovery_needed = true; + + ab5500_fg_calc_cap_discharge_fg(di); + } + + ab5500_fg_check_capacity_limits(di, false); + + break; + + case AB5500_FG_DISCHARGE_WAKEUP: + ab5500_fg_coulomb_counter(di, true); + di->inst_curr = ab5500_fg_inst_curr(di); + + ab5500_fg_calc_cap_discharge_voltage(di, true); + + di->fg_samples = SEC_TO_SAMPLE( + di->bat->fg_params->accu_high_curr); + /* Re-program number of samples set above */ + ab5500_fg_coulomb_counter(di, true); + ab5500_fg_discharge_state_to(di, AB5500_FG_DISCHARGE_READOUT); + + ab5500_fg_check_capacity_limits(di, false); + + break; + + default: + break; + } +} + +/** + * ab5500_fg_algorithm_calibrate() - Internal columb counter offset calibration + * @di: pointer to the ab5500_fg structure + * + */ +static void ab5500_fg_algorithm_calibrate(struct ab5500_fg *di) +{ + int ret; + + switch (di->calib_state) { + case AB5500_FG_CALIB_INIT: + dev_dbg(di->dev, "Calibration ongoing...\n"); + /* TODO: For Cut 1.1 no calibration */ + ret = abx500_mask_and_set_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_CONTROL_A, + FG_ACC_RESET_ON_READ_MASK, FG_ACC_RESET_ON_READ); + if (ret) + goto err; + di->calib_state = AB5500_FG_CALIB_WAIT; + break; + case AB5500_FG_CALIB_END: + 
di->flags.calibrate = false; + dev_dbg(di->dev, "Calibration done...\n"); + queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0); + break; + case AB5500_FG_CALIB_WAIT: + dev_dbg(di->dev, "Calibration WFI\n"); + default: + break; + } + return; +err: + /* Something went wrong, don't calibrate then */ + dev_err(di->dev, "failed to calibrate the CC\n"); + di->flags.calibrate = false; + di->calib_state = AB5500_FG_CALIB_INIT; + queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0); +} + +/** + * ab5500_fg_algorithm() - Entry point for the FG algorithm + * @di: pointer to the ab5500_fg structure + * + * Entry point for the battery capacity calculation state machine + */ +static void ab5500_fg_algorithm(struct ab5500_fg *di) +{ + if (di->flags.calibrate) + ab5500_fg_algorithm_calibrate(di); + else { + if (di->flags.charging) + ab5500_fg_algorithm_charging(di); + else + ab5500_fg_algorithm_discharging(di); + } + + dev_dbg(di->dev, "[FG_DATA] %d %d %d %d %d %d %d %d %d " + "%d %d %d %d %d %d %d\n", + di->bat_cap.max_mah_design, + di->bat_cap.mah, + di->bat_cap.permille, + di->bat_cap.level, + di->bat_cap.prev_mah, + di->bat_cap.prev_percent, + di->bat_cap.prev_level, + di->vbat, + di->inst_curr, + di->avg_curr, + di->accu_charge, + di->flags.charging, + di->charge_state, + di->discharge_state, + di->high_curr_mode, + di->recovery_needed); +} + +/** + * ab5500_fg_periodic_work() - Run the FG state machine periodically + * @work: pointer to the work_struct structure + * + * Work queue function for periodic work + */ +static void ab5500_fg_periodic_work(struct work_struct *work) +{ + struct ab5500_fg *di = container_of(work, struct ab5500_fg, + fg_periodic_work.work); + + if (di->init_capacity) { + /* A dummy read that will return 0 */ + di->inst_curr = ab5500_fg_inst_curr(di); + /* Get an initial capacity calculation */ + ab5500_fg_calc_cap_discharge_voltage(di, true); + ab5500_fg_check_capacity_limits(di, true); + di->init_capacity = false; + 
queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0); + } else + ab5500_fg_algorithm(di); +} + +/** + * ab5500_fg_low_bat_work() - Check LOW_BAT condition + * @work: pointer to the work_struct structure + * + * Work queue function for checking the LOW_BAT condition + */ +static void ab5500_fg_low_bat_work(struct work_struct *work) +{ + int vbat; + + struct ab5500_fg *di = container_of(work, struct ab5500_fg, + fg_low_bat_work.work); + + vbat = ab5500_fg_bat_voltage(di); + + /* Check if LOW_BAT still fulfilled */ + if (vbat < di->bat->fg_params->lowbat_threshold) { + di->flags.low_bat = true; + dev_warn(di->dev, "Battery voltage still LOW\n"); + + /* + * We need to re-schedule this check to be able to detect + * if the voltage increases again during charging + */ + queue_delayed_work(di->fg_wq, &di->fg_low_bat_work, + round_jiffies(LOW_BAT_CHECK_INTERVAL)); + power_supply_changed(&di->fg_psy); + } else { + di->flags.low_bat = false; + dev_warn(di->dev, "Battery voltage OK again\n"); + power_supply_changed(&di->fg_psy); + } + + /* This is needed to dispatch LOW_BAT */ + ab5500_fg_check_capacity_limits(di, false); + + /* Set this flag to check if LOW_BAT IRQ still occurs */ + di->flags.low_bat_delay = false; +} + +/** + * ab5500_fg_instant_work() - Run the FG state machine instantly + * @work: pointer to the work_struct structure + * + * Work queue function for instant work + */ +static void ab5500_fg_instant_work(struct work_struct *work) +{ + struct ab5500_fg *di = container_of(work, struct ab5500_fg, fg_work); + + ab5500_fg_algorithm(di); +} + +/** + * ab5500_fg_get_property() - get the fg properties + * @psy: pointer to the power_supply structure + * @psp: pointer to the power_supply_property structure + * @val: pointer to the power_supply_propval union + * + * This function gets called when an application tries to get the + * fg properties by reading the sysfs files. 
+ * voltage_now: battery voltage + * current_now: battery instant current + * current_avg: battery average current + * charge_full_design: capacity where battery is considered full + * charge_now: battery capacity in nAh + * capacity: capacity in percent + * capacity_level: capacity level + * + * Returns error code in case of failure else 0 on success + */ +static int ab5500_fg_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct ab5500_fg *di; + + di = to_ab5500_fg_device_info(psy); + + /* + * If battery is identified as unknown and charging of unknown + * batteries is disabled, we always report 100% capacity and + * capacity level UNKNOWN, since we can't calculate + * remaining capacity + */ + + switch (psp) { + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + if (di->flags.bat_ovv) + val->intval = 47500000; + else { + di->vbat = ab5500_gpadc_convert + (di->gpadc, MAIN_BAT_V); + val->intval = di->vbat * 1000; + } + break; + case POWER_SUPPLY_PROP_CURRENT_NOW: + di->inst_curr = ab5500_fg_inst_curr(di); + val->intval = di->inst_curr * 1000; + break; + case POWER_SUPPLY_PROP_CURRENT_AVG: + val->intval = di->avg_curr * 1000; + break; + case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN: + val->intval = ab5500_fg_convert_mah_to_uwh(di, + di->bat_cap.max_mah_design); + break; + case POWER_SUPPLY_PROP_ENERGY_FULL: + val->intval = ab5500_fg_convert_mah_to_uwh(di, + di->bat_cap.max_mah); + break; + case POWER_SUPPLY_PROP_ENERGY_NOW: + if (di->flags.batt_unknown && !di->bat->chg_unknown_bat) + val->intval = ab5500_fg_convert_mah_to_uwh(di, + di->bat_cap.max_mah); + else + val->intval = ab5500_fg_convert_mah_to_uwh(di, + di->bat_cap.prev_mah); + break; + case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: + val->intval = di->bat_cap.max_mah_design; + break; + case POWER_SUPPLY_PROP_CHARGE_FULL: + val->intval = di->bat_cap.max_mah; + break; + case POWER_SUPPLY_PROP_CHARGE_NOW: + if (di->flags.batt_unknown && !di->bat->chg_unknown_bat) + 
val->intval = di->bat_cap.max_mah; + else + val->intval = di->bat_cap.prev_mah; + break; + case POWER_SUPPLY_PROP_CAPACITY: + if (di->flags.batt_unknown && !di->bat->chg_unknown_bat) + val->intval = 100; + else + val->intval = di->bat_cap.prev_percent; + break; + case POWER_SUPPLY_PROP_CAPACITY_LEVEL: + if (di->flags.batt_unknown && !di->bat->chg_unknown_bat) + val->intval = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; + else + val->intval = di->bat_cap.prev_level; + break; + default: + return -EINVAL; + } + return 0; +} + +static int ab5500_fg_get_ext_psy_data(struct device *dev, void *data) +{ + struct power_supply *psy; + struct power_supply *ext; + struct ab5500_fg *di; + union power_supply_propval ret; + int i, j; + bool psy_found = false; + + psy = (struct power_supply *)data; + ext = dev_get_drvdata(dev); + di = to_ab5500_fg_device_info(psy); + + /* + * For all psy where the name of your driver + * appears in any supplied_to + */ + for (i = 0; i < ext->num_supplicants; i++) { + if (!strcmp(ext->supplied_to[i], psy->name)) + psy_found = true; + } + + if (!psy_found) + return 0; + + /* Go through all properties for the psy */ + for (j = 0; j < ext->num_properties; j++) { + enum power_supply_property prop; + prop = ext->properties[j]; + + if (ext->get_property(ext, prop, &ret)) + continue; + + switch (prop) { + case POWER_SUPPLY_PROP_STATUS: + switch (ext->type) { + case POWER_SUPPLY_TYPE_BATTERY: + switch (ret.intval) { + case POWER_SUPPLY_STATUS_UNKNOWN: + case POWER_SUPPLY_STATUS_DISCHARGING: + case POWER_SUPPLY_STATUS_NOT_CHARGING: + if (!di->flags.charging) + break; + di->flags.charging = false; + di->flags.fully_charged = false; + queue_work(di->fg_wq, &di->fg_work); + break; + case POWER_SUPPLY_STATUS_FULL: + if (di->flags.fully_charged) + break; + di->flags.fully_charged = true; + /* Save current capacity as maximum */ + di->bat_cap.max_mah = di->bat_cap.mah; + queue_work(di->fg_wq, &di->fg_work); + break; + case POWER_SUPPLY_STATUS_CHARGING: + if 
(di->flags.charging) + break; + di->flags.charging = true; + di->flags.fully_charged = false; + queue_work(di->fg_wq, &di->fg_work); + break; + }; + default: + break; + }; + break; + case POWER_SUPPLY_PROP_TECHNOLOGY: + switch (ext->type) { + case POWER_SUPPLY_TYPE_BATTERY: + if (ret.intval) + di->flags.batt_unknown = false; + else + di->flags.batt_unknown = true; + break; + default: + break; + } + break; + default: + break; + } + } + return 0; +} + +/** + * ab5500_fg_init_hw_registers() - Set up FG related registers + * @di: pointer to the ab5500_fg structure + * + * Set up battery OVV, low battery voltage registers + */ +static int ab5500_fg_init_hw_registers(struct ab5500_fg *di) +{ + int ret; + struct adc_auto_input *auto_ip; + + auto_ip = kzalloc(sizeof(struct adc_auto_input), GFP_KERNEL); + if (!auto_ip) { + dev_err(di->dev, "failed to allocate memory\n"); + return -ENOMEM; + } + + auto_ip->mux = MAIN_BAT_V; + auto_ip->freq = MS500; + auto_ip->min = di->bat->fg_params->lowbat_threshold; + auto_ip->max = di->bat->fg_params->overbat_threshold; + auto_ip->auto_adc_callback = ab5500_fg_bat_v_trig; + di->gpadc_auto = auto_ip; + ret = ab5500_gpadc_convert_auto(di->gpadc, di->gpadc_auto); + if (ret) + dev_err(di->dev, + "failed to set auto trigger for battery votlage\n"); + /* set End Of Charge current to 247mA */ + ret = abx500_set_register_interruptible(di->dev, + AB5500_BANK_FG_BATTCOM_ACC, AB5500_FG_EOC, EOC_52_mA); + return ret; +} + +static int ab5500_fg_bat_v_trig(int mux) +{ + struct ab5500_fg *di = ab5500_fg_get(); + + di->vbat = ab5500_gpadc_convert(di->gpadc, MAIN_BAT_V); + + /* check if the battery voltage is below low threshold */ + if (di->vbat < di->bat->fg_params->lowbat_threshold) { + dev_warn(di->dev, "Battery voltage is below LOW threshold\n"); + di->flags.low_bat_delay = true; + /* + * Start a timer to check LOW_BAT again after some time + * This is done to avoid shutdown on single voltage dips + */ + queue_delayed_work(di->fg_wq, 
&di->fg_low_bat_work, + round_jiffies(LOW_BAT_CHECK_INTERVAL)); + power_supply_changed(&di->fg_psy); + } + /* check if battery votlage is above OVV */ + else if (di->vbat > di->bat->fg_params->overbat_threshold) { + dev_warn(di->dev, "Battery OVV\n"); + di->flags.bat_ovv = true; + + power_supply_changed(&di->fg_psy); + } else + dev_err(di->dev, + "Invalid gpadc auto trigger for battery voltage\n"); + + kfree(di->gpadc_auto); + ab5500_fg_init_hw_registers(di); + return 0; +} + +/** + * ab5500_fg_external_power_changed() - callback for power supply changes + * @psy: pointer to the structure power_supply + * + * This function is the entry point of the pointer external_power_changed + * of the structure power_supply. + * This function gets executed when there is a change in any external power + * supply that this driver needs to be notified of. + */ +static void ab5500_fg_external_power_changed(struct power_supply *psy) +{ + struct ab5500_fg *di = to_ab5500_fg_device_info(psy); + + class_for_each_device(power_supply_class, NULL, + &di->fg_psy, ab5500_fg_get_ext_psy_data); +} + +/** + * abab5500_fg_reinit_work() - work to reset the FG algorithm + * @work: pointer to the work_struct structure + * + * Used to reset the current battery capacity to be able to + * retrigger a new voltage base capacity calculation. For + * test and verification purpose. 
+ */ +static void ab5500_fg_reinit_work(struct work_struct *work) +{ + struct ab5500_fg *di = container_of(work, struct ab5500_fg, + fg_reinit_work.work); + + if (di->flags.calibrate == false) { + dev_dbg(di->dev, "Resetting FG state machine to init.\n"); + ab5500_fg_clear_cap_samples(di); + ab5500_fg_calc_cap_discharge_voltage(di, true); + ab5500_fg_charge_state_to(di, AB5500_FG_CHARGE_INIT); + ab5500_fg_discharge_state_to(di, AB5500_FG_DISCHARGE_INIT); + queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0); + + } else { + dev_err(di->dev, + "Residual offset calibration ongoing retrying..\n"); + /* Wait one second until next try*/ + queue_delayed_work(di->fg_wq, &di->fg_reinit_work, + round_jiffies(1)); + } +} + +/** + * ab5500_fg_reinit() - forces FG algorithm to reinitialize with current values + * + * This function can be used to force the FG algorithm to recalculate a new + * voltage based battery capacity. + */ +void ab5500_fg_reinit(void) +{ + struct ab5500_fg *di = ab5500_fg_get(); + /* User won't be notified if a null pointer returned. */ + if (di != NULL) + queue_delayed_work(di->fg_wq, &di->fg_reinit_work, 0); +} + +#if defined(CONFIG_PM) +static int ab5500_fg_resume(struct platform_device *pdev) +{ + struct ab5500_fg *di = platform_get_drvdata(pdev); + + /* + * Change state if we're not charging. 
If we're charging we will wake + * up on the FG IRQ + */ + if (!di->flags.charging) { + ab5500_fg_discharge_state_to(di, AB5500_FG_DISCHARGE_WAKEUP); + queue_work(di->fg_wq, &di->fg_work); + } + + return 0; +} + +static int ab5500_fg_suspend(struct platform_device *pdev, + pm_message_t state) +{ + struct ab5500_fg *di = platform_get_drvdata(pdev); + + flush_delayed_work(&di->fg_periodic_work); + + /* + * If the FG is enabled we will disable it before going to suspend + * only if we're not charging + */ + if (di->flags.fg_enabled && !di->flags.charging) + ab5500_fg_coulomb_counter(di, false); + + return 0; +} +#else +#define ab5500_fg_suspend NULL +#define ab5500_fg_resume NULL +#endif + +static int __devexit ab5500_fg_remove(struct platform_device *pdev) +{ + int ret = 0; + struct ab5500_fg *di = platform_get_drvdata(pdev); + + /* Disable coulomb counter */ + ret = ab5500_fg_coulomb_counter(di, false); + if (ret) + dev_err(di->dev, "failed to disable coulomb counter\n"); + + destroy_workqueue(di->fg_wq); + + flush_scheduled_work(); + power_supply_unregister(&di->fg_psy); + platform_set_drvdata(pdev, NULL); + kfree(di->gpadc_auto); + kfree(di); + return ret; +} + +static int __devinit ab5500_fg_probe(struct platform_device *pdev) +{ + struct abx500_bm_plat_data *plat_data; + int ret = 0; + + struct ab5500_fg *di = + kzalloc(sizeof(struct ab5500_fg), GFP_KERNEL); + if (!di) + return -ENOMEM; + + mutex_init(&di->cc_lock); + + /* get parent data */ + di->dev = &pdev->dev; + di->parent = dev_get_drvdata(pdev->dev.parent); + di->gpadc = ab5500_gpadc_get("ab5500-adc.0"); + + plat_data = pdev->dev.platform_data; + di->pdata = plat_data->fg; + di->bat = plat_data->battery; + + /* get fg specific platform data */ + if (!di->pdata) { + dev_err(di->dev, "no fg platform data supplied\n"); + ret = -EINVAL; + goto free_device_info; + } + + /* get battery specific platform data */ + if (!di->bat) { + dev_err(di->dev, "no battery platform data supplied\n"); + ret = -EINVAL; + goto 
free_device_info; + } + /* powerup fg to start sampling */ + ab5500_fg_coulomb_counter(di, true); + + di->fg_psy.name = "ab5500_fg"; + di->fg_psy.type = POWER_SUPPLY_TYPE_BATTERY; + di->fg_psy.properties = ab5500_fg_props; + di->fg_psy.num_properties = ARRAY_SIZE(ab5500_fg_props); + di->fg_psy.get_property = ab5500_fg_get_property; + di->fg_psy.supplied_to = di->pdata->supplied_to; + di->fg_psy.num_supplicants = di->pdata->num_supplicants; + di->fg_psy.external_power_changed = ab5500_fg_external_power_changed; + + di->bat_cap.max_mah_design = MILLI_TO_MICRO * + di->bat->bat_type[di->bat->batt_id].charge_full_design; + + di->bat_cap.max_mah = di->bat_cap.max_mah_design; + + di->vbat_nom = di->bat->bat_type[di->bat->batt_id].nominal_voltage; + + di->init_capacity = true; + + ab5500_fg_charge_state_to(di, AB5500_FG_CHARGE_INIT); + ab5500_fg_discharge_state_to(di, AB5500_FG_DISCHARGE_INIT); + + /* Create a work queue for running the FG algorithm */ + di->fg_wq = create_singlethread_workqueue("ab5500_fg_wq"); + if (di->fg_wq == NULL) { + dev_err(di->dev, "failed to create work queue\n"); + goto free_device_info; + } + + /* Init work for running the fg algorithm instantly */ + INIT_WORK(&di->fg_work, ab5500_fg_instant_work); + + /* Init work for getting the battery accumulated current */ + INIT_DELAYED_WORK_DEFERRABLE(&di->fg_acc_cur_work, + ab5500_fg_acc_cur_work); + + /* Init work for reinitialising the fg algorithm */ + INIT_DELAYED_WORK_DEFERRABLE(&di->fg_reinit_work, + ab5500_fg_reinit_work); + + /* Work delayed Queue to run the state machine */ + INIT_DELAYED_WORK_DEFERRABLE(&di->fg_periodic_work, + ab5500_fg_periodic_work); + + /* Work to check low battery condition */ + INIT_DELAYED_WORK_DEFERRABLE(&di->fg_low_bat_work, + ab5500_fg_low_bat_work); + + list_add_tail(&di->node, &ab5500_fg_list); + + /* Consider battery unknown until we're informed otherwise */ + di->flags.batt_unknown = true; + + /* Register FG power supply class */ + ret = 
power_supply_register(di->dev, &di->fg_psy); + if (ret) { + dev_err(di->dev, "failed to register FG psy\n"); + goto free_fg_wq; + } + + /* Initialize OVV, and other registers */ + ret = ab5500_fg_init_hw_registers(di); + if (ret) { + dev_err(di->dev, "failed to initialize registers\n"); + goto pow_unreg; + } + + di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer); + + /* Initilialize avg current timer */ + init_timer(&di->avg_current_timer); + di->avg_current_timer.function = ab5500_fg_acc_cur_timer_expired; + di->avg_current_timer.data = (unsigned long) di; + di->avg_current_timer.expires = 60 * HZ; + if (!timer_pending(&di->avg_current_timer)) + add_timer(&di->avg_current_timer); + else + mod_timer(&di->avg_current_timer, 60 * HZ); + + platform_set_drvdata(pdev, di); + + /* Calibrate the fg first time */ + di->flags.calibrate = true; + di->calib_state = AB5500_FG_CALIB_INIT; + /* Run the FG algorithm */ + queue_delayed_work(di->fg_wq, &di->fg_periodic_work, + FG_PERIODIC_START_INTERVAL); + queue_delayed_work(di->fg_wq, &di->fg_acc_cur_work, + FG_PERIODIC_START_INTERVAL); + + dev_info(di->dev, "probe success\n"); + return ret; + +pow_unreg: + power_supply_unregister(&di->fg_psy); +free_fg_wq: + destroy_workqueue(di->fg_wq); +free_device_info: + kfree(di); + + return ret; +} + +static struct platform_driver ab5500_fg_driver = { + .probe = ab5500_fg_probe, + .remove = __devexit_p(ab5500_fg_remove), + .suspend = ab5500_fg_suspend, + .resume = ab5500_fg_resume, + .driver = { + .name = "ab5500-fg", + .owner = THIS_MODULE, + }, +}; + +static int __init ab5500_fg_init(void) +{ + return platform_driver_register(&ab5500_fg_driver); +} + +static void __exit ab5500_fg_exit(void) +{ + platform_driver_unregister(&ab5500_fg_driver); +} + +subsys_initcall_sync(ab5500_fg_init); +module_exit(ab5500_fg_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Johan Palsson, Karl Komierowski"); +MODULE_ALIAS("platform:ab5500-fg"); +MODULE_DESCRIPTION("AB5500 Fuel Gauge 
driver"); diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c index d8bb99394ac..5e5700cf24a 100644 --- a/drivers/power/ab8500_btemp.c +++ b/drivers/power/ab8500_btemp.c @@ -83,6 +83,7 @@ struct ab8500_btemp_ranges { * @btemp_ranges: Battery temperature range structure * @btemp_wq: Work queue for measuring the temperature periodically * @btemp_periodic_work: Work for measuring the temperature periodically + * @initialized: True if battery id read. */ struct ab8500_btemp { struct device *dev; @@ -100,6 +101,7 @@ struct ab8500_btemp { struct ab8500_btemp_ranges btemp_ranges; struct workqueue_struct *btemp_wq; struct delayed_work btemp_periodic_work; + bool initialized; }; /* BTEMP power supply properties */ @@ -569,6 +571,13 @@ static void ab8500_btemp_periodic_work(struct work_struct *work) struct ab8500_btemp *di = container_of(work, struct ab8500_btemp, btemp_periodic_work.work); + if (!di->initialized) { + di->initialized = true; + /* Identify the battery */ + if (ab8500_btemp_id(di) < 0) + dev_warn(di->dev, "failed to identify the battery\n"); + } + di->bat_temp = ab8500_btemp_measure_temp(di); if (di->bat_temp != di->prev_bat_temp) { @@ -964,7 +973,7 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev) { int irq, i, ret = 0; u8 val; - struct abx500_bm_plat_data *plat_data; + struct ab8500_platform_data *plat_data; struct ab8500_btemp *di = kzalloc(sizeof(struct ab8500_btemp), GFP_KERNEL); @@ -976,8 +985,10 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev) di->parent = dev_get_drvdata(pdev->dev.parent); di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); + di->initialized = false; + /* get btemp specific platform data */ - plat_data = pdev->dev.platform_data; + plat_data = dev_get_platdata(di->parent->dev); di->pdata = plat_data->btemp; if (!di->pdata) { dev_err(di->dev, "no btemp platform data supplied\n"); @@ -1017,10 +1028,6 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev) 
INIT_DELAYED_WORK_DEFERRABLE(&di->btemp_periodic_work, ab8500_btemp_periodic_work); - /* Identify the battery */ - if (ab8500_btemp_id(di) < 0) - dev_warn(di->dev, "failed to identify the battery\n"); - /* Set BTEMP thermal limits. Low and Med are fixed */ di->btemp_ranges.btemp_low_limit = BTEMP_THERMAL_LOW_LIMIT; di->btemp_ranges.btemp_med_limit = BTEMP_THERMAL_MED_LIMIT; diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c index e2b4accbec8..19f62729a0a 100644 --- a/drivers/power/ab8500_charger.c +++ b/drivers/power/ab8500_charger.c @@ -29,6 +29,7 @@ #include <linux/mfd/abx500/ab8500-gpadc.h> #include <linux/mfd/abx500/ux500_chargalg.h> #include <linux/usb/otg.h> +#include <asm/mach-types.h> /* Charger constants */ #define NO_PW_CONN 0 @@ -77,6 +78,9 @@ /* Lowest charger voltage is 3.39V -> 0x4E */ #define LOW_VOLT_REG 0x4E +/* Step up/down delay in us */ +#define STEP_UDELAY 1000 + /* UsbLineStatus register - usb types */ enum ab8500_charger_link_status { USB_STAT_NOT_CONFIGURED, @@ -934,6 +938,88 @@ static int ab8500_charger_get_usb_cur(struct ab8500_charger *di) } /** + * ab8500_charger_set_current() - set charger current + * @di: pointer to the ab8500_charger structure + * @ich: charger current, in mA + * @reg: select what charger register to set + * + * Set charger current. + * There is no state machine in the AB to step up/down the charger + * current to avoid dips and spikes on MAIN, VBUS and VBAT when + * charging is started. Instead we need to implement + * this charger current step-up/down here. 
+ * Returns error code in case of failure else 0(on success) + */ +static int ab8500_charger_set_current(struct ab8500_charger *di, + int ich, int reg) +{ + int ret, i; + int curr_index, prev_curr_index, shift_value; + u8 reg_value; + + switch (reg) { + case AB8500_MCH_IPT_CURLVL_REG: + shift_value = MAIN_CH_INPUT_CURR_SHIFT; + curr_index = ab8500_current_to_regval(ich); + break; + case AB8500_USBCH_IPT_CRNTLVL_REG: + shift_value = VBUS_IN_CURR_LIM_SHIFT; + curr_index = ab8500_vbus_in_curr_to_regval(ich); + break; + case AB8500_CH_OPT_CRNTLVL_REG: + shift_value = 0; + curr_index = ab8500_current_to_regval(ich); + break; + default: + dev_err(di->dev, "%s current register not valid\n", __func__); + return -ENXIO; + } + + if (curr_index < 0) { + dev_err(di->dev, "requested current limit out-of-range\n"); + return -ENXIO; + } + + ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER, + reg, ®_value); + if (ret < 0) { + dev_err(di->dev, "%s read failed\n", __func__); + return ret; + } + prev_curr_index = (reg_value >> shift_value); + + /* only update current if it's been changed */ + if (prev_curr_index == curr_index) + return 0; + + dev_dbg(di->dev, "%s set charger current: %d mA for reg: 0x%02x\n", + __func__, ich, reg); + + if (prev_curr_index > curr_index) { + for (i = prev_curr_index - 1; i >= curr_index; i--) { + ret = abx500_set_register_interruptible(di->dev, + AB8500_CHARGER, reg, (u8) i << shift_value); + if (ret) { + dev_err(di->dev, "%s write failed\n", __func__); + return ret; + } + usleep_range(STEP_UDELAY, STEP_UDELAY * 2); + } + } else { + for (i = prev_curr_index + 1; i <= curr_index; i++) { + ret = abx500_set_register_interruptible(di->dev, + AB8500_CHARGER, reg, (u8) i << shift_value); + if (ret) { + dev_err(di->dev, "%s write failed\n", __func__); + return ret; + } + usleep_range(STEP_UDELAY, STEP_UDELAY * 2); + } + } + return ret; +} + +/** * ab8500_charger_set_vbus_in_curr() - set VBUS input current limit * @di: pointer to the 
ab8500_charger structure * @ich_in: charger input current limit @@ -944,8 +1030,6 @@ static int ab8500_charger_get_usb_cur(struct ab8500_charger *di) static int ab8500_charger_set_vbus_in_curr(struct ab8500_charger *di, int ich_in) { - int ret; - int input_curr_index; int min_value; /* We should always use to lowest current limit */ @@ -964,19 +1048,38 @@ static int ab8500_charger_set_vbus_in_curr(struct ab8500_charger *di, break; } - input_curr_index = ab8500_vbus_in_curr_to_regval(min_value); - if (input_curr_index < 0) { - dev_err(di->dev, "VBUS input current limit too high\n"); - return -ENXIO; - } + return ab8500_charger_set_current(di, min_value, + AB8500_USBCH_IPT_CRNTLVL_REG); +} - ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER, - AB8500_USBCH_IPT_CRNTLVL_REG, - input_curr_index << VBUS_IN_CURR_LIM_SHIFT); - if (ret) - dev_err(di->dev, "%s write failed\n", __func__); +/** + * ab8500_charger_set_main_in_curr() - set main charger input current + * @di: pointer to the ab8500_charger structure + * @ich_in: input charger current, in mA + * + * Set main charger input current. + * Returns error code in case of failure else 0(on success) + */ +static int ab8500_charger_set_main_in_curr(struct ab8500_charger *di, + int ich_in) +{ + return ab8500_charger_set_current(di, ich_in, + AB8500_MCH_IPT_CURLVL_REG); +} - return ret; +/** + * ab8500_charger_set_output_curr() - set charger output current + * @di: pointer to the ab8500_charger structure + * @ich_out: output charger current, in mA + * + * Set charger output current. 
+ * Returns error code in case of failure else 0(on success) + */ +static int ab8500_charger_set_output_curr(struct ab8500_charger *di, + int ich_out) +{ + return ab8500_charger_set_current(di, ich_out, + AB8500_CH_OPT_CRNTLVL_REG); } /** @@ -1088,18 +1191,19 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger, return ret; } /* MainChInputCurr: current that can be drawn from the charger*/ - ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER, - AB8500_MCH_IPT_CURLVL_REG, - input_curr_index << MAIN_CH_INPUT_CURR_SHIFT); + ret = ab8500_charger_set_main_in_curr(di, + di->bat->chg_params->ac_curr_max); if (ret) { - dev_err(di->dev, "%s write failed\n", __func__); + dev_err(di->dev, "%s Failed to set MainChInputCurr\n", + __func__); return ret; } /* ChOutputCurentLevel: protected output current */ - ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER, - AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index); + ret = ab8500_charger_set_output_curr(di, iset); if (ret) { - dev_err(di->dev, "%s write failed\n", __func__); + dev_err(di->dev, "%s " + "Failed to set ChOutputCurentLevel\n", + __func__); return ret; } @@ -1156,12 +1260,11 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger, return ret; } - ret = abx500_set_register_interruptible(di->dev, - AB8500_CHARGER, - AB8500_CH_OPT_CRNTLVL_REG, CH_OP_CUR_LVL_0P1); + ret = ab8500_charger_set_output_curr(di, 0); if (ret) { - dev_err(di->dev, - "%s write failed\n", __func__); + dev_err(di->dev, "%s " + "Failed to set ChOutputCurentLevel\n", + __func__); return ret; } } else { @@ -1264,10 +1367,11 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger, return ret; } /* ChOutputCurentLevel: protected output current */ - ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER, - AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index); + ret = ab8500_charger_set_output_curr(di, ich_out); if (ret) { - dev_err(di->dev, "%s write failed\n", __func__); + dev_err(di->dev, "%s " + "Failed 
to set ChOutputCurentLevel\n", + __func__); return ret; } /* Check if VBAT overshoot control should be enabled */ @@ -1364,7 +1468,6 @@ static int ab8500_charger_update_charger_current(struct ux500_charger *charger, int ich_out) { int ret; - int curr_index; struct ab8500_charger *di; if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS) @@ -1374,18 +1477,11 @@ static int ab8500_charger_update_charger_current(struct ux500_charger *charger, else return -ENXIO; - curr_index = ab8500_current_to_regval(ich_out); - if (curr_index < 0) { - dev_err(di->dev, - "Charger current too high, " - "charging not started\n"); - return -ENXIO; - } - - ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER, - AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index); + ret = ab8500_charger_set_output_curr(di, ich_out); if (ret) { - dev_err(di->dev, "%s write failed\n", __func__); + dev_err(di->dev, "%s " + "Failed to set ChOutputCurentLevel\n", + __func__); return ret; } @@ -2354,11 +2450,18 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di) } /* Backup battery voltage and current */ - ret = abx500_set_register_interruptible(di->dev, - AB8500_RTC, - AB8500_RTC_BACKUP_CHG_REG, - di->bat->bkup_bat_v | - di->bat->bkup_bat_i); + if (machine_is_snowball()) + ret = abx500_set_register_interruptible(di->dev, + AB8500_RTC, + AB8500_RTC_BACKUP_CHG_REG, + BUP_VCH_SEL_3P1V | + BUP_ICH_SEL_150UA); + else + ret = abx500_set_register_interruptible(di->dev, + AB8500_RTC, + AB8500_RTC_BACKUP_CHG_REG, + di->bat->bkup_bat_v | + di->bat->bkup_bat_i); if (ret) { dev_err(di->dev, "failed to setup backup battery charging\n"); goto out; @@ -2534,7 +2637,7 @@ static int __devexit ab8500_charger_remove(struct platform_device *pdev) static int __devinit ab8500_charger_probe(struct platform_device *pdev) { int irq, i, charger_status, ret = 0; - struct abx500_bm_plat_data *plat_data; + struct ab8500_platform_data *plat_data; struct ab8500_charger *di = kzalloc(sizeof(struct ab8500_charger), 
GFP_KERNEL); @@ -2550,7 +2653,8 @@ static int __devinit ab8500_charger_probe(struct platform_device *pdev) spin_lock_init(&di->usb_state.usb_lock); /* get charger specific platform data */ - plat_data = pdev->dev.platform_data; + plat_data = dev_get_platdata(di->parent->dev); + di->pdata = plat_data->charger; if (!di->pdata) { diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c index c22f2f05657..798f5f7cef4 100644 --- a/drivers/power/ab8500_fg.c +++ b/drivers/power/ab8500_fg.c @@ -485,8 +485,9 @@ static int ab8500_fg_coulomb_counter(struct ab8500_fg *di, bool enable) di->flags.fg_enabled = true; } else { /* Clear any pending read requests */ - ret = abx500_set_register_interruptible(di->dev, - AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG, 0); + ret = abx500_mask_and_set_register_interruptible(di->dev, + AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG, + (RESET_ACCU | READ_REQ), 0); if (ret) goto cc_err; @@ -1404,8 +1405,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di) sleep_time = di->bat->fg_params->init_timer; /* Discard the first [x] seconds */ - if (di->init_cnt > - di->bat->fg_params->init_discard_time) { + if (di->init_cnt > di->bat->fg_params->init_discard_time) { ab8500_fg_calc_cap_discharge_voltage(di, true); ab8500_fg_check_capacity_limits(di, true); @@ -2446,7 +2446,7 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev) { int i, irq; int ret = 0; - struct abx500_bm_plat_data *plat_data; + struct ab8500_platform_data *plat_data; struct ab8500_fg *di = kzalloc(sizeof(struct ab8500_fg), GFP_KERNEL); @@ -2461,7 +2461,7 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev) di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); /* get fg specific platform data */ - plat_data = pdev->dev.platform_data; + plat_data = dev_get_platdata(di->parent->dev); di->pdata = plat_data->fg; if (!di->pdata) { dev_err(di->dev, "no fg platform data supplied\n"); diff --git a/drivers/power/abx500_chargalg.c 
b/drivers/power/abx500_chargalg.c index 804b88c760d..032b27d35bf 100644 --- a/drivers/power/abx500_chargalg.c +++ b/drivers/power/abx500_chargalg.c @@ -220,6 +220,7 @@ enum maxim_ret { */ struct abx500_chargalg { struct device *dev; + struct ab8500 *parent; int charge_status; int eoc_cnt; int rch_cnt; @@ -1802,7 +1803,7 @@ static int __devexit abx500_chargalg_remove(struct platform_device *pdev) static int __devinit abx500_chargalg_probe(struct platform_device *pdev) { - struct abx500_bm_plat_data *plat_data; + struct ab8500_platform_data *plat_data; int ret = 0; struct abx500_chargalg *di = @@ -1812,8 +1813,8 @@ static int __devinit abx500_chargalg_probe(struct platform_device *pdev) /* get device struct */ di->dev = &pdev->dev; - - plat_data = pdev->dev.platform_data; + di->parent = dev_get_drvdata(pdev->dev.parent); + plat_data = dev_get_platdata(di->parent->dev); di->pdata = plat_data->chargalg; di->bat = plat_data->battery; diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index 36db5a441eb..7278f1e4141 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -247,6 +247,14 @@ config REGULATOR_AB8500 This driver supports the regulators found on the ST-Ericsson mixed signal AB8500 PMIC +config REGULATOR_AB8500_EXT + bool "ST-Ericsson AB8500 External Regulators" + depends on REGULATOR_AB8500 + default y if REGULATOR_AB8500 + help + This driver supports the external regulator controls found on the + ST-Ericsson mixed signal AB8500 PMIC + config REGULATOR_DBX500_PRCMU bool diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index 94b52745e95..b5b90fb232b 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o +obj-$(CONFIG_REGULATOR_AB8500_EXT) += ab8500-ext.o obj-$(CONFIG_REGULATOR_AD5398) += 
ad5398.o obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o obj-$(CONFIG_REGULATOR_DA903X) += da903x.o diff --git a/drivers/regulator/ab5500.c b/drivers/regulator/ab5500.c new file mode 100644 index 00000000000..99676f7ad8e --- /dev/null +++ b/drivers/regulator/ab5500.c @@ -0,0 +1,650 @@ +/* + * Copyright (C) 2011 ST-Ericsson SA + * + * License terms: GNU General Public License (GPL) version 2 + * + * Based on ab3100.c. + * + * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson + * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/platform_device.h> +#include <linux/regulator/driver.h> +#include <linux/mfd/abx500.h> +#include <linux/mfd/abx500/ab5500.h> +#include <linux/regulator/ab5500.h> + +#define AB5500_LDO_VDIGMIC_ST 0x50 + +#define AB5500_LDO_G_ST 0x78 +#define AB5500_LDO_G_PWR1 0x79 +#define AB5500_LDO_G_PWR0 0x7a + +#define AB5500_LDO_H_ST 0x7b +#define AB5500_LDO_H_PWR1 0x7c +#define AB5500_LDO_H_PWR0 0x7d + +#define AB5500_LDO_K_ST 0x7e +#define AB5500_LDO_K_PWR1 0x7f +#define AB5500_LDO_K_PWR0 0x80 + +#define AB5500_LDO_L_ST 0x81 +#define AB5500_LDO_L_PWR1 0x82 +#define AB5500_LDO_L_PWR0 0x83 + +/* In SIM bank */ +#define AB5500_SIM_SUP 0x14 + +#define AB5500_MBIAS1 0x00 +#define AB5500_MBIAS2 0x01 + +#define AB5500_LDO_MODE_MASK (0x3 << 4) +#define AB5500_LDO_MODE_FULLPOWER (0x3 << 4) +#define AB5500_LDO_MODE_PWRCTRL (0x2 << 4) +#define AB5500_LDO_MODE_LOWPOWER (0x1 << 4) +#define AB5500_LDO_MODE_OFF (0x0 << 4) +#define AB5500_LDO_VOLT_MASK 0x07 + +#define AB5500_MBIAS1_ENABLE (0x1 << 1) +#define AB5500_MBIAS1_MODE_MASK (0x1 << 1) +#define AB5500_MBIAS2_ENABLE (0x1 << 1) +#define AB5500_MBIAS2_VOLT_MASK (0x1 << 2) +#define AB5500_MBIAS2_MODE_MASK (0x1 << 1) + +struct ab5500_regulator { + struct regulator_desc desc; + const int *voltages; + int num_holes; + bool off_is_lowpower; + bool 
enabled; + int enable_time; + int load_lp_uA; + u8 bank; + u8 reg; + u8 mode; + u8 update_mask; + u8 update_val_idle; + u8 update_val_normal; + u8 voltage_mask; +}; + +struct ab5500_regulators { + struct device *dev; + struct ab5500_regulator *regulator[AB5500_NUM_REGULATORS]; + struct regulator_dev *rdev[AB5500_NUM_REGULATORS]; +}; + +static int ab5500_regulator_enable_time(struct regulator_dev *rdev) +{ + struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev); + struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)]; + + return r->enable_time; /* microseconds */ +} + +static int ab5500_regulator_enable(struct regulator_dev *rdev) +{ + struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev); + struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)]; + int ret; + + ret = abx500_mask_and_set(ab5500->dev, r->bank, r->reg, + r->update_mask, r->mode); + if (ret < 0) + return ret; + + r->enabled = true; + + return 0; +} + +static int ab5500_regulator_disable(struct regulator_dev *rdev) +{ + struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev); + struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)]; + u8 regval; + int ret; + + if (r->off_is_lowpower) + regval = AB5500_LDO_MODE_LOWPOWER; + else + regval = AB5500_LDO_MODE_OFF; + + ret = abx500_mask_and_set(ab5500->dev, r->bank, r->reg, + r->update_mask, regval); + if (ret < 0) + return ret; + + r->enabled = false; + + return 0; +} + +static unsigned int ab5500_regulator_get_mode(struct regulator_dev *rdev) +{ + struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev); + struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)]; + + if (r->mode == r->update_val_idle) + return REGULATOR_MODE_IDLE; + + return REGULATOR_MODE_NORMAL; +} + +static unsigned int +ab5500_regulator_get_optimum_mode(struct regulator_dev *rdev, + int input_uV, int output_uV, int load_uA) +{ + struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev); + struct ab5500_regulator *r = 
ab5500->regulator[rdev_get_id(rdev)]; + unsigned int mode; + + if (load_uA <= r->load_lp_uA) + mode = REGULATOR_MODE_IDLE; + else + mode = REGULATOR_MODE_NORMAL; + + return mode; +} + +static int ab5500_regulator_set_mode(struct regulator_dev *rdev, + unsigned int mode) +{ + struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev); + struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)]; + + switch (mode) { + case REGULATOR_MODE_NORMAL: + r->mode = r->update_val_normal; + break; + case REGULATOR_MODE_IDLE: + r->mode = r->update_val_idle; + break; + default: + return -EINVAL; + } + + if (r->enabled) + return ab5500_regulator_enable(rdev); + + return 0; +} + +static int ab5500_regulator_is_enabled(struct regulator_dev *rdev) +{ + struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev); + struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)]; + u8 regval; + int err; + + err = abx500_get_register_interruptible(ab5500->dev, + r->bank, r->reg, ®val); + if (err) { + dev_err(rdev_get_dev(rdev), "unable to get register 0x%x\n", + r->reg); + return err; + } + + switch (regval & r->update_mask) { + case AB5500_LDO_MODE_PWRCTRL: + case AB5500_LDO_MODE_OFF: + r->enabled = false; + break; + case AB5500_LDO_MODE_LOWPOWER: + if (r->off_is_lowpower) { + r->enabled = false; + break; + } + /* fall through */ + default: + r->enabled = true; + break; + } + + return r->enabled; +} + +static int +ab5500_regulator_list_voltage(struct regulator_dev *rdev, unsigned selector) +{ + struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev); + struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)]; + unsigned n_voltages = r->desc.n_voltages; + int selindex; + int i; + + for (i = 0, selindex = 0; selindex < n_voltages; i++) { + int voltage = r->voltages[i]; + + if (!voltage) + continue; + + if (selindex == selector) + return voltage; + + selindex++; + } + + return -EINVAL; +} + +static int ab5500_regulator_fixed_get_voltage(struct regulator_dev *rdev) +{ + 
struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev); + struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)]; + + return r->voltages[0]; +} + +static int ab5500_regulator_get_voltage(struct regulator_dev *rdev) +{ + struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev); + struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)]; + u8 regval; + int ret; + + ret = abx500_get_register_interruptible(ab5500->dev, + r->bank, r->reg, ®val); + if (ret) { + dev_warn(rdev_get_dev(rdev), + "failed to get regulator value in register " + "%02x\n", r->reg); + return ret; + } + + regval &= r->voltage_mask; + if (regval >= r->desc.n_voltages + r->num_holes) + return -EINVAL; + + if (!r->voltages[regval]) + return -EINVAL; + + return r->voltages[regval]; +} + +static int ab5500_get_best_voltage_index(struct ab5500_regulator *r, + int min_uV, int max_uV) +{ + unsigned n_voltages = r->desc.n_voltages; + int bestmatch = INT_MAX; + int bestindex = -EINVAL; + int selindex; + int i; + + /* + * Locate the minimum voltage fitting the criteria on + * this regulator. The switchable voltages are not + * in strict falling order so we need to check them + * all for the best match. 
+ */ + for (i = 0, selindex = 0; selindex < n_voltages; i++) { + int voltage = r->voltages[i]; + + if (!voltage) + continue; + + if (voltage <= max_uV && + voltage >= min_uV && + voltage < bestmatch) { + bestmatch = voltage; + bestindex = i; + } + + selindex++; + } + + return bestindex; +} + +static int ab5500_regulator_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV, + unsigned *selector) +{ + struct ab5500_regulators *ab5500 = rdev_get_drvdata(rdev); + struct ab5500_regulator *r = ab5500->regulator[rdev_get_id(rdev)]; + int bestindex; + + bestindex = ab5500_get_best_voltage_index(r, min_uV, max_uV); + if (bestindex < 0) { + dev_warn(rdev_get_dev(rdev), + "requested %d<=x<=%d uV, out of range!\n", + min_uV, max_uV); + return bestindex; + } + + *selector = bestindex; + + return abx500_mask_and_set_register_interruptible(ab5500->dev, + r->bank, r->reg, r->voltage_mask, bestindex); + +} + +static struct regulator_ops ab5500_regulator_variable_ops = { + .enable = ab5500_regulator_enable, + .disable = ab5500_regulator_disable, + .is_enabled = ab5500_regulator_is_enabled, + .enable_time = ab5500_regulator_enable_time, + .get_voltage = ab5500_regulator_get_voltage, + .set_voltage = ab5500_regulator_set_voltage, + .list_voltage = ab5500_regulator_list_voltage, + .set_mode = ab5500_regulator_set_mode, + .get_mode = ab5500_regulator_get_mode, + .get_optimum_mode = ab5500_regulator_get_optimum_mode, +}; + +static struct regulator_ops ab5500_regulator_fixed_ops = { + .enable = ab5500_regulator_enable, + .disable = ab5500_regulator_disable, + .is_enabled = ab5500_regulator_is_enabled, + .enable_time = ab5500_regulator_enable_time, + .get_voltage = ab5500_regulator_fixed_get_voltage, + .list_voltage = ab5500_regulator_list_voltage, + .set_mode = ab5500_regulator_set_mode, + .get_mode = ab5500_regulator_get_mode, + .get_optimum_mode = ab5500_regulator_get_optimum_mode, +}; + +static const int ab5500_ldo_lg_voltages[] = { + [0x00] = 1200000, + [0x01] = 0, /* not 
used */ + [0x02] = 1500000, + [0x03] = 1800000, + [0x04] = 0, /* not used */ + [0x05] = 2500000, + [0x06] = 2730000, + [0x07] = 2910000, +}; + +static const int ab5500_ldo_kh_voltages[] = { + [0x00] = 1200000, + [0x01] = 1500000, + [0x02] = 1800000, + [0x03] = 2100000, + [0x04] = 2500000, + [0x05] = 2750000, + [0x06] = 2790000, + [0x07] = 2910000, +}; + +static const int ab5500_ldo_vdigmic_voltages[] = { + [0x00] = 2100000, +}; + +static const int ab5500_ldo_sim_voltages[] = { + [0x00] = 1875000, + [0x01] = 2800000, + [0x02] = 2900000, +}; + +static const int ab5500_bias2_voltages[] = { + [0x00] = 2000000, + [0x01] = 2200000, +}; + +static const int ab5500_bias1_voltages[] = { + [0x00] = 2000000, +}; + +static struct ab5500_regulator ab5500_regulators[] = { + [AB5500_LDO_L] = { + .desc = { + .name = "LDO_L", + .id = AB5500_LDO_L, + .ops = &ab5500_regulator_variable_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .n_voltages = ARRAY_SIZE(ab5500_ldo_lg_voltages) - + 2, + }, + .bank = AB5500_BANK_STARTUP, + .reg = AB5500_LDO_L_ST, + .voltages = ab5500_ldo_lg_voltages, + .num_holes = 2, /* 2 register values unused */ + .enable_time = 400, + .load_lp_uA = 20000, + .mode = AB5500_LDO_MODE_FULLPOWER, + .update_mask = AB5500_LDO_MODE_MASK, + .update_val_normal = AB5500_LDO_MODE_FULLPOWER, + .update_val_idle = AB5500_LDO_MODE_LOWPOWER, + .voltage_mask = AB5500_LDO_VOLT_MASK, + }, + [AB5500_LDO_G] = { + .desc = { + .name = "LDO_G", + .id = AB5500_LDO_G, + .ops = &ab5500_regulator_variable_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .n_voltages = ARRAY_SIZE(ab5500_ldo_lg_voltages) - + 2, + }, + .bank = AB5500_BANK_STARTUP, + .reg = AB5500_LDO_G_ST, + .voltages = ab5500_ldo_lg_voltages, + .num_holes = 2, /* 2 register values unused */ + .enable_time = 400, + .load_lp_uA = 20000, + .mode = AB5500_LDO_MODE_FULLPOWER, + .update_mask = AB5500_LDO_MODE_MASK, + .update_val_normal = AB5500_LDO_MODE_FULLPOWER, + .update_val_idle = AB5500_LDO_MODE_LOWPOWER, 
+ .voltage_mask = AB5500_LDO_VOLT_MASK, + }, + [AB5500_LDO_K] = { + .desc = { + .name = "LDO_K", + .id = AB5500_LDO_K, + .ops = &ab5500_regulator_variable_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .n_voltages = ARRAY_SIZE(ab5500_ldo_kh_voltages), + }, + .bank = AB5500_BANK_STARTUP, + .reg = AB5500_LDO_K_ST, + .voltages = ab5500_ldo_kh_voltages, + .enable_time = 400, + .load_lp_uA = 20000, + .mode = AB5500_LDO_MODE_FULLPOWER, + .update_mask = AB5500_LDO_MODE_MASK, + .update_val_normal = AB5500_LDO_MODE_FULLPOWER, + .update_val_idle = AB5500_LDO_MODE_LOWPOWER, + .voltage_mask = AB5500_LDO_VOLT_MASK, + }, + [AB5500_LDO_H] = { + .desc = { + .name = "LDO_H", + .id = AB5500_LDO_H, + .ops = &ab5500_regulator_variable_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .n_voltages = ARRAY_SIZE(ab5500_ldo_kh_voltages), + }, + .bank = AB5500_BANK_STARTUP, + .reg = AB5500_LDO_H_ST, + .voltages = ab5500_ldo_kh_voltages, + .enable_time = 400, + .load_lp_uA = 20000, + .mode = AB5500_LDO_MODE_FULLPOWER, + .update_mask = AB5500_LDO_MODE_MASK, + .update_val_normal = AB5500_LDO_MODE_FULLPOWER, + .update_val_idle = AB5500_LDO_MODE_LOWPOWER, + .voltage_mask = AB5500_LDO_VOLT_MASK, + }, + [AB5500_LDO_VDIGMIC] = { + .desc = { + .name = "LDO_VDIGMIC", + .id = AB5500_LDO_VDIGMIC, + .ops = &ab5500_regulator_fixed_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .n_voltages = + ARRAY_SIZE(ab5500_ldo_vdigmic_voltages), + }, + .bank = AB5500_BANK_STARTUP, + .reg = AB5500_LDO_VDIGMIC_ST, + .voltages = ab5500_ldo_vdigmic_voltages, + .enable_time = 450, + .mode = AB5500_LDO_MODE_FULLPOWER, + .update_mask = AB5500_LDO_MODE_MASK, + .update_val_normal = AB5500_LDO_MODE_FULLPOWER, + .update_val_idle = AB5500_LDO_MODE_LOWPOWER, + .voltage_mask = AB5500_LDO_VOLT_MASK, + }, + [AB5500_LDO_SIM] = { + .desc = { + .name = "LDO_SIM", + .id = AB5500_LDO_SIM, + .ops = &ab5500_regulator_variable_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .n_voltages = 
ARRAY_SIZE(ab5500_ldo_sim_voltages), + }, + .bank = AB5500_BANK_SIM_USBSIM, + .reg = AB5500_SIM_SUP, + .voltages = ab5500_ldo_sim_voltages, + .enable_time = 1000, + .mode = AB5500_LDO_MODE_FULLPOWER, + .update_mask = AB5500_LDO_MODE_MASK, + .update_val_normal = AB5500_LDO_MODE_FULLPOWER, + .update_val_idle = AB5500_LDO_MODE_LOWPOWER, + .voltage_mask = AB5500_LDO_VOLT_MASK, + }, + [AB5500_BIAS2] = { + .desc = { + .name = "MBIAS2", + .id = AB5500_BIAS2, + .ops = &ab5500_regulator_variable_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .n_voltages = ARRAY_SIZE(ab5500_bias2_voltages), + }, + .bank = AB5500_BANK_AUDIO_HEADSETUSB, + .reg = AB5500_MBIAS2, + .voltages = ab5500_bias2_voltages, + .enable_time = 1000, + .mode = AB5500_MBIAS2_ENABLE, + .update_mask = AB5500_MBIAS2_MODE_MASK, + .update_val_normal = AB5500_MBIAS2_ENABLE, + .update_val_idle = AB5500_MBIAS2_ENABLE, + .voltage_mask = AB5500_MBIAS2_VOLT_MASK, + }, + [AB5500_BIAS1] = { + .desc = { + .name = "MBIAS1", + .id = AB5500_BIAS1, + .ops = &ab5500_regulator_fixed_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .n_voltages = ARRAY_SIZE(ab5500_bias1_voltages), + }, + .bank = AB5500_BANK_AUDIO_HEADSETUSB, + .reg = AB5500_MBIAS1, + .voltages = ab5500_bias1_voltages, + .enable_time = 1000, + .mode = AB5500_MBIAS1_ENABLE, + .update_mask = AB5500_MBIAS1_MODE_MASK, + .update_val_normal = AB5500_MBIAS1_ENABLE, + .update_val_idle = AB5500_MBIAS1_ENABLE, + }, +}; + + +static int __devinit ab5500_regulator_probe(struct platform_device *pdev) +{ + struct ab5500_platform_data *ppdata = pdev->dev.parent->platform_data; + struct ab5500_regulator_platform_data *pdata = ppdata->regulator; + struct ab5500_regulator_data *regdata; + struct ab5500_regulators *ab5500; + int err = 0; + int i; + + if (!pdata || !pdata->regulator) + return -EINVAL; + + ab5500 = kzalloc(sizeof(*ab5500), GFP_KERNEL); + if (!ab5500) + return -ENOMEM; + + ab5500->dev = &pdev->dev; + regdata = pdata->data; + + 
platform_set_drvdata(pdev, ab5500); + + for (i = 0; i < AB5500_NUM_REGULATORS; i++) { + struct ab5500_regulator *regulator = &ab5500_regulators[i]; + struct regulator_dev *rdev; + + if (regdata) + regulator->off_is_lowpower = regdata[i].off_is_lowpower; + + ab5500->regulator[i] = regulator; + + rdev = regulator_register(®ulator->desc, &pdev->dev, + &pdata->regulator[i], ab5500); + if (IS_ERR(rdev)) { + err = PTR_ERR(rdev); + dev_err(&pdev->dev, "failed to register regulator %s err %d\n", + regulator->desc.name, err); + goto err_unregister; + } + + ab5500->rdev[i] = rdev; + } + + return 0; + +err_unregister: + /* remove the already registered regulators */ + while (--i >= 0) + regulator_unregister(ab5500->rdev[i]); + + platform_set_drvdata(pdev, NULL); + kfree(ab5500); + + return err; +} + +static int __devexit ab5500_regulators_remove(struct platform_device *pdev) +{ + struct ab5500_regulators *ab5500 = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < AB5500_NUM_REGULATORS; i++) + regulator_unregister(ab5500->rdev[i]); + + platform_set_drvdata(pdev, NULL); + kfree(ab5500); + + return 0; +} + +static struct platform_driver ab5500_regulator_driver = { + .driver = { + .name = "ab5500-regulator", + .owner = THIS_MODULE, + }, + .probe = ab5500_regulator_probe, + .remove = __devexit_p(ab5500_regulators_remove), +}; + +static __init int ab5500_regulator_init(void) +{ + return platform_driver_register(&ab5500_regulator_driver); +} + +static __exit void ab5500_regulator_exit(void) +{ + platform_driver_unregister(&ab5500_regulator_driver); +} + +subsys_initcall(ab5500_regulator_init); +module_exit(ab5500_regulator_exit); + +MODULE_DESCRIPTION("AB5500 Regulator Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:ab5500-regulator"); diff --git a/drivers/regulator/ab8500-debug.c b/drivers/regulator/ab8500-debug.c new file mode 100644 index 00000000000..f71cc26c135 --- /dev/null +++ b/drivers/regulator/ab8500-debug.c @@ -0,0 +1,2083 @@ +/* + * Copyright (C) 
ST-Ericsson SA 2010 + * + * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson. + * + * License Terms: GNU General Public License v2 + */ + +#include <linux/module.h> +#include <linux/seq_file.h> +#include <linux/uaccess.h> +#include <linux/fs.h> +#include <linux/debugfs.h> +#include <linux/platform_device.h> +#include <linux/kobject.h> +#include <linux/slab.h> +#include <linux/mfd/abx500.h> +#include <linux/mfd/abx500/ab8500.h> +#include <linux/regulator/ab8500-debug.h> +#include <linux/io.h> + +#include <mach/db8500-regs.h> /* U8500_BACKUPRAM1_BASE */ +#include <mach/hardware.h> + +#include "ab8500-debug.h" + +/* board profile address - to determine if suspend-force is default */ +#define BOOT_INFO_BACKUPRAM1 (U8500_BACKUPRAM1_BASE + 0xffc) +#define BOARD_PROFILE_BACKUPRAM1 (0x3) + +/* board profile option */ +#define OPTION_BOARD_VERSION_V5X 50 + +/* for error prints */ +struct device *dev; +struct platform_device *pdev; + +/* setting for suspend force (disabled by default) */ +static bool setting_suspend_force; + +/* + * regulator states + */ +enum ab8500_regulator_state_id { + AB8500_REGULATOR_STATE_INIT, + AB8500_REGULATOR_STATE_SUSPEND, + AB8500_REGULATOR_STATE_SUSPEND_CORE, + AB8500_REGULATOR_STATE_RESUME_CORE, + AB8500_REGULATOR_STATE_RESUME, + AB8500_REGULATOR_STATE_CURRENT, + NUM_REGULATOR_STATE +}; + +static const char *regulator_state_name[NUM_REGULATOR_STATE] = { + [AB8500_REGULATOR_STATE_INIT] = "init", + [AB8500_REGULATOR_STATE_SUSPEND] = "suspend", + [AB8500_REGULATOR_STATE_SUSPEND_CORE] = "suspend-core", + [AB8500_REGULATOR_STATE_RESUME_CORE] = "resume-core", + [AB8500_REGULATOR_STATE_RESUME] = "resume", + [AB8500_REGULATOR_STATE_CURRENT] = "current", +}; + +/* + * regulator register definitions + */ +enum ab8500_register_id { + AB8500_REGU_NOUSE, /* if not defined */ + AB8500_REGU_REQUEST_CTRL1, + AB8500_REGU_REQUEST_CTRL2, + AB8500_REGU_REQUEST_CTRL3, + AB8500_REGU_REQUEST_CTRL4, + AB8500_REGU_SYSCLK_REQ1_HP_VALID1, + 
AB8500_REGU_SYSCLK_REQ1_HP_VALID2, + AB8500_REGU_HW_HP_REQ1_VALID1, + AB8500_REGU_HW_HP_REQ1_VALID2, + AB8500_REGU_HW_HP_REQ2_VALID1, + AB8500_REGU_HW_HP_REQ2_VALID2, + AB8500_REGU_SW_HP_REQ_VALID1, + AB8500_REGU_SW_HP_REQ_VALID2, + AB8500_REGU_SYSCLK_REQ1_VALID, + AB8500_REGU_SYSCLK_REQ2_VALID, + AB9540_REGU_VAUX4_REQ_VALID, + AB8500_REGU_MISC1, + AB8500_REGU_OTG_SUPPLY_CTRL, + AB8500_REGU_VUSB_CTRL, + AB8500_REGU_VAUDIO_SUPPLY, + AB8500_REGU_CTRL1_VAMIC, + AB8500_REGU_ARM_REGU1, + AB8500_REGU_ARM_REGU2, + AB8500_REGU_VAPE_REGU, + AB8500_REGU_VSMPS1_REGU, + AB8500_REGU_VSMPS2_REGU, + AB8500_REGU_VSMPS3_REGU, + AB8500_REGU_VPLL_VANA_REGU, + AB8500_REGU_VREF_DDR, + AB8500_REGU_EXT_SUPPLY_REGU, + AB8500_REGU_VAUX12_REGU, + AB8500_REGU_VRF1_VAUX3_REGU, + AB8500_REGU_VARM_SEL1, + AB8500_REGU_VARM_SEL2, + AB8500_REGU_VARM_SEL3, + AB8500_REGU_VAPE_SEL1, + AB8500_REGU_VAPE_SEL2, + AB8500_REGU_VAPE_SEL3, + AB9540_REGU_VAUX4_REQ_CTRL, + AB9540_REGU_VAUX4_REGU, + AB9540_REGU_VAUX4_SEL, + AB8500_REGU_VBB_SEL1, + AB8500_REGU_VBB_SEL2, + AB8500_REGU_VSMPS1_SEL1, + AB8500_REGU_VSMPS1_SEL2, + AB8500_REGU_VSMPS1_SEL3, + AB8500_REGU_VSMPS2_SEL1, + AB8500_REGU_VSMPS2_SEL2, + AB8500_REGU_VSMPS2_SEL3, + AB8500_REGU_VSMPS3_SEL1, + AB8500_REGU_VSMPS3_SEL2, + AB8500_REGU_VSMPS3_SEL3, + AB8500_REGU_VAUX1_SEL, + AB8500_REGU_VAUX2_SEL, + AB8500_REGU_VRF1_VAUX3_SEL, + AB8500_REGU_CTRL_EXT_SUP, + AB8500_REGU_VMOD_REGU, + AB8500_REGU_VMOD_SEL1, + AB8500_REGU_VMOD_SEL2, + AB8500_REGU_CTRL_DISCH, + AB8500_REGU_CTRL_DISCH2, + AB9540_REGU_CTRL_DISCH3, + AB8500_OTHER_SYSCLK_CTRL, /* Other */ + AB8500_OTHER_VSIM_SYSCLK_CTRL, /* Other */ + AB8500_OTHER_SYSULPCLK_CTRL1, /* Other */ + NUM_AB8500_REGISTER +}; + +struct ab8500_register { + const char *name; + u8 bank; + u8 addr; + u8 unavailable; /* Used to flag when AB doesn't support a register */ +}; + +static struct ab8500_register + ab8500_register[NUM_AB8500_REGISTER] = { + [AB8500_REGU_REQUEST_CTRL1] = { + .name = "ReguRequestCtrl1", + .bank = 
0x03, + .addr = 0x03, + }, + [AB8500_REGU_REQUEST_CTRL2] = { + .name = "ReguRequestCtrl2", + .bank = 0x03, + .addr = 0x04, + }, + [AB8500_REGU_REQUEST_CTRL3] = { + .name = "ReguRequestCtrl3", + .bank = 0x03, + .addr = 0x05, + }, + [AB8500_REGU_REQUEST_CTRL4] = { + .name = "ReguRequestCtrl4", + .bank = 0x03, + .addr = 0x06, + }, + [AB8500_REGU_SYSCLK_REQ1_HP_VALID1] = { + .name = "ReguSysClkReq1HPValid", + .bank = 0x03, + .addr = 0x07, + }, + [AB8500_REGU_SYSCLK_REQ1_HP_VALID2] = { + .name = "ReguSysClkReq1HPValid2", + .bank = 0x03, + .addr = 0x08, + }, + [AB8500_REGU_HW_HP_REQ1_VALID1] = { + .name = "ReguHwHPReq1Valid1", + .bank = 0x03, + .addr = 0x09, + }, + [AB8500_REGU_HW_HP_REQ1_VALID2] = { + .name = "ReguHwHPReq1Valid2", + .bank = 0x03, + .addr = 0x0a, + }, + [AB8500_REGU_HW_HP_REQ2_VALID1] = { + .name = "ReguHwHPReq2Valid1", + .bank = 0x03, + .addr = 0x0b, + }, + [AB8500_REGU_HW_HP_REQ2_VALID2] = { + .name = "ReguHwHPReq2Valid2", + .bank = 0x03, + .addr = 0x0c, + }, + [AB8500_REGU_SW_HP_REQ_VALID1] = { + .name = "ReguSwHPReqValid1", + .bank = 0x03, + .addr = 0x0d, + }, + [AB8500_REGU_SW_HP_REQ_VALID2] = { + .name = "ReguSwHPReqValid2", + .bank = 0x03, + .addr = 0x0e, + }, + [AB8500_REGU_SYSCLK_REQ1_VALID] = { + .name = "ReguSysClkReqValid1", + .bank = 0x03, + .addr = 0x0f, + }, + [AB8500_REGU_SYSCLK_REQ2_VALID] = { + .name = "ReguSysClkReqValid2", + .bank = 0x03, + .addr = 0x10, + }, + [AB9540_REGU_VAUX4_REQ_VALID] = { + .name = "ReguVaux4ReqValid", + .bank = 0x03, + .addr = 0x11, + .unavailable = true, /* ab9540 register */ + }, + [AB8500_REGU_MISC1] = { + .name = "ReguMisc1", + .bank = 0x03, + .addr = 0x80, + }, + [AB8500_REGU_OTG_SUPPLY_CTRL] = { + .name = "OTGSupplyCtrl", + .bank = 0x03, + .addr = 0x81, + }, + [AB8500_REGU_VUSB_CTRL] = { + .name = "VusbCtrl", + .bank = 0x03, + .addr = 0x82, + }, + [AB8500_REGU_VAUDIO_SUPPLY] = { + .name = "VaudioSupply", + .bank = 0x03, + .addr = 0x83, + }, + [AB8500_REGU_CTRL1_VAMIC] = { + .name = "ReguCtrl1VAmic", + 
.bank = 0x03, + .addr = 0x84, + }, + [AB8500_REGU_ARM_REGU1] = { + .name = "ArmRegu1", + .bank = 0x04, + .addr = 0x00, + }, + [AB8500_REGU_ARM_REGU2] = { + .name = "ArmRegu2", + .bank = 0x04, + .addr = 0x01, + }, + [AB8500_REGU_VAPE_REGU] = { + .name = "VapeRegu", + .bank = 0x04, + .addr = 0x02, + }, + [AB8500_REGU_VSMPS1_REGU] = { + .name = "Vsmps1Regu", + .bank = 0x04, + .addr = 0x03, + }, + [AB8500_REGU_VSMPS2_REGU] = { + .name = "Vsmps2Regu", + .bank = 0x04, + .addr = 0x04, + }, + [AB8500_REGU_VSMPS3_REGU] = { + .name = "Vsmps3Regu", + .bank = 0x04, + .addr = 0x05, + }, + [AB8500_REGU_VPLL_VANA_REGU] = { + .name = "VpllVanaRegu", + .bank = 0x04, + .addr = 0x06, + }, + [AB8500_REGU_VREF_DDR] = { + .name = "VrefDDR", + .bank = 0x04, + .addr = 0x07, + }, + [AB8500_REGU_EXT_SUPPLY_REGU] = { + .name = "ExtSupplyRegu", + .bank = 0x04, + .addr = 0x08, + }, + [AB8500_REGU_VAUX12_REGU] = { + .name = "Vaux12Regu", + .bank = 0x04, + .addr = 0x09, + }, + [AB8500_REGU_VRF1_VAUX3_REGU] = { + .name = "VRF1Vaux3Regu", + .bank = 0x04, + .addr = 0x0a, + }, + [AB8500_REGU_VARM_SEL1] = { + .name = "VarmSel1", + .bank = 0x04, + .addr = 0x0b, + }, + [AB8500_REGU_VARM_SEL2] = { + .name = "VarmSel2", + .bank = 0x04, + .addr = 0x0c, + }, + [AB8500_REGU_VARM_SEL3] = { + .name = "VarmSel3", + .bank = 0x04, + .addr = 0x0d, + }, + [AB8500_REGU_VAPE_SEL1] = { + .name = "VapeSel1", + .bank = 0x04, + .addr = 0x0e, + }, + [AB8500_REGU_VAPE_SEL2] = { + .name = "VapeSel2", + .bank = 0x04, + .addr = 0x0f, + }, + [AB8500_REGU_VAPE_SEL3] = { + .name = "VapeSel3", + .bank = 0x04, + .addr = 0x10, + }, + [AB9540_REGU_VAUX4_REQ_CTRL] = { + .name = "Vaux4ReqCtrl", + .bank = 0x04, + .addr = 0x2d, + .unavailable = true, /* ab9540 register */ + }, + [AB9540_REGU_VAUX4_REGU] = { + .name = "Vaux4Regu", + .bank = 0x04, + .addr = 0x2e, + .unavailable = true, /* ab9540 register */ + }, + [AB9540_REGU_VAUX4_SEL] = { + .name = "Vaux4Sel", + .bank = 0x04, + .addr = 0x2f, + .unavailable = true, /* ab9540 register 
*/ + }, + [AB8500_REGU_VBB_SEL1] = { + .name = "VBBSel1", + .bank = 0x04, + .addr = 0x11, + }, + [AB8500_REGU_VBB_SEL2] = { + .name = "VBBSel2", + .bank = 0x04, + .addr = 0x12, + }, + [AB8500_REGU_VSMPS1_SEL1] = { + .name = "Vsmps1Sel1", + .bank = 0x04, + .addr = 0x13, + }, + [AB8500_REGU_VSMPS1_SEL2] = { + .name = "Vsmps1Sel2", + .bank = 0x04, + .addr = 0x14, + }, + [AB8500_REGU_VSMPS1_SEL3] = { + .name = "Vsmps1Sel3", + .bank = 0x04, + .addr = 0x15, + }, + [AB8500_REGU_VSMPS2_SEL1] = { + .name = "Vsmps2Sel1", + .bank = 0x04, + .addr = 0x17, + }, + [AB8500_REGU_VSMPS2_SEL2] = { + .name = "Vsmps2Sel2", + .bank = 0x04, + .addr = 0x18, + }, + [AB8500_REGU_VSMPS2_SEL3] = { + .name = "Vsmps2Sel3", + .bank = 0x04, + .addr = 0x19, + }, + [AB8500_REGU_VSMPS3_SEL1] = { + .name = "Vsmps3Sel1", + .bank = 0x04, + .addr = 0x1b, + }, + [AB8500_REGU_VSMPS3_SEL2] = { + .name = "Vsmps3Sel2", + .bank = 0x04, + .addr = 0x1c, + }, + [AB8500_REGU_VSMPS3_SEL3] = { + .name = "Vsmps3Sel3", + .bank = 0x04, + .addr = 0x1d, + }, + [AB8500_REGU_VAUX1_SEL] = { + .name = "Vaux1Sel", + .bank = 0x04, + .addr = 0x1f, + }, + [AB8500_REGU_VAUX2_SEL] = { + .name = "Vaux2Sel", + .bank = 0x04, + .addr = 0x20, + }, + [AB8500_REGU_VRF1_VAUX3_SEL] = { + .name = "VRF1Vaux3Sel", + .bank = 0x04, + .addr = 0x21, + }, + [AB8500_REGU_CTRL_EXT_SUP] = { + .name = "ReguCtrlExtSup", + .bank = 0x04, + .addr = 0x22, + }, + [AB8500_REGU_VMOD_REGU] = { + .name = "VmodRegu", + .bank = 0x04, + .addr = 0x40, + }, + [AB8500_REGU_VMOD_SEL1] = { + .name = "VmodSel1", + .bank = 0x04, + .addr = 0x41, + }, + [AB8500_REGU_VMOD_SEL2] = { + .name = "VmodSel2", + .bank = 0x04, + .addr = 0x42, + }, + [AB8500_REGU_CTRL_DISCH] = { + .name = "ReguCtrlDisch", + .bank = 0x04, + .addr = 0x43, + }, + [AB8500_REGU_CTRL_DISCH2] = { + .name = "ReguCtrlDisch2", + .bank = 0x04, + .addr = 0x44, + }, + [AB9540_REGU_CTRL_DISCH3] = { + .name = "ReguCtrlDisch3", + .bank = 0x04, + .addr = 0x48, + .unavailable = true, /* ab9540 register */ + }, + /* 
Outside regulator banks */ + [AB8500_OTHER_SYSCLK_CTRL] = { + .name = "SysClkCtrl", + .bank = 0x02, + .addr = 0x0c, + }, + [AB8500_OTHER_VSIM_SYSCLK_CTRL] = { + .name = "VsimSysClkCtrl", + .bank = 0x02, + .addr = 0x33, + }, + [AB8500_OTHER_SYSULPCLK_CTRL1] = { + .name = "SysUlpClkCtrl1", + .bank = 0x02, + .addr = 0x0b, + }, +}; + +struct ab9540_register_update { + /* Identity of register to be updated */ + u8 bank; + u8 addr; + /* New value for unavailable flag */ + u8 unavailable; +}; + +static const struct ab9540_register_update ab9540_update[] = { + /* AB8500 register which is unavailable to AB9540 */ + /* AB8500_REGU_VREF_DDR */ + { + .bank = 0x04, + .addr = 0x07, + .unavailable = true, + }, + + /* Registers which were not available to AB8500 but are on the + * AB9540. */ + /* AB9540_REGU_VAUX4_REQ_VALID */ + { + .bank = 0x03, + .addr = 0x11, + }, + /* AB9540_REGU_VAUX4_REQ_CTRL */ + { + .bank = 0x04, + .addr = 0x2d, + }, + /* AB9540_REGU_VAUX4_REGU */ + { + .bank = 0x04, + .addr = 0x2e, + }, + /* AB9540_REGU_VAUX4_SEL */ + { + .bank = 0x04, + .addr = 0x2f, + }, + /* AB9540_REGU_CTRL_DISCH3 */ + { + .bank = 0x04, + .addr = 0x48, + }, +}; + +static void ab9540_registers_update(void) +{ + int i; + int j; + + for (i = 0; i < NUM_AB8500_REGISTER; i++) + for (j = 0; j < ARRAY_SIZE(ab9540_update); j++) + if (ab8500_register[i].bank == ab9540_update[j].bank && + ab8500_register[i].addr == ab9540_update[j].addr) { + ab8500_register[i].unavailable = + ab9540_update[j].unavailable; + break; + } +} + +static u8 ab8500_register_state[NUM_REGULATOR_STATE][NUM_AB8500_REGISTER]; +static bool ab8500_register_state_saved[NUM_REGULATOR_STATE]; +static bool ab8500_register_state_save = true; + +static int ab8500_regulator_record_state(int state) +{ + u8 val; + int i; + int ret; + + /* check arguments */ + if ((state > NUM_REGULATOR_STATE) || (state < 0)) { + dev_err(dev, "Wrong state specified\n"); + return -EINVAL; + } + + /* record */ + if (!ab8500_register_state_save) + goto 
exit; + + ab8500_register_state_saved[state] = true; + + for (i = 1; i < NUM_AB8500_REGISTER; i++) { + if (ab8500_register[i].unavailable) + continue; + + ret = abx500_get_register_interruptible(dev, + ab8500_register[i].bank, + ab8500_register[i].addr, + &val); + if (ret < 0) { + dev_err(dev, "abx500_get_reg fail %d, %d\n", + ret, __LINE__); + return -EINVAL; + } + + ab8500_register_state[state][i] = val; + } +exit: + return 0; +} + +/* + * regulator register dump + */ +static int ab8500_regulator_dump_print(struct seq_file *s, void *p) +{ + struct device *dev = s->private; + int state, reg_id, i; + int err; + + /* record current state */ + ab8500_regulator_record_state(AB8500_REGULATOR_STATE_CURRENT); + + /* print dump header */ + err = seq_printf(s, "ab8500-regulator dump:\n"); + if (err < 0) + dev_err(dev, "seq_printf overflow\n"); + + /* print states */ + for (state = NUM_REGULATOR_STATE - 1; state >= 0; state--) { + if (ab8500_register_state_saved[state]) + err = seq_printf(s, "%16s saved -------", + regulator_state_name[state]); + else + err = seq_printf(s, "%12s not saved -------", + regulator_state_name[state]); + if (err < 0) + dev_err(dev, "seq_printf overflow: %i\n", __LINE__); + + for (i = 0; i < NUM_REGULATOR_STATE; i++) { + if (i < state) + err = seq_printf(s, "-----"); + else if (i == state) + err = seq_printf(s, "----+"); + else + err = seq_printf(s, " |"); + if (err < 0) + dev_err(dev, "seq_printf overflow: %i\n", + __LINE__); + } + err = seq_printf(s, "\n"); + if (err < 0) + dev_err(dev, "seq_printf overflow: %i\n", __LINE__); + } + + /* print labels */ + err = seq_printf(s, "\n addr\n"); + if (err < 0) + dev_err(dev, "seq_printf overflow: %i\n", __LINE__); + + /* dump registers */ + for (reg_id = 1; reg_id < NUM_AB8500_REGISTER; reg_id++) { + if (ab8500_register[reg_id].unavailable) + continue; + + err = seq_printf(s, "%22s 0x%02x%02x:", + ab8500_register[reg_id].name, + ab8500_register[reg_id].bank, + ab8500_register[reg_id].addr); + if (err < 
0) + dev_err(dev, "seq_printf overflow: %i, %i\n", + reg_id, __LINE__); + + for (state = 0; state < NUM_REGULATOR_STATE; state++) { + err = seq_printf(s, " 0x%02x", + ab8500_register_state[state][reg_id]); + if (err < 0) + dev_err(dev, "seq_printf overflow: %i, %i\n", + reg_id, __LINE__); + } + + err = seq_printf(s, "\n"); + if (err < 0) + dev_err(dev, "seq_printf overflow: %i, %i\n", + reg_id, __LINE__); + } + + return 0; +} + +static int ab8500_regulator_dump_open(struct inode *inode, struct file *file) +{ + return single_open(file, ab8500_regulator_dump_print, inode->i_private); +} + +static const struct file_operations ab8500_regulator_dump_fops = { + .open = ab8500_regulator_dump_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +/* + * regulator_voltage + */ +struct regulator_volt { + u8 value; + int volt; +}; + +struct regulator_volt_range { + struct regulator_volt start; + struct regulator_volt step; + struct regulator_volt end; +}; + +/* + * ab8500_regulator + * @name + * @update_regid + * @update_mask + * @update_val[4] {off, on, hw, lp} + * @hw_mode_regid + * @hw_mode_mask + * @hw_mode_val[4] {hp/lp, hp/off, hp, hp} + * @hw_valid_regid[4] {sysclkreq1, hw1, hw2, sw} + * @hw_valid_mask[4] {sysclkreq1, hw1, hw2, sw} + * @vsel_sel_regid + * @vsel_sel_mask + * @vsel_val[333] {sel1, sel2, sel3, sel3} + * @vsel_regid + * @vsel_mask + * @vsel_range + * @vsel_range_len + * @unavailable {true/false depending on whether AB supports the regulator} + */ +struct ab8500_regulator { + const char *name; + int update_regid; + u8 update_mask; + u8 update_val[4]; + int hw_mode_regid; + u8 hw_mode_mask; + u8 hw_mode_val[4]; + int hw_valid_regid[4]; + u8 hw_valid_mask[4]; + int vsel_sel_regid; + u8 vsel_sel_mask; + u8 vsel_sel_val[4]; + int vsel_regid[3]; + u8 vsel_mask[3]; + struct regulator_volt_range const *vsel_range[3]; + int vsel_range_len[3]; + u8 unavailable; +}; + +static const char *update_val_name[] = { + 
"off", + "on ", + "hw ", + "lp ", + " - " /* undefined value */ +}; + +static const char *hw_mode_val_name[] = { + "hp/lp ", + "hp/off", + "hp ", + "hp ", + "-/- ", /* undefined value */ +}; + +/* voltage selection */ +/* AB8500 device - Varm_vsel in 12.5mV steps */ +#define AB8500_VARM_VSEL_MASK 0x3f +static const struct regulator_volt_range ab8500_varm_vsel[] = { + { {0x00, 700000}, {0x01, 12500}, {0x35, 1362500} }, + { {0x36, 1362500}, {0x01, 0}, {0x3f, 1362500} }, +}; + +/* AB9540 device - Varm_vsel in 6.25mV steps */ +#define AB9540_VARM_VSEL_MASK 0x7f +static const struct regulator_volt_range ab9540_varm_vsel[] = { + { {0x00, 600000}, {0x01, 6250}, {0x7f, 1393750} }, +}; + +static const struct regulator_volt_range vape_vmod_vsel[] = { + { {0x00, 700000}, {0x01, 12500}, {0x35, 1362500} }, + { {0x36, 1362500}, {0x01, 0}, {0x3f, 1362500} }, +}; + +/* AB8500 device - Vbbp_vsel and Vbbn_sel in 100mV steps */ +static const struct regulator_volt_range ab8500_vbbp_vsel[] = { + { {0x00, 0}, {0x10, 100000}, {0x40, 400000} }, + { {0x50, 400000}, {0x10, 0}, {0x70, 400000} }, + { {0x80, -400000}, {0x10, 0}, {0xb0, -400000} }, + { {0xc0, -400000}, {0x10, 100000}, {0xf0, -100000} }, +}; + +static const struct regulator_volt_range ab8500_vbbn_vsel[] = { + { {0x00, 0}, {0x01, -100000}, {0x04, -400000} }, + { {0x05, -400000}, {0x01, 0}, {0x07, -400000} }, + { {0x08, 0}, {0x01, 100000}, {0x0c, 400000} }, + { {0x0d, 400000}, {0x01, 0}, {0x0f, 400000} }, +}; + +/* AB9540 device - Vbbp_vsel and Vbbn_sel in 50mV steps */ +static const struct regulator_volt_range ab9540_vbbp_vsel[] = { + { {0x00, 0}, {0x10, -50000}, {0x70, -350000} }, + { {0x80, 50000}, {0x10, 50000}, {0xf0, 400000} }, +}; + +static const struct regulator_volt_range ab9540_vbbn_vsel[] = { + { {0x00, 0}, {0x01, -50000}, {0x07, -350000} }, + { {0x08, 50000}, {0x01, 50000}, {0x0f, 400000} }, +}; + +static const struct regulator_volt_range vsmps1_vsel[] = { + { {0x00, 1100000}, {0x01, 0}, {0x1f, 1100000} }, + { {0x20, 
1100000}, {0x01, 12500}, {0x30, 1300000} }, + { {0x31, 1300000}, {0x01, 0}, {0x3f, 1300000} }, +}; + +static const struct regulator_volt_range vsmps2_vsel[] = { + { {0x00, 1800000}, {0x01, 0}, {0x38, 1800000} }, + { {0x39, 1800000}, {0x01, 12500}, {0x7f, 1875000} }, +}; + +static const struct regulator_volt_range vsmps3_vsel[] = { + { {0x00, 700000}, {0x01, 12500}, {0x35, 1363500} }, + { {0x36, 1363500}, {0x01, 0}, {0x7f, 1363500} }, +}; + +/* for Vaux1, Vaux2 and Vaux4 */ +static const struct regulator_volt_range vauxn_vsel[] = { + { {0x00, 1100000}, {0x01, 100000}, {0x04, 1500000} }, + { {0x05, 1800000}, {0x01, 50000}, {0x07, 1900000} }, + { {0x08, 2500000}, {0x01, 0}, {0x08, 2500000} }, + { {0x09, 2650000}, {0x01, 50000}, {0x0c, 2800000} }, + { {0x0d, 2900000}, {0x01, 100000}, {0x0e, 3000000} }, + { {0x0f, 3300000}, {0x01, 0}, {0x0f, 3300000} }, +}; + +static const struct regulator_volt_range vaux3_vsel[] = { + { {0x00, 1200000}, {0x01, 300000}, {0x03, 2100000} }, + { {0x04, 2500000}, {0x01, 250000}, {0x05, 2750000} }, + { {0x06, 2790000}, {0x01, 0}, {0x06, 2790000} }, + { {0x07, 2910000}, {0x01, 0}, {0x07, 2910000} }, +}; + +static const struct regulator_volt_range vrf1_vsel[] = { + { {0x00, 1800000}, {0x10, 200000}, {0x10, 2000000} }, + { {0x20, 2150000}, {0x10, 0}, {0x20, 2150000} }, + { {0x30, 2500000}, {0x10, 0}, {0x30, 2500000} }, +}; + +static const struct regulator_volt_range vintcore12_vsel[] = { + { {0x00, 1200000}, {0x08, 25000}, {0x30, 1350000} }, + { {0x38, 1350000}, {0x01, 0}, {0x38, 1350000} }, +}; + +/* regulators */ +static struct ab8500_regulator ab8500_regulator[AB8500_NUM_REGULATORS] = { + [AB8500_VARM] = { + .name = "Varm", + .update_regid = AB8500_REGU_ARM_REGU1, + .update_mask = 0x03, + .update_val = {0x00, 0x01, 0x02, 0x03}, + .hw_mode_regid = AB8500_REGU_REQUEST_CTRL1, + .hw_mode_mask = 0x03, + .hw_mode_val = {0x00, 0x01, 0x02, 0x03}, + .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2, + .hw_valid_mask[0] = 0x02, + 
.hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1, + .hw_valid_mask[3] = 0x02, + .vsel_sel_regid = AB8500_REGU_ARM_REGU1, + .vsel_sel_mask = 0x0c, + .vsel_sel_val = {0x00, 0x04, 0x08, 0x0c}, + .vsel_regid[0] = AB8500_REGU_VARM_SEL1, + .vsel_mask[0] = AB8500_VARM_VSEL_MASK, + .vsel_range[0] = ab8500_varm_vsel, + .vsel_range_len[0] = ARRAY_SIZE(ab8500_varm_vsel), + .vsel_regid[1] = AB8500_REGU_VARM_SEL2, + .vsel_mask[1] = AB8500_VARM_VSEL_MASK, + .vsel_range[1] = ab8500_varm_vsel, + .vsel_range_len[1] = ARRAY_SIZE(ab8500_varm_vsel), + .vsel_regid[2] = AB8500_REGU_VARM_SEL3, + .vsel_mask[2] = AB8500_VARM_VSEL_MASK, + .vsel_range[2] = ab8500_varm_vsel, + .vsel_range_len[2] = ARRAY_SIZE(ab8500_varm_vsel), + }, + [AB8500_VBBP] = { + .name = "Vbbp", + .update_regid = AB8500_REGU_ARM_REGU2, + .update_mask = 0x03, + .update_val = {0x00, 0x01, 0x02, 0x00}, + .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2, + .hw_valid_mask[0] = 0x04, + .vsel_sel_regid = AB8500_REGU_ARM_REGU1, + .vsel_sel_mask = 0x10, + .vsel_sel_val = {0x00, 0x10, 0x00, 0x00}, + .vsel_regid[0] = AB8500_REGU_VBB_SEL1, + .vsel_mask[0] = 0xf0, + .vsel_range[0] = ab8500_vbbp_vsel, + .vsel_range_len[0] = ARRAY_SIZE(ab8500_vbbp_vsel), + .vsel_regid[1] = AB8500_REGU_VBB_SEL2, + .vsel_mask[1] = 0xf0, + .vsel_range[1] = ab8500_vbbp_vsel, + .vsel_range_len[1] = ARRAY_SIZE(ab8500_vbbp_vsel), + }, + [AB8500_VBBN] = { + .name = "Vbbn", + .update_regid = AB8500_REGU_ARM_REGU2, + .update_mask = 0x0c, + .update_val = {0x00, 0x04, 0x08, 0x00}, + .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2, + .hw_valid_mask[0] = 0x04, + .vsel_sel_regid = AB8500_REGU_ARM_REGU1, + .vsel_sel_mask = 0x20, + .vsel_sel_val = {0x00, 0x20, 0x00, 0x00}, + .vsel_regid[0] = AB8500_REGU_VBB_SEL1, + .vsel_mask[0] = 0x0f, + .vsel_range[0] = ab8500_vbbn_vsel, + .vsel_range_len[0] = ARRAY_SIZE(ab8500_vbbn_vsel), + .vsel_regid[1] = AB8500_REGU_VBB_SEL2, + .vsel_mask[1] = 0x0f, + .vsel_range[1] = ab8500_vbbn_vsel, + .vsel_range_len[1] = 
ARRAY_SIZE(ab8500_vbbn_vsel), + }, + [AB8500_VAPE] = { + .name = "Vape", + .update_regid = AB8500_REGU_VAPE_REGU, + .update_mask = 0x03, + .update_val = {0x00, 0x01, 0x02, 0x03}, + .hw_mode_regid = AB8500_REGU_REQUEST_CTRL1, + .hw_mode_mask = 0x0c, + .hw_mode_val = {0x00, 0x04, 0x08, 0x0c}, + .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2, + .hw_valid_mask[0] = 0x01, + .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1, + .hw_valid_mask[3] = 0x01, + .vsel_sel_regid = AB8500_REGU_VAPE_REGU, + .vsel_sel_mask = 0x24, + .vsel_sel_val = {0x00, 0x04, 0x20, 0x24}, + .vsel_regid[0] = AB8500_REGU_VAPE_SEL1, + .vsel_mask[0] = 0x3f, + .vsel_range[0] = vape_vmod_vsel, + .vsel_range_len[0] = ARRAY_SIZE(vape_vmod_vsel), + .vsel_regid[1] = AB8500_REGU_VAPE_SEL2, + .vsel_mask[1] = 0x3f, + .vsel_range[1] = vape_vmod_vsel, + .vsel_range_len[1] = ARRAY_SIZE(vape_vmod_vsel), + .vsel_regid[2] = AB8500_REGU_VAPE_SEL3, + .vsel_mask[2] = 0x3f, + .vsel_range[2] = vape_vmod_vsel, + .vsel_range_len[2] = ARRAY_SIZE(vape_vmod_vsel), + }, + [AB8500_VSMPS1] = { + .name = "Vsmps1", + .update_regid = AB8500_REGU_VSMPS1_REGU, + .update_mask = 0x03, + .update_val = {0x00, 0x01, 0x02, 0x03}, + .hw_mode_regid = AB8500_REGU_REQUEST_CTRL1, + .hw_mode_mask = 0x30, + .hw_mode_val = {0x00, 0x10, 0x20, 0x30}, + .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1, + .hw_valid_mask[0] = 0x01, + .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1, + .hw_valid_mask[1] = 0x01, + .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1, + .hw_valid_mask[2] = 0x01, + .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1, + .hw_valid_mask[3] = 0x04, + .vsel_sel_regid = AB8500_REGU_VSMPS1_REGU, + .vsel_sel_mask = 0x0c, + .vsel_sel_val = {0x00, 0x04, 0x08, 0x0c}, + .vsel_regid[0] = AB8500_REGU_VSMPS1_SEL1, + .vsel_mask[0] = 0x3f, + .vsel_range[0] = vsmps1_vsel, + .vsel_range_len[0] = ARRAY_SIZE(vsmps1_vsel), + .vsel_regid[1] = AB8500_REGU_VSMPS1_SEL2, + .vsel_mask[1] = 0x3f, + .vsel_range[1] = vsmps1_vsel, + 
.vsel_range_len[1] = ARRAY_SIZE(vsmps1_vsel), + .vsel_regid[2] = AB8500_REGU_VSMPS1_SEL3, + .vsel_mask[2] = 0x3f, + .vsel_range[2] = vsmps1_vsel, + .vsel_range_len[2] = ARRAY_SIZE(vsmps1_vsel), + }, + [AB8500_VSMPS2] = { + .name = "Vsmps2", + .update_regid = AB8500_REGU_VSMPS2_REGU, + .update_mask = 0x03, + .update_val = {0x00, 0x01, 0x02, 0x03}, + .hw_mode_regid = AB8500_REGU_REQUEST_CTRL1, + .hw_mode_mask = 0xc0, + .hw_mode_val = {0x00, 0x40, 0x80, 0xc0}, + .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1, + .hw_valid_mask[0] = 0x02, + .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1, + .hw_valid_mask[1] = 0x02, + .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1, + .hw_valid_mask[2] = 0x02, + .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1, + .hw_valid_mask[3] = 0x08, + .vsel_sel_regid = AB8500_REGU_VSMPS2_REGU, + .vsel_sel_mask = 0x0c, + .vsel_sel_val = {0x00, 0x04, 0x08, 0x0c}, + .vsel_regid[0] = AB8500_REGU_VSMPS2_SEL1, + .vsel_mask[0] = 0x3f, + .vsel_range[0] = vsmps2_vsel, + .vsel_range_len[0] = ARRAY_SIZE(vsmps2_vsel), + .vsel_regid[1] = AB8500_REGU_VSMPS2_SEL2, + .vsel_mask[1] = 0x3f, + .vsel_range[1] = vsmps2_vsel, + .vsel_range_len[1] = ARRAY_SIZE(vsmps2_vsel), + .vsel_regid[2] = AB8500_REGU_VSMPS2_SEL3, + .vsel_mask[2] = 0x3f, + .vsel_range[2] = vsmps2_vsel, + .vsel_range_len[2] = ARRAY_SIZE(vsmps2_vsel), + }, + [AB8500_VSMPS3] = { + .name = "Vsmps3", + .update_regid = AB8500_REGU_VSMPS3_REGU, + .update_mask = 0x03, + .update_val = {0x00, 0x01, 0x02, 0x03}, + .hw_mode_regid = AB8500_REGU_REQUEST_CTRL2, + .hw_mode_mask = 0x03, + .hw_mode_val = {0x00, 0x01, 0x02, 0x03}, + .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1, + .hw_valid_mask[0] = 0x04, + .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1, + .hw_valid_mask[1] = 0x04, + .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1, + .hw_valid_mask[2] = 0x04, + .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1, + .hw_valid_mask[3] = 0x10, + .vsel_sel_regid = AB8500_REGU_VSMPS3_REGU, + 
.vsel_sel_mask = 0x0c, + .vsel_sel_val = {0x00, 0x04, 0x08, 0x0c}, + .vsel_regid[0] = AB8500_REGU_VSMPS3_SEL1, + .vsel_mask[0] = 0x7f, + .vsel_range[0] = vsmps3_vsel, + .vsel_range_len[0] = ARRAY_SIZE(vsmps3_vsel), + .vsel_regid[1] = AB8500_REGU_VSMPS3_SEL2, + .vsel_mask[1] = 0x7f, + .vsel_range[1] = vsmps3_vsel, + .vsel_range_len[1] = ARRAY_SIZE(vsmps3_vsel), + .vsel_regid[2] = AB8500_REGU_VSMPS3_SEL3, + .vsel_mask[2] = 0x7f, + .vsel_range[2] = vsmps3_vsel, + .vsel_range_len[2] = ARRAY_SIZE(vsmps3_vsel), + }, + [AB8500_VPLL] = { + .name = "Vpll", + .update_regid = AB8500_REGU_VPLL_VANA_REGU, + .update_mask = 0x03, + .update_val = {0x00, 0x01, 0x02, 0x03}, + .hw_mode_regid = AB8500_REGU_REQUEST_CTRL2, + .hw_mode_mask = 0x0c, + .hw_mode_val = {0x00, 0x04, 0x08, 0x0c}, + .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1, + .hw_valid_mask[0] = 0x10, + .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1, + .hw_valid_mask[1] = 0x10, + .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1, + .hw_valid_mask[2] = 0x10, + .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1, + .hw_valid_mask[3] = 0x40, + }, + [AB8500_VREFDDR] = { + .name = "VrefDDR", + .update_regid = AB8500_REGU_VREF_DDR, + .update_mask = 0x01, + .update_val = {0x00, 0x01, 0x00, 0x00}, + }, + [AB8500_VMOD] = { + .name = "Vmod", + .update_regid = AB8500_REGU_VMOD_REGU, + .update_mask = 0x03, + .update_val = {0x00, 0x01, 0x02, 0x03}, + .hw_mode_regid = AB8500_REGU_VMOD_REGU, + .hw_mode_mask = 0xc0, + .hw_mode_val = {0x00, 0x40, 0x80, 0xc0}, + .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2, + .hw_valid_mask[0] = 0x08, + .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID2, + .hw_valid_mask[1] = 0x08, + .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID2, + .hw_valid_mask[2] = 0x08, + .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2, + .hw_valid_mask[3] = 0x20, + .vsel_sel_regid = AB8500_REGU_VMOD_REGU, + .vsel_sel_mask = 0x04, + .vsel_sel_val = {0x00, 0x04, 0x00, 0x00}, + .vsel_regid[0] = 
AB8500_REGU_VMOD_SEL1, + .vsel_mask[0] = 0x3f, + .vsel_range[0] = vape_vmod_vsel, + .vsel_range_len[0] = ARRAY_SIZE(vape_vmod_vsel), + .vsel_regid[1] = AB8500_REGU_VMOD_SEL2, + .vsel_mask[1] = 0x3f, + .vsel_range[1] = vape_vmod_vsel, + .vsel_range_len[1] = ARRAY_SIZE(vape_vmod_vsel), + }, + [AB8500_VEXTSUPPLY1] = { + .name = "Vextsupply1", + .update_regid = AB8500_REGU_EXT_SUPPLY_REGU, + .update_mask = 0x03, + .update_val = {0x00, 0x01, 0x02, 0x03}, + .hw_mode_regid = AB8500_REGU_REQUEST_CTRL2, + .hw_mode_mask = 0xc0, + .hw_mode_val = {0x00, 0x40, 0x80, 0xc0}, + .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2, + .hw_valid_mask[0] = 0x10, + .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID2, + .hw_valid_mask[1] = 0x01, + .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID2, + .hw_valid_mask[2] = 0x01, + .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2, + .hw_valid_mask[3] = 0x04, + }, + [AB8500_VEXTSUPPLY2] = { + .name = "VextSupply2", + .update_regid = AB8500_REGU_EXT_SUPPLY_REGU, + .update_mask = 0x0c, + .update_val = {0x00, 0x04, 0x08, 0x0c}, + .hw_mode_regid = AB8500_REGU_REQUEST_CTRL3, + .hw_mode_mask = 0x03, + .hw_mode_val = {0x00, 0x01, 0x02, 0x03}, + .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2, + .hw_valid_mask[0] = 0x20, + .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID2, + .hw_valid_mask[1] = 0x02, + .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID2, + .hw_valid_mask[2] = 0x02, + .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2, + .hw_valid_mask[3] = 0x08, + }, + [AB8500_VEXTSUPPLY3] = { + .name = "VextSupply3", + .update_regid = AB8500_REGU_EXT_SUPPLY_REGU, + .update_mask = 0x30, + .update_val = {0x00, 0x10, 0x20, 0x30}, + .hw_mode_regid = AB8500_REGU_REQUEST_CTRL3, + .hw_mode_mask = 0x0c, + .hw_mode_val = {0x00, 0x04, 0x08, 0x0c}, + .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID2, + .hw_valid_mask[0] = 0x40, + .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID2, + .hw_valid_mask[1] = 0x04, + .hw_valid_regid[2] = 
AB8500_REGU_HW_HP_REQ2_VALID2, + .hw_valid_mask[2] = 0x04, + .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2, + .hw_valid_mask[3] = 0x10, + }, + [AB8500_VRF1] = { + .name = "Vrf1", + .update_regid = AB8500_REGU_VRF1_VAUX3_REGU, + .update_mask = 0x0c, + .update_val = {0x00, 0x04, 0x08, 0x0c}, + .vsel_regid[0] = AB8500_REGU_VRF1_VAUX3_SEL, + .vsel_mask[0] = 0x30, + .vsel_range[0] = vrf1_vsel, + .vsel_range_len[0] = ARRAY_SIZE(vrf1_vsel), + }, + [AB8500_VANA] = { + .name = "Vana", + .update_regid = AB8500_REGU_VPLL_VANA_REGU, + .update_mask = 0x0c, + .update_val = {0x00, 0x04, 0x08, 0x0c}, + .hw_mode_regid = AB8500_REGU_REQUEST_CTRL2, + .hw_mode_mask = 0x30, + .hw_mode_val = {0x00, 0x10, 0x20, 0x30}, + .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1, + .hw_valid_mask[0] = 0x08, + .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1, + .hw_valid_mask[1] = 0x08, + .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1, + .hw_valid_mask[2] = 0x08, + .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1, + .hw_valid_mask[3] = 0x20, + }, + [AB8500_VAUX1] = { + .name = "Vaux1", + .update_regid = AB8500_REGU_VAUX12_REGU, + .update_mask = 0x03, + .update_val = {0x00, 0x01, 0x02, 0x03}, + .hw_mode_regid = AB8500_REGU_REQUEST_CTRL3, + .hw_mode_mask = 0x30, + .hw_mode_val = {0x00, 0x10, 0x20, 0x30}, + .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1, + .hw_valid_mask[0] = 0x20, + .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1, + .hw_valid_mask[1] = 0x20, + .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1, + .hw_valid_mask[2] = 0x20, + .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID1, + .hw_valid_mask[3] = 0x80, + .vsel_regid[0] = AB8500_REGU_VAUX1_SEL, + .vsel_mask[0] = 0x0f, + .vsel_range[0] = vauxn_vsel, + .vsel_range_len[0] = ARRAY_SIZE(vauxn_vsel), + }, + [AB8500_VAUX2] = { + .name = "Vaux2", + .update_regid = AB8500_REGU_VAUX12_REGU, + .update_mask = 0x0c, + .update_val = {0x00, 0x04, 0x08, 0x0c}, + .hw_mode_regid = AB8500_REGU_REQUEST_CTRL3, + .hw_mode_mask = 
0xc0, + .hw_mode_val = {0x00, 0x40, 0x80, 0xc0}, + .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1, + .hw_valid_mask[0] = 0x40, + .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1, + .hw_valid_mask[1] = 0x40, + .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1, + .hw_valid_mask[2] = 0x40, + .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2, + .hw_valid_mask[3] = 0x01, + .vsel_regid[0] = AB8500_REGU_VAUX2_SEL, + .vsel_mask[0] = 0x0f, + .vsel_range[0] = vauxn_vsel, + .vsel_range_len[0] = ARRAY_SIZE(vauxn_vsel), + }, + [AB8500_VAUX3] = { + .name = "Vaux3", + .update_regid = AB8500_REGU_VRF1_VAUX3_REGU, + .update_mask = 0x03, + .update_val = {0x00, 0x01, 0x02, 0x03}, + .hw_mode_regid = AB8500_REGU_REQUEST_CTRL4, + .hw_mode_mask = 0x03, + .hw_mode_val = {0x00, 0x01, 0x02, 0x03}, + .hw_valid_regid[0] = AB8500_REGU_SYSCLK_REQ1_HP_VALID1, + .hw_valid_mask[0] = 0x80, + .hw_valid_regid[1] = AB8500_REGU_HW_HP_REQ1_VALID1, + .hw_valid_mask[1] = 0x80, + .hw_valid_regid[2] = AB8500_REGU_HW_HP_REQ2_VALID1, + .hw_valid_mask[2] = 0x80, + .hw_valid_regid[3] = AB8500_REGU_SW_HP_REQ_VALID2, + .hw_valid_mask[3] = 0x02, + .vsel_regid[0] = AB8500_REGU_VRF1_VAUX3_SEL, + .vsel_mask[0] = 0x07, + .vsel_range[0] = vaux3_vsel, + .vsel_range_len[0] = ARRAY_SIZE(vaux3_vsel), + }, + [AB9540_VAUX4] = { + .name = "Vaux4", + .update_regid = AB9540_REGU_VAUX4_REGU, + .update_mask = 0x03, + .update_val = {0x00, 0x01, 0x02, 0x03}, + .hw_mode_regid = AB9540_REGU_VAUX4_REQ_CTRL, + .hw_mode_mask = 0x03, + .hw_mode_val = {0x00, 0x01, 0x02, 0x03}, + .hw_valid_regid[0] = AB9540_REGU_VAUX4_REQ_VALID, + .hw_valid_mask[0] = 0x08, + .hw_valid_regid[1] = AB9540_REGU_VAUX4_REQ_VALID, + .hw_valid_mask[1] = 0x04, + .hw_valid_regid[2] = AB9540_REGU_VAUX4_REQ_VALID, + .hw_valid_mask[2] = 0x02, + .hw_valid_regid[3] = AB9540_REGU_VAUX4_REQ_VALID, + .hw_valid_mask[3] = 0x01, + .vsel_regid[0] = AB9540_REGU_VAUX4_SEL, + .vsel_mask[0] = 0x0f, + .vsel_range[0] = vauxn_vsel, + .vsel_range_len[0] = 
ARRAY_SIZE(vauxn_vsel), + .unavailable = true, /* AB9540 regulator */ + }, + [AB8500_VINTCORE] = { + .name = "VintCore12", + .update_regid = AB8500_REGU_MISC1, + .update_mask = 0x44, + .update_val = {0x00, 0x04, 0x00, 0x44}, + .vsel_regid[0] = AB8500_REGU_MISC1, + .vsel_mask[0] = 0x38, + .vsel_range[0] = vintcore12_vsel, + .vsel_range_len[0] = ARRAY_SIZE(vintcore12_vsel), + }, + [AB8500_VTVOUT] = { + .name = "VTVout", + .update_regid = AB8500_REGU_MISC1, + .update_mask = 0x82, + .update_val = {0x00, 0x02, 0x00, 0x82}, + }, + [AB8500_VAUDIO] = { + .name = "Vaudio", + .update_regid = AB8500_REGU_VAUDIO_SUPPLY, + .update_mask = 0x02, + .update_val = {0x00, 0x02, 0x00, 0x00}, + }, + [AB8500_VANAMIC1] = { + .name = "Vanamic1", + .update_regid = AB8500_REGU_VAUDIO_SUPPLY, + .update_mask = 0x08, + .update_val = {0x00, 0x08, 0x00, 0x00}, + }, + [AB8500_VANAMIC2] = { + .name = "Vanamic2", + .update_regid = AB8500_REGU_VAUDIO_SUPPLY, + .update_mask = 0x10, + .update_val = {0x00, 0x10, 0x00, 0x00}, + }, + [AB8500_VDMIC] = { + .name = "Vdmic", + .update_regid = AB8500_REGU_VAUDIO_SUPPLY, + .update_mask = 0x04, + .update_val = {0x00, 0x04, 0x00, 0x00}, + }, + [AB8500_VUSB] = { + .name = "Vusb", + .update_regid = AB8500_REGU_VUSB_CTRL, + .update_mask = 0x03, + .update_val = {0x00, 0x01, 0x00, 0x03}, + }, + [AB8500_VOTG] = { + .name = "VOTG", + .update_regid = AB8500_REGU_OTG_SUPPLY_CTRL, + .update_mask = 0x03, + .update_val = {0x00, 0x01, 0x00, 0x03}, + }, + [AB8500_VBUSBIS] = { + .name = "Vbusbis", + .update_regid = AB8500_REGU_OTG_SUPPLY_CTRL, + .update_mask = 0x08, + .update_val = {0x00, 0x08, 0x00, 0x00}, + }, +}; + +static void ab9540_regulators_update(void) +{ + /* Update unavailable regulators */ + ab8500_regulator[AB8500_VREFDDR].unavailable = true; + ab8500_regulator[AB9540_VAUX4].unavailable = false; + + /* Update regulator characteristics for AB9540 */ + ab8500_regulator[AB8500_VARM].vsel_mask[0] = AB9540_VARM_VSEL_MASK; + ab8500_regulator[AB8500_VARM].vsel_range[0] = 
ab9540_varm_vsel; + ab8500_regulator[AB8500_VARM].vsel_range_len[0] = + ARRAY_SIZE(ab9540_varm_vsel); + ab8500_regulator[AB8500_VARM].vsel_mask[1] = AB9540_VARM_VSEL_MASK; + ab8500_regulator[AB8500_VARM].vsel_range[1] = ab9540_varm_vsel; + ab8500_regulator[AB8500_VARM].vsel_range_len[1] = + ARRAY_SIZE(ab9540_varm_vsel); + ab8500_regulator[AB8500_VARM].vsel_mask[2] = AB9540_VARM_VSEL_MASK; + ab8500_regulator[AB8500_VARM].vsel_range[2] = ab9540_varm_vsel; + ab8500_regulator[AB8500_VARM].vsel_range_len[2] = + ARRAY_SIZE(ab9540_varm_vsel); + + ab8500_regulator[AB8500_VBBP].vsel_range[0] = ab9540_vbbp_vsel; + ab8500_regulator[AB8500_VBBP].vsel_range_len[0] = + ARRAY_SIZE(ab9540_vbbp_vsel); + ab8500_regulator[AB8500_VBBP].vsel_range[1] = ab9540_vbbp_vsel; + ab8500_regulator[AB8500_VBBP].vsel_range_len[1] = + ARRAY_SIZE(ab9540_vbbp_vsel); + + ab8500_regulator[AB8500_VBBN].vsel_range[0] = ab9540_vbbn_vsel; + ab8500_regulator[AB8500_VBBN].vsel_range_len[0] = + ARRAY_SIZE(ab9540_vbbn_vsel); + ab8500_regulator[AB8500_VBBN].vsel_range[1] = ab9540_vbbn_vsel; + ab8500_regulator[AB8500_VBBN].vsel_range_len[1] = + ARRAY_SIZE(ab9540_vbbn_vsel); +} + +static int status_state = AB8500_REGULATOR_STATE_CURRENT; + +static int _get_voltage(struct regulator_volt_range const *volt_range, + u8 value, int *volt) +{ + u8 start = volt_range->start.value; + u8 end = volt_range->end.value; + u8 step = volt_range->step.value; + + /* Check if witin range */ + if (step == 0) { + if (value == start) { + *volt = volt_range->start.volt; + return 1; + } + } else { + if ((start <= value) && (value <= end)) { + if ((value - start) % step != 0) + return -EINVAL; /* invalid setting */ + *volt = volt_range->start.volt + + volt_range->step.volt + *((value - start) / step); + return 1; + } + } + + return 0; +} + +static int get_voltage(struct regulator_volt_range const *volt_range, + int volt_range_len, + u8 value) +{ + int volt; + int i, ret; + + for (i = 0; i < volt_range_len; i++) { + ret = 
_get_voltage(&volt_range[i], value, &volt); + if (ret < 0) + break; /* invalid setting */ + if (ret == 1) + return volt; /* successful */ + } + + return -EINVAL; +} + +static bool get_reg_and_mask(int regid, u8 mask, u8 *val) +{ + int ret; + u8 t; + + if (!regid) + return false; + + ret = abx500_get_register_interruptible(dev, + ab8500_register[regid].bank, + ab8500_register[regid].addr, + &t); + if (ret < 0) + return false; + + (*val) = t & mask; + + return true; +} + +/* Convert regulator register value to index */ +static bool val2idx(u8 val, u8 *v, int len, int *idx) +{ + int i; + + for (i = 0; i < len && v[i] != val; i++); + + if (i == len) + return false; + + (*idx) = i; + return true; +} + +int ab8500_regulator_debug_read(enum ab8500_regulator_id id, + struct ab8500_debug_regulator_status *s) +{ + int i; + u8 val; + bool found; + int idx = 0; + + if (id >= AB8500_NUM_REGULATORS) + return -EINVAL; + + s->name = (char *)ab8500_regulator[id].name; + + /* read mode */ + (void) get_reg_and_mask(ab8500_regulator[id].update_regid, + ab8500_regulator[id].update_mask, + &val); + + (void) val2idx(val, ab8500_regulator[id].update_val, + 4, &idx); + + s->mode = (u8) idx; + + /* read hw mode */ + found = get_reg_and_mask(ab8500_regulator[id].hw_mode_regid, + ab8500_regulator[id].hw_mode_mask, + &val); + + if (found) + found = val2idx(val, ab8500_regulator[id].hw_mode_val, 4, &idx); + + if (found) + /* +1 since 0 = HWMODE_NONE */ + s->hwmode = idx + 1; + else + s->hwmode = AB8500_HWMODE_NONE; + + for (i = 0; i < 4 && found; i++) { + + bool f = get_reg_and_mask(ab8500_regulator[id].hw_valid_regid[i], + ab8500_regulator[id].hw_valid_mask[i], + &val); + if (f) + s->hwmode_auto[i] = !!val; + else + s->hwmode_auto[i] = HWM_INVAL; + } + + /* read voltage */ + found = get_reg_and_mask(ab8500_regulator[id].vsel_sel_regid, + ab8500_regulator[id].vsel_sel_mask, + &val); + if (found) + found = val2idx(val, ab8500_regulator[id].vsel_sel_val, + 3, &idx); + + if (found && idx < 3) + 
s->volt_selected = idx + 1; + else + s->volt_selected = 0; + + for (s->volt_len = 0; s->volt_len < 3; s->volt_len++) { + int volt; + int i = s->volt_len; + + found = get_reg_and_mask(ab8500_regulator[id].vsel_regid[i], + ab8500_regulator[id].vsel_mask[i], + &val); + if (!found) + break; + + volt = get_voltage(ab8500_regulator[id].vsel_range[i], + ab8500_regulator[id].vsel_range_len[i], + val); + s->volt[i] = volt; + } + return 0; +} + +static int ab8500_regulator_status_print(struct seq_file *s, void *p) +{ + struct device *dev = s->private; + int id, regid; + int i; + u8 val; + int err; + + /* record current state */ + ab8500_regulator_record_state(AB8500_REGULATOR_STATE_CURRENT); + + /* check if chosen state is recorded */ + if (!ab8500_register_state_saved[status_state]) { + seq_printf(s, "ab8500-regulator status is not recorded.\n"); + goto exit; + } + + /* print dump header */ + err = seq_printf(s, "ab8500-regulator status:\n"); + if (err < 0) + dev_err(dev, "seq_printf overflow\n"); + + /* print state */ + for (i = 0; i < NUM_REGULATOR_STATE; i++) { + if (i == status_state) + err = seq_printf(s, "-> %i. %12s\n", + i, regulator_state_name[i]); + else + err = seq_printf(s, " %i. 
%12s\n", + i, regulator_state_name[i]); + if (err < 0) + dev_err(dev, "seq_printf overflow\n"); + } + + /* print labels */ + err = seq_printf(s, + "+-----------+----+--------------+-------------------------+\n"); + if (err < 0) + dev_err(dev, "seq_printf overflow: %i\n", __LINE__); + err = seq_printf(s, + "| name|man |auto |voltage |\n"); + if (err < 0) + dev_err(dev, "seq_printf overflow: %i\n", __LINE__); + err = seq_printf(s, + "+-----------+----+--------------+ +-----------------------+\n"); + if (err < 0) + dev_err(dev, "seq_printf overflow: %i\n", __LINE__); + err = seq_printf(s, + "| |mode|mode |0|1|2|3| | 1 | 2 | 3 |\n"); + if (err < 0) + dev_err(dev, "seq_printf overflow: %i\n", __LINE__); + err = seq_printf(s, + "+-----------+----+------+-+-+-+-+-+-------+-------+-------+\n"); + if (err < 0) + dev_err(dev, "seq_printf overflow: %i\n", __LINE__); + + /* dump registers */ + for (id = 0; id < AB8500_NUM_REGULATORS; id++) { + if (ab8500_register[id].unavailable || + ab8500_regulator[id].unavailable) + continue; + + /* print name */ + err = seq_printf(s, "|%11s|", + ab8500_regulator[id].name); + if (err < 0) + dev_err(dev, "seq_printf overflow: %i, %i\n", + id, __LINE__); + + /* print manual mode */ + regid = ab8500_regulator[id].update_regid; + val = ab8500_register_state[status_state][regid] + & ab8500_regulator[id].update_mask; + for (i = 0; i < 4; i++) { + if (val == ab8500_regulator[id].update_val[i]) + break; + } + err = seq_printf(s, "%4s|", + update_val_name[i]); + if (err < 0) + dev_err(dev, "seq_printf overflow: %i, %i\n", + id, __LINE__); + + /* print auto mode */ + regid = ab8500_regulator[id].hw_mode_regid; + if (regid) { + val = ab8500_register_state[status_state][regid] + & ab8500_regulator[id].hw_mode_mask; + for (i = 0; i < 4; i++) { + if (val == ab8500_regulator[id].hw_mode_val[i]) + break; + } + err = seq_printf(s, "%6s|", + hw_mode_val_name[i]); + } else { + err = seq_printf(s, " |"); + } + if (err < 0) + dev_err(dev, "seq_printf overflow: 
%i, %i\n", + id, __LINE__); + + /* print valid bits */ + for (i = 0; i < 4; i++) { + regid = ab8500_regulator[id].hw_valid_regid[i]; + if (regid) { + val = ab8500_register_state[status_state][regid] + & ab8500_regulator[id].hw_valid_mask[i]; + if (val) + err = seq_printf(s, "1|"); + else + err = seq_printf(s, "0|"); + } else { + err = seq_printf(s, " |"); + } + if (err < 0) + dev_err(dev, "seq_printf overflow: %i, %i\n", + regid, __LINE__); + } + + /* print voltage selection */ + regid = ab8500_regulator[id].vsel_sel_regid; + if (regid) { + val = ab8500_register_state[status_state][regid] + & ab8500_regulator[id].vsel_sel_mask; + for (i = 0; i < 3; i++) { + if (val == ab8500_regulator[id].vsel_sel_val[i]) + break; + } + if (i < 3) + seq_printf(s, "%i|", i + 1); + else + seq_printf(s, "-|"); + } else { + seq_printf(s, " |"); + } + if (err < 0) + dev_err(dev, "seq_printf overflow: %i, %i\n", + regid, __LINE__); + + for (i = 0; i < 3; i++) { + int volt; + + regid = ab8500_regulator[id].vsel_regid[i]; + if (regid) { + val = ab8500_register_state[status_state][regid] + & ab8500_regulator[id].vsel_mask[i]; + volt = get_voltage( + ab8500_regulator[id].vsel_range[i], + ab8500_regulator[id].vsel_range_len[i], + val); + seq_printf(s, "%7i|", volt); + } else { + seq_printf(s, " |"); + } + if (err < 0) + dev_err(dev, "seq_printf overflow: %i, %i\n", + regid, __LINE__); + } + + err = seq_printf(s, "\n"); + if (err < 0) + dev_err(dev, "seq_printf overflow: %i, %i\n", + regid, __LINE__); + + } + err = seq_printf(s, + "+-----------+----+------+-+-+-+-+-+-------+-------+-------+\n"); + if (err < 0) + dev_err(dev, "seq_printf overflow: %i\n", __LINE__); + err = seq_printf(s, + "Note! 
In HW mode, voltage selection is controlled by HW.\n"); + if (err < 0) + dev_err(dev, "seq_printf overflow: %i\n", __LINE__); + + +exit: + return 0; +} + +static int ab8500_regulator_status_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + char buf[32]; + int buf_size; + unsigned long user_val; + int err; + + /* copy user data */ + buf_size = min(count, (sizeof(buf) - 1)); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + buf[buf_size] = 0; + + /* convert */ + err = strict_strtoul(buf, 0, &user_val); + if (err) + return -EINVAL; + + /* set suspend force setting */ + if (user_val > NUM_REGULATOR_STATE) { + dev_err(dev, "debugfs error input > number of states\n"); + return -EINVAL; + } + + status_state = user_val; + + return buf_size; +} + + +static int ab8500_regulator_status_open(struct inode *inode, struct file *file) +{ + return single_open(file, ab8500_regulator_status_print, + inode->i_private); +} + +static const struct file_operations ab8500_regulator_status_fops = { + .open = ab8500_regulator_status_open, + .write = ab8500_regulator_status_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +#ifdef CONFIG_PM + +struct ab8500_force_reg { + char *name; + u8 bank; + u8 addr; + u8 mask; + u8 val; + bool restore; + u8 restore_val; + u8 unavailable; +}; + +static struct ab8500_force_reg ab8500_force_reg[] = { + { + /* + * SysClkCtrl + * OTP: 0x00, HSI: 0x06, suspend: 0x00/0x07 (value/mask) + * [ 2] USBClkEna = disable SysClk path to USB block + * [ 1] TVoutClkEna = disable 27Mhz clock to TVout block + * [ 0] TVoutPllEna = disable TVout pll + * (generate 27Mhz from SysClk) + */ + .name = "SysClkCtrl", + .bank = 0x02, + .addr = 0x0c, + .mask = 0x07, + .val = 0x00, + }, + { + /* + * VsimSysClkCtrl + * OTP: 0x01, HSI: 0x21, suspend: 0x01/0xff (value/mask) + * [ 7] VsimSysClkReq8Valid = no connection + * [ 6] VsimSysClkReq7Valid = no connection + * [ 5] 
VsimSysClkReq6Valid = no connection + * [ 4] VsimSysClkReq5Valid = no connection + * [ 3] VsimSysClkReq4Valid = no connection + * [ 2] VsimSysClkReq3Valid = no connection + * [ 1] VsimSysClkReq2Valid = no connection + * [ 0] VsimSysClkReq1Valid = Vsim set by SysClkReq1 + */ + .name = "VsimSysClkCtrl", + .bank = 0x02, + .addr = 0x33, + .mask = 0xff, + .val = 0x01, + }, + { + /* + * SysUlpClkCtrl1 + * OTP: 0x00, HSI: 0x00, suspend: 0x00/0x0f (value/mask) + * [ 3] 4500SysClkReq = inactive + * [ 2] UlpClkReq = inactive + * [1:0] SysUlpClkIntSel[1:0] = no internal clock switching. + * Internal clock is SysClk. + */ + .name = "SysUlpClkCtrl1", + .bank = 0x02, + .addr = 0x0b, + .mask = 0x0f, + .val = 0x00, + }, + { + /* + * TVoutCtrl + * OTP: N/A, HSI: N/A, suspend: 0x00/0x03 (value/mask) + * [ 2] PlugTvOn = plug/unplug detection disabled + * [1:0] TvoutDacCtrl[1:0] = "0" forced on DAC input (test) + */ + .name = "TVoutCtrl", + .bank = 0x06, + .addr = 0x80, + .mask = 0x03, + .val = 0x00, + }, +}; + +static void ab9540_force_reg_update(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ab8500_force_reg); i++) { + if (ab8500_force_reg[i].bank == 0x02 && + ab8500_force_reg[i].addr == 0x0C) { + /* + * SysClkCtrl + * OTP: 0x00, HSI: 0x06, suspend: 0x00/0x07 (value/mask) + * [ 2] USBClkEna = disable SysClk path to USB block + */ + ab8500_force_reg[i].mask = 0x04; + ab8500_force_reg[i].val = 0x00; + } else if (ab8500_force_reg[i].bank == 0x06 && + ab8500_force_reg[i].addr == 0x80) { + /* TVoutCtrl not supported by AB9540 */ + ab8500_force_reg[i].unavailable = true; + } + } +} + +void ab8500_regulator_debug_force(void) +{ + int ret, i; + + /* save state of registers */ + ret = ab8500_regulator_record_state(AB8500_REGULATOR_STATE_SUSPEND); + if (ret < 0) + dev_err(&pdev->dev, "Failed to record suspend state.\n"); + + /* check if registers should be forced */ + if (!setting_suspend_force) + goto exit; + + /* + * Optimize href v2_v50_pwr board for ApSleep/ApDeepSleep + * power 
consumption measurements + */ + + for (i = 0; i < ARRAY_SIZE(ab8500_force_reg); i++) { + if (ab8500_force_reg[i].unavailable) + continue; + + dev_vdbg(&pdev->dev, "Save and set %s: " + "0x%02x, 0x%02x, 0x%02x, 0x%02x.\n", + ab8500_force_reg[i].name, + ab8500_force_reg[i].bank, + ab8500_force_reg[i].addr, + ab8500_force_reg[i].mask, + ab8500_force_reg[i].val); + + /* assume that register should be restored */ + ab8500_force_reg[i].restore = true; + + /* get register value before forcing it */ + ret = abx500_get_register_interruptible(&pdev->dev, + ab8500_force_reg[i].bank, + ab8500_force_reg[i].addr, + &ab8500_force_reg[i].restore_val); + if (ret < 0) { + dev_err(dev, "Failed to read %s.\n", + ab8500_force_reg[i].name); + ab8500_force_reg[i].restore = false; + break; + } + + /* force register value */ + ret = abx500_mask_and_set_register_interruptible(&pdev->dev, + ab8500_force_reg[i].bank, + ab8500_force_reg[i].addr, + ab8500_force_reg[i].mask, + ab8500_force_reg[i].val); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to write %s.\n", + ab8500_force_reg[i].name); + ab8500_force_reg[i].restore = false; + } + } + +exit: + /* save state of registers */ + ret = ab8500_regulator_record_state( + AB8500_REGULATOR_STATE_SUSPEND_CORE); + if (ret < 0) + dev_err(&pdev->dev, "Failed to record suspend state.\n"); + + return; +} + +void ab8500_regulator_debug_restore(void) +{ + int ret, i; + + /* save state of registers */ + ret = ab8500_regulator_record_state(AB8500_REGULATOR_STATE_RESUME_CORE); + if (ret < 0) + dev_err(&pdev->dev, "Failed to record resume state.\n"); + for (i = ARRAY_SIZE(ab8500_force_reg) - 1; i >= 0; i--) { + if (ab8500_force_reg[i].unavailable) + continue; + + /* restore register value */ + if (ab8500_force_reg[i].restore) { + ret = abx500_mask_and_set_register_interruptible( + &pdev->dev, + ab8500_force_reg[i].bank, + ab8500_force_reg[i].addr, + ab8500_force_reg[i].mask, + ab8500_force_reg[i].restore_val); + if (ret < 0) + dev_err(&pdev->dev, "Failed to 
restore %s.\n", + ab8500_force_reg[i].name); + dev_vdbg(&pdev->dev, "Restore %s: " + "0x%02x, 0x%02x, 0x%02x, 0x%02x\n", + ab8500_force_reg[i].name, + ab8500_force_reg[i].bank, + ab8500_force_reg[i].addr, + ab8500_force_reg[i].mask, + ab8500_force_reg[i].restore_val); + } + } + + /* save state of registers */ + ret = ab8500_regulator_record_state(AB8500_REGULATOR_STATE_RESUME); + if (ret < 0) + dev_err(&pdev->dev, "Failed to record resume state.\n"); + + return; +} + +#endif + +static int ab8500_regulator_suspend_force_show(struct seq_file *s, void *p) +{ + /* print suspend standby status */ + if (setting_suspend_force) + return seq_printf(s, "suspend force enabled\n"); + else + return seq_printf(s, "no suspend force\n"); +} + +static int ab8500_regulator_suspend_force_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + char buf[32]; + int buf_size; + unsigned long user_val; + int err; + + /* copy user data */ + buf_size = min(count, (sizeof(buf) - 1)); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + buf[buf_size] = 0; + + /* convert */ + err = strict_strtoul(buf, 0, &user_val); + if (err) + return -EINVAL; + + /* set suspend force setting */ + if (user_val > 1) { + dev_err(dev, "debugfs error input > 1\n"); + return -EINVAL; + } + + if (user_val) + setting_suspend_force = true; + else + setting_suspend_force = false; + + return buf_size; +} + +static int ab8500_regulator_suspend_force_open(struct inode *inode, + struct file *file) +{ + return single_open(file, ab8500_regulator_suspend_force_show, + inode->i_private); +} + +static const struct file_operations ab8500_regulator_suspend_force_fops = { + .open = ab8500_regulator_suspend_force_open, + .write = ab8500_regulator_suspend_force_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static struct dentry *ab8500_regulator_dir; +static struct dentry *ab8500_regulator_dump_file; +static struct dentry 
*ab8500_regulator_status_file; +static struct dentry *ab8500_regulator_suspend_force_file; + +int __devinit ab8500_regulator_debug_init(struct platform_device *plf) +{ + void __iomem *boot_info_backupram; + int ret; + struct ab8500 *ab8500; + + /* setup dev pointers */ + dev = &plf->dev; + pdev = plf; + + /* save state of registers */ + ret = ab8500_regulator_record_state(AB8500_REGULATOR_STATE_INIT); + if (ret < 0) + dev_err(&plf->dev, "Failed to record init state.\n"); + + ab8500 = dev_get_drvdata(plf->dev.parent); + /* Update data structures for AB9540 */ + if (is_ab9540(ab8500)) { + ab9540_registers_update(); + ab9540_regulators_update(); + ab9540_force_reg_update(); + } + /* make suspend-force default if board profile is v5x-power */ + boot_info_backupram = ioremap(BOOT_INFO_BACKUPRAM1, 0x4); + + if (boot_info_backupram) { + u8 board_profile; + board_profile = readb( + boot_info_backupram + BOARD_PROFILE_BACKUPRAM1); + dev_dbg(dev, "Board profile is 0x%02x\n", board_profile); + + if (board_profile >= OPTION_BOARD_VERSION_V5X) + setting_suspend_force = true; + + iounmap(boot_info_backupram); + } else { + dev_err(dev, "Failed to read backupram.\n"); + } + + /* create directory */ + ab8500_regulator_dir = debugfs_create_dir("ab8500-regulator", NULL); + if (!ab8500_regulator_dir) + goto exit_no_debugfs; + + /* create "dump" file */ + ab8500_regulator_dump_file = debugfs_create_file("dump", + S_IRUGO, ab8500_regulator_dir, &plf->dev, + &ab8500_regulator_dump_fops); + if (!ab8500_regulator_dump_file) + goto exit_destroy_dir; + + /* create "status" file */ + ab8500_regulator_status_file = debugfs_create_file("status", + S_IRUGO, ab8500_regulator_dir, &plf->dev, + &ab8500_regulator_status_fops); + if (!ab8500_regulator_status_file) + goto exit_destroy_dump_file; + + /* + * create "suspend-force-v5x" file. As indicated by the name, this is + * only applicable for v2_v5x hardware versions. 
+ */ + ab8500_regulator_suspend_force_file = debugfs_create_file( + "suspend-force-v5x", + S_IRUGO, ab8500_regulator_dir, &plf->dev, + &ab8500_regulator_suspend_force_fops); + if (!ab8500_regulator_suspend_force_file) + goto exit_destroy_status_file; + + return 0; + +exit_destroy_status_file: + debugfs_remove(ab8500_regulator_status_file); +exit_destroy_dump_file: + debugfs_remove(ab8500_regulator_dump_file); +exit_destroy_dir: + debugfs_remove(ab8500_regulator_dir); +exit_no_debugfs: + dev_err(&plf->dev, "failed to create debugfs entries.\n"); + return -ENOMEM; +} + +int __devexit ab8500_regulator_debug_exit(struct platform_device *plf) +{ + debugfs_remove(ab8500_regulator_suspend_force_file); + debugfs_remove(ab8500_regulator_status_file); + debugfs_remove(ab8500_regulator_dump_file); + debugfs_remove(ab8500_regulator_dir); + + return 0; +} + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Bengt Jonsson <bengt.g.jonsson@stericsson.com"); +MODULE_DESCRIPTION("AB8500 Regulator Debug"); +MODULE_ALIAS("platform:ab8500-regulator-debug"); diff --git a/drivers/regulator/ab8500-debug.h b/drivers/regulator/ab8500-debug.h new file mode 100644 index 00000000000..2b59e556a3f --- /dev/null +++ b/drivers/regulator/ab8500-debug.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) ST-Ericsson SA 2010-2011 + * + * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson. 
+ * + * License Terms: GNU General Public License v2 + */ + +#ifndef __AB8500_DEBUG_H__ +#define __AB8500_DEBUG_H__ + +/* + * regulator status print + */ +enum ab8500_regulator_id { + AB8500_VARM, + AB8500_VBBP, + AB8500_VBBN, + AB8500_VAPE, + AB8500_VSMPS1, + AB8500_VSMPS2, + AB8500_VSMPS3, + AB8500_VPLL, + AB8500_VREFDDR, + AB8500_VMOD, + AB8500_VEXTSUPPLY1, + AB8500_VEXTSUPPLY2, + AB8500_VEXTSUPPLY3, + AB8500_VRF1, + AB8500_VANA, + AB8500_VAUX1, + AB8500_VAUX2, + AB8500_VAUX3, + AB9540_VAUX4, /* Note: AB9540 only */ + AB8500_VINTCORE, + AB8500_VTVOUT, + AB8500_VAUDIO, + AB8500_VANAMIC1, + AB8500_VANAMIC2, + AB8500_VDMIC, + AB8500_VUSB, + AB8500_VOTG, + AB8500_VBUSBIS, + AB8500_NUM_REGULATORS, +}; + +enum ab8500_regulator_mode { + AB8500_MODE_OFF = 0, + AB8500_MODE_ON, + AB8500_MODE_HW, + AB8500_MODE_LP +}; + +enum ab8500_regulator_hwmode { + AB8500_HWMODE_NONE = 0, + AB8500_HWMODE_HPLP, + AB8500_HWMODE_HPOFF, + AB8500_HWMODE_HP, + AB8500_HWMODE_HP2, +}; + +enum hwmode_auto { + HWM_OFF = 0, + HWM_ON = 1, + HWM_INVAL = 2, +}; + +struct ab8500_debug_regulator_status { + char *name; + enum ab8500_regulator_mode mode; + enum ab8500_regulator_hwmode hwmode; + enum hwmode_auto hwmode_auto[4]; + int volt_selected; + int volt_len; + int volt[4]; +}; + +int ab8500_regulator_debug_read(enum ab8500_regulator_id id, + struct ab8500_debug_regulator_status *s); +#endif /* __AB8500_DEBUG_H__ */ diff --git a/drivers/regulator/ab8500-ext.c b/drivers/regulator/ab8500-ext.c new file mode 100644 index 00000000000..8a5064c07fb --- /dev/null +++ b/drivers/regulator/ab8500-ext.c @@ -0,0 +1,451 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + * + * Authors: Bengt Jonsson <bengt.g.jonsson@stericsson.com> + * + * This file is based on drivers/regulator/ab8500.c + * + * AB8500 external regulators + * + * ab8500-ext supports the following regulators: + * - VextSupply3 + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include 
<linux/module.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> +#include <linux/mfd/abx500.h> +#include <linux/mfd/abx500/ab8500.h> +#include <linux/regulator/ab8500.h> + +/** + * struct ab8500_ext_regulator_info - ab8500 regulator information + * @dev: device pointer + * @desc: regulator description + * @rdev: regulator device + * @cfg: regulator configuration (extension of regulator FW configuration) + * @is_enabled: status of regulator (on/off) + * @fixed_uV: typical voltage (for fixed voltage supplies) + * @update_bank: bank to control on/off + * @update_reg: register to control on/off + * @update_mask: mask to enable/disable and set mode of regulator + * @update_val: bits holding the regulator current mode + * @update_val_hp: bits to set EN pin active (LPn pin deactive) + * normally this means high power mode + * @update_val_lp: bits to set EN pin active and LPn pin active + * normally this means low power mode + * @update_val_hw: bits to set regulator pins in HW control + * SysClkReq pins and logic will choose mode + */ +struct ab8500_ext_regulator_info { + struct device *dev; + struct regulator_desc desc; + struct regulator_dev *rdev; + struct ab8500_ext_regulator_cfg *cfg; + bool is_enabled; + int fixed_uV; + u8 update_bank; + u8 update_reg; + u8 update_mask; + u8 update_val; + u8 update_val_hp; + u8 update_val_lp; + u8 update_val_hw; +}; + +static int enable(struct ab8500_ext_regulator_info *info, u8 *regval) +{ + int ret; + + *regval = info->update_val; + + /* + * To satisfy both HW high power request and SW request, the regulator + * must be on in high power. 
+ */ + if (info->cfg && info->cfg->hwreq) + *regval = info->update_val_hp; + + ret = abx500_mask_and_set_register_interruptible(info->dev, + info->update_bank, info->update_reg, + info->update_mask, *regval); + if (ret < 0) + dev_err(rdev_get_dev(info->rdev), + "couldn't set enable bits for regulator\n"); + + info->is_enabled = true; + + return ret; +} + +static int ab8500_ext_regulator_enable(struct regulator_dev *rdev) +{ + int ret; + struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev); + u8 regval; + + if (info == NULL) { + dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); + return -EINVAL; + } + + ret = enable(info, ®val); + + dev_dbg(rdev_get_dev(rdev), "%s-enable (bank, reg, mask, value):" + " 0x%02x, 0x%02x, 0x%02x, 0x%02x\n", + info->desc.name, info->update_bank, info->update_reg, + info->update_mask, regval); + + return ret; +} + +static int ab8500_ext_regulator_set_suspend_enable(struct regulator_dev *rdev) +{ + dev_dbg(rdev_get_dev(rdev), "suspend: "); + + return ab8500_ext_regulator_enable(rdev); +} + +static int disable(struct ab8500_ext_regulator_info *info, u8 *regval) +{ + int ret; + + *regval = 0x0; + + /* + * Set the regulator in HW request mode if configured + */ + if (info->cfg && info->cfg->hwreq) + *regval = info->update_val_hw; + + ret = abx500_mask_and_set_register_interruptible(info->dev, + info->update_bank, info->update_reg, + info->update_mask, *regval); + if (ret < 0) + dev_err(rdev_get_dev(info->rdev), + "couldn't set disable bits for regulator\n"); + + info->is_enabled = false; + + return ret; +} + +static int ab8500_ext_regulator_disable(struct regulator_dev *rdev) +{ + int ret; + struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev); + u8 regval; + + if (info == NULL) { + dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); + return -EINVAL; + } + + ret = disable(info, ®val); + + dev_dbg(rdev_get_dev(rdev), "%s-disable (bank, reg, mask, value):" + " 0x%02x, 0x%02x, 0x%02x, 0x%02x\n", + 
info->desc.name, info->update_bank, info->update_reg, + info->update_mask, regval); + + return ret; +} + +static int ab8500_ext_regulator_set_suspend_disable(struct regulator_dev *rdev) +{ + dev_dbg(rdev_get_dev(rdev), "suspend: "); + + return ab8500_ext_regulator_disable(rdev); +} + +static int ab8500_ext_regulator_is_enabled(struct regulator_dev *rdev) +{ + int ret; + struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev); + u8 regval; + + if (info == NULL) { + dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); + return -EINVAL; + } + + ret = abx500_get_register_interruptible(info->dev, + info->update_bank, info->update_reg, ®val); + if (ret < 0) { + dev_err(rdev_get_dev(rdev), + "couldn't read 0x%x register\n", info->update_reg); + return ret; + } + + dev_dbg(rdev_get_dev(rdev), "%s-is_enabled (bank, reg, mask, value):" + " 0x%02x, 0x%02x, 0x%02x, 0x%02x\n", + info->desc.name, info->update_bank, info->update_reg, + info->update_mask, regval); + + if (((regval & info->update_mask) == info->update_val_lp) || + ((regval & info->update_mask) == info->update_val_hp)) + info->is_enabled = true; + else + info->is_enabled = false; + + return info->is_enabled; +} + +static int ab8500_ext_regulator_set_mode(struct regulator_dev *rdev, + unsigned int mode) +{ + int ret = 0; + struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev); + + if (info == NULL) { + dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); + return -EINVAL; + } + + switch (mode) { + case REGULATOR_MODE_NORMAL: + info->update_val = info->update_val_hp; + break; + case REGULATOR_MODE_IDLE: + info->update_val = info->update_val_lp; + break; + + default: + return -EINVAL; + } + + if (info->is_enabled) { + u8 regval; + + ret = enable(info, ®val); + if (ret < 0) + dev_err(rdev_get_dev(rdev), + "Could not set regulator mode.\n"); + + dev_dbg(rdev_get_dev(rdev), + "%s-set_mode (bank, reg, mask, value): " + "0x%x, 0x%x, 0x%x, 0x%x\n", + info->desc.name, info->update_bank, 
info->update_reg, + info->update_mask, regval); + } + + return ret; +} + +static unsigned int ab8500_ext_regulator_get_mode(struct regulator_dev *rdev) +{ + struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev); + int ret; + + if (info == NULL) { + dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); + return -EINVAL; + } + + if (info->update_val == info->update_val_hp) + ret = REGULATOR_MODE_NORMAL; + else if (info->update_val == info->update_val_lp) + ret = REGULATOR_MODE_IDLE; + else + ret = -EINVAL; + + return ret; +} + +static int ab8500_ext_fixed_get_voltage(struct regulator_dev *rdev) +{ + struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev); + + if (info == NULL) { + dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); + return -EINVAL; + } + + return info->fixed_uV; +} + +static int ab8500_ext_list_voltage(struct regulator_dev *rdev, + unsigned selector) +{ + struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev); + + if (info == NULL) { + dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); + return -EINVAL; + } + + /* return the uV for the fixed regulators */ + if (info->fixed_uV) + return info->fixed_uV; + + return -EINVAL; +} + +static struct regulator_ops ab8500_ext_regulator_ops = { + .enable = ab8500_ext_regulator_enable, + .set_suspend_enable = ab8500_ext_regulator_set_suspend_enable, + .disable = ab8500_ext_regulator_disable, + .set_suspend_disable = ab8500_ext_regulator_set_suspend_disable, + .is_enabled = ab8500_ext_regulator_is_enabled, + .set_mode = ab8500_ext_regulator_set_mode, + .get_mode = ab8500_ext_regulator_get_mode, + .get_voltage = ab8500_ext_fixed_get_voltage, + .list_voltage = ab8500_ext_list_voltage, +}; + +static struct ab8500_ext_regulator_info + ab8500_ext_regulator_info[AB8500_NUM_EXT_REGULATORS] = { + [AB8500_EXT_SUPPLY1] = { + .desc = { + .name = "VEXTSUPPLY1", + .ops = &ab8500_ext_regulator_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_EXT_SUPPLY1, + .owner = 
THIS_MODULE, + .n_voltages = 1, + }, + .fixed_uV = 1800000, + .update_bank = 0x04, + .update_reg = 0x08, + .update_mask = 0x03, + .update_val = 0x01, + .update_val_hp = 0x01, + .update_val_lp = 0x03, + .update_val_hw = 0x02, + }, + [AB8500_EXT_SUPPLY2] = { + .desc = { + .name = "VEXTSUPPLY2", + .ops = &ab8500_ext_regulator_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_EXT_SUPPLY2, + .owner = THIS_MODULE, + .n_voltages = 1, + }, + .fixed_uV = 1360000, + .update_bank = 0x04, + .update_reg = 0x08, + .update_mask = 0x0c, + .update_val = 0x04, + .update_val_hp = 0x04, + .update_val_lp = 0x0c, + .update_val_hw = 0x08, + }, + [AB8500_EXT_SUPPLY3] = { + .desc = { + .name = "VEXTSUPPLY3", + .ops = &ab8500_ext_regulator_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_EXT_SUPPLY3, + .owner = THIS_MODULE, + .n_voltages = 1, + }, + .fixed_uV = 3400000, + .update_bank = 0x04, + .update_reg = 0x08, + .update_mask = 0x30, + .update_val = 0x10, + .update_val_hp = 0x10, + .update_val_lp = 0x30, + .update_val_hw = 0x20, + }, +}; + +__devinit int ab8500_ext_regulator_init(struct platform_device *pdev) +{ + struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); + struct ab8500_platform_data *ppdata; + struct ab8500_regulator_platform_data *pdata; + int i, err; + + if (!ab8500) { + dev_err(&pdev->dev, "null mfd parent\n"); + return -EINVAL; + } + ppdata = dev_get_platdata(ab8500->dev); + if (!ppdata) { + dev_err(&pdev->dev, "null parent pdata\n"); + return -EINVAL; + } + + pdata = ppdata->regulator; + if (!pdata) { + dev_err(&pdev->dev, "null pdata\n"); + return -EINVAL; + } + + /* make sure the platform data has the correct size */ + if (pdata->num_ext_regulator != ARRAY_SIZE(ab8500_ext_regulator_info)) { + dev_err(&pdev->dev, "Configuration error: size mismatch.\n"); + return -EINVAL; + } + + /* check for AB8500 2.x */ + if (is_ab8500_2p0_or_earlier(ab8500)) { + struct ab8500_ext_regulator_info *info; + + /* VextSupply3LPn is inverted on AB8500 2.x */ + info = 
&ab8500_ext_regulator_info[AB8500_EXT_SUPPLY3]; + info->update_val = 0x30; + info->update_val_hp = 0x30; + info->update_val_lp = 0x10; + } + + /* register all regulators */ + for (i = 0; i < ARRAY_SIZE(ab8500_ext_regulator_info); i++) { + struct ab8500_ext_regulator_info *info = NULL; + + /* assign per-regulator data */ + info = &ab8500_ext_regulator_info[i]; + info->dev = &pdev->dev; + info->cfg = (struct ab8500_ext_regulator_cfg *) + pdata->ext_regulator[i].driver_data; + + /* register regulator with framework */ + info->rdev = regulator_register(&info->desc, &pdev->dev, + &pdata->ext_regulator[i], info, NULL); + if (IS_ERR(info->rdev)) { + err = PTR_ERR(info->rdev); + dev_err(&pdev->dev, "failed to register regulator %s\n", + info->desc.name); + /* when we fail, un-register all earlier regulators */ + while (--i >= 0) { + info = &ab8500_ext_regulator_info[i]; + regulator_unregister(info->rdev); + } + return err; + } + + dev_dbg(rdev_get_dev(info->rdev), + "%s-probed\n", info->desc.name); + } + + return 0; +} + +__devexit int ab8500_ext_regulator_exit(struct platform_device *pdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ab8500_ext_regulator_info); i++) { + struct ab8500_ext_regulator_info *info = NULL; + info = &ab8500_ext_regulator_info[i]; + + dev_vdbg(rdev_get_dev(info->rdev), + "%s-remove\n", info->desc.name); + + regulator_unregister(info->rdev); + } + + return 0; +} + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Bengt Jonsson <bengt.g.jonsson@stericsson.com>"); +MODULE_DESCRIPTION("AB8500 external regulator driver"); +MODULE_ALIAS("platform:ab8500-ext-regulator"); diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c index c7ee4c15d6f..71328249659 100644 --- a/drivers/regulator/ab8500.c +++ b/drivers/regulator/ab8500.c @@ -21,43 +21,55 @@ #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/regulator/ab8500.h> +#include <linux/mfd/abx500/ab8500-gpio.h> /* for sysclkreq pins */ +#include <mach/id.h> /** * 
struct ab8500_regulator_info - ab8500 regulator information * @dev: device pointer * @desc: regulator description * @regulator_dev: regulator device + * @is_enabled: status of regulator (on/off) * @max_uV: maximum voltage (for variable voltage supplies) * @min_uV: minimum voltage (for variable voltage supplies) * @fixed_uV: typical voltage (for fixed voltage supplies) + * @load_lp_uA: maximum load in idle (low power) mode * @update_bank: bank to control on/off * @update_reg: register to control on/off - * @update_mask: mask to enable/disable regulator - * @update_val_enable: bits to enable the regulator in normal (high power) mode + * @update_mask: mask to enable/disable and set mode of regulator + * @update_val: bits holding the regulator current mode + * @update_val_idle: bits to enable the regulator in idle (low power) mode + * @update_val_normal: bits to enable the regulator in normal (high power) mode * @voltage_bank: bank to control regulator voltage * @voltage_reg: register to control regulator voltage * @voltage_mask: mask to control regulator voltage * @voltages: supported voltage table * @voltages_len: number of supported voltages for the regulator * @delay: startup/set voltage delay in us + * @gpio_pin: ab8500 gpio pin offset number (for sysclkreq regulator only) */ struct ab8500_regulator_info { struct device *dev; struct regulator_desc desc; struct regulator_dev *regulator; + bool is_enabled; int max_uV; int min_uV; int fixed_uV; + int load_lp_uA; u8 update_bank; u8 update_reg; u8 update_mask; - u8 update_val_enable; + u8 update_val; + u8 update_val_idle; + u8 update_val_normal; u8 voltage_bank; u8 voltage_reg; u8 voltage_mask; int const *voltages; int voltages_len; unsigned int delay; + enum ab8500_pin gpio_pin; }; /* voltage tables for the vauxn/vintcore supplies */ @@ -113,15 +125,17 @@ static int ab8500_regulator_enable(struct regulator_dev *rdev) ret = abx500_mask_and_set_register_interruptible(info->dev, info->update_bank, info->update_reg, - 
info->update_mask, info->update_val_enable); + info->update_mask, info->update_val); if (ret < 0) dev_err(rdev_get_dev(rdev), "couldn't set enable bits for regulator\n"); + info->is_enabled = true; + dev_vdbg(rdev_get_dev(rdev), "%s-enable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n", info->desc.name, info->update_bank, info->update_reg, - info->update_mask, info->update_val_enable); + info->update_mask, info->update_val); return ret; } @@ -143,6 +157,8 @@ static int ab8500_regulator_disable(struct regulator_dev *rdev) dev_err(rdev_get_dev(rdev), "couldn't set disable bits for regulator\n"); + info->is_enabled = false; + dev_vdbg(rdev_get_dev(rdev), "%s-disable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n", info->desc.name, info->update_bank, info->update_reg, @@ -151,6 +167,88 @@ static int ab8500_regulator_disable(struct regulator_dev *rdev) return ret; } +static unsigned int ab8500_regulator_get_optimum_mode( + struct regulator_dev *rdev, int input_uV, + int output_uV, int load_uA) +{ + unsigned int mode; + + struct ab8500_regulator_info *info = rdev_get_drvdata(rdev); + + if (info == NULL) { + dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); + return -EINVAL; + } + + if (load_uA <= info->load_lp_uA) + mode = REGULATOR_MODE_IDLE; + else + mode = REGULATOR_MODE_NORMAL; + + return mode; +} + +static int ab8500_regulator_set_mode(struct regulator_dev *rdev, + unsigned int mode) +{ + int ret = 0; + + struct ab8500_regulator_info *info = rdev_get_drvdata(rdev); + + if (info == NULL) { + dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); + return -EINVAL; + } + + switch (mode) { + case REGULATOR_MODE_NORMAL: + info->update_val = info->update_val_normal; + break; + case REGULATOR_MODE_IDLE: + info->update_val = info->update_val_idle; + break; + default: + return -EINVAL; + } + + if (info->is_enabled) { + ret = abx500_mask_and_set_register_interruptible(info->dev, + info->update_bank, info->update_reg, + info->update_mask, 
info->update_val); + if (ret < 0) + dev_err(rdev_get_dev(rdev), + "couldn't set regulator mode\n"); + + dev_vdbg(rdev_get_dev(rdev), + "%s-set_mode (bank, reg, mask, value): " + "0x%x, 0x%x, 0x%x, 0x%x\n", + info->desc.name, info->update_bank, info->update_reg, + info->update_mask, info->update_val); + } + + return ret; +} + +static unsigned int ab8500_regulator_get_mode(struct regulator_dev *rdev) +{ + struct ab8500_regulator_info *info = rdev_get_drvdata(rdev); + int ret; + + if (info == NULL) { + dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); + return -EINVAL; + } + + if (info->update_val == info->update_val_normal) + ret = REGULATOR_MODE_NORMAL; + else if (info->update_val == info->update_val_idle) + ret = REGULATOR_MODE_IDLE; + else + ret = -EINVAL; + + return ret; +} + static int ab8500_regulator_is_enabled(struct regulator_dev *rdev) { int ret; @@ -177,9 +275,11 @@ static int ab8500_regulator_is_enabled(struct regulator_dev *rdev) info->update_mask, regval); if (regval & info->update_mask) - return true; + info->is_enabled = true; else - return false; + info->is_enabled = false; + + return info->is_enabled; } static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector) @@ -273,8 +373,13 @@ static int ab8500_regulator_set_voltage(struct regulator_dev *rdev, *selector = ret; + /* vintcore register has a different layout */ + if (info->desc.id == AB8500_LDO_INTCORE) + regval = ((u8)ret) << 3; + else + regval = (u8)ret; + /* set the registers for the request */ - regval = (u8)ret; ret = abx500_mask_and_set_register_interruptible(info->dev, info->voltage_bank, info->voltage_reg, info->voltage_mask, regval); @@ -314,9 +419,12 @@ static int ab8500_regulator_set_voltage_time_sel(struct regulator_dev *rdev, return info->delay; } -static struct regulator_ops ab8500_regulator_ops = { +static struct regulator_ops ab8500_regulator_volt_mode_ops = { .enable = ab8500_regulator_enable, .disable = ab8500_regulator_disable, + .get_optimum_mode 
= ab8500_regulator_get_optimum_mode, + .set_mode = ab8500_regulator_set_mode, + .get_mode = ab8500_regulator_get_mode, .is_enabled = ab8500_regulator_is_enabled, .get_voltage_sel = ab8500_regulator_get_voltage_sel, .set_voltage = ab8500_regulator_set_voltage, @@ -337,16 +445,116 @@ static int ab8500_fixed_get_voltage(struct regulator_dev *rdev) return info->fixed_uV; } -static struct regulator_ops ab8500_regulator_fixed_ops = { +static struct regulator_ops ab8500_regulator_mode_ops = { .enable = ab8500_regulator_enable, .disable = ab8500_regulator_disable, .is_enabled = ab8500_regulator_is_enabled, + .get_optimum_mode = ab8500_regulator_get_optimum_mode, + .set_mode = ab8500_regulator_set_mode, + .get_mode = ab8500_regulator_get_mode, .get_voltage = ab8500_fixed_get_voltage, .list_voltage = ab8500_list_voltage, .enable_time = ab8500_regulator_enable_time, .set_voltage_time_sel = ab8500_regulator_set_voltage_time_sel, }; +static struct regulator_ops ab8500_regulator_ops = { + .enable = ab8500_regulator_enable, + .disable = ab8500_regulator_disable, + .is_enabled = ab8500_regulator_is_enabled, + .get_voltage = ab8500_fixed_get_voltage, + .list_voltage = ab8500_list_voltage, +}; + +static int ab8500_sysclkreq_enable(struct regulator_dev *rdev) +{ + int ret; + struct ab8500_regulator_info *info = rdev_get_drvdata(rdev); + + if (info == NULL) { + dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); + return -EINVAL; + } + + ret = ab8500_gpio_config_select(info->dev, info->gpio_pin, false); + if (ret < 0) { + dev_err(rdev_get_dev(rdev), + "couldn't set sysclkreq pin selection\n"); + return ret; + } + + info->is_enabled = true; + + dev_vdbg(rdev_get_dev(rdev), + "%s-enable (gpio_pin, gpio_select): %i, false\n", + info->desc.name, info->gpio_pin); + + return ret; +} + +static int ab8500_sysclkreq_disable(struct regulator_dev *rdev) +{ + int ret; + struct ab8500_regulator_info *info = rdev_get_drvdata(rdev); + + if (info == NULL) { + dev_err(rdev_get_dev(rdev), 
"regulator info null pointer\n"); + return -EINVAL; + } + + ret = ab8500_gpio_config_select(info->dev, info->gpio_pin, true); + if (ret < 0) { + dev_err(rdev_get_dev(rdev), + "couldn't set gpio pin selection\n"); + return ret; + } + + info->is_enabled = false; + + dev_vdbg(rdev_get_dev(rdev), + "%s-disable (gpio_pin, gpio_select): %i, true\n", + info->desc.name, info->gpio_pin); + + return ret; +} + +static int ab8500_sysclkreq_is_enabled(struct regulator_dev *rdev) +{ + int ret; + struct ab8500_regulator_info *info = rdev_get_drvdata(rdev); + bool gpio_select; + + if (info == NULL) { + dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); + return -EINVAL; + } + + ret = ab8500_gpio_config_get_select(info->dev, info->gpio_pin, + &gpio_select); + if (ret < 0) { + dev_err(rdev_get_dev(rdev), + "couldn't read gpio pin selection\n"); + return ret; + } + + info->is_enabled = !gpio_select; + + dev_vdbg(rdev_get_dev(rdev), + "%s-is_enabled (gpio_pin, is_enabled): %i, %i\n", + info->desc.name, info->gpio_pin, !gpio_select); + + return info->is_enabled; +} + +static struct regulator_ops ab8500_sysclkreq_ops = { + .enable = ab8500_sysclkreq_enable, + .disable = ab8500_sysclkreq_disable, + .is_enabled = ab8500_sysclkreq_is_enabled, + .get_voltage = ab8500_fixed_get_voltage, + .list_voltage = ab8500_list_voltage, +}; + +/* AB8500 regulator information */ static struct ab8500_regulator_info ab8500_regulator_info[AB8500_NUM_REGULATORS] = { /* @@ -358,7 +566,7 @@ static struct ab8500_regulator_info [AB8500_LDO_AUX1] = { .desc = { .name = "LDO-AUX1", - .ops = &ab8500_regulator_ops, + .ops = &ab8500_regulator_volt_mode_ops, .type = REGULATOR_VOLTAGE, .id = AB8500_LDO_AUX1, .owner = THIS_MODULE, @@ -366,10 +574,13 @@ static struct ab8500_regulator_info }, .min_uV = 1100000, .max_uV = 3300000, + .load_lp_uA = 5000, .update_bank = 0x04, .update_reg = 0x09, .update_mask = 0x03, - .update_val_enable = 0x01, + .update_val = 0x01, + .update_val_idle = 0x03, + .update_val_normal = 
0x01, .voltage_bank = 0x04, .voltage_reg = 0x1f, .voltage_mask = 0x0f, @@ -379,7 +590,7 @@ static struct ab8500_regulator_info [AB8500_LDO_AUX2] = { .desc = { .name = "LDO-AUX2", - .ops = &ab8500_regulator_ops, + .ops = &ab8500_regulator_volt_mode_ops, .type = REGULATOR_VOLTAGE, .id = AB8500_LDO_AUX2, .owner = THIS_MODULE, @@ -387,10 +598,13 @@ static struct ab8500_regulator_info }, .min_uV = 1100000, .max_uV = 3300000, + .load_lp_uA = 5000, .update_bank = 0x04, .update_reg = 0x09, .update_mask = 0x0c, - .update_val_enable = 0x04, + .update_val = 0x04, + .update_val_idle = 0x0c, + .update_val_normal = 0x04, .voltage_bank = 0x04, .voltage_reg = 0x20, .voltage_mask = 0x0f, @@ -400,7 +614,7 @@ static struct ab8500_regulator_info [AB8500_LDO_AUX3] = { .desc = { .name = "LDO-AUX3", - .ops = &ab8500_regulator_ops, + .ops = &ab8500_regulator_volt_mode_ops, .type = REGULATOR_VOLTAGE, .id = AB8500_LDO_AUX3, .owner = THIS_MODULE, @@ -408,10 +622,13 @@ static struct ab8500_regulator_info }, .min_uV = 1100000, .max_uV = 3300000, + .load_lp_uA = 5000, .update_bank = 0x04, .update_reg = 0x0a, .update_mask = 0x03, - .update_val_enable = 0x01, + .update_val = 0x01, + .update_val_idle = 0x03, + .update_val_normal = 0x01, .voltage_bank = 0x04, .voltage_reg = 0x21, .voltage_mask = 0x07, @@ -421,7 +638,7 @@ static struct ab8500_regulator_info [AB8500_LDO_INTCORE] = { .desc = { .name = "LDO-INTCORE", - .ops = &ab8500_regulator_ops, + .ops = &ab8500_regulator_volt_mode_ops, .type = REGULATOR_VOLTAGE, .id = AB8500_LDO_INTCORE, .owner = THIS_MODULE, @@ -429,10 +646,13 @@ static struct ab8500_regulator_info }, .min_uV = 1100000, .max_uV = 3300000, + .load_lp_uA = 5000, .update_bank = 0x03, .update_reg = 0x80, .update_mask = 0x44, - .update_val_enable = 0x04, + .update_val = 0x44, + .update_val_idle = 0x44, + .update_val_normal = 0x04, .voltage_bank = 0x03, .voltage_reg = 0x80, .voltage_mask = 0x38, @@ -448,7 +668,275 @@ static struct ab8500_regulator_info [AB8500_LDO_TVOUT] = { .desc = { 
.name = "LDO-TVOUT", - .ops = &ab8500_regulator_fixed_ops, + .ops = &ab8500_regulator_mode_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_LDO_TVOUT, + .owner = THIS_MODULE, + .n_voltages = 1, + }, + .delay = 500, + .fixed_uV = 2000000, + .load_lp_uA = 1000, + .update_bank = 0x03, + .update_reg = 0x80, + .update_mask = 0x82, + .update_val = 0x02, + .update_val_idle = 0x82, + .update_val_normal = 0x02, + }, + [AB8500_LDO_AUDIO] = { + .desc = { + .name = "LDO-AUDIO", + .ops = &ab8500_regulator_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_LDO_AUDIO, + .owner = THIS_MODULE, + .n_voltages = 1, + }, + .fixed_uV = 2000000, + .update_bank = 0x03, + .update_reg = 0x83, + .update_mask = 0x02, + .update_val = 0x02, + }, + [AB8500_LDO_ANAMIC1] = { + .desc = { + .name = "LDO-ANAMIC1", + .ops = &ab8500_regulator_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_LDO_ANAMIC1, + .owner = THIS_MODULE, + .n_voltages = 1, + }, + .fixed_uV = 2050000, + .update_bank = 0x03, + .update_reg = 0x83, + .update_mask = 0x08, + .update_val = 0x08, + }, + [AB8500_LDO_ANAMIC2] = { + .desc = { + .name = "LDO-ANAMIC2", + .ops = &ab8500_regulator_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_LDO_ANAMIC2, + .owner = THIS_MODULE, + .n_voltages = 1, + }, + .fixed_uV = 2050000, + .update_bank = 0x03, + .update_reg = 0x83, + .update_mask = 0x10, + .update_val = 0x10, + }, + [AB8500_LDO_DMIC] = { + .desc = { + .name = "LDO-DMIC", + .ops = &ab8500_regulator_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_LDO_DMIC, + .owner = THIS_MODULE, + .n_voltages = 1, + }, + .fixed_uV = 1800000, + .update_bank = 0x03, + .update_reg = 0x83, + .update_mask = 0x04, + .update_val = 0x04, + }, + + /* + * Regulators with fixed voltage and normal/idle modes + */ + [AB8500_LDO_ANA] = { + .desc = { + .name = "LDO-ANA", + .ops = &ab8500_regulator_mode_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_LDO_ANA, + .owner = THIS_MODULE, + .n_voltages = 1, + }, + .fixed_uV = 1200000, + .load_lp_uA = 1000, + .update_bank = 
0x04, + .update_reg = 0x06, + .update_mask = 0x0c, + .update_val = 0x04, + .update_val_idle = 0x0c, + .update_val_normal = 0x04, + }, + + /* + * SysClkReq regulators + */ + [AB8500_SYSCLKREQ_2] = { + .desc = { + .name = "SYSCLKREQ-2", + .ops = &ab8500_sysclkreq_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_SYSCLKREQ_2, + .owner = THIS_MODULE, + .n_voltages = 1, + }, + .fixed_uV = 1, /* bogus value */ + .gpio_pin = AB8500_PIN_GPIO1, + }, + [AB8500_SYSCLKREQ_4] = { + .desc = { + .name = "SYSCLKREQ-4", + .ops = &ab8500_sysclkreq_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_SYSCLKREQ_4, + .owner = THIS_MODULE, + .n_voltages = 1, + }, + .fixed_uV = 1, /* bogus value */ + .gpio_pin = AB8500_PIN_GPIO3, + }, +}; + +/* AB9540 regulator information */ +static struct ab8500_regulator_info + ab9540_regulator_info[AB9540_NUM_REGULATORS] = { + /* + * Variable Voltage Regulators + * name, min mV, max mV, + * update bank, reg, mask, enable val + * volt bank, reg, mask, table, table length + */ + [AB9540_LDO_AUX1] = { + .desc = { + .name = "LDO-AUX1", + .ops = &ab8500_regulator_volt_mode_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_LDO_AUX1, + .owner = THIS_MODULE, + .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages), + }, + .min_uV = 1100000, + .max_uV = 3300000, + .load_lp_uA = 5000, + .update_bank = 0x04, + .update_reg = 0x09, + .update_mask = 0x03, + .update_val = 0x01, + .update_val_idle = 0x03, + .update_val_normal = 0x01, + .voltage_bank = 0x04, + .voltage_reg = 0x1f, + .voltage_mask = 0x0f, + .voltages = ldo_vauxn_voltages, + .voltages_len = ARRAY_SIZE(ldo_vauxn_voltages), + }, + [AB9540_LDO_AUX2] = { + .desc = { + .name = "LDO-AUX2", + .ops = &ab8500_regulator_volt_mode_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_LDO_AUX2, + .owner = THIS_MODULE, + .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages), + }, + .min_uV = 1100000, + .max_uV = 3300000, + .load_lp_uA = 5000, + .update_bank = 0x04, + .update_reg = 0x09, + .update_mask = 0x0c, + .update_val = 0x04, + 
.update_val_idle = 0x0c, + .update_val_normal = 0x04, + .voltage_bank = 0x04, + .voltage_reg = 0x20, + .voltage_mask = 0x0f, + .voltages = ldo_vauxn_voltages, + .voltages_len = ARRAY_SIZE(ldo_vauxn_voltages), + }, + [AB9540_LDO_AUX3] = { + .desc = { + .name = "LDO-AUX3", + .ops = &ab8500_regulator_volt_mode_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_LDO_AUX3, + .owner = THIS_MODULE, + .n_voltages = ARRAY_SIZE(ldo_vaux3_voltages), + }, + .min_uV = 1100000, + .max_uV = 3300000, + .load_lp_uA = 5000, + .update_bank = 0x04, + .update_reg = 0x0a, + .update_mask = 0x03, + .update_val = 0x01, + .update_val_idle = 0x03, + .update_val_normal = 0x01, + .voltage_bank = 0x04, + .voltage_reg = 0x21, + .voltage_mask = 0x07, + .voltages = ldo_vaux3_voltages, + .voltages_len = ARRAY_SIZE(ldo_vaux3_voltages), + }, + [AB9540_LDO_AUX4] = { + .desc = { + .name = "LDO-AUX4", + .ops = &ab8500_regulator_volt_mode_ops, + .type = REGULATOR_VOLTAGE, + .id = AB9540_LDO_AUX4, + .owner = THIS_MODULE, + .n_voltages = ARRAY_SIZE(ldo_vauxn_voltages), + }, + .min_uV = 1100000, + .max_uV = 3300000, + .load_lp_uA = 5000, + /* values for Vaux4Regu register */ + .update_bank = 0x04, + .update_reg = 0x2e, + .update_mask = 0x03, + .update_val = 0x01, + .update_val_idle = 0x03, + .update_val_normal = 0x01, + /* values for Vaux4SEL register */ + .voltage_bank = 0x04, + .voltage_reg = 0x2f, + .voltage_mask = 0x0f, + .voltages = ldo_vauxn_voltages, + .voltages_len = ARRAY_SIZE(ldo_vauxn_voltages), + }, + [AB9540_LDO_INTCORE] = { + .desc = { + .name = "LDO-INTCORE", + .ops = &ab8500_regulator_volt_mode_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_LDO_INTCORE, + .owner = THIS_MODULE, + .n_voltages = ARRAY_SIZE(ldo_vintcore_voltages), + }, + .min_uV = 1100000, + .max_uV = 3300000, + .load_lp_uA = 5000, + .update_bank = 0x03, + .update_reg = 0x80, + .update_mask = 0x44, + .update_val = 0x44, + .update_val_idle = 0x44, + .update_val_normal = 0x04, + .voltage_bank = 0x03, + .voltage_reg = 0x80, + 
.voltage_mask = 0x38, + .voltages = ldo_vintcore_voltages, + .voltages_len = ARRAY_SIZE(ldo_vintcore_voltages), + }, + + /* + * Fixed Voltage Regulators + * name, fixed mV, + * update bank, reg, mask, enable val + */ + [AB9540_LDO_TVOUT] = { + .desc = { + .name = "LDO-TVOUT", + .ops = &ab8500_regulator_mode_ops, .type = REGULATOR_VOLTAGE, .id = AB8500_LDO_TVOUT, .owner = THIS_MODULE, @@ -456,17 +944,20 @@ static struct ab8500_regulator_info }, .delay = 10000, .fixed_uV = 2000000, + .load_lp_uA = 1000, .update_bank = 0x03, .update_reg = 0x80, .update_mask = 0x82, - .update_val_enable = 0x02, + .update_val = 0x02, + .update_val_idle = 0x82, + .update_val_normal = 0x02, }, - [AB8500_LDO_USB] = { + [AB9540_LDO_USB] = { .desc = { .name = "LDO-USB", - .ops = &ab8500_regulator_fixed_ops, + .ops = &ab8500_regulator_ops, .type = REGULATOR_VOLTAGE, - .id = AB8500_LDO_USB, + .id = AB9540_LDO_USB, .owner = THIS_MODULE, .n_voltages = 1, }, @@ -474,12 +965,14 @@ static struct ab8500_regulator_info .update_bank = 0x03, .update_reg = 0x82, .update_mask = 0x03, - .update_val_enable = 0x01, + .update_val = 0x01, + .update_val_idle = 0x03, + .update_val_normal = 0x01, }, - [AB8500_LDO_AUDIO] = { + [AB9540_LDO_AUDIO] = { .desc = { .name = "LDO-AUDIO", - .ops = &ab8500_regulator_fixed_ops, + .ops = &ab8500_regulator_ops, .type = REGULATOR_VOLTAGE, .id = AB8500_LDO_AUDIO, .owner = THIS_MODULE, @@ -489,12 +982,12 @@ static struct ab8500_regulator_info .update_bank = 0x03, .update_reg = 0x83, .update_mask = 0x02, - .update_val_enable = 0x02, + .update_val = 0x02, }, - [AB8500_LDO_ANAMIC1] = { + [AB9540_LDO_ANAMIC1] = { .desc = { .name = "LDO-ANAMIC1", - .ops = &ab8500_regulator_fixed_ops, + .ops = &ab8500_regulator_ops, .type = REGULATOR_VOLTAGE, .id = AB8500_LDO_ANAMIC1, .owner = THIS_MODULE, @@ -504,12 +997,12 @@ static struct ab8500_regulator_info .update_bank = 0x03, .update_reg = 0x83, .update_mask = 0x08, - .update_val_enable = 0x08, + .update_val = 0x08, }, - [AB8500_LDO_ANAMIC2] = 
{ + [AB9540_LDO_ANAMIC2] = { .desc = { .name = "LDO-ANAMIC2", - .ops = &ab8500_regulator_fixed_ops, + .ops = &ab8500_regulator_ops, .type = REGULATOR_VOLTAGE, .id = AB8500_LDO_ANAMIC2, .owner = THIS_MODULE, @@ -519,12 +1012,12 @@ static struct ab8500_regulator_info .update_bank = 0x03, .update_reg = 0x83, .update_mask = 0x10, - .update_val_enable = 0x10, + .update_val = 0x10, }, - [AB8500_LDO_DMIC] = { + [AB9540_LDO_DMIC] = { .desc = { .name = "LDO-DMIC", - .ops = &ab8500_regulator_fixed_ops, + .ops = &ab8500_regulator_ops, .type = REGULATOR_VOLTAGE, .id = AB8500_LDO_DMIC, .owner = THIS_MODULE, @@ -534,25 +1027,58 @@ static struct ab8500_regulator_info .update_bank = 0x03, .update_reg = 0x83, .update_mask = 0x04, - .update_val_enable = 0x04, + .update_val = 0x04, }, - [AB8500_LDO_ANA] = { + + /* + * Regulators with fixed voltage and normal/idle modes + */ + [AB9540_LDO_ANA] = { .desc = { .name = "LDO-ANA", - .ops = &ab8500_regulator_fixed_ops, + .ops = &ab8500_regulator_mode_ops, .type = REGULATOR_VOLTAGE, .id = AB8500_LDO_ANA, .owner = THIS_MODULE, .n_voltages = 1, }, .fixed_uV = 1200000, + .load_lp_uA = 1000, .update_bank = 0x04, .update_reg = 0x06, .update_mask = 0x0c, - .update_val_enable = 0x04, + .update_val = 0x04, + .update_val_idle = 0x0c, + .update_val_normal = 0x04, }, - + /* + * SysClkReq regulators + */ + [AB9540_SYSCLKREQ_2] = { + .desc = { + .name = "SYSCLKREQ-2", + .ops = &ab8500_sysclkreq_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_SYSCLKREQ_2, + .owner = THIS_MODULE, + .n_voltages = 1, + }, + .fixed_uV = 1, /* bogus value */ + .gpio_pin = AB8500_PIN_GPIO1, + }, + [AB9540_SYSCLKREQ_4] = { + .desc = { + .name = "SYSCLKREQ-4", + .ops = &ab8500_sysclkreq_ops, + .type = REGULATOR_VOLTAGE, + .id = AB8500_SYSCLKREQ_4, + .owner = THIS_MODULE, + .n_voltages = 1, + }, + .fixed_uV = 1, /* bogus value */ + .gpio_pin = AB8500_PIN_GPIO3, + }, }; struct ab8500_reg_init { @@ -568,13 +1094,13 @@ struct ab8500_reg_init { .mask = _mask, \ } +/* AB8500 register 
init */ static struct ab8500_reg_init ab8500_reg_init[] = { /* * 0x30, VanaRequestCtrl - * 0x0C, VpllRequestCtrl * 0xc0, VextSupply1RequestCtrl */ - REG_INIT(AB8500_REGUREQUESTCTRL2, 0x03, 0x04, 0xfc), + REG_INIT(AB8500_REGUREQUESTCTRL2, 0x03, 0x04, 0xf0), /* * 0x03, VextSupply2RequestCtrl * 0x0c, VextSupply3RequestCtrl @@ -641,13 +1167,21 @@ static struct ab8500_reg_init ab8500_reg_init[] = { REG_INIT(AB8500_REGUSWHPREQVALID2, 0x03, 0x0e, 0x1f), /* * 0x02, SysClkReq2Valid1 - * ... + * 0x04, SysClkReq3Valid1 + * 0x08, SysClkReq4Valid1 + * 0x10, SysClkReq5Valid1 + * 0x20, SysClkReq6Valid1 + * 0x40, SysClkReq7Valid1 * 0x80, SysClkReq8Valid1 */ REG_INIT(AB8500_REGUSYSCLKREQVALID1, 0x03, 0x0f, 0xfe), /* * 0x02, SysClkReq2Valid2 - * ... + * 0x04, SysClkReq3Valid2 + * 0x08, SysClkReq4Valid2 + * 0x10, SysClkReq5Valid2 + * 0x20, SysClkReq6Valid2 + * 0x40, SysClkReq7Valid2 * 0x80, SysClkReq8Valid2 */ REG_INIT(AB8500_REGUSYSCLKREQVALID2, 0x03, 0x10, 0xfe), @@ -672,8 +1206,8 @@ static struct ab8500_reg_init ab8500_reg_init[] = { */ REG_INIT(AB8500_REGUCTRL1VAMIC, 0x03, 0x84, 0x03), /* + * 0x03, VpllRegu (NOTE! 
PRCMU register bits) * 0x0c, VanaRegu - * 0x03, VpllRegu */ REG_INIT(AB8500_VPLLVANAREGU, 0x04, 0x06, 0x0f), /* @@ -699,10 +1233,6 @@ static struct ab8500_reg_init ab8500_reg_init[] = { */ REG_INIT(AB8500_VRF1VAUX3REGU, 0x04, 0x0a, 0x03), /* - * 0x3f, Vsmps1Sel1 - */ - REG_INIT(AB8500_VSMPS1SEL1, 0x04, 0x13, 0x3f), - /* * 0x0f, Vaux1Sel */ REG_INIT(AB8500_VAUX1SEL, 0x04, 0x1f, 0x0f), @@ -735,79 +1265,412 @@ static struct ab8500_reg_init ab8500_reg_init[] = { REG_INIT(AB8500_REGUCTRLDISCH2, 0x04, 0x44, 0x16), }; +/* Possibility to add debug */ +int __attribute__((weak)) ab8500_regulator_debug_init( + struct platform_device *pdev) +{ + return 0; +} + +int __attribute__((weak)) ab8500_regulator_debug_exit( + struct platform_device *pdev) +{ + return 0; +} + +/* AB9540 register init */ +static struct ab8500_reg_init ab9540_reg_init[] = { + /* + * 0x03, VarmRequestCtrl + * 0x0c, VapeRequestCtrl + * 0x30, Vsmps1RequestCtrl + * 0xc0, Vsmps2RequestCtrl + */ + REG_INIT(AB9540_REGUREQUESTCTRL1, 0x03, 0x03, 0xff), + /* + * 0x03, Vsmps3RequestCtrl + * 0x0c, VpllRequestCtrl + * 0x30, VanaRequestCtrl + * 0xc0, VextSupply1RequestCtrl + */ + REG_INIT(AB9540_REGUREQUESTCTRL2, 0x03, 0x04, 0xff), + /* + * 0x03, VextSupply2RequestCtrl + * 0x0c, VextSupply3RequestCtrl + * 0x30, Vaux1RequestCtrl + * 0xc0, Vaux2RequestCtrl + */ + REG_INIT(AB9540_REGUREQUESTCTRL3, 0x03, 0x05, 0xff), + /* + * 0x03, Vaux3RequestCtrl + * 0x04, SwHPReq + */ + REG_INIT(AB9540_REGUREQUESTCTRL4, 0x03, 0x06, 0x07), + /* + * 0x01, Vsmps1SysClkReq1HPValid + * 0x02, Vsmps2SysClkReq1HPValid + * 0x04, Vsmps3SysClkReq1HPValid + * 0x08, VanaSysClkReq1HPValid + * 0x10, VpllSysClkReq1HPValid + * 0x20, Vaux1SysClkReq1HPValid + * 0x40, Vaux2SysClkReq1HPValid + * 0x80, Vaux3SysClkReq1HPValid + */ + REG_INIT(AB9540_REGUSYSCLKREQ1HPVALID1, 0x03, 0x07, 0xff), + /* + * 0x01, VapeSysClkReq1HPValid + * 0x02, VarmSysClkReq1HPValid + * 0x04, VbbSysClkReq1HPValid + * 0x08, VmodSysClkReq1HPValid + * 0x10, VextSupply1SysClkReq1HPValid 
+ * 0x20, VextSupply2SysClkReq1HPValid + * 0x40, VextSupply3SysClkReq1HPValid + */ + REG_INIT(AB9540_REGUSYSCLKREQ1HPVALID2, 0x03, 0x08, 0x7f), + /* + * 0x01, Vsmps1HwHPReq1Valid + * 0x02, Vsmps2HwHPReq1Valid + * 0x04, Vsmps3HwHPReq1Valid + * 0x08, VanaHwHPReq1Valid + * 0x10, VpllHwHPReq1Valid + * 0x20, Vaux1HwHPReq1Valid + * 0x40, Vaux2HwHPReq1Valid + * 0x80, Vaux3HwHPReq1Valid + */ + REG_INIT(AB9540_REGUHWHPREQ1VALID1, 0x03, 0x09, 0xff), + /* + * 0x01, VextSupply1HwHPReq1Valid + * 0x02, VextSupply2HwHPReq1Valid + * 0x04, VextSupply3HwHPReq1Valid + * 0x08, VmodHwHPReq1Valid + */ + REG_INIT(AB9540_REGUHWHPREQ1VALID2, 0x03, 0x0a, 0x0f), + /* + * 0x01, Vsmps1HwHPReq2Valid + * 0x02, Vsmps2HwHPReq2Valid + * 0x03, Vsmps3HwHPReq2Valid + * 0x08, VanaHwHPReq2Valid + * 0x10, VpllHwHPReq2Valid + * 0x20, Vaux1HwHPReq2Valid + * 0x40, Vaux2HwHPReq2Valid + * 0x80, Vaux3HwHPReq2Valid + */ + REG_INIT(AB9540_REGUHWHPREQ2VALID1, 0x03, 0x0b, 0xff), + /* + * 0x01, VextSupply1HwHPReq2Valid + * 0x02, VextSupply2HwHPReq2Valid + * 0x04, VextSupply3HwHPReq2Valid + * 0x08, VmodHwHPReq2Valid + */ + REG_INIT(AB9540_REGUHWHPREQ2VALID2, 0x03, 0x0c, 0x0f), + /* + * 0x01, VapeSwHPReqValid + * 0x02, VarmSwHPReqValid + * 0x04, Vsmps1SwHPReqValid + * 0x08, Vsmps2SwHPReqValid + * 0x10, Vsmps3SwHPReqValid + * 0x20, VanaSwHPReqValid + * 0x40, VpllSwHPReqValid + * 0x80, Vaux1SwHPReqValid + */ + REG_INIT(AB9540_REGUSWHPREQVALID1, 0x03, 0x0d, 0xff), + /* + * 0x01, Vaux2SwHPReqValid + * 0x02, Vaux3SwHPReqValid + * 0x04, VextSupply1SwHPReqValid + * 0x08, VextSupply2SwHPReqValid + * 0x10, VextSupply3SwHPReqValid + * 0x20, VmodSwHPReqValid + */ + REG_INIT(AB9540_REGUSWHPREQVALID2, 0x03, 0x0e, 0x3f), + /* + * 0x02, SysClkReq2Valid1 + * ... + * 0x80, SysClkReq8Valid1 + */ + REG_INIT(AB9540_REGUSYSCLKREQVALID1, 0x03, 0x0f, 0xfe), + /* + * 0x02, SysClkReq2Valid2 + * ... 
+ * 0x80, SysClkReq8Valid2 + */ + REG_INIT(AB9540_REGUSYSCLKREQVALID2, 0x03, 0x10, 0xfe), + /* + * 0x01, Vaux4SwHPReqValid + * 0x02, Vaux4HwHPReq2Valid + * 0x04, Vaux4HwHPReq1Valid + * 0x08, Vaux4SysClkReq1HPValid + */ + REG_INIT(AB9540_REGUVAUX4REQVALID, 0x03, 0x11, 0x0f), + /* + * 0x02, VTVoutEna + * 0x04, Vintcore12Ena + * 0x38, Vintcore12Sel + * 0x40, Vintcore12LP + * 0x80, VTVoutLP + */ + REG_INIT(AB9540_REGUMISC1, 0x03, 0x80, 0xfe), + /* + * 0x02, VaudioEna + * 0x04, VdmicEna + * 0x08, Vamic1Ena + * 0x10, Vamic2Ena + */ + REG_INIT(AB9540_VAUDIOSUPPLY, 0x03, 0x83, 0x1e), + /* + * 0x01, Vamic1_dzout + * 0x02, Vamic2_dzout + */ + REG_INIT(AB9540_REGUCTRL1VAMIC, 0x03, 0x84, 0x03), + /* + * 0x03, Vsmps1Regu + * 0x0c, Vsmps1SelCtrl + * 0x10, Vsmps1AutoMode + * 0x20, Vsmps1PWMMode + */ + REG_INIT(AB9540_VSMPS1REGU, 0x04, 0x03, 0x3f), + /* + * 0x03, Vsmps2Regu + * 0x0c, Vsmps2SelCtrl + * 0x10, Vsmps2AutoMode + * 0x20, Vsmps2PWMMode + */ + REG_INIT(AB9540_VSMPS2REGU, 0x04, 0x04, 0x3f), + /* + * 0x03, Vsmps3Regu + * 0x0c, Vsmps3SelCtrl + * NOTE! 
PRCMU register + */ + REG_INIT(AB9540_VSMPS3REGU, 0x04, 0x05, 0x0f), + /* + * 0x03, VpllRegu + * 0x0c, VanaRegu + */ + REG_INIT(AB9540_VPLLVANAREGU, 0x04, 0x06, 0x0f), + /* + * 0x03, VextSupply1Regu + * 0x0c, VextSupply2Regu + * 0x30, VextSupply3Regu + * 0x40, ExtSupply2Bypass + * 0x80, ExtSupply3Bypass + */ + REG_INIT(AB9540_EXTSUPPLYREGU, 0x04, 0x08, 0xff), + /* + * 0x03, Vaux1Regu + * 0x0c, Vaux2Regu + */ + REG_INIT(AB9540_VAUX12REGU, 0x04, 0x09, 0x0f), + /* + * 0x0c, Vrf1Regu + * 0x03, Vaux3Regu + */ + REG_INIT(AB9540_VRF1VAUX3REGU, 0x04, 0x0a, 0x0f), + /* + * 0x3f, Vsmps1Sel1 + */ + REG_INIT(AB9540_VSMPS1SEL1, 0x04, 0x13, 0x3f), + /* + * 0x3f, Vsmps1Sel2 + */ + REG_INIT(AB9540_VSMPS1SEL2, 0x04, 0x14, 0x3f), + /* + * 0x3f, Vsmps1Sel3 + */ + REG_INIT(AB9540_VSMPS1SEL3, 0x04, 0x15, 0x3f), + /* + * 0x3f, Vsmps2Sel1 + */ + REG_INIT(AB9540_VSMPS2SEL1, 0x04, 0x17, 0x3f), + /* + * 0x3f, Vsmps2Sel2 + */ + REG_INIT(AB9540_VSMPS2SEL2, 0x04, 0x18, 0x3f), + /* + * 0x3f, Vsmps2Sel3 + */ + REG_INIT(AB9540_VSMPS2SEL3, 0x04, 0x19, 0x3f), + /* + * 0x7f, Vsmps3Sel1 + * NOTE! PRCMU register + */ + REG_INIT(AB9540_VSMPS3SEL1, 0x04, 0x1b, 0x7f), + /* + * 0x7f, Vsmps3Sel2 + * NOTE! 
PRCMU register + */ + REG_INIT(AB9540_VSMPS3SEL2, 0x04, 0x1c, 0x7f), + /* + * 0x0f, Vaux1Sel + */ + REG_INIT(AB9540_VAUX1SEL, 0x04, 0x1f, 0x0f), + /* + * 0x0f, Vaux2Sel + */ + REG_INIT(AB9540_VAUX2SEL, 0x04, 0x20, 0x0f), + /* + * 0x07, Vaux3Sel + * 0x30, Vrf1Sel + */ + REG_INIT(AB9540_VRF1VAUX3SEL, 0x04, 0x21, 0x37), + /* + * 0x01, VextSupply12LP + */ + REG_INIT(AB9540_REGUCTRL2SPARE, 0x04, 0x22, 0x01), + /* + * 0x03, Vaux4RequestCtrl + */ + REG_INIT(AB9540_VAUX4REQCTRL, 0x04, 0x2d, 0x03), + /* + * 0x03, Vaux4Regu + */ + REG_INIT(AB9540_VAUX4REGU, 0x04, 0x2e, 0x03), + /* + * 0x08, Vaux4Sel + */ + REG_INIT(AB9540_VAUX4SEL, 0x04, 0x2f, 0x0f), + /* + * 0x01, VpllDisch + * 0x02, Vrf1Disch + * 0x04, Vaux1Disch + * 0x08, Vaux2Disch + * 0x10, Vaux3Disch + * 0x20, Vintcore12Disch + * 0x40, VTVoutDisch + * 0x80, VaudioDisch + */ + REG_INIT(AB9540_REGUCTRLDISCH, 0x04, 0x43, 0xff), + /* + * 0x01, VsimDisch + * 0x02, VanaDisch + * 0x04, VdmicPullDownEna + * 0x08, VpllPullDownEna + * 0x10, VdmicDisch + */ + REG_INIT(AB9540_REGUCTRLDISCH2, 0x04, 0x44, 0x1f), + /* + * 0x01, Vaux4Disch + */ + REG_INIT(AB9540_REGUCTRLDISCH3, 0x04, 0x48, 0x01), +}; + static __devinit int ab8500_regulator_probe(struct platform_device *pdev) { struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); - struct ab8500_platform_data *pdata; + struct ab8500_platform_data *ppdata; + struct ab8500_regulator_platform_data *pdata; int i, err; + struct ab8500_regulator_info *regulator_info; + int regulator_info_size; + struct ab8500_reg_init *reg_init; + int reg_init_size; + /* cache values needed repeatedly inside for-loops */ if (!ab8500) { dev_err(&pdev->dev, "null mfd parent\n"); return -EINVAL; } - pdata = dev_get_platdata(ab8500->dev); + + ppdata = dev_get_platdata(ab8500->dev); + if (!ppdata) { + dev_err(&pdev->dev, "null parent pdata\n"); + return -EINVAL; + } + + pdata = ppdata->regulator; if (!pdata) { dev_err(&pdev->dev, "null pdata\n"); return -EINVAL; } + if (is_ab9540(ab8500)) { + regulator_info 
= ab9540_regulator_info; + regulator_info_size = ARRAY_SIZE(ab9540_regulator_info); + reg_init = ab9540_reg_init; + reg_init_size = AB9540_NUM_REGULATOR_REGISTERS; + } else { + regulator_info = ab8500_regulator_info; + regulator_info_size = ARRAY_SIZE(ab8500_regulator_info); + reg_init = ab8500_reg_init; + reg_init_size = AB8500_NUM_REGULATOR_REGISTERS; + } + /* make sure the platform data has the correct size */ - if (pdata->num_regulator != ARRAY_SIZE(ab8500_regulator_info)) { + if (pdata->num_regulator != regulator_info_size) { dev_err(&pdev->dev, "Configuration error: size mismatch.\n"); return -EINVAL; } + /* initialize debug (initial state is recorded with this call) */ + err = ab8500_regulator_debug_init(pdev); + if (err) + return err; + /* initialize registers */ - for (i = 0; i < pdata->num_regulator_reg_init; i++) { + for (i = 0; i < pdata->num_reg_init; i++) { int id; - u8 value; + u8 mask, value; - id = pdata->regulator_reg_init[i].id; - value = pdata->regulator_reg_init[i].value; + id = pdata->reg_init[i].id; + mask = pdata->reg_init[i].mask; + value = pdata->reg_init[i].value; /* check for configuration errors */ - if (id >= AB8500_NUM_REGULATOR_REGISTERS) { - dev_err(&pdev->dev, - "Configuration error: id outside range.\n"); - return -EINVAL; - } - if (value & ~ab8500_reg_init[id].mask) { - dev_err(&pdev->dev, - "Configuration error: value outside mask.\n"); - return -EINVAL; - } + BUG_ON(id >= reg_init_size); + BUG_ON(value & ~mask); + BUG_ON(mask & ~reg_init[id].mask); /* initialize register */ err = abx500_mask_and_set_register_interruptible(&pdev->dev, - ab8500_reg_init[id].bank, - ab8500_reg_init[id].addr, - ab8500_reg_init[id].mask, - value); + reg_init[id].bank, + reg_init[id].addr, + mask, value); if (err < 0) { dev_err(&pdev->dev, "Failed to initialize 0x%02x, 0x%02x.\n", - ab8500_reg_init[id].bank, - ab8500_reg_init[id].addr); + reg_init[id].bank, + reg_init[id].addr); return err; } dev_vdbg(&pdev->dev, " init: 0x%02x, 0x%02x, 0x%02x, 
0x%02x\n", - ab8500_reg_init[id].bank, - ab8500_reg_init[id].addr, - ab8500_reg_init[id].mask, - value); + reg_init[id].bank, + reg_init[id].addr, + mask, value); + } + + /* + * This changes the default setting for VextSupply3Regu to low power. + * Active high or low is depending on OTP which is changed from ab8500v3.0. + * Remove this when ab8500v2.0 is no longer important. + * This only affects power consumption and it depends on the + * HREF OTP configurations. + */ + if (is_ab8500_2p0_or_earlier(ab8500)) { + err = abx500_mask_and_set_register_interruptible(&pdev->dev, + AB8500_REGU_CTRL2, 0x08, 0x30, 0x30); + if (err < 0) { + dev_err(&pdev->dev, + "Failed to override 0x%02x, 0x%02x.\n", + AB8500_REGU_CTRL2, 0x08); + return err; + } } + /* register external regulators (before Vaux1, 2 and 3) */ + err = ab8500_ext_regulator_init(pdev); + if (err) + return err; + /* register all regulators */ - for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) { + for (i = 0; i < regulator_info_size; i++) { struct ab8500_regulator_info *info = NULL; /* assign per-regulator data */ - info = &ab8500_regulator_info[i]; + info = ®ulator_info[i]; info->dev = &pdev->dev; /* fix for hardware before ab8500v2.0 */ - if (abx500_get_chip_id(info->dev) < 0x20) { + if (is_ab8500_1p1_or_earlier(ab8500)) { if (info->desc.id == AB8500_LDO_AUX3) { info->desc.n_voltages = ARRAY_SIZE(ldo_vauxn_voltages); @@ -827,7 +1690,7 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev) info->desc.name); /* when we fail, un-register all earlier regulators */ while (--i >= 0) { - info = &ab8500_regulator_info[i]; + info = ®ulator_info[i]; regulator_unregister(info->regulator); } return err; @@ -842,11 +1705,23 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev) static __devexit int ab8500_regulator_remove(struct platform_device *pdev) { - int i; + int i, err; + struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); + struct ab8500_regulator_info 
*regulator_info; + int regulator_info_size; + - for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) { + if (is_ab9540(ab8500)) { + regulator_info = ab9540_regulator_info; + regulator_info_size = ARRAY_SIZE(ab9540_regulator_info); + } else { + regulator_info = ab8500_regulator_info; + regulator_info_size = ARRAY_SIZE(ab8500_regulator_info); + } + + for (i = 0; i < regulator_info_size; i++) { struct ab8500_regulator_info *info = NULL; - info = &ab8500_regulator_info[i]; + info = ®ulator_info[i]; dev_vdbg(rdev_get_dev(info->regulator), "%s-remove\n", info->desc.name); @@ -854,6 +1729,16 @@ static __devexit int ab8500_regulator_remove(struct platform_device *pdev) regulator_unregister(info->regulator); } + /* remove external regulators (after Vaux1, 2 and 3) */ + err = ab8500_ext_regulator_exit(pdev); + if (err) + return err; + + /* remove regulator debug */ + err = ab8500_regulator_debug_exit(pdev); + if (err) + return err; + return 0; } @@ -886,5 +1771,6 @@ module_exit(ab8500_regulator_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Sundar Iyer <sundar.iyer@stericsson.com>"); +MODULE_AUTHOR("Bengt Jonsson <bengt.g.jonsson@stericsson.com>"); MODULE_DESCRIPTION("Regulator Driver for ST-Ericsson AB8500 Mixed-Sig PMIC"); MODULE_ALIAS("platform:ab8500-regulator"); diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 046fb1bd861..bc6c12c8a76 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -81,6 +81,7 @@ struct regulator { struct device_attribute dev_attr; struct regulator_dev *rdev; struct dentry *debugfs; + int use; }; static int _regulator_is_enabled(struct regulator_dev *rdev); @@ -199,11 +200,13 @@ static int regulator_check_consumers(struct regulator_dev *rdev, */ if (!regulator->min_uV && !regulator->max_uV) continue; - - if (*max_uV > regulator->max_uV) - *max_uV = regulator->max_uV; - if (*min_uV < regulator->min_uV) - *min_uV = regulator->min_uV; + + if (regulator->use) { + if (*max_uV > regulator->max_uV) + *max_uV = 
regulator->max_uV; + if (*min_uV < regulator->min_uV) + *min_uV = regulator->min_uV; + } } if (*min_uV > *max_uV) @@ -602,6 +605,32 @@ static ssize_t regulator_suspend_standby_state_show(struct device *dev, static DEVICE_ATTR(suspend_standby_state, 0444, regulator_suspend_standby_state_show, NULL); +static ssize_t regulator_use_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct regulator_dev *rdev = dev_get_drvdata(dev); + struct regulator *reg; + size_t size = 0; + + if (rdev->use_count == 0) + return sprintf(buf, "no users\n"); + + list_for_each_entry(reg, &rdev->consumer_list, list) { + if (!reg->use) + continue; + + if (reg->dev != NULL) + size += sprintf((buf + size), "%s (%d) ", + dev_name(reg->dev), reg->use); + else + size += sprintf((buf + size), "unknown (%d) ", + reg->use); + } + size += sprintf((buf + size), "\n"); + + return size; +} +static DEVICE_ATTR(use, 0444, regulator_use_show, NULL); /* * These are the only attributes are present for all regulators. 
@@ -1491,12 +1520,8 @@ static int _regulator_enable(struct regulator_dev *rdev) trace_regulator_enable_delay(rdev_get_name(rdev)); - if (delay >= 1000) { - mdelay(delay / 1000); - udelay(delay % 1000); - } else if (delay) { - udelay(delay); - } + if (delay) + usleep_range(delay, delay); trace_regulator_enable_complete(rdev_get_name(rdev)); @@ -1540,6 +1565,8 @@ int regulator_enable(struct regulator *regulator) if (ret != 0 && rdev->supply) regulator_disable(rdev->supply); + else + regulator->use++; return ret; } @@ -1613,6 +1640,9 @@ int regulator_disable(struct regulator *regulator) if (ret == 0 && rdev->supply) regulator_disable(rdev->supply); + if (ret == 0) + regulator->use--; + return ret; } EXPORT_SYMBOL_GPL(regulator_disable); @@ -2699,6 +2729,10 @@ static int add_regulator_attributes(struct regulator_dev *rdev) struct regulator_ops *ops = rdev->desc->ops; int status = 0; + status = device_create_file(dev, &dev_attr_use); + if (status < 0) + dev_warn(dev, "Create sysfs file \"use\" failed"); + /* some attributes need specific methods to be displayed */ if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) || (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0)) { diff --git a/drivers/regulator/db5500-prcmu.c b/drivers/regulator/db5500-prcmu.c new file mode 100644 index 00000000000..189362ab8e0 --- /dev/null +++ b/drivers/regulator/db5500-prcmu.c @@ -0,0 +1,334 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * License Terms: GNU General Public License v2 + * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson + * Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson + * + * Power domain regulators on DB5500 + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/spinlock.h> +#include <linux/platform_device.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/db5500-prcmu.h> + +#include <linux/mfd/dbx500-prcmu.h> + +#include 
"dbx500-prcmu.h" +static int db5500_regulator_enable(struct regulator_dev *rdev) +{ + struct dbx500_regulator_info *info = rdev_get_drvdata(rdev); + + if (info == NULL) + return -EINVAL; + + dev_vdbg(rdev_get_dev(rdev), "regulator-%s-enable\n", + info->desc.name); + + if (!info->is_enabled) { + info->is_enabled = true; + if (!info->exclude_from_power_state) + power_state_active_enable(); + } + + return 0; +} + +static int db5500_regulator_disable(struct regulator_dev *rdev) +{ + struct dbx500_regulator_info *info = rdev_get_drvdata(rdev); + int ret = 0; + + if (info == NULL) + return -EINVAL; + + dev_vdbg(rdev_get_dev(rdev), "regulator-%s-disable\n", + info->desc.name); + + if (info->is_enabled) { + info->is_enabled = false; + if (!info->exclude_from_power_state) + ret = power_state_active_disable(); + } + + return ret; +} + +static int db5500_regulator_is_enabled(struct regulator_dev *rdev) +{ + struct dbx500_regulator_info *info = rdev_get_drvdata(rdev); + + if (info == NULL) + return -EINVAL; + + dev_vdbg(rdev_get_dev(rdev), "regulator-%s-is_enabled (is_enabled):" + " %i\n", info->desc.name, info->is_enabled); + + return info->is_enabled; +} + +/* db5500 regulator operations */ +static struct regulator_ops db5500_regulator_ops = { + .enable = db5500_regulator_enable, + .disable = db5500_regulator_disable, + .is_enabled = db5500_regulator_is_enabled, +}; + +/* + * EPOD control + */ +static bool epod_on[NUM_EPOD_ID]; +static bool epod_ramret[NUM_EPOD_ID]; + +static inline int epod_id_to_index(u16 epod_id) +{ + return epod_id - DB5500_EPOD_ID_BASE; +} + +static int enable_epod(u16 epod_id, bool ramret) +{ + int idx = epod_id_to_index(epod_id); + int ret; + + if (ramret) { + if (!epod_on[idx]) { + ret = prcmu_set_epod(epod_id, EPOD_STATE_RAMRET); + if (ret < 0) + return ret; + } + epod_ramret[idx] = true; + } else { + ret = prcmu_set_epod(epod_id, EPOD_STATE_ON); + if (ret < 0) + return ret; + epod_on[idx] = true; + } + + return 0; +} + +static int disable_epod(u16 
epod_id, bool ramret) +{ + int idx = epod_id_to_index(epod_id); + int ret; + + if (ramret) { + if (!epod_on[idx]) { + ret = prcmu_set_epod(epod_id, EPOD_STATE_OFF); + if (ret < 0) + return ret; + } + epod_ramret[idx] = false; + } else { + if (epod_ramret[idx]) { + ret = prcmu_set_epod(epod_id, EPOD_STATE_RAMRET); + if (ret < 0) + return ret; + } else { + ret = prcmu_set_epod(epod_id, EPOD_STATE_OFF); + if (ret < 0) + return ret; + } + epod_on[idx] = false; + } + + return 0; +} + +/* + * Regulator switch + */ +static int db5500_regulator_switch_enable(struct regulator_dev *rdev) +{ + struct dbx500_regulator_info *info = rdev_get_drvdata(rdev); + int ret; + + if (info == NULL) + return -EINVAL; + + dev_vdbg(rdev_get_dev(rdev), "regulator-switch-%s-enable\n", + info->desc.name); + + ret = enable_epod(info->epod_id, info->is_ramret); + if (ret < 0) { + dev_err(rdev_get_dev(rdev), + "regulator-switch-%s-enable: prcmu call failed\n", + info->desc.name); + goto out; + } + + info->is_enabled = true; +out: + return ret; +} + +static int db5500_regulator_switch_disable(struct regulator_dev *rdev) +{ + struct dbx500_regulator_info *info = rdev_get_drvdata(rdev); + int ret; + + if (info == NULL) + return -EINVAL; + + dev_vdbg(rdev_get_dev(rdev), "regulator-switch-%s-disable\n", + info->desc.name); + + ret = disable_epod(info->epod_id, info->is_ramret); + if (ret < 0) { + dev_err(rdev_get_dev(rdev), + "regulator_switch-%s-disable: prcmu call failed\n", + info->desc.name); + goto out; + } + + info->is_enabled = 0; +out: + return ret; +} + +static int db5500_regulator_switch_is_enabled(struct regulator_dev *rdev) +{ + struct dbx500_regulator_info *info = rdev_get_drvdata(rdev); + + if (info == NULL) + return -EINVAL; + + dev_vdbg(rdev_get_dev(rdev), + "regulator-switch-%s-is_enabled (is_enabled): %i\n", + info->desc.name, info->is_enabled); + + return info->is_enabled; +} + +static struct regulator_ops db5500_regulator_switch_ops = { + .enable = db5500_regulator_switch_enable, + 
.disable = db5500_regulator_switch_disable, + .is_enabled = db5500_regulator_switch_is_enabled, +}; + +/* + * Regulator information + */ +#define DB5500_REGULATOR_SWITCH(_name, reg) \ + [DB5500_REGULATOR_SWITCH_##reg] = { \ + .desc = { \ + .name = _name, \ + .id = DB5500_REGULATOR_SWITCH_##reg, \ + .ops = &db5500_regulator_switch_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + }, \ + .epod_id = DB5500_EPOD_ID_##reg, \ +} + +static struct dbx500_regulator_info + dbx500_regulator_info[DB5500_NUM_REGULATORS] = { + [DB5500_REGULATOR_VAPE] = { + .desc = { + .name = "db5500-vape", + .id = DB5500_REGULATOR_VAPE, + .ops = &db5500_regulator_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + }, + DB5500_REGULATOR_SWITCH("db5500-sga", SGA), + DB5500_REGULATOR_SWITCH("db5500-hva", HVA), + DB5500_REGULATOR_SWITCH("db5500-sia", SIA), + DB5500_REGULATOR_SWITCH("db5500-disp", DISP), + DB5500_REGULATOR_SWITCH("db5500-esram12", ESRAM12), +}; + +static int __devinit db5500_regulator_probe(struct platform_device *pdev) +{ + struct regulator_init_data *db5500_init_data = + dev_get_platdata(&pdev->dev); + int i, err; + + /* register all regulators */ + for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) { + struct dbx500_regulator_info *info; + struct regulator_init_data *init_data = &db5500_init_data[i]; + + /* assign per-regulator data */ + info = &dbx500_regulator_info[i]; + info->dev = &pdev->dev; + + /* register with the regulator framework */ + info->rdev = regulator_register(&info->desc, &pdev->dev, + init_data, info); + if (IS_ERR(info->rdev)) { + err = PTR_ERR(info->rdev); + dev_err(&pdev->dev, "failed to register %s: err %i\n", + info->desc.name, err); + + /* if failing, unregister all earlier regulators */ + i--; + while (i >= 0) { + info = &dbx500_regulator_info[i]; + regulator_unregister(info->rdev); + i--; + } + return err; + } + + dev_dbg(rdev_get_dev(info->rdev), + "regulator-%s-probed\n", info->desc.name); + } + + return 0; +} + 
+static int __exit db5500_regulator_remove(struct platform_device *pdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dbx500_regulator_info); i++) { + struct dbx500_regulator_info *info; + info = &dbx500_regulator_info[i]; + + dev_vdbg(rdev_get_dev(info->rdev), + "regulator-%s-remove\n", info->desc.name); + + regulator_unregister(info->rdev); + } + + return 0; +} + +static struct platform_driver db5500_regulator_driver = { + .driver = { + .name = "db5500-prcmu-regulators", + .owner = THIS_MODULE, + }, + .probe = db5500_regulator_probe, + .remove = __exit_p(db5500_regulator_remove), +}; + +static int __init db5500_regulator_init(void) +{ + int ret; + + ret = platform_driver_register(&db5500_regulator_driver); + if (ret < 0) + return -ENODEV; + + return 0; +} + +static void __exit db5500_regulator_exit(void) +{ + platform_driver_unregister(&db5500_regulator_driver); +} + +arch_initcall(db5500_regulator_init); +module_exit(db5500_regulator_exit); + +MODULE_AUTHOR("STMicroelectronics/ST-Ericsson"); +MODULE_DESCRIPTION("DB5500 regulator driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/dbx500-prcmu.c b/drivers/regulator/dbx500-prcmu.c index f2e5ecdc586..bee4f7be93b 100644 --- a/drivers/regulator/dbx500-prcmu.c +++ b/drivers/regulator/dbx500-prcmu.c @@ -9,6 +9,7 @@ */ #include <linux/kernel.h> +#include <linux/module.h> #include <linux/err.h> #include <linux/regulator/driver.h> #include <linux/debugfs.h> @@ -62,12 +63,105 @@ out: return ret; } +struct ux500_regulator { + char *name; + void (*enable)(void); + int (*disable)(void); + int count; +}; + +static struct ux500_regulator ux500_atomic_regulators[] = { + { + .name = "dma40.0", + .enable = power_state_active_enable, + .disable = power_state_active_disable, + }, + { + .name = "ssp0", + .enable = power_state_active_enable, + .disable = power_state_active_disable, + }, + { + .name = "ssp1", + .enable = power_state_active_enable, + .disable = power_state_active_disable, + }, + { + .name = "spi0", + 
.enable = power_state_active_enable, + .disable = power_state_active_disable, + }, + { + .name = "spi1", + .enable = power_state_active_enable, + .disable = power_state_active_disable, + }, + { + .name = "spi2", + .enable = power_state_active_enable, + .disable = power_state_active_disable, + }, + { + .name = "spi3", + .enable = power_state_active_enable, + .disable = power_state_active_disable, + }, + { + .name = "cryp1", + .enable = power_state_active_enable, + .disable = power_state_active_disable, + }, + { + .name = "hash1", + .enable = power_state_active_enable, + .disable = power_state_active_disable, + }, +}; + +struct ux500_regulator *__must_check ux500_regulator_get(struct device *dev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ux500_atomic_regulators); i++) { + if (!strcmp(dev_name(dev), ux500_atomic_regulators[i].name)) + return &ux500_atomic_regulators[i]; + } + + return ERR_PTR(-EINVAL); +} +EXPORT_SYMBOL_GPL(ux500_regulator_get); + +int ux500_regulator_atomic_enable(struct ux500_regulator *regulator) +{ + if (regulator) { + regulator->count++; + regulator->enable(); + return 0; + } + return -EINVAL; +} +EXPORT_SYMBOL_GPL(ux500_regulator_atomic_enable); + +int ux500_regulator_atomic_disable(struct ux500_regulator *regulator) +{ + if (regulator) { + regulator->count--; + return regulator->disable(); + } + return -EINVAL; +} +EXPORT_SYMBOL_GPL(ux500_regulator_atomic_disable); + +void ux500_regulator_put(struct ux500_regulator *regulator) +{ + /* Here for symetric reasons and for possible future use */ +} +EXPORT_SYMBOL_GPL(ux500_regulator_put); + #ifdef CONFIG_REGULATOR_DEBUG static struct ux500_regulator_debug { struct dentry *dir; - struct dentry *status_file; - struct dentry *power_state_cnt_file; struct dbx500_regulator_info *regulator_array; int num_regulators; u8 *state_before_suspend; @@ -119,6 +213,35 @@ static const struct file_operations ux500_regulator_power_state_cnt_fops = { .owner = THIS_MODULE, }; +static int 
ux500_regulator_power_state_use_print(struct seq_file *s, void *p) +{ + int i; + + seq_printf(s, "\nPower state usage:\n\n"); + + for (i = 0; i < ARRAY_SIZE(ux500_atomic_regulators); i++) { + seq_printf(s, "%s\t : %d\n", + ux500_atomic_regulators[i].name, + ux500_atomic_regulators[i].count); + } + return 0; +} + +static int ux500_regulator_power_state_use_open(struct inode *inode, + struct file *file) +{ + return single_open(file, ux500_regulator_power_state_use_print, + inode->i_private); +} + +static const struct file_operations ux500_regulator_power_state_use_fops = { + .open = ux500_regulator_power_state_use_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + static int ux500_regulator_status_print(struct seq_file *s, void *p) { struct device *dev = s->private; @@ -180,22 +303,27 @@ ux500_regulator_debug_init(struct platform_device *pdev, { /* create directory */ rdebug.dir = debugfs_create_dir("ux500-regulator", NULL); - if (!rdebug.dir) + if (IS_ERR_OR_NULL(rdebug.dir)) goto exit_no_debugfs; /* create "status" file */ - rdebug.status_file = debugfs_create_file("status", - S_IRUGO, rdebug.dir, &pdev->dev, - &ux500_regulator_status_fops); - if (!rdebug.status_file) - goto exit_destroy_dir; + if (IS_ERR_OR_NULL(debugfs_create_file("status", + S_IRUGO, rdebug.dir, &pdev->dev, + &ux500_regulator_status_fops))) + goto exit_fail; + + /* create "power-state-count" file */ + if (IS_ERR_OR_NULL(debugfs_create_file("power-state-count", + S_IRUGO, rdebug.dir, &pdev->dev, + &ux500_regulator_power_state_cnt_fops))) + goto exit_fail; /* create "power-state-count" file */ - rdebug.power_state_cnt_file = debugfs_create_file("power-state-count", - S_IRUGO, rdebug.dir, &pdev->dev, - &ux500_regulator_power_state_cnt_fops); - if (!rdebug.power_state_cnt_file) - goto exit_destroy_status; + if (IS_ERR_OR_NULL(debugfs_create_file("power-state-usage", + S_IRUGO, rdebug.dir, &pdev->dev, + &ux500_regulator_power_state_use_fops))) 
+ goto exit_fail; + rdebug.regulator_array = regulator_info; rdebug.num_regulators = num_regulators; @@ -204,27 +332,22 @@ ux500_regulator_debug_init(struct platform_device *pdev, if (!rdebug.state_before_suspend) { dev_err(&pdev->dev, "could not allocate memory for saving state\n"); - goto exit_destroy_power_state; + goto exit_fail; } rdebug.state_after_suspend = kzalloc(num_regulators, GFP_KERNEL); if (!rdebug.state_after_suspend) { dev_err(&pdev->dev, "could not allocate memory for saving state\n"); - goto exit_free; + goto exit_fail; } dbx500_regulator_testcase(regulator_info, num_regulators); return 0; -exit_free: +exit_fail: kfree(rdebug.state_before_suspend); -exit_destroy_power_state: - debugfs_remove(rdebug.power_state_cnt_file); -exit_destroy_status: - debugfs_remove(rdebug.status_file); -exit_destroy_dir: - debugfs_remove(rdebug.dir); + debugfs_remove_recursive(rdebug.dir); exit_no_debugfs: dev_err(&pdev->dev, "failed to create debugfs entries.\n"); return -ENOMEM; diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 8c8377d50c4..3e47885660e 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -704,6 +704,13 @@ config RTC_DRV_PCF50633 If you say yes here you get support for the RTC subsystem of the NXP PCF50633 used in embedded systems. +config RTC_DRV_AB + tristate "ST-Ericsson AB5500 RTC" + depends on AB5500_CORE + help + Select this to enable the ST-Ericsson AB5500 Mixed Signal IC RTC + support. This chip contains a battery- and capacitor-backed RTC. + config RTC_DRV_AB3100 tristate "ST-Ericsson AB3100 RTC" depends on AB3100_CORE @@ -715,6 +722,7 @@ config RTC_DRV_AB3100 config RTC_DRV_AB8500 tristate "ST-Ericsson AB8500 RTC" depends on AB8500_CORE + select RTC_INTF_DEV_UIE_EMUL help Select this to enable the ST-Ericsson AB8500 power management IC RTC support. This chip contains a battery- and capacitor-backed RTC. 
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 727ae7786e6..56766bbc519 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -16,6 +16,7 @@ rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o # Keep the list ordered. obj-$(CONFIG_RTC_DRV_88PM860X) += rtc-88pm860x.o +obj-$(CONFIG_RTC_DRV_AB) += rtc-ab.o obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o diff --git a/drivers/rtc/rtc-ab.c b/drivers/rtc/rtc-ab.c new file mode 100644 index 00000000000..009409f39d7 --- /dev/null +++ b/drivers/rtc/rtc-ab.c @@ -0,0 +1,485 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * License terms: GNU General Public License (GPL) version 2 + * Author: Rabin Vincent <rabin.vincent@stericsson.com> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/rtc.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/mfd/abx500.h> +#include <linux/mfd/abx500/ab5500.h> + +#define AB5500_RTC_CLOCK_RATE 32768 +#define AB5500_RTC 0x00 +#define AB5500_RTC_ALARM (1 << 1) +#define AB5500_READREQ 0x01 +#define AB5500_READREQ_REQ 0x01 +#define AB5500_AL0 0x02 +#define AB5500_TI0 0x06 + +/** + * struct ab_rtc - variant specific data + * @irqname: optional name for the alarm interrupt resource + * @epoch: epoch to adjust year to + * @bank: AB bank where this block is present + * @rtc: address of the "RTC" (control) register + * @rtc_alarmon: mask of the alarm enable bit in the above register + * @ti0: address of the TI0 register. The rest of the TI + * registers are assumed to contiguously follow this one. + * @nr_ti: number of TI* registers + * @al0: address of the AL0 register. The rest of the + * AL registers are assumed to contiguously follow this one. 
+ * @nr_al: number of AL* registers + * @startup: optional function to initialize the RTC + * @alarm_to_regs: function to convert alarm time in seconds + * to a list of AL register values + * @time_to_regs: function to convert alarm time in seconds + * to a list of TI register values + * @regs_to_alarm: function to convert a list of AL register + * values to the alarm time in seconds + * @regs_to_time: function to convert a list of TI register + * values to the alarm time in seconds + * @request_read: optional function to request a read from the TI* registers + * @request_write: optional function to request a write to the TI* registers + */ +struct ab_rtc { + const char *irqname; + unsigned int epoch; + + u8 bank; + u8 rtc; + u8 rtc_alarmon; + u8 ti0; + int nr_ti; + u8 al0; + int nr_al; + + int (*startup)(struct device *dev); + void (*alarm_to_regs)(struct device *dev, unsigned long secs, u8 *regs); + void (*time_to_regs)(struct device *dev, unsigned long secs, u8 *regs); + unsigned long (*regs_to_alarm)(struct device *dev, u8 *regs); + unsigned long (*regs_to_time)(struct device *dev, u8 *regs); + int (*request_read)(struct device *dev); + int (*request_write)(struct device *dev); +}; + +static const struct ab_rtc *to_ab_rtc(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + return (struct ab_rtc *)pdev->id_entry->driver_data; +} + +/* Calculate the number of seconds since year, for epoch adjustment */ +static unsigned long ab_rtc_get_elapsed_seconds(unsigned int year) +{ + unsigned long secs; + struct rtc_time tm = { + .tm_year = year - 1900, + .tm_mday = 1, + }; + + rtc_tm_to_time(&tm, &secs); + + return secs; +} + +static int ab5500_rtc_request_read(struct device *dev) +{ + const struct ab_rtc *variant = to_ab_rtc(dev); + unsigned long timeout; + int err; + + err = abx500_set_register_interruptible(dev, variant->bank, + AB5500_READREQ, + AB5500_READREQ_REQ); + if (err < 0) + return err; + + timeout = jiffies + HZ; + while 
(time_before(jiffies, timeout)) { + u8 value; + + err = abx500_get_register_interruptible(dev, variant->bank, + AB5500_READREQ, &value); + if (err < 0) + return err; + + if (!(value & AB5500_READREQ_REQ)) + return 0; + + msleep(1); + } + + return -EIO; +} + +static void +ab5500_rtc_time_to_regs(struct device *dev, unsigned long secs, u8 *regs) +{ + unsigned long mins = secs / 60; + u64 fat_time; + + secs %= 60; + + fat_time = secs * AB5500_RTC_CLOCK_RATE; + fat_time |= (u64)mins << 21; + + regs[0] = (fat_time) & 0xFF; + regs[1] = (fat_time >> 8) & 0xFF; + regs[2] = (fat_time >> 16) & 0xFF; + regs[3] = (fat_time >> 24) & 0xFF; + regs[4] = (fat_time >> 32) & 0xFF; + regs[5] = (fat_time >> 40) & 0xFF; +} + +static unsigned long +ab5500_rtc_regs_to_time(struct device *dev, u8 *regs) +{ + u64 fat_time = ((u64)regs[5] << 40) | ((u64)regs[4] << 32) | + ((u64)regs[3] << 24) | ((u64)regs[2] << 16) | + ((u64)regs[1] << 8) | regs[0]; + unsigned long secs = (fat_time & 0x1fffff) / AB5500_RTC_CLOCK_RATE; + unsigned long mins = fat_time >> 21; + + return mins * 60 + secs; +} + +static void +ab5500_rtc_alarm_to_regs(struct device *dev, unsigned long secs, u8 *regs) +{ + unsigned long mins = secs / 60; + +#ifdef CONFIG_ANDROID + /* + * Needed because Android believes all hw have a wake-up resolution in + * seconds. 
+ */ + mins++; +#endif + + regs[0] = mins & 0xFF; + regs[1] = (mins >> 8) & 0xFF; + regs[2] = (mins >> 16) & 0xFF; +} + +static unsigned long +ab5500_rtc_regs_to_alarm(struct device *dev, u8 *regs) +{ + unsigned long mins = ((unsigned long)regs[2] << 16) | + ((unsigned long)regs[1] << 8) | + regs[0]; + unsigned long secs = mins * 60; + + return secs; +} + +static const struct ab_rtc ab5500_rtc = { + .irqname = "RTC_Alarm", + .bank = AB5500_BANK_RTC, + .rtc = AB5500_RTC, + .rtc_alarmon = AB5500_RTC_ALARM, + .ti0 = AB5500_TI0, + .nr_ti = 6, + .al0 = AB5500_AL0, + .nr_al = 3, + .epoch = 2000, + .time_to_regs = ab5500_rtc_time_to_regs, + .regs_to_time = ab5500_rtc_regs_to_time, + .alarm_to_regs = ab5500_rtc_alarm_to_regs, + .regs_to_alarm = ab5500_rtc_regs_to_alarm, + .request_read = ab5500_rtc_request_read, +}; + +static int ab_rtc_request_read(struct device *dev) +{ + const struct ab_rtc *variant = to_ab_rtc(dev); + + if (!variant->request_read) + return 0; + + return variant->request_read(dev); +} + +static int ab_rtc_request_write(struct device *dev) +{ + const struct ab_rtc *variant = to_ab_rtc(dev); + + if (!variant->request_write) + return 0; + + return variant->request_write(dev); +} + +static bool ab_rtc_valid_time(struct device *dev, struct rtc_time *time) +{ + const struct ab_rtc *variant = to_ab_rtc(dev); + + if (!variant->epoch) + return true; + + return time->tm_year >= variant->epoch - 1900; +} + +static int +ab_rtc_tm_to_time(struct device *dev, struct rtc_time *tm, unsigned long *secs) +{ + const struct ab_rtc *variant = to_ab_rtc(dev); + + rtc_tm_to_time(tm, secs); + + if (variant->epoch) + *secs -= ab_rtc_get_elapsed_seconds(variant->epoch); + + return 0; +} + +static int +ab_rtc_time_to_tm(struct device *dev, unsigned long secs, struct rtc_time *tm) +{ + const struct ab_rtc *variant = to_ab_rtc(dev); + + if (variant->epoch) + secs += ab_rtc_get_elapsed_seconds(variant->epoch); + + rtc_time_to_tm(secs, tm); + + return 0; +} + +static int 
ab_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + const struct ab_rtc *variant = to_ab_rtc(dev); + unsigned char buf[variant->nr_ti]; + unsigned long secs; + int err; + + err = ab_rtc_request_read(dev); + if (err) + return err; + + err = abx500_get_register_page_interruptible(dev, variant->bank, + variant->ti0, + buf, variant->nr_ti); + if (err) + return err; + + secs = variant->regs_to_time(dev, buf); + ab_rtc_time_to_tm(dev, secs, tm); + + return rtc_valid_tm(tm); +} + +static int ab_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + const struct ab_rtc *variant = to_ab_rtc(dev); + unsigned char buf[variant->nr_ti]; + unsigned long secs; + u8 reg = variant->ti0; + int err; + int i; + + if (!ab_rtc_valid_time(dev, tm)) + return -EINVAL; + + ab_rtc_tm_to_time(dev, tm, &secs); + variant->time_to_regs(dev, secs, buf); + + for (i = 0; i < variant->nr_ti; i++, reg++) { + err = abx500_set_register_interruptible(dev, variant->bank, + reg, buf[i]); + if (err) + return err; + } + + return ab_rtc_request_write(dev); +} + +static int ab_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) +{ + const struct ab_rtc *variant = to_ab_rtc(dev); + unsigned long secs; + u8 buf[variant->nr_al]; + u8 rtcval; + int err; + + err = abx500_get_register_interruptible(dev, variant->bank, + variant->rtc, &rtcval); + if (err) + return err; + + alarm->enabled = !!(rtcval & variant->rtc_alarmon); + alarm->pending = 0; + + err = abx500_get_register_page_interruptible(dev, variant->bank, + variant->al0, buf, + variant->nr_al); + if (err) + return err; + + secs = variant->regs_to_alarm(dev, buf); + ab_rtc_time_to_tm(dev, secs, &alarm->time); + + return rtc_valid_tm(&alarm->time); +} + +static int ab_rtc_alarm_enable(struct device *dev, unsigned int enabled) +{ + const struct ab_rtc *variant = to_ab_rtc(dev); + u8 mask = variant->rtc_alarmon; + u8 value = enabled ? 
mask : 0; + + return abx500_mask_and_set_register_interruptible(dev, variant->bank, + variant->rtc, mask, + value); +} + +static int ab_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) +{ + const struct ab_rtc *variant = to_ab_rtc(dev); + unsigned char buf[variant->nr_al]; + unsigned long secs; + u8 reg = variant->al0; + int err; + int i; + + if (!ab_rtc_valid_time(dev, &alarm->time)) + return -EINVAL; + + ab_rtc_tm_to_time(dev, &alarm->time, &secs); + variant->alarm_to_regs(dev, secs, buf); + + /* + * Disable alarm first. Otherwise the RTC may not detect an alarm + * reprogrammed for the same time without disabling the alarm in + * between the programmings. + */ + err = ab_rtc_alarm_enable(dev, false); + if (err) + return err; + + for (i = 0; i < variant->nr_al; i++, reg++) { + err = abx500_set_register_interruptible(dev, variant->bank, + reg, buf[i]); + if (err) + return err; + } + + return alarm->enabled ? ab_rtc_alarm_enable(dev, true) : 0; +} + +static const struct rtc_class_ops ab_rtc_ops = { + .read_time = ab_rtc_read_time, + .set_time = ab_rtc_set_time, + .read_alarm = ab_rtc_read_alarm, + .set_alarm = ab_rtc_set_alarm, + .alarm_irq_enable = ab_rtc_alarm_enable, +}; + +static irqreturn_t ab_rtc_irq(int irq, void *dev_id) +{ + unsigned long events = RTC_IRQF | RTC_AF; + struct rtc_device *rtc = dev_id; + + rtc_update_irq(rtc, 1, events); + + return IRQ_HANDLED; +} + +static int __devinit ab_rtc_probe(struct platform_device *pdev) +{ + const struct ab_rtc *variant = to_ab_rtc(&pdev->dev); + int err; + struct rtc_device *rtc; + int irq = -ENXIO; + + if (variant->irqname) { + irq = platform_get_irq_byname(pdev, variant->irqname); + if (irq < 0) + return irq; + } + + if (variant->startup) { + err = variant->startup(&pdev->dev); + if (err) + return err; + } + + device_init_wakeup(&pdev->dev, true); + + rtc = rtc_device_register("ab8500-rtc", &pdev->dev, &ab_rtc_ops, + THIS_MODULE); + if (IS_ERR(rtc)) { + dev_err(&pdev->dev, "Registration failed\n"); + 
err = PTR_ERR(rtc); + return err; + } + + if (irq >= 0) { + err = request_any_context_irq(irq, ab_rtc_irq, + IRQF_NO_SUSPEND, + pdev->id_entry->name, + rtc); + if (err < 0) { + dev_err(&pdev->dev, "could not get irq: %d\n", err); + goto out_unregister; + } + } + + platform_set_drvdata(pdev, rtc); + + return 0; + +out_unregister: + rtc_device_unregister(rtc); + return err; +} + +static int __devexit ab_rtc_remove(struct platform_device *pdev) +{ + const struct ab_rtc *variant = to_ab_rtc(&pdev->dev); + struct rtc_device *rtc = platform_get_drvdata(pdev); + int irq = platform_get_irq_byname(pdev, variant->irqname); + + if (irq >= 0) + free_irq(irq, rtc); + rtc_device_unregister(rtc); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_device_id ab_rtc_id_table[] = { + { "ab5500-rtc", (kernel_ulong_t)&ab5500_rtc, }, + { }, +}; +MODULE_DEVICE_TABLE(platform, ab_rtc_id_table); + +static struct platform_driver ab_rtc_driver = { + .driver.name = "ab-rtc", + .driver.owner = THIS_MODULE, + .id_table = ab_rtc_id_table, + .probe = ab_rtc_probe, + .remove = __devexit_p(ab_rtc_remove), +}; + +static int __init ab_rtc_init(void) +{ + return platform_driver_register(&ab_rtc_driver); +} +module_init(ab_rtc_init); + +static void __exit ab_rtc_exit(void) +{ + platform_driver_unregister(&ab_rtc_driver); +} +module_exit(ab_rtc_exit); + +MODULE_AUTHOR("Rabin Vincent <rabin.vincent@stericsson.com>"); +MODULE_DESCRIPTION("AB5500 RTC Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c index 4bcf9ca2818..63b3a672a17 100644 --- a/drivers/rtc/rtc-ab8500.c +++ b/drivers/rtc/rtc-ab8500.c @@ -88,22 +88,17 @@ static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm) if (retval < 0) return retval; - /* Early AB8500 chips will not clear the rtc read request bit */ - if (abx500_get_chip_id(dev) == 0) { - usleep_range(1000, 1000); - } else { - /* Wait for some cycles after enabling the rtc read in ab8500 */ - 
while (time_before(jiffies, timeout)) { - retval = abx500_get_register_interruptible(dev, - AB8500_RTC, AB8500_RTC_READ_REQ_REG, &value); - if (retval < 0) - return retval; - - if (!(value & RTC_READ_REQUEST)) - break; - - usleep_range(1000, 5000); - } + /* Wait for some cycles after enabling the rtc read in ab8500 */ + while (time_before(jiffies, timeout)) { + retval = abx500_get_register_interruptible(dev, + AB8500_RTC, AB8500_RTC_READ_REQ_REG, &value); + if (retval < 0) + return retval; + + if (!(value & RTC_READ_REQUEST)) + break; + + usleep_range(1000, 5000); } /* Read the Watchtime registers */ @@ -224,8 +219,8 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) { int retval, i; unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)]; - unsigned long mins, secs = 0; - + unsigned long mins, secs = 0, cursec=0; + struct rtc_time curtm; if (alarm->time.tm_year < (AB8500_RTC_EPOCH - 1900)) { dev_dbg(dev, "year should be equal to or greater than %d\n", AB8500_RTC_EPOCH); @@ -235,14 +230,36 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) /* Get the number of seconds since 1970 */ rtc_tm_to_time(&alarm->time, &secs); + /* Check whether alarm is set less than 1min. + * Since our RTC doesn't support alarm resolution less than 1min, + * return -EINVAL, so UIE EMUL can take it up, incase of UIE_ON + */ + ab8500_rtc_read_time(dev, &curtm); /* Read current time */ + rtc_tm_to_time(&curtm, &cursec); + if ((secs - cursec) < 59) { + dev_dbg(dev, "Alarm less than 1 minute not supported\n"); + return -EINVAL; + } + /* * Convert it to the number of seconds since 01-01-2000 00:00:00, since * we only have a small counter in the RTC. */ secs -= get_elapsed_seconds(AB8500_RTC_EPOCH); +#ifndef CONFIG_ANDROID + secs += 30; /* Round to nearest minute */ +#endif + mins = secs / 60; +#ifdef CONFIG_ANDROID + /* + * Needed due to Android believes all hw have a wake-up resolution + * in seconds. 
+ */ + mins++; +#endif buf[2] = mins & 0xFF; buf[1] = (mins >> 8) & 0xFF; buf[0] = (mins >> 16) & 0xFF; diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index f027c063fb2..cc0533994f6 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -220,17 +220,9 @@ static irqreturn_t pl031_interrupt(int irq, void *dev_id) unsigned long events = 0; rtcmis = readl(ldata->base + RTC_MIS); - if (rtcmis) { - writel(rtcmis, ldata->base + RTC_ICR); - - if (rtcmis & RTC_BIT_AI) - events |= (RTC_AF | RTC_IRQF); - - /* Timer interrupt is only available in ST variants */ - if ((rtcmis & RTC_BIT_PI) && - (ldata->hw_designer == AMBA_VENDOR_ST)) - events |= (RTC_PF | RTC_IRQF); - + if (rtcmis & RTC_BIT_AI) { + writel(RTC_BIT_AI, ldata->base + RTC_ICR); + events |= (RTC_AF | RTC_IRQF); rtc_update_irq(ldata->rtc, 1, events); return IRQ_HANDLED; diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index 400ae2121a2..8bf53a76a33 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -489,6 +489,13 @@ static void giveback(struct pl022 *pl022) pl022->cur_transfer = NULL; pl022->cur_chip = NULL; spi_finalize_current_message(pl022->master); + + /* disable the SPI/SSP operation */ + writew((readw(SSP_CR1(pl022->virtbase)) & + (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); + + /* This message is completed, so let's turn off the clocks & power */ + pm_runtime_put(&pl022->adev->dev); } /** @@ -895,6 +902,12 @@ static int configure_dma(struct pl022 *pl022) struct dma_async_tx_descriptor *rxdesc; struct dma_async_tx_descriptor *txdesc; + /* DMA burstsize should be same as the FIFO trigger level */ + rx_conf.src_maxburst = pl022->rx_lev_trig ? 1 << + (pl022->rx_lev_trig + 1) : pl022->rx_lev_trig; + tx_conf.dst_maxburst = pl022->tx_lev_trig ? 
1 << + (pl022->tx_lev_trig + 1) : pl022->tx_lev_trig; + /* Check that the channels are available */ if (!rxchan || !txchan) return -ENODEV; @@ -2048,6 +2061,9 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id) printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n", adev->res.start, pl022->virtbase); + pm_runtime_enable(dev); + pm_runtime_resume(dev); + pl022->clk = clk_get(&adev->dev, NULL); if (IS_ERR(pl022->clk)) { status = PTR_ERR(pl022->clk); @@ -2158,6 +2174,7 @@ pl022_remove(struct amba_device *adev) clk_disable(pl022->clk); clk_unprepare(pl022->clk); clk_put(pl022->clk); + pm_runtime_disable(&adev->dev); iounmap(pl022->virtbase); amba_release_regions(adev); tasklet_disable(&pl022->pump_transfers); diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig index eb1dee26bda..a4495997d1b 100644 --- a/drivers/staging/android/Kconfig +++ b/drivers/staging/android/Kconfig @@ -8,6 +8,16 @@ config ANDROID if ANDROID +config ANDROID_AB5500_TIMED_VIBRA + bool "AB5500 Timed Output Vibrator" + depends on AB5500_CORE + depends on ANDROID_TIMED_OUTPUT + default y + help + Say Y here to enable linear/rotary vibrator driver using timed + output class device for ST-Ericsson's based on ST-Ericsson's + AB5500 Mix-Sig PMIC + config ANDROID_BINDER_IPC bool "Android Binder IPC Driver" default n @@ -53,6 +63,14 @@ config ANDROID_LOW_MEMORY_KILLER ---help--- Register processes to be killed when memory is low +config ANDROID_STE_TIMED_VIBRA + bool "ST-Ericsson Timed Output Vibrator" + depends on SND_SOC_AB8500 + depends on ANDROID_TIMED_OUTPUT + default y + help + ST-Ericsson's vibrator driver using timed output class device + source "drivers/staging/android/switch/Kconfig" config ANDROID_INTF_ALARM diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile index 9b6c9ed91f6..0d642936a0b 100644 --- a/drivers/staging/android/Makefile +++ b/drivers/staging/android/Makefile @@ -1,3 +1,4 @@ 
+obj-$(CONFIG_ANDROID_AB5500_TIMED_VIBRA) += ab5500-timed-vibra.o obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o obj-$(CONFIG_ASHMEM) += ashmem.o obj-$(CONFIG_ANDROID_LOGGER) += logger.o @@ -6,6 +7,7 @@ obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o +obj-$(CONFIG_ANDROID_STE_TIMED_VIBRA) += ste_timed_vibra.o obj-$(CONFIG_ANDROID_SWITCH) += switch/ obj-$(CONFIG_ANDROID_INTF_ALARM) += alarm.o obj-$(CONFIG_ANDROID_INTF_ALARM_DEV) += alarm-dev.o diff --git a/drivers/staging/android/ab5500-timed-vibra.c b/drivers/staging/android/ab5500-timed-vibra.c new file mode 100644 index 00000000000..35c627a6285 --- /dev/null +++ b/drivers/staging/android/ab5500-timed-vibra.c @@ -0,0 +1,490 @@ +/* + * ab5500-vibra.c - driver for vibrator in ST-Ericsson AB5500 chip + * + * Copyright (C) 2011 ST-Ericsson SA. + * + * License Terms: GNU General Public License v2 + * + * Author: Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com> + */ + +#include <linux/kernel.h> +#include <linux/hrtimer.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/workqueue.h> +#include <linux/wait.h> +#include <linux/err.h> +#include "timed_output.h" + +#include <linux/mfd/abx500.h> /* abx500_* */ +#include <linux/mfd/abx500/ab5500.h> +#include <linux/mfd/abx500/ab5500-gpadc.h> +#include <linux/ab5500-vibra.h> + +#define AB5500_VIBRA_DEV_NAME "ab5500:vibra" +#define AB5500_VIBRA_DRV_NAME "ab5500-vibrator" + +/* Vibrator Register Address Offsets */ +#define AB5500_VIB_CTRL 0x10 +#define AB5500_VIB_VOLT 0x11 +#define AB5500_VIB_FUND_FREQ 0x12 /* Linear vibra resonance freq. 
*/ +#define AB5500_VIB_FUND_DUTY 0x13 +#define AB5500_KELVIN_ANA 0xB1 +#define AB5500_VIBRA_KELVIN 0xFE + +/* Vibrator Control */ +#define AB5500_VIB_DISABLE (0x80) +#define AB5500_VIB_PWR_ON (0x40) +#define AB5500_VIB_FUND_EN (0x20) +#define AB5500_VIB_FREQ_SHIFT (0) +#define AB5500_VIB_DUTY_SHIFT (3) +#define AB5500_VIB_VOLT_SHIFT (0) +#define AB5500_VIB_PULSE_SHIFT (4) +#define VIBRA_KELVIN_ENABLE (0x90) +#define VIBRA_KELVIN_VOUT (0x20) + +/* Vibrator Freq. (in HZ) and Duty */ +enum ab5500_vibra_freq { + AB5500_VIB_FREQ_1HZ = 1, + AB5500_VIB_FREQ_2HZ, + AB5500_VIB_FREQ_4HZ, + AB5500_VIB_FREQ_8HZ, +}; + +enum ab5500_vibra_duty { + AB5500_VIB_DUTY_100 = 0, + AB5500_VIB_DUTY_75 = 8, + AB5500_VIB_DUTY_50 = 16, + AB5500_VIB_DUTY_25 = 24, +}; + +/* Linear vibrator resonance freq. duty */ +#define AB5500_VIB_RDUTY_50 (0x7F) + +/* Vibration magnitudes */ +#define AB5500_VIB_FREQ_MAX (4) +#define AB5500_VIB_DUTY_MAX (4) + +static u8 vib_freq[AB5500_VIB_FREQ_MAX] = { + AB5500_VIB_FREQ_1HZ, + AB5500_VIB_FREQ_2HZ, + AB5500_VIB_FREQ_4HZ, + AB5500_VIB_FREQ_8HZ, +}; + +static u8 vib_duty[AB5500_VIB_DUTY_MAX] = { + AB5500_VIB_DUTY_100, + AB5500_VIB_DUTY_75, + AB5500_VIB_DUTY_50, + AB5500_VIB_DUTY_25, +}; + +/** + * struct ab5500_vibra - Vibrator driver interal info. 
+ * @tdev: Pointer to timed output device structure + * @dev: Reference to vibra device structure + * @vibra_workqueue: Pointer to vibrator workqueue structure + * @vibra_work: Vibrator work + * @gpadc: Gpadc instance + * @vibra_wait: Vibrator wait queue head + * @vibra_lock: Vibrator lock + * @timeout_ms: Indicates how long time the vibrator will be enabled + * @timeout_start: Start of vibrator in jiffies + * @pdata: Local pointer to platform data with vibrator parameters + * @magnitude: required vibration strength + * @enable: Vibrator running status + * @eol: Vibrator end of life(eol) status + **/ +struct ab5500_vibra { + struct timed_output_dev tdev; + struct device *dev; + struct workqueue_struct *vibra_workqueue; + struct work_struct vibra_work; + struct ab5500_gpadc *gpadc; + wait_queue_head_t vibra_wait; + spinlock_t vibra_lock; + unsigned int timeout_ms; + unsigned long timeout_start; + struct ab5500_vibra_platform_data *pdata; + u8 magnitude; + bool enable; + bool eol; +}; + +static inline u8 vibra_magnitude(u8 mag) +{ + mag /= (AB5500_VIB_FREQ_MAX * AB5500_VIB_DUTY_MAX); + mag = vib_freq[mag / AB5500_VIB_FREQ_MAX] << AB5500_VIB_FREQ_SHIFT; + mag |= vib_duty[mag % AB5500_VIB_DUTY_MAX] << AB5500_VIB_DUTY_SHIFT; + + return mag; +} + +static int ab5500_setup_vibra_kelvin(struct ab5500_vibra* vibra) +{ + int ret; + + /* Establish the kelvin IP connection to be measured */ + ret = abx500_set_register_interruptible(vibra->dev, + AB5500_BANK_VIT_IO_I2C_CLK_TST_OTP, + AB5500_KELVIN_ANA, VIBRA_KELVIN_ENABLE); + if (ret < 0) { + dev_err(vibra->dev, "failed to set kelvin network\n"); + return ret; + } + + /* Select vibra parameter to be measured */ + ret = abx500_set_register_interruptible(vibra->dev, AB5500_BANK_VIBRA, + AB5500_VIBRA_KELVIN, VIBRA_KELVIN_VOUT); + if (ret < 0) + dev_err(vibra->dev, "failed to select the kelvin param\n"); + + return ret; +} + +static int ab5500_vibra_start(struct ab5500_vibra* vibra) +{ + u8 ctrl = 0; + + ctrl = AB5500_VIB_PWR_ON | + 
vibra_magnitude(vibra->magnitude); + + if (vibra->pdata->type == AB5500_VIB_LINEAR) + ctrl |= AB5500_VIB_FUND_EN; + + return abx500_set_register_interruptible(vibra->dev, + AB5500_BANK_VIBRA, AB5500_VIB_CTRL, ctrl); +} + +static int ab5500_vibra_stop(struct ab5500_vibra* vibra) +{ + return abx500_mask_and_set_register_interruptible(vibra->dev, + AB5500_BANK_VIBRA, AB5500_VIB_CTRL, + AB5500_VIB_PWR_ON, 0); +} + +static int ab5500_vibra_eol_check(struct ab5500_vibra* vibra) +{ + int ret, vout; + + ret = ab5500_setup_vibra_kelvin(vibra); + if (ret < 0) { + dev_err(vibra->dev, "failed to setup kelvin network\n"); + return ret; + } + + /* Start vibra to measure voltage */ + ret = ab5500_vibra_start(vibra); + if (ret < 0) { + dev_err(vibra->dev, "failed to start vibra\n"); + return ret; + } + /* 20ms delay required for voltage rampup */ + wait_event_interruptible_timeout(vibra->vibra_wait, + 0, msecs_to_jiffies(20)); + + vout = ab5500_gpadc_convert(vibra->gpadc, VIBRA_KELVIN); + if (vout < 0) { + dev_err(vibra->dev, "failed to read gpadc vibra\n"); + return vout; + } + + /* Stop vibra after measuring voltage */ + ret = ab5500_vibra_stop(vibra); + if (ret < 0) { + dev_err(vibra->dev, "failed to stop vibra\n"); + return ret; + } + /* Check for vibra eol condition */ + if (vout < vibra->pdata->eol_voltage) { + vibra->eol = true; + dev_err(vibra->dev, "Vibra eol detected. 
Disabling vibra!\n"); + } + + return ret; +} + +/** + * ab5500_vibra_work() - Vibrator work, turns on/off vibrator + * @work: Pointer to work structure + * + * This function is called from workqueue, turns on/off vibrator + **/ +static void ab5500_vibra_work(struct work_struct *work) +{ + struct ab5500_vibra *vibra = container_of(work, + struct ab5500_vibra, vibra_work); + unsigned long flags; + int ret; + + ret = ab5500_vibra_start(vibra); + if (ret < 0) + dev_err(vibra->dev, "reg[%d] w failed: %d\n", + AB5500_VIB_CTRL, ret); + + wait_event_interruptible_timeout(vibra->vibra_wait, + 0, msecs_to_jiffies(vibra->timeout_ms)); + + ret = ab5500_vibra_stop(vibra); + if (ret < 0) + dev_err(vibra->dev, "reg[%d] w failed: %d\n", + AB5500_VIB_CTRL, ret); + + spin_lock_irqsave(&vibra->vibra_lock, flags); + + vibra->timeout_start = 0; + vibra->enable = false; + + spin_unlock_irqrestore(&vibra->vibra_lock, flags); +} + +/** + * vibra_enable() - Enables vibrator + * @tdev: Pointer to timed output device structure + * @timeout: Time indicating how long vibrator will be enabled + * + * This function enables vibrator + **/ +static void vibra_enable(struct timed_output_dev *tdev, int timeout) +{ + struct ab5500_vibra *vibra = dev_get_drvdata(tdev->dev); + unsigned long flags; + + spin_lock_irqsave(&vibra->vibra_lock, flags); + + if ((!vibra->enable || timeout) && !vibra->eol) { + vibra->enable = true; + + vibra->timeout_ms = timeout; + vibra->timeout_start = jiffies; + queue_work(vibra->vibra_workqueue, &vibra->vibra_work); + } + + spin_unlock_irqrestore(&vibra->vibra_lock, flags); +} + +/** + * vibra_get_time() - Returns remaining time to disabling vibration + * @tdev: Pointer to timed output device structure + * + * This function returns time remaining to disabling vibration + * + * Returns: + * Returns remaining time to disabling vibration + **/ +static int vibra_get_time(struct timed_output_dev *tdev) +{ + struct ab5500_vibra *vibra = dev_get_drvdata(tdev->dev); + unsigned int 
ms; + unsigned long flags; + + spin_lock_irqsave(&vibra->vibra_lock, flags); + + if (vibra->enable) + ms = jiffies_to_msecs(vibra->timeout_start + + msecs_to_jiffies(vibra->timeout_ms) - jiffies); + else + ms = 0; + + spin_unlock_irqrestore(&vibra->vibra_lock, flags); + + return ms; +} + +static int ab5500_vibra_reg_init(struct ab5500_vibra *vibra) +{ + int ret = 0; + u8 ctrl = 0; + u8 pulse = 0; + + ctrl = (AB5500_VIB_DUTY_50 << AB5500_VIB_DUTY_SHIFT) | + (AB5500_VIB_FREQ_8HZ << AB5500_VIB_FREQ_SHIFT); + + if (vibra->pdata->type == AB5500_VIB_LINEAR) { + ctrl |= AB5500_VIB_FUND_EN; + + if (vibra->pdata->voltage > AB5500_VIB_VOLT_MAX) + vibra->pdata->voltage = AB5500_VIB_VOLT_MAX; + + pulse = (vibra->pdata->pulse << AB5500_VIB_PULSE_SHIFT) | + (vibra->pdata->voltage << AB5500_VIB_VOLT_SHIFT); + ret = abx500_set_register_interruptible(vibra->dev, + AB5500_BANK_VIBRA, AB5500_VIB_VOLT, + pulse); + if (ret < 0) { + dev_err(vibra->dev, + "reg[%#x] w %#x failed: %d\n", + AB5500_VIB_VOLT, vibra->pdata->voltage, ret); + return ret; + } + + ret = abx500_set_register_interruptible(vibra->dev, + AB5500_BANK_VIBRA, AB5500_VIB_FUND_FREQ, + vibra->pdata->res_freq); + if (ret < 0) { + dev_err(vibra->dev, "reg[%#x] w %#x failed: %d\n", + AB5500_VIB_FUND_FREQ, + vibra->pdata->res_freq, ret); + return ret; + } + + ret = abx500_set_register_interruptible(vibra->dev, + AB5500_BANK_VIBRA, AB5500_VIB_FUND_DUTY, + AB5500_VIB_RDUTY_50); + if (ret < 0) { + dev_err(vibra->dev, "reg[%#x] w %#x failed: %d\n", + AB5500_VIB_FUND_DUTY, + AB5500_VIB_RDUTY_50, ret); + return ret; + } + } + + ret = abx500_set_register_interruptible(vibra->dev, + AB5500_BANK_VIBRA, AB5500_VIB_CTRL, ctrl); + if (ret < 0) { + dev_err(vibra->dev, "reg[%#x] w %#x failed: %d\n", + AB5500_VIB_CTRL, ctrl, ret); + return ret; + } + + return ret; +} + +static int ab5500_vibra_register_dev(struct ab5500_vibra *vibra, + struct platform_device *pdev) +{ + int ret = 0; + + ret = timed_output_dev_register(&vibra->tdev); + if 
(ret) { + dev_err(&pdev->dev, "failed to register timed output device\n"); + goto err_out; + } + + dev_set_drvdata(vibra->tdev.dev, vibra); + + + /* Create workqueue just for timed output vibrator */ + vibra->vibra_workqueue = + create_singlethread_workqueue("ste-timed-output-vibra"); + if (!vibra->vibra_workqueue) { + dev_err(&pdev->dev, "failed to allocate workqueue\n"); + ret = -ENOMEM; + goto exit_output_unregister; + } + + init_waitqueue_head(&vibra->vibra_wait); + INIT_WORK(&vibra->vibra_work, ab5500_vibra_work); + spin_lock_init(&vibra->vibra_lock); + + platform_set_drvdata(pdev, vibra); + + return ret; + +exit_output_unregister: + timed_output_dev_unregister(&vibra->tdev); +err_out: + return ret; +} + +static int __devinit ab5500_vibra_probe(struct platform_device *pdev) +{ + struct ab5500_vibra_platform_data *pdata = pdev->dev.platform_data; + struct ab5500_vibra *vibra = NULL; + int ret = 0; + + if (pdata == NULL) { + dev_err(&pdev->dev, "platform data required. Quitting...\n"); + return -ENODEV; + } + + vibra = kzalloc(sizeof(struct ab5500_vibra), GFP_KERNEL); + if (vibra == NULL) + return -ENOMEM; + + vibra->tdev.name = "vibrator"; + vibra->tdev.enable = vibra_enable; + vibra->tdev.get_time = vibra_get_time; + vibra->timeout_start = 0; + vibra->enable = false; + vibra->magnitude = pdata->magnitude; + vibra->pdata = pdata; + vibra->dev = &pdev->dev; + + if (vibra->pdata->eol_voltage) { + vibra->gpadc = ab5500_gpadc_get("ab5500-adc.0"); + if (IS_ERR(vibra->gpadc)) + goto err_alloc; + } + + if (vibra->pdata->type == AB5500_VIB_LINEAR) + dev_info(&pdev->dev, "Linear Type Vibrators\n"); + else + dev_info(&pdev->dev, "Rotary Type Vibrators\n"); + + ret = ab5500_vibra_reg_init(vibra); + if (ret < 0) + goto err_alloc; + + ret = ab5500_vibra_register_dev(vibra, pdev); + if (ret < 0) + goto err_alloc; + + /* Perform vibra eol diagnostics if eol_voltage is set */ + if (vibra->pdata->eol_voltage) { + ret = ab5500_vibra_eol_check(vibra); + if (ret < 0) + 
dev_warn(&pdev->dev, "EOL check failed\n"); + } + + dev_info(&pdev->dev, "initialization success\n"); + + return ret; + +err_alloc: + kfree(vibra); + + return ret; +} + +static int __devexit ab5500_vibra_remove(struct platform_device *pdev) +{ + struct ab5500_vibra *vibra = platform_get_drvdata(pdev); + + timed_output_dev_unregister(&vibra->tdev); + destroy_workqueue(vibra->vibra_workqueue); + kfree(vibra); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver ab5500_vibra_driver = { + .probe = ab5500_vibra_probe, + .remove = __devexit_p(ab5500_vibra_remove), + .driver = { + .name = AB5500_VIBRA_DRV_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init ab5500_vibra_module_init(void) +{ + return platform_driver_register(&ab5500_vibra_driver); +} + +static void __exit ab5500_vibra_module_exit(void) +{ + platform_driver_unregister(&ab5500_vibra_driver); +} + +module_init(ab5500_vibra_module_init); +module_exit(ab5500_vibra_module_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>"); +MODULE_DESCRIPTION("Timed Output Driver for AB5500 Vibrator"); + diff --git a/drivers/staging/android/ste_timed_vibra.c b/drivers/staging/android/ste_timed_vibra.c new file mode 100644 index 00000000000..4621b2fb441 --- /dev/null +++ b/drivers/staging/android/ste_timed_vibra.c @@ -0,0 +1,431 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * Author: Marcin Mielczarczyk <marcin.mielczarczyk@tieto.com> + * for ST-Ericsson + * License Terms: GNU General Public License v2 + */ + +#include <linux/kernel.h> +#include <linux/hrtimer.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/workqueue.h> +#include <linux/ste_timed_vibra.h> +#include <linux/delay.h> +#include "timed_output.h" + +/** + * struct vibra_info - Vibrator information structure + * @tdev: Pointer to timed output device structure + * @linear_workqueue: 
Pointer to linear vibrator workqueue structure + * @linear_work: Linear Vibrator work + * @linear_tick: Linear Vibrator high resolution timer + * @vibra_workqueue: Pointer to vibrator workqueue structure + * @vibra_work: Vibrator work + * @vibra_timer: Vibrator high resolution timer + * @vibra_lock: Vibrator lock + * @vibra_state: Actual vibrator state + * @state_force: Indicates if oppositive state is requested + * @timeout: Indicates how long time the vibrator will be enabled + * @time_passed: Total time passed in states + * @pdata: Local pointer to platform data with vibrator parameters + * + * Structure vibra_info holds vibrator information + **/ +struct vibra_info { + struct timed_output_dev tdev; + struct workqueue_struct *linear_workqueue; + struct work_struct linear_work; + struct hrtimer linear_tick; + struct workqueue_struct *vibra_workqueue; + struct work_struct vibra_work; + struct hrtimer vibra_timer; + spinlock_t vibra_lock; + enum ste_timed_vibra_states vibra_state; + bool state_force; + unsigned int timeout; + unsigned int time_passed; + struct ste_timed_vibra_platform_data *pdata; +}; + +/* + * Linear vibrator hardware operates on a particular resonance + * frequency. The resonance frequency (f) may also vary with h/w. + * This define is half time period (t) in micro seconds (us). + * For resonance frequency f = 150 Hz + * t = T/2 = ((1/150) / 2) = 3333 usec. 
+ */ +#define LINEAR_RESONANCE 3333 + +/** + * linear_vibra_work() - Linear Vibrator work, turns on/off vibrator + * @work: Pointer to work structure + * + * This function is called from workqueue, turns on/off vibrator + **/ +static void linear_vibra_work(struct work_struct *work) +{ + struct vibra_info *vinfo = + container_of(work, struct vibra_info, linear_work); + unsigned char speed_pos = 0, speed_neg = 0; + ktime_t ktime; + static unsigned char toggle; + + if (toggle) { + speed_pos = vinfo->pdata->boost_level; + speed_neg = 0; + } else { + speed_neg = vinfo->pdata->boost_level; + speed_pos = 0; + } + + toggle = !toggle; + vinfo->pdata->timed_vibra_control(speed_pos, speed_neg, + speed_pos, speed_neg); + + if ((vinfo->vibra_state != STE_VIBRA_IDLE) && + (vinfo->vibra_state != STE_VIBRA_OFF)) { + ktime = ktime_set((LINEAR_RESONANCE / USEC_PER_SEC), + (LINEAR_RESONANCE % USEC_PER_SEC) * NSEC_PER_USEC); + hrtimer_start(&vinfo->linear_tick, ktime, HRTIMER_MODE_REL); + } +} + +/** + * vibra_control_work() - Vibrator work, turns on/off vibrator + * @work: Pointer to work structure + * + * This function is called from workqueue, turns on/off vibrator + **/ +static void vibra_control_work(struct work_struct *work) +{ + struct vibra_info *vinfo = + container_of(work, struct vibra_info, vibra_work); + unsigned val = 0; + unsigned char speed_pos = 0, speed_neg = 0; + unsigned long flags; + + /* + * Cancel scheduled timer if it has not started + * else it will wait for timer callback to complete. + * It should be done before taking vibra_lock to + * prevent race condition, as timer callback also + * takes same lock. 
+ */ + hrtimer_cancel(&vinfo->vibra_timer); + + spin_lock_irqsave(&vinfo->vibra_lock, flags); + + switch (vinfo->vibra_state) { + case STE_VIBRA_BOOST: + /* Turn on both vibrators with boost speed */ + speed_pos = vinfo->pdata->boost_level; + val = vinfo->pdata->boost_time; + break; + case STE_VIBRA_ON: + /* Turn on both vibrators with speed */ + speed_pos = vinfo->pdata->on_level; + val = vinfo->timeout - vinfo->pdata->boost_time; + break; + case STE_VIBRA_OFF: + /* Turn on both vibrators with reversed speed */ + speed_neg = vinfo->pdata->off_level; + val = vinfo->pdata->off_time; + break; + case STE_VIBRA_IDLE: + vinfo->time_passed = 0; + break; + default: + break; + } + spin_unlock_irqrestore(&vinfo->vibra_lock, flags); + + /* Send new settings (only for rotary vibrators) */ + if (!vinfo->pdata->is_linear_vibra) + vinfo->pdata->timed_vibra_control(speed_pos, speed_neg, + speed_pos, speed_neg); + + if (vinfo->vibra_state != STE_VIBRA_IDLE) { + /* Start timer if it's not in IDLE state */ + ktime_t ktime; + ktime = ktime_set((val / MSEC_PER_SEC), + (val % MSEC_PER_SEC) * NSEC_PER_MSEC), + hrtimer_start(&vinfo->vibra_timer, ktime, HRTIMER_MODE_REL); + } else if (vinfo->pdata->is_linear_vibra) { + /* Cancel work and timers of linear vibrator in IDLE state */ + hrtimer_cancel(&vinfo->linear_tick); + flush_workqueue(vinfo->linear_workqueue); + vinfo->pdata->timed_vibra_control(0, 0, 0, 0); + } +} + +/** + * vibra_enable() - Enables vibrator + * @tdev: Pointer to timed output device structure + * @timeout: Time indicating how long vibrator will be enabled + * + * This function enables vibrator + **/ +static void vibra_enable(struct timed_output_dev *tdev, int timeout) +{ + struct vibra_info *vinfo = dev_get_drvdata(tdev->dev); + unsigned long flags; + + spin_lock_irqsave(&vinfo->vibra_lock, flags); + switch (vinfo->vibra_state) { + case STE_VIBRA_IDLE: + if (timeout) + vinfo->vibra_state = STE_VIBRA_BOOST; + else /* Already disabled */ + break; + + vinfo->state_force = 
false; + /* Trim timeout */ + vinfo->timeout = timeout < vinfo->pdata->boost_time ? + vinfo->pdata->boost_time : timeout; + + if (vinfo->pdata->is_linear_vibra) + queue_work(vinfo->linear_workqueue, + &vinfo->linear_work); + queue_work(vinfo->vibra_workqueue, &vinfo->vibra_work); + break; + case STE_VIBRA_BOOST: + /* Force only when user requested OFF while BOOST */ + if (!timeout) + vinfo->state_force = true; + break; + case STE_VIBRA_ON: + /* If user requested OFF */ + if (!timeout) { + if (vinfo->pdata->is_linear_vibra) + hrtimer_cancel(&vinfo->linear_tick); + /* Cancel timer if it has not expired yet. + * Else setting the vibra_state to STE_VIBRA_OFF + * will make take care that vibrator will move to + * STE_VIBRA_IDLE in timer callback just after + * this function call. + */ + hrtimer_try_to_cancel(&vinfo->vibra_timer); + vinfo->vibra_state = STE_VIBRA_OFF; + queue_work(vinfo->vibra_workqueue, &vinfo->vibra_work); + } + break; + case STE_VIBRA_OFF: + /* Force only when user requested ON while OFF */ + if (timeout) + vinfo->state_force = true; + break; + default: + break; + } + spin_unlock_irqrestore(&vinfo->vibra_lock, flags); +} + +/** + * linear_vibra_tick() - Generate resonance frequency waveform + * @hrtimer: Pointer to high resolution timer structure + * + * This function helps in generating the resonance frequency + * waveform required for linear vibrators + * + * Returns: + * Returns value which indicates whether hrtimer should be restarted + **/ +static enum hrtimer_restart linear_vibra_tick(struct hrtimer *hrtimer) +{ + struct vibra_info *vinfo = + container_of(hrtimer, struct vibra_info, linear_tick); + + if ((vinfo->vibra_state != STE_VIBRA_IDLE) && + (vinfo->vibra_state != STE_VIBRA_OFF)) { + queue_work(vinfo->linear_workqueue, &vinfo->linear_work); + } + + return HRTIMER_NORESTART; +} + +/** + * vibra_timer_expired() - Handles vibrator machine state + * @hrtimer: Pointer to high resolution timer structure + * + * This function handles vibrator 
machine state + * + * Returns: + * Returns value which indicates wether hrtimer should be restarted + **/ +static enum hrtimer_restart vibra_timer_expired(struct hrtimer *hrtimer) +{ + struct vibra_info *vinfo = + container_of(hrtimer, struct vibra_info, vibra_timer); + unsigned long flags; + + spin_lock_irqsave(&vinfo->vibra_lock, flags); + switch (vinfo->vibra_state) { + case STE_VIBRA_BOOST: + /* If BOOST finished and force, go to OFF */ + if (vinfo->state_force) + vinfo->vibra_state = STE_VIBRA_OFF; + else + vinfo->vibra_state = STE_VIBRA_ON; + vinfo->time_passed = vinfo->pdata->boost_time; + break; + case STE_VIBRA_ON: + vinfo->vibra_state = STE_VIBRA_OFF; + vinfo->time_passed = vinfo->timeout; + break; + case STE_VIBRA_OFF: + /* If OFF finished and force, go to ON */ + if (vinfo->state_force) + vinfo->vibra_state = STE_VIBRA_ON; + else + vinfo->vibra_state = STE_VIBRA_IDLE; + vinfo->time_passed += vinfo->pdata->off_time; + break; + case STE_VIBRA_IDLE: + break; + default: + break; + } + vinfo->state_force = false; + spin_unlock_irqrestore(&vinfo->vibra_lock, flags); + + queue_work(vinfo->vibra_workqueue, &vinfo->vibra_work); + + return HRTIMER_NORESTART; +} + +/** + * vibra_get_time() - Returns remaining time to disabling vibration + * @tdev: Pointer to timed output device structure + * + * This function returns time remaining to disabling vibration + * + * Returns: + * Returns remaining time to disabling vibration + **/ +static int vibra_get_time(struct timed_output_dev *tdev) +{ + struct vibra_info *vinfo = dev_get_drvdata(tdev->dev); + u32 ms; + + if (hrtimer_active(&vinfo->vibra_timer)) { + ktime_t remain = hrtimer_get_remaining(&vinfo->vibra_timer); + ms = (u32) ktime_to_ms(remain); + return ms + vinfo->time_passed; + } else + return 0; +} + +static int __devinit ste_timed_vibra_probe(struct platform_device *pdev) +{ + int ret; + struct vibra_info *vinfo; + + if (!pdev->dev.platform_data) { + dev_err(&pdev->dev, "No platform data supplied\n"); + return 
-ENODEV; + } + + vinfo = kmalloc(sizeof *vinfo, GFP_KERNEL); + if (!vinfo) { + dev_err(&pdev->dev, "failed to allocate memory\n"); + return -ENOMEM; + } + + vinfo->tdev.name = "vibrator"; + vinfo->tdev.enable = vibra_enable; + vinfo->tdev.get_time = vibra_get_time; + vinfo->time_passed = 0; + vinfo->vibra_state = STE_VIBRA_IDLE; + vinfo->state_force = false; + vinfo->pdata = pdev->dev.platform_data; + + if (vinfo->pdata->is_linear_vibra) + dev_info(&pdev->dev, "Linear Type Vibrators\n"); + else + dev_info(&pdev->dev, "Rotary Type Vibrators\n"); + + ret = timed_output_dev_register(&vinfo->tdev); + if (ret) { + dev_err(&pdev->dev, "failed to register timed output device\n"); + goto exit_free_vinfo; + } + + dev_set_drvdata(vinfo->tdev.dev, vinfo); + + vinfo->linear_workqueue = + create_singlethread_workqueue("ste-timed-linear-vibra"); + if (!vinfo->linear_workqueue) { + dev_err(&pdev->dev, "failed to allocate workqueue\n"); + ret = -ENOMEM; + goto exit_timed_output_unregister; + } + + /* Create workqueue just for timed output vibrator */ + vinfo->vibra_workqueue = + create_singlethread_workqueue("ste-timed-output-vibra"); + if (!vinfo->vibra_workqueue) { + dev_err(&pdev->dev, "failed to allocate workqueue\n"); + ret = -ENOMEM; + goto exit_destroy_workqueue; + } + + INIT_WORK(&vinfo->linear_work, linear_vibra_work); + INIT_WORK(&vinfo->vibra_work, vibra_control_work); + spin_lock_init(&vinfo->vibra_lock); + hrtimer_init(&vinfo->linear_tick, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init(&vinfo->vibra_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + vinfo->linear_tick.function = linear_vibra_tick; + vinfo->vibra_timer.function = vibra_timer_expired; + + platform_set_drvdata(pdev, vinfo); + return 0; + +exit_destroy_workqueue: + destroy_workqueue(vinfo->linear_workqueue); +exit_timed_output_unregister: + timed_output_dev_unregister(&vinfo->tdev); +exit_free_vinfo: + kfree(vinfo); + return ret; +} + +static int __devexit ste_timed_vibra_remove(struct platform_device 
*pdev) +{ + struct vibra_info *vinfo = platform_get_drvdata(pdev); + + timed_output_dev_unregister(&vinfo->tdev); + destroy_workqueue(vinfo->linear_workqueue); + destroy_workqueue(vinfo->vibra_workqueue); + kfree(vinfo); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver ste_timed_vibra_driver = { + .driver = { + .name = "ste_timed_output_vibra", + .owner = THIS_MODULE, + }, + .probe = ste_timed_vibra_probe, + .remove = __devexit_p(ste_timed_vibra_remove) +}; + +static int __init ste_timed_vibra_init(void) +{ + return platform_driver_register(&ste_timed_vibra_driver); +} +module_init(ste_timed_vibra_init); + +static void __exit ste_timed_vibra_exit(void) +{ + platform_driver_unregister(&ste_timed_vibra_driver); +} +module_exit(ste_timed_vibra_exit); + +MODULE_AUTHOR("Marcin Mielczarczyk <marcin.mielczarczyk@tieto.com>"); +MODULE_DESCRIPTION("STE Timed Output Vibrator"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c b/drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c index a272e488e5b..545e03d31fc 100644 --- a/drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c +++ b/drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c @@ -20,8 +20,9 @@ static struct synaptics_rmi4_platform_data rmi4_i2c_dev_platformdata = { .irq_number = NOMADIK_GPIO_TO_IRQ(84), .irq_type = (IRQF_TRIGGER_FALLING | IRQF_SHARED), - .x_flip = false, - .y_flip = true, + .x_flip = true, + .y_flip = false, + .regulator_en = true, }; struct i2c_board_info __initdata mop500_i2c3_devices_u8500[] = { diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c index 11728a03f8a..fd7fed743f7 100644 --- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c +++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c @@ -1,11 +1,10 @@ -/** - * +/* * Synaptics Register Mapped Interface (RMI4) I2C Physical Layer Driver. 
* Copyright (c) 2007-2010, Synaptics Incorporated * * Author: Js HA <js.ha@stericsson.com> for ST-Ericsson * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson - * Copyright 2010 (c) ST-Ericsson AB + * Copyright 2010 (c) ST-Ericsson SA */ /* * This file is licensed under the GPL2 license. @@ -27,6 +26,7 @@ #include <linux/input.h> #include <linux/slab.h> +#include <linux/delay.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/regulator/consumer.h> @@ -36,8 +36,10 @@ /* TODO: for multiple device support will need a per-device mutex */ #define DRIVER_NAME "synaptics_rmi4_i2c" +#define DELTA 8 #define MAX_ERROR_REPORT 6 -#define MAX_TOUCH_MAJOR 15 +#define TIMEOUT_PERIOD 1 +#define MAX_WIDTH_MAJOR 255 #define MAX_RETRY_COUNT 5 #define STD_QUERY_LEN 21 #define PAGE_LEN 2 @@ -45,6 +47,7 @@ #define BUF_LEN 37 #define QUERY_LEN 9 #define DATA_LEN 12 +#define RESUME_DELAY 100 /* msecs */ #define HAS_TAP 0x01 #define HAS_PALMDETECT 0x01 #define HAS_ROTATE 0x02 @@ -164,6 +167,8 @@ struct synaptics_rmi4_device_info { * @regulator: pointer to the regulator structure * @wait: wait queue structure variable * @touch_stopped: flag to stop the thread function + * @enable: flag to enable/disable the driver event. + * @resume_wq_handler: work queue for resume the device * * This structure gives the device data information. */ @@ -184,6 +189,8 @@ struct synaptics_rmi4_data { struct regulator *regulator; wait_queue_head_t wait; bool touch_stopped; + bool enable; + struct work_struct resume_wq_handler; }; /** @@ -291,6 +298,133 @@ exit: } /** + * synaptics_rmi4_enable() - enable the touchpad driver event + * @pdata: pointer to synaptics_rmi4_data structure + * + * This function is to enable the touchpad driver event and returns integer. 
+ */ +static int synaptics_rmi4_enable(struct synaptics_rmi4_data *pdata) +{ + int retval; + unsigned char intr_status; + + if (pdata->board->regulator_en) + regulator_enable(pdata->regulator); + enable_irq(pdata->board->irq_number); + pdata->touch_stopped = false; + + msleep(RESUME_DELAY); + retval = synaptics_rmi4_i2c_block_read(pdata, + pdata->fn01_data_base_addr + 1, + &intr_status, + pdata->number_of_interrupt_register); + if (retval < 0) + return retval; + + retval = synaptics_rmi4_i2c_byte_write(pdata, + pdata->fn01_ctrl_base_addr + 1, + (intr_status | TOUCHPAD_CTRL_INTR)); + if (retval < 0) + return retval; + + return 0; +} + +/** + * synaptics_rmi4_disable() - disable the touchpad driver event + * @pdata: pointer to synaptics_rmi4_data structure + * + * This function is to disable the driver event and returns integer. + */ + +static int synaptics_rmi4_disable(struct synaptics_rmi4_data *pdata) +{ + int retval; + unsigned char intr_status; + + pdata->touch_stopped = true; + disable_irq(pdata->board->irq_number); + + retval = synaptics_rmi4_i2c_block_read(pdata, + pdata->fn01_data_base_addr + 1, + &intr_status, + pdata->number_of_interrupt_register); + if (retval < 0) + return retval; + + retval = synaptics_rmi4_i2c_byte_write(pdata, + pdata->fn01_ctrl_base_addr + 1, + (intr_status & ~TOUCHPAD_CTRL_INTR)); + if (retval < 0) + return retval; + if (pdata->board->regulator_en) + regulator_disable(pdata->regulator); + + return 0; +} + +/** + * synaptics_rmi4_show_attr_enable() - show the touchpad enable value + * @dev: pointer to device data structure + * @attr: pointer to attribute structure + * @buf: pointer to character buffer + * + * This function is to show the touchpad enable value and returns ssize_t. 
+ */ +static ssize_t synaptics_rmi4_show_attr_enable(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct synaptics_rmi4_data *pdata = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", pdata->enable); +} + +/** + * synaptics_rmi4_store_attr_enable() - store the touchpad enable value + * @dev: pointer to device data structure + * @attr: pointer to attribute structure + * @buf: pointer to character buffer + * @count: number fo arguments + * + * This function is to store the touchpad enable value and returns ssize_t. + */ +static ssize_t synaptics_rmi4_store_attr_enable(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct synaptics_rmi4_data *pdata = dev_get_drvdata(dev); + unsigned long val; + int retval = 0; + + if (strict_strtoul(buf, 0, &val)) + return -EINVAL; + + if ((val != 0) && (val != 1)) + return -EINVAL; + + if (pdata->enable != val) { + pdata->enable = val ? true : false; + if (pdata->enable) + retval = synaptics_rmi4_enable(pdata); + else + retval = synaptics_rmi4_disable(pdata); + + } + return ((retval < 0) ? 
retval : count); +} + +static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO, + synaptics_rmi4_show_attr_enable, synaptics_rmi4_store_attr_enable); + +static struct attribute *synaptics_rmi4_attrs[] = { + &dev_attr_enable.attr, + NULL, +}; + +static struct attribute_group synaptics_rmi4_attr_group = { + .attrs = synaptics_rmi4_attrs, +}; + +/** * synpatics_rmi4_touchpad_report() - reports for the rmi4 touchpad device * @pdata: pointer to synaptics_rmi4_data structure * @rfi: pointer to synaptics_rmi4_fn structure @@ -316,8 +450,9 @@ static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata, unsigned char data[DATA_LEN]; int x[RMI4_NUMBER_OF_MAX_FINGERS]; int y[RMI4_NUMBER_OF_MAX_FINGERS]; - int wx[RMI4_NUMBER_OF_MAX_FINGERS]; - int wy[RMI4_NUMBER_OF_MAX_FINGERS]; + int w[RMI4_NUMBER_OF_MAX_FINGERS]; + static int prv_x[RMI4_NUMBER_OF_MAX_FINGERS]; + static int prv_y[RMI4_NUMBER_OF_MAX_FINGERS]; struct i2c_client *client = pdata->i2c_client; /* get 2D sensor finger data */ @@ -376,11 +511,7 @@ static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata, y[touch_count] = (data[1] << 4) | ((data[2] >> 4) & MASK_4BIT); - wy[touch_count] = - (data[3] >> 4) & MASK_4BIT; - wx[touch_count] = - (data[3] & MASK_4BIT); - + w[touch_count] = data[3]; if (pdata->board->x_flip) x[touch_count] = pdata->sensor_max_x - @@ -389,6 +520,25 @@ static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata, y[touch_count] = pdata->sensor_max_y - y[touch_count]; + if (x[touch_count] < 0) + x[touch_count] = 0; + else if (x[touch_count] >= pdata->sensor_max_x) + x[touch_count] = + pdata->sensor_max_x - 1; + + if (y[touch_count] < 0) + y[touch_count] = 0; + else if (y[touch_count] >= pdata->sensor_max_y) + y[touch_count] = + pdata->sensor_max_y - 1; + } + if ((abs(x[finger] - prv_x[finger]) < DELTA) && + (abs(y[finger] - prv_y[finger]) < DELTA)) { + x[finger] = prv_x[finger]; + y[finger] = prv_y[finger]; + } else { + prv_x[finger] = x[finger]; + 
prv_y[finger] = y[finger]; } /* number of active touch points */ touch_count++; @@ -399,7 +549,9 @@ static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata, if (touch_count) { for (finger = 0; finger < touch_count; finger++) { input_report_abs(pdata->input_dev, ABS_MT_TOUCH_MAJOR, - max(wx[finger] , wy[finger])); + max(x[finger] , y[finger])); + input_report_abs(pdata->input_dev, ABS_MT_WIDTH_MAJOR, + w[finger]); input_report_abs(pdata->input_dev, ABS_MT_POSITION_X, x[finger]); input_report_abs(pdata->input_dev, ABS_MT_POSITION_Y, @@ -502,7 +654,7 @@ static irqreturn_t synaptics_rmi4_irq(int irq, void *data) touch_count = synaptics_rmi4_sensor_report(pdata); if (touch_count) wait_event_timeout(pdata->wait, pdata->touch_stopped, - msecs_to_jiffies(1)); + msecs_to_jiffies(TIMEOUT_PERIOD)); else break; } while (!pdata->touch_stopped); @@ -881,9 +1033,27 @@ static int synaptics_rmi4_i2c_query_device(struct synaptics_rmi4_data *pdata) } /** + * synaptics_rmi4_resume_handler() - work queue for resume handler + * @work:work_struct structure pointer + * + * This work queue handler used to resume the device and returns none + */ +static void synaptics_rmi4_resume_handler(struct work_struct *work) +{ + struct synaptics_rmi4_data *prmi4_data = container_of(work, + struct synaptics_rmi4_data, resume_wq_handler); + struct i2c_client *client = prmi4_data->i2c_client; + int retval; + + retval = synaptics_rmi4_enable(prmi4_data); + if (retval < 0) + dev_err(&client->dev, "%s: resume failed\n", __func__); +} + +/** * synaptics_rmi4_probe() - Initialze the i2c-client touchscreen driver - * @i2c: i2c client structure pointer - * @id:i2c device id pointer + * @client: i2c client structure pointer + * @dev_id:i2c device id pointer * * This function will allocate and initialize the instance * data and request the irq and set the instance data as the clients @@ -927,19 +1097,17 @@ static int __devinit synaptics_rmi4_probe goto err_input; } - rmi4_data->regulator = 
regulator_get(&client->dev, "vdd"); - if (IS_ERR(rmi4_data->regulator)) { - dev_err(&client->dev, "%s:get regulator failed\n", - __func__); - retval = PTR_ERR(rmi4_data->regulator); - goto err_get_regulator; - } - retval = regulator_enable(rmi4_data->regulator); - if (retval < 0) { - dev_err(&client->dev, "%s:regulator enable failed\n", - __func__); - goto err_regulator_enable; + if (platformdata->regulator_en) { + rmi4_data->regulator = regulator_get(&client->dev, "vdd"); + if (IS_ERR(rmi4_data->regulator)) { + dev_err(&client->dev, "%s:get regulator failed\n", + __func__); + retval = PTR_ERR(rmi4_data->regulator); + goto err_regulator; + } + regulator_enable(rmi4_data->regulator); } + init_waitqueue_head(&rmi4_data->wait); /* * Copy i2c_client pointer into RTID's i2c_client pointer for @@ -987,7 +1155,16 @@ static int __devinit synaptics_rmi4_probe input_set_abs_params(rmi4_data->input_dev, ABS_MT_POSITION_Y, 0, rmi4_data->sensor_max_y, 0, 0); input_set_abs_params(rmi4_data->input_dev, ABS_MT_TOUCH_MAJOR, 0, - MAX_TOUCH_MAJOR, 0, 0); + max(rmi4_data->sensor_max_y, rmi4_data->sensor_max_y), + 0, 0); + input_set_abs_params(rmi4_data->input_dev, ABS_MT_WIDTH_MAJOR, 0, + MAX_WIDTH_MAJOR, 0, 0); + + retval = input_register_device(rmi4_data->input_dev); + if (retval) { + dev_err(&client->dev, "%s:input register failed\n", __func__); + goto err_input_register; + } /* Clear interrupts */ synaptics_rmi4_i2c_block_read(rmi4_data, @@ -1000,24 +1177,34 @@ static int __devinit synaptics_rmi4_probe if (retval) { dev_err(&client->dev, "%s:Unable to get attn irq %d\n", __func__, platformdata->irq_number); - goto err_query_dev; + goto err_request_irq; } - retval = input_register_device(rmi4_data->input_dev); + INIT_WORK(&rmi4_data->resume_wq_handler, synaptics_rmi4_resume_handler); + + /* sysfs implementation for dynamic enable/disable the input event */ + retval = sysfs_create_group(&client->dev.kobj, + &synaptics_rmi4_attr_group); if (retval) { - dev_err(&client->dev, "%s:input 
register failed\n", __func__); - goto err_free_irq; + dev_err(&client->dev, "failed to create sysfs entries\n"); + goto err_sysfs; } - + rmi4_data->enable = true; return retval; -err_free_irq: +err_sysfs: + cancel_work_sync(&rmi4_data->resume_wq_handler); +err_request_irq: free_irq(platformdata->irq_number, rmi4_data); + input_unregister_device(rmi4_data->input_dev); +err_input_register: + i2c_set_clientdata(client, NULL); err_query_dev: - regulator_disable(rmi4_data->regulator); -err_regulator_enable: - regulator_put(rmi4_data->regulator); -err_get_regulator: + if (platformdata->regulator_en) { + regulator_disable(rmi4_data->regulator); + regulator_put(rmi4_data->regulator); + } +err_regulator: input_free_device(rmi4_data->input_dev); rmi4_data->input_dev = NULL; err_input: @@ -1037,12 +1224,16 @@ static int __devexit synaptics_rmi4_remove(struct i2c_client *client) struct synaptics_rmi4_data *rmi4_data = i2c_get_clientdata(client); const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board; + sysfs_remove_group(&client->dev.kobj, &synaptics_rmi4_attr_group); rmi4_data->touch_stopped = true; wake_up(&rmi4_data->wait); + cancel_work_sync(&rmi4_data->resume_wq_handler); free_irq(pdata->irq_number, rmi4_data); input_unregister_device(rmi4_data->input_dev); - regulator_disable(rmi4_data->regulator); - regulator_put(rmi4_data->regulator); + if (pdata->regulator_en) { + regulator_disable(rmi4_data->regulator); + regulator_put(rmi4_data->regulator); + } kfree(rmi4_data); return 0; @@ -1059,31 +1250,11 @@ static int __devexit synaptics_rmi4_remove(struct i2c_client *client) static int synaptics_rmi4_suspend(struct device *dev) { /* Touch sleep mode */ - int retval; - unsigned char intr_status; struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); - const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board; - rmi4_data->touch_stopped = true; - disable_irq(pdata->irq_number); - - retval = synaptics_rmi4_i2c_block_read(rmi4_data, - 
rmi4_data->fn01_data_base_addr + 1, - &intr_status, - rmi4_data->number_of_interrupt_register); - if (retval < 0) - return retval; - - retval = synaptics_rmi4_i2c_byte_write(rmi4_data, - rmi4_data->fn01_ctrl_base_addr + 1, - (intr_status & ~TOUCHPAD_CTRL_INTR)); - if (retval < 0) - return retval; - - regulator_disable(rmi4_data->regulator); - - return 0; + return synaptics_rmi4_disable(rmi4_data); } + /** * synaptics_rmi4_resume() - resume the touch screen controller * @dev: pointer to device structure @@ -1093,28 +1264,9 @@ static int synaptics_rmi4_suspend(struct device *dev) */ static int synaptics_rmi4_resume(struct device *dev) { - int retval; - unsigned char intr_status; struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); - const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board; - - regulator_enable(rmi4_data->regulator); - enable_irq(pdata->irq_number); - rmi4_data->touch_stopped = false; - - retval = synaptics_rmi4_i2c_block_read(rmi4_data, - rmi4_data->fn01_data_base_addr + 1, - &intr_status, - rmi4_data->number_of_interrupt_register); - if (retval < 0) - return retval; - - retval = synaptics_rmi4_i2c_byte_write(rmi4_data, - rmi4_data->fn01_ctrl_base_addr + 1, - (intr_status | TOUCHPAD_CTRL_INTR)); - if (retval < 0) - return retval; + schedule_work(&rmi4_data->resume_wq_handler); return 0; } diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h index 384436ef806..973abc97374 100644 --- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h +++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h @@ -42,6 +42,7 @@ struct synaptics_rmi4_platform_data { int irq_type; bool x_flip; bool y_flip; + bool regulator_en; }; #endif diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 070b442c1f8..b2831ed01bb 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -63,6 +63,14 @@ config SERIAL_AMBA_PL011_CONSOLE your boot loader (lilo or loadlin) about how to pass 
options to the kernel at boot time.) +config SERIAL_AMBA_PL011_CLOCK_CONTROL + bool "Support for clock control on AMBA serial port" + depends on SERIAL_AMBA_PL011 + select CONSOLE_POLL + ---help--- + Say Y here if you wish to use amba set_termios function to control + the pl011 clock. Any positive baudrate passed enables clock, + config SERIAL_SB1250_DUART tristate "BCM1xxx on-chip DUART serial support" depends on SIBYTE_SB1xxx_SOC=y diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 3d569cd68f5..d356b940382 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -47,11 +47,13 @@ #include <linux/amba/serial.h> #include <linux/clk.h> #include <linux/slab.h> +#include <linux/regulator/consumer.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/scatterlist.h> #include <linux/delay.h> #include <linux/types.h> +#include <linux/pm_runtime.h> #include <asm/io.h> #include <asm/sizes.h> @@ -67,6 +69,37 @@ #define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE) #define UART_DUMMY_DR_RX (1 << 16) +/* + * The console UART is handled differently for power management (it doesn't + * take the regulator, in order to allow the system to go to sleep even if the + * console is open). This should be removed once cable detect is in place. 
+ */ +#ifdef CONFIG_SERIAL_CORE_CONSOLE +#define uart_console(port) ((port)->cons \ + && (port)->cons->index == (port)->line) +#else +#define uart_console(port) (0) +#endif + +/* Available amba pl011 port clock states */ +enum pl011_clk_states { + PL011_CLK_OFF = 0, /* clock disabled */ + PL011_CLK_REQUEST_OFF, /* disable after TX flushed */ + PL011_CLK_ON, /* clock enabled */ + PL011_PORT_OFF, /* port disabled */ +}; + +/* + * Backup registers to be used during regulator startup/shutdown + */ +static const u32 backup_regs[] = { + UART011_IBRD, + UART011_FBRD, + ST_UART011_LCRH_RX, + ST_UART011_LCRH_TX, + UART011_CR, + UART011_IMSC, +}; #define UART_WA_SAVE_NR 14 @@ -89,7 +122,9 @@ static const u32 uart_wa_reg[UART_WA_SAVE_NR] = { }; static u32 uart_wa_regdata[UART_WA_SAVE_NR]; -static DECLARE_TASKLET(pl011_lockup_tlet, pl011_lockup_wa, 0); +static unsigned int uart_wa_tlet_line; +static DECLARE_TASKLET(pl011_lockup_tlet, pl011_lockup_wa, + (unsigned long) &uart_wa_tlet_line); /* There is by now at least one vendor with differing details, so handle it */ struct vendor_data { @@ -158,10 +193,18 @@ struct uart_amba_port { unsigned int im; /* interrupt mask */ unsigned int old_status; unsigned int fifosize; /* vendor-specific */ + unsigned int ifls; /* vendor-specific */ unsigned int lcrh_tx; /* vendor-specific */ unsigned int lcrh_rx; /* vendor-specific */ unsigned int old_cr; /* state during shutdown */ bool autorts; +#ifdef CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL + enum pl011_clk_states clk_state; /* actual clock state */ + struct delayed_work clk_off_work; /* work used for clock off */ + unsigned int clk_off_delay; /* clock off delay */ +#endif + struct regulator *regulator; + u32 backup[ARRAY_SIZE(backup_regs)]; char type[12]; bool interrupt_may_hang; /* vendor-specific */ #ifdef CONFIG_DMA_ENGINE @@ -1070,13 +1113,17 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap) */ static void pl011_lockup_wa(unsigned long data) { - struct uart_amba_port 
*uap = amba_ports[0]; + struct uart_amba_port *uap = amba_ports[*(unsigned int *)data]; void __iomem *base = uap->port.membase; struct circ_buf *xmit = &uap->port.state->xmit; struct tty_struct *tty = uap->port.state->port.tty; int buf_empty_retries = 200; int loop; + /* Exit early if there is no tty */ + if (!tty) + return; + /* Stop HCI layer from submitting data for tx */ tty->hw_stopped = 1; while (!uart_circ_empty(xmit)) { @@ -1117,6 +1164,260 @@ static void pl011_lockup_wa(unsigned long data) tty->hw_stopped = 0; } +static void __pl011_startup(struct uart_amba_port *uap) +{ + unsigned int cr; + + writew(uap->ifls, uap->port.membase + UART011_IFLS); + + /* + * Provoke TX FIFO interrupt into asserting. + */ + cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE; + writew(cr, uap->port.membase + UART011_CR); + writew(0, uap->port.membase + UART011_FBRD); + writew(1, uap->port.membase + UART011_IBRD); + writew(0, uap->port.membase + uap->lcrh_rx); + if (uap->lcrh_tx != uap->lcrh_rx) { + int i; + /* + * Wait 10 PCLKs before writing LCRH_TX register, + * to get this delay write read only register 10 times + */ + for (i = 0; i < 10; ++i) + writew(0xff, uap->port.membase + UART011_MIS); + writew(0, uap->port.membase + uap->lcrh_tx); + } + writew(0, uap->port.membase + UART01x_DR); + while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY) + barrier(); +} + +/* Backup the registers during regulator startup/shutdown */ +#ifdef CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL +static int pl011_backup(struct uart_amba_port *uap, bool suspend) +{ + int i, cnt; + + if (!suspend) { + __pl011_startup(uap); + writew(0, uap->port.membase + UART011_CR); + } + + for (i = 0; i < ARRAY_SIZE(backup_regs); i++) { + if (suspend) + uap->backup[i] = readw(uap->port.membase + + backup_regs[i]); + else { + if (backup_regs[i] == ST_UART011_LCRH_TX) { + /* + * Wait 10 PCLKs before writing LCRH_TX + * register, to get this delay write read + * only register 10 times + */ + for (cnt = 0; 
cnt < 10; ++cnt) + writew(0xff, uap->port.membase + + UART011_MIS); + } + + writew(uap->backup[i], + uap->port.membase + backup_regs[i]); + } + } + return 0; +} +#endif + +#ifdef CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL +/* Turn clock off if TX buffer is empty, otherwise reschedule */ +static void pl011_clock_off(struct work_struct *work) +{ + struct uart_amba_port *uap = container_of(work, struct uart_amba_port, + clk_off_work.work); + struct uart_port *port = &uap->port; + struct circ_buf *xmit = &port->state->xmit; + unsigned long flags; + bool disable_regulator = false; + bool runtime_put = false; + unsigned int busy, interrupt_status; + + spin_lock_irqsave(&port->lock, flags); + + interrupt_status = readw(uap->port.membase + UART011_MIS); + busy = readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY; + + if (uap->clk_state == PL011_CLK_REQUEST_OFF) { + if (uart_circ_empty(xmit) && !interrupt_status && !busy) { + if (!uart_console(&uap->port) && uap->regulator) { + pl011_backup(uap, true); + disable_regulator = true; + } + runtime_put = true; + uap->clk_state = PL011_CLK_OFF; + clk_disable(uap->clk); + } else + schedule_delayed_work(&uap->clk_off_work, + uap->clk_off_delay); + } + + spin_unlock_irqrestore(&port->lock, flags); + + if (disable_regulator) + regulator_disable(uap->regulator); + if (runtime_put) + pm_runtime_put_sync(uap->port.dev); +} + +/* Request to turn off uart clock once pending TX is flushed */ +static void pl011_clock_request_off(struct uart_port *port) +{ + unsigned long flags; + struct uart_amba_port *uap = (struct uart_amba_port *)(port); + + spin_lock_irqsave(&port->lock, flags); + + if (uap->clk_state == PL011_CLK_ON) { + uap->clk_state = PL011_CLK_REQUEST_OFF; + /* Turn off later */ + schedule_delayed_work(&uap->clk_off_work, + uap->clk_off_delay); + } + + spin_unlock_irqrestore(&port->lock, flags); +} + +/* Request to immediately turn on uart clock */ +static void pl011_clock_on(struct uart_port *port) +{ + unsigned long flags; + 
struct uart_amba_port *uap = (struct uart_amba_port *)(port); + + spin_lock_irqsave(&port->lock, flags); + + switch (uap->clk_state) { + case PL011_CLK_OFF: + pm_runtime_get_sync(uap->port.dev); + clk_enable(uap->clk); + if (!uart_console(&uap->port) && uap->regulator) { + spin_unlock_irqrestore(&port->lock, flags); + regulator_enable(uap->regulator); + spin_lock_irqsave(&port->lock, flags); + pl011_backup(uap, false); + } + /* fallthrough */ + case PL011_CLK_REQUEST_OFF: + __cancel_delayed_work(&uap->clk_off_work); + uap->clk_state = PL011_CLK_ON; + break; + default: + break; + } + + spin_unlock_irqrestore(&port->lock, flags); +} + +static void pl011_clock_check(struct uart_amba_port *uap) +{ + /* Reshedule work during off request */ + if (uap->clk_state == PL011_CLK_REQUEST_OFF) + /* New TX - restart work */ + if (__cancel_delayed_work(&uap->clk_off_work)) + schedule_delayed_work(&uap->clk_off_work, + uap->clk_off_delay); +} + +static int pl011_power_startup(struct uart_amba_port *uap) +{ + int retval = 0; + + if (uap->clk_state == PL011_PORT_OFF) { + pm_runtime_get_sync(uap->port.dev); + if (!uart_console(&uap->port) && uap->regulator) + regulator_enable(uap->regulator); + retval = clk_enable(uap->clk); + if (!retval) { + uap->clk_state = PL011_CLK_ON; + } else { + uap->clk_state = PL011_PORT_OFF; + pm_runtime_put_sync(uap->port.dev); + } + } + + return retval; +} + +static void pl011_power_shutdown(struct uart_amba_port *uap) +{ + bool disable_regulator = false; + bool runtime_put = false; + + cancel_delayed_work_sync(&uap->clk_off_work); + + spin_lock_irq(&uap->port.lock); + if (uap->clk_state == PL011_CLK_ON || + uap->clk_state == PL011_CLK_REQUEST_OFF) { + clk_disable(uap->clk); + runtime_put = true; + if (!uart_console(&uap->port) && uap->regulator) + disable_regulator = true; + } + uap->clk_state = PL011_PORT_OFF; + spin_unlock_irq(&uap->port.lock); + + if (disable_regulator) + regulator_disable(uap->regulator); + if (runtime_put) + 
pm_runtime_put_sync(uap->port.dev); +} + +static void +pl011_clock_control(struct uart_port *port, struct ktermios *termios, + struct ktermios *old) +{ + speed_t new_baud = tty_termios_baud_rate(termios); + + if (new_baud == 0) + pl011_clock_request_off(port); + else + pl011_clock_on(port); +} + +static void pl011_clock_control_init(struct uart_amba_port *uap) +{ + uap->clk_state = PL011_PORT_OFF; + INIT_DELAYED_WORK(&uap->clk_off_work, pl011_clock_off); + uap->clk_off_delay = HZ / 10; /* 100 ms */ +} + +#else +/* Blank functions for clock control */ +static inline void pl011_clock_check(struct uart_amba_port *uap) +{ +} + +static inline int pl011_power_startup(struct uart_amba_port *uap) +{ + pm_runtime_get_sync(uap->port.dev); + return clk_enable(uap->clk); +} + +static inline void pl011_power_shutdown(struct uart_amba_port *uap) +{ + clk_disable(uap->clk); + pm_runtime_put_sync(uap->port.dev); +} + +static inline void +pl011_clock_control(struct uart_port *port, struct ktermios *termios, + struct ktermios *old) +{ +} + +static inline void pl011_clock_control_init(struct uart_amba_port *uap) +{ +} +#endif + static void pl011_stop_tx(struct uart_port *port) { struct uart_amba_port *uap = (struct uart_amba_port *)port; @@ -1208,6 +1509,9 @@ static void pl011_tx_chars(struct uart_amba_port *uap) break; } while (--count > 0); + if (count) + pl011_clock_check(uap); + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&uap->port); @@ -1253,7 +1557,7 @@ static irqreturn_t pl011_int(int irq, void *dev_id) do { writew(status & ~(UART011_TXIS|UART011_RTIS| UART011_RXIS), - uap->port.membase + UART011_ICR); + uap->port.membase + UART011_ICR); if (status & (UART011_RTIS|UART011_RXIS)) { if (pl011_dma_rx_running(uap)) @@ -1268,8 +1572,10 @@ static irqreturn_t pl011_int(int irq, void *dev_id) pl011_tx_chars(uap); if (pass_counter-- == 0) { - if (uap->interrupt_may_hang) + if (uap->interrupt_may_hang) { + uart_wa_tlet_line = uap->port.line; 
tasklet_schedule(&pl011_lockup_tlet); + } break; } @@ -1389,9 +1695,9 @@ static int pl011_startup(struct uart_port *port) goto out; /* - * Try to enable the clock producer. + * Try to enable the clock producer and the regulator. */ - retval = clk_enable(uap->clk); + retval = pl011_power_startup(uap); if (retval) goto clk_unprep; @@ -1408,29 +1714,7 @@ static int pl011_startup(struct uart_port *port) if (retval) goto clk_dis; - writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS); - - /* - * Provoke TX FIFO interrupt into asserting. - */ - cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE; - writew(cr, uap->port.membase + UART011_CR); - writew(0, uap->port.membase + UART011_FBRD); - writew(1, uap->port.membase + UART011_IBRD); - writew(0, uap->port.membase + uap->lcrh_rx); - if (uap->lcrh_tx != uap->lcrh_rx) { - int i; - /* - * Wait 10 PCLKs before writing LCRH_TX register, - * to get this delay write read only register 10 times - */ - for (i = 0; i < 10; ++i) - writew(0xff, uap->port.membase + UART011_MIS); - writew(0, uap->port.membase + uap->lcrh_tx); - } - writew(0, uap->port.membase + UART01x_DR); - while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY) - barrier(); + __pl011_startup(uap); /* restore RTS and DTR */ cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR); @@ -1471,7 +1755,7 @@ static int pl011_startup(struct uart_port *port) return 0; clk_dis: - clk_disable(uap->clk); + pl011_power_shutdown(uap); clk_unprep: clk_unprepare(uap->clk); out: @@ -1529,10 +1813,18 @@ static void pl011_shutdown(struct uart_port *port) if (uap->lcrh_rx != uap->lcrh_tx) pl011_shutdown_channel(uap, uap->lcrh_tx); + if (uap->port.dev->platform_data) { + struct amba_pl011_data *plat; + + plat = uap->port.dev->platform_data; + if (plat->exit) + plat->exit(); + } + /* - * Shut down the clock producer + * Shut down the clock producer and the producer */ - clk_disable(uap->clk); + pl011_power_shutdown(uap); clk_unprepare(uap->clk); if 
(uap->port.dev->platform_data) { @@ -1545,6 +1837,32 @@ static void pl011_shutdown(struct uart_port *port) } +/* Power/Clock management. */ +static void pl011_serial_pm(struct uart_port *port, unsigned int state, +unsigned int oldstate) +{ + struct uart_amba_port *uap = (struct uart_amba_port *)port; + + switch (state) { + case 0: /*fully on */ + /* + * Enable the peripheral clock for this serial port. + * This is called on uart_open() or a resume event. + */ + pl011_power_startup(uap); + break; + case 3: /* powered down */ + /* + * Disable the peripheral clock for this serial port. + * This is called on uart_close() or a suspend event. + */ + pl011_power_shutdown(uap); + break; + default: + printk(KERN_ERR "pl011_serial: unknown pm %d\n", state); + } +} + static void pl011_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) @@ -1558,7 +1876,12 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios, clkdiv = 8; else clkdiv = 16; - + /* + * Must be before uart_get_baud_rate() call, because + * this function changes baudrate to default in case of 0 + * B0 hangup !!! + */ + pl011_clock_control(port, termios, old); /* * Ask the core to calculate the divisor for us. 
*/ @@ -1746,14 +2069,13 @@ static struct uart_ops amba_pl011_pops = { .request_port = pl010_request_port, .config_port = pl010_config_port, .verify_port = pl010_verify_port, + .pm = pl011_serial_pm, #ifdef CONFIG_CONSOLE_POLL .poll_get_char = pl010_get_poll_char, .poll_put_char = pl010_put_poll_char, #endif }; -static struct uart_amba_port *amba_ports[UART_NR]; - #ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE static void pl011_console_putchar(struct uart_port *port, int ch) @@ -1897,6 +2219,13 @@ static struct console amba_console = { .data = &amba_reg, }; +static int __init pl011_console_init(void) +{ + register_console(&amba_console); + return 0; +} +console_initcall(pl011_console_init); + #define AMBA_CONSOLE (&amba_console) #else #define AMBA_CONSOLE NULL @@ -1911,7 +2240,6 @@ static struct uart_driver amba_reg = { .nr = UART_NR, .cons = AMBA_CONSOLE, }; - static int pl011_probe(struct amba_device *dev, const struct amba_id *id) { struct uart_amba_port *uap; @@ -1940,6 +2268,12 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id) goto free; } + uap->regulator = regulator_get(&dev->dev, "v-uart"); + if (IS_ERR(uap->regulator)) { + dev_warn(&dev->dev, "could not get uart regulator\n"); + uap->regulator = NULL; + } + uap->clk = clk_get(&dev->dev, NULL); if (IS_ERR(uap->clk)) { ret = PTR_ERR(uap->clk); @@ -1947,6 +2281,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id) } uap->vendor = vendor; + uap->ifls = vendor->ifls; uap->lcrh_rx = vendor->lcrh_rx; uap->lcrh_tx = vendor->lcrh_tx; uap->old_cr = 0; @@ -1972,18 +2307,30 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id) amba_ports[i] = uap; amba_set_drvdata(dev, uap); + + pm_runtime_irq_safe(&dev->dev); + + pl011_clock_control_init(uap); + ret = uart_add_one_port(&amba_reg, &uap->port); + + if (!ret) + pm_runtime_put(&dev->dev); + if (ret) { amba_set_drvdata(dev, NULL); amba_ports[i] = NULL; pl011_dma_remove(uap); clk_put(uap->clk); unmap: + if 
(uap->regulator) + regulator_put(uap->regulator); iounmap(base); free: kfree(uap); } out: + return ret; } @@ -1994,6 +2341,8 @@ static int pl011_remove(struct amba_device *dev) amba_set_drvdata(dev, NULL); + pm_runtime_get_sync(uap->port.dev); + uart_remove_one_port(&amba_reg, &uap->port); for (i = 0; i < ARRAY_SIZE(amba_ports); i++) @@ -2002,6 +2351,8 @@ static int pl011_remove(struct amba_device *dev) pl011_dma_remove(uap); iounmap(uap->port.membase); + if (uap->regulator) + regulator_put(uap->regulator); clk_put(uap->clk); kfree(uap); return 0; @@ -2014,7 +2365,12 @@ static int pl011_suspend(struct amba_device *dev, pm_message_t state) if (!uap) return -EINVAL; +#ifdef CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL + cancel_delayed_work_sync(&uap->clk_off_work); + if (uap->clk_state == PL011_CLK_OFF) + return 0; +#endif return uart_suspend_port(&amba_reg, &uap->port); } @@ -2024,6 +2380,10 @@ static int pl011_resume(struct amba_device *dev) if (!uap) return -EINVAL; +#ifdef CONFIG_SERIAL_AMBA_PL011_CLOCK_CONTROL + if (uap->clk_state == PL011_CLK_OFF) + return 0; +#endif return uart_resume_port(&amba_reg, &uap->port); } @@ -2082,7 +2442,7 @@ static void __exit pl011_exit(void) * While this can be a module, if builtin it's most likely the console * So let's leave module_exit but move module_init to an earlier place */ -arch_initcall(pl011_init); +subsys_initcall(pl011_init); module_exit(pl011_exit); MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd"); diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 9a56635dc19..b3eeb3e1d60 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -1211,6 +1211,19 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) * and flush any outstanding URBs. */ } else { +#ifdef CONFIG_USB_OTG + /* According to OTG supplement Rev 2.0 section 6.3 + * Unless an A-device enables b_hnp_enable before entering + * suspend it shall also continue polling while the bus is + * suspended. 
+ * + * We don't have to perform HNP polling, as we are going to + * enable b_hnp_enable before suspending. + */ + if (udev->bus->hnp_support && + udev->portnum == udev->bus->otg_port) + cancel_delayed_work(&udev->bus->hnp_polling); +#endif udev->can_submit = 0; for (i = 0; i < 16; ++i) { usb_hcd_flush_endpoint(udev, udev->ep_out[i]); @@ -1274,6 +1287,44 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg) return status; } +#ifdef CONFIG_USB_OTG +void usb_hnp_polling_work(struct work_struct *work) +{ + int ret; + struct usb_bus *bus = + container_of(work, struct usb_bus, hnp_polling.work); + struct usb_device *udev = bus->root_hub->children[bus->otg_port - 1]; + u8 *status = kmalloc(sizeof(*status), GFP_KERNEL); + + if (!status) + return; + + ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), + USB_REQ_GET_STATUS, USB_DIR_IN | USB_RECIP_DEVICE, + 0, OTG_STATUS_SELECTOR, status, sizeof(*status), + USB_CTRL_GET_TIMEOUT); + if (ret < 0) { + /* Peripheral may not be supporting HNP polling */ + dev_vdbg(&udev->dev, "HNP polling failed. status %d\n", ret); + ret = usb_suspend_both(udev, PMSG_USER_SUSPEND); + goto out; + } + + /* Spec says host must suspend the bus with in 2 sec. */ + if (*status & (1 << HOST_REQUEST_FLAG)) { + unbind_no_pm_drivers_interfaces(udev); + ret = usb_suspend_both(udev, PMSG_USER_SUSPEND); + if (ret) + dev_info(&udev->dev, "suspend failed\n"); + } else { + schedule_delayed_work(&bus->hnp_polling, + msecs_to_jiffies(THOST_REQ_POLL)); + } +out: + kfree(status); +} +#endif + static void choose_wakeup(struct usb_device *udev, pm_message_t msg) { int w; @@ -1620,7 +1671,7 @@ EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume); /* Internal routine to check whether we may autosuspend a device. 
*/ static int autosuspend_check(struct usb_device *udev) { - int w, i; + int w, i, audio_class = 0; struct usb_interface *intf; /* Fail if autosuspend is disabled, or any interfaces are in use, or @@ -1654,13 +1705,28 @@ static int autosuspend_check(struct usb_device *udev) intf->needs_remote_wakeup) return -EOPNOTSUPP; } + + if (intf->cur_altsetting->desc.bInterfaceClass + == USB_CLASS_AUDIO) { + dev_dbg(&udev->dev, + "audio interface class present\n"); + audio_class = 1; + } } + if (audio_class) { + dev_dbg(&udev->dev, + "disabling remote wakeup for audio class\n"); + udev->do_remote_wakeup = 0; + } else { + if (w && !device_can_wakeup(&udev->dev)) { + dev_dbg(&udev->dev, + "remote wakeup needed for autosuspend\n"); + return -EOPNOTSUPP; + } + udev->do_remote_wakeup = w; + } + } - if (w && !device_can_wakeup(&udev->dev)) { - dev_dbg(&udev->dev, "remote wakeup needed for autosuspend\n"); - return -EOPNOTSUPP; - } - udev->do_remote_wakeup = w; return 0; } diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 140d3e11f21..f4f78a30b1d 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -897,6 +897,9 @@ static void usb_bus_init (struct usb_bus *bus) bus->bandwidth_isoc_reqs = 0; INIT_LIST_HEAD (&bus->bus_list); +#ifdef CONFIG_USB_OTG + INIT_DELAYED_WORK(&bus->hnp_polling, usb_hnp_polling_work); +#endif } /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index ec6c97dadbe..f78df1d3163 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -30,6 +30,12 @@ #include "usb.h" +#ifdef CONFIG_ARCH_U8500 +#define MAX_TOPO_LEVEL_U8500 2 +#define MAX_USB_DEVICE_U8500 8 +int usb_device_count; +#endif + /* if we are in debug mode, always announce new devices */ #ifdef DEBUG #ifndef CONFIG_USB_ANNOUNCE_NEW_DEVICES @@ -633,7 +639,7 @@ static int hub_hub_status(struct usb_hub *hub, "%s failed (err = %d)\n", __func__, ret); else { *status = 
le16_to_cpu(hub->status->hub.wHubStatus); - *change = le16_to_cpu(hub->status->hub.wHubChange); + *change = le16_to_cpu(hub->status->hub.wHubChange); ret = 0; } mutex_unlock(&hub->status_mutex); @@ -1326,11 +1332,20 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id) /* Hubs have proper suspend/resume support. */ usb_enable_autosuspend(hdev); +#ifdef CONFIG_ARCH_U8500 + if (hdev->level > MAX_TOPO_LEVEL_U8500) { + dev_err(&intf->dev, + "Unsupported bus topology: > %d " + " hub nesting\n", MAX_TOPO_LEVEL_U8500); + return -E2BIG; + } +#else if (hdev->level == MAX_TOPO_LEVEL) { dev_err(&intf->dev, "Unsupported bus topology: hub nested too deep\n"); return -E2BIG; } +#endif #ifdef CONFIG_USB_OTG_BLACKLIST_HUB if (hdev->parent) { @@ -1612,12 +1627,14 @@ static void choose_devnum(struct usb_device *udev) * bus->devnum_next. */ devnum = find_next_zero_bit(bus->devmap.devicemap, 128, bus->devnum_next); - if (devnum >= 128) + /* Due to Hardware bugs we need to reserve a device address + * for flushing of endpoints. */ + if (devnum >= 127) devnum = find_next_zero_bit(bus->devmap.devicemap, 128, 1); - bus->devnum_next = ( devnum >= 127 ? 1 : devnum + 1); + bus->devnum_next = devnum >= 126 ? 
1 : devnum + 1; } - if (devnum < 128) { + if (devnum < 127) { set_bit(devnum, bus->devmap.devicemap); udev->devnum = devnum; } @@ -1676,6 +1693,12 @@ void usb_disconnect(struct usb_device **pdev) dev_info(&udev->dev, "USB disconnect, device number %d\n", udev->devnum); +#ifdef CONFIG_USB_OTG_20 + if (udev->bus->hnp_support && udev->portnum == udev->bus->otg_port) { + cancel_delayed_work_sync(&udev->bus->hnp_polling); + udev->bus->hnp_support = 0; + } +#endif usb_lock_device(udev); /* Free up all the children before we remove this device */ @@ -1752,11 +1775,13 @@ static inline void announce_device(struct usb_device *udev) { } * * Finish enumeration for On-The-Go devices */ + +#ifdef CONFIG_USB_OTG_20 + static int usb_enumerate_device_otg(struct usb_device *udev) { int err = 0; -#ifdef CONFIG_USB_OTG /* * OTG-aware devices on OTG-capable root hubs may be able to use SRP, * to wake us after we've powered off VBUS; and HNP, switching roles @@ -1780,6 +1805,84 @@ static int usb_enumerate_device_otg(struct usb_device *udev) (port1 == bus->otg_port) ? "" : "non-"); + if (port1 != bus->otg_port) + goto out; + bus->hnp_support = 1; + + err = usb_control_msg(udev, + usb_sndctrlpipe(udev, 0), + USB_REQ_SET_FEATURE, 0, + USB_DEVICE_A_HNP_SUPPORT, + 0, NULL, 0, USB_CTRL_SET_TIMEOUT); + if (err < 0) { + /* OTG MESSAGE: report errors here, + * customize to match your product. + */ + dev_info(&udev->dev, + "can't set HNP mode: %d\n", + err); + bus->hnp_support = 0; + } + } + } + } + +out: + if (!is_targeted(udev)) { + + /* Maybe it can talk to us, though we can't talk to it. + * (Includes HNP test device.) + */ + if (udev->bus->hnp_support) { + err = usb_port_suspend(udev, PMSG_SUSPEND); + if (err < 0) + dev_dbg(&udev->dev, "HNP fail, %d\n", err); + } + } else if (udev->bus->hnp_support && + udev->portnum == udev->bus->otg_port) { + /* HNP polling is introduced in OTG supplement Rev 2.0 + * and older devices does not support. Work is not + * re-armed if device returns STALL. 
B-Host also performs + * HNP polling. + */ + schedule_delayed_work(&udev->bus->hnp_polling, + msecs_to_jiffies(THOST_REQ_POLL)); + } +fail: + + return err; +} + +#else + +static int usb_enumerate_device_otg(struct usb_device *udev) +{ + int err = 0; + +#ifdef CONFIG_USB_OTG + /* + * OTG-aware devices on OTG-capable root hubs may be able to use SRP, + * to wake us after we've powered off VBUS; and HNP, switching roles + * "host" to "peripheral". The OTG descriptor helps figure this out. + */ + if (!udev->bus->is_b_host + && udev->config + && udev->parent == udev->bus->root_hub) { + struct usb_otg_descriptor *desc = NULL; + struct usb_bus *bus = udev->bus; + + /* descriptor may appear anywhere in config */ + if (__usb_get_extra_descriptor(udev->rawdescriptors[0], + le16_to_cpu(udev->config[0].desc.wTotalLength), + USB_DT_OTG, (void **) &desc) == 0) { + if (desc->bmAttributes & USB_OTG_HNP) { + unsigned port1 = udev->portnum; + + dev_info(&udev->dev, + "Dual-Role OTG device on %sHNP port\n", + (port1 == bus->otg_port) + ? 
"" : "non-"); + /* enable HNP before suspend, it's simpler */ if (port1 == bus->otg_port) bus->b_hnp_enable = 1; @@ -1821,6 +1924,7 @@ fail: return err; } +#endif /** * usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal) @@ -2480,6 +2584,21 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) if (udev->usb2_hw_lpm_enabled == 1) usb_set_usb2_hardware_lpm(udev, 0); +#ifdef CONFIG_USB_OTG_20 + if (!udev->bus->is_b_host && udev->bus->hnp_support && + udev->portnum == udev->bus->otg_port) { + status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + USB_REQ_SET_FEATURE, 0, + USB_DEVICE_B_HNP_ENABLE, + 0, NULL, 0, USB_CTRL_SET_TIMEOUT); + if (status < 0) + dev_dbg(&udev->dev, "can't enable HNP on port %d, " + "status %d\n", port1, status); + else + udev->bus->b_hnp_enable = 1; + } +#endif + /* see 7.1.7.6 */ if (hub_is_superspeed(hub->hdev)) status = set_port_feature(hub->hdev, @@ -2634,6 +2753,12 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg) int status; u16 portchange, portstatus; +#ifdef CONFIG_USB_OTG_20 + if (!udev->bus->is_b_host && udev->bus->hnp_support && + udev->portnum == udev->bus->otg_port) + udev->bus->b_hnp_enable = 0; +#endif + /* Skip the initial Clear-Suspend step for a remote wakeup */ status = hub_port_status(hub, port1, &portstatus, &portchange); if (status == 0 && !port_is_suspended(hub, portstatus)) @@ -2837,7 +2962,7 @@ EXPORT_SYMBOL_GPL(usb_root_hub_lost_power); * Between connect detection and reset signaling there must be a delay * of 100ms at least for debounce and power-settling. The corresponding * timer shall restart whenever the downstream port detects a disconnect. - * + * * Apparently there are some bluetooth and irda-dongles and a number of * low-speed devices for which this debounce period may last over a second. * Not covered by the spec - but easy to deal with. 
@@ -3035,7 +3160,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, udev->tt = &hub->tt; udev->ttport = port1; } - + /* Why interleave GET_DESCRIPTOR and SET_ADDRESS this way? * Because device hardware and firmware is sometimes buggy in * this area, and this is how Linux has done it for ages. @@ -3195,7 +3320,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i); usb_ep0_reinit(udev); } - + retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE); if (retval < (signed)sizeof(udev->descriptor)) { dev_err(&udev->dev, "device descriptor read/all, error %d\n", @@ -3425,6 +3550,22 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, goto loop; } +#ifdef CONFIG_ARCH_U8500 + if (hdev->parent == NULL) + usb_device_count = 1; + + if (usb_device_count > MAX_USB_DEVICE_U8500) { + + dev_err(&udev->dev, + "device connected is more than %d\n", + MAX_USB_DEVICE_U8500); + + status = -ENOTCONN; /* Don't retry */ + goto loop; + } +#endif + + /* reset (non-USB 3.0 devices) and get descriptor */ status = hub_port_init(hub, udev, port1, i); if (status < 0) @@ -3464,7 +3605,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, goto loop_disable; } } - + /* check for devices running slower than they could */ if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0200 && udev->speed == USB_SPEED_FULL @@ -3505,6 +3646,115 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, if (status) dev_dbg(hub_dev, "%dmA power budget left\n", status); +#ifdef CONFIG_USB_OTG_20 + struct usb_otg_descriptor *desc = NULL; + int ret; + /* descriptor may appear anywhere in config */ + __usb_get_extra_descriptor(udev->rawdescriptors[0], + le16_to_cpu(udev->config[0].desc.wTotalLength), + USB_DT_OTG, (void **) &desc); + + ret = usb_control_msg(udev, + usb_sndctrlpipe(udev, 0), + USB_REQ_SET_FEATURE, 0, + USB_DEVICE_B_HNP_ENABLE, + 0, NULL, 0, USB_CTRL_SET_TIMEOUT); + 
if (ret < 0) + dev_dbg(hub_dev, "set feature error\n"); + + u16 idVendor = le16_to_cpu(udev->descriptor.idVendor); + if (idVendor == USB_OTG_TEST_MODE_VID) { + u16 wValue, typeReq, wIndex; + u32 set_feature = 0; + int err = 0; + struct usb_hcd *hcd = bus_to_hcd(udev->bus); + u16 idProduct = le16_to_cpu( + udev->descriptor.idProduct); + /* Convert the Test Mode Request + * to control request + */ + wValue = USB_PORT_FEAT_TEST; + typeReq = SetPortFeature; + wIndex = 1; + + switch (idProduct) { + case USB_OTG_TEST_SE0_NAK_PID: + wIndex |= USB_OTG_TEST_SE0_NAK << 8; + set_feature = 1; + break; + case USB_OTG_TEST_J_PID: + wIndex |= USB_OTG_TEST_J << 8; + set_feature = 1; + break; + case USB_OTG_TEST_K_PID: + wIndex |= USB_OTG_TEST_K << 8; + set_feature = 1; + break; + case USB_OTG_TEST_PACKET_PID: + wIndex |= USB_OTG_TEST_PACKET << 8; + set_feature = 1; + break; + case USB_OTG_TEST_HS_HOST_PORT_SUSPEND_RESUME_PID: + /* Sleep for 15 sec. Suspend + * for 15 Sec, Then Resume + */ + ssleep(15); + + err = usb_port_suspend(udev, + PMSG_SUSPEND); + if (err < 0) { + dev_err(&udev->dev, "OTG TEST_MODE:" + "Suspend Fail, %d\n", err); + goto loop_disable; + } + ssleep(15); + err = usb_port_resume(udev, PMSG_RESUME); + if (err < 0) { + dev_err(&udev->dev, + "can't resume for" + "OTG TEST_MODE: %d\n", err); + goto loop_disable; + } + break; + case USB_OTG_TEST_SINGLE_STEP_GET_DEV_DESC_PID: + /* Sleep for 15 Sec. Issue the GetDeviceDescriptor */ + ssleep(15); + err = usb_get_device_descriptor(udev, + sizeof(udev->descriptor)); + if (err < 0) { + dev_err(&udev->dev, "can't re-read" + "device descriptor for " + "OTG TEST MODE: %d\n", err); + goto loop_disable; + } + break; + case USB_OTG_TEST_SINGLE_STEP_GET_DEV_DESC_DATA_PID: + /* Issue GetDeviceDescriptor, Sleep for 15 Sec. 
*/ + err = usb_get_device_descriptor(udev, + sizeof(udev->descriptor)); + if (err < 0) { + dev_err(&udev->dev, "can't re-read" + "device descriptor for " + "OTG TEST MODE: %d\n", err); + goto loop_disable; + } + ssleep(15); + break; + default: + /* is_targeted() will take care for wrong PID */ + dev_dbg(&udev->dev, "OTG TEST_MODE:Wrong" + "PID %d\n", idProduct); + break; + } + + if (set_feature) { + err = hcd->driver->hub_control(hcd, + typeReq, wValue, wIndex, + NULL, 0); + } + } + +#endif return; loop_disable: @@ -3522,7 +3772,7 @@ loop: !(hcd->driver->port_handed_over)(hcd, port1)) dev_err(hub_dev, "unable to enumerate USB device on port %d\n", port1); - + done: hub_port_disable(hub, port1, 1); if (hcd->driver->relinquish_port && !hub->hdev->parent) @@ -3689,7 +3939,7 @@ static void hub_events(void) * EM interference sometimes causes badly * shielded USB devices to be shutdown by * the hub, this hack enables them again. - * Works at least with mouse driver. + * Works at least with mouse driver. */ if (!(portstatus & USB_PORT_STAT_ENABLE) && !connect_change @@ -4029,7 +4279,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev) if (ret < 0) goto re_enumerate; - + /* Device might have changed firmware (DFU or similar) */ if (descriptors_changed(udev, &descriptor)) { dev_info(&udev->dev, "device firmware changed\n"); @@ -4062,6 +4312,16 @@ static int usb_reset_and_verify_device(struct usb_device *udev) goto re_enumerate; } mutex_unlock(hcd->bandwidth_mutex); + +#ifdef CONFIG_USB_OTG_20 + ret = usb_control_msg(udev, + usb_sndctrlpipe(udev, 0), + USB_REQ_SET_FEATURE, 0, + USB_DEVICE_A_HNP_SUPPORT, + 0, NULL, 0, USB_CTRL_SET_TIMEOUT); + if (ret < 0) + dev_err(&udev->dev, "set feature error\n"); +#endif usb_set_device_state(udev, USB_STATE_CONFIGURED); /* Put interfaces back into the same altsettings as before. 
@@ -4102,7 +4362,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev) done: return 0; - + re_enumerate: hub_port_logical_disconnect(parent_hub, port1); return -ENODEV; diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c index 7728c91dfa2..a5fdc3ac0d7 100644 --- a/drivers/usb/core/notify.c +++ b/drivers/usb/core/notify.c @@ -46,11 +46,18 @@ EXPORT_SYMBOL_GPL(usb_unregister_notify); void usb_notify_add_device(struct usb_device *udev) { +#ifdef CONFIG_ARCH_U8500 + usb_device_count++; +#endif + blocking_notifier_call_chain(&usb_notifier_list, USB_DEVICE_ADD, udev); } void usb_notify_remove_device(struct usb_device *udev) { +#ifdef CONFIG_ARCH_U8500 + usb_device_count--; +#endif /* Protect against simultaneous usbfs open */ mutex_lock(&usbfs_mutex); blocking_notifier_call_chain(&usb_notifier_list, diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h index e8cdce571bb..86a09972836 100644 --- a/drivers/usb/core/otg_whitelist.h +++ b/drivers/usb/core/otg_whitelist.h @@ -43,12 +43,50 @@ static struct usb_device_id whitelist_table [] = { { USB_DEVICE(0x0525, 0xa4a0), }, #endif +#ifdef CONFIG_USB_OTG_20 +{ USB_DEVICE_INFO(8, 6, 80) },/* Mass Storage Devices */ +{ USB_DEVICE_INFO(1, 1, 0) },/* Audio Devices */ +{ USB_DEVICE_INFO(3, 0, 0) },/* keyboard Devices */ +{ USB_DEVICE_INFO(3, 1, 2) },/* Mouse Devices */ + +/* Test Devices */ +{ USB_DEVICE(0x1A0A, 0x0101), },/* Test_SE0_NAK */ +{ USB_DEVICE(0x1A0A, 0x0102), },/* Test_J */ +{ USB_DEVICE(0x1A0A, 0x0103), },/* Test_K */ +{ USB_DEVICE(0x1A0A, 0x0104), },/* Test_Packet */ +{ USB_DEVICE(0x1A0A, 0x0106), },/* HS_HOST_PORT_SUSPEND_RESUME */ +{ USB_DEVICE(0x1A0A, 0x0107), },/* SINGLE_STEP_GET_DEV_DESC */ +{ USB_DEVICE(0x1A0A, 0x0108), },/* SINGLE_STEP_ GET_DEV_DESC_DATA*/ +{ USB_DEVICE(0x1A0A, 0x0201), },/* OTG 2 TEST DEVICE*/ +#endif { } /* Terminating entry */ }; +/* The TEST_MODE Definition for OTG as per 6.4 of OTG Rev 2.0 */ + +#ifdef CONFIG_USB_OTG_20 +#define 
USB_OTG_TEST_MODE_VID 0x1A0A +#define USB_OTG_TEST_SE0_NAK_PID 0x0101 +#define USB_OTG_TEST_J_PID 0x0102 +#define USB_OTG_TEST_K_PID 0x0103 +#define USB_OTG_TEST_PACKET_PID 0x0104 +#define USB_OTG_TEST_HS_HOST_PORT_SUSPEND_RESUME_PID 0x0106 +#define USB_OTG_TEST_SINGLE_STEP_GET_DEV_DESC_PID 0x0107 +#define USB_OTG_TEST_SINGLE_STEP_GET_DEV_DESC_DATA_PID 0x0108 + +#define USB_OTG_TEST_SE0_NAK 0x01 +#define USB_OTG_TEST_J 0x02 +#define USB_OTG_TEST_K 0x03 +#define USB_OTG_TEST_PACKET 0x04 +#endif + static int is_targeted(struct usb_device *dev) { struct usb_device_id *id = whitelist_table; +#ifdef CONFIG_USB_OTG_20 + u8 number_configs = 0; + u8 number_interface = 0; +#endif /* possible in developer configs only! */ if (!dev->bus->otg_port) @@ -98,6 +136,36 @@ static int is_targeted(struct usb_device *dev) /* add other match criteria here ... */ +#ifdef CONFIG_USB_OTG_20 + + /* Checking class,subclass and protocal at interface level */ + for (number_configs = dev->descriptor.bNumConfigurations; + number_configs > 0; number_configs--) + for (number_interface = dev->config->desc.bNumInterfaces; + number_interface > 0; + number_interface--) + for (id = whitelist_table; id->match_flags; id++) { + if ((id->match_flags & + USB_DEVICE_ID_MATCH_DEV_CLASS) && + (id->bDeviceClass != + dev->config->intf_cache[number_interface-1] + ->altsetting[0].desc.bInterfaceClass)) + continue; + if ((id->match_flags & + USB_DEVICE_ID_MATCH_DEV_SUBCLASS) + && (id->bDeviceSubClass != + dev->config->intf_cache[number_interface-1] + ->altsetting[0].desc.bInterfaceSubClass)) + continue; + if ((id->match_flags & + USB_DEVICE_ID_MATCH_DEV_PROTOCOL) + && (id->bDeviceProtocol != + dev->config->intf_cache[number_interface-1] + ->altsetting[0].desc.bInterfaceProtocol)) + continue; + return 1; + } +#endif /* OTG MESSAGE: report errors here, customize to match your product */ dev_err(&dev->dev, "device v%04x p%04x is not supported\n", diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index 
71648dcbe43..f7962567c1b 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -1,5 +1,9 @@ #include <linux/pm.h> +#ifdef CONFIG_ARCH_U8500 +extern int usb_device_count; +#endif + /* Functions local to drivers/usb/core/ */ extern int usb_create_sysfs_dev_files(struct usb_device *dev); @@ -74,7 +78,9 @@ static inline int usb_port_resume(struct usb_device *udev, pm_message_t msg) } #endif - +#ifdef CONFIG_USB_OTG +extern void usb_hnp_polling_work(struct work_struct *work); +#endif #ifdef CONFIG_USB_SUSPEND extern void usb_autosuspend_device(struct usb_device *udev); diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 2633f759511..7e99677b81b 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -977,4 +977,15 @@ config USB_G_WEBCAM endchoice +config USB_OTG_20 + bool "OTG 2.0 USB SUPPORT" + select USB_OTG + select PM_RUNTIME + select USB_OTG_WHITELIST + select USB_SUSPEND + default n + help + Enabling the whitelist (Target Peripheral List-TPL) and runtime power + management at system level and usb level for OTG 2.0. 
+ endif # USB_GADGET diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c index 51f3d42f5a6..14cbfa80afb 100644 --- a/drivers/usb/gadget/epautoconf.c +++ b/drivers/usb/gadget/epautoconf.c @@ -315,6 +315,12 @@ struct usb_ep *usb_ep_autoconfig_ss( #endif } + if (gadget->ops->configure_ep) { + ep = gadget->ops->configure_ep(gadget, type, desc); + if (ep && ep_matches(gadget, ep, desc, ep_comp)) + return ep; + } + /* Second, look at endpoints until an unclaimed one looks usable */ list_for_each_entry (ep, &gadget->ep_list, ep_list) { if (ep_matches(gadget, ep, desc, ep_comp)) diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c index cb8c162cae5..8473424e31f 100644 --- a/drivers/usb/gadget/f_mass_storage.c +++ b/drivers/usb/gadget/f_mass_storage.c @@ -2786,6 +2786,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common, for (i = 0, lcfg = cfg->luns; i < nluns; ++i, ++curlun, ++lcfg) { curlun->cdrom = !!lcfg->cdrom; curlun->ro = lcfg->cdrom || lcfg->ro; + curlun->nofua = 1; curlun->initially_ro = curlun->ro; curlun->removable = lcfg->removable; curlun->dev.release = fsg_lun_release; diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c index 52343654f5d..b8bfda4fc58 100644 --- a/drivers/usb/gadget/f_rndis.c +++ b/drivers/usb/gadget/f_rndis.c @@ -174,8 +174,8 @@ rndis_iad_descriptor = { .bFirstInterface = 0, /* XXX, hardcoded */ .bInterfaceCount = 2, // control + data .bFunctionClass = USB_CLASS_COMM, - .bFunctionSubClass = USB_CDC_SUBCLASS_ETHERNET, - .bFunctionProtocol = USB_CDC_PROTO_NONE, + .bFunctionSubClass = USB_CDC_SUBCLASS_ACM, + .bFunctionProtocol = USB_CDC_ACM_PROTO_VENDOR, /* .iFunction = DYNAMIC */ }; diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index f70cab3beee..f9e42041b5f 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig @@ -34,6 +34,8 @@ if USB_MUSB_HDRC choice prompt "Platform Glue Layer" + bool + default USB_MUSB_UX500 
if ARCH_U8500 || ARCH_U5500 config USB_MUSB_DAVINCI tristate "DaVinci" @@ -60,7 +62,7 @@ config USB_MUSB_BLACKFIN config USB_MUSB_UX500 tristate "U8500 and U5500" - depends on (ARCH_U8500 && AB8500_USB) + depends on (ARCH_U8500) || (ARCH_U5500) endchoice @@ -114,4 +116,13 @@ config MUSB_PIO_ONLY endchoice +config USB_MUSB_DEBUG + depends on USB_MUSB_HDRC + bool "Enable debugging messages" + default n + help + This enables musb debugging. To set the logging level use the debug + module parameter. Starting at level 3, per-transfer (urb, usb_request, + packet, or dma transfer) tracing may kick in. + endif # USB_MUSB_HDRC diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 66aaccf0449..2923752b858 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -520,9 +520,8 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, /* see manual for the order of the tests */ if (int_usb & MUSB_INTR_SESSREQ) { void __iomem *mbase = musb->mregs; - if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS - && (devctl & MUSB_DEVCTL_BDEVICE)) { + || (devctl & MUSB_DEVCTL_BDEVICE)) { dev_dbg(musb->controller, "SessReq while on B state\n"); return IRQ_HANDLED; } @@ -715,6 +714,9 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, b_host: musb->xceiv->state = OTG_STATE_B_HOST; hcd->self.is_b_host = 1; +#ifdef CONFIG_USB_OTG_20 + musb->g.otg_hnp_reqd = 0; +#endif musb->ignore_disconnect = 0; del_timer(&musb->otg_timer); break; @@ -1036,9 +1038,6 @@ static void musb_shutdown(struct platform_device *pdev) || defined(CONFIG_USB_MUSB_AM35X) \ || defined(CONFIG_USB_MUSB_AM35X_MODULE) static ushort __devinitdata fifo_mode = 4; -#elif defined(CONFIG_USB_MUSB_UX500) \ - || defined(CONFIG_USB_MUSB_UX500_MODULE) -static ushort __devinitdata fifo_mode = 5; #else static ushort __devinitdata fifo_mode = 2; #endif @@ -1123,8 +1122,8 @@ static struct musb_fifo_cfg __devinitdata mode_4_cfg[] = { /* mode 5 - fits in 8KB */ static 
struct musb_fifo_cfg __devinitdata mode_5_cfg[] = { -{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, -{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, }, +{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE }, { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, { .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, }, @@ -1752,7 +1751,9 @@ musb_srp_store(struct device *dev, struct device_attribute *attr, { struct musb *musb = dev_to_musb(dev); unsigned short srp; - +#ifdef CONFIG_USB_OTG_20 + musb->xceiv->start_srp(musb->xceiv); +#endif if (sscanf(buf, "%hu", &srp) != 1 || (srp != 1)) { dev_err(dev, "SRP: Value must be 1\n"); @@ -1766,10 +1767,45 @@ musb_srp_store(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store); +static ssize_t +ux500_set_extvbus(struct device *dev, struct device_attribute *attr, + const char *buf, size_t n) +{ + struct musb_hdrc_platform_data *plat = dev->platform_data; + unsigned short extvbus; + + if (sscanf(buf, "%hu", &extvbus) != 1 + || ((extvbus != 1) && (extvbus != 0))) { + dev_err(dev, "Invalid value EXTVBUS must be 1 or 0\n"); + return -EINVAL; + } + + plat->extvbus = extvbus; + + return n; +} + +static ssize_t +ux500_get_extvbus(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct musb_hdrc_platform_data *plat = dev->platform_data; + int extvbus; + + /* FIXME get_vbus_status() is normally #defined as false... + * and is effectively TUSB-specific. + */ + extvbus = plat->extvbus; + + return sprintf(buf, "EXTVBUS is %s\n", + extvbus ? 
"on" : "off"); +} +static DEVICE_ATTR(extvbus, 0644, ux500_get_extvbus, ux500_set_extvbus); + static struct attribute *musb_attributes[] = { &dev_attr_mode.attr, &dev_attr_vbus.attr, &dev_attr_srp.attr, + &dev_attr_extvbus.attr, NULL }; @@ -1886,7 +1922,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) status = -ENODEV; goto fail0; } - /* allocate */ musb = allocate_instance(dev, plat->config, ctrl); if (!musb) { @@ -2330,7 +2365,7 @@ static int musb_suspend(struct device *dev) return 0; } -static int musb_resume_noirq(struct device *dev) +static int musb_resume(struct device *dev) { /* for static cmos like DaVinci, register values were preserved * unless for some reason the whole soc powered down or the USB @@ -2371,13 +2406,17 @@ static int musb_runtime_resume(struct device *dev) static const struct dev_pm_ops musb_dev_pm_ops = { .suspend = musb_suspend, - .resume_noirq = musb_resume_noirq, + .resume = musb_resume, .runtime_suspend = musb_runtime_suspend, .runtime_resume = musb_runtime_resume, }; - +#ifdef CONFIG_UX500_SOC_DB8500 #define MUSB_DEV_PM_OPS (&musb_dev_pm_ops) #else +#define MUSB_DEV_PM_OPS NULL +#endif + +#else #define MUSB_DEV_PM_OPS NULL #endif diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index f4a40f001c8..2d52530d3e4 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h @@ -153,7 +153,10 @@ enum musb_g_ep0_state { #define OTG_TIME_A_WAIT_BCON 1100 /* min 1 second */ #define OTG_TIME_A_AIDL_BDIS 200 /* min 200 msec */ #define OTG_TIME_B_ASE0_BRST 100 /* min 3.125 ms */ - +#ifdef CONFIG_USB_OTG_20 +#define USB_SUSP_DET_DURATION 5 /* suspend time 5ms */ +#define TTST_SRP 3000 /* max 5 sec */ +#endif /*************************** REGISTER ACCESS ********************************/ @@ -229,6 +232,8 @@ struct musb_platform_ops { int (*adjust_channel_params)(struct dma_channel *channel, u16 packet_sz, u8 *mode, dma_addr_t *dma_addr, u32 *len); + struct usb_ep* 
(*configure_endpoints)(struct musb *musb, u8 type, + struct usb_endpoint_descriptor *desc); }; /* @@ -430,7 +435,6 @@ struct musb { unsigned set_address:1; unsigned test_mode:1; unsigned softconnect:1; - u8 address; u8 test_mode_nr; u16 ackpend; /* ep0 */ @@ -603,4 +607,13 @@ static inline int musb_platform_exit(struct musb *musb) return musb->ops->exit(musb); } +static inline struct usb_ep *musb_platform_configure_ep(struct musb *musb, + u8 type, struct usb_endpoint_descriptor *desc) +{ + struct usb_ep *ep = NULL; + + if (musb->ops->configure_endpoints) + ep = musb->ops->configure_endpoints(musb, type, desc); + return ep; +} #endif /* __MUSB_CORE_H__ */ diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c index 40a37c91cc1..19e3c91c22e 100644 --- a/drivers/usb/musb/musb_debugfs.c +++ b/drivers/usb/musb/musb_debugfs.c @@ -68,6 +68,7 @@ static const struct musb_register_map musb_regmap[] = { { "RxFIFOadd", 0x66, 16 }, { "VControl", 0x68, 32 }, { "HWVers", 0x6C, 16 }, + { "EXTVBUS", 0x70, 8 }, { "EPInfo", 0x78, 8 }, { "RAMInfo", 0x79, 8 }, { "LinkInfo", 0x7A, 8 }, diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index f42c29b11f7..b2abef69dc3 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -401,7 +401,21 @@ static void txstate(struct musb *musb, struct musb_request *req) csr |= (MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE); - if (!musb_ep->hb_mult) + /* + * Enable Autoset according to table + * below + * ************************************ + * bulk_split hb_mult Autoset_Enable + * ************************************ + * 0 0 Yes(Normal) + * 0 >0 No(High BW ISO) + * 1 0 Yes(HS bulk) + * 1 >0 Yes(FS bulk) + */ + if (!musb_ep->hb_mult || + (musb_ep->hb_mult && + can_bulk_split(musb, + musb_ep->type))) csr |= MUSB_TXCSR_AUTOSET; } csr &= ~MUSB_TXCSR_P_UNDERRUN; @@ -1097,6 +1111,12 @@ static int musb_gadget_enable(struct usb_ep *ep, /* REVISIT if can_bulk_split(), use 
by updating "tmp"; * likewise high bandwidth periodic tx */ + /* Set the TXMAXP register correctly for Bulk IN + * endpoints in device mode + */ + if (can_bulk_split(musb, musb_ep->type)) + musb_ep->hb_mult = (hw_ep->max_packet_sz_tx / + musb_ep->packet_sz) - 1; /* Set TXMAXP with the FIFO size of the endpoint * to disable double buffering mode. */ @@ -1642,7 +1662,9 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget) } spin_unlock_irqrestore(&musb->lock, flags); +#ifndef CONFIG_USB_OTG_20 otg_start_srp(musb->xceiv->otg); +#endif spin_lock_irqsave(&musb->lock, flags); /* Block idling for at least 1s */ @@ -1753,6 +1775,14 @@ static int musb_gadget_start(struct usb_gadget *g, static int musb_gadget_stop(struct usb_gadget *g, struct usb_gadget_driver *driver); +static struct usb_ep *musb_gadget_configure_ep(struct usb_gadget *gadget, + u8 type, struct usb_endpoint_descriptor *desc) +{ + struct musb *musb = gadget_to_musb(gadget); + + return musb_platform_configure_ep(musb, type, desc); +} + static const struct usb_gadget_ops musb_gadget_operations = { .get_frame = musb_gadget_get_frame, .wakeup = musb_gadget_wakeup, @@ -1762,6 +1792,7 @@ static const struct usb_gadget_ops musb_gadget_operations = { .pullup = musb_gadget_pullup, .udc_start = musb_gadget_start, .udc_stop = musb_gadget_stop, + .configure_ep = musb_gadget_configure_ep, }; /* ----------------------------------------------------------------------- */ diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c index e40d7647caf..631aab86240 100644 --- a/drivers/usb/musb/musb_gadget_ep0.c +++ b/drivers/usb/musb/musb_gadget_ep0.c @@ -45,6 +45,11 @@ /* ep0 is always musb->endpoints[0].ep_in */ #define next_ep0_request(musb) next_in_request(&(musb)->endpoints[0]) +/* OTG 2.0 Specification 6.2.3 GetStatus commands */ +#ifdef CONFIG_USB_OTG_20 +#define OTG_STATUS_SELECT 0xF +#endif + /* * locking note: we use only the controller lock, for simpler correctness. 
* It's always held with IRQs blocked. @@ -80,21 +85,33 @@ static int service_tx_status_request( int handled = 1; u8 result[2], epnum = 0; const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK; - +#ifdef CONFIG_USB_OTG_20 + unsigned int otg_recip = ctrlrequest->wIndex >> 12; +#endif result[1] = 0; switch (recip) { case USB_RECIP_DEVICE: - result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED; - result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP; - if (musb->g.is_otg) { - result[0] |= musb->g.b_hnp_enable - << USB_DEVICE_B_HNP_ENABLE; - result[0] |= musb->g.a_alt_hnp_support - << USB_DEVICE_A_ALT_HNP_SUPPORT; - result[0] |= musb->g.a_hnp_support - << USB_DEVICE_A_HNP_SUPPORT; +#ifdef CONFIG_USB_OTG_20 + if (!(otg_recip == OTG_STATUS_SELECT)) { +#endif + result[0] = musb->is_self_powered << + USB_DEVICE_SELF_POWERED; + result[0] |= musb->may_wakeup << + USB_DEVICE_REMOTE_WAKEUP; + if (musb->g.is_otg) { + result[0] |= musb->g.b_hnp_enable + << USB_DEVICE_B_HNP_ENABLE; + result[0] |= musb->g.a_alt_hnp_support + << USB_DEVICE_A_ALT_HNP_SUPPORT; + result[0] |= musb->g.a_hnp_support + << USB_DEVICE_A_HNP_SUPPORT; + } +#ifdef CONFIG_USB_OTG_20 + } else { + result[0] = 1 & musb->g.otg_hnp_reqd; } +#endif break; case USB_RECIP_INTERFACE: @@ -356,7 +373,22 @@ __acquires(musb->lock) musb->test_mode_nr = MUSB_TEST_PACKET; break; - +#ifdef CONFIG_USB_OTG_20 + case 6: + if (!musb->g.is_otg) + goto stall; + musb->g.otg_srp_reqd = 1; + + mod_timer(&musb->otg_timer, + jiffies + + msecs_to_jiffies(TTST_SRP)); + break; + case 7: + if (!musb->g.is_otg) + goto stall; + musb->g.otg_hnp_reqd = 1; + break; +#endif case 0xc0: /* TEST_FORCE_HS */ pr_debug("TEST_FORCE_HS\n"); diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index ef8d744800a..0a108587e2e 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -46,7 +46,6 @@ #include "musb_core.h" #include "musb_host.h" - /* MUSB HOST status 22-mar-2006 * * - There's still lots 
of partial code duplication for fault paths, so @@ -108,24 +107,41 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) { struct musb *musb = ep->musb; void __iomem *epio = ep->regs; + void __iomem *regs = ep->musb->mregs; u16 csr; - u16 lastcsr = 0; - int retries = 1000; + u8 addr; + int retries = 3000; /* 3ms */ + /* + * NOTE: We are using a hack here because the FIFO-FLUSH + * bit is broken in hardware! The hack consists of changing + * the TXFUNCADDR to an unused device address and waiting + * for any pending USB packets to hit the 3-strikes and your + * gone rule. + */ + addr = musb_readb(regs, MUSB_BUSCTL_OFFSET(ep->epnum, MUSB_TXFUNCADDR)); csr = musb_readw(epio, MUSB_TXCSR); while (csr & MUSB_TXCSR_FIFONOTEMPTY) { - if (csr != lastcsr) - dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr); - lastcsr = csr; - csr |= MUSB_TXCSR_FLUSHFIFO; - musb_writew(epio, MUSB_TXCSR, csr); + musb_writeb(regs, MUSB_BUSCTL_OFFSET(ep->epnum, + MUSB_TXFUNCADDR), 127); csr = musb_readw(epio, MUSB_TXCSR); - if (WARN(retries-- < 1, - "Could not flush host TX%d fifo: csr: %04x\n", - ep->epnum, csr)) - return; - mdelay(1); + retries--; + if (retries == 0) { + /* can happen if the USB clocks are OFF */ + dev_dbg(musb->controller, "Could not flush host TX%d " + "fifo: csr=0x%04x\n", ep->epnum, csr); + break; + } + udelay(1); } + /* clear any errors */ + csr &= ~(MUSB_TXCSR_H_ERROR + | MUSB_TXCSR_H_RXSTALL + | MUSB_TXCSR_H_NAKTIMEOUT); + musb_writew(epio, MUSB_TXCSR, csr); + + /* restore endpoint address */ + musb_writeb(regs, MUSB_BUSCTL_OFFSET(ep->epnum, MUSB_TXFUNCADDR), addr); } static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep) @@ -615,16 +631,26 @@ static bool musb_tx_dma_program(struct dma_controller *dma, u16 csr; u8 mode; -#ifdef CONFIG_USB_INVENTRA_DMA +#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) if (length > channel->max_len) length = channel->max_len; csr = musb_readw(epio, MUSB_TXCSR); - if (length > pkt_size) { + if 
(length >= pkt_size) { mode = 1; csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB; /* autoset shouldn't be set in high bandwidth */ - if (qh->hb_mult == 1) + /* + * Enable Autoset according to table + * below + * bulk_split hb_mult Autoset_Enable + * 0 1 Yes(Normal) + * 0 >1 No(High BW ISO) + * 1 1 Yes(HS bulk) + * 1 >1 Yes(FS bulk) + */ + if (qh->hb_mult == 1 || (qh->hb_mult > 1 && + can_bulk_split(hw_ep->musb, qh->type))) csr |= MUSB_TXCSR_AUTOSET; } else { mode = 0; @@ -771,6 +797,13 @@ static void musb_ep_program(struct musb *musb, u8 epnum, /* protocol/endpoint/interval/NAKlimit */ if (epnum) { musb_writeb(epio, MUSB_TXTYPE, qh->type_reg); + /* + * Set the TXMAXP register correctly for Bulk OUT + * endpoints in host mode + */ + if (can_bulk_split(musb, qh->type)) + qh->hb_mult = hw_ep->max_packet_sz_tx + / packet_sz; if (musb->double_buffer_not_ok) musb_writew(epio, MUSB_TXMAXP, hw_ep->max_packet_sz_tx); @@ -802,6 +835,8 @@ static void musb_ep_program(struct musb *musb, u8 epnum, if (load_count) { /* PIO to load FIFO */ + /* Unmap the buffer so that CPU can use it */ + usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb); qh->segsize = load_count; musb_write_fifo(hw_ep, load_count, buf); } @@ -894,6 +929,8 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb) if (fifo_count < len) urb->status = -EOVERFLOW; + /* Unmap the buffer so that CPU can use it */ + usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb); musb_read_fifo(hw_ep, fifo_count, fifo_dest); urb->actual_length += fifo_count; @@ -933,6 +970,8 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb) fifo_count, (fifo_count == 1) ? 
"" : "s", fifo_dest); + /* Unmap the buffer so that CPU can use it */ + usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb); musb_write_fifo(hw_ep, fifo_count, fifo_dest); urb->actual_length += fifo_count; @@ -1134,6 +1173,22 @@ void musb_host_tx(struct musb *musb, u8 epnum) dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum); status = -ETIMEDOUT; + } else if (tx_csr & MUSB_TXCSR_TXPKTRDY) { + /* BUSY - can happen during USB transfer cancel */ + + /* MUSB_TXCSR_TXPKTRDY indicates that the data written + * to the FIFO by DMA has not still gone on the USB bus. + * DMA completion callback doesn't indicate that data has + * gone on the USB bus. So, if we reach this case, need to + * wait for the MUSB_TXCSR_TXPKTRDY to be cleared and then + * proceed. + */ + dev_dbg(musb->controller, "TXPKTRDY set. Data transfer ongoing. Wait...\n"); + + do { + tx_csr = musb_readw(epio, MUSB_TXCSR); + } while ((tx_csr & MUSB_TXCSR_TXPKTRDY) != 0); + dev_dbg(musb->controller, "TXPKTRDY Cleared. Continue...\n"); } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) { dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum); @@ -1427,7 +1482,7 @@ void musb_host_rx(struct musb *musb, u8 epnum) size_t xfer_len; void __iomem *mbase = musb->mregs; int pipe; - u16 rx_csr, val; + u16 rx_csr, val, restore_csr; bool iso_err = false; bool done = false; u32 status; @@ -1537,7 +1592,7 @@ void musb_host_rx(struct musb *musb, u8 epnum) /* FIXME this is _way_ too much in-line logic for Mentor DMA */ -#ifndef CONFIG_USB_INVENTRA_DMA +#if !defined(CONFIG_USB_INVENTRA_DMA) && !defined(CONFIG_USB_UX500_DMA) if (rx_csr & MUSB_RXCSR_H_REQPKT) { /* REVISIT this happened for a while on some short reads... * the cleanup still needs investigation... looks bad... 
@@ -1569,7 +1624,7 @@ void musb_host_rx(struct musb *musb, u8 epnum) | MUSB_RXCSR_RXPKTRDY); musb_writew(hw_ep->regs, MUSB_RXCSR, val); -#ifdef CONFIG_USB_INVENTRA_DMA +#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) if (usb_pipeisoc(pipe)) { struct usb_iso_packet_descriptor *d; @@ -1625,7 +1680,7 @@ void musb_host_rx(struct musb *musb, u8 epnum) } /* we are expecting IN packets */ -#ifdef CONFIG_USB_INVENTRA_DMA +#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) if (dma) { struct dma_controller *c; u16 rx_count; @@ -1709,6 +1764,11 @@ void musb_host_rx(struct musb *musb, u8 epnum) */ val = musb_readw(epio, MUSB_RXCSR); + + /* retain the original value, + * which will be used to reset CSR + */ + restore_csr = val; val &= ~MUSB_RXCSR_H_REQPKT; if (dma->desired_mode == 0) @@ -1736,7 +1796,7 @@ void musb_host_rx(struct musb *musb, u8 epnum) c->channel_release(dma); hw_ep->rx_channel = NULL; dma = NULL; - /* REVISIT reset CSR */ + musb_writew(epio, MUSB_RXCSR, restore_csr); } } #endif /* Mentor DMA */ diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h index 03f2655af29..1c96cf60907 100644 --- a/drivers/usb/musb/musb_regs.h +++ b/drivers/usb/musb/musb_regs.h @@ -246,7 +246,9 @@ */ #define MUSB_DEVCTL 0x60 /* 8 bit */ - +#ifdef CONFIG_USB_OTG_20 +#define MUSB_MISC 0x61 /* 8 bit */ +#endif /* These are always controlled through the INDEX register */ #define MUSB_TXFIFOSZ 0x62 /* 8-bit (see masks) */ #define MUSB_RXFIFOSZ 0x63 /* 8-bit (see masks) */ diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c index 22ec3e37998..702d5efe9ef 100644 --- a/drivers/usb/musb/musb_virthub.c +++ b/drivers/usb/musb/musb_virthub.c @@ -379,7 +379,7 @@ int musb_hub_control( musb_port_suspend(musb, true); break; case USB_PORT_FEAT_TEST: - if (unlikely(is_host_active(musb))) + if (unlikely(!is_host_active(musb))) goto error; wIndex >>= 8; diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c 
index aa09dd417b9..c1a205cd5e3 100644 --- a/drivers/usb/musb/ux500.c +++ b/drivers/usb/musb/ux500.c @@ -25,9 +25,15 @@ #include <linux/clk.h> #include <linux/io.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <mach/id.h> +#include <mach/usb.h> #include "musb_core.h" +#define DEFAULT_DEVCTL 0x81 +static void ux500_musb_set_vbus(struct musb *musb, int is_on); + struct ux500_glue { struct device *dev; struct platform_device *musb; @@ -35,17 +41,439 @@ struct ux500_glue { }; #define glue_to_musb(g) platform_get_drvdata(g->musb) +static struct timer_list notify_timer; +static struct musb_context_registers context; +static bool context_stored; +struct musb *_musb; + +static void ux500_store_context(struct musb *musb) +{ +#ifdef CONFIG_PM + int i; + void __iomem *musb_base; + void __iomem *epio; + + if (cpu_is_u5500()) { + if (musb != NULL) + _musb = musb; + else + return; + } + + musb_base = musb->mregs; + + if (is_host_enabled(musb)) { + context.frame = musb_readw(musb_base, MUSB_FRAME); + context.testmode = musb_readb(musb_base, MUSB_TESTMODE); + context.busctl = musb_read_ulpi_buscontrol(musb->mregs); + } + context.power = musb_readb(musb_base, MUSB_POWER); + context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE); + context.intrrxe = musb_readw(musb_base, MUSB_INTRRXE); + context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE); + context.index = musb_readb(musb_base, MUSB_INDEX); + context.devctl = DEFAULT_DEVCTL; + + for (i = 0; i < musb->config->num_eps; ++i) { + struct musb_hw_ep *hw_ep; + + musb_writeb(musb_base, MUSB_INDEX, i); + hw_ep = &musb->endpoints[i]; + if (!hw_ep) + continue; + + epio = hw_ep->regs; + if (!epio) + continue; + + context.index_regs[i].txmaxp = + musb_readw(epio, MUSB_TXMAXP); + context.index_regs[i].txcsr = + musb_readw(epio, MUSB_TXCSR); + context.index_regs[i].rxmaxp = + musb_readw(epio, MUSB_RXMAXP); + context.index_regs[i].rxcsr = + musb_readw(epio, MUSB_RXCSR); + + if (musb->dyn_fifo) { + 
context.index_regs[i].txfifoadd = + musb_read_txfifoadd(musb_base); + context.index_regs[i].rxfifoadd = + musb_read_rxfifoadd(musb_base); + context.index_regs[i].txfifosz = + musb_read_txfifosz(musb_base); + context.index_regs[i].rxfifosz = + musb_read_rxfifosz(musb_base); + } + if (is_host_enabled(musb)) { + context.index_regs[i].txtype = + musb_readb(epio, MUSB_TXTYPE); + context.index_regs[i].txinterval = + musb_readb(epio, MUSB_TXINTERVAL); + context.index_regs[i].rxtype = + musb_readb(epio, MUSB_RXTYPE); + context.index_regs[i].rxinterval = + musb_readb(epio, MUSB_RXINTERVAL); + + context.index_regs[i].txfunaddr = + musb_read_txfunaddr(musb_base, i); + context.index_regs[i].txhubaddr = + musb_read_txhubaddr(musb_base, i); + context.index_regs[i].txhubport = + musb_read_txhubport(musb_base, i); + + context.index_regs[i].rxfunaddr = + musb_read_rxfunaddr(musb_base, i); + context.index_regs[i].rxhubaddr = + musb_read_rxhubaddr(musb_base, i); + context.index_regs[i].rxhubport = + musb_read_rxhubport(musb_base, i); + } + } + context_stored = true; +#endif +} + +void ux500_restore_context(struct musb *musb) +{ +#ifdef CONFIG_PM + int i; + void __iomem *musb_base; + void __iomem *ep_target_regs; + void __iomem *epio; + + if (!context_stored) + return; + + if (cpu_is_u5500()) { + if (_musb != NULL) + musb = _musb; + else + return; + } + + musb_base = musb->mregs; + if (is_host_enabled(musb)) { + musb_writew(musb_base, MUSB_FRAME, context.frame); + musb_writeb(musb_base, MUSB_TESTMODE, context.testmode); + musb_write_ulpi_buscontrol(musb->mregs, context.busctl); + } + musb_writeb(musb_base, MUSB_POWER, context.power); + musb_writew(musb_base, MUSB_INTRTXE, context.intrtxe); + musb_writew(musb_base, MUSB_INTRRXE, context.intrrxe); + musb_writeb(musb_base, MUSB_INTRUSBE, context.intrusbe); + musb_writeb(musb_base, MUSB_DEVCTL, context.devctl); + + for (i = 0; i < musb->config->num_eps; ++i) { + struct musb_hw_ep *hw_ep; + + musb_writeb(musb_base, MUSB_INDEX, i); + hw_ep 
= &musb->endpoints[i]; + if (!hw_ep) + continue; + + epio = hw_ep->regs; + if (!epio) + continue; + + musb_writew(epio, MUSB_TXMAXP, + context.index_regs[i].txmaxp); + musb_writew(epio, MUSB_TXCSR, + context.index_regs[i].txcsr); + musb_writew(epio, MUSB_RXMAXP, + context.index_regs[i].rxmaxp); + musb_writew(epio, MUSB_RXCSR, + context.index_regs[i].rxcsr); + + if (musb->dyn_fifo) { + musb_write_txfifosz(musb_base, + context.index_regs[i].txfifosz); + musb_write_rxfifosz(musb_base, + context.index_regs[i].rxfifosz); + musb_write_txfifoadd(musb_base, + context.index_regs[i].txfifoadd); + musb_write_rxfifoadd(musb_base, + context.index_regs[i].rxfifoadd); + } + + if (is_host_enabled(musb)) { + musb_writeb(epio, MUSB_TXTYPE, + context.index_regs[i].txtype); + musb_writeb(epio, MUSB_TXINTERVAL, + context.index_regs[i].txinterval); + musb_writeb(epio, MUSB_RXTYPE, + context.index_regs[i].rxtype); + musb_writeb(epio, MUSB_RXINTERVAL, + + musb->context.index_regs[i].rxinterval); + musb_write_txfunaddr(musb_base, i, + context.index_regs[i].txfunaddr); + musb_write_txhubaddr(musb_base, i, + context.index_regs[i].txhubaddr); + musb_write_txhubport(musb_base, i, + context.index_regs[i].txhubport); + + ep_target_regs = + musb_read_target_reg_base(i, musb_base); + + musb_write_rxfunaddr(ep_target_regs, + context.index_regs[i].rxfunaddr); + musb_write_rxhubaddr(ep_target_regs, + context.index_regs[i].rxhubaddr); + musb_write_rxhubport(ep_target_regs, + context.index_regs[i].rxhubport); + } + } + musb_writeb(musb_base, MUSB_INDEX, context.index); +#endif +} + +static void musb_notify_idle(unsigned long _musb) +{ + struct musb *musb = (void *)_musb; + unsigned long flags; + + u8 devctl; + dev_dbg(musb->controller, "musb_notify_idle %s", + otg_state_string(musb->xceiv->state)); + spin_lock_irqsave(&musb->lock, flags); + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + + switch (musb->xceiv->state) { + case OTG_STATE_A_WAIT_BCON: + if (devctl & MUSB_DEVCTL_BDEVICE) { + 
musb->xceiv->state = OTG_STATE_B_IDLE; + MUSB_DEV_MODE(musb); + } else { + musb->xceiv->state = OTG_STATE_A_IDLE; + MUSB_HST_MODE(musb); + } + if (cpu_is_u8500()) { + pm_runtime_mark_last_busy(musb->controller); + pm_runtime_put_autosuspend(musb->controller); + } + break; + + case OTG_STATE_A_SUSPEND: + default: + break; + } + spin_unlock_irqrestore(&musb->lock, flags); +} + +/* blocking notifier support */ +static int musb_otg_notifications(struct notifier_block *nb, + unsigned long event, void *unused) +{ + struct musb *musb = container_of(nb, struct musb, nb); + + dev_dbg(musb->controller, "musb_otg_notifications %ld %s\n", + event, otg_state_string(musb->xceiv->state)); + switch (event) { + + case USB_EVENT_PREPARE: + pm_runtime_get_sync(musb->controller); + ux500_restore_context(musb); + break; + case USB_EVENT_ID: + case USB_EVENT_RIDA: + dev_dbg(musb->controller, "ID GND\n"); + if (is_otg_enabled(musb)) { + ux500_musb_set_vbus(musb, 1); + } + break; + + case USB_EVENT_VBUS: + dev_dbg(musb->controller, "VBUS Connect\n"); + + break; +/* case USB_EVENT_RIDB: FIXME, not yet managed */ + case USB_EVENT_NONE: + dev_dbg(musb->controller, "VBUS Disconnect\n"); + if (is_otg_enabled(musb) && musb->is_host) + ux500_musb_set_vbus(musb, 0); + else + musb->xceiv->state = OTG_STATE_B_IDLE; + break; + case USB_EVENT_CLEAN: + pm_runtime_mark_last_busy(musb->controller); + pm_runtime_put_autosuspend(musb->controller); + break; + default: + dev_dbg(musb->controller, "ID float\n"); + return NOTIFY_DONE; + } + return NOTIFY_OK; +} + +static void ux500_musb_set_vbus(struct musb *musb, int is_on) +{ + u8 devctl; + unsigned long timeout = jiffies + msecs_to_jiffies(1000); + int ret = 1; + struct musb_hdrc_platform_data *plat = musb->controller->platform_data; +#ifdef CONFIG_USB_OTG_20 + int val = 0; +#endif + /* HDRC controls CPEN, but beware current surges during device + * connect. They can trigger transient overcurrent conditions + * that must be ignored. 
+ */ +#ifdef CONFIG_USB_OTG_20 + val = musb_readb(musb->mregs, MUSB_MISC); + val |= 0x1C; + musb_writeb(musb->mregs, MUSB_MISC, val); +#endif + + /* Use EXTVBUS */ + u8 busctl = musb_read_ulpi_buscontrol(musb->mregs); + if (plat->extvbus) { + busctl |= MUSB_ULPI_USE_EXTVBUS; + musb_write_ulpi_buscontrol(musb->mregs, busctl); + } else { + busctl &= ~MUSB_ULPI_USE_EXTVBUS; + musb_write_ulpi_buscontrol(musb->mregs, busctl); + } + + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + + if (is_on) { + if (musb->xceiv->state == OTG_STATE_A_IDLE) { + /* start the session */ + devctl |= MUSB_DEVCTL_SESSION; + musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); + /* + * Wait for the musb to set as A device to enable the + * VBUS + */ + while (musb_readb(musb->mregs, MUSB_DEVCTL) & 0x80) { + + if (time_after(jiffies, timeout)) { + dev_err(musb->controller, + "configured as A device timeout"); + ret = -EINVAL; + break; + } + } + + } else { + musb->is_active = 1; + musb->xceiv->otg->default_a = 1; + musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; + devctl |= MUSB_DEVCTL_SESSION; + MUSB_HST_MODE(musb); + } + } else { + musb->is_active = 0; + + /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and + * jumping right to B_IDLE... 
+ */ + musb->xceiv->otg->default_a = 0; + devctl &= ~MUSB_DEVCTL_SESSION; + MUSB_DEV_MODE(musb); + } + musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); + + dev_dbg(musb->controller, "VBUS %s, devctl %02x " + /* otg %3x conf %08x prcm %08x */ "\n", + otg_state_string(musb->xceiv->state), + musb_readb(musb->mregs, MUSB_DEVCTL)); +} + +static void ux500_musb_try_idle(struct musb *musb, unsigned long timeout) +{ + static unsigned long last_timer; + + if (timeout == 0) + timeout = jiffies + msecs_to_jiffies(3); + + /* Never idle if active, or when VBUS timeout is not set as host */ + if (musb->is_active || ((musb->a_wait_bcon == 0) + && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) { + dev_dbg(musb->controller, "%s active, deleting timer\n", + otg_state_string(musb->xceiv->state)); + del_timer(¬ify_timer); + last_timer = jiffies; + return; + } + + if (time_after(last_timer, timeout)) { + if (!timer_pending(¬ify_timer)) + last_timer = timeout; + else { + dev_dbg(musb->controller, "Longer idle timer " + "already pending, ignoring\n"); + return; + } + } + last_timer = timeout; + + dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n", + otg_state_string(musb->xceiv->state), + (unsigned long)jiffies_to_msecs(timeout - jiffies)); + mod_timer(¬ify_timer, timeout); +} + +static void ux500_musb_enable(struct musb *musb) +{ + ux500_store_context(musb); +} + +static struct usb_ep *ux500_musb_configure_endpoints(struct musb *musb, + u8 type, struct usb_endpoint_descriptor *desc) +{ + struct usb_ep *ep = NULL; + struct usb_gadget *gadget = &musb->g; + char name[4]; + + if (USB_ENDPOINT_XFER_INT == type) { + list_for_each_entry(ep, &gadget->ep_list, ep_list) { + if (ep->maxpacket == 512) + continue; + if (NULL == ep->driver_data) { + strncpy(name, (ep->name + 3), 4); + if (USB_DIR_IN & desc->bEndpointAddress) + if (strcmp("in", name) == 0) + return ep; + } + } + } + return ep; +} + static int ux500_musb_init(struct musb *musb) { + int status; + musb->xceiv = 
usb_get_transceiver(); if (!musb->xceiv) { pr_err("HS USB OTG: no transceiver configured\n"); return -ENODEV; } + pm_runtime_get_noresume(musb->controller); + musb->nb.notifier_call = musb_otg_notifications; + status = usb_register_notifier(musb->xceiv, &musb->nb); + + if (status < 0) { + dev_dbg(musb->controller, "notification register failed\n"); + goto err1; + } + + setup_timer(¬ify_timer, musb_notify_idle, (unsigned long) musb); return 0; +err1: + pm_runtime_disable(musb->controller); + return status; } +/** + * ux500_musb_exit() - unregister the platform USB driver. + * @musb: struct musb pointer. + * + * This function unregisters the USB controller. + */ static int ux500_musb_exit(struct musb *musb) { usb_put_transceiver(musb->xceiv); @@ -56,8 +484,21 @@ static int ux500_musb_exit(struct musb *musb) static const struct musb_platform_ops ux500_ops = { .init = ux500_musb_init, .exit = ux500_musb_exit, + + .set_vbus = ux500_musb_set_vbus, + .try_idle = ux500_musb_try_idle, + + .enable = ux500_musb_enable, + .configure_endpoints = ux500_musb_configure_endpoints, }; +/** + * ux500_probe() - Allocate the resources. + * @pdev: struct platform_device. + * + * This function allocates the required memory for the + * structures and initialize interrupts. 
+ */ static int __devinit ux500_probe(struct platform_device *pdev) { struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; @@ -122,12 +563,12 @@ static int __devinit ux500_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to register musb device\n"); goto err4; } + pm_runtime_enable(&pdev->dev); return 0; - err4: - clk_disable(clk); - + if (cpu_is_u5500()) + clk_disable(clk); err3: clk_put(clk); @@ -147,43 +588,99 @@ static int __devexit ux500_remove(struct platform_device *pdev) platform_device_del(glue->musb); platform_device_put(glue->musb); - clk_disable(glue->clk); + if (cpu_is_u5500()) + clk_disable(glue->clk); clk_put(glue->clk); + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + kfree(glue); return 0; } #ifdef CONFIG_PM +/** + * ux500_suspend() - Handles the platform suspend. + * @dev: struct device + * + * This function gets triggered when the platform + * is going to suspend + */ static int ux500_suspend(struct device *dev) { struct ux500_glue *glue = dev_get_drvdata(dev); struct musb *musb = glue_to_musb(glue); usb_phy_set_suspend(musb->xceiv, 1); - clk_disable(glue->clk); + if (cpu_is_u5500()) + /* + * Since this clock is in the APE domain, it will + * automatically be disabled on suspend. + * (And enabled on resume automatically.) + */ + clk_disable(glue->clk); + dev_dbg(dev, "ux500_suspend\n"); return 0; } +/** + * ux500_resume() - Handles the platform resume. 
+ * @dev: struct device + * + * This function gets triggered when the platform + * is going to resume + */ static int ux500_resume(struct device *dev) { struct ux500_glue *glue = dev_get_drvdata(dev); struct musb *musb = glue_to_musb(glue); - int ret; + + if (cpu_is_u5500()) + /* No point in propagating errors on resume */ + (void) clk_enable(glue->clk); + dev_dbg(dev, "ux500_resume\n"); + + usb_phy_set_suspend(musb->xceiv, 0); + + return 0; +} +#ifdef CONFIG_UX500_SOC_DB8500 +static int ux500_musb_runtime_resume(struct device *dev) +{ + struct ux500_glue *glue = dev_get_drvdata(dev); + int ret; + + if (cpu_is_u5500()) + return 0; ret = clk_enable(glue->clk); if (ret) { - dev_err(dev, "failed to enable clock\n"); + dev_dbg(dev, "Unable to enable clk\n"); return ret; } + dev_dbg(dev, "ux500_musb_runtime_resume\n"); + return 0; +} - usb_phy_set_suspend(musb->xceiv, 0); +static int ux500_musb_runtime_suspend(struct device *dev) +{ + struct ux500_glue *glue = dev_get_drvdata(dev); + if (cpu_is_u5500()) + return 0; + + clk_disable(glue->clk); + dev_dbg(dev, "ux500_musb_runtime_suspend\n"); return 0; } - +#endif static const struct dev_pm_ops ux500_pm_ops = { +#ifdef CONFIG_UX500_SOC_DB8500 + SET_RUNTIME_PM_OPS(ux500_musb_runtime_suspend, + ux500_musb_runtime_resume, NULL) +#endif .suspend = ux500_suspend, .resume = ux500_resume, }; diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c index d05c7fbbb70..7bf0c289ef5 100644 --- a/drivers/usb/musb/ux500_dma.c +++ b/drivers/usb/musb/ux500_dma.c @@ -32,6 +32,11 @@ #include <linux/pfn.h> #include <mach/usb.h> #include "musb_core.h" +#undef DBG +#undef WARNING +#undef INFO +#include <linux/usb/composite.h> +#define Ux500_USB_DMA_MIN_TRANSFER_SIZE 512 struct ux500_dma_channel { struct dma_channel channel; @@ -64,14 +69,14 @@ void ux500_dma_callback(void *private_data) struct musb *musb = hw_ep->musb; unsigned long flags; - dev_dbg(musb->controller, "DMA rx transfer done on hw_ep=%d\n", + 
dev_dbg(musb->controller, "DMA tx transfer done on hw_ep=%d\n", hw_ep->epnum); spin_lock_irqsave(&musb->lock, flags); ux500_channel->channel.actual_len = ux500_channel->cur_len; ux500_channel->channel.status = MUSB_DMA_STATUS_FREE; musb_dma_completion(musb, hw_ep->epnum, - ux500_channel->is_tx); + ux500_channel->is_tx); spin_unlock_irqrestore(&musb->lock, flags); } @@ -134,6 +139,15 @@ static bool ux500_configure_channel(struct dma_channel *channel, return true; } +/** + * ux500_dma_controller_allocate() - allocates the DMA channels + * @c: pointer to DMA controller + * @hw_ep: pointer to endpoint + * @is_tx: transmit or receive direction + * + * This function allocates the DMA channel and initializes + * the channel +*/ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c, struct musb_hw_ep *hw_ep, u8 is_tx) { @@ -172,7 +186,13 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c, return &(ux500_channel->channel); } - +/** + * ux500_dma_channel_release() - releases the DMA channel + * @channel: channel to be released + * + * This function releases the DMA channel + * +*/ static void ux500_dma_channel_release(struct dma_channel *channel) { struct ux500_dma_channel *ux500_channel = channel->private_data; @@ -190,26 +210,71 @@ static void ux500_dma_channel_release(struct dma_channel *channel) static int ux500_dma_is_compatible(struct dma_channel *channel, u16 maxpacket, void *buf, u32 length) { - if ((maxpacket & 0x3) || - ((int)buf & 0x3) || - (length < 512) || - (length & 0x3)) - return false; - else - return true; + struct ux500_dma_channel *ux500_channel = channel->private_data; + struct musb_hw_ep *hw_ep = ux500_channel->hw_ep; + struct musb *musb = hw_ep->musb; + struct usb_descriptor_header **descriptors; + struct usb_function *f; + struct usb_gadget *gadget = &musb->g; + struct usb_composite_dev *cdev = get_gadget_data(gadget); + + if (length < Ux500_USB_DMA_MIN_TRANSFER_SIZE) + return 0; + + 
list_for_each_entry(f, &cdev->config->functions, list) { + if (!strcmp(f->name, "cdc_ethernet") || + !strcmp(f->name, "rndis") || + !strcmp(f->name, "mtp") || + !strcmp(f->name, "phonet") || + !strcmp(f->name, "adb")) { + if (gadget->speed == USB_SPEED_HIGH) + descriptors = f->hs_descriptors; + else + descriptors = f->descriptors; + + for (; *descriptors; ++descriptors) { + struct usb_endpoint_descriptor *ep; + + if ((*descriptors)->bDescriptorType != + USB_DT_ENDPOINT) + continue; + + ep = (struct usb_endpoint_descriptor *) + *descriptors; + if (ep->bEndpointAddress == + ux500_channel->hw_ep->epnum) + return 0; + } + } + } + + return 1; } +/** + * ux500_dma_channel_program() - Configures the channel and initiates transfer + * @channel: pointer to DMA channel + * @packet_sz: packet size + * @mode: mode + * @dma_addr: physical address of memory + * @len: length + * + * This function configures the channel and initiates the DMA transfer +*/ static int ux500_dma_channel_program(struct dma_channel *channel, u16 packet_sz, u8 mode, dma_addr_t dma_addr, u32 len) { int ret; + struct ux500_dma_channel *ux500_dma_channel = channel->private_data; BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || channel->status == MUSB_DMA_STATUS_BUSY); - if (!ux500_dma_is_compatible(channel, packet_sz, (void *)dma_addr, len)) - return false; + if (len < Ux500_USB_DMA_MIN_TRANSFER_SIZE) + return 0; + if (!ux500_dma_channel->is_tx && len < packet_sz) + return 0; channel->status = MUSB_DMA_STATUS_BUSY; channel->actual_len = 0; @@ -220,6 +285,12 @@ static int ux500_dma_channel_program(struct dma_channel *channel, return ret; } +/** + * ux500_dma_channel_abort() - aborts the DMA transfer + * @channel: pointer to DMA channel. + * + * This function aborts the DMA transfer. 
+*/ static int ux500_dma_channel_abort(struct dma_channel *channel) { struct ux500_dma_channel *ux500_channel = channel->private_data; @@ -254,6 +325,12 @@ static int ux500_dma_channel_abort(struct dma_channel *channel) return 0; } +/** + * ux500_dma_controller_stop() - releases all the channels and frees the DMA pipes + * @c: pointer to DMA controller + * + * This function frees all of the logical channels and frees the DMA pipes +*/ static int ux500_dma_controller_stop(struct dma_controller *c) { struct ux500_dma_controller *controller = container_of(c, @@ -285,6 +362,15 @@ static int ux500_dma_controller_stop(struct dma_controller *c) return 0; } + +/** + * ux500_dma_controller_start() - creates the logical channels pool and registers callbacks + * @c: pointer to DMA Controller + * + * This function requests the logical channels from the DMA driver and creates + * logical channels based on event lines and also registers the callbacks which + * are invoked after data transfer in the transmit or receive direction. +*/ static int ux500_dma_controller_start(struct dma_controller *c) { struct ux500_dma_controller *controller = container_of(c, @@ -356,6 +442,12 @@ static int ux500_dma_controller_start(struct dma_controller *c) return 0; } +/** + * dma_controller_destroy() - deallocates the DMA controller + * @c: pointer to dma controller. + * + * This function deallocates the DMA controller. +*/ void dma_controller_destroy(struct dma_controller *c) { struct ux500_dma_controller *controller = container_of(c, @@ -364,6 +456,15 @@ void dma_controller_destroy(struct dma_controller *c) kfree(controller); } +/** + * dma_controller_create() - creates the dma controller and initializes callbacks + * + * @musb: pointer to mentor core driver data instance| + * @base: base address of musb registers. + * + * This function creates the DMA controller and initializes the callbacks + * that are invoked from the Mentor IP core. 
+*/ struct dma_controller *__init dma_controller_create(struct musb *musb, void __iomem *base) { diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig index 5c87db06b59..168f5b0364f 100644 --- a/drivers/usb/otg/Kconfig +++ b/drivers/usb/otg/Kconfig @@ -108,6 +108,15 @@ config AB8500_USB This transceiver supports high and full speed devices plus, in host mode, low speed. +config AB5500_USB + tristate "AB5500 USB Transceiver Driver" + depends on AB5500_CORE + select USB_OTG_UTILS + help + Enable this to support the USB OTG transceiver in AB5500 chip. + This transceiver supports high and full speed devices plus, + in host mode, low speed. + config FSL_USB2_OTG bool "Freescale USB OTG Transceiver Driver" depends on USB_EHCI_FSL && USB_GADGET_FSL_USB2 && USB_SUSPEND diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile index 41aa5098b13..e227d9add96 100644 --- a/drivers/usb/otg/Makefile +++ b/drivers/usb/otg/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_USB_ULPI) += ulpi.o obj-$(CONFIG_USB_ULPI_VIEWPORT) += ulpi_viewport.o obj-$(CONFIG_USB_MSM_OTG) += msm_otg.o obj-$(CONFIG_AB8500_USB) += ab8500-usb.o +obj-$(CONFIG_AB5500_USB) += ab5500-usb.o fsl_usb2_otg-objs := fsl_otg.o otg_fsm.o obj-$(CONFIG_FSL_USB2_OTG) += fsl_usb2_otg.o obj-$(CONFIG_USB_MV_OTG) += mv_otg.o diff --git a/drivers/usb/otg/ab5500-usb.c b/drivers/usb/otg/ab5500-usb.c new file mode 100644 index 00000000000..e8c06e694a6 --- /dev/null +++ b/drivers/usb/otg/ab5500-usb.c @@ -0,0 +1,681 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Author: Avinash Kumar <avinash.kumar@stericsson.com> for ST-Ericsson + * Author: Ravi Kant SINGH <ravikant.singh@stericsson.com> for ST-Ericsson + * Author: Supriya s KARANTH <supriya.karanth@stericsson.com> for ST-Ericsson + * License terms: GNU General Public License (GPL) version 2 + */ + +#include <linux/platform_device.h> +#include <linux/usb/otg.h> +#include <linux/slab.h> +#include <linux/notifier.h> +#include <linux/interrupt.h> +#include 
<linux/delay.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/mfd/abx500/ab5500.h> +#include <linux/mfd/abx500.h> +#include <linux/regulator/consumer.h> +#include <linux/mfd/dbx500-prcmu.h> +#include <mach/usb.h> +#include <linux/kernel_stat.h> +#include <mach/gpio.h> +#include <mach/reboot_reasons.h> +#include <linux/pm_qos.h> + +/* AB5500 USB macros + */ +#define AB5500_MAIN_WATCHDOG_ENABLE 0x1 +#define AB5500_MAIN_WATCHDOG_KICK 0x2 +#define AB5500_MAIN_WATCHDOG_DISABLE 0x0 +#define AB5500_USB_ADP_ENABLE 0x1 +#define AB5500_WATCHDOG_DELAY 10 +#define AB5500_WATCHDOG_DELAY_US 100 +#define AB5500_PHY_DELAY_US 100 +#define AB5500_MAIN_WDOG_CTRL_REG 0x01 +#define AB5500_USB_LINE_STAT_REG 0x80 +#define AB5500_USB_PHY_CTRL_REG 0x8A +#define AB5500_MAIN_WATCHDOG_ENABLE 0x1 +#define AB5500_MAIN_WATCHDOG_KICK 0x2 +#define AB5500_MAIN_WATCHDOG_DISABLE 0x0 +#define AB5500_SYS_CTRL2_BLOCK 0x2 + +/* UsbLineStatus register bit masks */ +#define AB5500_USB_LINK_STATUS_MASK_V1 0x78 +#define AB5500_USB_LINK_STATUS_MASK_V2 0xF8 + +#define USB_PROBE_DELAY 1000 /* 1 seconds */ +#define USB_LIMIT (200) /* If we have more than 200 irqs per second */ + +static struct pm_qos_request usb_pm_qos_latency; +static bool usb_pm_qos_is_latency_0; + +#define PUBLIC_ID_BACKUPRAM1 (U5500_BACKUPRAM1_BASE + 0x0FC0) +#define MAX_USB_SERIAL_NUMBER_LEN 31 + +/* UsbLineStatus register - usb types */ +enum ab5500_usb_link_status { + USB_LINK_NOT_CONFIGURED, + USB_LINK_STD_HOST_NC, + USB_LINK_STD_HOST_C_NS, + USB_LINK_STD_HOST_C_S, + USB_LINK_HOST_CHG_NM, + USB_LINK_HOST_CHG_HS, + USB_LINK_HOST_CHG_HS_CHIRP, + USB_LINK_DEDICATED_CHG, + USB_LINK_ACA_RID_A, + USB_LINK_ACA_RID_B, + USB_LINK_ACA_RID_C_NM, + USB_LINK_ACA_RID_C_HS, + USB_LINK_ACA_RID_C_HS_CHIRP, + USB_LINK_HM_IDGND, + USB_LINK_OTG_HOST_NO_CURRENT, + USB_LINK_NOT_VALID_LINK, + USB_LINK_PHY_EN_NO_VBUS_NO_IDGND, + USB_LINK_STD_UPSTREAM_NO_VBUS_NO_IDGND, + USB_LINK_HM_IDGND_V2 +}; + +/** + * ab5500_usb_mode - Different states of 
ab usb_chip + * + * Used for USB cable plug-in state machine + */ +enum ab5500_usb_mode { + USB_IDLE, + USB_DEVICE, + USB_HOST, + USB_DEDICATED_CHG, +}; +struct ab5500_usb { + struct otg_transceiver otg; + struct device *dev; + int irq_num_id_fall; + int irq_num_vbus_rise; + int irq_num_vbus_fall; + int irq_num_link_status; + unsigned vbus_draw; + struct delayed_work dwork; + struct work_struct phy_dis_work; + unsigned long link_status_wait; + int rev; + int usb_cs_gpio; + enum ab5500_usb_mode mode; + struct clk *sysclk; + struct regulator *v_ape; + struct abx500_usbgpio_platform_data *usb_gpio; + struct delayed_work work_usb_workaround; + bool phy_enabled; +}; + +static int ab5500_usb_irq_setup(struct platform_device *pdev, + struct ab5500_usb *ab); +static int ab5500_usb_boot_detect(struct ab5500_usb *ab); +static int ab5500_usb_link_status_update(struct ab5500_usb *ab); + +static void ab5500_usb_phy_enable(struct ab5500_usb *ab, bool sel_host); +static void ab5500_usb_phy_disable(struct ab5500_usb *ab, bool sel_host); + +static inline struct ab5500_usb *xceiv_to_ab(struct otg_transceiver *x) +{ + return container_of(x, struct ab5500_usb, otg); +} + +/** + * ab5500_usb_wd_workaround() - Kick the watch dog timer + * + * This function used to Kick the watch dog timer + */ +static void ab5500_usb_wd_workaround(struct ab5500_usb *ab) +{ + abx500_set_register_interruptible(ab->dev, + AB5500_SYS_CTRL2_BLOCK, + AB5500_MAIN_WDOG_CTRL_REG, + AB5500_MAIN_WATCHDOG_ENABLE); + + udelay(AB5500_WATCHDOG_DELAY_US); + + abx500_set_register_interruptible(ab->dev, + AB5500_SYS_CTRL2_BLOCK, + AB5500_MAIN_WDOG_CTRL_REG, + (AB5500_MAIN_WATCHDOG_ENABLE + | AB5500_MAIN_WATCHDOG_KICK)); + + udelay(AB5500_WATCHDOG_DELAY_US); + + abx500_set_register_interruptible(ab->dev, + AB5500_SYS_CTRL2_BLOCK, + AB5500_MAIN_WDOG_CTRL_REG, + AB5500_MAIN_WATCHDOG_DISABLE); + + udelay(AB5500_WATCHDOG_DELAY_US); +} + +static void ab5500_usb_load(struct work_struct *work) +{ + int cpu; + unsigned int 
num_irqs = 0; + static unsigned int old_num_irqs = UINT_MAX; + struct delayed_work *work_usb_workaround = to_delayed_work(work); + struct ab5500_usb *ab = container_of(work_usb_workaround, + struct ab5500_usb, work_usb_workaround); + + for_each_online_cpu(cpu) + num_irqs += kstat_irqs_cpu(IRQ_DB5500_USBOTG, cpu); + + if ((num_irqs > old_num_irqs) && + (num_irqs - old_num_irqs) > USB_LIMIT) { + + if (!usb_pm_qos_is_latency_0) { + + pm_qos_add_request(&usb_pm_qos_latency, + PM_QOS_CPU_DMA_LATENCY, 0); + usb_pm_qos_is_latency_0 = true; + } + } else { + + if (usb_pm_qos_is_latency_0) { + + pm_qos_remove_request(&usb_pm_qos_latency); + usb_pm_qos_is_latency_0 = false; + } + } + old_num_irqs = num_irqs; + + schedule_delayed_work_on(0, + &ab->work_usb_workaround, + msecs_to_jiffies(USB_PROBE_DELAY)); +} + +static void ab5500_usb_phy_enable(struct ab5500_usb *ab, bool sel_host) +{ + int ret = 0; + /* Workaround for spurious interrupt to be checked with Hardware Team*/ + if (ab->phy_enabled == true) + return; + ab->phy_enabled = true; + + ab->usb_gpio->enable(); + clk_enable(ab->sysclk); + regulator_enable(ab->v_ape); + + /* TODO: Remove ux500_resotore_context and handle similar to ab8500 */ + ux500_restore_context(NULL); + ret = gpio_direction_output(ab->usb_cs_gpio, 0); + if (ret < 0) { + dev_err(ab->dev, "usb_cs_gpio: gpio direction failed\n"); + gpio_free(ab->usb_cs_gpio); + return; + } + gpio_set_value(ab->usb_cs_gpio, 1); + if (sel_host) { + schedule_delayed_work_on(0, + &ab->work_usb_workaround, + msecs_to_jiffies(USB_PROBE_DELAY)); + } +} + +static void ab5500_usb_phy_disable(struct ab5500_usb *ab, bool sel_host) +{ + /* Workaround for spurious interrupt to be checked with Hardware Team*/ + if (ab->phy_enabled == false) + return; + ab->phy_enabled = false; + + /* Needed to disable the phy.*/ + ab5500_usb_wd_workaround(ab); + clk_disable(ab->sysclk); + regulator_disable(ab->v_ape); + ab->usb_gpio->disable(); + gpio_set_value(ab->usb_cs_gpio, 0); + + if (sel_host) { + 
if (usb_pm_qos_is_latency_0) { + + pm_qos_remove_request(&usb_pm_qos_latency); + usb_pm_qos_is_latency_0 = false; + } + cancel_delayed_work_sync(&ab->work_usb_workaround); + } +} + +#define ab5500_usb_peri_phy_en(ab) ab5500_usb_phy_enable(ab, false) +#define ab5500_usb_peri_phy_dis(ab) ab5500_usb_phy_disable(ab, false) +#define ab5500_usb_host_phy_en(ab) ab5500_usb_phy_enable(ab, true) +#define ab5500_usb_host_phy_dis(ab) ab5500_usb_phy_disable(ab, true) + +/* Work created after an link status update handler*/ +static int ab5500_usb_link_status_update(struct ab5500_usb *ab) +{ + u8 val = 0; + enum ab5500_usb_link_status lsts; + enum usb_xceiv_events event = USB_EVENT_NONE; + + (void)abx500_get_register_interruptible(ab->dev, + AB5500_BANK_USB, AB5500_USB_LINE_STAT_REG, &val); + + if (ab->rev >= AB5500_2_0) + lsts = (val & AB5500_USB_LINK_STATUS_MASK_V2) >> 3; + else + lsts = (val & AB5500_USB_LINK_STATUS_MASK_V1) >> 3; + + switch (lsts) { + + case USB_LINK_STD_HOST_NC: + case USB_LINK_STD_HOST_C_NS: + case USB_LINK_STD_HOST_C_S: + case USB_LINK_HOST_CHG_NM: + case USB_LINK_HOST_CHG_HS: + case USB_LINK_HOST_CHG_HS_CHIRP: + + event = USB_EVENT_VBUS; + ab5500_usb_peri_phy_en(ab); + + break; + case USB_LINK_DEDICATED_CHG: + /* TODO: vbus_draw */ + event = USB_EVENT_CHARGER; + break; + + case USB_LINK_HM_IDGND: + if (ab->rev >= AB5500_2_0) + return -1; + + + ab5500_usb_host_phy_en(ab); + + ab->otg.default_a = true; + event = USB_EVENT_ID; + + break; + case USB_LINK_PHY_EN_NO_VBUS_NO_IDGND: + ab5500_usb_peri_phy_dis(ab); + + break; + case USB_LINK_STD_UPSTREAM_NO_VBUS_NO_IDGND: + ab5500_usb_host_phy_dis(ab); + + break; + + case USB_LINK_HM_IDGND_V2: + if (!(ab->rev >= AB5500_2_0)) + return -1; + + + ab5500_usb_host_phy_en(ab); + + ab->otg.default_a = true; + event = USB_EVENT_ID; + + break; + default: + break; + } + + atomic_notifier_call_chain(&ab->otg.notifier, event, &ab->vbus_draw); + + return 0; +} + +static void ab5500_usb_delayed_work(struct work_struct *work) +{ 
+ struct delayed_work *dwork = to_delayed_work(work); + struct ab5500_usb *ab = container_of(dwork, struct ab5500_usb, dwork); + + ab5500_usb_link_status_update(ab); +} + +/** + * This function is used to signal the completion of + * USB Link status register update + */ +static irqreturn_t ab5500_usb_link_status_irq(int irq, void *data) +{ + struct ab5500_usb *ab = (struct ab5500_usb *) data; + ab5500_usb_link_status_update(ab); + + return IRQ_HANDLED; +} + + + + +static void ab5500_usb_irq_free(struct ab5500_usb *ab) +{ + if (ab->irq_num_link_status) + free_irq(ab->irq_num_link_status, ab); +} + +/** + * ab5500_usb_irq_setup : register USB callback handlers for ab5500 + * @mode: value for mode. + * + * This function is used to register USB callback handlers for ab5500. + */ +static int ab5500_usb_irq_setup(struct platform_device *pdev, + struct ab5500_usb *ab) +{ + int ret = 0; + int irq, err; + + if (!ab->dev) + return -EINVAL; + + + irq = platform_get_irq_byname(pdev, "Link_Update"); + if (irq < 0) { + dev_err(&pdev->dev, "Link Update irq not found\n"); + err = irq; + goto irq_fail; + } + ab->irq_num_link_status = irq; + + ret = request_threaded_irq(ab->irq_num_link_status, + NULL, ab5500_usb_link_status_irq, + IRQF_NO_SUSPEND | IRQF_SHARED, + "usb-link-status-update", ab); + if (ret < 0) { + printk(KERN_ERR "failed to set the callback" + " handler for usb charge" + " detect done\n"); + err = ret; + goto irq_fail; + } + + ab5500_usb_wd_workaround(ab); + return 0; + +irq_fail: + ab5500_usb_irq_free(ab); + return err; +} + +/* Sys interfaces */ +static ssize_t +serial_number_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u32 bufer[5]; + void __iomem *backup_ram = NULL; + backup_ram = ioremap(PUBLIC_ID_BACKUPRAM1, 0x14); + + if (backup_ram) { + bufer[0] = readl(backup_ram); + bufer[1] = readl(backup_ram + 4); + bufer[2] = readl(backup_ram + 8); + bufer[3] = readl(backup_ram + 0x0c); + bufer[4] = readl(backup_ram + 0x10); + + snprintf(buf, 
MAX_USB_SERIAL_NUMBER_LEN+1, + "%.8X%.8X%.8X%.8X%.8X", + bufer[0], bufer[1], bufer[2], bufer[3], bufer[4]); + + iounmap(backup_ram); + } else + dev_err(dev, "$$\n"); + + return strlen(buf); +} + + +static DEVICE_ATTR(serial_number, 0644, serial_number_show, NULL); + +static struct attribute *ab5500_usb_attributes[] = { + &dev_attr_serial_number.attr, + NULL +}; +static const struct attribute_group ab5500_attr_group = { + .attrs = ab5500_usb_attributes, +}; + +static int ab5500_create_sysfsentries(struct ab5500_usb *ab) +{ + int err; + + err = sysfs_create_group(&ab->dev->kobj, &ab5500_attr_group); + if (err) + sysfs_remove_group(&ab->dev->kobj, &ab5500_attr_group); + + return err; +} + +/** + * ab5500_usb_boot_detect : detect the USB cable during boot time. + * @mode: value for mode. + * + * This function is used to detect the USB cable during boot time. + */ +static int ab5500_usb_boot_detect(struct ab5500_usb *ab) +{ + int usb_status = 0; + enum ab5500_usb_link_status lsts; + if (!ab->dev) + return -EINVAL; + + (void)abx500_get_register_interruptible(ab->dev, + AB5500_BANK_USB, AB5500_USB_LINE_STAT_REG, &usb_status); + + if (ab->rev >= AB5500_2_0) + lsts = (usb_status & AB5500_USB_LINK_STATUS_MASK_V2) >> 3; + else + lsts = (usb_status & AB5500_USB_LINK_STATUS_MASK_V1) >> 3; + + switch (lsts) { + + case USB_LINK_STD_HOST_NC: + case USB_LINK_STD_HOST_C_NS: + case USB_LINK_STD_HOST_C_S: + case USB_LINK_HOST_CHG_NM: + case USB_LINK_HOST_CHG_HS: + case USB_LINK_HOST_CHG_HS_CHIRP: + /* + * If Power on key was not pressed then enter charge only + * mode and dont enumerate + */ + if ((!(ab5500_get_turn_on_status() & + (P_ON_KEY1_EVENT | P_ON_KEY2_EVENT))) && + (prcmu_get_reset_code() == + SW_RESET_COLDSTART)) { + dev_dbg(ab->dev, "USB entered charge only mode"); + return 0; + } + ab5500_usb_peri_phy_en(ab); + + break; + + case USB_LINK_HM_IDGND: + case USB_LINK_HM_IDGND_V2: + ab5500_usb_host_phy_en(ab); + + break; + default: + break; + } + + return 0; +} + +static int 
ab5500_usb_set_power(struct otg_transceiver *otg, unsigned mA) +{ + struct ab5500_usb *ab; + + if (!otg) + return -ENODEV; + + ab = xceiv_to_ab(otg); + + ab->vbus_draw = mA; + + atomic_notifier_call_chain(&ab->otg.notifier, + USB_EVENT_VBUS, &ab->vbus_draw); + return 0; +} + +static int ab5500_usb_set_suspend(struct otg_transceiver *x, int suspend) +{ + /* TODO */ + return 0; +} + +static int ab5500_usb_set_host(struct otg_transceiver *otg, + struct usb_bus *host) +{ + struct ab5500_usb *ab; + + if (!otg) + return -ENODEV; + + ab = xceiv_to_ab(otg); + + /* Some drivers call this function in atomic context. + * Do not update ab5500 registers directly till this + * is fixed. + */ + + if (!host) { + ab->otg.host = NULL; + schedule_work(&ab->phy_dis_work); + } else { + ab->otg.host = host; + } + + return 0; +} + +static int ab5500_usb_set_peripheral(struct otg_transceiver *otg, + struct usb_gadget *gadget) +{ + struct ab5500_usb *ab; + + if (!otg) + return -ENODEV; + + ab = xceiv_to_ab(otg); + + /* Some drivers call this function in atomic context. + * Do not update ab5500 registers directly till this + * is fixed. 
+ */ + + if (!gadget) { + ab->otg.gadget = NULL; + schedule_work(&ab->phy_dis_work); + } else { + ab->otg.gadget = gadget; + } + + return 0; +} + +static int __devinit ab5500_usb_probe(struct platform_device *pdev) +{ + struct ab5500_usb *ab; + struct abx500_usbgpio_platform_data *usb_pdata = + pdev->dev.platform_data; + int err; + int ret = -1; + ab = kzalloc(sizeof *ab, GFP_KERNEL); + if (!ab) + return -ENOMEM; + + ab->dev = &pdev->dev; + ab->otg.dev = ab->dev; + ab->otg.label = "ab5500"; + ab->otg.state = OTG_STATE_B_IDLE; + ab->otg.set_host = ab5500_usb_set_host; + ab->otg.set_peripheral = ab5500_usb_set_peripheral; + ab->otg.set_suspend = ab5500_usb_set_suspend; + ab->otg.set_power = ab5500_usb_set_power; + ab->usb_gpio = usb_pdata; + ab->mode = USB_IDLE; + + platform_set_drvdata(pdev, ab); + + ATOMIC_INIT_NOTIFIER_HEAD(&ab->otg.notifier); + + /* v1: Wait for link status to become stable. + * all: Updates form set_host and set_peripheral as they are atomic. + */ + INIT_DELAYED_WORK(&ab->dwork, ab5500_usb_delayed_work); + + INIT_DELAYED_WORK_DEFERRABLE(&ab->work_usb_workaround, + ab5500_usb_load); + + err = otg_set_transceiver(&ab->otg); + if (err) + dev_err(&pdev->dev, "Can't register transceiver\n"); + + ab->usb_cs_gpio = ab->usb_gpio->usb_cs; + + ab->rev = abx500_get_chip_id(ab->dev); + + ab->sysclk = clk_get(ab->dev, "sysclk"); + if (IS_ERR(ab->sysclk)) { + ret = PTR_ERR(ab->sysclk); + ab->sysclk = NULL; + return ret; + } + + ab->v_ape = regulator_get(ab->dev, "v-ape"); + if (!ab->v_ape) { + dev_err(ab->dev, "Could not get v-ape supply\n"); + + return -EINVAL; + } + + ab5500_usb_irq_setup(pdev, ab); + + ret = gpio_request(ab->usb_cs_gpio, "usb-cs"); + if (ret < 0) + dev_err(&pdev->dev, "usb gpio request fail\n"); + + /* Aquire GPIO alternate config struct for USB */ + err = ab->usb_gpio->get(ab->dev); + if (err < 0) + goto fail1; + + err = ab5500_usb_boot_detect(ab); + if (err < 0) + goto fail1; + + err = ab5500_create_sysfsentries(ab); + if (err < 0) + 
dev_err(ab->dev, "usb create sysfs entries failed\n"); + + return 0; + +fail1: + ab5500_usb_irq_free(ab); + kfree(ab); + return err; +} + +static int __devexit ab5500_usb_remove(struct platform_device *pdev) +{ + return 0; +} + +static struct platform_driver ab5500_usb_driver = { + .driver = { + .name = "ab5500-usb", + .owner = THIS_MODULE, + }, + .probe = ab5500_usb_probe, + .remove = __devexit_p(ab5500_usb_remove), +}; + +static int __init ab5500_usb_init(void) +{ + return platform_driver_register(&ab5500_usb_driver); +} +subsys_initcall(ab5500_usb_init); + +static void __exit ab5500_usb_exit(void) +{ + platform_driver_unregister(&ab5500_usb_driver); +} +module_exit(ab5500_usb_exit); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/otg/ab8500-usb.c b/drivers/usb/otg/ab8500-usb.c index a84af677dc5..49c5d31c150 100644 --- a/drivers/usb/otg/ab8500-usb.c +++ b/drivers/usb/otg/ab8500-usb.c @@ -29,47 +29,117 @@ #include <linux/notifier.h> #include <linux/interrupt.h> #include <linux/delay.h> +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/err.h> #include <linux/mfd/abx500.h> #include <linux/mfd/abx500/ab8500.h> +#include <linux/mfd/dbx500-prcmu.h> +#include <linux/kernel_stat.h> +#include <linux/pm_qos.h> #define AB8500_MAIN_WD_CTRL_REG 0x01 #define AB8500_USB_LINE_STAT_REG 0x80 #define AB8500_USB_PHY_CTRL_REG 0x8A +#define AB8500_VBUS_CTRL_REG 0x82 +#define AB8500_IT_SOURCE2_REG 0x01 +#define AB8500_IT_SOURCE20_REG 0x13 +#define AB8500_SRC_INT_USB_HOST 0x04 +#define AB8500_SRC_INT_USB_DEVICE 0x80 #define AB8500_BIT_OTG_STAT_ID (1 << 0) #define AB8500_BIT_PHY_CTRL_HOST_EN (1 << 0) #define AB8500_BIT_PHY_CTRL_DEVICE_EN (1 << 1) #define AB8500_BIT_WD_CTRL_ENABLE (1 << 0) #define AB8500_BIT_WD_CTRL_KICK (1 << 1) +#define AB8500_BIT_VBUS_ENABLE (1 << 0) #define AB8500_V1x_LINK_STAT_WAIT (HZ/10) #define AB8500_WD_KICK_DELAY_US 100 /* usec */ #define AB8500_WD_V11_DISABLE_DELAY_US 100 /* usec */ +#define AB8500_V20_31952_DISABLE_DELAY_US 100 /* usec */ 
#define AB8500_WD_V10_DISABLE_DELAY_MS 100 /* ms */ +/* Registers in bank 0x11 */ +#define AB8500_BANK12_ACCESS 0x00 + +/* Registers in bank 0x12 */ +#define AB8500_USB_PHY_TUNE1 0x05 +#define AB8500_USB_PHY_TUNE2 0x06 +#define AB8500_USB_PHY_TUNE3 0x07 + +static struct pm_qos_request usb_pm_qos_latency; +static bool usb_pm_qos_is_latency_0; + +#define USB_PROBE_DELAY 1000 /* 1 seconds */ +#define USB_LIMIT (200) /* If we have more than 200 irqs per second */ + +#define PUBLIC_ID_BACKUPRAM1 (U8500_BACKUPRAM1_BASE + 0x0FC0) +#define MAX_USB_SERIAL_NUMBER_LEN 31 +#define AB8505_USB_LINE_STAT_REG 0x94 + /* Usb line status register */ enum ab8500_usb_link_status { - USB_LINK_NOT_CONFIGURED = 0, - USB_LINK_STD_HOST_NC, - USB_LINK_STD_HOST_C_NS, - USB_LINK_STD_HOST_C_S, - USB_LINK_HOST_CHG_NM, - USB_LINK_HOST_CHG_HS, - USB_LINK_HOST_CHG_HS_CHIRP, - USB_LINK_DEDICATED_CHG, - USB_LINK_ACA_RID_A, - USB_LINK_ACA_RID_B, - USB_LINK_ACA_RID_C_NM, - USB_LINK_ACA_RID_C_HS, - USB_LINK_ACA_RID_C_HS_CHIRP, - USB_LINK_HM_IDGND, - USB_LINK_RESERVED, - USB_LINK_NOT_VALID_LINK + USB_LINK_NOT_CONFIGURED_8500 = 0, + USB_LINK_STD_HOST_NC_8500, + USB_LINK_STD_HOST_C_NS_8500, + USB_LINK_STD_HOST_C_S_8500, + USB_LINK_HOST_CHG_NM_8500, + USB_LINK_HOST_CHG_HS_8500, + USB_LINK_HOST_CHG_HS_CHIRP_8500, + USB_LINK_DEDICATED_CHG_8500, + USB_LINK_ACA_RID_A_8500, + USB_LINK_ACA_RID_B_8500, + USB_LINK_ACA_RID_C_NM_8500, + USB_LINK_ACA_RID_C_HS_8500, + USB_LINK_ACA_RID_C_HS_CHIRP_8500, + USB_LINK_HM_IDGND_8500, + USB_LINK_RESERVED_8500, + USB_LINK_NOT_VALID_LINK_8500, +}; + +enum ab8505_usb_link_status { + USB_LINK_NOT_CONFIGURED_8505 = 0, + USB_LINK_STD_HOST_NC_8505, + USB_LINK_STD_HOST_C_NS_8505, + USB_LINK_STD_HOST_C_S_8505, + USB_LINK_CDP_8505, + USB_LINK_RESERVED0_8505, + USB_LINK_RESERVED1_8505, + USB_LINK_DEDICATED_CHG_8505, + USB_LINK_ACA_RID_A_8505, + USB_LINK_ACA_RID_B_8505, + USB_LINK_ACA_RID_C_NM_8505, + USB_LINK_RESERVED2_8505, + USB_LINK_RESERVED3_8505, + USB_LINK_HM_IDGND_8505, + 
USB_LINK_CHARGERPORT_NOT_OK_8505, + USB_LINK_CHARGER_DM_HIGH_8505, + USB_LINK_PHYEN_NO_VBUS_NO_IDGND_8505, + USB_LINK_STD_UPSTREAM_NO_IDGNG_NO_VBUS_8505, + USB_LINK_STD_UPSTREAM_8505, + USB_LINK_CHARGER_SE1_8505, + USB_LINK_CARKIT_CHGR_1_8505, + USB_LINK_CARKIT_CHGR_2_8505, + USB_LINK_ACA_DOCK_CHGR_8505, + USB_LINK_SAMSUNG_BOOT_CBL_PHY_EN_8505, + USB_LINK_SAMSUNG_BOOT_CBL_PHY_DISB_8505, + USB_LINK_SAMSUNG_UART_CBL_PHY_EN_8505, + USB_LINK_SAMSUNG_UART_CBL_PHY_DISB_8505, + USB_LINK_MOTOROLA_FACTORY_CBL_PHY_EN_8505, +}; + +enum ab8500_usb_mode { + USB_IDLE = 0, + USB_PERIPHERAL, + USB_HOST, + USB_DEDICATED_CHG }; struct ab8500_usb { struct usb_phy phy; struct device *dev; + struct ab8500 *ab8500; int irq_num_id_rise; int irq_num_id_fall; int irq_num_vbus_rise; @@ -79,7 +149,13 @@ struct ab8500_usb { struct delayed_work dwork; struct work_struct phy_dis_work; unsigned long link_status_wait; - int rev; + enum ab8500_usb_mode mode; + struct clk *sysclk; + struct regulator *v_ape; + struct regulator *v_musb; + struct regulator *v_ulpi; + struct delayed_work work_usb_workaround; + bool sysfs_flag; }; static inline struct ab8500_usb *phy_to_ab(struct usb_phy *x) @@ -102,10 +178,8 @@ static void ab8500_usb_wd_workaround(struct ab8500_usb *ab) (AB8500_BIT_WD_CTRL_ENABLE | AB8500_BIT_WD_CTRL_KICK)); - if (ab->rev > 0x10) /* v1.1 v2.0 */ + if (!is_ab8500_1p0_or_earlier(ab->ab8500)) udelay(AB8500_WD_V11_DISABLE_DELAY_US); - else /* v1.0 */ - msleep(AB8500_WD_V10_DISABLE_DELAY_MS); abx500_set_register_interruptible(ab->dev, AB8500_SYS_CTRL2_BLOCK, @@ -113,146 +187,412 @@ static void ab8500_usb_wd_workaround(struct ab8500_usb *ab) 0); } -static void ab8500_usb_phy_ctrl(struct ab8500_usb *ab, bool sel_host, +static void ab8500_usb_load(struct work_struct *work) +{ + int cpu; + unsigned int num_irqs = 0; + static unsigned int old_num_irqs = UINT_MAX; + struct delayed_work *work_usb_workaround = to_delayed_work(work); + struct ab8500_usb *ab = container_of(work_usb_workaround, + 
struct ab8500_usb, work_usb_workaround); + + for_each_online_cpu(cpu) + num_irqs += kstat_irqs_cpu(IRQ_DB8500_USBOTG, cpu); + + if ((num_irqs > old_num_irqs) && + (num_irqs - old_num_irqs) > USB_LIMIT) { + + prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP, + "usb", 125); + if (!usb_pm_qos_is_latency_0) { + + pm_qos_add_request(&usb_pm_qos_latency, + PM_QOS_CPU_DMA_LATENCY, 0); + usb_pm_qos_is_latency_0 = true; + } + } else { + + if (usb_pm_qos_is_latency_0) { + + pm_qos_remove_request(&usb_pm_qos_latency); + usb_pm_qos_is_latency_0 = false; + } + + prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP, + "usb", 25); + } + old_num_irqs = num_irqs; + + schedule_delayed_work_on(0, + &ab->work_usb_workaround, + msecs_to_jiffies(USB_PROBE_DELAY)); +} + +static void ab8500_usb_regulator_ctrl(struct ab8500_usb *ab, bool sel_host, bool enable) { - u8 ctrl_reg; - abx500_get_register_interruptible(ab->dev, - AB8500_USB, - AB8500_USB_PHY_CTRL_REG, - &ctrl_reg); - if (sel_host) { - if (enable) - ctrl_reg |= AB8500_BIT_PHY_CTRL_HOST_EN; - else - ctrl_reg &= ~AB8500_BIT_PHY_CTRL_HOST_EN; + int ret = 0, volt = 0; + + if (enable) { + regulator_enable(ab->v_ape); + if (!is_ab8500_2p0_or_earlier(ab->ab8500)) { + ret = regulator_set_voltage(ab->v_ulpi, + 1300000, 1350000); + if (ret < 0) + dev_err(ab->dev, "Failed to set the Vintcore" + " to 1.3V, ret=%d\n", ret); + ret = regulator_set_optimum_mode(ab->v_ulpi, + 28000); + if (ret < 0) + dev_err(ab->dev, "Failed to set optimum mode" + " (ret=%d)\n", ret); + + } + regulator_enable(ab->v_ulpi); + if (!is_ab8500_2p0_or_earlier(ab->ab8500)) { + volt = regulator_get_voltage(ab->v_ulpi); + if ((volt != 1300000) && (volt != 1350000)) + dev_err(ab->dev, "Vintcore is not" + " set to 1.3V" + " volt=%d\n", volt); + } + regulator_enable(ab->v_musb); + } else { - if (enable) - ctrl_reg |= AB8500_BIT_PHY_CTRL_DEVICE_EN; - else - ctrl_reg &= ~AB8500_BIT_PHY_CTRL_DEVICE_EN; + regulator_disable(ab->v_musb); + regulator_disable(ab->v_ulpi); + 
regulator_disable(ab->v_ape); } +} - abx500_set_register_interruptible(ab->dev, + +static void ab8500_usb_phy_enable(struct ab8500_usb *ab, bool sel_host) +{ + u8 bit; + bit = sel_host ? AB8500_BIT_PHY_CTRL_HOST_EN : + AB8500_BIT_PHY_CTRL_DEVICE_EN; + + clk_enable(ab->sysclk); + + ab8500_usb_regulator_ctrl(ab, sel_host, true); + + prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, + (char *)dev_name(ab->dev), 100); + + schedule_delayed_work_on(0, + &ab->work_usb_workaround, + msecs_to_jiffies(USB_PROBE_DELAY)); + + abx500_mask_and_set_register_interruptible(ab->dev, AB8500_USB, AB8500_USB_PHY_CTRL_REG, - ctrl_reg); + bit, + bit); - /* Needed to enable the phy.*/ - if (enable) - ab8500_usb_wd_workaround(ab); } -#define ab8500_usb_host_phy_en(ab) ab8500_usb_phy_ctrl(ab, true, true) -#define ab8500_usb_host_phy_dis(ab) ab8500_usb_phy_ctrl(ab, true, false) -#define ab8500_usb_peri_phy_en(ab) ab8500_usb_phy_ctrl(ab, false, true) -#define ab8500_usb_peri_phy_dis(ab) ab8500_usb_phy_ctrl(ab, false, false) +static void ab8500_usb_wd_linkstatus(struct ab8500_usb *ab,u8 bit) +{ + /* Workaround for v2.0 bug # 31952 */ + if (is_ab8500_2p0(ab->ab8500)) { + abx500_mask_and_set_register_interruptible(ab->dev, + AB8500_USB, + AB8500_USB_PHY_CTRL_REG, + bit, + bit); + udelay(AB8500_V20_31952_DISABLE_DELAY_US); + } +} -static int ab8500_usb_link_status_update(struct ab8500_usb *ab) +static void ab8500_usb_phy_disable(struct ab8500_usb *ab, bool sel_host) { - u8 reg; - enum ab8500_usb_link_status lsts; - void *v = NULL; - enum usb_phy_events event; + u8 bit; + bit = sel_host ? 
AB8500_BIT_PHY_CTRL_HOST_EN : + AB8500_BIT_PHY_CTRL_DEVICE_EN; + + ab8500_usb_wd_linkstatus(ab,bit); + + abx500_mask_and_set_register_interruptible(ab->dev, + AB8500_USB, + AB8500_USB_PHY_CTRL_REG, + bit, + 0); + + /* Needed to disable the phy.*/ + ab8500_usb_wd_workaround(ab); + + clk_disable(ab->sysclk); + + ab8500_usb_regulator_ctrl(ab, sel_host, false); + + prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, + (char *)dev_name(ab->dev), 50); + + if (!sel_host) { + + cancel_delayed_work_sync(&ab->work_usb_workaround); + prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP, + "usb", 25); + } +} + +#define ab8500_usb_host_phy_en(ab) ab8500_usb_phy_enable(ab, true) +#define ab8500_usb_host_phy_dis(ab) ab8500_usb_phy_disable(ab, true) +#define ab8500_usb_peri_phy_en(ab) ab8500_usb_phy_enable(ab, false) +#define ab8500_usb_peri_phy_dis(ab) ab8500_usb_phy_disable(ab, false) - abx500_get_register_interruptible(ab->dev, - AB8500_USB, - AB8500_USB_LINE_STAT_REG, - ®); +static int ab8505_usb_link_status_update(struct ab8500_usb *ab, + enum ab8505_usb_link_status lsts) +{ + enum usb_phy_events event=0; - lsts = (reg >> 3) & 0x0F; + dev_dbg(ab->dev, "ab8505_usb_link_status_update %d\n", lsts); switch (lsts) { - case USB_LINK_NOT_CONFIGURED: - case USB_LINK_RESERVED: - case USB_LINK_NOT_VALID_LINK: - /* TODO: Disable regulators. 
*/ - ab8500_usb_host_phy_dis(ab); - ab8500_usb_peri_phy_dis(ab); - ab->phy.state = OTG_STATE_B_IDLE; + case USB_LINK_ACA_RID_B_8505: + event = USB_EVENT_RIDB; + case USB_LINK_NOT_CONFIGURED_8505: + case USB_LINK_RESERVED0_8505: + case USB_LINK_RESERVED1_8505: + case USB_LINK_RESERVED2_8505: + case USB_LINK_RESERVED3_8505: + if (ab->mode == USB_PERIPHERAL) + atomic_notifier_call_chain(&ab->phy.notifier, + USB_EVENT_CLEAN, + &ab->vbus_draw); + ab->mode = USB_IDLE; ab->phy.otg->default_a = false; ab->vbus_draw = 0; - event = USB_EVENT_NONE; + if (event != USB_EVENT_RIDB) + event = USB_EVENT_NONE; break; - case USB_LINK_STD_HOST_NC: - case USB_LINK_STD_HOST_C_NS: - case USB_LINK_STD_HOST_C_S: - case USB_LINK_HOST_CHG_NM: - case USB_LINK_HOST_CHG_HS: - case USB_LINK_HOST_CHG_HS_CHIRP: - if (ab->phy.otg->gadget) { - /* TODO: Enable regulators. */ + case USB_LINK_ACA_RID_C_NM_8505: + event = USB_EVENT_RIDC; + case USB_LINK_STD_HOST_NC_8505: + case USB_LINK_STD_HOST_C_NS_8505: + case USB_LINK_STD_HOST_C_S_8505: + if (ab->mode == USB_HOST) { + ab->mode = USB_PERIPHERAL; + ab8500_usb_host_phy_dis(ab); + atomic_notifier_call_chain(&ab->phy.notifier, + USB_EVENT_CLEAN, + &ab->vbus_draw); ab8500_usb_peri_phy_en(ab); - v = ab->phy.otg->gadget; } - event = USB_EVENT_VBUS; + if (ab->mode == USB_IDLE) { + ab->mode = USB_PERIPHERAL; + atomic_notifier_call_chain(&ab->phy.notifier, + USB_EVENT_PREPARE, + &ab->vbus_draw); + ab8500_usb_peri_phy_en(ab); + } + if (event != USB_EVENT_RIDC) + event = USB_EVENT_VBUS; break; - - case USB_LINK_HM_IDGND: - if (ab->phy.otg->host) { - /* TODO: Enable regulators. 
*/ + case USB_LINK_ACA_RID_A_8505: + event = USB_EVENT_RIDA; + case USB_LINK_HM_IDGND_8505: + if (ab->mode == USB_PERIPHERAL) { + ab->mode = USB_HOST; + ab8500_usb_peri_phy_dis(ab); + atomic_notifier_call_chain(&ab->phy.notifier, + USB_EVENT_PREPARE, + &ab->vbus_draw); + ab8500_usb_host_phy_en(ab); + } + if (ab->mode == USB_IDLE) { + ab->mode = USB_HOST; + atomic_notifier_call_chain(&ab->phy.notifier, + USB_EVENT_PREPARE, + &ab->vbus_draw); ab8500_usb_host_phy_en(ab); - v = ab->phy.otg->host; } - ab->phy.state = OTG_STATE_A_IDLE; ab->phy.otg->default_a = true; - event = USB_EVENT_ID; + if (event != USB_EVENT_RIDA) + event = USB_EVENT_ID; + atomic_notifier_call_chain(&ab->phy.notifier, + event, + &ab->vbus_draw); break; - case USB_LINK_ACA_RID_A: - case USB_LINK_ACA_RID_B: - /* TODO */ - case USB_LINK_ACA_RID_C_NM: - case USB_LINK_ACA_RID_C_HS: - case USB_LINK_ACA_RID_C_HS_CHIRP: - case USB_LINK_DEDICATED_CHG: - /* TODO: vbus_draw */ - event = USB_EVENT_CHARGER; + case USB_LINK_DEDICATED_CHG_8505: + ab->mode = USB_DEDICATED_CHG; + event = USB_EVENT_CHARGER; + atomic_notifier_call_chain(&ab->phy.notifier, + event, + &ab->vbus_draw); break; - } - - atomic_notifier_call_chain(&ab->phy.notifier, event, v); + default: + break; + } return 0; } -static void ab8500_usb_delayed_work(struct work_struct *work) +static int ab8500_usb_link_status_update(struct ab8500_usb *ab, + enum ab8500_usb_link_status lsts) { - struct ab8500_usb *ab = container_of(work, struct ab8500_usb, - dwork.work); + enum usb_phy_events event=0; - ab8500_usb_link_status_update(ab); + dev_dbg(ab->dev, "ab8500_usb_link_status_update %d\n", lsts); + + switch (lsts) { + case USB_LINK_ACA_RID_B_8500: + event = USB_EVENT_RIDB; + case USB_LINK_NOT_CONFIGURED_8500: + case USB_LINK_RESERVED_8500: + case USB_LINK_NOT_VALID_LINK_8500: + if (ab->mode == USB_PERIPHERAL) + atomic_notifier_call_chain(&ab->phy.notifier, + USB_EVENT_CLEAN, + &ab->vbus_draw); + ab->mode = USB_IDLE; + ab->phy.otg->default_a = false; + 
ab->vbus_draw = 0; + if (event != USB_EVENT_RIDB) + event = USB_EVENT_NONE; + break; + case USB_LINK_ACA_RID_C_NM_8500: + case USB_LINK_ACA_RID_C_HS_8500: + case USB_LINK_ACA_RID_C_HS_CHIRP_8500: + event = USB_EVENT_RIDC; + case USB_LINK_STD_HOST_NC_8500: + case USB_LINK_STD_HOST_C_NS_8500: + case USB_LINK_STD_HOST_C_S_8500: + case USB_LINK_HOST_CHG_NM_8500: + case USB_LINK_HOST_CHG_HS_8500: + case USB_LINK_HOST_CHG_HS_CHIRP_8500: + if (ab->mode == USB_HOST) { + ab->mode = USB_PERIPHERAL; + ab8500_usb_host_phy_dis(ab); + atomic_notifier_call_chain(&ab->phy.notifier, + USB_EVENT_PREPARE, + &ab->vbus_draw); + ab8500_usb_peri_phy_en(ab); + } + if (ab->mode == USB_IDLE) { + ab->mode = USB_PERIPHERAL; + atomic_notifier_call_chain(&ab->phy.notifier, + USB_EVENT_PREPARE, + &ab->vbus_draw); + ab8500_usb_peri_phy_en(ab); + } + if (event != USB_EVENT_RIDC) + event = USB_EVENT_VBUS; + break; + + case USB_LINK_ACA_RID_A_8500: + event = USB_EVENT_RIDA; + case USB_LINK_HM_IDGND_8500: + if (ab->mode == USB_PERIPHERAL) { + ab->mode = USB_HOST; + ab8500_usb_peri_phy_dis(ab); + atomic_notifier_call_chain(&ab->phy.notifier, + USB_EVENT_PREPARE, + &ab->vbus_draw); + ab8500_usb_host_phy_en(ab); + } + if (ab->mode == USB_IDLE) { + ab->mode = USB_HOST; + atomic_notifier_call_chain(&ab->phy.notifier, + USB_EVENT_PREPARE, + &ab->vbus_draw); + ab8500_usb_host_phy_en(ab); + } + ab->phy.otg->default_a = true; + if (event != USB_EVENT_RIDA) + event = USB_EVENT_ID; + atomic_notifier_call_chain(&ab->phy.notifier, + event, + &ab->vbus_draw); + break; + + case USB_LINK_DEDICATED_CHG_8500: + ab->mode = USB_DEDICATED_CHG; + event = USB_EVENT_CHARGER; + atomic_notifier_call_chain(&ab->phy.notifier, + event, + &ab->vbus_draw); + break; + } + return 0; } -static irqreturn_t ab8500_usb_v1x_common_irq(int irq, void *data) +static int abx500_usb_link_status_update(struct ab8500_usb *ab) { - struct ab8500_usb *ab = (struct ab8500_usb *) data; + u8 reg; + int ret = 0; - /* Wait for link status to become 
stable. */ - schedule_delayed_work(&ab->dwork, ab->link_status_wait); + if (!(ab->sysfs_flag)) { + if (is_ab8500(ab->ab8500)) { + enum ab8500_usb_link_status lsts; - return IRQ_HANDLED; + abx500_get_register_interruptible(ab->dev, + AB8500_USB, + AB8500_USB_LINE_STAT_REG, + ®); + lsts = (reg >> 3) & 0x0F; + ret = ab8500_usb_link_status_update(ab, lsts); + } + if (is_ab8505(ab->ab8500)) { + enum ab8505_usb_link_status lsts; + + abx500_get_register_interruptible(ab->dev, + AB8500_USB, + AB8505_USB_LINE_STAT_REG, + ®); + lsts = (reg >> 3) & 0x1F; + ret = ab8505_usb_link_status_update(ab, lsts); + } + } + return ret; +} + +static void ab8500_usb_delayed_work(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct ab8500_usb *ab = container_of(dwork, struct ab8500_usb, dwork); + abx500_usb_link_status_update(ab); } -static irqreturn_t ab8500_usb_v1x_vbus_fall_irq(int irq, void *data) +static irqreturn_t ab8500_usb_disconnect_irq(int irq, void *data) { struct ab8500_usb *ab = (struct ab8500_usb *) data; + enum usb_phy_events event = USB_EVENT_NONE; /* Link status will not be updated till phy is disabled. */ - ab8500_usb_peri_phy_dis(ab); - - /* Wait for link status to become stable. 
*/ - schedule_delayed_work(&ab->dwork, ab->link_status_wait); + if (ab->mode == USB_HOST) { + ab->phy.otg->default_a = false; + ab->vbus_draw = 0; + atomic_notifier_call_chain(&ab->phy.notifier, + event, &ab->vbus_draw); + ab8500_usb_host_phy_dis(ab); + } + if (ab->mode == USB_PERIPHERAL) { + atomic_notifier_call_chain(&ab->phy.notifier, + event, &ab->vbus_draw); + ab8500_usb_peri_phy_dis(ab); + } + if (is_ab8500_2p0(ab->ab8500)) { + if (ab->mode == USB_DEDICATED_CHG) { + ab8500_usb_wd_linkstatus(ab, AB8500_BIT_PHY_CTRL_DEVICE_EN); + abx500_mask_and_set_register_interruptible(ab->dev, + AB8500_USB, + AB8500_USB_PHY_CTRL_REG, + AB8500_BIT_PHY_CTRL_DEVICE_EN, + 0); + } + } return IRQ_HANDLED; } -static irqreturn_t ab8500_usb_v20_irq(int irq, void *data) +static irqreturn_t ab8500_usb_v20_link_status_irq(int irq, void *data) { struct ab8500_usb *ab = (struct ab8500_usb *) data; - ab8500_usb_link_status_update(ab); + abx500_usb_link_status_update(ab); return IRQ_HANDLED; } @@ -267,8 +607,42 @@ static void ab8500_usb_phy_disable_work(struct work_struct *work) if (!ab->phy.otg->gadget) ab8500_usb_peri_phy_dis(ab); + } +static unsigned ab8500_eyediagram_workaroud(struct ab8500_usb *ab, unsigned mA) +{ + /* AB V2 has eye diagram issues when drawing more + * than 100mA from VBUS.So setting charging current + * to 100mA in case of standard host + */ + if (is_ab8500_2p0_or_earlier(ab->ab8500)) + if (mA > 100) + mA = 100; + + return mA; +} + +#ifdef CONFIG_USB_OTG_20 +static int ab8500_usb_start_srp(struct usb_phy *phy, unsigned mA) +{ + struct ab8500_usb *ab; + + if (!phy) + return -ENODEV; + + ab = phy_to_ab(phy); + + atomic_notifier_call_chain(&ab->phy.notifier, + USB_EVENT_PREPARE, + &ab->vbus_draw); + + ab8500_usb_peri_phy_en(ab); + + return 0; +} +#endif + static int ab8500_usb_set_power(struct usb_phy *phy, unsigned mA) { struct ab8500_usb *ab; @@ -278,18 +652,15 @@ static int ab8500_usb_set_power(struct usb_phy *phy, unsigned mA) ab = phy_to_ab(phy); + mA = 
ab8500_eyediagram_workaroud(ab, mA); + ab->vbus_draw = mA; - if (mA) - atomic_notifier_call_chain(&ab->phy.notifier, - USB_EVENT_ENUMERATED, ab->phy.otg->gadget); + atomic_notifier_call_chain(&ab->phy.notifier, + USB_EVENT_VBUS, &ab->vbus_draw); return 0; } -/* TODO: Implement some way for charging or other drivers to read - * ab->vbus_draw. - */ - static int ab8500_usb_set_suspend(struct usb_phy *x, int suspend) { /* TODO */ @@ -306,25 +677,13 @@ static int ab8500_usb_set_peripheral(struct usb_otg *otg, ab = phy_to_ab(otg->phy); + ab->phy.otg->gadget = gadget; /* Some drivers call this function in atomic context. * Do not update ab8500 registers directly till this * is fixed. */ - - if (!gadget) { - /* TODO: Disable regulators. */ - otg->gadget = NULL; + if (!gadget) schedule_work(&ab->phy_dis_work); - } else { - otg->gadget = gadget; - otg->phy->state = OTG_STATE_B_IDLE; - - /* Phy will not be enabled if cable is already - * plugged-in. Schedule to enable phy. - * Use same delay to avoid any race condition. - */ - schedule_delayed_work(&ab->dwork, ab->link_status_wait); - } return 0; } @@ -338,22 +697,93 @@ static int ab8500_usb_set_host(struct usb_otg *otg, struct usb_bus *host) ab = phy_to_ab(otg->phy); + ab->phy.otg->host = host; + /* Some drivers call this function in atomic context. * Do not update ab8500 registers directly till this * is fixed. */ - - if (!host) { - /* TODO: Disable regulators. */ - otg->host = NULL; + if (!host) schedule_work(&ab->phy_dis_work); - } else { - otg->host = host; - /* Phy will not be enabled if cable is already - * plugged-in. Schedule to enable phy. - * Use same delay to avoid any race condition. - */ - schedule_delayed_work(&ab->dwork, ab->link_status_wait); + + return 0; +} +/** + * ab8500_usb_boot_detect : detect the USB cable during boot time. + * @device: value for device. + * + * This function is used to detect the USB cable during boot time. 
+ */ +static int ab8500_usb_boot_detect(struct ab8500_usb *ab) +{ + /* Disabling PHY before selective enable or disable */ + abx500_mask_and_set_register_interruptible(ab->dev, + AB8500_USB, + AB8500_USB_PHY_CTRL_REG, + AB8500_BIT_PHY_CTRL_DEVICE_EN, + AB8500_BIT_PHY_CTRL_DEVICE_EN); + + udelay(100); + + abx500_mask_and_set_register_interruptible(ab->dev, + AB8500_USB, + AB8500_USB_PHY_CTRL_REG, + AB8500_BIT_PHY_CTRL_DEVICE_EN, + 0); + + abx500_mask_and_set_register_interruptible(ab->dev, + AB8500_USB, + AB8500_USB_PHY_CTRL_REG, + AB8500_BIT_PHY_CTRL_HOST_EN, + AB8500_BIT_PHY_CTRL_HOST_EN); + + udelay(100); + + abx500_mask_and_set_register_interruptible(ab->dev, + AB8500_USB, + AB8500_USB_PHY_CTRL_REG, + AB8500_BIT_PHY_CTRL_HOST_EN, + 0); + + return 0; +} + +static void ab8500_usb_regulator_put(struct ab8500_usb *ab) +{ + + if (ab->v_ape) + regulator_put(ab->v_ape); + + if (ab->v_ulpi) + regulator_put(ab->v_ulpi); + + if (ab->v_musb) + regulator_put(ab->v_musb); +} + +static int ab8500_usb_regulator_get(struct ab8500_usb *ab) +{ + int err; + + ab->v_ape = regulator_get(ab->dev, "v-ape"); + if (IS_ERR(ab->v_ape)) { + dev_err(ab->dev, "Could not get v-ape supply\n"); + err = PTR_ERR(ab->v_ape); + return err; + } + + ab->v_ulpi = regulator_get(ab->dev, "vddulpivio18"); + if (IS_ERR(ab->v_ulpi)) { + dev_err(ab->dev, "Could not get vddulpivio18 supply\n"); + err = PTR_ERR(ab->v_ulpi); + return err; + } + + ab->v_musb = regulator_get(ab->dev, "musb_1v8"); + if (IS_ERR(ab->v_musb)) { + dev_err(ab->dev, "Could not get musb_1v8 supply\n"); + err = PTR_ERR(ab->v_musb); + return err; } return 0; @@ -361,126 +791,179 @@ static int ab8500_usb_set_host(struct usb_otg *otg, struct usb_bus *host) static void ab8500_usb_irq_free(struct ab8500_usb *ab) { - if (ab->rev < 0x20) { + if (ab->irq_num_id_rise) free_irq(ab->irq_num_id_rise, ab); + + if (ab->irq_num_id_fall) free_irq(ab->irq_num_id_fall, ab); + + if (ab->irq_num_vbus_rise) free_irq(ab->irq_num_vbus_rise, ab); + + if 
(ab->irq_num_vbus_fall) free_irq(ab->irq_num_vbus_fall, ab); - } else { + + if (ab->irq_num_link_status) free_irq(ab->irq_num_link_status, ab); - } } -static int ab8500_usb_v1x_res_setup(struct platform_device *pdev, +static int ab8500_usb_irq_setup(struct platform_device *pdev, struct ab8500_usb *ab) { int err; + int irq; + + if (!is_ab8500_1p0_or_earlier(ab->ab8500)) { + irq = platform_get_irq_byname(pdev, "USB_LINK_STATUS"); + if (irq < 0) { + err = irq; + dev_err(&pdev->dev, "Link status irq not found\n"); + goto irq_fail; + } - ab->irq_num_id_rise = platform_get_irq_byname(pdev, "ID_WAKEUP_R"); - if (ab->irq_num_id_rise < 0) { - dev_err(&pdev->dev, "ID rise irq not found\n"); - return ab->irq_num_id_rise; - } - err = request_threaded_irq(ab->irq_num_id_rise, NULL, - ab8500_usb_v1x_common_irq, - IRQF_NO_SUSPEND | IRQF_SHARED, - "usb-id-rise", ab); - if (err < 0) { - dev_err(ab->dev, "request_irq failed for ID rise irq\n"); - goto fail0; + err = request_threaded_irq(irq, NULL, + ab8500_usb_v20_link_status_irq, + IRQF_NO_SUSPEND | IRQF_SHARED, + "usb-link-status", ab); + if (err < 0) { + dev_err(ab->dev, + "request_irq failed for link status irq\n"); + return err; + } + ab->irq_num_link_status = irq; } - ab->irq_num_id_fall = platform_get_irq_byname(pdev, "ID_WAKEUP_F"); - if (ab->irq_num_id_fall < 0) { + irq = platform_get_irq_byname(pdev, "ID_WAKEUP_F"); + if (irq < 0) { + err = irq; dev_err(&pdev->dev, "ID fall irq not found\n"); return ab->irq_num_id_fall; } - err = request_threaded_irq(ab->irq_num_id_fall, NULL, - ab8500_usb_v1x_common_irq, + err = request_threaded_irq(irq, NULL, + ab8500_usb_disconnect_irq, IRQF_NO_SUSPEND | IRQF_SHARED, "usb-id-fall", ab); if (err < 0) { dev_err(ab->dev, "request_irq failed for ID fall irq\n"); - goto fail1; + goto irq_fail; } + ab->irq_num_id_fall = irq; - ab->irq_num_vbus_rise = platform_get_irq_byname(pdev, "VBUS_DET_R"); - if (ab->irq_num_vbus_rise < 0) { - dev_err(&pdev->dev, "VBUS rise irq not found\n"); - return 
ab->irq_num_vbus_rise; - } - err = request_threaded_irq(ab->irq_num_vbus_rise, NULL, - ab8500_usb_v1x_common_irq, - IRQF_NO_SUSPEND | IRQF_SHARED, - "usb-vbus-rise", ab); - if (err < 0) { - dev_err(ab->dev, "request_irq failed for Vbus rise irq\n"); - goto fail2; - } - - ab->irq_num_vbus_fall = platform_get_irq_byname(pdev, "VBUS_DET_F"); - if (ab->irq_num_vbus_fall < 0) { + irq = platform_get_irq_byname(pdev, "VBUS_DET_F"); + if (irq < 0) { + err = irq; dev_err(&pdev->dev, "VBUS fall irq not found\n"); - return ab->irq_num_vbus_fall; + goto irq_fail; } - err = request_threaded_irq(ab->irq_num_vbus_fall, NULL, - ab8500_usb_v1x_vbus_fall_irq, + err = request_threaded_irq(irq, NULL, + ab8500_usb_disconnect_irq, IRQF_NO_SUSPEND | IRQF_SHARED, "usb-vbus-fall", ab); if (err < 0) { dev_err(ab->dev, "request_irq failed for Vbus fall irq\n"); - goto fail3; + goto irq_fail; } + ab->irq_num_vbus_fall = irq; return 0; -fail3: - free_irq(ab->irq_num_vbus_rise, ab); -fail2: - free_irq(ab->irq_num_id_fall, ab); -fail1: - free_irq(ab->irq_num_id_rise, ab); -fail0: + +irq_fail: + ab8500_usb_irq_free(ab); return err; } -static int ab8500_usb_v2_res_setup(struct platform_device *pdev, - struct ab8500_usb *ab) +/* Sys interfaces */ +static ssize_t +serial_number_show(struct device *dev, + struct device_attribute *attr, char *buf) { - int err; + u32 bufer[5]; + void __iomem *backup_ram = NULL; - ab->irq_num_link_status = platform_get_irq_byname(pdev, - "USB_LINK_STATUS"); - if (ab->irq_num_link_status < 0) { - dev_err(&pdev->dev, "Link status irq not found\n"); - return ab->irq_num_link_status; - } + backup_ram = ioremap(PUBLIC_ID_BACKUPRAM1, 0x14); - err = request_threaded_irq(ab->irq_num_link_status, NULL, - ab8500_usb_v20_irq, - IRQF_NO_SUSPEND | IRQF_SHARED, - "usb-link-status", ab); - if (err < 0) { - dev_err(ab->dev, - "request_irq failed for link status irq\n"); - return err; - } + if (backup_ram) { + bufer[0] = readl(backup_ram); + bufer[1] = readl(backup_ram + 4); + bufer[2] 
= readl(backup_ram + 8); + bufer[3] = readl(backup_ram + 0x0c); + bufer[4] = readl(backup_ram + 0x10); - return 0; + snprintf(buf, MAX_USB_SERIAL_NUMBER_LEN+1, + "%.8X%.8X%.8X%.8X%.8X", + bufer[0], bufer[1], bufer[2], bufer[3], bufer[4]); + + iounmap(backup_ram); + } else + dev_err(dev, "$$\n"); + + return strlen(buf); +} + +static DEVICE_ATTR(serial_number, 0644, serial_number_show, NULL); + +static ssize_t +boot_time_device_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ab8500_usb *ab = dev_get_drvdata(dev); + u8 val = ab->sysfs_flag; + + snprintf(buf, 2, "%d", val); + + return strlen(buf); +} + +static ssize_t +boot_time_device_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t n) +{ + struct ab8500_usb *ab = dev_get_drvdata(dev); + + ab->sysfs_flag = false; + + abx500_usb_link_status_update(ab); + + return n; +} +static DEVICE_ATTR(boot_time_device, 0644, + boot_time_device_show, boot_time_device_store); + + +static struct attribute *ab8500_usb_attributes[] = { + &dev_attr_serial_number.attr, + &dev_attr_boot_time_device.attr, + NULL +}; +static const struct attribute_group ab8500_attr_group = { + .attrs = ab8500_usb_attributes, +}; + +static int ab8500_create_sysfsentries(struct ab8500_usb *ab) +{ + int err; + + err = sysfs_create_group(&ab->dev->kobj, &ab8500_attr_group); + if (err) + sysfs_remove_group(&ab->dev->kobj, &ab8500_attr_group); + + return err; } static int __devinit ab8500_usb_probe(struct platform_device *pdev) { struct ab8500_usb *ab; + struct ab8500 *ab8500; struct usb_otg *otg; int err; int rev; + int ret = -1; + ab8500 = dev_get_drvdata(pdev->dev.parent); rev = abx500_get_chip_id(&pdev->dev); - if (rev < 0) { - dev_err(&pdev->dev, "Chip id read failed\n"); - return rev; - } else if (rev < 0x10) { - dev_err(&pdev->dev, "Unsupported AB8500 chip\n"); + + if (is_ab8500_1p1_or_earlier(ab8500)) { + dev_err(&pdev->dev, "Unsupported AB8500 chip rev=%d\n", rev); return -ENODEV; } @@ 
-495,20 +978,24 @@ static int __devinit ab8500_usb_probe(struct platform_device *pdev) } ab->dev = &pdev->dev; - ab->rev = rev; + ab->ab8500 = ab8500; ab->phy.dev = ab->dev; ab->phy.otg = otg; ab->phy.label = "ab8500"; ab->phy.set_suspend = ab8500_usb_set_suspend; ab->phy.set_power = ab8500_usb_set_power; - ab->phy.state = OTG_STATE_UNDEFINED; + ab->phy.state = OTG_STATE_B_IDLE; otg->phy = &ab->phy; otg->set_host = ab8500_usb_set_host; otg->set_peripheral = ab8500_usb_set_peripheral; +#ifdef CONFIG_USB_OTG_20 + ab->otg.start_srp = ab8500_usb_start_srp; +#endif + ab->sysfs_flag = true; platform_set_drvdata(pdev, ab); - + dev_set_drvdata(ab->dev, ab); ATOMIC_INIT_NOTIFIER_HEAD(&ab->phy.notifier); /* v1: Wait for link status to become stable. @@ -519,27 +1006,98 @@ static int __devinit ab8500_usb_probe(struct platform_device *pdev) /* all: Disable phy when called from set_host and set_peripheral */ INIT_WORK(&ab->phy_dis_work, ab8500_usb_phy_disable_work); - if (ab->rev < 0x20) { - err = ab8500_usb_v1x_res_setup(pdev, ab); - ab->link_status_wait = AB8500_V1x_LINK_STAT_WAIT; - } else { - err = ab8500_usb_v2_res_setup(pdev, ab); + INIT_DELAYED_WORK_DEFERRABLE(&ab->work_usb_workaround, + ab8500_usb_load); + err = ab8500_usb_regulator_get(ab); + if (err) + goto fail0; + + ab->sysclk = clk_get(ab->dev, "sysclk"); + if (IS_ERR(ab->sysclk)) { + err = PTR_ERR(ab->sysclk); + goto fail1; } + err = ab8500_usb_irq_setup(pdev, ab); if (err < 0) - goto fail0; + goto fail2; err = usb_set_transceiver(&ab->phy); if (err) { dev_err(&pdev->dev, "Can't register transceiver\n"); - goto fail1; + goto fail3; } - dev_info(&pdev->dev, "AB8500 usb driver initialized\n"); + /* Write Phy tuning values */ + if (!is_ab8500_2p0_or_earlier(ab->ab8500)) { + /* Enable the PBT/Bank 0x12 access */ + ret = abx500_set_register_interruptible(ab->dev, + AB8500_DEVELOPMENT, + AB8500_BANK12_ACCESS, + 0x01); + if (ret < 0) + printk(KERN_ERR "Failed to enable bank12" + " access ret=%d\n", ret); + + ret = 
abx500_set_register_interruptible(ab->dev, + AB8500_DEBUG, + AB8500_USB_PHY_TUNE1, + 0xC8); + if (ret < 0) + printk(KERN_ERR "Failed to set PHY_TUNE1" + " register ret=%d\n", ret); + + ret = abx500_set_register_interruptible(ab->dev, + AB8500_DEBUG, + AB8500_USB_PHY_TUNE2, + 0x00); + if (ret < 0) + printk(KERN_ERR "Failed to set PHY_TUNE2" + " register ret=%d\n", ret); + + ret = abx500_set_register_interruptible(ab->dev, + AB8500_DEBUG, + AB8500_USB_PHY_TUNE3, + 0x78); + + if (ret < 0) + printk(KERN_ERR "Failed to set PHY_TUNE3" + " regester ret=%d\n", ret); + + /* Switch to normal mode/disable Bank 0x12 access */ + ret = abx500_set_register_interruptible(ab->dev, + AB8500_DEVELOPMENT, + AB8500_BANK12_ACCESS, + 0x00); + + if (ret < 0) + printk(KERN_ERR "Failed to switch bank12" + " access ret=%d\n", ret); + } + /* Needed to enable ID detection. */ + ab8500_usb_wd_workaround(ab); + + prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, + (char *)dev_name(ab->dev), 50); + dev_info(&pdev->dev, "revision 0x%2x driver initialized\n", rev); + + prcmu_qos_add_requirement(PRCMU_QOS_ARM_OPP, "usb", 25); + + err = ab8500_usb_boot_detect(ab); + if (err < 0) + goto fail3; + + err = ab8500_create_sysfsentries(ab); + if (err) + goto fail3; return 0; -fail1: +fail3: ab8500_usb_irq_free(ab); +fail2: + clk_put(ab->sysclk); +fail1: + ab8500_usb_regulator_put(ab); fail0: kfree(otg); kfree(ab); @@ -558,8 +1116,14 @@ static int __devexit ab8500_usb_remove(struct platform_device *pdev) usb_set_transceiver(NULL); - ab8500_usb_host_phy_dis(ab); - ab8500_usb_peri_phy_dis(ab); + if (ab->mode == USB_HOST) + ab8500_usb_host_phy_dis(ab); + else if (ab->mode == USB_PERIPHERAL) + ab8500_usb_peri_phy_dis(ab); + + clk_put(ab->sysclk); + + ab8500_usb_regulator_put(ab); platform_set_drvdata(pdev, NULL); diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 750c5182ca8..c775ac2419d 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -288,6 +288,8 @@ config FB_CIRRUS Say N unless you 
have such a graphics board or plan to get one before you next recompile the kernel. +source "drivers/video/mcde/Kconfig" + config FB_PM2 tristate "Permedia2 support" depends on FB && ((AMIGA && BROKEN) || PCI) @@ -2417,6 +2419,8 @@ source "drivers/video/omap/Kconfig" source "drivers/video/omap2/Kconfig" source "drivers/video/exynos/Kconfig" source "drivers/video/backlight/Kconfig" +source "drivers/video/av8100/Kconfig" +source "drivers/video/b2r2/Kconfig" if VT source "drivers/video/console/Kconfig" diff --git a/drivers/video/Makefile b/drivers/video/Makefile index 9356add945b..3a4ad2ebe33 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -139,6 +139,9 @@ obj-$(CONFIG_FB_SH_MOBILE_MERAM) += sh_mobile_meram.o obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o obj-$(CONFIG_FB_OMAP) += omap/ obj-y += omap2/ +obj-$(CONFIG_FB_MCDE) += mcde/ +obj-$(CONFIG_AV8100) += av8100/ +obj-y += b2r2/ obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o obj-$(CONFIG_FB_CARMINE) += carminefb.o obj-$(CONFIG_FB_MB862XX) += mb862xx/ diff --git a/drivers/video/av8100/Kconfig b/drivers/video/av8100/Kconfig new file mode 100644 index 00000000000..40b9943aaa9 --- /dev/null +++ b/drivers/video/av8100/Kconfig @@ -0,0 +1,48 @@ +config AV8100 + tristate "AV8100 driver support(HDMI/CVBS)" + default n + help + Please enable this feature if hdmi/tvout driver support is required. + +config HDMI_AV8100_DEBUG + bool "HDMI and AV8100 debug messages" + default n + depends on AV8100 + ---help--- + Say Y here if you want the HDMI and AV8100 driver to + output debug messages. + +choice + prompt "AV8100 HW trig method" + default AV8100_HWTRIG_DSI_TE + +config AV8100_HWTRIG_INT + bool "AV8100 HW trig on INT" + depends on AV8100 + ---help--- + If you say Y here AV8100 will use HW triggering + from AV8100 INT to MCDE sync0. 
+ +config AV8100_HWTRIG_I2SDAT3 + bool "AV8100 HW trig on I2SDAT3" + depends on AV8100 + ---help--- + If you say Y here AV8100 will use HW triggering + from AV8100 I2SDAT3 to MCDE sync1. + +config AV8100_HWTRIG_DSI_TE + bool "AV8100 HW trig on DSI" + depends on AV8100 + ---help--- + If you say Y here AV8100 will use HW triggering + using DSI TE polling between AV8100 and MCDE. + +config AV8100_HWTRIG_NONE + bool "AV8100 SW trig" + depends on AV8100 + ---help--- + If you say Y here AV8100 will use SW triggering + between AV8100 and MCDE. + +endchoice + diff --git a/drivers/video/av8100/Makefile b/drivers/video/av8100/Makefile new file mode 100644 index 00000000000..2d3028b18ca --- /dev/null +++ b/drivers/video/av8100/Makefile @@ -0,0 +1,10 @@ +# Make file for compiling and loadable module HDMI + +obj-$(CONFIG_AV8100) += av8100.o hdmi.o + +ifdef CONFIG_HDMI_AV8100_DEBUG +EXTRA_CFLAGS += -DDEBUG +endif + +clean-files := av8100.o hdmi.o built-in.o modules.order + diff --git a/drivers/video/av8100/av8100.c b/drivers/video/av8100/av8100.c new file mode 100644 index 00000000000..d5d159079c3 --- /dev/null +++ b/drivers/video/av8100/av8100.c @@ -0,0 +1,4166 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * + * AV8100 driver + * + * Author: Per Persson <per.xb.persson@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/miscdevice.h> +#include <linux/platform_device.h> +#include <linux/i2c.h> +#include <linux/fs.h> +#include <linux/gpio.h> +#include <linux/delay.h> +#include <linux/kthread.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/timer.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/clk.h> +#include <linux/list.h> +#include <linux/regulator/consumer.h> +#include <linux/mfd/dbx500-prcmu.h> + +#include "av8100_regs.h" +#include <video/av8100.h> +#include <video/hdmi.h> +#include <linux/firmware.h> + +#define AV8100_FW_FILENAME "av8100.fw" +#define CUT_STR_0 "2.1" +#define CUT_STR_1 "2.2" +#define CUT_STR_3 "2.3" +#define CUT_STR_30 "3.0" +#define CUT_STR_UNKNOWN "" +#define AV8100_DEVNR_DEFAULT 0 + +/* Interrupts */ +#define AV8100_INT_EVENT 0x1 +#define AV8100_PLUGSTARTUP_EVENT 0x4 + +#define AV8100_PLUGSTARTUP_TIME 100 + +/* Standby search time */ +#define AV8100_ON_TIME 1 /* 9 ms step */ +#define AV8100_DENC_OFF_TIME 3 /* 275 ms step if > V1. Not used if V1 */ +#define AV8100_HDMI_OFF_TIME 2 /* 140 ms step if V2. 
80 ms step if V1 */ + +/* Command offsets */ +#define AV8100_COMMAND_OFFSET 0x10 +#define AV8100_CUTVER_OFFSET 0x11 +#define AV8100_COMMAND_MAX_LENGTH 0x81 +#define AV8100_CMD_BUF_OFFSET (AV8100_COMMAND_OFFSET + 1) +#define AV8100_2ND_RET_BYTE_OFFSET (AV8100_COMMAND_OFFSET + 1) +#define AV8100_CEC_RET_BUF_OFFSET (AV8100_COMMAND_OFFSET + 4) +#define AV8100_HDCP_RET_BUF_OFFSET (AV8100_COMMAND_OFFSET + 2) +#define AV8100_EDID_RET_BUF_OFFSET (AV8100_COMMAND_OFFSET + 1) +#define AV8100_FUSE_CRC_OFFSET (AV8100_COMMAND_OFFSET + 2) +#define AV8100_FUSE_PRGD_OFFSET (AV8100_COMMAND_OFFSET + 3) +#define AV8100_CRC32_OFFSET (AV8100_COMMAND_OFFSET + 2) +#define AV8100_CEC_ADDR_OFFSET (AV8100_COMMAND_OFFSET + 3) + +/* Tearing effect line numbers */ +#define AV8100_TE_LINE_NB_14 14 +#define AV8100_TE_LINE_NB_17 17 +#define AV8100_TE_LINE_NB_18 18 +#define AV8100_TE_LINE_NB_21 21 +#define AV8100_TE_LINE_NB_22 22 +#define AV8100_TE_LINE_NB_24 24 +#define AV8100_TE_LINE_NB_25 25 +#define AV8100_TE_LINE_NB_26 26 +#define AV8100_TE_LINE_NB_29 29 +#define AV8100_TE_LINE_NB_30 30 +#define AV8100_TE_LINE_NB_32 32 +#define AV8100_TE_LINE_NB_38 38 +#define AV8100_TE_LINE_NB_40 40 +#define AV8100_UI_X4_DEFAULT 6 + +#define HDMI_REQUEST_FOR_REVOCATION_LIST_INPUT 2 +#define HDMI_CEC_MESSAGE_WRITE_BUFFER_SIZE 16 +#define HDMI_HDCP_SEND_KEY_SIZE 7 +#define HDMI_INFOFRAME_DATA_SIZE 28 +#define HDMI_FUSE_AES_KEY_SIZE 16 +#define HDMI_FUSE_AES_KEY_RET_SIZE 2 +#define HDMI_LOADAES_END_BLK_NR 145 +#define HDMI_CRC32_SIZE 4 +#define HDMI_HDCP_MGMT_BKSV_SIZE 5 +#define HDMI_HDCP_MGMT_SHA_SIZE 20 +#define HDMI_HDCP_MGMT_MAX_DEVICES_SIZE 20 +#define HDMI_HDCP_MGMT_DEVICE_MASK 0x7F +#define HDMI_EDIDREAD_SIZE 0x7F + +#define HPDS_INVALID 0xF +#define CPDS_INVALID 0xF +#define CECRX_INVALID 0xF + +#define REG_16_8_LSB(p) ((u8)(p & 0xFF)) +#define REG_16_8_MSB(p) ((u8)((p & 0xFF00)>>8)) +#define REG_32_8_MSB(p) ((u8)((p & 0xFF000000)>>24)) +#define REG_32_8_MMSB(p) ((u8)((p & 0x00FF0000)>>16)) +#define 
REG_32_8_MLSB(p) ((u8)((p & 0x0000FF00)>>8)) +#define REG_32_8_LSB(p) ((u8)(p & 0x000000FF)) +#define REG_10_8_MSB(p) ((u8)((p & 0x300)>>8)) +#define REG_12_8_MSB(p) ((u8)((p & 0xf00)>>8)) + +#define AV8100_WAITTIME_1MS 1 +#define AV8100_WAITTIME_5MS 5 +#define AV8100_WAITTIME_10MS 10 +#define AV8100_WAITTIME_50MS 50 +#define AV8100_WATTIME_100US 100 + +static DEFINE_MUTEX(av8100_hw_mutex); +#define LOCK_AV8100_HW mutex_lock(&av8100_hw_mutex) +#define UNLOCK_AV8100_HW mutex_unlock(&av8100_hw_mutex) +static DEFINE_MUTEX(av8100_fwdl_mutex); +#define LOCK_AV8100_FWDL mutex_lock(&av8100_fwdl_mutex) +#define UNLOCK_AV8100_FWDL mutex_unlock(&av8100_fwdl_mutex) + +struct color_conversion_cmd { + unsigned short c0; + unsigned short c1; + unsigned short c2; + unsigned short c3; + unsigned short c4; + unsigned short c5; + unsigned short c6; + unsigned short c7; + unsigned short c8; + unsigned short aoffset; + unsigned short boffset; + unsigned short coffset; + unsigned char lmax; + unsigned char lmin; + unsigned char cmax; + unsigned char cmin; +}; + +struct av8100_config { + struct i2c_client *client; + struct i2c_device_id *id; + struct av8100_video_input_format_cmd hdmi_video_input_cmd; + struct av8100_audio_input_format_cmd hdmi_audio_input_cmd; + struct av8100_video_output_format_cmd hdmi_video_output_cmd; + struct av8100_video_scaling_format_cmd hdmi_video_scaling_cmd; + enum av8100_color_transform color_transform; + struct av8100_cec_message_write_format_cmd + hdmi_cec_message_write_cmd; + struct av8100_cec_message_read_back_format_cmd + hdmi_cec_message_read_back_cmd; + struct av8100_denc_format_cmd hdmi_denc_cmd; + struct av8100_hdmi_cmd hdmi_cmd; + struct av8100_hdcp_send_key_format_cmd hdmi_hdcp_send_key_cmd; + struct av8100_hdcp_management_format_cmd + hdmi_hdcp_management_format_cmd; + struct av8100_infoframes_format_cmd hdmi_infoframes_cmd; + struct av8100_edid_section_readback_format_cmd + hdmi_edid_section_readback_cmd; + struct 
av8100_pattern_generator_format_cmd hdmi_pattern_generator_cmd; + struct av8100_fuse_aes_key_format_cmd hdmi_fuse_aes_key_cmd; +}; + +enum av8100_plug_state { + AV8100_UNPLUGGED, + AV8100_PLUGGED_STARTUP, + AV8100_PLUGGED +}; + +struct av8100_params { + int denc_off_time;/* 5 volt time */ + int hdmi_off_time;/* 5 volt time */ + int on_time;/* 5 volt time */ + u8 hpdm;/*stby_int_mask*/ + u8 cpdm;/*stby_int_mask*/ + u8 cecm;/*gen_int_mask*/ + u8 hdcpm;/*gen_int_mask*/ + u8 uovbm;/*gen_int_mask*/ + void (*hdmi_ev_cb)(enum av8100_hdmi_event); + enum av8100_plug_state plug_state; + struct clk *inputclk; + bool inputclk_requested; + bool opp_requested; + struct regulator *regulator_pwr; + bool regulator_requested; + bool pre_suspend_power; + bool ints_enabled; + bool irq_requested; +}; + +/** + * struct av8100_cea - CEA(consumer electronic access) standard structure + * @cea_id: + * @cea_nb: + * @vtotale: + **/ + +struct av8100_cea { + char cea_id[40]; + int cea_nb; + int vtotale; + int vactive; + int vsbp; + int vslen; + int vsfp; + char vpol[5]; + int htotale; + int hactive; + int hbp; + int hslen; + int hfp; + int frequence; + char hpol[5]; + int reg_line_duration; + int blkoel_duration; + int uix4; + int pll_mult; + int pll_div; +}; + +enum av8100_command_size { + AV8100_COMMAND_VIDEO_INPUT_FORMAT_SIZE = 0x17, + AV8100_COMMAND_AUDIO_INPUT_FORMAT_SIZE = 0x8, + AV8100_COMMAND_VIDEO_OUTPUT_FORMAT_SIZE = 0x1E, + AV8100_COMMAND_VIDEO_SCALING_FORMAT_SIZE = 0x11, + AV8100_COMMAND_COLORSPACECONVERSION_SIZE = 0x1D, + AV8100_COMMAND_CEC_MESSAGE_WRITE_SIZE = 0x12, + AV8100_COMMAND_CEC_MESSAGE_READ_BACK_SIZE = 0x1, + AV8100_COMMAND_DENC_SIZE = 0x6, + AV8100_COMMAND_HDMI_SIZE = 0x4, + AV8100_COMMAND_HDCP_SENDKEY_SIZE = 0xA, + AV8100_COMMAND_HDCP_MANAGEMENT_SIZE = 0x3, + AV8100_COMMAND_INFOFRAMES_SIZE = 0x21, + AV8100_COMMAND_EDID_SECTION_READBACK_SIZE = 0x3, + AV8100_COMMAND_PATTERNGENERATOR_SIZE = 0x4, + AV8100_COMMAND_FUSE_AES_KEY_SIZE = 0x12, + AV8100_COMMAND_FUSE_AES_CHK_SIZE 
= 0x2, +}; + +struct av8100_device { + struct list_head list; + struct miscdevice miscdev; + struct device *dev; + struct av8100_config config; + struct av8100_status status; + struct timer_list timer; + wait_queue_head_t event; + int flag; + struct av8100_params params; + u8 chip_version; +}; + +static const unsigned int waittime_retry[10] = { + 1, 2, 4, 6, 8, 10, 10, 10, 10, 10}; + +static int av8100_5V_w(u8 denc_off, u8 hdmi_off, u8 on); +static void clr_plug_status(struct av8100_device *adev, + enum av8100_plugin_status status); +static void set_plug_status(struct av8100_device *adev, + enum av8100_plugin_status status); +static void cec_rx(struct av8100_device *adev); +static void cec_tx(struct av8100_device *adev); +static void cec_txerr(struct av8100_device *adev); +static void hdcp_changed(struct av8100_device *adev); +static const struct color_conversion_cmd *get_color_transform_cmd( + struct av8100_device *adev, + enum av8100_color_transform transform); +static int av8100_open(struct inode *inode, struct file *filp); +static int av8100_release(struct inode *inode, struct file *filp); +static long av8100_ioctl(struct file *file, + unsigned int cmd, unsigned long arg); +static int __devinit av8100_probe(struct i2c_client *i2c_client, + const struct i2c_device_id *id); +static int __devexit av8100_remove(struct i2c_client *i2c_client); + +static const struct file_operations av8100_fops = { + .owner = THIS_MODULE, + .open = av8100_open, + .release = av8100_release, + .unlocked_ioctl = av8100_ioctl +}; + +/* List of devices */ +static LIST_HEAD(av8100_device_list); + +static const struct av8100_cea av8100_all_cea[29] = { +/* cea id + * cea_nr vtot vact vsbpp vslen + * vsfp vpol htot hact hbp hslen hfp freq + * hpol rld bd uix4 pm pd */ +{ "0 CUSTOM ", + 0, 0, 0, 0, 0, + 0, "-", 800, 640, 16, 96, 10, 25200000, + "-", 0, 0, 0, 0, 0},/*Settings to be defined*/ +{ "1 CEA 1 VESA 4 640x480p @ 60 Hz ", + 1, 525, 480, 33, 2, + 10, "-", 800, 640, 49, 290, 146, 
25200000, + "-", 2438, 1270, 6, 32, 1},/*RGB888*/ +{ "2 CEA 2 - 3 720x480p @ 60 Hz 4:3 ", + 2, 525, 480, 30, 6, + 9, "-", 858, 720, 34, 130, 128, 27027000, + "-", 1828, 0x3C0, 8, 24, 1},/*RGB565*/ +{ "3 CEA 4 1280x720p @ 60 Hz ", + 4, 750, 720, 20, 5, + 5, "+", 1650, 1280, 114, 39, 228, 74250000, + "+", 1706, 164, 6, 32, 1},/*RGB565*/ +{ "4 CEA 5 1920x1080i @ 60 Hz ", + 5, 1125, 540, 20, 5, + 0, "+", 2200, 1920, 88, 44, 10, 74250000, + "+", 0, 0, 0, 0, 0},/*Settings to be define*/ +{ "5 CEA 6-7 480i (NTSC) ", + 6, 525, 240, 44, 5, + 0, "-", 858, 720, 12, 64, 10, 13513513, + "-", 0, 0, 0, 0, 0},/*Settings to be define*/ +{ "6 CEA 14-15 480p @ 60 Hz ", + 14, 525, 480, 44, 5, + 0, "-", 858, 720, 12, 64, 10, 27027000, + "-", 0, 0, 0, 0, 0},/*Settings to be define*/ +{ "7 CEA 16 1920x1080p @ 60 Hz ", + 16, 1125, 1080, 36, 5, + 0, "+", 1980, 1280, 440, 40, 10, 133650000, + "+", 0, 0, 0, 0, 0},/*Settings to be define*/ +{ "8 CEA 17-18 720x576p @ 50 Hz ", + 17, 625, 576, 44, 5, + 0, "-", 864, 720, 12, 64, 10, 27000000, + "-", 0, 0, 0, 0, 0},/*Settings to be define*/ +{ "9 CEA 19 1280x720p @ 50 Hz ", + 19, 750, 720, 25, 5, + 0, "+", 1980, 1280, 440, 40, 10, 74250000, + "+", 0, 0, 0, 0, 0},/*Settings to be define*/ +{ "10 CEA 20 1920 x 1080i @ 50 Hz ", + 20, 1125, 540, 20, 5, + 0, "+", 2640, 1920, 528, 44, 10, 74250000, + "+", 0, 0, 0, 0, 0},/*Settings to be define*/ +{ "11 CEA 21-22 576i (PAL) ", + 21, 625, 288, 44, 5, + 0, "-", 1728, 1440, 12, 64, 10, 27000000, + "-", 0, 0, 0, 0, 0},/*Settings to be define*/ +{ "12 CEA 29/30 576p ", + 29, 625, 576, 44, 5, + 0, "-", 864, 720, 12, 64, 10, 27000000, + "-", 0, 0, 0, 0, 0},/*Settings to be define*/ +{ "13 CEA 31 1080p 50Hz ", + 31, 1125, 1080, 44, 5, + 0, "-", 2640, 1920, 12, 64, 10, 148500000, + "-", 0, 0, 0, 0, 0},/*Settings to be define*/ +{ "14 CEA 32 1920x1080p @ 24 Hz ", + 32, 1125, 1080, 36, 5, + 4, "+", 2750, 1920, 660, 44, 153, 74250000, + "+", 2844, 0x530, 6, 32, 1},/*RGB565*/ +{ "15 CEA 33 1920x1080p @ 25 Hz ", + 33, 
1125, 1080, 36, 5, + 4, "+", 2640, 1920, 528, 44, 10, 74250000, + "+", 0, 0, 0, 0, 0},/*Settings to be define*/ +{ "16 CEA 34 1920x1080p @ 30Hz ", + 34, 1125, 1080, 36, 5, + 4, "+", 2200, 1920, 91, 44, 153, 74250000, + "+", 2275, 0xAB, 6, 32, 1},/*RGB565*/ +{ "17 CEA 60 1280x720p @ 24 Hz ", + 60, 750, 720, 20, 5, + 5, "+", 3300, 1280, 284, 50, 2276, 59400000, + "+", 4266, 0xAD0, 5, 32, 1},/*RGB565*/ +{ "18 CEA 61 1280x720p @ 25 Hz ", + 61, 750, 720, 20, 5, + 5, "+", 3960, 1280, 228, 39, 2503, 74250000, + "+", 4096, 0x500, 5, 32, 1},/*RGB565*/ +{ "19 CEA 62 1280x720p @ 30 Hz ", + 62, 750, 720, 20, 5, + 5, "+", 3300, 1280, 228, 39, 1820, 74250000, + "+", 3413, 0x770, 5, 32, 1},/*RGB565*/ +{ "20 VESA 9 800x600 @ 60 Hz ", + 109, 628, 600, 28, 4, + 0, "+", 1056, 800, 40, 128, 10, 40000000, + "+", 0, 0, 0, 0, 0},/*Settings to be define*/ +{ "21 VESA 14 848x480 @ 60 Hz ", + 114, 517, 480, 20, 5, + 0, "+", 1088, 848, 24, 80, 10, 33750000, + "-", 0, 0, 0, 0, 0},/*Settings to be define*/ +{ "22 VESA 16 1024x768 @ 60 Hz ", + 116, 806, 768, 38, 6, + 0, "-", 1344, 1024, 24, 135, 10, 65000000, + "-", 0, 0, 0, 0, 0},/*Settings to be define*/ +{ "23 VESA 22 1280x768 @ 60 Hz ", + 122, 790, 768, 34, 4, + 0, "+", 1440, 1280, 48, 160, 10, 68250000, + "-", 0, 0, 0, 0, 0},/*Settings to be define*/ +{ "24 VESA 23 1280x768 @ 60 Hz ", + 123, 798, 768, 30, 7, + 0, "+", 1664, 1280, 64, 128, 10, 79500000, + "-", 0, 0, 0, 0, 0},/*Settings to be define*/ +{ "25 VESA 27 1280x800 @ 60 Hz ", + 127, 823, 800, 23, 6, + 0, "+", 1440, 1280, 48, 32, 10, 71000000, + "+", 0, 0, 0, 0, 0},/*Settings to be define*/ +{ "26 VESA 28 1280x800 @ 60 Hz ", + 128, 831, 800, 31, 6, + 0, "+", 1680, 1280, 72, 128, 10, 83500000, + "-", 0, 0, 0, 0, 0},/*Settings to be define*/ +{ "27 VESA 39 1360x768 @ 60 Hz ", + 139, 795, 768, 22, 5, + 0, "-", 1792, 1360, 48, 32, 10, 85500000, + "+", 0, 0, 0, 0, 0},/*Settings to be define*/ +{ "28 VESA 81 1366x768 @ 60 Hz ", + 181, 798, 768, 30, 5, + 0, "+", 1792, 1366, 72, 136, 10, 
85750000, + "-", 0, 0, 0, 0, 0} /*Settings to be define*/ +}; + +const struct color_conversion_cmd col_trans_identity = { + .c0 = 0x0100, .c1 = 0x0000, .c2 = 0x0000, + .c3 = 0x0000, .c4 = 0x0100, .c5 = 0x0000, + .c6 = 0x0000, .c7 = 0x0000, .c8 = 0x0100, + .aoffset = 0x0000, .boffset = 0x0000, .coffset = 0x0000, + .lmax = 0xff, + .lmin = 0x00, + .cmax = 0xff, + .cmin = 0x00, +}; + +const struct color_conversion_cmd col_trans_identity_clamp_yuv = { + .c0 = 0x0100, .c1 = 0x0000, .c2 = 0x0000, + .c3 = 0x0000, .c4 = 0x0100, .c5 = 0x0000, + .c6 = 0x0000, .c7 = 0x0000, .c8 = 0x0100, + .aoffset = 0x0000, .boffset = 0x0000, .coffset = 0x0000, + .lmax = 0xeb, + .lmin = 0x10, + .cmax = 0xf0, + .cmin = 0x10, +}; + +const struct color_conversion_cmd col_trans_yuv_to_rgb_v1 = { + .c0 = 0x0087, .c1 = 0x0000, .c2 = 0x00ba, + .c3 = 0x0087, .c4 = 0xffd3, .c5 = 0xffa1, + .c6 = 0x0087, .c7 = 0x00eb, .c8 = 0x0000, + .aoffset = 0xffab, .boffset = 0x004e, .coffset = 0xff92, + .lmax = 0xff, + .lmin = 0x00, + .cmax = 0xff, + .cmin = 0x00, +}; + +const struct color_conversion_cmd col_trans_yuv_to_rgb_v2 = { + .c0 = 0x0198, .c1 = 0x012a, .c2 = 0x0000, + .c3 = 0xff30, .c4 = 0x012a, .c5 = 0xff9c, + .c6 = 0x0000, .c7 = 0x012a, .c8 = 0x0204, + .aoffset = 0xff21, .boffset = 0x0088, .coffset = 0xfeeb, + .lmax = 0xff, + .lmin = 0x00, + .cmax = 0xff, + .cmin = 0x00, +}; + +const struct color_conversion_cmd col_trans_yuv_to_denc = { + .c0 = 0x0100, .c1 = 0x0000, .c2 = 0x0000, + .c3 = 0x0000, .c4 = 0x0100, .c5 = 0x0000, + .c6 = 0x0000, .c7 = 0x0000, .c8 = 0x0100, + .aoffset = 0x0000, .boffset = 0x0000, .coffset = 0x0000, + .lmax = 0xeb, + .lmin = 0x10, + .cmax = 0xf0, + .cmin = 0x10, +}; + +const struct color_conversion_cmd col_trans_rgb_to_denc = { + .c0 = 0x0070, .c1 = 0xffb6, .c2 = 0xffda, + .c3 = 0x0042, .c4 = 0x0081, .c5 = 0x0019, + .c6 = 0xffee, .c7 = 0xffa2, .c8 = 0x0070, + .aoffset = 0x007f, .boffset = 0x0010, .coffset = 0x007f, + .lmax = 0xff, + .lmin = 0x00, + .cmax = 0xff, + .cmin = 0x00, 
+}; + +static const struct i2c_device_id av8100_id[] = { + { "av8100", 0 }, + { } +}; + +static struct av8100_device *devnr_to_adev(int devnr) +{ + /* Get device from list of devices */ + struct list_head *element; + struct av8100_device *av8100_dev; + int cnt = 0; + + list_for_each(element, &av8100_device_list) { + av8100_dev = list_entry(element, struct av8100_device, list); + if (cnt == devnr) + return av8100_dev; + cnt++; + } + + return NULL; +} + +static struct av8100_device *dev_to_adev(struct device *dev) +{ + /* Get device from list of devices */ + struct list_head *element; + struct av8100_device *av8100_dev; + int cnt = 0; + + list_for_each(element, &av8100_device_list) { + av8100_dev = list_entry(element, struct av8100_device, list); + if (av8100_dev->dev == dev) + return av8100_dev; + cnt++; + } + + return NULL; +} + +static int adev_to_devnr(struct av8100_device *adev) +{ + /* Get devnr from list of devices */ + struct list_head *element; + struct av8100_device *av8100_dev; + int cnt = 0; + + list_for_each(element, &av8100_device_list) { + av8100_dev = list_entry(element, struct av8100_device, list); + if (av8100_dev == adev) + return cnt; + cnt++; + } + + return -EINVAL; +} + +#ifdef CONFIG_PM +static int av8100_suspend(struct device *dev) +{ + int ret = 0; + struct av8100_device *adev; + + adev = dev_to_adev(dev); + if (!adev) + return -EFAULT; + + dev_dbg(dev, "%s\n", __func__); + + adev->params.pre_suspend_power = + (av8100_status_get().av8100_state > AV8100_OPMODE_SHUTDOWN); + + if (adev->params.pre_suspend_power) { + ret = av8100_powerdown(); + if (ret) + dev_err(dev, "av8100_powerdown failed\n"); + } + + return ret; +} + +static int av8100_resume(struct device *dev) +{ + int ret; + u8 hpds = 0; + struct av8100_device *adev; + + adev = dev_to_adev(dev); + if (!adev) + return -EFAULT; + + dev_dbg(dev, "%s\n", __func__); + + if (adev->params.pre_suspend_power) { + ret = av8100_powerup(); + if (ret) { + dev_err(dev, "av8100_powerup failed\n"); + 
return ret; + } + + /* Check HDMI plug status */ + if (av8100_reg_stby_r(NULL, NULL, &hpds, NULL, NULL)) { + dev_warn(dev, "av8100_reg_stby_r failed\n"); + goto av8100_resume_end; + } + + if (hpds) + set_plug_status(adev, AV8100_HDMI_PLUGIN); /* Plugged*/ + else + clr_plug_status(adev, + AV8100_HDMI_PLUGIN); /* Unplugged*/ + + adev->params.hpdm = AV8100_STANDBY_INTERRUPT_MASK_HPDM_HIGH; + av8100_enable_interrupt(); + } + +av8100_resume_end: + return 0; +} + +static const struct dev_pm_ops av8100_dev_pm_ops = { + .suspend = av8100_suspend, + .resume = av8100_resume, +}; +#endif + +static struct i2c_driver av8100_driver = { + .probe = av8100_probe, + .remove = av8100_remove, + .driver = { + .name = "av8100", +#ifdef CONFIG_PM + .pm = &av8100_dev_pm_ops, +#endif + }, + .id_table = av8100_id, +}; + +static void av8100_plugtimer_int(unsigned long value) +{ + struct av8100_device *adev; + + adev = devnr_to_adev((int)value); + adev->flag |= AV8100_PLUGSTARTUP_EVENT; + wake_up_interruptible(&adev->event); + del_timer(&adev->timer); +} + +static int av8100_int_event_handle(struct av8100_device *adev) +{ + u8 hpdi = 0; + u8 cpdi = 0; + u8 uovbi = 0; + u8 hdcpi = 0; + u8 ceci = 0; + u8 hpds = 0; + u8 cpds = 0; + u8 hdcps = 0; + u8 onuvb = 0; + u8 cectxerr = 0; + u8 cecrx = 0; + u8 cectx = 0; + + /* STANDBY_PENDING_INTERRUPT reg */ + if (av8100_reg_stby_pend_int_r(&hpdi, &cpdi, NULL, NULL)) { + dev_dbg(adev->dev, "av8100_reg_stby_pend_int_r failed\n"); + goto av8100_int_event_handle_1; + } + + /* Plug event */ + if (hpdi | cpdi) { + /* Clear pending interrupts */ + (void)av8100_reg_stby_pend_int_w(1, 1, 1, 0); + + /* STANDBY reg */ + if (av8100_reg_stby_r(NULL, NULL, &hpds, &cpds, NULL)) { + dev_dbg(adev->dev, "av8100_reg_stby_r failed\n"); + goto av8100_int_event_handle_1; + } + } + + if (cpdi & adev->params.cpdm) { + /* TVout plugin change */ + if (cpds) { + dev_dbg(adev->dev, "cpds 1\n"); + set_plug_status(adev, AV8100_CVBS_PLUGIN); + } else { + dev_dbg(adev->dev, "cpds 
0\n"); + clr_plug_status(adev, AV8100_CVBS_PLUGIN); + } + } + + if (hpdi & adev->params.hpdm) { + /* HDMI plugin change */ + if (hpds) { + /* Plugged */ + /* Set 5V always on */ + av8100_5V_w(adev->params.denc_off_time, + 0, + adev->params.on_time); + dev_dbg(adev->dev, "hpds 1\n"); + set_plug_status(adev, AV8100_HDMI_PLUGIN); + } else { + /* Unplugged */ + av8100_5V_w(adev->params.denc_off_time, + adev->params.hdmi_off_time, + adev->params.on_time); + dev_dbg(adev->dev, "hpds 0\n"); + clr_plug_status(adev, AV8100_HDMI_PLUGIN); + } + } + +av8100_int_event_handle_1: + /* GENERAL_INTERRUPT reg */ + if (av8100_reg_gen_int_r(NULL, NULL, NULL, &ceci, + &hdcpi, &uovbi, NULL)) { + dev_dbg(adev->dev, "av8100_reg_gen_int_r failed\n"); + return -EINVAL; + } + + /* CEC or HDCP event */ + if (ceci | hdcpi | uovbi) { + /* Clear pending interrupts */ + av8100_reg_gen_int_w(1, 1, 1, 1, 1, 1); + + /* GENERAL_STATUS reg */ + if (av8100_reg_gen_status_r(&cectxerr, &cecrx, &cectx, NULL, + &onuvb, &hdcps) != 0) { + dev_dbg(adev->dev, "av8100_reg_gen_status_r fail\n"); + return -EINVAL; + } + } + + /* Underflow or overflow */ + if (uovbi) + dev_dbg(adev->dev, "uovbi %d\n", onuvb); + + /* CEC received */ + if (ceci && cecrx) { + u8 val; + + dev_dbg(adev->dev, "cecrx\n"); + + /* Clear cecrx in status reg*/ + if (av8100_reg_r(AV8100_GENERAL_STATUS, &val) == 0) { + if (av8100_reg_w(AV8100_GENERAL_STATUS, + val & ~AV8100_GENERAL_STATUS_CECREC_MASK)) + dev_info(adev->dev, "gen_stat write error\n"); + } else { + dev_info(adev->dev, "gen_stat read error\n"); + } + + /* Report CEC event */ + cec_rx(adev); + } + + /* CEC tx error */ + if (ceci && cectx && cectxerr) { + dev_dbg(adev->dev, "cectxerr\n"); + /* Report CEC tx error event */ + cec_txerr(adev); + } else if (ceci && cectx) { + dev_dbg(adev->dev, "cectx\n"); + /* Report CEC tx event */ + cec_tx(adev); + } + + /* HDCP event */ + if (hdcpi) { + dev_dbg(adev->dev, "hdcpch:%0x\n", hdcps); + /* Report HDCP status change event */ + 
hdcp_changed(adev); + } + + return 0; +} + +static int av8100_plugstartup_event_handle(struct av8100_device *adev) +{ + u8 hpds = 0; + u8 cpds = 0; + + switch (adev->params.plug_state) { + case AV8100_UNPLUGGED: + case AV8100_PLUGGED: + default: + break; + + case AV8100_PLUGGED_STARTUP: + /* Unmask interrupt */ + adev->params.hpdm = AV8100_STANDBY_INTERRUPT_MASK_HPDM_HIGH; + if (av8100_reg_stby_int_mask_w(adev->params.hpdm, + adev->params.cpdm, + AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_INPUT, + AV8100_STANDBY_INTERRUPT_MASK_IPOL_LOW)) { + dev_dbg(adev->dev, + "av8100_reg_stby_int_mask_w fail\n"); + } + + mdelay(AV8100_WAITTIME_1MS); + + /* Get actual plug status */ + if (av8100_reg_stby_r(NULL, NULL, &hpds, &cpds, NULL)) + dev_dbg(adev->dev, "av8100_reg_stby_r fail\n"); + + /* Set plugstate */ + if (hpds) { + adev->params.plug_state = AV8100_PLUGGED; + dev_dbg(adev->dev, "plug_state:2\n"); + } else { + adev->params.plug_state = AV8100_UNPLUGGED; + dev_dbg(adev->dev, "plug_state:0\n"); + + if (adev->params.hdmi_ev_cb) + adev->params.hdmi_ev_cb( + AV8100_HDMI_EVENT_HDMI_PLUGOUT); + } + break; + } + + return 0; +} + +static int av8100_thread(void *p) +{ + u8 flags; + struct av8100_device *adev = p; + + while (1) { + wait_event_interruptible(adev->event, (adev->flag != 0)); + flags = adev->flag; + adev->flag = 0; + + if (adev->status.av8100_state < AV8100_OPMODE_STANDBY) + continue; + + if (flags & AV8100_INT_EVENT) + (void)av8100_int_event_handle(adev); + + if (flags & AV8100_PLUGSTARTUP_EVENT) + (void)av8100_plugstartup_event_handle(adev); + } + + return 0; +} + +static irqreturn_t av8100_intr_handler(int irq, void *p) +{ + struct av8100_device *adev; + + adev = (struct av8100_device *) p; + adev->flag |= AV8100_INT_EVENT; + wake_up_interruptible(&adev->event); + + return IRQ_HANDLED; +} + +static u16 av8100_get_te_line_nb( + enum av8100_output_CEA_VESA output_video_format) +{ + u16 retval; + + switch (output_video_format) { + case AV8100_CEA1_640X480P_59_94HZ: + 
case AV8100_CEA2_3_720X480P_59_94HZ: + case AV8100_VESA16_1024X768P_60HZ: + retval = AV8100_TE_LINE_NB_30; + break; + + case AV8100_CEA4_1280X720P_60HZ: + case AV8100_CEA60_1280X720P_24HZ: + case AV8100_CEA61_1280X720P_25HZ: + case AV8100_CEA62_1280X720P_30HZ: + retval = AV8100_TE_LINE_NB_21; + break; + + case AV8100_CEA5_1920X1080I_60HZ: + case AV8100_CEA6_7_NTSC_60HZ: + case AV8100_CEA20_1920X1080I_50HZ: + case AV8100_CEA21_22_576I_PAL_50HZ: + case AV8100_VESA27_1280X800P_59_91HZ: + retval = AV8100_TE_LINE_NB_18; + break; + + case AV8100_CEA14_15_480p_60HZ: + retval = AV8100_TE_LINE_NB_32; + break; + + case AV8100_CEA17_18_720X576P_50HZ: + case AV8100_CEA29_30_576P_50HZ: + retval = AV8100_TE_LINE_NB_40; + break; + + case AV8100_CEA19_1280X720P_50HZ: + case AV8100_VESA39_1360X768P_60_02HZ: + retval = AV8100_TE_LINE_NB_22; + break; + + case AV8100_CEA32_1920X1080P_24HZ: + case AV8100_CEA33_1920X1080P_25HZ: + case AV8100_CEA34_1920X1080P_30HZ: + retval = AV8100_TE_LINE_NB_38; + break; + + case AV8100_VESA9_800X600P_60_32HZ: + retval = AV8100_TE_LINE_NB_24; + break; + + case AV8100_VESA14_848X480P_60HZ: + retval = AV8100_TE_LINE_NB_29; + break; + + case AV8100_VESA22_1280X768P_59_99HZ: + retval = AV8100_TE_LINE_NB_17; + break; + + case AV8100_VESA23_1280X768P_59_87HZ: + case AV8100_VESA81_1366X768P_59_79HZ: + retval = AV8100_TE_LINE_NB_25; + break; + + case AV8100_VESA28_1280X800P_59_81HZ: + retval = AV8100_TE_LINE_NB_26; + break; + + case AV8100_CEA16_1920X1080P_60HZ: + case AV8100_CEA31_1920x1080P_50Hz: + default: + /* TODO */ + retval = AV8100_TE_LINE_NB_38; + break; + } + + return retval; +} + +static u16 av8100_get_ui_x4( + enum av8100_output_CEA_VESA output_video_format) +{ + return AV8100_UI_X4_DEFAULT; +} + +static int av8100_config_video_output_dep( + enum av8100_output_CEA_VESA output_format) +{ + int retval; + union av8100_configuration config; + + /* video input */ + config.video_input_format.dsi_input_mode = + AV8100_HDMI_DSI_COMMAND_MODE; + 
config.video_input_format.input_pixel_format = AV8100_INPUT_PIX_RGB565; + config.video_input_format.total_horizontal_pixel = + av8100_all_cea[output_format].htotale; + config.video_input_format.total_horizontal_active_pixel = + av8100_all_cea[output_format].hactive; + config.video_input_format.total_vertical_lines = + av8100_all_cea[output_format].vtotale; + config.video_input_format.total_vertical_active_lines = + av8100_all_cea[output_format].vactive; + + switch (output_format) { + case AV8100_CEA5_1920X1080I_60HZ: + case AV8100_CEA20_1920X1080I_50HZ: + case AV8100_CEA21_22_576I_PAL_50HZ: + case AV8100_CEA6_7_NTSC_60HZ: + config.video_input_format.video_mode = + AV8100_VIDEO_INTERLACE; + break; + + default: + config.video_input_format.video_mode = + AV8100_VIDEO_PROGRESSIVE; + break; + } + + config.video_input_format.nb_data_lane = + AV8100_DATA_LANES_USED_2; + config.video_input_format.nb_virtual_ch_command_mode = 0; + config.video_input_format.nb_virtual_ch_video_mode = 0; + config.video_input_format.ui_x4 = av8100_get_ui_x4(output_format); + config.video_input_format.TE_line_nb = av8100_get_te_line_nb( + output_format); + config.video_input_format.TE_config = AV8100_TE_OFF; + config.video_input_format.master_clock_freq = 0; + + retval = av8100_conf_prep( + AV8100_COMMAND_VIDEO_INPUT_FORMAT, &config); + if (retval) + return -EFAULT; + + /* DENC */ + switch (output_format) { + case AV8100_CEA21_22_576I_PAL_50HZ: + config.denc_format.cvbs_video_format = AV8100_CVBS_625; + config.denc_format.standard_selection = AV8100_PAL_BDGHI; + break; + + case AV8100_CEA6_7_NTSC_60HZ: + config.denc_format.cvbs_video_format = AV8100_CVBS_525; + config.denc_format.standard_selection = AV8100_NTSC_M; + break; + + default: + /* Not supported */ + break; + } + + return 0; +} + +static int av8100_config_init(struct av8100_device *adev) +{ + int retval; + union av8100_configuration config; + + dev_dbg(adev->dev, "%s\n", __func__); + + memset(&config, 0, sizeof(union 
av8100_configuration)); + memset(&adev->config, 0, sizeof(struct av8100_config)); + + /* Color conversion */ + config.color_transform = AV8100_COLOR_TRANSFORM_INDENTITY; + retval = av8100_conf_prep( + AV8100_COMMAND_COLORSPACECONVERSION, &config); + if (retval) + return -EFAULT; + + /* DENC */ + config.denc_format.cvbs_video_format = AV8100_CVBS_625; + config.denc_format.standard_selection = AV8100_PAL_BDGHI; + config.denc_format.enable = 0; + config.denc_format.macrovision_enable = 0; + config.denc_format.internal_generator = 0; + retval = av8100_conf_prep(AV8100_COMMAND_DENC, &config); + if (retval) + return -EFAULT; + + /* Video output */ + config.video_output_format.video_output_cea_vesa = + AV8100_CEA4_1280X720P_60HZ; + + retval = av8100_conf_prep( + AV8100_COMMAND_VIDEO_OUTPUT_FORMAT, &config); + if (retval) + return -EFAULT; + + /* Video input */ + av8100_config_video_output_dep( + config.video_output_format.video_output_cea_vesa); + + /* Pattern generator */ + config.pattern_generator_format.pattern_audio_mode = + AV8100_PATTERN_AUDIO_OFF; + config.pattern_generator_format.pattern_type = + AV8100_PATTERN_GENERATOR; + config.pattern_generator_format.pattern_video_format = + AV8100_PATTERN_720P; + retval = av8100_conf_prep(AV8100_COMMAND_PATTERNGENERATOR, + &config); + if (retval) + return -EFAULT; + + /* Audio input */ + config.audio_input_format.audio_input_if_format = + AV8100_AUDIO_I2SDELAYED_MODE; + config.audio_input_format.i2s_input_nb = 1; + config.audio_input_format.sample_audio_freq = AV8100_AUDIO_FREQ_48KHZ; + config.audio_input_format.audio_word_lg = AV8100_AUDIO_16BITS; + config.audio_input_format.audio_format = AV8100_AUDIO_LPCM_MODE; + config.audio_input_format.audio_if_mode = AV8100_AUDIO_MASTER; + config.audio_input_format.audio_mute = AV8100_AUDIO_MUTE_DISABLE; + retval = av8100_conf_prep( + AV8100_COMMAND_AUDIO_INPUT_FORMAT, &config); + if (retval) + return -EFAULT; + + /* HDMI mode */ + config.hdmi_format.hdmi_mode = AV8100_HDMI_ON; + 
config.hdmi_format.hdmi_format = AV8100_HDMI; + config.hdmi_format.dvi_format = AV8100_DVI_CTRL_CTL0; + retval = av8100_conf_prep(AV8100_COMMAND_HDMI, &config); + if (retval) + return -EFAULT; + + /* EDID section readback */ + config.edid_section_readback_format.address = 0xA0; + config.edid_section_readback_format.block_number = 0; + retval = av8100_conf_prep( + AV8100_COMMAND_EDID_SECTION_READBACK, &config); + if (retval) + return -EFAULT; + + return 0; +} + +static int av8100_params_init(struct av8100_device *adev) +{ + dev_dbg(adev->dev, "%s\n", __func__); + + memset(&adev->params, 0, sizeof(struct av8100_params)); + + adev->params.denc_off_time = AV8100_DENC_OFF_TIME; + adev->params.hdmi_off_time = AV8100_HDMI_OFF_TIME; + adev->params.on_time = AV8100_ON_TIME; + + adev->params.hpdm = AV8100_STANDBY_INTERRUPT_MASK_HPDM_HIGH; + adev->params.cpdm = AV8100_STANDBY_INTERRUPT_MASK_CPDM_HIGH; + adev->params.hdcpm = AV8100_GENERAL_INTERRUPT_MASK_HDCPM_HIGH; + adev->params.cecm = AV8100_GENERAL_INTERRUPT_MASK_CECM_HIGH; + adev->params.uovbm = AV8100_GENERAL_INTERRUPT_MASK_UOVBM_HIGH; + + return 0; +} + +static void clr_plug_status(struct av8100_device *adev, + enum av8100_plugin_status status) +{ + adev->status.av8100_plugin_status &= ~status; + + switch (status) { + case AV8100_HDMI_PLUGIN: + switch (adev->params.plug_state) { + case AV8100_UNPLUGGED: + case AV8100_PLUGGED_STARTUP: + default: + break; + + case AV8100_PLUGGED: + adev->params.plug_state = + AV8100_UNPLUGGED; + dev_dbg(adev->dev, "plug_state:0\n"); + + if (adev->params.hdmi_ev_cb) + adev->params.hdmi_ev_cb( + AV8100_HDMI_EVENT_HDMI_PLUGOUT); + break; + } + break; + + case AV8100_CVBS_PLUGIN: + /* TODO */ + break; + + default: + break; + } +} + +static void set_plug_status(struct av8100_device *adev, + enum av8100_plugin_status status) +{ + adev->status.av8100_plugin_status |= status; + + switch (status) { + case AV8100_HDMI_PLUGIN: + switch (adev->params.plug_state) { + case AV8100_UNPLUGGED: + 
adev->params.plug_state = + AV8100_PLUGGED_STARTUP; + + dev_dbg(adev->dev, "plug_state:1\n"); + + /* + * Mask interrupts to avoid plug detect during + * startup + * */ + adev->params.hpdm = + AV8100_STANDBY_INTERRUPT_MASK_HPDM_LOW; + if (av8100_reg_stby_int_mask_w( + adev->params.hpdm, + adev->params.cpdm, + AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_INPUT, + AV8100_STANDBY_INTERRUPT_MASK_IPOL_LOW)) { + dev_dbg(adev->dev, + "av8100_reg_stby_int_mask_w fail\n"); + } + + /* Set plug startup timer */ + init_timer(&adev->timer); + adev->timer.expires = jiffies + + AV8100_PLUGSTARTUP_TIME; + adev->timer.function = + av8100_plugtimer_int; + adev->timer.data = 0; + adev->timer.data = adev_to_devnr(adev); + mod_timer(&adev->timer, adev->timer.expires); + + if (adev->params.hdmi_ev_cb) + adev->params.hdmi_ev_cb( + AV8100_HDMI_EVENT_HDMI_PLUGIN); + break; + + case AV8100_PLUGGED_STARTUP: + case AV8100_PLUGGED: + default: + break; + } + break; + + case AV8100_CVBS_PLUGIN: + /* TODO */ + break; + + default: + break; + } +} + +static void cec_rx(struct av8100_device *adev) +{ + if (adev->params.hdmi_ev_cb) + adev->params.hdmi_ev_cb(AV8100_HDMI_EVENT_CEC); +} + +static void cec_tx(struct av8100_device *adev) +{ + if (adev->params.hdmi_ev_cb) + adev->params.hdmi_ev_cb(AV8100_HDMI_EVENT_CECTX); +} + +static void cec_txerr(struct av8100_device *adev) +{ + if (adev->params.hdmi_ev_cb) + adev->params.hdmi_ev_cb(AV8100_HDMI_EVENT_CECTXERR); +} + +static void hdcp_changed(struct av8100_device *adev) +{ + if (adev->params.hdmi_ev_cb) + adev->params.hdmi_ev_cb(AV8100_HDMI_EVENT_HDCP); +} + +static void av8100_set_state(struct av8100_device *adev, + enum av8100_operating_mode state) +{ + adev->status.av8100_state = state; + + if (state <= AV8100_OPMODE_STANDBY) { + clr_plug_status(adev, AV8100_HDMI_PLUGIN); + clr_plug_status(adev, AV8100_CVBS_PLUGIN); + adev->status.hdmi_on = false; + } +} + +/** + * write_single_byte() - Write a single byte to av8100 + * through i2c interface. 
+ * @client: i2c client structure + * @reg: register offset + * @data: data byte to be written + * + * This funtion uses smbus byte write API to write a single byte to av8100 + **/ +static int write_single_byte(struct i2c_client *client, u8 reg, + u8 data) +{ + int ret; + struct device *dev = &client->dev; + + ret = i2c_smbus_write_byte_data(client, reg, data); + if (ret < 0) + dev_dbg(dev, "i2c smbus write byte failed\n"); + + return ret; +} + +/** + * read_single_byte() - read single byte from av8100 + * through i2c interface + * @client: i2c client structure + * @reg: register offset + * @val: register value + * + * This funtion uses smbus read block API to read single byte from the reg + * offset. + **/ +static int read_single_byte(struct i2c_client *client, u8 reg, u8 *val) +{ + int value; + struct device *dev = &client->dev; + + value = i2c_smbus_read_byte_data(client, reg); + if (value < 0) { + dev_dbg(dev, "i2c smbus read byte failed,read data = %x " + "from offset:%x\n" , value, reg); + return -EFAULT; + } + + *val = (u8) value; + return 0; +} + +/** + * write_multi_byte() - Write a multiple bytes to av8100 through + * i2c interface. + * @client: i2c client structure + * @buf: buffer to be written + * @nbytes: nunmber of bytes to be written + * + * This funtion uses smbus block write API's to write n number of bytes to the + * av8100 + **/ +static int write_multi_byte(struct i2c_client *client, u8 reg, + u8 *buf, u8 nbytes) +{ + int ret; + struct device *dev = &client->dev; + + ret = i2c_smbus_write_i2c_block_data(client, reg, nbytes, buf); + if (ret < 0) + dev_dbg(dev, "i2c smbus write multi byte error\n"); + + return ret; +} + +static int configuration_video_input_get(struct av8100_device *adev, + char *buffer, unsigned int *length) +{ + buffer[0] = adev->config.hdmi_video_input_cmd.dsi_input_mode; + buffer[1] = adev->config.hdmi_video_input_cmd.input_pixel_format; + buffer[2] = REG_16_8_MSB(adev->config.hdmi_video_input_cmd. 
+ total_horizontal_pixel); + buffer[3] = REG_16_8_LSB(adev->config.hdmi_video_input_cmd. + total_horizontal_pixel); + buffer[4] = REG_16_8_MSB(adev->config.hdmi_video_input_cmd. + total_horizontal_active_pixel); + buffer[5] = REG_16_8_LSB(adev->config.hdmi_video_input_cmd. + total_horizontal_active_pixel); + buffer[6] = REG_16_8_MSB(adev->config.hdmi_video_input_cmd. + total_vertical_lines); + buffer[7] = REG_16_8_LSB(adev->config.hdmi_video_input_cmd. + total_vertical_lines); + buffer[8] = REG_16_8_MSB(adev->config.hdmi_video_input_cmd. + total_vertical_active_lines); + buffer[9] = REG_16_8_LSB(adev->config.hdmi_video_input_cmd. + total_vertical_active_lines); + buffer[10] = adev->config.hdmi_video_input_cmd.video_mode; + buffer[11] = adev->config.hdmi_video_input_cmd.nb_data_lane; + buffer[12] = adev->config.hdmi_video_input_cmd. + nb_virtual_ch_command_mode; + buffer[13] = adev->config.hdmi_video_input_cmd. + nb_virtual_ch_video_mode; + buffer[14] = REG_16_8_MSB(adev->config.hdmi_video_input_cmd. + TE_line_nb); + buffer[15] = REG_16_8_LSB(adev->config.hdmi_video_input_cmd. + TE_line_nb); + buffer[16] = adev->config.hdmi_video_input_cmd.TE_config; + buffer[17] = REG_32_8_MSB(adev->config.hdmi_video_input_cmd. + master_clock_freq); + buffer[18] = REG_32_8_MMSB(adev->config.hdmi_video_input_cmd. + master_clock_freq); + buffer[19] = REG_32_8_MLSB(adev->config.hdmi_video_input_cmd. + master_clock_freq); + buffer[20] = REG_32_8_LSB(adev->config.hdmi_video_input_cmd. 
+ master_clock_freq); + buffer[21] = adev->config.hdmi_video_input_cmd.ui_x4; + + *length = AV8100_COMMAND_VIDEO_INPUT_FORMAT_SIZE - 1; + return 0; + +} + +static int configuration_audio_input_get(struct av8100_device *adev, + char *buffer, unsigned int *length) +{ + buffer[0] = adev->config.hdmi_audio_input_cmd.audio_input_if_format; + buffer[1] = adev->config.hdmi_audio_input_cmd.i2s_input_nb; + buffer[2] = adev->config.hdmi_audio_input_cmd.sample_audio_freq; + buffer[3] = adev->config.hdmi_audio_input_cmd.audio_word_lg; + buffer[4] = adev->config.hdmi_audio_input_cmd.audio_format; + buffer[5] = adev->config.hdmi_audio_input_cmd.audio_if_mode; + buffer[6] = adev->config.hdmi_audio_input_cmd.audio_mute; + + *length = AV8100_COMMAND_AUDIO_INPUT_FORMAT_SIZE - 1; + return 0; +} + +static int configuration_video_output_get(struct av8100_device *adev, + char *buffer, unsigned int *length) +{ + buffer[0] = adev->config.hdmi_video_output_cmd. + video_output_cea_vesa; + + if (buffer[0] == AV8100_CUSTOM) { + buffer[1] = adev->config.hdmi_video_output_cmd. + vsync_polarity; + buffer[2] = adev->config.hdmi_video_output_cmd. + hsync_polarity; + buffer[3] = REG_16_8_MSB(adev->config. + hdmi_video_output_cmd.total_horizontal_pixel); + buffer[4] = REG_16_8_LSB(adev->config. + hdmi_video_output_cmd.total_horizontal_pixel); + buffer[5] = REG_16_8_MSB(adev->config. + hdmi_video_output_cmd.total_horizontal_active_pixel); + buffer[6] = REG_16_8_LSB(adev->config. + hdmi_video_output_cmd.total_horizontal_active_pixel); + buffer[7] = REG_16_8_MSB(adev->config. + hdmi_video_output_cmd.total_vertical_in_half_lines); + buffer[8] = REG_16_8_LSB(adev->config. + hdmi_video_output_cmd.total_vertical_in_half_lines); + buffer[9] = REG_16_8_MSB(adev->config. + hdmi_video_output_cmd. + total_vertical_active_in_half_lines); + buffer[10] = REG_16_8_LSB(adev->config. + hdmi_video_output_cmd. + total_vertical_active_in_half_lines); + buffer[11] = REG_16_8_MSB(adev->config. 
+ hdmi_video_output_cmd.hsync_start_in_pixel); + buffer[12] = REG_16_8_LSB(adev->config. + hdmi_video_output_cmd.hsync_start_in_pixel); + buffer[13] = REG_16_8_MSB(adev->config. + hdmi_video_output_cmd.hsync_length_in_pixel); + buffer[14] = REG_16_8_LSB(adev->config. + hdmi_video_output_cmd.hsync_length_in_pixel); + buffer[15] = REG_16_8_MSB(adev->config. + hdmi_video_output_cmd.vsync_start_in_half_line); + buffer[16] = REG_16_8_LSB(adev->config. + hdmi_video_output_cmd.vsync_start_in_half_line); + buffer[17] = REG_16_8_MSB(adev->config. + hdmi_video_output_cmd.vsync_length_in_half_line); + buffer[18] = REG_16_8_LSB(adev->config. + hdmi_video_output_cmd.vsync_length_in_half_line); + buffer[19] = REG_16_8_MSB(adev->config. + hdmi_video_output_cmd.hor_video_start_pixel); + buffer[20] = REG_16_8_LSB(adev->config. + hdmi_video_output_cmd.hor_video_start_pixel); + buffer[21] = REG_16_8_MSB(adev->config. + hdmi_video_output_cmd.vert_video_start_pixel); + buffer[22] = REG_16_8_LSB(adev->config. + hdmi_video_output_cmd.vert_video_start_pixel); + buffer[23] = adev->config.hdmi_video_output_cmd.video_type; + buffer[24] = adev->config.hdmi_video_output_cmd.pixel_repeat; + buffer[25] = REG_32_8_MSB(adev->config. + hdmi_video_output_cmd.pixel_clock_freq_Hz); + buffer[26] = REG_32_8_MMSB(adev->config. + hdmi_video_output_cmd.pixel_clock_freq_Hz); + buffer[27] = REG_32_8_MLSB(adev->config. + hdmi_video_output_cmd.pixel_clock_freq_Hz); + buffer[28] = REG_32_8_LSB(adev->config. + hdmi_video_output_cmd.pixel_clock_freq_Hz); + + *length = AV8100_COMMAND_VIDEO_OUTPUT_FORMAT_SIZE - 1; + } else { + *length = 1; + } + + return 0; +} + +static int configuration_video_scaling_get(struct av8100_device *adev, + char *buffer, unsigned int *length) +{ + buffer[0] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd. + h_start_in_pixel); + buffer[1] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd. + h_start_in_pixel); + buffer[2] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd. 
+ h_stop_in_pixel); + buffer[3] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd. + h_stop_in_pixel); + buffer[4] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd. + v_start_in_line); + buffer[5] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd. + v_start_in_line); + buffer[6] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd. + v_stop_in_line); + buffer[7] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd. + v_stop_in_line); + buffer[8] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd. + h_start_out_pixel); + buffer[9] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd + .h_start_out_pixel); + buffer[10] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd. + h_stop_out_pixel); + buffer[11] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd. + h_stop_out_pixel); + buffer[12] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd. + v_start_out_line); + buffer[13] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd. + v_start_out_line); + buffer[14] = REG_16_8_MSB(adev->config.hdmi_video_scaling_cmd. + v_stop_out_line); + buffer[15] = REG_16_8_LSB(adev->config.hdmi_video_scaling_cmd. 
+ v_stop_out_line); + + *length = AV8100_COMMAND_VIDEO_SCALING_FORMAT_SIZE - 1; + return 0; +} + +static int configuration_colorspace_conversion_get(struct av8100_device *adev, + char *buffer, unsigned int *length) +{ + const struct color_conversion_cmd *hdmi_color_space_conversion_cmd; + + hdmi_color_space_conversion_cmd = + get_color_transform_cmd(adev, adev->config.color_transform); + + buffer[0] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c0); + buffer[1] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c0); + buffer[2] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c1); + buffer[3] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c1); + buffer[4] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c2); + buffer[5] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c2); + buffer[6] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c3); + buffer[7] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c3); + buffer[8] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c4); + buffer[9] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c4); + buffer[10] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c5); + buffer[11] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c5); + buffer[12] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c6); + buffer[13] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c6); + buffer[14] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c7); + buffer[15] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c7); + buffer[16] = REG_12_8_MSB(hdmi_color_space_conversion_cmd->c8); + buffer[17] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->c8); + buffer[18] = REG_10_8_MSB(hdmi_color_space_conversion_cmd->aoffset); + buffer[19] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->aoffset); + buffer[20] = REG_10_8_MSB(hdmi_color_space_conversion_cmd->boffset); + buffer[21] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->boffset); + buffer[22] = REG_10_8_MSB(hdmi_color_space_conversion_cmd->coffset); + buffer[23] = REG_16_8_LSB(hdmi_color_space_conversion_cmd->coffset); + 
buffer[24] = hdmi_color_space_conversion_cmd->lmax; + buffer[25] = hdmi_color_space_conversion_cmd->lmin; + buffer[26] = hdmi_color_space_conversion_cmd->cmax; + buffer[27] = hdmi_color_space_conversion_cmd->cmin; + + *length = AV8100_COMMAND_COLORSPACECONVERSION_SIZE - 1; + return 0; +} + +static int configuration_cec_message_write_get(struct av8100_device *adev, + char *buffer, unsigned int *length) +{ + buffer[0] = adev->config.hdmi_cec_message_write_cmd.buffer_length; + memcpy(&buffer[1], adev->config.hdmi_cec_message_write_cmd.buffer, + adev->config.hdmi_cec_message_write_cmd.buffer_length); + + *length = adev->config.hdmi_cec_message_write_cmd.buffer_length + 1; + + return 0; +} + +static int configuration_cec_message_read_get(char *buffer, + unsigned int *length) +{ + /* No buffer data */ + *length = AV8100_COMMAND_CEC_MESSAGE_READ_BACK_SIZE - 1; + return 0; +} + +static int configuration_denc_get(struct av8100_device *adev, + char *buffer, unsigned int *length) +{ + buffer[0] = adev->config.hdmi_denc_cmd.cvbs_video_format; + buffer[1] = adev->config.hdmi_denc_cmd.standard_selection; + buffer[2] = adev->config.hdmi_denc_cmd.enable; + buffer[3] = adev->config.hdmi_denc_cmd.macrovision_enable; + buffer[4] = adev->config.hdmi_denc_cmd.internal_generator; + + *length = AV8100_COMMAND_DENC_SIZE - 1; + return 0; +} + +static int configuration_hdmi_get(struct av8100_device *adev, + char *buffer, unsigned int *length) +{ + buffer[0] = adev->config.hdmi_cmd.hdmi_mode; + buffer[1] = adev->config.hdmi_cmd.hdmi_format; + buffer[2] = adev->config.hdmi_cmd.dvi_format; + + *length = AV8100_COMMAND_HDMI_SIZE - 1; + return 0; +} + +static int configuration_hdcp_sendkey_get(struct av8100_device *adev, + char *buffer, unsigned int *length) +{ + buffer[0] = adev->config.hdmi_hdcp_send_key_cmd.key_number; + memcpy(&buffer[1], adev->config.hdmi_hdcp_send_key_cmd.data, + adev->config.hdmi_hdcp_send_key_cmd.data_len); + + *length = adev->config.hdmi_hdcp_send_key_cmd.data_len + 1; 
+ return 0; +} + +static int configuration_hdcp_management_get(struct av8100_device *adev, + char *buffer, unsigned int *length) +{ + buffer[0] = adev->config.hdmi_hdcp_management_format_cmd.req_type; + buffer[1] = adev->config.hdmi_hdcp_management_format_cmd.encr_use; + + *length = AV8100_COMMAND_HDCP_MANAGEMENT_SIZE - 1; + return 0; +} + +static int configuration_infoframe_get(struct av8100_device *adev, + char *buffer, unsigned int *length) +{ + buffer[0] = adev->config.hdmi_infoframes_cmd.type; + buffer[1] = adev->config.hdmi_infoframes_cmd.version; + buffer[2] = adev->config.hdmi_infoframes_cmd.length; + buffer[3] = adev->config.hdmi_infoframes_cmd.crc; + memcpy(&buffer[4], adev->config.hdmi_infoframes_cmd.data, + HDMI_INFOFRAME_DATA_SIZE); + + *length = adev->config.hdmi_infoframes_cmd.length + 4; + return 0; +} + +static int av8100_edid_section_readback_get(struct av8100_device *adev, + char *buffer, unsigned int *length) +{ + buffer[0] = adev->config.hdmi_edid_section_readback_cmd.address; + buffer[1] = adev->config.hdmi_edid_section_readback_cmd. + block_number; + + *length = AV8100_COMMAND_EDID_SECTION_READBACK_SIZE - 1; + return 0; +} + +static int configuration_pattern_generator_get(struct av8100_device *adev, + char *buffer, unsigned int *length) +{ + buffer[0] = adev->config.hdmi_pattern_generator_cmd.pattern_type; + buffer[1] = adev->config.hdmi_pattern_generator_cmd. + pattern_video_format; + buffer[2] = adev->config.hdmi_pattern_generator_cmd. 
+ pattern_audio_mode; + + *length = AV8100_COMMAND_PATTERNGENERATOR_SIZE - 1; + return 0; +} + +static int configuration_fuse_aes_key_get(struct av8100_device *adev, + char *buffer, unsigned int *length) +{ + buffer[0] = adev->config.hdmi_fuse_aes_key_cmd.fuse_operation; + if (adev->config.hdmi_fuse_aes_key_cmd.fuse_operation) { + /* Write key command */ + memcpy(&buffer[1], adev->config.hdmi_fuse_aes_key_cmd.key, + HDMI_FUSE_AES_KEY_SIZE); + + *length = AV8100_COMMAND_FUSE_AES_KEY_SIZE - 1; + } else { + /* Check key command */ + *length = AV8100_COMMAND_FUSE_AES_CHK_SIZE - 1; + } + return 0; +} + +static int get_command_return_first(struct i2c_client *i2c, + enum av8100_command_type command_type) { + int retval = 0; + char val; + struct device *dev = &i2c->dev; + + retval = read_single_byte(i2c, AV8100_COMMAND_OFFSET, &val); + if (retval) { + dev_dbg(dev, "%s 1st ret failed\n", __func__); + return retval; + } + + if (val != (0x80 | command_type)) { + dev_dbg(dev, "%s 1st ret wrong:%x\n", __func__, val); + return -EFAULT; + } + + return 0; +} + +static int get_command_return_data(struct i2c_client *i2c, + enum av8100_command_type command_type, + u8 *command_buffer, + u8 *buffer_length, + u8 *buffer) +{ + int retval = 0; + char val; + int index = 0; + struct device *dev = &i2c->dev; + + if (buffer_length) + *buffer_length = 0; + + switch (command_type) { + case AV8100_COMMAND_VIDEO_INPUT_FORMAT: + case AV8100_COMMAND_AUDIO_INPUT_FORMAT: + case AV8100_COMMAND_VIDEO_OUTPUT_FORMAT: + case AV8100_COMMAND_VIDEO_SCALING_FORMAT: + case AV8100_COMMAND_COLORSPACECONVERSION: + case AV8100_COMMAND_CEC_MESSAGE_WRITE: + case AV8100_COMMAND_DENC: + case AV8100_COMMAND_HDMI: + case AV8100_COMMAND_INFOFRAMES: + case AV8100_COMMAND_PATTERNGENERATOR: + /* Get the second return byte */ + retval = read_single_byte(i2c, + AV8100_2ND_RET_BYTE_OFFSET, &val); + if (retval) + goto get_command_return_data_fail2r; + + if (val) { + retval = -EFAULT; + goto get_command_return_data_fail2v; + } + 
break; + + case AV8100_COMMAND_CEC_MESSAGE_READ_BACK: + if ((buffer == NULL) || (buffer_length == NULL)) { + retval = -EINVAL; + goto get_command_return_data_fail; + } + + /* Get the return buffer length */ + retval = read_single_byte(i2c, AV8100_CEC_ADDR_OFFSET, &val); + if (retval) + goto get_command_return_data_fail; + + dev_dbg(dev, "cec buflen:%d\n", val); + *buffer_length = val; + + if (*buffer_length > + HDMI_CEC_READ_MAXSIZE) { + dev_dbg(dev, "CEC size too large %d\n", + *buffer_length); + *buffer_length = HDMI_CEC_READ_MAXSIZE; + } + + dev_dbg(dev, "return data: "); + + /* Get the return buffer */ + for (index = 0; index < *buffer_length; ++index) { + retval = read_single_byte(i2c, + AV8100_CEC_RET_BUF_OFFSET + index, &val); + if (retval) { + *buffer_length = 0; + goto get_command_return_data_fail; + } else { + *(buffer + index) = val; + dev_dbg(dev, "%02x ", *(buffer + index)); + } + } + + dev_dbg(dev, "\n"); + break; + + case AV8100_COMMAND_HDCP_MANAGEMENT: + { + u8 nrdev; + u8 devcnt; + int cnt; + + /* Get the second return byte */ + retval = read_single_byte(i2c, + AV8100_2ND_RET_BYTE_OFFSET, &val); + if (retval) { + goto get_command_return_data_fail2r; + } else { + /* Check the second return byte */ + if (val) + goto get_command_return_data_fail2v; + } + + if ((buffer == NULL) || (buffer_length == NULL)) + /* Ignore return data */ + break; + + dev_dbg(dev, "req_type:%02x ", command_buffer[0]); + + /* Check if revoc list data is requested */ + if (command_buffer[0] != + HDMI_REQUEST_FOR_REVOCATION_LIST_INPUT) { + *buffer_length = 0; + break; + } + + dev_dbg(dev, "return data: "); + + /* Get the return buffer */ + for (cnt = 0; cnt < HDMI_HDCP_MGMT_BKSV_SIZE; cnt++) { + retval = read_single_byte(i2c, + AV8100_HDCP_RET_BUF_OFFSET + index, &val); + if (retval) { + *buffer_length = 0; + goto get_command_return_data_fail; + } else { + *(buffer + index) = val; + dev_dbg(dev, "%02x ", *(buffer + index)); + } + index++; + } + + /* Get Device count */ + retval 
= read_single_byte(i2c, + AV8100_HDCP_RET_BUF_OFFSET + index, &nrdev); + if (retval) { + *buffer_length = 0; + goto get_command_return_data_fail; + } else { + *(buffer + index) = nrdev; + dev_dbg(dev, "%02x ", *(buffer + index)); + } + index++; + + /* Determine number of devices */ + nrdev &= HDMI_HDCP_MGMT_DEVICE_MASK; + if (nrdev > HDMI_HDCP_MGMT_MAX_DEVICES_SIZE) + nrdev = HDMI_HDCP_MGMT_MAX_DEVICES_SIZE; + + /* Get Bksv for each connected equipment */ + for (devcnt = 0; devcnt < nrdev; devcnt++) + for (cnt = 0; cnt < HDMI_HDCP_MGMT_BKSV_SIZE; cnt++) { + retval = read_single_byte(i2c, + AV8100_HDCP_RET_BUF_OFFSET + index, + &val); + if (retval) { + *buffer_length = 0; + goto get_command_return_data_fail; + } else { + *(buffer + index) = val; + dev_dbg(dev, "%02x ", + *(buffer + index)); + } + index++; + } + + if (nrdev == 0) + goto hdcp_management_end; + + /* Get SHA signature */ + for (cnt = 0; cnt < HDMI_HDCP_MGMT_SHA_SIZE - 1; cnt++) { + retval = read_single_byte(i2c, + AV8100_HDCP_RET_BUF_OFFSET + index, &val); + if (retval) { + *buffer_length = 0; + goto get_command_return_data_fail; + } else { + *(buffer + index) = val; + dev_dbg(dev, "%02x ", *(buffer + index)); + } + index++; + } + +hdcp_management_end: + *buffer_length = index; + + dev_dbg(dev, "\n"); + } + break; + + case AV8100_COMMAND_EDID_SECTION_READBACK: + if ((buffer == NULL) || (buffer_length == NULL)) { + retval = -EINVAL; + goto get_command_return_data_fail; + } + + /* Return buffer length is fixed */ + *buffer_length = HDMI_EDIDREAD_SIZE; + + dev_dbg(dev, "return data: "); + + /* Get the return buffer */ + for (index = 0; index < *buffer_length; ++index) { + retval = read_single_byte(i2c, + AV8100_EDID_RET_BUF_OFFSET + index, &val); + if (retval) { + *buffer_length = 0; + goto get_command_return_data_fail; + } else { + *(buffer + index) = val; + dev_dbg(dev, "%02x ", *(buffer + index)); + } + } + + dev_dbg(dev, "\n"); + break; + + case AV8100_COMMAND_FUSE_AES_KEY: + if ((buffer == NULL) || 
(buffer_length == NULL)) { + retval = -EINVAL; + goto get_command_return_data_fail; + } + + /* Get the second return byte */ + retval = read_single_byte(i2c, + AV8100_2ND_RET_BYTE_OFFSET, &val); + + if (retval) + goto get_command_return_data_fail2r; + + /* Check the second return byte */ + if (val) { + retval = -EFAULT; + goto get_command_return_data_fail2v; + } + + /* Return buffer length is fixed */ + *buffer_length = HDMI_FUSE_AES_KEY_RET_SIZE; + + /* Get CRC */ + retval = read_single_byte(i2c, + AV8100_FUSE_CRC_OFFSET, &val); + if (retval) + goto get_command_return_data_fail; + + *buffer = val; + dev_dbg(dev, "CRC:%02x ", val); + + /* Get programmed status */ + retval = read_single_byte(i2c, + AV8100_FUSE_PRGD_OFFSET, &val); + if (retval) + goto get_command_return_data_fail; + + *(buffer + 1) = val; + + dev_dbg(dev, "programmed:%02x ", val); + break; + + case AV8100_COMMAND_HDCP_SENDKEY: + if ((command_buffer[0] == HDMI_LOADAES_END_BLK_NR) && + ((buffer == NULL) || (buffer_length == NULL))) { + retval = -EINVAL; + goto get_command_return_data_fail; + } + + /* Get the second return byte */ + retval = read_single_byte(i2c, + AV8100_2ND_RET_BYTE_OFFSET, &val); + if (retval) + goto get_command_return_data_fail2r; + + if (val) { + retval = -EFAULT; + goto get_command_return_data_fail2v; + } + + if (command_buffer[0] == HDMI_LOADAES_END_BLK_NR) { + /* Return CRC32 if last AES block */ + int cnt; + + dev_dbg(dev, "CRC32:"); + for (cnt = 0; cnt < HDMI_CRC32_SIZE; cnt++) { + if (read_single_byte(i2c, + AV8100_CRC32_OFFSET + cnt, &val)) + goto get_command_return_data_fail; + *(buffer + cnt) = val; + dev_dbg(dev, "%02x", val); + } + + *buffer_length = HDMI_CRC32_SIZE; + } + break; + + default: + retval = -EFAULT; + break; + } + + return retval; +get_command_return_data_fail2r: + dev_dbg(dev, "%s Reading 2nd return byte failed\n", __func__); + return retval; +get_command_return_data_fail2v: + dev_dbg(dev, "%s 2nd return byte is wrong:%x\n", __func__, val); + return retval; 
+get_command_return_data_fail: + dev_dbg(dev, "%s FAIL\n", __func__); + return retval; +} + +static int av8100_powerup1(struct av8100_device *adev) +{ + int retval; + struct av8100_platform_data *pdata = adev->dev->platform_data; + + /* Regulator enable */ + if ((adev->params.regulator_pwr) && + (adev->params.regulator_requested == false)) { + retval = regulator_enable(adev->params.regulator_pwr); + if (retval < 0) { + dev_warn(adev->dev, "%s: regulator_enable failed\n", + __func__); + return retval; + } + dev_dbg(adev->dev, "regulator_enable ok\n"); + adev->params.regulator_requested = true; + } + + /* Reset av8100 */ + gpio_set_value_cansleep(pdata->reset, 1); + + /* Need to wait before proceeding */ + mdelay(AV8100_WAITTIME_1MS); + + av8100_set_state(adev, AV8100_OPMODE_STANDBY); + + if (pdata->alt_powerupseq) { + dev_dbg(adev->dev, "powerup seq alt\n"); + retval = av8100_5V_w(0, 0, AV8100_ON_TIME); + if (retval) { + dev_err(adev->dev, "%s reg_wr err 1\n", __func__); + goto av8100_powerup1_err; + } + + udelay(AV8100_WATTIME_100US); + + retval = av8100_reg_stby_pend_int_w( + AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW, + AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW, + AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW, + AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_HIGH); + if (retval) { + dev_err(adev->dev, "%s reg_wr err 2\n", __func__); + goto av8100_powerup1_err; + } + + udelay(AV8100_WATTIME_100US); + + retval = av8100_reg_stby_w(AV8100_STANDBY_CPD_LOW, + AV8100_STANDBY_STBY_HIGH, pdata->mclk_freq); + if (retval) { + dev_err(adev->dev, "%s reg_wr err 3\n", __func__); + goto av8100_powerup1_err; + } + + mdelay(AV8100_WAITTIME_1MS); + + retval = av8100_reg_stby_w(AV8100_STANDBY_CPD_LOW, + AV8100_STANDBY_STBY_LOW, pdata->mclk_freq); + if (retval) { + dev_err(adev->dev, "%s reg_wr err 4\n", __func__); + goto av8100_powerup1_err; + } + + mdelay(AV8100_WAITTIME_1MS); + + retval = av8100_reg_stby_pend_int_w( + AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW, + 
AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW, + AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW, + AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_LOW); + if (retval) { + dev_err(adev->dev, "%s reg_wr err 5\n", __func__); + goto av8100_powerup1_err; + } + + mdelay(AV8100_WAITTIME_1MS); + } + + retval = request_irq(pdata->irq, av8100_intr_handler, + IRQF_TRIGGER_RISING, "av8100", adev); + if (retval == 0) + adev->params.irq_requested = true; + else + dev_err(adev->dev, "request_irq %d failed %d\n", + pdata->irq, retval); + + return retval; + +av8100_powerup1_err: + av8100_powerdown(); + return -EFAULT; +} + +static int av8100_powerup2(struct av8100_device *adev) +{ + int retval; + + /* ON time & OFF time on 5v HDMI plug detect */ + retval = av8100_5V_w(adev->params.denc_off_time, + adev->params.hdmi_off_time, + adev->params.on_time); + if (retval) { + dev_err(adev->dev, + "Failed to write the value to av8100 register\n"); + return retval; + } + + mdelay(AV8100_WAITTIME_1MS); + + av8100_set_state(adev, AV8100_OPMODE_SCAN); + + return 0; +} + +static int register_read_internal(u8 offset, u8 *value) +{ + int retval = 0; + struct i2c_client *i2c; + struct av8100_device *adev; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (!adev) + return -EFAULT; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + i2c = adev->config.client; + + /* Read from register */ + retval = read_single_byte(i2c, offset, value); + if (retval) { + dev_dbg(adev->dev, + "Failed to read the value from av8100 register\n"); + return -EFAULT; + } + + return retval; +} + +static int register_write_internal(u8 offset, u8 value) +{ + int retval; + struct i2c_client *i2c; + struct av8100_device *adev; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (!adev) + return -EFAULT; + + i2c = adev->config.client; + + /* Write to register */ + retval = write_single_byte(i2c, offset, value); + if (retval) { + dev_dbg(adev->dev, + "Failed to write the value to av8100 register\n"); + return 
-EFAULT; + } + + return 0; +} + +int av8100_powerscan(void) +{ + int retval; + struct av8100_device *adev; + struct av8100_platform_data *pdata; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (!adev) + return -EFAULT; + + pdata = adev->dev->platform_data; + + dev_dbg(adev->dev, "%s\n", __func__); + + if (av8100_status_get().av8100_state > AV8100_OPMODE_SCAN) { + dev_dbg(adev->dev, "set to scan mode\n"); + + av8100_disable_interrupt(); + + /* Stby mode */ + retval = av8100_reg_stby_w(AV8100_STANDBY_CPD_LOW, + AV8100_STANDBY_STBY_LOW, pdata->mclk_freq); + if (retval) { + dev_err(adev->dev, + "Failed to write to av8100 register\n"); + return retval; + } + + /* Remove APE OPP requirement */ + if (adev->params.opp_requested) { + prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, + (char *)adev->miscdev.name); + prcmu_qos_remove_requirement(PRCMU_QOS_DDR_OPP, + (char *)adev->miscdev.name); + adev->params.opp_requested = false; + } + + /* Clock disable */ + if (adev->params.inputclk && + adev->params.inputclk_requested) { + clk_disable(adev->params.inputclk); + adev->params.inputclk_requested = false; + } + + mdelay(AV8100_WAITTIME_1MS); + + av8100_enable_interrupt(); + + av8100_set_state(adev, AV8100_OPMODE_SCAN); + } + + return 0; +} +EXPORT_SYMBOL(av8100_powerscan); + +int av8100_powerup(void) +{ + int ret = 0; + struct av8100_device *adev; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (!adev) + return -EFAULT; + + if (av8100_status_get().av8100_state == AV8100_OPMODE_UNDEFINED) + return -EINVAL; + + if (av8100_status_get().av8100_state < AV8100_OPMODE_STANDBY) { + ret = av8100_powerup1(adev); + if (ret) { + dev_err(adev->dev, "av8100_powerup1 fail\n"); + return -EFAULT; + } + } + + if (av8100_status_get().av8100_state < AV8100_OPMODE_SCAN) + ret = av8100_powerup2(adev); + + av8100_enable_interrupt(); + + return ret; +} +EXPORT_SYMBOL(av8100_powerup); + +int av8100_powerdown(void) +{ + int retval = 0; + struct av8100_device *adev; + struct 
av8100_platform_data *pdata; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (!adev) + return -EFAULT; + + pdata = adev->dev->platform_data; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + goto av8100_powerdown_end; + + av8100_disable_interrupt(); + + if (adev->params.irq_requested) + free_irq(pdata->irq, adev); + adev->params.irq_requested = false; + + if (pdata->alt_powerupseq) { + retval = av8100_reg_stby_pend_int_w( + AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW, + AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW, + AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW, + AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_HIGH); + + if (retval) + dev_err(adev->dev, "%s reg_wr err\n", __func__); + msleep(AV8100_WAITTIME_50MS); + } + + /* Remove APE OPP requirement */ + if (adev->params.opp_requested) { + prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, + (char *)adev->miscdev.name); + prcmu_qos_remove_requirement(PRCMU_QOS_DDR_OPP, + (char *)adev->miscdev.name); + adev->params.opp_requested = false; + } + + /* Clock disable */ + if (adev->params.inputclk && adev->params.inputclk_requested) { + clk_disable(adev->params.inputclk); + adev->params.inputclk_requested = false; + } + + av8100_set_state(adev, AV8100_OPMODE_SHUTDOWN); + + gpio_set_value_cansleep(pdata->reset, 0); + + /* Regulator disable */ + if ((adev->params.regulator_pwr) && + (adev->params.regulator_requested)) { + dev_dbg(adev->dev, "regulator_disable\n"); + regulator_disable(adev->params.regulator_pwr); + adev->params.regulator_requested = false; + } + + if (pdata->alt_powerupseq) + mdelay(AV8100_WAITTIME_5MS); + +av8100_powerdown_end: + return retval; +} +EXPORT_SYMBOL(av8100_powerdown); + +int av8100_download_firmware(enum interface_type if_type) +{ + int retval; + int temp = 0x0; + int increment = 15; + int index = 0; + int size = 0x0; + char val = 0x0; + char checksum = 0; + int cnt; + int cnt_max; + struct i2c_client *i2c; + u8 uc; + u8 fdl; + u8 hld; + u8 wa; + u8 ra; + struct av8100_platform_data 
*pdata; + const struct firmware *fw_file; + u8 *fw_buff; + int fw_bytes; + struct av8100_device *adev; + struct av8100_status status; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (!adev) + return -EINVAL; + + LOCK_AV8100_FWDL; + + status = av8100_status_get(); + if (status.av8100_state <= AV8100_OPMODE_SHUTDOWN) { + retval = -EINVAL; + goto av8100_download_firmware_err2; + } + + if (status.av8100_state >= AV8100_OPMODE_INIT) { + dev_dbg(adev->dev, "FW already ok\n"); + retval = 0; + goto av8100_download_firmware_err2; + } + + av8100_set_state(adev, AV8100_OPMODE_INIT); + + pdata = adev->dev->platform_data; + + /* Request firmware */ + if (request_firmware(&fw_file, + AV8100_FW_FILENAME, + adev->dev)) { + dev_err(adev->dev, "fw request failed\n"); + retval = -EFAULT; + goto av8100_download_firmware_err2; + } + + /* Master clock timing, running */ + retval = av8100_reg_stby_w(AV8100_STANDBY_CPD_LOW, + AV8100_STANDBY_STBY_HIGH, pdata->mclk_freq); + if (retval) { + dev_err(adev->dev, + "Failed to write the value to av8100 register\n"); + goto av8100_download_firmware_err; + } + + mdelay(AV8100_WAITTIME_1MS); + + /* Clock enable */ + if (adev->params.inputclk && + adev->params.inputclk_requested == false) { + if (clk_enable(adev->params.inputclk)) { + dev_err(adev->dev, "inputclk en failed\n"); + retval = -EFAULT; + goto av8100_download_firmware_err; + } + + adev->params.inputclk_requested = true; + } + + /* Request 100% APE OPP */ + if (adev->params.opp_requested == false) { + if (prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, + (char *)adev->miscdev.name, 100)) { + dev_err(adev->dev, "APE OPP 100 failed\n"); + retval = -EFAULT; + goto av8100_download_firmware_err; + } + if (prcmu_qos_add_requirement(PRCMU_QOS_DDR_OPP, + (char *)adev->miscdev.name, 100)) { + dev_err(adev->dev, "DDR OPP 100 failed\n"); + prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, + (char *)adev->miscdev.name); + retval = -EFAULT; + goto av8100_download_firmware_err; + } + + 
adev->params.opp_requested = true; + } + + msleep(AV8100_WAITTIME_10MS); + + /* Prepare firmware data */ + fw_bytes = fw_file->size; + fw_buff = (u8 *)fw_file->data; + dev_dbg(adev->dev, "fw size:%d\n", fw_bytes); + + i2c = adev->config.client; + + /* Enable firmware download */ + retval = av8100_reg_gen_ctrl_w( + AV8100_GENERAL_CONTROL_FDL_HIGH, + AV8100_GENERAL_CONTROL_HLD_HIGH, + AV8100_GENERAL_CONTROL_WA_LOW, + AV8100_GENERAL_CONTROL_RA_LOW); + if (retval) { + dev_err(adev->dev, + "Failed to write the value to av8100 register\n"); + retval = -EFAULT; + goto av8100_download_firmware_err; + } + + retval = av8100_reg_gen_ctrl_r(&fdl, &hld, &wa, &ra); + if (retval) { + dev_err(adev->dev, + "Failed to read the value from av8100 register\n"); + retval = -EFAULT; + goto av8100_download_firmware_err; + } else { + dev_dbg(adev->dev, "GENERAL_CONTROL_REG register fdl:%d " + "hld:%d wa:%d ra:%d\n", fdl, hld, wa, ra); + } + + LOCK_AV8100_HW; + + temp = fw_bytes % increment; + for (size = 0; size < (fw_bytes-temp); size = size + increment, + index += increment) { + if (if_type == I2C_INTERFACE) { + retval = write_multi_byte(i2c, + AV8100_FIRMWARE_DOWNLOAD_ENTRY, fw_buff + size, + increment); + if (retval) { + dev_dbg(adev->dev, "Failed to download the " + "av8100 firmware\n"); + UNLOCK_AV8100_HW; + retval = -EFAULT; + goto av8100_download_firmware_err; + } + } else if (if_type == DSI_INTERFACE) { + dev_dbg(adev->dev, + "DSI_INTERFACE is currently not supported\n"); + UNLOCK_AV8100_HW; + retval = -EINVAL; + goto av8100_download_firmware_err; + } else { + UNLOCK_AV8100_HW; + retval = -EINVAL; + goto av8100_download_firmware_err; + } + } + + /* Transfer last firmware bytes */ + if (if_type == I2C_INTERFACE) { + retval = write_multi_byte(i2c, + AV8100_FIRMWARE_DOWNLOAD_ENTRY, fw_buff + size, temp); + if (retval) { + dev_dbg(adev->dev, + "Failed to download the av8100 firmware\n"); + UNLOCK_AV8100_HW; + retval = -EFAULT; + goto av8100_download_firmware_err; + } + } else if 
(if_type == DSI_INTERFACE) { + /* TODO: Add support for DSI firmware download */ + UNLOCK_AV8100_HW; + retval = -EINVAL; + goto av8100_download_firmware_err; + } else { + UNLOCK_AV8100_HW; + retval = -EINVAL; + goto av8100_download_firmware_err; + } + + /* check transfer*/ + for (size = 0; size < fw_bytes; size++) + checksum = checksum ^ fw_buff[size]; + + UNLOCK_AV8100_HW; + + retval = av8100_reg_fw_dl_entry_r(&val); + if (retval) { + dev_dbg(adev->dev, + "Failed to read the value from the av8100 register\n"); + retval = -EFAULT; + goto av8100_download_firmware_err; + } + + dev_dbg(adev->dev, "checksum:%x,val:%x\n", checksum, val); + + if (checksum != val) { + dev_dbg(adev->dev, + ">Fw downloading.... FAIL checksum issue\n"); + dev_dbg(adev->dev, "checksum = %d\n", checksum); + dev_dbg(adev->dev, "checksum read: %d\n", val); + retval = -EFAULT; + goto av8100_download_firmware_err; + } else { + dev_dbg(adev->dev, ">Fw downloading.... success\n"); + } + + /* Set to idle mode */ + av8100_reg_gen_ctrl_w(AV8100_GENERAL_CONTROL_FDL_LOW, + AV8100_GENERAL_CONTROL_HLD_LOW, AV8100_GENERAL_CONTROL_WA_LOW, + AV8100_GENERAL_CONTROL_RA_LOW); + if (retval) { + dev_dbg(adev->dev, + "Failed to write the value to the av8100 register\n"); + retval = -EFAULT; + goto av8100_download_firmware_err; + } + + /* Wait Internal Micro controler ready */ + cnt = 0; + cnt_max = sizeof(waittime_retry) / sizeof(waittime_retry[0]); + retval = av8100_reg_gen_status_r(NULL, NULL, NULL, &uc, + NULL, NULL); + while ((retval == 0) && (uc != 0x1) && (cnt < cnt_max)) { + mdelay(waittime_retry[cnt]); + retval = av8100_reg_gen_status_r(NULL, NULL, NULL, + &uc, NULL, NULL); + cnt++; + } + dev_dbg(adev->dev, "av8100 fwdl cnt:%d\n", cnt); + + if (retval) { + dev_dbg(adev->dev, + "Failed to read the value from the av8100 register\n"); + retval = -EFAULT; + goto av8100_download_firmware_err; + } + + if (uc != 0x1) + dev_dbg(adev->dev, "UC is not ready\n"); + + release_firmware(fw_file); + + if 
(adev->chip_version != 1) { + char *cut_str; + + /* Get cut version */ + retval = read_single_byte(i2c, AV8100_CUTVER_OFFSET, &val); + if (retval) { + dev_err(adev->dev, "Read cut ver failed\n"); + return retval; + } + + switch (val) { + case 0x00: + cut_str = CUT_STR_0; + break; + case 0x01: + cut_str = CUT_STR_1; + break; + case 0x03: + cut_str = CUT_STR_3; + break; + case 0x30: + cut_str = CUT_STR_30; + break; + default: + cut_str = CUT_STR_UNKNOWN; + break; + } + dev_dbg(adev->dev, "Cut ver %d %s\n", val, cut_str); + } + + av8100_set_state(adev, AV8100_OPMODE_IDLE); + + UNLOCK_AV8100_FWDL; + return 0; + +av8100_download_firmware_err: + release_firmware(fw_file); + + /* Remove APE OPP requirement */ + if (adev->params.opp_requested) { + prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, + (char *)adev->miscdev.name); + prcmu_qos_remove_requirement(PRCMU_QOS_DDR_OPP, + (char *)adev->miscdev.name); + adev->params.opp_requested = false; + } + + /* Clock disable */ + if (adev->params.inputclk && adev->params.inputclk_requested) { + clk_disable(adev->params.inputclk); + adev->params.inputclk_requested = false; + } + +av8100_download_firmware_err2: + UNLOCK_AV8100_FWDL; + return retval; +} +EXPORT_SYMBOL(av8100_download_firmware); + +int av8100_disable_interrupt(void) +{ + int retval; + u8 hpdm = 0; + u8 cpdm = 0; + struct av8100_device *adev; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (!adev) + return -EINVAL; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + if (!adev->params.ints_enabled) + return 0; + + retval = av8100_reg_stby_pend_int_w( + AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW, + AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW, + AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW, + AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_LOW); + if (retval) { + dev_dbg(adev->dev, + "Failed to write the value to av8100 register\n"); + return -EFAULT; + } + + retval = av8100_reg_gen_int_mask_w( + AV8100_GENERAL_INTERRUPT_MASK_EOCM_LOW, + 
AV8100_GENERAL_INTERRUPT_MASK_VSIM_LOW, + AV8100_GENERAL_INTERRUPT_MASK_VSOM_LOW, + AV8100_GENERAL_INTERRUPT_MASK_CECM_LOW, + AV8100_GENERAL_INTERRUPT_MASK_HDCPM_LOW, + AV8100_GENERAL_INTERRUPT_MASK_UOVBM_LOW, + AV8100_GENERAL_INTERRUPT_MASK_TEM_LOW); + if (retval) { + dev_dbg(adev->dev, + "Failed to write the value to av8100 register\n"); + return -EFAULT; + } + + hpdm = adev->params.hpdm; + cpdm = adev->params.cpdm; + + retval = av8100_reg_stby_int_mask_w( + AV8100_STANDBY_INTERRUPT_MASK_HPDM_LOW, + AV8100_STANDBY_INTERRUPT_MASK_CPDM_LOW, + AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_INPUT, + AV8100_STANDBY_INTERRUPT_MASK_IPOL_LOW); + if (retval) { + dev_dbg(adev->dev, + "Failed to write the value to av8100 register\n"); + return -EFAULT; + } + + adev->params.hpdm = hpdm; + adev->params.cpdm = cpdm; + adev->params.ints_enabled = false; + + return 0; +} +EXPORT_SYMBOL(av8100_disable_interrupt); + +int av8100_enable_interrupt(void) +{ + int retval; + struct av8100_device *adev; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (!adev) + return -EINVAL; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + if (adev->params.ints_enabled) + return 0; + + retval = av8100_reg_stby_pend_int_w( + AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW, + AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW, + AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW, + AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_LOW); + if (retval) { + dev_dbg(adev->dev, + "Failed to write the value to av8100 register\n"); + return -EFAULT; + } + + retval = av8100_reg_gen_int_mask_w( + AV8100_GENERAL_INTERRUPT_MASK_EOCM_LOW, + AV8100_GENERAL_INTERRUPT_MASK_VSIM_LOW, + AV8100_GENERAL_INTERRUPT_MASK_VSOM_LOW, + adev->params.cecm, + adev->params.hdcpm, + adev->params.uovbm, + AV8100_GENERAL_INTERRUPT_MASK_TEM_LOW); + if (retval) { + dev_dbg(adev->dev, + "Failed to write the value to av8100 register\n"); + return -EFAULT; + } + + retval = av8100_reg_stby_int_mask_w( + adev->params.hpdm, + 
adev->params.cpdm, + AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_INPUT, + AV8100_STANDBY_INTERRUPT_MASK_IPOL_LOW); + if (retval) { + dev_dbg(adev->dev, + "Failed to write the value to av8100 register\n"); + return -EFAULT; + } + + adev->params.ints_enabled = true; + + return 0; +} +EXPORT_SYMBOL(av8100_enable_interrupt); + +int av8100_reg_stby_w( + u8 cpd, u8 stby, u8 mclkrng) +{ + int retval; + u8 val; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + /* Set register value */ + val = AV8100_STANDBY_CPD(cpd) | AV8100_STANDBY_STBY(stby) | + AV8100_STANDBY_MCLKRNG(mclkrng); + + /* Write to register */ + retval = register_write_internal(AV8100_STANDBY, val); + UNLOCK_AV8100_HW; + return retval; +} +EXPORT_SYMBOL(av8100_reg_stby_w); + +static int av8100_5V_w(u8 denc_off, u8 hdmi_off, u8 on) +{ + u8 val; + int retval; + struct av8100_device *adev; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (!adev) + return -EINVAL; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + /* Set register value. + * chip_version == 1 have one common off time + * chip_version > 1 support different off time for hdmi and tvout. 
*/ + if (adev->chip_version == 1) + val = AV8100_HDMI_5_VOLT_TIME_OFF_TIME(hdmi_off) | + AV8100_HDMI_5_VOLT_TIME_ON_TIME(on); + else + val = AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME(denc_off) | + AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME(hdmi_off) | + AV8100_HDMI_5_VOLT_TIME_ON_TIME(on); + + /* Write to register */ + retval = register_write_internal(AV8100_HDMI_5_VOLT_TIME, val); + + UNLOCK_AV8100_HW; + + return retval; +} + +int av8100_reg_hdmi_5_volt_time_w(u8 denc_off, u8 hdmi_off, u8 on) +{ + int retval; + struct av8100_device *adev; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (!adev) + return -EINVAL; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + retval = av8100_5V_w(denc_off, hdmi_off, on); + + /* Set vars */ + if (adev->chip_version > 1) + adev->params.denc_off_time = denc_off; + + adev->params.hdmi_off_time = hdmi_off; + if (on) + adev->params.on_time = on; + + return retval; +} +EXPORT_SYMBOL(av8100_reg_hdmi_5_volt_time_w); + +int av8100_reg_stby_int_mask_w( + u8 hpdm, u8 cpdm, u8 stbygpiocfg, u8 ipol) +{ + int retval; + u8 val; + struct av8100_device *adev; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (!adev) + return -EINVAL; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + /* Set register value */ + val = AV8100_STANDBY_INTERRUPT_MASK_HPDM(hpdm) | + AV8100_STANDBY_INTERRUPT_MASK_CPDM(cpdm) | + AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG(stbygpiocfg) | + AV8100_STANDBY_INTERRUPT_MASK_IPOL(ipol); + + /* Write to register */ + retval = register_write_internal(AV8100_STANDBY_INTERRUPT_MASK, val); + + adev->params.hpdm = hpdm; + adev->params.cpdm = cpdm; + + UNLOCK_AV8100_HW; + return retval; +} +EXPORT_SYMBOL(av8100_reg_stby_int_mask_w); + +int av8100_reg_stby_pend_int_w( + u8 hpdi, u8 cpdi, u8 oni, u8 bpdig) +{ + int retval; + u8 val; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + /* 
Set register value */ + val = AV8100_STANDBY_PENDING_INTERRUPT_HPDI(hpdi) | + AV8100_STANDBY_PENDING_INTERRUPT_CPDI(cpdi) | + AV8100_STANDBY_PENDING_INTERRUPT_ONI(oni) | + AV8100_STANDBY_PENDING_INTERRUPT_BPDIG(bpdig); + + /* Write to register */ + retval = register_write_internal(AV8100_STANDBY_PENDING_INTERRUPT, val); + + UNLOCK_AV8100_HW; + return retval; +} +EXPORT_SYMBOL(av8100_reg_stby_pend_int_w); + +int av8100_reg_gen_int_mask_w( + u8 eocm, u8 vsim, u8 vsom, u8 cecm, u8 hdcpm, u8 uovbm, u8 tem) +{ + int retval; + u8 val; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + /* Set register value */ + val = AV8100_GENERAL_INTERRUPT_MASK_EOCM(eocm) | + AV8100_GENERAL_INTERRUPT_MASK_VSIM(vsim) | + AV8100_GENERAL_INTERRUPT_MASK_VSOM(vsom) | + AV8100_GENERAL_INTERRUPT_MASK_CECM(cecm) | + AV8100_GENERAL_INTERRUPT_MASK_HDCPM(hdcpm) | + AV8100_GENERAL_INTERRUPT_MASK_UOVBM(uovbm) | + AV8100_GENERAL_INTERRUPT_MASK_TEM(tem); + + /* Write to register */ + retval = register_write_internal(AV8100_GENERAL_INTERRUPT_MASK, val); + + UNLOCK_AV8100_HW; + return retval; +} +EXPORT_SYMBOL(av8100_reg_gen_int_mask_w); + +int av8100_reg_gen_int_w( + u8 eoci, u8 vsii, u8 vsoi, u8 ceci, u8 hdcpi, u8 uovbi) +{ + int retval; + u8 val; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + /* Set register value */ + val = AV8100_GENERAL_INTERRUPT_EOCI(eoci) | + AV8100_GENERAL_INTERRUPT_VSII(vsii) | + AV8100_GENERAL_INTERRUPT_VSOI(vsoi) | + AV8100_GENERAL_INTERRUPT_CECI(ceci) | + AV8100_GENERAL_INTERRUPT_HDCPI(hdcpi) | + AV8100_GENERAL_INTERRUPT_UOVBI(uovbi); + + /* Write to register */ + retval = register_write_internal(AV8100_GENERAL_INTERRUPT, val); + UNLOCK_AV8100_HW; + return retval; +} +EXPORT_SYMBOL(av8100_reg_gen_int_w); + +int av8100_reg_gpio_conf_w( + u8 dat3dir, u8 dat3val, u8 dat2dir, u8 dat2val, u8 dat1dir, + u8 dat1val, u8 ucdbg) +{ + int retval; + u8 val; + + if 
(av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + /* Set register value */ + val = AV8100_GPIO_CONFIGURATION_DAT3DIR(dat3dir) | + AV8100_GPIO_CONFIGURATION_DAT3VAL(dat3val) | + AV8100_GPIO_CONFIGURATION_DAT2DIR(dat2dir) | + AV8100_GPIO_CONFIGURATION_DAT2VAL(dat2val) | + AV8100_GPIO_CONFIGURATION_DAT1DIR(dat1dir) | + AV8100_GPIO_CONFIGURATION_DAT1VAL(dat1val) | + AV8100_GPIO_CONFIGURATION_UCDBG(ucdbg); + + /* Write to register */ + retval = register_write_internal(AV8100_GPIO_CONFIGURATION, val); + UNLOCK_AV8100_HW; + return retval; +} +EXPORT_SYMBOL(av8100_reg_gpio_conf_w); + +int av8100_reg_gen_ctrl_w( + u8 fdl, u8 hld, u8 wa, u8 ra) +{ + int retval; + u8 val; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + /* Set register value */ + val = AV8100_GENERAL_CONTROL_FDL(fdl) | + AV8100_GENERAL_CONTROL_HLD(hld) | + AV8100_GENERAL_CONTROL_WA(wa) | + AV8100_GENERAL_CONTROL_RA(ra); + + /* Write to register */ + retval = register_write_internal(AV8100_GENERAL_CONTROL, val); + UNLOCK_AV8100_HW; + return retval; +} +EXPORT_SYMBOL(av8100_reg_gen_ctrl_w); + +int av8100_reg_fw_dl_entry_w( + u8 mbyte_code_entry) +{ + int retval; + u8 val; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + /* Set register value */ + val = AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY( + mbyte_code_entry); + + /* Write to register */ + retval = register_write_internal(AV8100_FIRMWARE_DOWNLOAD_ENTRY, val); + UNLOCK_AV8100_HW; + return retval; +} +EXPORT_SYMBOL(av8100_reg_fw_dl_entry_w); + +int av8100_reg_w( + u8 offset, u8 value) +{ + int retval = 0; + struct i2c_client *i2c; + struct av8100_device *adev; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (!adev) + return -EINVAL; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + i2c = adev->config.client; + + /* 
Write to register */ + retval = write_single_byte(i2c, offset, value); + if (retval) { + dev_dbg(adev->dev, + "Failed to write the value to av8100 register\n"); + UNLOCK_AV8100_HW; + return -EFAULT; + } + + UNLOCK_AV8100_HW; + return 0; +} +EXPORT_SYMBOL(av8100_reg_w); + +int av8100_reg_stby_r( + u8 *cpd, u8 *stby, u8 *hpds, u8 *cpds, u8 *mclkrng) +{ + int retval; + u8 val; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + /* Read from register */ + retval = register_read_internal(AV8100_STANDBY, &val); + + /* Set return params */ + if (cpd) + *cpd = AV8100_STANDBY_CPD_GET(val); + if (stby) + *stby = AV8100_STANDBY_STBY_GET(val); + if (hpds) + *hpds = AV8100_STANDBY_HPDS_GET(val); + if (cpds) + *cpds = AV8100_STANDBY_CPDS_GET(val); + if (mclkrng) + *mclkrng = AV8100_STANDBY_MCLKRNG_GET(val); + + UNLOCK_AV8100_HW; + return retval; +} +EXPORT_SYMBOL(av8100_reg_stby_r); + +int av8100_reg_hdmi_5_volt_time_r( + u8 *denc_off_time, u8 *hdmi_off_time, u8 *on_time) +{ + int retval; + u8 val; + struct av8100_device *adev; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (!adev) + return -EINVAL; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + /* Read from register */ + retval = register_read_internal(AV8100_HDMI_5_VOLT_TIME, &val); + + /* Set return params */ + if (adev->chip_version == 1) { + if (denc_off_time) + *denc_off_time = 0; + if (hdmi_off_time) + *hdmi_off_time = + AV8100_HDMI_5_VOLT_TIME_OFF_TIME_GET(val); + } else { + if (denc_off_time) + *denc_off_time = + AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME_GET(val); + if (hdmi_off_time) + *hdmi_off_time = + AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME_GET(val); + } + + if (on_time) + *on_time = AV8100_HDMI_5_VOLT_TIME_ON_TIME_GET(val); + + UNLOCK_AV8100_HW; + return retval; +} +EXPORT_SYMBOL(av8100_reg_hdmi_5_volt_time_r); + +int av8100_reg_stby_int_mask_r( + u8 *hpdm, u8 *cpdm, u8 *stbygpiocfg, u8 *ipol) +{ + 
int retval; + u8 val; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + /* Read from register */ + retval = register_read_internal(AV8100_STANDBY_INTERRUPT_MASK, &val); + + /* Set return params */ + if (hpdm) + *hpdm = AV8100_STANDBY_INTERRUPT_MASK_HPDM_GET(val); + if (cpdm) + *cpdm = AV8100_STANDBY_INTERRUPT_MASK_CPDM_GET(val); + if (stbygpiocfg) + *stbygpiocfg = + AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_GET(val); + if (ipol) + *ipol = AV8100_STANDBY_INTERRUPT_MASK_IPOL_GET(val); + + UNLOCK_AV8100_HW; + return retval; +} +EXPORT_SYMBOL(av8100_reg_stby_int_mask_r); + +int av8100_reg_stby_pend_int_r( + u8 *hpdi, u8 *cpdi, u8 *oni, u8 *sid) +{ + int retval; + u8 val; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + /* Read from register */ + retval = register_read_internal(AV8100_STANDBY_PENDING_INTERRUPT, + &val); + + /* Set return params */ + if (hpdi) + *hpdi = AV8100_STANDBY_PENDING_INTERRUPT_HPDI_GET(val); + if (cpdi) + *cpdi = AV8100_STANDBY_PENDING_INTERRUPT_CPDI_GET(val); + if (oni) + *oni = AV8100_STANDBY_PENDING_INTERRUPT_ONI_GET(val); + if (sid) + *sid = AV8100_STANDBY_PENDING_INTERRUPT_SID_GET(val); + + UNLOCK_AV8100_HW; + return retval; +} +EXPORT_SYMBOL(av8100_reg_stby_pend_int_r); + +int av8100_reg_gen_int_mask_r( + u8 *eocm, + u8 *vsim, + u8 *vsom, + u8 *cecm, + u8 *hdcpm, + u8 *uovbm, + u8 *tem) +{ + int retval; + u8 val; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + /* Read from register */ + retval = register_read_internal(AV8100_GENERAL_INTERRUPT_MASK, &val); + + /* Set return params */ + if (eocm) + *eocm = AV8100_GENERAL_INTERRUPT_MASK_EOCM_GET(val); + if (vsim) + *vsim = AV8100_GENERAL_INTERRUPT_MASK_VSIM_GET(val); + if (vsom) + *vsom = AV8100_GENERAL_INTERRUPT_MASK_VSOM_GET(val); + if (cecm) + *cecm = AV8100_GENERAL_INTERRUPT_MASK_CECM_GET(val); + if (hdcpm) + 
*hdcpm = AV8100_GENERAL_INTERRUPT_MASK_HDCPM_GET(val); + if (uovbm) + *uovbm = AV8100_GENERAL_INTERRUPT_MASK_UOVBM_GET(val); + if (tem) + *tem = AV8100_GENERAL_INTERRUPT_MASK_TEM_GET(val); + + UNLOCK_AV8100_HW; + return retval; +} +EXPORT_SYMBOL(av8100_reg_gen_int_mask_r); + +int av8100_reg_gen_int_r( + u8 *eoci, + u8 *vsii, + u8 *vsoi, + u8 *ceci, + u8 *hdcpi, + u8 *uovbi, + u8 *tei) +{ + int retval; + u8 val; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + /* Read from register */ + retval = register_read_internal(AV8100_GENERAL_INTERRUPT, &val); + + /* Set return params */ + if (eoci) + *eoci = AV8100_GENERAL_INTERRUPT_EOCI_GET(val); + if (vsii) + *vsii = AV8100_GENERAL_INTERRUPT_VSII_GET(val); + if (vsoi) + *vsoi = AV8100_GENERAL_INTERRUPT_VSOI_GET(val); + if (ceci) + *ceci = AV8100_GENERAL_INTERRUPT_CECI_GET(val); + if (hdcpi) + *hdcpi = AV8100_GENERAL_INTERRUPT_HDCPI_GET(val); + if (uovbi) + *uovbi = AV8100_GENERAL_INTERRUPT_UOVBI_GET(val); + if (tei) + *tei = AV8100_GENERAL_INTERRUPT_TEI_GET(val); + + UNLOCK_AV8100_HW; + return retval; +} +EXPORT_SYMBOL(av8100_reg_gen_int_r); + +int av8100_reg_gen_status_r( + u8 *cectxerr, + u8 *cecrec, + u8 *cectrx, + u8 *uc, + u8 *onuvb, + u8 *hdcps) +{ + int retval; + u8 val; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + /* Read from register */ + retval = register_read_internal(AV8100_GENERAL_STATUS, &val); + + /* Set return params */ + if (cectxerr) + *cectxerr = AV8100_GENERAL_STATUS_CECTXERR_GET(val); + if (cecrec) + *cecrec = AV8100_GENERAL_STATUS_CECREC_GET(val); + if (cectrx) + *cectrx = AV8100_GENERAL_STATUS_CECTRX_GET(val); + if (uc) + *uc = AV8100_GENERAL_STATUS_UC_GET(val); + if (onuvb) + *onuvb = AV8100_GENERAL_STATUS_ONUVB_GET(val); + if (hdcps) + *hdcps = AV8100_GENERAL_STATUS_HDCPS_GET(val); + + UNLOCK_AV8100_HW; + return retval; +} +EXPORT_SYMBOL(av8100_reg_gen_status_r); + +int 
av8100_reg_gpio_conf_r( + u8 *dat3dir, + u8 *dat3val, + u8 *dat2dir, + u8 *dat2val, + u8 *dat1dir, + u8 *dat1val, + u8 *ucdbg) +{ + int retval; + u8 val; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + /* Read from register */ + retval = register_read_internal(AV8100_GPIO_CONFIGURATION, &val); + + /* Set return params */ + if (dat3dir) + *dat3dir = AV8100_GPIO_CONFIGURATION_DAT3DIR_GET(val); + if (dat3val) + *dat3val = AV8100_GPIO_CONFIGURATION_DAT3VAL_GET(val); + if (dat2dir) + *dat2dir = AV8100_GPIO_CONFIGURATION_DAT2DIR_GET(val); + if (dat2val) + *dat2val = AV8100_GPIO_CONFIGURATION_DAT2VAL_GET(val); + if (dat1dir) + *dat1dir = AV8100_GPIO_CONFIGURATION_DAT1DIR_GET(val); + if (dat1val) + *dat1val = AV8100_GPIO_CONFIGURATION_DAT1VAL_GET(val); + if (ucdbg) + *ucdbg = AV8100_GPIO_CONFIGURATION_UCDBG_GET(val); + + UNLOCK_AV8100_HW; + return retval; +} +EXPORT_SYMBOL(av8100_reg_gpio_conf_r); + +int av8100_reg_gen_ctrl_r( + u8 *fdl, + u8 *hld, + u8 *wa, + u8 *ra) +{ + int retval; + u8 val; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + /* Read from register */ + retval = register_read_internal(AV8100_GENERAL_CONTROL, &val); + /* Set return params */ + if (fdl) + *fdl = AV8100_GENERAL_CONTROL_FDL_GET(val); + if (hld) + *hld = AV8100_GENERAL_CONTROL_HLD_GET(val); + if (wa) + *wa = AV8100_GENERAL_CONTROL_WA_GET(val); + if (ra) + *ra = AV8100_GENERAL_CONTROL_RA_GET(val); + + UNLOCK_AV8100_HW; + return retval; +} +EXPORT_SYMBOL(av8100_reg_gen_ctrl_r); + +int av8100_reg_fw_dl_entry_r( + u8 *mbyte_code_entry) +{ + int retval; + u8 val; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + /* Read from register */ + retval = register_read_internal(AV8100_FIRMWARE_DOWNLOAD_ENTRY, &val); + + /* Set return params */ + if (mbyte_code_entry) + *mbyte_code_entry = + 
AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY_GET(val); + + UNLOCK_AV8100_HW; + return retval; +} +EXPORT_SYMBOL(av8100_reg_fw_dl_entry_r); + +int av8100_reg_r( + u8 offset, + u8 *value) +{ + int retval = 0; + struct i2c_client *i2c; + struct av8100_device *adev; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (!adev) + return -EINVAL; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + i2c = adev->config.client; + + /* Read from register */ + retval = read_single_byte(i2c, offset, value); + if (retval) { + dev_dbg(adev->dev, + "Failed to read the value from av8100 register\n"); + retval = -EFAULT; + goto av8100_register_read_out; + } + +av8100_register_read_out: + UNLOCK_AV8100_HW; + return retval; +} +EXPORT_SYMBOL(av8100_reg_r); + +int av8100_conf_get(enum av8100_command_type command_type, + union av8100_configuration *config) +{ + struct av8100_device *adev; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (!adev) + return -EINVAL; + + if (av8100_status_get().av8100_state == AV8100_OPMODE_UNDEFINED) + return -EINVAL; + + /* Put configuration data to the corresponding data struct depending + * on command type */ + switch (command_type) { + case AV8100_COMMAND_VIDEO_INPUT_FORMAT: + memcpy(&config->video_input_format, + &adev->config.hdmi_video_input_cmd, + sizeof(struct av8100_video_input_format_cmd)); + break; + + case AV8100_COMMAND_AUDIO_INPUT_FORMAT: + memcpy(&config->audio_input_format, + &adev->config.hdmi_audio_input_cmd, + sizeof(struct av8100_audio_input_format_cmd)); + break; + + case AV8100_COMMAND_VIDEO_OUTPUT_FORMAT: + memcpy(&config->video_output_format, + &adev->config.hdmi_video_output_cmd, + sizeof(struct av8100_video_output_format_cmd)); + break; + + case AV8100_COMMAND_VIDEO_SCALING_FORMAT: + memcpy(&config->video_scaling_format, + &adev->config.hdmi_video_scaling_cmd, + sizeof(struct av8100_video_scaling_format_cmd)); + break; + + case AV8100_COMMAND_COLORSPACECONVERSION: + 
config->color_transform = adev->config.color_transform; + break; + + case AV8100_COMMAND_CEC_MESSAGE_WRITE: + memcpy(&config->cec_message_write_format, + &adev->config.hdmi_cec_message_write_cmd, + sizeof(struct av8100_cec_message_write_format_cmd)); + break; + + case AV8100_COMMAND_CEC_MESSAGE_READ_BACK: + memcpy(&config->cec_message_read_back_format, + &adev->config.hdmi_cec_message_read_back_cmd, + sizeof(struct av8100_cec_message_read_back_format_cmd)); + break; + + case AV8100_COMMAND_DENC: + memcpy(&config->denc_format, &adev->config.hdmi_denc_cmd, + sizeof(struct av8100_denc_format_cmd)); + break; + + case AV8100_COMMAND_HDMI: + memcpy(&config->hdmi_format, &adev->config.hdmi_cmd, + sizeof(struct av8100_hdmi_cmd)); + break; + + case AV8100_COMMAND_HDCP_SENDKEY: + memcpy(&config->hdcp_send_key_format, + &adev->config.hdmi_hdcp_send_key_cmd, + sizeof(struct av8100_hdcp_send_key_format_cmd)); + break; + + case AV8100_COMMAND_HDCP_MANAGEMENT: + memcpy(&config->hdcp_management_format, + &adev->config.hdmi_hdcp_management_format_cmd, + sizeof(struct av8100_hdcp_management_format_cmd)); + break; + + case AV8100_COMMAND_INFOFRAMES: + memcpy(&config->infoframes_format, + &adev->config.hdmi_infoframes_cmd, + sizeof(struct av8100_infoframes_format_cmd)); + break; + + case AV8100_COMMAND_EDID_SECTION_READBACK: + memcpy(&config->edid_section_readback_format, + &adev->config.hdmi_edid_section_readback_cmd, + sizeof(struct + av8100_edid_section_readback_format_cmd)); + break; + + case AV8100_COMMAND_PATTERNGENERATOR: + memcpy(&config->pattern_generator_format, + &adev->config.hdmi_pattern_generator_cmd, + sizeof(struct av8100_pattern_generator_format_cmd)); + break; + + case AV8100_COMMAND_FUSE_AES_KEY: + memcpy(&config->fuse_aes_key_format, + &adev->config.hdmi_fuse_aes_key_cmd, + sizeof(struct av8100_fuse_aes_key_format_cmd)); + break; + + default: + return -EINVAL; + break; + } + + return 0; +} +EXPORT_SYMBOL(av8100_conf_get); + +int av8100_conf_prep(enum 
av8100_command_type command_type, + union av8100_configuration *config) +{ + struct av8100_device *adev; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (!config || !adev) + return -EINVAL; + + /* Put configuration data to the corresponding data struct depending + * on command type */ + switch (command_type) { + case AV8100_COMMAND_VIDEO_INPUT_FORMAT: + memcpy(&adev->config.hdmi_video_input_cmd, + &config->video_input_format, + sizeof(struct av8100_video_input_format_cmd)); + break; + + case AV8100_COMMAND_AUDIO_INPUT_FORMAT: + memcpy(&adev->config.hdmi_audio_input_cmd, + &config->audio_input_format, + sizeof(struct av8100_audio_input_format_cmd)); + break; + + case AV8100_COMMAND_VIDEO_OUTPUT_FORMAT: + memcpy(&adev->config.hdmi_video_output_cmd, + &config->video_output_format, + sizeof(struct av8100_video_output_format_cmd)); + + /* Set params that depend on video output */ + av8100_config_video_output_dep(adev->config. + hdmi_video_output_cmd.video_output_cea_vesa); + break; + + case AV8100_COMMAND_VIDEO_SCALING_FORMAT: + memcpy(&adev->config.hdmi_video_scaling_cmd, + &config->video_scaling_format, + sizeof(struct av8100_video_scaling_format_cmd)); + break; + + case AV8100_COMMAND_COLORSPACECONVERSION: + adev->config.color_transform = config->color_transform; + break; + + case AV8100_COMMAND_CEC_MESSAGE_WRITE: + memcpy(&adev->config.hdmi_cec_message_write_cmd, + &config->cec_message_write_format, + sizeof(struct av8100_cec_message_write_format_cmd)); + break; + + case AV8100_COMMAND_CEC_MESSAGE_READ_BACK: + memcpy(&adev->config.hdmi_cec_message_read_back_cmd, + &config->cec_message_read_back_format, + sizeof(struct av8100_cec_message_read_back_format_cmd)); + break; + + case AV8100_COMMAND_DENC: + memcpy(&adev->config.hdmi_denc_cmd, &config->denc_format, + sizeof(struct av8100_denc_format_cmd)); + break; + + case AV8100_COMMAND_HDMI: + memcpy(&adev->config.hdmi_cmd, &config->hdmi_format, + sizeof(struct av8100_hdmi_cmd)); + break; + + case 
AV8100_COMMAND_HDCP_SENDKEY: + memcpy(&adev->config.hdmi_hdcp_send_key_cmd, + &config->hdcp_send_key_format, + sizeof(struct av8100_hdcp_send_key_format_cmd)); + break; + + case AV8100_COMMAND_HDCP_MANAGEMENT: + memcpy(&adev->config.hdmi_hdcp_management_format_cmd, + &config->hdcp_management_format, + sizeof(struct av8100_hdcp_management_format_cmd)); + break; + + case AV8100_COMMAND_INFOFRAMES: + memcpy(&adev->config.hdmi_infoframes_cmd, + &config->infoframes_format, + sizeof(struct av8100_infoframes_format_cmd)); + break; + + case AV8100_COMMAND_EDID_SECTION_READBACK: + memcpy(&adev->config.hdmi_edid_section_readback_cmd, + &config->edid_section_readback_format, + sizeof(struct + av8100_edid_section_readback_format_cmd)); + break; + + case AV8100_COMMAND_PATTERNGENERATOR: + memcpy(&adev->config.hdmi_pattern_generator_cmd, + &config->pattern_generator_format, + sizeof(struct av8100_pattern_generator_format_cmd)); + break; + + case AV8100_COMMAND_FUSE_AES_KEY: + memcpy(&adev->config.hdmi_fuse_aes_key_cmd, + &config->fuse_aes_key_format, + sizeof(struct av8100_fuse_aes_key_format_cmd)); + break; + + default: + return -EINVAL; + break; + } + + return 0; +} +EXPORT_SYMBOL(av8100_conf_prep); + +int av8100_conf_w(enum av8100_command_type command_type, + u8 *return_buffer_length, + u8 *return_buffer, enum interface_type if_type) +{ + int retval = 0; + u8 cmd_buffer[AV8100_COMMAND_MAX_LENGTH]; + u32 cmd_length = 0; + struct i2c_client *i2c; + struct av8100_device *adev; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (!adev) + return -EINVAL; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + if (return_buffer_length) + *return_buffer_length = 0; + + i2c = adev->config.client; + + memset(&cmd_buffer, 0x00, AV8100_COMMAND_MAX_LENGTH); + +#define PRNK_MODE(_m) dev_dbg(adev->dev, "cmd: " #_m "\n"); + + /* Fill the command buffer with configuration data */ + switch (command_type) { + case AV8100_COMMAND_VIDEO_INPUT_FORMAT: + 
PRNK_MODE(AV8100_COMMAND_VIDEO_INPUT_FORMAT); + configuration_video_input_get(adev, cmd_buffer, &cmd_length); + break; + + case AV8100_COMMAND_AUDIO_INPUT_FORMAT: + PRNK_MODE(AV8100_COMMAND_AUDIO_INPUT_FORMAT); + configuration_audio_input_get(adev, cmd_buffer, &cmd_length); + break; + + case AV8100_COMMAND_VIDEO_OUTPUT_FORMAT: + PRNK_MODE(AV8100_COMMAND_VIDEO_OUTPUT_FORMAT); + configuration_video_output_get(adev, cmd_buffer, &cmd_length); + break; + + case AV8100_COMMAND_VIDEO_SCALING_FORMAT: + PRNK_MODE(AV8100_COMMAND_VIDEO_SCALING_FORMAT); + configuration_video_scaling_get(adev, cmd_buffer, + &cmd_length); + break; + + case AV8100_COMMAND_COLORSPACECONVERSION: + PRNK_MODE(AV8100_COMMAND_COLORSPACECONVERSION); + configuration_colorspace_conversion_get(adev, cmd_buffer, + &cmd_length); + break; + + case AV8100_COMMAND_CEC_MESSAGE_WRITE: + PRNK_MODE(AV8100_COMMAND_CEC_MESSAGE_WRITE); + configuration_cec_message_write_get(adev, cmd_buffer, + &cmd_length); + break; + + case AV8100_COMMAND_CEC_MESSAGE_READ_BACK: + PRNK_MODE(AV8100_COMMAND_CEC_MESSAGE_READ_BACK); + configuration_cec_message_read_get(cmd_buffer, + &cmd_length); + break; + + case AV8100_COMMAND_DENC: + PRNK_MODE(AV8100_COMMAND_DENC); + configuration_denc_get(adev, cmd_buffer, &cmd_length); + break; + + case AV8100_COMMAND_HDMI: + PRNK_MODE(AV8100_COMMAND_HDMI); + configuration_hdmi_get(adev, cmd_buffer, &cmd_length); + break; + + case AV8100_COMMAND_HDCP_SENDKEY: + PRNK_MODE(AV8100_COMMAND_HDCP_SENDKEY); + configuration_hdcp_sendkey_get(adev, cmd_buffer, &cmd_length); + break; + + case AV8100_COMMAND_HDCP_MANAGEMENT: + PRNK_MODE(AV8100_COMMAND_HDCP_MANAGEMENT); + configuration_hdcp_management_get(adev, cmd_buffer, + &cmd_length); + break; + + case AV8100_COMMAND_INFOFRAMES: + PRNK_MODE(AV8100_COMMAND_INFOFRAMES); + configuration_infoframe_get(adev, cmd_buffer, &cmd_length); + break; + + case AV8100_COMMAND_EDID_SECTION_READBACK: + PRNK_MODE(AV8100_COMMAND_EDID_SECTION_READBACK); + 
av8100_edid_section_readback_get(adev, cmd_buffer, &cmd_length); + break; + + case AV8100_COMMAND_PATTERNGENERATOR: + PRNK_MODE(AV8100_COMMAND_PATTERNGENERATOR); + configuration_pattern_generator_get(adev, cmd_buffer, + &cmd_length); + break; + + case AV8100_COMMAND_FUSE_AES_KEY: + PRNK_MODE(AV8100_COMMAND_FUSE_AES_KEY); + configuration_fuse_aes_key_get(adev, cmd_buffer, &cmd_length); + break; + + default: + dev_dbg(adev->dev, "Invalid command type\n"); + retval = -EFAULT; + break; + } + + LOCK_AV8100_HW; + + if (if_type == I2C_INTERFACE) { + int cnt = 0; + int cnt_max; + + dev_dbg(adev->dev, "av8100_conf_w cmd_type:%02x length:%02x ", + command_type, cmd_length); + dev_dbg(adev->dev, "buffer: "); + while (cnt < cmd_length) { + dev_dbg(adev->dev, "%02x ", cmd_buffer[cnt]); + cnt++; + } + + /* Write the command buffer */ + retval = write_multi_byte(i2c, + AV8100_CMD_BUF_OFFSET, cmd_buffer, cmd_length); + if (retval) { + UNLOCK_AV8100_HW; + return retval; + } + + /* Write the command */ + retval = write_single_byte(i2c, AV8100_COMMAND_OFFSET, + command_type); + if (retval) { + UNLOCK_AV8100_HW; + return retval; + } + + + /* Get the first return byte */ + mdelay(AV8100_WAITTIME_1MS); + cnt = 0; + cnt_max = sizeof(waittime_retry) / sizeof(waittime_retry[0]); + retval = get_command_return_first(i2c, command_type); + while (retval && (cnt < cnt_max)) { + mdelay(waittime_retry[cnt]); + retval = get_command_return_first(i2c, command_type); + cnt++; + } + dev_dbg(adev->dev, "first return cnt:%d\n", cnt); + + if (retval) { + UNLOCK_AV8100_HW; + return retval; + } + + retval = get_command_return_data(i2c, command_type, cmd_buffer, + return_buffer_length, return_buffer); + } else if (if_type == DSI_INTERFACE) { + /* TODO */ + } else { + retval = -EINVAL; + dev_dbg(adev->dev, "Invalid command type\n"); + } + + if (command_type == AV8100_COMMAND_HDMI) { + adev->status.hdmi_on = ((adev->config.hdmi_cmd. 
+ hdmi_mode == AV8100_HDMI_ON) && + (adev->config.hdmi_cmd.hdmi_format == AV8100_HDMI)); + } + + UNLOCK_AV8100_HW; + return retval; +} +EXPORT_SYMBOL(av8100_conf_w); + +int av8100_conf_w_raw(enum av8100_command_type command_type, + u8 buffer_length, + u8 *buffer, + u8 *return_buffer_length, + u8 *return_buffer) +{ + int retval = 0; + struct i2c_client *i2c; + int cnt; + int cnt_max; + struct av8100_device *adev; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (!adev) + return -EINVAL; + + if (av8100_status_get().av8100_state <= AV8100_OPMODE_SHUTDOWN) + return -EINVAL; + + LOCK_AV8100_HW; + + if (return_buffer_length) + *return_buffer_length = 0; + + i2c = adev->config.client; + + /* Write the command buffer */ + retval = write_multi_byte(i2c, + AV8100_CMD_BUF_OFFSET, buffer, buffer_length); + if (retval) + goto av8100_conf_w_raw_out; + + /* Write the command */ + retval = write_single_byte(i2c, AV8100_COMMAND_OFFSET, + command_type); + if (retval) + goto av8100_conf_w_raw_out; + + + /* Get the first return byte */ + mdelay(AV8100_WAITTIME_1MS); + cnt = 0; + cnt_max = sizeof(waittime_retry) / sizeof(waittime_retry[0]); + retval = get_command_return_first(i2c, command_type); + while (retval && (cnt < cnt_max)) { + mdelay(waittime_retry[cnt]); + retval = get_command_return_first(i2c, command_type); + cnt++; + } + dev_dbg(adev->dev, "first return cnt:%d\n", cnt); + if (retval) + goto av8100_conf_w_raw_out; + + retval = get_command_return_data(i2c, command_type, buffer, + return_buffer_length, return_buffer); + +av8100_conf_w_raw_out: + UNLOCK_AV8100_HW; + return retval; +} +EXPORT_SYMBOL(av8100_conf_w_raw); + +struct av8100_status av8100_status_get(void) +{ + struct av8100_status status = {0}; + struct av8100_device *adev; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (adev) + return adev->status; + else + return status; +} +EXPORT_SYMBOL(av8100_status_get); + +enum av8100_output_CEA_VESA av8100_video_output_format_get(int xres, + int yres, + int htot, + 
int vtot, + int pixelclk, + bool interlaced) +{ + enum av8100_output_CEA_VESA index = 1; + int yres_div = !interlaced ? 1 : 2; + int hres_div = 1; + long freq1; + long freq2; + struct av8100_device *adev; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (!adev) + return -EINVAL; + + /* + * 720_576_I need a divider for hact and htot since + * these params need to be twice as large as expected in av8100_all_cea, + * which is used as input parameter to video input config. + */ + if ((xres == 720) && (yres == 576) && (interlaced == true)) + hres_div = 2; + + freq1 = 1000000 / htot * 1000000 / vtot / pixelclk + 1; + while (index < sizeof(av8100_all_cea)/sizeof(struct av8100_cea)) { + freq2 = av8100_all_cea[index].frequence / + av8100_all_cea[index].htotale / + av8100_all_cea[index].vtotale; + + dev_dbg(adev->dev, "freq1:%ld freq2:%ld\n", freq1, freq2); + if ((xres == av8100_all_cea[index].hactive / hres_div) && + (yres == av8100_all_cea[index].vactive * yres_div) && + (htot == av8100_all_cea[index].htotale / hres_div) && + (vtot == av8100_all_cea[index].vtotale) && + (abs(freq1 - freq2) < 2)) { + goto av8100_video_output_format_get_out; + } + index++; + } + +av8100_video_output_format_get_out: + dev_dbg(adev->dev, "av8100_video_output_format_get %d %d %d %d %d\n", + xres, yres, htot, vtot, index); + return index; +} +EXPORT_SYMBOL(av8100_video_output_format_get); + +void av8100_hdmi_event_cb_set(void (*hdmi_ev_cb)(enum av8100_hdmi_event)) +{ + struct av8100_device *adev; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (adev) + adev->params.hdmi_ev_cb = hdmi_ev_cb; +} +EXPORT_SYMBOL(av8100_hdmi_event_cb_set); + +u8 av8100_ver_get(void) +{ + struct av8100_device *adev; + + adev = devnr_to_adev(AV8100_DEVNR_DEFAULT); + if (!adev) + return -EINVAL; + + return adev->chip_version; +} +EXPORT_SYMBOL(av8100_ver_get); + +static const struct color_conversion_cmd *get_color_transform_cmd( + struct av8100_device *adev, + enum av8100_color_transform transform) +{ + const 
struct color_conversion_cmd *result; + + switch (transform) { + case AV8100_COLOR_TRANSFORM_INDENTITY: + result = &col_trans_identity; + break; + case AV8100_COLOR_TRANSFORM_INDENTITY_CLAMP_YUV: + result = &col_trans_identity_clamp_yuv; + break; + case AV8100_COLOR_TRANSFORM_YUV_TO_RGB: + if (adev->chip_version == AV8100_CHIPVER_1) + result = &col_trans_yuv_to_rgb_v1; + else + result = &col_trans_yuv_to_rgb_v2; + break; + case AV8100_COLOR_TRANSFORM_YUV_TO_DENC: + result = &col_trans_yuv_to_denc; + break; + case AV8100_COLOR_TRANSFORM_RGB_TO_DENC: + result = &col_trans_rgb_to_denc; + break; + default: + dev_warn(adev->dev, "Unknown color space transform\n"); + result = &col_trans_identity; + break; + } + return result; +} + +static int av8100_open(struct inode *inode, struct file *filp) +{ + pr_debug("%s\n", __func__); + return 0; +} + +static int av8100_release(struct inode *inode, struct file *filp) +{ + pr_debug("%s\n", __func__); + return 0; +} + +static long av8100_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + return 0; +} + +int av8100_device_register(struct av8100_device *adev) +{ + adev->miscdev.minor = MISC_DYNAMIC_MINOR; + adev->miscdev.name = "av8100"; + adev->miscdev.fops = &av8100_fops; + + if (misc_register(&adev->miscdev)) { + pr_err("av8100 misc_register failed\n"); + return -EFAULT; + } + return 0; +} + +int av8100_init_device(struct av8100_device *adev, struct device *dev) +{ + adev->dev = dev; + + if (av8100_config_init(adev)) { + dev_info(dev, "av8100_config_init failed\n"); + return -EFAULT; + } + + if (av8100_params_init(adev)) { + dev_info(dev, "av8100_params_init failed\n"); + return -EFAULT; + } + return 0; +} + +static int __devinit av8100_probe(struct i2c_client *i2c_client, + const struct i2c_device_id *id) +{ + int ret = 0; + struct av8100_platform_data *pdata = i2c_client->dev.platform_data; + struct device *dev; + struct av8100_device *adev; + + dev = &i2c_client->dev; + + dev_dbg(dev, "%s\n", __func__); + + /* 
Allocate device data */ + adev = kzalloc(sizeof(struct av8100_device), GFP_KERNEL); + if (!adev) { + dev_info(dev, "%s: Alloc failure\n", __func__); + return -ENOMEM; + } + + /* Add to list */ + list_add_tail(&adev->list, &av8100_device_list); + + av8100_device_register(adev); + + av8100_init_device(adev, dev); + + av8100_set_state(adev, AV8100_OPMODE_UNDEFINED); + + if (!i2c_check_functionality(i2c_client->adapter, + I2C_FUNC_SMBUS_BYTE_DATA | + I2C_FUNC_SMBUS_READ_WORD_DATA)) { + ret = -ENODEV; + dev_info(dev, "av8100 i2c_check_functionality failed\n"); + goto err1; + } + + init_waitqueue_head(&adev->event); + + adev->config.client = i2c_client; + adev->config.id = (struct i2c_device_id *) id; + i2c_set_clientdata(i2c_client, &adev->config); + + kthread_run(av8100_thread, adev, "av8100_thread"); + + /* Get regulator resource */ + if (pdata->regulator_pwr_id) { + adev->params.regulator_pwr = regulator_get(dev, + pdata->regulator_pwr_id); + if (IS_ERR(adev->params.regulator_pwr)) { + ret = PTR_ERR(adev->params.regulator_pwr); + dev_warn(dev, + "%s: Failed to get regulator '%s'\n", + __func__, pdata->regulator_pwr_id); + adev->params.regulator_pwr = NULL; + goto err1; + } + } + + /* Get clock resource */ + if (pdata->inputclk_id) { + adev->params.inputclk = clk_get(NULL, pdata->inputclk_id); + if (IS_ERR(adev->params.inputclk)) { + adev->params.inputclk = NULL; + dev_warn(dev, "%s: Failed to get clock '%s'\n", + __func__, pdata->inputclk_id); + } + } + + av8100_set_state(adev, AV8100_OPMODE_SHUTDOWN); + + + if (av8100_powerup1(adev)) { + dev_err(adev->dev, "av8100_powerup1 fail\n"); + ret = -EFAULT; + goto err1; + } + + /* Obtain the chip version */ + ret = av8100_reg_stby_pend_int_r(NULL, NULL, NULL, + &adev->chip_version); + if (ret) { + dev_err(adev->dev, "Failed to read chip version\n"); + goto err2; + } + + dev_info(adev->dev, "chip version:%d\n", adev->chip_version); + + switch (adev->chip_version) { + case AV8100_CHIPVER_1: + case AV8100_CHIPVER_2: + break; + 
+ default: + dev_err(adev->dev, "Unsupported chip version:%d\n", + adev->chip_version); + ret = -EINVAL; + goto err2; + break; + } +err2: + (void) av8100_powerdown(); +err1: + return ret; +} + +static int __devexit av8100_remove(struct i2c_client *i2c_client) +{ + struct av8100_device *adev; + + adev = dev_to_adev(&i2c_client->dev); + if (!adev) + return -EFAULT; + + dev_dbg(adev->dev, "%s\n", __func__); + + if (adev->params.inputclk) + clk_put(adev->params.inputclk); + + /* Release regulator resource */ + if (adev->params.regulator_pwr) + regulator_put(adev->params.regulator_pwr); + + misc_deregister(&adev->miscdev); + + /* Remove from list */ + list_del(&adev->list); + + /* Free device data */ + kfree(adev); + + return 0; +} + +int av8100_init(void) +{ + pr_debug("%s\n", __func__); + + if (i2c_add_driver(&av8100_driver)) { + pr_err("av8100 i2c_add_driver failed\n"); + return -EFAULT; + } + + return 0; +} +module_init(av8100_init); + +void av8100_exit(void) +{ + pr_debug("%s\n", __func__); + + i2c_del_driver(&av8100_driver); +} +module_exit(av8100_exit); + +MODULE_AUTHOR("Per Persson <per.xb.persson@stericsson.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ST-Ericsson hdmi display driver"); diff --git a/drivers/video/av8100/av8100_regs.h b/drivers/video/av8100/av8100_regs.h new file mode 100644 index 00000000000..6ed9000987a --- /dev/null +++ b/drivers/video/av8100/av8100_regs.h @@ -0,0 +1,346 @@ + +#define AV8100_VAL2REG(__reg, __fld, __val) \ + (((__val) << __reg##_##__fld##_SHIFT) & __reg##_##__fld##_MASK) +#define AV8100_REG2VAL(__reg, __fld, __val) \ + (((__val) & __reg##_##__fld##_MASK) >> __reg##_##__fld##_SHIFT) + +#define AV8100_STANDBY 0x00000000 +#define AV8100_STANDBY_CPD_SHIFT 0 +#define AV8100_STANDBY_CPD_MASK 0x00000001 +#define AV8100_STANDBY_CPD_HIGH 1 +#define AV8100_STANDBY_CPD_LOW 0 +#define AV8100_STANDBY_CPD(__x) \ + AV8100_VAL2REG(AV8100_STANDBY, CPD, __x) +#define AV8100_STANDBY_CPD_GET(__x) \ + AV8100_REG2VAL(AV8100_STANDBY, CPD, 
__x) +#define AV8100_STANDBY_STBY_SHIFT 1 +#define AV8100_STANDBY_STBY_MASK 0x00000002 +#define AV8100_STANDBY_STBY_HIGH 1 +#define AV8100_STANDBY_STBY_LOW 0 +#define AV8100_STANDBY_STBY(__x) \ + AV8100_VAL2REG(AV8100_STANDBY, STBY, __x) +#define AV8100_STANDBY_STBY_GET(__x) \ + AV8100_REG2VAL(AV8100_STANDBY, STBY, __x) +#define AV8100_STANDBY_HPDS_SHIFT 2 +#define AV8100_STANDBY_HPDS_MASK 0x00000004 +#define AV8100_STANDBY_HPDS(__x) \ + AV8100_VAL2REG(AV8100_STANDBY, HPDS, __x) +#define AV8100_STANDBY_HPDS_GET(__x) \ + AV8100_REG2VAL(AV8100_STANDBY, HPDS, __x) +#define AV8100_STANDBY_CPDS_SHIFT 3 +#define AV8100_STANDBY_CPDS_MASK 0x00000008 +#define AV8100_STANDBY_CPDS(__x) \ + AV8100_VAL2REG(AV8100_STANDBY, CPDS, __x) +#define AV8100_STANDBY_CPDS_GET(__x) \ + AV8100_REG2VAL(AV8100_STANDBY, CPDS, __x) +#define AV8100_STANDBY_MCLKRNG_SHIFT 4 +#define AV8100_STANDBY_MCLKRNG_MASK 0x000000F0 +#define AV8100_STANDBY_MCLKRNG(__x) \ + AV8100_VAL2REG(AV8100_STANDBY, MCLKRNG, __x) +#define AV8100_STANDBY_MCLKRNG_GET(__x) \ + AV8100_REG2VAL(AV8100_STANDBY, MCLKRNG, __x) +#define AV8100_HDMI_5_VOLT_TIME 0x00000001 +#define AV8100_HDMI_5_VOLT_TIME_OFF_TIME_SHIFT 0 +#define AV8100_HDMI_5_VOLT_TIME_OFF_TIME_MASK 0x0000001F +#define AV8100_HDMI_5_VOLT_TIME_OFF_TIME(__x) \ + AV8100_VAL2REG(AV8100_HDMI_5_VOLT_TIME, OFF_TIME, __x) +#define AV8100_HDMI_5_VOLT_TIME_OFF_TIME_GET(__x) \ + AV8100_REG2VAL(AV8100_HDMI_5_VOLT_TIME, OFF_TIME, __x) +#define AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME_SHIFT 0 +#define AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME_MASK 0x00000003 +#define AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME(__x) \ + AV8100_VAL2REG(AV8100_HDMI_5_VOLT_TIME, DAC_OFF_TIME, __x) +#define AV8100_HDMI_5_VOLT_TIME_DAC_OFF_TIME_GET(__x) \ + AV8100_REG2VAL(AV8100_HDMI_5_VOLT_TIME, DAC_OFF_TIME, __x) +#define AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME_SHIFT 2 +#define AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME_MASK 0x0000001C +#define AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME(__x) \ + 
AV8100_VAL2REG(AV8100_HDMI_5_VOLT_TIME, SU_OFF_TIME, __x) +#define AV8100_HDMI_5_VOLT_TIME_SU_OFF_TIME_GET(__x) \ + AV8100_REG2VAL(AV8100_HDMI_5_VOLT_TIME, SU_OFF_TIME, __x) +#define AV8100_HDMI_5_VOLT_TIME_ON_TIME_SHIFT 5 +#define AV8100_HDMI_5_VOLT_TIME_ON_TIME_MASK 0x000000E0 +#define AV8100_HDMI_5_VOLT_TIME_ON_TIME(__x) \ + AV8100_VAL2REG(AV8100_HDMI_5_VOLT_TIME, ON_TIME, __x) +#define AV8100_HDMI_5_VOLT_TIME_ON_TIME_GET(__x) \ + AV8100_REG2VAL(AV8100_HDMI_5_VOLT_TIME, ON_TIME, __x) +#define AV8100_STANDBY_INTERRUPT_MASK 0x00000002 +#define AV8100_STANDBY_INTERRUPT_MASK_HPDM_SHIFT 0 +#define AV8100_STANDBY_INTERRUPT_MASK_HPDM_MASK 0x00000001 +#define AV8100_STANDBY_INTERRUPT_MASK_HPDM_HIGH 1 +#define AV8100_STANDBY_INTERRUPT_MASK_HPDM_LOW 0 +#define AV8100_STANDBY_INTERRUPT_MASK_HPDM(__x) \ + AV8100_VAL2REG(AV8100_STANDBY_INTERRUPT_MASK, HPDM, __x) +#define AV8100_STANDBY_INTERRUPT_MASK_HPDM_GET(__x) \ + AV8100_REG2VAL(AV8100_STANDBY_INTERRUPT_MASK, HPDM, __x) +#define AV8100_STANDBY_INTERRUPT_MASK_CPDM_SHIFT 1 +#define AV8100_STANDBY_INTERRUPT_MASK_CPDM_MASK 0x00000002 +#define AV8100_STANDBY_INTERRUPT_MASK_CPDM_HIGH 1 +#define AV8100_STANDBY_INTERRUPT_MASK_CPDM_LOW 0 +#define AV8100_STANDBY_INTERRUPT_MASK_CPDM(__x) \ + AV8100_VAL2REG(AV8100_STANDBY_INTERRUPT_MASK, CPDM, __x) +#define AV8100_STANDBY_INTERRUPT_MASK_CPDM_GET(__x) \ + AV8100_REG2VAL(AV8100_STANDBY_INTERRUPT_MASK, CPDM, __x) +#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_SHIFT 2 +#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_MASK 0x0000000C +#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_INPUT 0x00 +#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_ALT 0x01 +#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_OUTPUT0 0x02 +#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_OUTPUT1 0x03 +#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG(__x) \ + AV8100_VAL2REG(AV8100_STANDBY_INTERRUPT_MASK, STBYGPIOCFG, __x) +#define AV8100_STANDBY_INTERRUPT_MASK_STBYGPIOCFG_GET(__x) \ + 
AV8100_REG2VAL(AV8100_STANDBY_INTERRUPT_MASK, STBYGPIOCFG, __x) +#define AV8100_STANDBY_INTERRUPT_MASK_IPOL_SHIFT 7 +#define AV8100_STANDBY_INTERRUPT_MASK_IPOL_MASK 0x00000080 +#define AV8100_STANDBY_INTERRUPT_MASK_IPOL_HIGH 1 +#define AV8100_STANDBY_INTERRUPT_MASK_IPOL_LOW 0 +#define AV8100_STANDBY_INTERRUPT_MASK_IPOL(__x) \ + AV8100_VAL2REG(AV8100_STANDBY_INTERRUPT_MASK, IPOL, __x) +#define AV8100_STANDBY_INTERRUPT_MASK_IPOL_GET(__x) \ + AV8100_REG2VAL(AV8100_STANDBY_INTERRUPT_MASK, IPOL, __x) +#define AV8100_STANDBY_PENDING_INTERRUPT 0x00000003 +#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI_SHIFT 0 +#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI_MASK 0x00000001 +#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI_HIGH 1 +#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI_LOW 0 +#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI(__x) \ + AV8100_VAL2REG(AV8100_STANDBY_PENDING_INTERRUPT, HPDI, __x) +#define AV8100_STANDBY_PENDING_INTERRUPT_HPDI_GET(__x) \ + AV8100_REG2VAL(AV8100_STANDBY_PENDING_INTERRUPT, HPDI, __x) +#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI_SHIFT 1 +#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI_MASK 0x00000002 +#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI_HIGH 1 +#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI_LOW 0 +#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI(__x) \ + AV8100_VAL2REG(AV8100_STANDBY_PENDING_INTERRUPT, CPDI, __x) +#define AV8100_STANDBY_PENDING_INTERRUPT_CPDI_GET(__x) \ + AV8100_REG2VAL(AV8100_STANDBY_PENDING_INTERRUPT, CPDI, __x) +#define AV8100_STANDBY_PENDING_INTERRUPT_ONI_SHIFT 2 +#define AV8100_STANDBY_PENDING_INTERRUPT_ONI_MASK 0x00000004 +#define AV8100_STANDBY_PENDING_INTERRUPT_ONI_HIGH 1 +#define AV8100_STANDBY_PENDING_INTERRUPT_ONI_LOW 0 +#define AV8100_STANDBY_PENDING_INTERRUPT_ONI(__x) \ + AV8100_VAL2REG(AV8100_STANDBY_PENDING_INTERRUPT, ONI, __x) +#define AV8100_STANDBY_PENDING_INTERRUPT_ONI_GET(__x) \ + AV8100_REG2VAL(AV8100_STANDBY_PENDING_INTERRUPT, ONI, __x) +#define AV8100_STANDBY_PENDING_INTERRUPT_SID_SHIFT 4 +#define 
AV8100_STANDBY_PENDING_INTERRUPT_SID_MASK 0x000000F0 +#define AV8100_STANDBY_PENDING_INTERRUPT_SID(__x) \ + AV8100_VAL2REG(AV8100_STANDBY_PENDING_INTERRUPT, SID, __x) +#define AV8100_STANDBY_PENDING_INTERRUPT_SID_GET(__x) \ + AV8100_REG2VAL(AV8100_STANDBY_PENDING_INTERRUPT, SID, __x) +#define AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_SHIFT 6 +#define AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_MASK 0x00000040 +#define AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_HIGH 1 +#define AV8100_STANDBY_PENDING_INTERRUPT_BPDIG_LOW 0 +#define AV8100_STANDBY_PENDING_INTERRUPT_BPDIG(__x) \ + AV8100_VAL2REG(AV8100_STANDBY_PENDING_INTERRUPT, BPDIG, __x) +#define AV8100_GENERAL_INTERRUPT_MASK 0x00000004 +#define AV8100_GENERAL_INTERRUPT_MASK_EOCM_SHIFT 0 +#define AV8100_GENERAL_INTERRUPT_MASK_EOCM_MASK 0x00000001 +#define AV8100_GENERAL_INTERRUPT_MASK_EOCM_HIGH 1 +#define AV8100_GENERAL_INTERRUPT_MASK_EOCM_LOW 0 +#define AV8100_GENERAL_INTERRUPT_MASK_EOCM(__x) \ + AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, EOCM, __x) +#define AV8100_GENERAL_INTERRUPT_MASK_EOCM_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, EOCM, __x) +#define AV8100_GENERAL_INTERRUPT_MASK_VSIM_SHIFT 1 +#define AV8100_GENERAL_INTERRUPT_MASK_VSIM_MASK 0x00000002 +#define AV8100_GENERAL_INTERRUPT_MASK_VSIM_HIGH 1 +#define AV8100_GENERAL_INTERRUPT_MASK_VSIM_LOW 0 +#define AV8100_GENERAL_INTERRUPT_MASK_VSIM(__x) \ + AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, VSIM, __x) +#define AV8100_GENERAL_INTERRUPT_MASK_VSIM_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, VSIM, __x) +#define AV8100_GENERAL_INTERRUPT_MASK_VSOM_SHIFT 2 +#define AV8100_GENERAL_INTERRUPT_MASK_VSOM_MASK 0x00000004 +#define AV8100_GENERAL_INTERRUPT_MASK_VSOM_HIGH 1 +#define AV8100_GENERAL_INTERRUPT_MASK_VSOM_LOW 0 +#define AV8100_GENERAL_INTERRUPT_MASK_VSOM(__x) \ + AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, VSOM, __x) +#define AV8100_GENERAL_INTERRUPT_MASK_VSOM_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, VSOM, __x) 
+#define AV8100_GENERAL_INTERRUPT_MASK_CECM_SHIFT 3 +#define AV8100_GENERAL_INTERRUPT_MASK_CECM_MASK 0x00000008 +#define AV8100_GENERAL_INTERRUPT_MASK_CECM_HIGH 1 +#define AV8100_GENERAL_INTERRUPT_MASK_CECM_LOW 0 +#define AV8100_GENERAL_INTERRUPT_MASK_CECM(__x) \ + AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, CECM, __x) +#define AV8100_GENERAL_INTERRUPT_MASK_CECM_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, CECM, __x) +#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM_SHIFT 4 +#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM_MASK 0x00000010 +#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM_HIGH 1 +#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM_LOW 0 +#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM(__x) \ + AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, HDCPM, __x) +#define AV8100_GENERAL_INTERRUPT_MASK_HDCPM_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, HDCPM, __x) +#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM_SHIFT 5 +#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM_MASK 0x00000020 +#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM_HIGH 1 +#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM_LOW 0 +#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM(__x) \ + AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, UOVBM, __x) +#define AV8100_GENERAL_INTERRUPT_MASK_UOVBM_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, UOVBM, __x) +#define AV8100_GENERAL_INTERRUPT_MASK_TEM_SHIFT 6 +#define AV8100_GENERAL_INTERRUPT_MASK_TEM_MASK 0x00000040 +#define AV8100_GENERAL_INTERRUPT_MASK_TEM_HIGH 1 +#define AV8100_GENERAL_INTERRUPT_MASK_TEM_LOW 0 +#define AV8100_GENERAL_INTERRUPT_MASK_TEM(__x) \ + AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT_MASK, TEM, __x) +#define AV8100_GENERAL_INTERRUPT_MASK_TEM_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT_MASK, TEM, __x) +#define AV8100_GENERAL_INTERRUPT 0x00000005 +#define AV8100_GENERAL_INTERRUPT_EOCI_SHIFT 0 +#define AV8100_GENERAL_INTERRUPT_EOCI_MASK 0x00000001 +#define AV8100_GENERAL_INTERRUPT_EOCI(__x) \ + AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, EOCI, 
__x) +#define AV8100_GENERAL_INTERRUPT_EOCI_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, EOCI, __x) +#define AV8100_GENERAL_INTERRUPT_VSII_SHIFT 1 +#define AV8100_GENERAL_INTERRUPT_VSII_MASK 0x00000002 +#define AV8100_GENERAL_INTERRUPT_VSII(__x) \ + AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, VSII, __x) +#define AV8100_GENERAL_INTERRUPT_VSII_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, VSII, __x) +#define AV8100_GENERAL_INTERRUPT_VSOI_SHIFT 2 +#define AV8100_GENERAL_INTERRUPT_VSOI_MASK 0x00000004 +#define AV8100_GENERAL_INTERRUPT_VSOI(__x) \ + AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, VSOI, __x) +#define AV8100_GENERAL_INTERRUPT_VSOI_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, VSOI, __x) +#define AV8100_GENERAL_INTERRUPT_CECI_SHIFT 3 +#define AV8100_GENERAL_INTERRUPT_CECI_MASK 0x00000008 +#define AV8100_GENERAL_INTERRUPT_CECI(__x) \ + AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, CECI, __x) +#define AV8100_GENERAL_INTERRUPT_CECI_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, CECI, __x) +#define AV8100_GENERAL_INTERRUPT_HDCPI_SHIFT 4 +#define AV8100_GENERAL_INTERRUPT_HDCPI_MASK 0x00000010 +#define AV8100_GENERAL_INTERRUPT_HDCPI(__x) \ + AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, HDCPI, __x) +#define AV8100_GENERAL_INTERRUPT_HDCPI_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, HDCPI, __x) +#define AV8100_GENERAL_INTERRUPT_UOVBI_SHIFT 5 +#define AV8100_GENERAL_INTERRUPT_UOVBI_MASK 0x00000020 +#define AV8100_GENERAL_INTERRUPT_UOVBI(__x) \ + AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, UOVBI, __x) +#define AV8100_GENERAL_INTERRUPT_UOVBI_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, UOVBI, __x) +#define AV8100_GENERAL_INTERRUPT_TEI_SHIFT 6 +#define AV8100_GENERAL_INTERRUPT_TEI_MASK 0x00000040 +#define AV8100_GENERAL_INTERRUPT_TEI(__x) \ + AV8100_VAL2REG(AV8100_GENERAL_INTERRUPT, TEI, __x) +#define AV8100_GENERAL_INTERRUPT_TEI_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_INTERRUPT, TEI, __x) +#define AV8100_GENERAL_STATUS 0x00000006 
+#define AV8100_GENERAL_STATUS_CECTXERR_SHIFT 0 +#define AV8100_GENERAL_STATUS_CECTXERR_MASK 0x00000001 +#define AV8100_GENERAL_STATUS_CECTXERR_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_STATUS, CECTXERR, __x) +#define AV8100_GENERAL_STATUS_CECREC_SHIFT 1 +#define AV8100_GENERAL_STATUS_CECREC_MASK 0x00000002 +#define AV8100_GENERAL_STATUS_CECREC_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_STATUS, CECREC, __x) +#define AV8100_GENERAL_STATUS_CECTRX_SHIFT 2 +#define AV8100_GENERAL_STATUS_CECTRX_MASK 0x00000004 +#define AV8100_GENERAL_STATUS_CECTRX_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_STATUS, CECTRX, __x) +#define AV8100_GENERAL_STATUS_UC_SHIFT 3 +#define AV8100_GENERAL_STATUS_UC_MASK 0x00000008 +#define AV8100_GENERAL_STATUS_UC_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_STATUS, UC, __x) +#define AV8100_GENERAL_STATUS_ONUVB_SHIFT 4 +#define AV8100_GENERAL_STATUS_ONUVB_MASK 0x00000010 +#define AV8100_GENERAL_STATUS_ONUVB_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_STATUS, ONUVB, __x) +#define AV8100_GENERAL_STATUS_HDCPS_SHIFT 5 +#define AV8100_GENERAL_STATUS_HDCPS_MASK 0x000000E0 +#define AV8100_GENERAL_STATUS_HDCPS_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_STATUS, HDCPS, __x) +#define AV8100_GPIO_CONFIGURATION 0x00000007 +#define AV8100_GPIO_CONFIGURATION_DAT3DIR_SHIFT 0 +#define AV8100_GPIO_CONFIGURATION_DAT3DIR_MASK 0x00000001 +#define AV8100_GPIO_CONFIGURATION_DAT3DIR(__x) \ + AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT3DIR, __x) +#define AV8100_GPIO_CONFIGURATION_DAT3DIR_GET(__x) \ + AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT3DIR, __x) +#define AV8100_GPIO_CONFIGURATION_DAT3VAL_SHIFT 1 +#define AV8100_GPIO_CONFIGURATION_DAT3VAL_MASK 0x00000002 +#define AV8100_GPIO_CONFIGURATION_DAT3VAL(__x) \ + AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT3VAL, __x) +#define AV8100_GPIO_CONFIGURATION_DAT3VAL_GET(__x) \ + AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT3VAL, __x) +#define AV8100_GPIO_CONFIGURATION_DAT2DIR_SHIFT 2 +#define 
AV8100_GPIO_CONFIGURATION_DAT2DIR_MASK 0x00000004 +#define AV8100_GPIO_CONFIGURATION_DAT2DIR(__x) \ + AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT2DIR, __x) +#define AV8100_GPIO_CONFIGURATION_DAT2DIR_GET(__x) \ + AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT2DIR, __x) +#define AV8100_GPIO_CONFIGURATION_DAT2VAL_SHIFT 3 +#define AV8100_GPIO_CONFIGURATION_DAT2VAL_MASK 0x00000008 +#define AV8100_GPIO_CONFIGURATION_DAT2VAL(__x) \ + AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT2VAL, __x) +#define AV8100_GPIO_CONFIGURATION_DAT2VAL_GET(__x) \ + AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT2VAL, __x) +#define AV8100_GPIO_CONFIGURATION_DAT1DIR_SHIFT 4 +#define AV8100_GPIO_CONFIGURATION_DAT1DIR_MASK 0x00000010 +#define AV8100_GPIO_CONFIGURATION_DAT1DIR(__x) \ + AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT1DIR, __x) +#define AV8100_GPIO_CONFIGURATION_DAT1DIR_GET(__x) \ + AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT1DIR, __x) +#define AV8100_GPIO_CONFIGURATION_DAT1VAL_SHIFT 5 +#define AV8100_GPIO_CONFIGURATION_DAT1VAL_MASK 0x00000020 +#define AV8100_GPIO_CONFIGURATION_DAT1VAL(__x) \ + AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, DAT1VAL, __x) +#define AV8100_GPIO_CONFIGURATION_DAT1VAL_GET(__x) \ + AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, DAT1VAL, __x) +#define AV8100_GPIO_CONFIGURATION_UCDBG_SHIFT 6 +#define AV8100_GPIO_CONFIGURATION_UCDBG_MASK 0x00000040 +#define AV8100_GPIO_CONFIGURATION_UCDBG(__x) \ + AV8100_VAL2REG(AV8100_GPIO_CONFIGURATION, UCDBG, __x) +#define AV8100_GPIO_CONFIGURATION_UCDBG_GET(__x) \ + AV8100_REG2VAL(AV8100_GPIO_CONFIGURATION, UCDBG, __x) +#define AV8100_GENERAL_CONTROL 0x00000008 +#define AV8100_GENERAL_CONTROL_FDL_SHIFT 4 +#define AV8100_GENERAL_CONTROL_FDL_MASK 0x00000010 +#define AV8100_GENERAL_CONTROL_FDL_HIGH 1 +#define AV8100_GENERAL_CONTROL_FDL_LOW 0 +#define AV8100_GENERAL_CONTROL_FDL(__x) \ + AV8100_VAL2REG(AV8100_GENERAL_CONTROL, FDL, __x) +#define AV8100_GENERAL_CONTROL_FDL_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_CONTROL, FDL, __x) 
+#define AV8100_GENERAL_CONTROL_HLD_SHIFT 5 +#define AV8100_GENERAL_CONTROL_HLD_MASK 0x00000020 +#define AV8100_GENERAL_CONTROL_HLD_HIGH 1 +#define AV8100_GENERAL_CONTROL_HLD_LOW 0 +#define AV8100_GENERAL_CONTROL_HLD(__x) \ + AV8100_VAL2REG(AV8100_GENERAL_CONTROL, HLD, __x) +#define AV8100_GENERAL_CONTROL_HLD_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_CONTROL, HLD, __x) +#define AV8100_GENERAL_CONTROL_WA_SHIFT 6 +#define AV8100_GENERAL_CONTROL_WA_MASK 0x00000040 +#define AV8100_GENERAL_CONTROL_WA_HIGH 1 +#define AV8100_GENERAL_CONTROL_WA_LOW 0 +#define AV8100_GENERAL_CONTROL_WA(__x) \ + AV8100_VAL2REG(AV8100_GENERAL_CONTROL, WA, __x) +#define AV8100_GENERAL_CONTROL_WA_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_CONTROL, WA, __x) +#define AV8100_GENERAL_CONTROL_RA_SHIFT 7 +#define AV8100_GENERAL_CONTROL_RA_MASK 0x00000080 +#define AV8100_GENERAL_CONTROL_RA_HIGH 1 +#define AV8100_GENERAL_CONTROL_RA_LOW 0 +#define AV8100_GENERAL_CONTROL_RA(__x) \ + AV8100_VAL2REG(AV8100_GENERAL_CONTROL, RA, __x) +#define AV8100_GENERAL_CONTROL_RA_GET(__x) \ + AV8100_REG2VAL(AV8100_GENERAL_CONTROL, RA, __x) +#define AV8100_FIRMWARE_DOWNLOAD_ENTRY 0x0000000F +#define AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY_SHIFT 0 +#define AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY_MASK 0x000000FF +#define AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY(__x) \ + AV8100_VAL2REG(AV8100_FIRMWARE_DOWNLOAD_ENTRY, MBYTE_CODE_ENTRY, __x) +#define AV8100_FIRMWARE_DOWNLOAD_ENTRY_MBYTE_CODE_ENTRY_GET(__x) \ + AV8100_REG2VAL(AV8100_FIRMWARE_DOWNLOAD_ENTRY, MBYTE_CODE_ENTRY, __x) diff --git a/drivers/video/av8100/hdmi.c b/drivers/video/av8100/hdmi.c new file mode 100644 index 00000000000..3159c4446f1 --- /dev/null +++ b/drivers/video/av8100/hdmi.c @@ -0,0 +1,2479 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * + * ST-Ericsson HDMI driver + * + * Author: Per Persson <per.xb.persson@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/miscdevice.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/ioctl.h> +#include <linux/uaccess.h> +#include <video/av8100.h> +#include <video/hdmi.h> +#include <linux/poll.h> +#include <linux/mutex.h> +#include <linux/ctype.h> +#include "hdmi_loc.h" +#include <linux/slab.h> +#include <linux/sched.h> +#include <linux/delay.h> +#include <linux/list.h> + +#define SYSFS_EVENT_FILENAME "evread" +#define HDMI_DEVNR_DEFAULT 0 + +DEFINE_MUTEX(hdmi_events_mutex); +#define LOCK_HDMI_EVENTS mutex_lock(&hdmi_events_mutex) +#define UNLOCK_HDMI_EVENTS mutex_unlock(&hdmi_events_mutex) +#define EVENTS_MASK 0xFF + +struct hdmi_device { + struct list_head list; + struct miscdevice miscdev; + struct device *dev; + struct hdmi_sysfs_data sysfs_data; + int events; + int events_mask; + wait_queue_head_t event_wq; + bool events_received; + int devnr; +}; + +/* List of devices */ +static LIST_HEAD(hdmi_device_list); + +static ssize_t store_storeastext(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); +static ssize_t store_plugdeten(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); +static ssize_t store_edidread(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); +static ssize_t show_edidread(struct device *dev, struct device_attribute *attr, + char *buf); +static ssize_t store_ceceven(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); +static ssize_t show_cecread(struct device *dev, struct device_attribute *attr, + char *buf); +static ssize_t store_cecsend(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); +static ssize_t store_infofrsend(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); +static ssize_t store_hdcpeven(struct device *dev, + struct device_attribute *attr, const char *buf, 
size_t count); +static ssize_t show_hdcpchkaesotp(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t store_hdcpfuseaes(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); +static ssize_t show_hdcpfuseaes(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t store_hdcploadaes(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); +static ssize_t show_hdcploadaes(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t store_hdcpauthencr(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); +static ssize_t show_hdcpauthencr(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t show_hdcpstateget(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t show_evread(struct device *dev, struct device_attribute *attr, + char *buf); +static ssize_t store_evclr(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); +static ssize_t store_audiocfg(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); +static ssize_t show_plugstatus(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t store_poweronoff(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); +static ssize_t show_poweronoff(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t store_evwakeup(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); + +static const struct device_attribute hdmi_sysfs_attrs[] = { + __ATTR(storeastext, S_IWUSR, NULL, store_storeastext), + __ATTR(plugdeten, S_IWUSR, NULL, store_plugdeten), + __ATTR(edidread, S_IRUGO | S_IWUSR, show_edidread, store_edidread), + __ATTR(ceceven, S_IWUSR, NULL, store_ceceven), + __ATTR(cecread, S_IRUGO, show_cecread, NULL), + __ATTR(cecsend, S_IWUSR, NULL, 
store_cecsend), + __ATTR(infofrsend, S_IWUSR, NULL, store_infofrsend), + __ATTR(hdcpeven, S_IWUSR, NULL, store_hdcpeven), + __ATTR(hdcpchkaesotp, S_IRUGO, show_hdcpchkaesotp, NULL), + __ATTR(hdcpfuseaes, S_IRUGO | S_IWUSR, show_hdcpfuseaes, + store_hdcpfuseaes), + __ATTR(hdcploadaes, S_IRUGO | S_IWUSR, show_hdcploadaes, + store_hdcploadaes), + __ATTR(hdcpauthencr, S_IRUGO | S_IWUSR, show_hdcpauthencr, + store_hdcpauthencr), + __ATTR(hdcpstateget, S_IRUGO, show_hdcpstateget, NULL), + __ATTR(evread, S_IRUGO, show_evread, NULL), + __ATTR(evclr, S_IWUSR, NULL, store_evclr), + __ATTR(audiocfg, S_IWUSR, NULL, store_audiocfg), + __ATTR(plugstatus, S_IRUGO, show_plugstatus, NULL), + __ATTR(poweronoff, S_IRUGO | S_IWUSR, show_poweronoff, + store_poweronoff), + __ATTR(evwakeup, S_IWUSR, NULL, store_evwakeup), + __ATTR_NULL +}; + +/* Hex to int conversion */ +static unsigned int htoi(const char *ptr) +{ + unsigned int value = 0; + char ch; + + if (!ptr) + return 0; + + ch = *ptr; + if (isdigit(ch)) + value = ch - '0'; + else + value = toupper(ch) - 'A' + 10; + + value <<= 4; + ch = *(++ptr); + + if (isdigit(ch)) + value += ch - '0'; + else + value += toupper(ch) - 'A' + 10; + + return value; +} + +static struct hdmi_device *dev_to_hdev(struct device *dev) +{ + /* Get device from list of devices */ + struct list_head *element; + struct hdmi_device *hdmi_dev; + int cnt = 0; + + list_for_each(element, &hdmi_device_list) { + hdmi_dev = list_entry(element, struct hdmi_device, list); + if (hdmi_dev->dev == dev) + return hdmi_dev; + cnt++; + } + + return NULL; +} + +static struct hdmi_device *devnr_to_hdev(int devnr) +{ + /* Get device from list of devices */ + struct list_head *element; + struct hdmi_device *hdmi_dev; + int cnt = 0; + + list_for_each(element, &hdmi_device_list) { + hdmi_dev = list_entry(element, struct hdmi_device, list); + if (cnt == devnr) + return hdmi_dev; + cnt++; + } + + return NULL; +} + +static int event_enable(struct hdmi_device *hdev, bool enable, + enum 
hdmi_event ev) +{ + struct kobject *kobj = &hdev->dev->kobj; + + dev_dbg(hdev->dev, "enable_event %d %02x\n", enable, ev); + if (enable) + hdev->events_mask |= ev; + else + hdev->events_mask &= ~ev; + + if (hdev->events & ev) { + /* Report pending event */ + /* Wake up application waiting for event via call to poll() */ + sysfs_notify(kobj, NULL, SYSFS_EVENT_FILENAME); + + LOCK_HDMI_EVENTS; + hdev->events_received = true; + UNLOCK_HDMI_EVENTS; + + wake_up_interruptible(&hdev->event_wq); + } + + return 0; +} + +static int plugdeten(struct hdmi_device *hdev, struct plug_detect *pldet) +{ + struct av8100_status status; + u8 denc_off_time = 0; + int retval; + + status = av8100_status_get(); + if (status.av8100_state < AV8100_OPMODE_STANDBY) { + if (av8100_powerup() != 0) { + dev_err(hdev->dev, "av8100_powerup failed\n"); + return -EINVAL; + } + } + + event_enable(hdev, pldet->hdmi_detect_enable != 0, + HDMI_EVENT_HDMI_PLUGIN); + event_enable(hdev, pldet->hdmi_detect_enable != 0, + HDMI_EVENT_HDMI_PLUGOUT); + + av8100_reg_hdmi_5_volt_time_r(&denc_off_time, NULL, NULL); + + retval = av8100_reg_hdmi_5_volt_time_w( + denc_off_time, + pldet->hdmi_off_time, + pldet->on_time); + + if (retval) { + dev_err(hdev->dev, "Failed to write the value to av8100 " + "register\n"); + return -EFAULT; + } + + return retval; +} + +static int edidread(struct hdmi_device *hdev, struct edid_read *edidread, + u8 *len, u8 *data) +{ + union av8100_configuration config; + struct av8100_status status; + + status = av8100_status_get(); + if (status.av8100_state < AV8100_OPMODE_STANDBY) { + if (av8100_powerup() != 0) { + dev_err(hdev->dev, "av8100_powerup failed\n"); + return -EINVAL; + } + } + + if (status.av8100_state <= AV8100_OPMODE_INIT) { + if (av8100_download_firmware(I2C_INTERFACE) != 0) { + dev_err(hdev->dev, "av8100 dl fw FAIL\n"); + return -EINVAL; + } + } + + config.edid_section_readback_format.address = edidread->address; + config.edid_section_readback_format.block_number = 
edidread->block_nr; + + dev_dbg(hdev->dev, "addr:%0x blnr:%0x", + config.edid_section_readback_format.address, + config.edid_section_readback_format.block_number); + + if (av8100_conf_prep(AV8100_COMMAND_EDID_SECTION_READBACK, + &config) != 0) { + dev_err(hdev->dev, "av8100_conf_prep FAIL\n"); + return -EINVAL; + } + + if (av8100_conf_w(AV8100_COMMAND_EDID_SECTION_READBACK, + len, data, I2C_INTERFACE) != 0) { + dev_err(hdev->dev, "av8100_conf_w FAIL\n"); + return -EINVAL; + } + + dev_dbg(hdev->dev, "len:%0x\n", *len); + + return 0; +} + +static int cecread(struct hdmi_device *hdev, u8 *src, u8 *dest, u8 *data_len, + u8 *data) +{ + union av8100_configuration config; + struct av8100_status status; + u8 buf_len; + u8 buff[HDMI_CEC_READ_MAXSIZE]; + + status = av8100_status_get(); + if (status.av8100_state < AV8100_OPMODE_STANDBY) { + if (av8100_powerup() != 0) { + dev_err(hdev->dev, "av8100_powerup failed\n"); + return -EINVAL; + } + } + + if (status.av8100_state <= AV8100_OPMODE_INIT) { + if (av8100_download_firmware(I2C_INTERFACE) != 0) { + dev_err(hdev->dev, "av8100 dl fw FAIL\n"); + return -EINVAL; + } + } + + if (av8100_conf_prep(AV8100_COMMAND_CEC_MESSAGE_READ_BACK, + &config) != 0) { + dev_err(hdev->dev, "av8100_conf_prep FAIL\n"); + return -EINVAL; + } + + if (av8100_conf_w(AV8100_COMMAND_CEC_MESSAGE_READ_BACK, + &buf_len, buff, I2C_INTERFACE) != 0) { + dev_err(hdev->dev, "av8100_conf_w FAIL\n"); + return -EINVAL; + } + + if (buf_len > 0) { + *src = (buff[0] & 0xF0) >> 4; + *dest = buff[0] & 0x0F; + *data_len = buf_len - 1; + memcpy(data, &buff[1], buf_len - 1); + } else + *data_len = 0; + + return 0; +} + +/* CEC tx status can be set or read */ +static bool cec_tx_status(struct hdmi_device *hdev, + enum cec_tx_status_action action) +{ + static bool cec_tx_busy; + + switch (action) { + case CEC_TX_SET_FREE: + cec_tx_busy = false; + dev_dbg(hdev->dev, "cec_tx_busy set:%d\n", cec_tx_busy); + break; + + case CEC_TX_SET_BUSY: + cec_tx_busy = true; + 
dev_dbg(hdev->dev, "cec_tx_busy set:%d\n", cec_tx_busy); + break; + + case CEC_TX_CHECK: + default: + dev_dbg(hdev->dev, "cec_tx_busy chk:%d\n", cec_tx_busy); + break; + } + + return cec_tx_busy; +} + +static int cecsend(struct hdmi_device *hdev, u8 src, u8 dest, u8 data_len, + u8 *data) +{ + union av8100_configuration config; + struct av8100_status status; + int cnt; + + status = av8100_status_get(); + if (status.av8100_state < AV8100_OPMODE_STANDBY) { + if (av8100_powerup() != 0) { + dev_err(hdev->dev, "av8100_powerup failed\n"); + return -EINVAL; + } + } + + if (status.av8100_state <= AV8100_OPMODE_INIT) { + if (av8100_download_firmware(I2C_INTERFACE) != 0) { + dev_err(hdev->dev, "av8100 dl fw FAIL\n"); + return -EINVAL; + } + } + + config.cec_message_write_format.buffer[0] = ((src & 0x0F) << 4) + + (dest & 0x0F); + config.cec_message_write_format.buffer_length = data_len + 1; + memcpy(&config.cec_message_write_format.buffer[1], data, data_len); + + if (av8100_conf_prep(AV8100_COMMAND_CEC_MESSAGE_WRITE, + &config) != 0) { + dev_err(hdev->dev, "av8100_conf_prep FAIL\n"); + return -EINVAL; + } + + if (av8100_enable_interrupt() != 0) { + dev_err(hdev->dev, "av8100_ei FAIL\n"); + return -EINVAL; + } + + cnt = 0; + while ((cnt < CECTX_TRY) && cec_tx_status(hdev, CEC_TX_CHECK)) { + /* Wait for pending CEC to be finished */ + msleep(CECTX_WAITTIME); + cnt++; + } + dev_dbg(hdev->dev, "cectxcnt:%d\n", cnt); + + if (av8100_conf_w(AV8100_COMMAND_CEC_MESSAGE_WRITE, + NULL, NULL, I2C_INTERFACE) != 0) { + dev_err(hdev->dev, "av8100_conf_w FAIL\n"); + return -EINVAL; + } + cec_tx_status(hdev, CEC_TX_SET_BUSY); + + return 0; +} + +static int infofrsend(struct hdmi_device *hdev, u8 type, u8 version, u8 crc, + u8 data_len, u8 *data) +{ + union av8100_configuration config; + struct av8100_status status; + + status = av8100_status_get(); + if (status.av8100_state < AV8100_OPMODE_STANDBY) { + if (av8100_powerup() != 0) { + dev_err(hdev->dev, "av8100_powerup failed\n"); + return 
-EINVAL; + } + } + + if (status.av8100_state <= AV8100_OPMODE_INIT) { + if (av8100_download_firmware(I2C_INTERFACE) != 0) { + dev_err(hdev->dev, "av8100 dl fw FAIL\n"); + return -EINVAL; + } + } + + if ((data_len < 1) || (data_len > HDMI_INFOFRAME_MAX_SIZE)) + return -EINVAL; + + config.infoframes_format.type = type; + config.infoframes_format.version = version; + config.infoframes_format.crc = crc; + config.infoframes_format.length = data_len; + memcpy(&config.infoframes_format.data, data, data_len); + if (av8100_conf_prep(AV8100_COMMAND_INFOFRAMES, + &config) != 0) { + dev_err(hdev->dev, "av8100_conf_prep FAIL\n"); + return -EINVAL; + } + + if (av8100_conf_w(AV8100_COMMAND_INFOFRAMES, + NULL, NULL, I2C_INTERFACE) != 0) { + dev_err(hdev->dev, "av8100_conf_w FAIL\n"); + return -EINVAL; + } + + return 0; +} + +static int hdcpchkaesotp(struct hdmi_device *hdev, u8 *crc, u8 *progged) +{ + union av8100_configuration config; + struct av8100_status status; + u8 buf_len; + u8 buf[2]; + + status = av8100_status_get(); + if (status.av8100_state < AV8100_OPMODE_STANDBY) { + if (av8100_powerup() != 0) { + dev_err(hdev->dev, "av8100_powerup failed\n"); + return -EINVAL; + } + } + + if (status.av8100_state <= AV8100_OPMODE_INIT) { + if (av8100_download_firmware(I2C_INTERFACE) != 0) { + dev_err(hdev->dev, "av8100 dl fw FAIL\n"); + return -EINVAL; + } + } + + config.fuse_aes_key_format.fuse_operation = AV8100_FUSE_READ; + memset(config.fuse_aes_key_format.key, 0, AV8100_FUSE_KEY_SIZE); + if (av8100_conf_prep(AV8100_COMMAND_FUSE_AES_KEY, + &config) != 0) { + dev_err(hdev->dev, "av8100_conf_prep FAIL\n"); + return -EINVAL; + } + + if (av8100_conf_w(AV8100_COMMAND_FUSE_AES_KEY, + &buf_len, buf, I2C_INTERFACE) != 0) { + dev_err(hdev->dev, "av8100_conf_w FAIL\n"); + return -EINVAL; + } + + if (buf_len == 2) { + *crc = buf[0]; + *progged = buf[1]; + } + + return 0; +} + +static int hdcpfuseaes(struct hdmi_device *hdev, u8 *key, u8 crc, u8 *result) +{ + union av8100_configuration 
config; + struct av8100_status status; + u8 buf_len; + u8 buf[2]; + + /* Default not OK */ + *result = HDMI_RESULT_NOT_OK; + + status = av8100_status_get(); + if (status.av8100_state < AV8100_OPMODE_STANDBY) { + if (av8100_powerup() != 0) { + dev_err(hdev->dev, "av8100_powerup failed\n"); + return -EINVAL; + } + } + + if (status.av8100_state <= AV8100_OPMODE_INIT) { + if (av8100_download_firmware(I2C_INTERFACE) != 0) { + dev_err(hdev->dev, "av8100 dl fw FAIL\n"); + return -EINVAL; + } + } + + config.fuse_aes_key_format.fuse_operation = AV8100_FUSE_WRITE; + memcpy(config.fuse_aes_key_format.key, key, AV8100_FUSE_KEY_SIZE); + if (av8100_conf_prep(AV8100_COMMAND_FUSE_AES_KEY, + &config) != 0) { + dev_err(hdev->dev, "av8100_conf_prep FAIL\n"); + return -EINVAL; + } + + if (av8100_conf_w(AV8100_COMMAND_FUSE_AES_KEY, + &buf_len, buf, I2C_INTERFACE) != 0) { + dev_err(hdev->dev, "av8100_conf_w FAIL\n"); + return -EINVAL; + } + + if (buf_len == 2) { + dev_dbg(hdev->dev, "buf[0]:%02x buf[1]:%02x\n", buf[0], buf[1]); + if ((crc == buf[0]) && (buf[1] == 1)) + /* OK */ + *result = HDMI_RESULT_OK; + else + *result = HDMI_RESULT_CRC_MISMATCH; + } + + return 0; +} + +static int hdcploadaes(struct hdmi_device *hdev, u8 block, u8 key_len, u8 *key, + u8 *result, u8 *crc32) +{ + union av8100_configuration config; + struct av8100_status status; + u8 buf_len; + u8 buf[CRC32_SIZE]; + + /* Default not OK */ + *result = HDMI_RESULT_NOT_OK; + + dev_dbg(hdev->dev, "%s block:%d\n", __func__, block); + + status = av8100_status_get(); + if (status.av8100_state < AV8100_OPMODE_STANDBY) { + if (av8100_powerup() != 0) { + dev_err(hdev->dev, "av8100_powerup failed\n"); + return -EINVAL; + } + } + + if (status.av8100_state <= AV8100_OPMODE_INIT) { + if (av8100_download_firmware(I2C_INTERFACE) != 0) { + dev_err(hdev->dev, "av8100 dl fw FAIL\n"); + return -EINVAL; + } + } + + config.hdcp_send_key_format.key_number = block; + config.hdcp_send_key_format.data_len = key_len; + 
memcpy(config.hdcp_send_key_format.data, key, key_len); + if (av8100_conf_prep(AV8100_COMMAND_HDCP_SENDKEY, &config) != 0) { + dev_err(hdev->dev, "av8100_conf_prep FAIL\n"); + return -EINVAL; + } + + if (av8100_conf_w(AV8100_COMMAND_HDCP_SENDKEY, + &buf_len, buf, I2C_INTERFACE) != 0) { + dev_err(hdev->dev, "av8100_conf_w FAIL\n"); + return -EINVAL; + } + + if ((buf_len == CRC32_SIZE) && (crc32)) { + memcpy(crc32, buf, CRC32_SIZE); + dev_dbg(hdev->dev, "crc32:%02x%02x%02x%02x\n", + crc32[0], crc32[1], crc32[2], crc32[3]); + } + + *result = HDMI_RESULT_OK; + + return 0; +} + +static int hdcpauthencr(struct hdmi_device *hdev, u8 auth_type, u8 encr_type, + u8 *len, u8 *data) +{ + union av8100_configuration config; + struct av8100_status status; + + status = av8100_status_get(); + if (status.av8100_state < AV8100_OPMODE_STANDBY) { + if (av8100_powerup() != 0) { + dev_err(hdev->dev, "av8100_powerup failed\n"); + return -EINVAL; + } + } + + if (status.av8100_state <= AV8100_OPMODE_INIT) { + if (av8100_download_firmware(I2C_INTERFACE) != 0) { + dev_err(hdev->dev, "av8100 dl fw FAIL\n"); + return -EINVAL; + } + } + + switch (auth_type) { + case HDMI_HDCP_AUTH_OFF: + default: + config.hdcp_management_format.req_type = + AV8100_HDCP_AUTH_REQ_OFF; + break; + + case HDMI_HDCP_AUTH_START: + config.hdcp_management_format.req_type = + AV8100_HDCP_AUTH_REQ_ON; + break; + + case HDMI_HDCP_AUTH_REV_LIST_REQ: + config.hdcp_management_format.req_type = + AV8100_HDCP_REV_LIST_REQ; + break; + case HDMI_HDCP_AUTH_CONT: + config.hdcp_management_format.req_type = + AV8100_HDCP_AUTH_CONT; + break; + } + + switch (encr_type) { + case HDMI_HDCP_ENCR_OESS: + default: + config.hdcp_management_format.encr_use = + AV8100_HDCP_ENCR_USE_OESS; + break; + + case HDMI_HDCP_ENCR_EESS: + config.hdcp_management_format.encr_use = + AV8100_HDCP_ENCR_USE_EESS; + break; + } + + if (av8100_conf_prep(AV8100_COMMAND_HDCP_MANAGEMENT, + &config) != 0) { + dev_err(hdev->dev, "av8100_conf_prep FAIL\n"); + return 
-EINVAL; + } + + if (av8100_conf_w(AV8100_COMMAND_HDCP_MANAGEMENT, + len, data, I2C_INTERFACE) != 0) { + dev_err(hdev->dev, "av8100_conf_w FAIL\n"); + return -EINVAL; + } + + return 0; +} + +static u8 events_read(struct hdmi_device *hdev) +{ + int ret; + + LOCK_HDMI_EVENTS; + ret = hdev->events; + dev_dbg(hdev->dev, "%s %02x\n", __func__, hdev->events); + UNLOCK_HDMI_EVENTS; + + return ret; +} + +static int events_clear(struct hdmi_device *hdev, u8 ev) +{ + dev_dbg(hdev->dev, "%s %02x\n", __func__, ev); + + LOCK_HDMI_EVENTS; + hdev->events &= ~ev & EVENTS_MASK; + UNLOCK_HDMI_EVENTS; + + return 0; +} + +static int event_wakeup(struct hdmi_device *hdev) +{ + struct kobject *kobj = &hdev->dev->kobj; + + dev_dbg(hdev->dev, "%s", __func__); + + LOCK_HDMI_EVENTS; + hdev->events |= HDMI_EVENT_WAKEUP; + hdev->events_received = true; + UNLOCK_HDMI_EVENTS; + + /* Wake up application waiting for event via call to poll() */ + sysfs_notify(kobj, NULL, SYSFS_EVENT_FILENAME); + wake_up_interruptible(&hdev->event_wq); + + return 0; +} + +static int audiocfg(struct hdmi_device *hdev, struct audio_cfg *cfg) +{ + union av8100_configuration config; + struct av8100_status status; + + status = av8100_status_get(); + if (status.av8100_state < AV8100_OPMODE_STANDBY) { + if (av8100_powerup() != 0) { + dev_err(hdev->dev, "av8100_powerup failed\n"); + return -EINVAL; + } + } + + if (status.av8100_state <= AV8100_OPMODE_INIT) { + if (av8100_download_firmware(I2C_INTERFACE) != 0) { + dev_err(hdev->dev, "av8100 dl fw FAIL\n"); + return -EINVAL; + } + } + + config.audio_input_format.audio_input_if_format = cfg->if_format; + config.audio_input_format.i2s_input_nb = cfg->i2s_entries; + config.audio_input_format.sample_audio_freq = cfg->freq; + config.audio_input_format.audio_word_lg = cfg->word_length; + config.audio_input_format.audio_format = cfg->format; + config.audio_input_format.audio_if_mode = cfg->if_mode; + config.audio_input_format.audio_mute = cfg->mute; + + if 
(av8100_conf_prep(AV8100_COMMAND_AUDIO_INPUT_FORMAT, + &config) != 0) { + dev_err(hdev->dev, "av8100_conf_prep FAIL\n"); + return -EINVAL; + } + + if (av8100_conf_w(AV8100_COMMAND_AUDIO_INPUT_FORMAT, + NULL, NULL, I2C_INTERFACE) != 0) { + dev_err(hdev->dev, "av8100_conf_w FAIL\n"); + return -EINVAL; + } + + return 0; +} + +/* sysfs */ +static ssize_t store_storeastext(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + if ((count != HDMI_STOREASTEXT_BIN_SIZE) && + (count != HDMI_STOREASTEXT_TEXT_SIZE) && + (count != HDMI_STOREASTEXT_TEXT_SIZE + 1)) + return -EINVAL; + + if ((count == HDMI_STOREASTEXT_BIN_SIZE) && (*buf == 0x1)) + hdev->sysfs_data.store_as_hextext = true; + else if (((count == HDMI_STOREASTEXT_TEXT_SIZE) || + (count == HDMI_STOREASTEXT_TEXT_SIZE + 1)) && (*buf == '0') && + (*(buf + 1) == '1')) { + hdev->sysfs_data.store_as_hextext = true; + } else { + hdev->sysfs_data.store_as_hextext = false; + } + + dev_dbg(hdev->dev, "store_as_hextext:%0d\n", + hdev->sysfs_data.store_as_hextext); + + return count; +} + +static ssize_t store_plugdeten(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + struct plug_detect plug_detect; + int index = 0; + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + if (hdev->sysfs_data.store_as_hextext) { + if ((count != HDMI_PLUGDETEN_TEXT_SIZE) && + (count != HDMI_PLUGDETEN_TEXT_SIZE + 1)) + return -EINVAL; + plug_detect.hdmi_detect_enable = htoi(buf + index); + index += 2; + plug_detect.on_time = htoi(buf + index); + index += 2; + plug_detect.hdmi_off_time = htoi(buf + index); + index += 2; + } else { + if (count != HDMI_PLUGDETEN_BIN_SIZE) + return -EINVAL; + plug_detect.hdmi_detect_enable = *(buf + index++); + plug_detect.on_time = *(buf + index++); + 
plug_detect.hdmi_off_time = *(buf + index++); + } + + if (plugdeten(hdev, &plug_detect)) + return -EINVAL; + + return count; +} + +static ssize_t store_edidread(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + struct edid_read edid_read; + int index = 0; + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + dev_dbg(hdev->dev, "count:%d\n", count); + + if (hdev->sysfs_data.store_as_hextext) { + if ((count != HDMI_EDIDREAD_TEXT_SIZE) && + (count != HDMI_EDIDREAD_TEXT_SIZE + 1)) + return -EINVAL; + edid_read.address = htoi(buf + index); + index += 2; + edid_read.block_nr = htoi(buf + index); + index += 2; + } else { + if (count != HDMI_EDIDREAD_BIN_SIZE) + return -EINVAL; + edid_read.address = *(buf + index++); + edid_read.block_nr = *(buf + index++); + } + + if (edidread(hdev, &edid_read, &hdev->sysfs_data.edid_data.buf_len, + hdev->sysfs_data.edid_data.buf)) + return -EINVAL; + + return count; +} + +static ssize_t show_edidread(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + int len; + int index = 0; + int cnt; + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + len = hdev->sysfs_data.edid_data.buf_len; + + if (hdev->sysfs_data.store_as_hextext) { + snprintf(buf + index, 3, "%02x", len); + index += 2; + } else + *(buf + index++) = len; + + dev_dbg(hdev->dev, "len:%02x\n", len); + + cnt = 0; + while (cnt < len) { + if (hdev->sysfs_data.store_as_hextext) { + snprintf(buf + index, 3, "%02x", + hdev->sysfs_data.edid_data.buf[cnt]); + index += 2; + } else + *(buf + index++) = + hdev->sysfs_data.edid_data.buf[cnt]; + + dev_dbg(hdev->dev, "%02x ", + hdev->sysfs_data.edid_data.buf[cnt]); + + cnt++; + } + + if (hdev->sysfs_data.store_as_hextext) + index++; + + return index; +} + +static ssize_t store_ceceven(struct device *dev, + struct device_attribute *attr, const char 
*buf, size_t count) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + bool enable = false; + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + if (hdev->sysfs_data.store_as_hextext) { + if ((count != HDMI_CECEVEN_TEXT_SIZE) && + (count != HDMI_CECEVEN_TEXT_SIZE + 1)) + return -EINVAL; + if ((*buf == '0') && (*(buf + 1) == '1')) + enable = true; + } else { + if (count != HDMI_CECEVEN_BIN_SIZE) + return -EINVAL; + if (*buf == 0x01) + enable = true; + } + + event_enable(hdev, enable, HDMI_EVENT_CEC | HDMI_EVENT_CECTXERR | + HDMI_EVENT_CECTX); + + return count; +} + +static ssize_t show_cecread(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + struct cec_rw cec_read; + int index = 0; + int cnt; + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + if (cecread(hdev, &cec_read.src, &cec_read.dest, &cec_read.length, + cec_read.data)) + return -EINVAL; + + if (hdev->sysfs_data.store_as_hextext) { + snprintf(buf + index, 3, "%02x", cec_read.src); + index += 2; + snprintf(buf + index, 3, "%02x", cec_read.dest); + index += 2; + snprintf(buf + index, 3, "%02x", cec_read.length); + index += 2; + } else { + *(buf + index++) = cec_read.src; + *(buf + index++) = cec_read.dest; + *(buf + index++) = cec_read.length; + } + + dev_dbg(hdev->dev, "len:%02x\n", cec_read.length); + + cnt = 0; + while (cnt < cec_read.length) { + if (hdev->sysfs_data.store_as_hextext) { + snprintf(buf + index, 3, "%02x", cec_read.data[cnt]); + index += 2; + } else + *(buf + index++) = cec_read.data[cnt]; + + dev_dbg(hdev->dev, "%02x ", cec_read.data[cnt]); + + cnt++; + } + + if (hdev->sysfs_data.store_as_hextext) + index++; + + return index; +} + +static ssize_t store_cecsend(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + struct cec_rw cec_w; + int index = 0; + int cnt; + int store_as_text; + + if 
(!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + if ((*buf == 'F') || (*buf == 'f')) + /* To be able to override bin format for test purpose */ + store_as_text = 1; + else + store_as_text = hdev->sysfs_data.store_as_hextext; + + if (store_as_text) { + if ((count < HDMI_CECSEND_TEXT_SIZE_MIN) || + (count > HDMI_CECSEND_TEXT_SIZE_MAX)) + return -EINVAL; + + cec_w.src = htoi(buf + index) & 0x0F; + index += 2; + cec_w.dest = htoi(buf + index); + index += 2; + cec_w.length = htoi(buf + index); + index += 2; + if (cec_w.length > HDMI_CEC_WRITE_MAXSIZE) + return -EINVAL; + cnt = 0; + while (cnt < cec_w.length) { + cec_w.data[cnt] = htoi(buf + index); + index += 2; + dev_dbg(hdev->dev, "%02x ", cec_w.data[cnt]); + cnt++; + } + } else { + if ((count < HDMI_CECSEND_BIN_SIZE_MIN) || + (count > HDMI_CECSEND_BIN_SIZE_MAX)) + return -EINVAL; + + cec_w.src = *(buf + index++); + cec_w.dest = *(buf + index++); + cec_w.length = *(buf + index++); + if (cec_w.length > HDMI_CEC_WRITE_MAXSIZE) + return -EINVAL; + memcpy(cec_w.data, buf + index, cec_w.length); + } + + if (cecsend(hdev, cec_w.src, cec_w.dest, cec_w.length, cec_w.data)) + return -EINVAL; + + return count; +} + +static ssize_t store_infofrsend(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + struct info_fr info_fr; + int index = 0; + int cnt; + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + if (hdev->sysfs_data.store_as_hextext) { + if ((count < HDMI_INFOFRSEND_TEXT_SIZE_MIN) || + (count > HDMI_INFOFRSEND_TEXT_SIZE_MAX)) + return -EINVAL; + + info_fr.type = htoi(&buf[index]); + index += 2; + info_fr.ver = htoi(&buf[index]); + index += 2; + info_fr.crc = htoi(&buf[index]); + index += 2; + info_fr.length = htoi(&buf[index]); + index += 2; + + if (info_fr.length > HDMI_INFOFRAME_MAX_SIZE) + return -EINVAL; + cnt = 0; + while (cnt < info_fr.length) { + info_fr.data[cnt] = htoi(buf + index); 
+ index += 2; + dev_dbg(hdev->dev, "%02x ", info_fr.data[cnt]); + cnt++; + } + } else { + if ((count < HDMI_INFOFRSEND_BIN_SIZE_MIN) || + (count > HDMI_INFOFRSEND_BIN_SIZE_MAX)) + return -EINVAL; + + info_fr.type = *(buf + index++); + info_fr.ver = *(buf + index++); + info_fr.crc = *(buf + index++); + info_fr.length = *(buf + index++); + + if (info_fr.length > HDMI_INFOFRAME_MAX_SIZE) + return -EINVAL; + memcpy(info_fr.data, buf + index, info_fr.length); + } + + if (infofrsend(hdev, info_fr.type, info_fr.ver, info_fr.crc, + info_fr.length, info_fr.data)) + return -EINVAL; + + return count; +} + +static ssize_t store_hdcpeven(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + bool enable = false; + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + if (hdev->sysfs_data.store_as_hextext) { + if ((count != HDMI_HDCPEVEN_TEXT_SIZE) && + (count != HDMI_HDCPEVEN_TEXT_SIZE + 1)) + return -EINVAL; + if ((*buf == '0') && (*(buf + 1) == '1')) + enable = true; + } else { + if (count != HDMI_HDCPEVEN_BIN_SIZE) + return -EINVAL; + if (*buf == 0x01) + enable = true; + } + + event_enable(hdev, enable, HDMI_EVENT_HDCP); + + return count; +} + +static ssize_t show_hdcpchkaesotp(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + u8 crc; + u8 progged; + int index = 0; + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + if (hdcpchkaesotp(hdev, &crc, &progged)) + return -EINVAL; + + if (hdev->sysfs_data.store_as_hextext) { + snprintf(buf + index, 3, "%02x", progged); + index += 2; + } else { + *(buf + index++) = progged; + } + + dev_dbg(hdev->dev, "progged:%02x\n", progged); + + if (hdev->sysfs_data.store_as_hextext) + index++; + + return index; +} + +static ssize_t store_hdcpfuseaes(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct 
hdmi_device *hdev = dev_to_hdev(dev); + struct hdcp_fuseaes hdcp_fuseaes; + int index = 0; + int cnt; + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + /* Default not OK */ + hdev->sysfs_data.fuse_result = HDMI_RESULT_NOT_OK; + + if (hdev->sysfs_data.store_as_hextext) { + if ((count != HDMI_HDCP_FUSEAES_TEXT_SIZE) && + (count != HDMI_HDCP_FUSEAES_TEXT_SIZE + 1)) + return -EINVAL; + + cnt = 0; + while (cnt < HDMI_HDCP_FUSEAES_KEYSIZE) { + hdcp_fuseaes.key[cnt] = htoi(buf + index); + index += 2; + dev_dbg(hdev->dev, "%02x ", hdcp_fuseaes.key[cnt]); + cnt++; + } + hdcp_fuseaes.crc = htoi(&buf[index]); + index += 2; + dev_dbg(hdev->dev, "%02x ", hdcp_fuseaes.crc); + } else { + if (count != HDMI_HDCP_FUSEAES_BIN_SIZE) + return -EINVAL; + + memcpy(hdcp_fuseaes.key, buf + index, + HDMI_HDCP_FUSEAES_KEYSIZE); + index += HDMI_HDCP_FUSEAES_KEYSIZE; + hdcp_fuseaes.crc = *(buf + index++); + } + + if (hdcpfuseaes(hdev, hdcp_fuseaes.key, hdcp_fuseaes.crc, + &hdcp_fuseaes.result)) + return -EINVAL; + + dev_dbg(hdev->dev, "fuseresult:%02x ", hdcp_fuseaes.result); + + hdev->sysfs_data.fuse_result = hdcp_fuseaes.result; + + return count; +} + +static ssize_t show_hdcpfuseaes(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + int index = 0; + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + if (hdev->sysfs_data.store_as_hextext) { + snprintf(buf + index, 3, "%02x", hdev->sysfs_data.fuse_result); + index += 2; + } else + *(buf + index++) = hdev->sysfs_data.fuse_result; + + dev_dbg(hdev->dev, "status:%02x\n", hdev->sysfs_data.fuse_result); + + if (hdev->sysfs_data.store_as_hextext) + index++; + + return index; +} + +static ssize_t store_hdcploadaes(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + struct hdcp_loadaesone hdcp_loadaes; + int index = 0; + int block_cnt; + int cnt; + 
u8 crc32_rcvd[CRC32_SIZE]; + u8 crc; + u8 progged; + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + /* Default not OK */ + hdev->sysfs_data.loadaes_result = HDMI_RESULT_NOT_OK; + + if (hdcpchkaesotp(hdev, &crc, &progged)) + return -EINVAL; + + if (!progged) { + /* AES is not fused */ + hdcp_loadaes.result = HDMI_AES_NOT_FUSED; + goto store_hdcploadaes_err; + } + + if (hdev->sysfs_data.store_as_hextext) { + if ((count != HDMI_HDCP_LOADAES_TEXT_SIZE) && + (count != HDMI_HDCP_LOADAES_TEXT_SIZE + 1)) { + dev_err(hdev->dev, "%s", "count mismatch\n"); + return -EINVAL; + } + + /* AES */ + block_cnt = 0; + while (block_cnt < HDMI_HDCP_AES_NR_OF_BLOCKS) { + cnt = 0; + while (cnt < HDMI_HDCP_AES_KEYSIZE) { + hdcp_loadaes.key[cnt] = htoi(buf + index); + index += 2; + dev_dbg(hdev->dev, "%02x ", + hdcp_loadaes.key[cnt]); + cnt++; + } + + if (hdcploadaes(hdev, + block_cnt + HDMI_HDCP_AES_BLOCK_START, + HDMI_HDCP_AES_KEYSIZE, + hdcp_loadaes.key, + &hdcp_loadaes.result, + crc32_rcvd)) { + dev_err(hdev->dev, "%s %d\n", + "hdcploadaes err aes block", + block_cnt + HDMI_HDCP_AES_BLOCK_START); + return -EINVAL; + } + + if (hdcp_loadaes.result) + goto store_hdcploadaes_err; + + block_cnt++; + } + + /* KSV */ + memset(hdcp_loadaes.key, 0, HDMI_HDCP_AES_KSVZEROESSIZE); + cnt = HDMI_HDCP_AES_KSVZEROESSIZE; + while (cnt < HDMI_HDCP_AES_KSVSIZE + + HDMI_HDCP_AES_KSVZEROESSIZE) { + hdcp_loadaes.key[cnt] = + htoi(&buf[index]); + index += 2; + dev_dbg(hdev->dev, "%02x ", hdcp_loadaes.key[cnt]); + cnt++; + } + + if (hdcploadaes(hdev, HDMI_HDCP_KSV_BLOCK, + HDMI_HDCP_AES_KSVSIZE + + HDMI_HDCP_AES_KSVZEROESSIZE, + hdcp_loadaes.key, + &hdcp_loadaes.result, + NULL)) { + dev_err(hdev->dev, + "%s %d\n", "hdcploadaes err in ksv\n", + block_cnt + HDMI_HDCP_AES_BLOCK_START); + return -EINVAL; + } + + if (hdcp_loadaes.result) + goto store_hdcploadaes_err; + + /* CRC32 */ + for (cnt = 0; cnt < CRC32_SIZE; cnt++) { + hdcp_loadaes.crc32[cnt] = htoi(buf + index); + index += 2; 
+ } + + if (memcmp(hdcp_loadaes.crc32, crc32_rcvd, CRC32_SIZE)) { + dev_dbg(hdev->dev, "crc32exp:%02x%02x%02x%02x\n", + hdcp_loadaes.crc32[0], + hdcp_loadaes.crc32[1], + hdcp_loadaes.crc32[2], + hdcp_loadaes.crc32[3]); + hdcp_loadaes.result = HDMI_RESULT_CRC_MISMATCH; + goto store_hdcploadaes_err; + } + } else { + if (count != HDMI_HDCP_LOADAES_BIN_SIZE) { + dev_err(hdev->dev, "%s", "count mismatch\n"); + return -EINVAL; + } + + /* AES */ + block_cnt = 0; + while (block_cnt < HDMI_HDCP_AES_NR_OF_BLOCKS) { + memcpy(hdcp_loadaes.key, buf + index, + HDMI_HDCP_AES_KEYSIZE); + index += HDMI_HDCP_AES_KEYSIZE; + + if (hdcploadaes(hdev, + block_cnt + HDMI_HDCP_AES_BLOCK_START, + HDMI_HDCP_AES_KEYSIZE, + hdcp_loadaes.key, + &hdcp_loadaes.result, + crc32_rcvd)) { + dev_err(hdev->dev, "%s %d\n", + "hdcploadaes err aes block", + block_cnt + HDMI_HDCP_AES_BLOCK_START); + return -EINVAL; + } + + if (hdcp_loadaes.result) + goto store_hdcploadaes_err; + + block_cnt++; + } + + /* KSV */ + memset(hdcp_loadaes.key, 0, HDMI_HDCP_AES_KSVZEROESSIZE); + memcpy(hdcp_loadaes.key + HDMI_HDCP_AES_KSVZEROESSIZE, + buf + index, + HDMI_HDCP_AES_KSVSIZE); + index += HDMI_HDCP_AES_KSVSIZE; + + if (hdcploadaes(hdev, HDMI_HDCP_KSV_BLOCK, + HDMI_HDCP_AES_KSVSIZE + + HDMI_HDCP_AES_KSVZEROESSIZE, + hdcp_loadaes.key, + &hdcp_loadaes.result, + NULL)) { + dev_err(hdev->dev, "%s %d\n", + "hdcploadaes err in ksv\n", + block_cnt + HDMI_HDCP_AES_BLOCK_START); + return -EINVAL; + } + + memcpy(hdcp_loadaes.crc32, buf + index, CRC32_SIZE); + index += CRC32_SIZE; + + /* CRC32 */ + if (memcmp(hdcp_loadaes.crc32, crc32_rcvd, CRC32_SIZE)) { + dev_dbg(hdev->dev, "crc32exp:%02x%02x%02x%02x\n", + hdcp_loadaes.crc32[0], + hdcp_loadaes.crc32[1], + hdcp_loadaes.crc32[2], + hdcp_loadaes.crc32[3]); + hdcp_loadaes.result = HDMI_RESULT_CRC_MISMATCH; + } + } + +store_hdcploadaes_err: + hdev->sysfs_data.loadaes_result = hdcp_loadaes.result; + return count; +} + +static ssize_t show_hdcploadaes(struct device *dev, + struct 
device_attribute *attr, char *buf) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + int index = 0; + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + if (hdev->sysfs_data.store_as_hextext) { + snprintf(buf + index, 3, "%02x", + hdev->sysfs_data.loadaes_result); + index += 2; + } else + *(buf + index++) = hdev->sysfs_data.loadaes_result; + + dev_dbg(hdev->dev, "result:%02x\n", hdev->sysfs_data.loadaes_result); + + if (hdev->sysfs_data.store_as_hextext) + index++; + + return index; +} + +static ssize_t store_hdcpauthencr(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + struct hdcp_authencr hdcp_authencr; + int index = 0; + u8 crc; + u8 progged; + int result = HDMI_RESULT_NOT_OK; + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + /* Default */ + hdev->sysfs_data.authencr.buf_len = 0; + + if (hdcpchkaesotp(hdev, &crc, &progged)) { + result = HDMI_AES_NOT_FUSED; + goto store_hdcpauthencr_end; + } + + if (!progged) { + /* AES is not fused */ + result = HDMI_AES_NOT_FUSED; + goto store_hdcpauthencr_end; + } + + if (hdev->sysfs_data.store_as_hextext) { + if ((count != HDMI_HDCPAUTHENCR_TEXT_SIZE) && + (count != HDMI_HDCPAUTHENCR_TEXT_SIZE + 1)) + goto store_hdcpauthencr_end; + + hdcp_authencr.auth_type = htoi(buf + index); + index += 2; + hdcp_authencr.encr_type = htoi(buf + index); + index += 2; + } else { + if (count != HDMI_HDCPAUTHENCR_BIN_SIZE) + goto store_hdcpauthencr_end; + + hdcp_authencr.auth_type = *(buf + index++); + hdcp_authencr.encr_type = *(buf + index++); + } + + if (hdcpauthencr(hdev, hdcp_authencr.auth_type, hdcp_authencr.encr_type, + &hdev->sysfs_data.authencr.buf_len, + hdev->sysfs_data.authencr.buf)) + goto store_hdcpauthencr_end; + + result = HDMI_RESULT_OK; + +store_hdcpauthencr_end: + hdev->sysfs_data.authencr.result = result; + return count; +} + +static ssize_t show_hdcpauthencr(struct device *dev, + 
struct device_attribute *attr, char *buf) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + int len; + int index = 0; + int cnt; + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + /* result */ + if (hdev->sysfs_data.store_as_hextext) { + snprintf(buf + index, 3, "%02x", + hdev->sysfs_data.authencr.result); + index += 2; + } else + *(buf + index++) = hdev->sysfs_data.authencr.result; + + dev_dbg(hdev->dev, "result:%02x\n", hdev->sysfs_data.authencr.result); + + /* resp_size */ + len = hdev->sysfs_data.authencr.buf_len; + if (len > AUTH_BUF_LEN) + len = AUTH_BUF_LEN; + dev_dbg(hdev->dev, "resp_size:%d\n", len); + + /* resp */ + cnt = 0; + while (cnt < len) { + if (hdev->sysfs_data.store_as_hextext) { + snprintf(buf + index, 3, "%02x", + hdev->sysfs_data.authencr.buf[cnt]); + index += 2; + + dev_dbg(hdev->dev, "%02x ", + hdev->sysfs_data.authencr.buf[cnt]); + + } else + *(buf + index++) = hdev->sysfs_data.authencr.buf[cnt]; + + cnt++; + } + + if (hdev->sysfs_data.store_as_hextext) + index++; + + return index; +} + +static ssize_t show_hdcpstateget(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + u8 hdcp_state; + int index = 0; + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + if (av8100_reg_gen_status_r(NULL, NULL, NULL, NULL, NULL, &hdcp_state)) + return -EINVAL; + + if (hdev->sysfs_data.store_as_hextext) { + snprintf(buf + index, 3, "%02x", hdcp_state); + index += 2; + } else + *(buf + index++) = hdcp_state; + + dev_dbg(hdev->dev, "status:%02x\n", hdcp_state); + + if (hdev->sysfs_data.store_as_hextext) + index++; + + return index; +} + +static ssize_t show_evread(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + int index = 0; + u8 ev; + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + ev = events_read(hdev); + + if 
(hdev->sysfs_data.store_as_hextext) { + snprintf(buf + index, 3, "%02x", ev); + index += 2; + } else + *(buf + index++) = ev; + + if (hdev->sysfs_data.store_as_hextext) + index++; + + /* Events are read: clear events */ + events_clear(hdev, EVENTS_MASK); + + return index; +} + +static ssize_t store_evclr(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + u8 ev; + int index = 0; + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + if (hdev->sysfs_data.store_as_hextext) { + if ((count != HDMI_EVCLR_TEXT_SIZE) && + (count != HDMI_EVCLR_TEXT_SIZE + 1)) + return -EINVAL; + + ev = htoi(&buf[index]); + index += 2; + } else { + if (count != HDMI_EVCLR_BIN_SIZE) + return -EINVAL; + + ev = *(buf + index++); + } + + events_clear(hdev, ev); + + return count; +} + +static ssize_t store_audiocfg(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + struct audio_cfg audio_cfg; + int index = 0; + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + if (hdev->sysfs_data.store_as_hextext) { + if ((count != HDMI_AUDIOCFG_TEXT_SIZE) && + (count != HDMI_AUDIOCFG_TEXT_SIZE + 1)) + return -EINVAL; + + audio_cfg.if_format = htoi(&buf[index]); + index += 2; + audio_cfg.i2s_entries = htoi(&buf[index]); + index += 2; + audio_cfg.freq = htoi(&buf[index]); + index += 2; + audio_cfg.word_length = htoi(&buf[index]); + index += 2; + audio_cfg.format = htoi(&buf[index]); + index += 2; + audio_cfg.if_mode = htoi(&buf[index]); + index += 2; + audio_cfg.mute = htoi(&buf[index]); + index += 2; + } else { + if (count != HDMI_AUDIOCFG_BIN_SIZE) + return -EINVAL; + + audio_cfg.if_format = *(buf + index++); + audio_cfg.i2s_entries = *(buf + index++); + audio_cfg.freq = *(buf + index++); + audio_cfg.word_length = *(buf + index++); + audio_cfg.format = *(buf + index++); + audio_cfg.if_mode = 
*(buf + index++); + audio_cfg.mute = *(buf + index++); + } + + audiocfg(hdev, &audio_cfg); + + return count; +} + +static ssize_t show_plugstatus(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + int index = 0; + struct av8100_status av8100_status; + u8 plstat; + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + av8100_status = av8100_status_get(); + plstat = av8100_status.av8100_plugin_status == AV8100_HDMI_PLUGIN; + + if (hdev->sysfs_data.store_as_hextext) { + snprintf(buf + index, 3, "%02x", plstat); + index += 2; + } else + *(buf + index++) = plstat; + + if (hdev->sysfs_data.store_as_hextext) + index++; + + return index; +} + +static ssize_t store_poweronoff(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + bool enable = false; + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + if (hdev->sysfs_data.store_as_hextext) { + if ((count != HDMI_POWERONOFF_TEXT_SIZE) && + (count != HDMI_POWERONOFF_TEXT_SIZE + 1)) + return -EINVAL; + if ((*buf == '0') && (*(buf + 1) == '1')) + enable = true; + } else { + if (count != HDMI_POWERONOFF_BIN_SIZE) + return -EINVAL; + if (*buf == 0x01) + enable = true; + } + + if (enable == 0) { + if (av8100_powerdown() != 0) { + dev_err(hdev->dev, "av8100_powerdown FAIL\n"); + return -EINVAL; + } + } else { + if (av8100_powerup() != 0) { + dev_err(hdev->dev, "av8100_powerup FAIL\n"); + return -EINVAL; + } + } + + return count; +} + +static ssize_t show_poweronoff(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + int index = 0; + struct av8100_status status; + u8 power_state; + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + status = av8100_status_get(); + if (status.av8100_state < AV8100_OPMODE_SCAN) + power_state = 0; + else + power_state = 1; 
+ + if (hdev->sysfs_data.store_as_hextext) { + snprintf(buf + index, 3, "%02x", power_state); + index += 3; + } else { + *(buf + index++) = power_state; + } + + return index; +} + +static ssize_t store_evwakeup(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct hdmi_device *hdev = dev_to_hdev(dev); + + if (!hdev) + return -EFAULT; + + dev_dbg(hdev->dev, "%s\n", __func__); + + event_wakeup(hdev); + + return count; +} + +static int hdmi_open(struct inode *inode, struct file *filp) +{ + return 0; +} + +static int hdmi_release(struct inode *inode, struct file *filp) +{ + return 0; +} + +/* ioctl */ +static long hdmi_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + u8 value = 0; + struct hdmi_register reg; + struct av8100_status status; + u8 aes_status; + struct hdmi_device *hdev = devnr_to_hdev(HDMI_DEVNR_DEFAULT); + + switch (cmd) { + case IOC_PLUG_DETECT_ENABLE: + { + struct plug_detect plug_detect; + + if (copy_from_user(&plug_detect, (void *)arg, + sizeof(struct plug_detect))) + return -EINVAL; + + if (plugdeten(hdev, &plug_detect)) + return -EINVAL; + } + break; + + case IOC_EDID_READ: + { + struct edid_read edid_read; + + if (copy_from_user(&edid_read, (void *)arg, + sizeof(struct edid_read))) + return -EINVAL; + + if (edidread(hdev, &edid_read, &edid_read.data_length, + edid_read.data)) + return -EINVAL; + + if (copy_to_user((void *)arg, (void *)&edid_read, + sizeof(struct edid_read))) { + return -EINVAL; + } + } + break; + + case IOC_CEC_EVENT_ENABLE: + if (copy_from_user(&value, (void *)arg, sizeof(u8))) + return -EINVAL; + + event_enable(hdev, value != 0, + HDMI_EVENT_CEC | HDMI_EVENT_CECTXERR | + HDMI_EVENT_CECTX); + break; + + case IOC_CEC_READ: + { + struct cec_rw cec_read; + + if (cecread(hdev, &cec_read.src, &cec_read.dest, + &cec_read.length, cec_read.data)) + return -EINVAL; + + if (copy_to_user((void *)arg, (void *)&cec_read, + sizeof(struct cec_rw))) { + return -EINVAL; + } + } + break; 
+ + case IOC_CEC_SEND: + { + struct cec_rw cec_send; + + if (copy_from_user(&cec_send, (void *)arg, + sizeof(struct cec_rw))) + return -EINVAL; + + if (cecsend(hdev, cec_send.src, cec_send.dest, cec_send.length, + cec_send.data)) + return -EINVAL; + } + break; + + case IOC_INFOFRAME_SEND: + { + struct info_fr info_fr; + + if (copy_from_user(&info_fr, (void *)arg, + sizeof(struct info_fr))) + return -EINVAL; + + if (infofrsend(hdev, info_fr.type, info_fr.ver, info_fr.crc, + info_fr.length, info_fr.data)) + return -EINVAL; + } + break; + + case IOC_HDCP_EVENT_ENABLE: + if (copy_from_user(&value, (void *)arg, sizeof(u8))) + return -EINVAL; + + event_enable(hdev, value != 0, HDMI_EVENT_HDCP); + break; + + case IOC_HDCP_CHKAESOTP: + if (hdcpchkaesotp(hdev, &value, &aes_status)) + return -EINVAL; + + if (copy_to_user((void *)arg, (void *)&aes_status, + sizeof(u8))) { + return -EINVAL; + } + break; + + case IOC_HDCP_FUSEAES: + { + struct hdcp_fuseaes hdcp_fuseaes; + + if (copy_from_user(&hdcp_fuseaes, (void *)arg, + sizeof(struct hdcp_fuseaes))) + return -EINVAL; + + if (hdcpfuseaes(hdev, hdcp_fuseaes.key, hdcp_fuseaes.crc, + &hdcp_fuseaes.result)) + return -EINVAL; + + if (copy_to_user((void *)arg, (void *)&hdcp_fuseaes, + sizeof(struct hdcp_fuseaes))) { + return -EINVAL; + } + } + break; + + case IOC_HDCP_LOADAES: + { + int block_cnt; + struct hdcp_loadaesone hdcp_loadaesone; + struct hdcp_loadaesall hdcp_loadaesall; + + if (copy_from_user(&hdcp_loadaesall, (void *)arg, + sizeof(struct hdcp_loadaesall))) + return -EINVAL; + + if (hdcpchkaesotp(hdev, &value, &aes_status)) + return -EINVAL; + + if (!aes_status) { + /* AES is not fused */ + hdcp_loadaesone.result = HDMI_AES_NOT_FUSED; + goto ioc_hdcploadaes_err; + } + + /* AES */ + block_cnt = 0; + while (block_cnt < HDMI_HDCP_AES_NR_OF_BLOCKS) { + memcpy(hdcp_loadaesone.key, hdcp_loadaesall.key + + block_cnt * HDMI_HDCP_AES_KEYSIZE, + HDMI_HDCP_AES_KEYSIZE); + + if (hdcploadaes(hdev, + block_cnt + 
HDMI_HDCP_AES_BLOCK_START, + HDMI_HDCP_AES_KEYSIZE, + hdcp_loadaesone.key, + &hdcp_loadaesone.result, + hdcp_loadaesone.crc32)) + return -EINVAL; + + if (hdcp_loadaesone.result) + return -EINVAL; + + block_cnt++; + } + + /* KSV */ + memset(hdcp_loadaesone.key, 0, HDMI_HDCP_AES_KSVZEROESSIZE); + memcpy(hdcp_loadaesone.key + HDMI_HDCP_AES_KSVZEROESSIZE, + hdcp_loadaesall.ksv, HDMI_HDCP_AES_KSVSIZE); + + if (hdcploadaes(hdev, HDMI_HDCP_KSV_BLOCK, + HDMI_HDCP_AES_KSVSIZE + + HDMI_HDCP_AES_KSVZEROESSIZE, + hdcp_loadaesone.key, + &hdcp_loadaesone.result, + NULL)) + return -EINVAL; + + if (hdcp_loadaesone.result) + return -EINVAL; + + /* CRC32 */ + if (memcmp(hdcp_loadaesall.crc32, hdcp_loadaesone.crc32, + CRC32_SIZE)) { + dev_dbg(hdev->dev, "crc32exp:%02x%02x%02x%02x\n", + hdcp_loadaesall.crc32[0], + hdcp_loadaesall.crc32[1], + hdcp_loadaesall.crc32[2], + hdcp_loadaesall.crc32[3]); + hdcp_loadaesone.result = HDMI_RESULT_CRC_MISMATCH; + goto ioc_hdcploadaes_err; + } + +ioc_hdcploadaes_err: + hdcp_loadaesall.result = hdcp_loadaesone.result; + + if (copy_to_user((void *)arg, (void *)&hdcp_loadaesall, + sizeof(struct hdcp_loadaesall))) { + return -EINVAL; + } + } + break; + + case IOC_HDCP_AUTHENCR_REQ: + { + struct hdcp_authencr hdcp_authencr; + int result = HDMI_RESULT_NOT_OK; + + u8 buf[AUTH_BUF_LEN]; + + if (copy_from_user(&hdcp_authencr, (void *)arg, + sizeof(struct hdcp_authencr))) + return -EINVAL; + + /* Default not OK */ + hdcp_authencr.resp_size = 0; + + if (hdcpchkaesotp(hdev, &value, &aes_status)) { + result = HDMI_AES_NOT_FUSED; + goto hdcp_authencr_end; + } + + if (!aes_status) { + /* AES is not fused */ + result = HDMI_AES_NOT_FUSED; + goto hdcp_authencr_end; + } + + if (hdcpauthencr(hdev, hdcp_authencr.auth_type, + hdcp_authencr.encr_type, + &value, + buf)) { + result = HDMI_RESULT_NOT_OK; + goto hdcp_authencr_end; + } + + if (value > AUTH_BUF_LEN) + value = AUTH_BUF_LEN; + + result = HDMI_RESULT_OK; + hdcp_authencr.resp_size = value; + 
memcpy(hdcp_authencr.resp, buf, value); + +hdcp_authencr_end: + hdcp_authencr.result = result; + if (copy_to_user((void *)arg, (void *)&hdcp_authencr, + sizeof(struct hdcp_authencr))) + return -EINVAL; + } + break; + + case IOC_HDCP_STATE_GET: + if (av8100_reg_gen_status_r(NULL, NULL, NULL, NULL, NULL, + &value)) + return -EINVAL; + + if (copy_to_user((void *)arg, (void *)&value, + sizeof(u8))) { + return -EINVAL; + } + break; + + case IOC_EVENTS_READ: + value = events_read(hdev); + + if (copy_to_user((void *)arg, (void *)&value, + sizeof(u8))) { + return -EINVAL; + } + + /* Events are read: clear events */ + events_clear(hdev, EVENTS_MASK); + break; + + case IOC_EVENTS_CLEAR: + if (copy_from_user(&value, (void *)arg, sizeof(u8))) + return -EINVAL; + + events_clear(hdev, value); + break; + + case IOC_AUDIO_CFG: + { + struct audio_cfg audio_cfg; + + if (copy_from_user(&audio_cfg, (void *)arg, + sizeof(struct audio_cfg))) + return -EINVAL; + + audiocfg(hdev, &audio_cfg); + } + break; + + case IOC_PLUG_STATUS: + status = av8100_status_get(); + value = status.av8100_plugin_status == AV8100_HDMI_PLUGIN; + + if (copy_to_user((void *)arg, (void *)&value, + sizeof(u8))) { + return -EINVAL; + } + break; + + case IOC_POWERONOFF: + /* Get desired power state on or off */ + if (copy_from_user(&value, (void *)arg, sizeof(u8))) + return -EINVAL; + + if (value == 0) { + if (av8100_powerdown() != 0) { + dev_err(hdev->dev, "av8100_powerdown FAIL\n"); + return -EINVAL; + } + } else { + if (av8100_powerup() != 0) { + dev_err(hdev->dev, "av8100_powerup FAIL\n"); + return -EINVAL; + } + } + break; + + case IOC_EVENT_WAKEUP: + /* Trigger event */ + event_wakeup(hdev); + break; + + case IOC_POWERSTATE: + status = av8100_status_get(); + value = status.av8100_state >= AV8100_OPMODE_SCAN; + + if (copy_to_user((void *)arg, (void *)&value, + sizeof(u8))) { + return -EINVAL; + } + break; + + /* Internal */ + case IOC_HDMI_ENABLE_INTERRUPTS: + av8100_disable_interrupt(); + if 
(av8100_enable_interrupt() != 0) { + dev_err(hdev->dev, "av8100_ei FAIL\n"); + return -EINVAL; + } + break; + + case IOC_HDMI_DOWNLOAD_FW: + if (av8100_download_firmware(I2C_INTERFACE) != 0) { + dev_err(hdev->dev, "av8100 dl fw FAIL\n"); + return -EINVAL; + } + break; + + case IOC_HDMI_ONOFF: + { + union av8100_configuration config; + + /* Get desired HDMI mode on or off */ + if (copy_from_user(&value, (void *)arg, sizeof(u8))) + return -EFAULT; + + if (av8100_conf_get(AV8100_COMMAND_HDMI, &config) != 0) { + dev_err(hdev->dev, "av8100_conf_get FAIL\n"); + return -EINVAL; + } + if (value == 0) + config.hdmi_format.hdmi_mode = AV8100_HDMI_OFF; + else + config.hdmi_format.hdmi_mode = AV8100_HDMI_ON; + + if (av8100_conf_prep(AV8100_COMMAND_HDMI, &config) != 0) { + dev_err(hdev->dev, "av8100_conf_prep FAIL\n"); + return -EINVAL; + } + if (av8100_conf_w(AV8100_COMMAND_HDMI, NULL, NULL, + I2C_INTERFACE) != 0) { + dev_err(hdev->dev, "av8100_conf_w FAIL\n"); + return -EINVAL; + } + } + break; + + case IOC_HDMI_REGISTER_WRITE: + if (copy_from_user(®, (void *)arg, + sizeof(struct hdmi_register))) { + return -EINVAL; + } + + if (av8100_reg_w(reg.offset, reg.value) != 0) { + dev_err(hdev->dev, "hdmi_register_write FAIL\n"); + return -EINVAL; + } + break; + + case IOC_HDMI_REGISTER_READ: + if (copy_from_user(®, (void *)arg, + sizeof(struct hdmi_register))) { + return -EINVAL; + } + + if (av8100_reg_r(reg.offset, ®.value) != 0) { + dev_err(hdev->dev, "hdmi_register_write FAIL\n"); + return -EINVAL; + } + + if (copy_to_user((void *)arg, (void *)®, + sizeof(struct hdmi_register))) { + return -EINVAL; + } + break; + + case IOC_HDMI_STATUS_GET: + status = av8100_status_get(); + + if (copy_to_user((void *)arg, (void *)&status, + sizeof(struct av8100_status))) { + return -EINVAL; + } + break; + + case IOC_HDMI_CONFIGURATION_WRITE: + { + struct hdmi_command_register command_reg; + + if (copy_from_user(&command_reg, (void *)arg, + sizeof(struct hdmi_command_register)) != 0) { + 
dev_err(hdev->dev, "IOC_HDMI_CONFIGURATION_WRITE " + "fail 1\n"); + command_reg.return_status = EINVAL; + } else { + command_reg.return_status = 0; + if (av8100_conf_w_raw(command_reg.cmd_id, + command_reg.buf_len, + command_reg.buf, + &(command_reg.buf_len), + command_reg.buf) != 0) { + dev_err(hdev->dev, + "IOC_HDMI_CONFIGURATION_WRITE " + "fail 2\n"); + command_reg.return_status = EINVAL; + } + } + + if (copy_to_user((void *)arg, (void *)&command_reg, + sizeof(struct hdmi_command_register)) != 0) { + return -EINVAL; + } + } + break; + + default: + break; + } + + return 0; +} + +static unsigned int +hdmi_poll(struct file *filp, poll_table *wait) +{ + unsigned int mask = 0; + struct hdmi_device *hdev; + + hdev = devnr_to_hdev(HDMI_DEVNR_DEFAULT); + if (!hdev) + return 0; + + dev_dbg(hdev->dev, "%s\n", __func__); + + poll_wait(filp, &hdev->event_wq , wait); + + LOCK_HDMI_EVENTS; + if (hdev->events_received == true) { + hdev->events_received = false; + mask = POLLIN | POLLRDNORM; + } + UNLOCK_HDMI_EVENTS; + + return mask; +} + +static const struct file_operations hdmi_fops = { + .owner = THIS_MODULE, + .open = hdmi_open, + .release = hdmi_release, + .unlocked_ioctl = hdmi_ioctl, + .poll = hdmi_poll +}; + +/* Event callback function called by hw driver */ +void hdmi_event(enum av8100_hdmi_event ev) +{ + int events_old; + int events_new; + struct hdmi_device *hdev; + struct kobject *kobj; + + hdev = devnr_to_hdev(HDMI_DEVNR_DEFAULT); + if (!hdev) + return; + + dev_dbg(hdev->dev, "hdmi_event %02x\n", ev); + + kobj = &(hdev->dev->kobj); + + LOCK_HDMI_EVENTS; + + events_old = hdev->events; + + /* Set event */ + switch (ev) { + case AV8100_HDMI_EVENT_HDMI_PLUGIN: + hdev->events &= ~HDMI_EVENT_HDMI_PLUGOUT; + hdev->events |= HDMI_EVENT_HDMI_PLUGIN; + break; + + case AV8100_HDMI_EVENT_HDMI_PLUGOUT: + hdev->events &= ~HDMI_EVENT_HDMI_PLUGIN; + hdev->events |= HDMI_EVENT_HDMI_PLUGOUT; + cec_tx_status(hdev, CEC_TX_SET_FREE); + break; + + case AV8100_HDMI_EVENT_CEC: + 
hdev->events |= HDMI_EVENT_CEC; + break; + + case AV8100_HDMI_EVENT_HDCP: + hdev->events |= HDMI_EVENT_HDCP; + break; + + case AV8100_HDMI_EVENT_CECTXERR: + hdev->events |= HDMI_EVENT_CECTXERR; + cec_tx_status(hdev, CEC_TX_SET_FREE); + break; + + case AV8100_HDMI_EVENT_CECTX: + hdev->events |= HDMI_EVENT_CECTX; + cec_tx_status(hdev, CEC_TX_SET_FREE); + break; + + default: + break; + } + + events_new = hdev->events_mask & hdev->events; + + UNLOCK_HDMI_EVENTS; + + dev_dbg(hdev->dev, "hdmi events:%02x, events_old:%02x mask:%02x\n", + events_new, events_old, hdev->events_mask); + + if (events_new != events_old) { + /* Wake up application waiting for event via call to poll() */ + sysfs_notify(kobj, NULL, SYSFS_EVENT_FILENAME); + + LOCK_HDMI_EVENTS; + hdev->events_received = true; + UNLOCK_HDMI_EVENTS; + + wake_up_interruptible(&hdev->event_wq); + } +} +EXPORT_SYMBOL(hdmi_event); + +int hdmi_device_register(struct hdmi_device *hdev) +{ + hdev->miscdev.minor = MISC_DYNAMIC_MINOR; + hdev->miscdev.name = "hdmi"; + hdev->miscdev.fops = &hdmi_fops; + + if (misc_register(&hdev->miscdev)) { + pr_err("hdmi misc_register failed\n"); + return -EFAULT; + } + + hdev->dev = hdev->miscdev.this_device; + + return 0; +} + +int __init hdmi_init(void) +{ + struct hdmi_device *hdev; + int i; + int ret; + + /* Allocate device data */ + hdev = kzalloc(sizeof(struct hdmi_device), GFP_KERNEL); + if (!hdev) { + pr_err("%s: Alloc failure\n", __func__); + return -ENOMEM; + } + + /* Add to list */ + list_add_tail(&hdev->list, &hdmi_device_list); + + if (hdmi_device_register(hdev)) { + pr_err("%s: Alloc failure\n", __func__); + return -EFAULT; + } + + hdev->devnr = HDMI_DEVNR_DEFAULT; + + /* Default sysfs file format is hextext */ + hdev->sysfs_data.store_as_hextext = true; + + init_waitqueue_head(&hdev->event_wq); + + /* Create sysfs attrs */ + for (i = 0; attr_name(hdmi_sysfs_attrs[i]); i++) { + ret = device_create_file(hdev->dev, &hdmi_sysfs_attrs[i]); + if (ret) + dev_err(hdev->dev, + "Unable 
to create sysfs attr %s (%d)\n", + hdmi_sysfs_attrs[i].attr.name, ret); + } + + /* Register event callback */ + av8100_hdmi_event_cb_set(hdmi_event); + + return 0; +} +late_initcall(hdmi_init); + +void hdmi_exit(void) +{ + struct hdmi_device *hdev = NULL; + int i; + + if (list_empty(&hdmi_device_list)) + return; + else + hdev = list_entry(hdmi_device_list.next, + struct hdmi_device, list); + + /* Deregister event callback */ + av8100_hdmi_event_cb_set(NULL); + + /* Remove sysfs attrs */ + for (i = 0; attr_name(hdmi_sysfs_attrs[i]); i++) + device_remove_file(hdev->dev, &hdmi_sysfs_attrs[i]); + + misc_deregister(&hdev->miscdev); + + /* Remove from list */ + list_del(&hdev->list); + + /* Free device data */ + kfree(hdev); +} diff --git a/drivers/video/av8100/hdmi_loc.h b/drivers/video/av8100/hdmi_loc.h new file mode 100644 index 00000000000..20314910db9 --- /dev/null +++ b/drivers/video/av8100/hdmi_loc.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * + * Author: Per Persson <per.xb.persson@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ +#ifndef __HDMI_LOC__H__ +#define __HDMI_LOC__H__ + +#define EDID_BUF_LEN 128 +#define COMMAND_BUF_LEN 128 +#define AES_KEY_SIZE 16 +#define CRC32_SIZE 4 +#define AUTH_BUF_LEN 126 +#define CECTX_TRY 20 +#define CECTX_WAITTIME 25 + +struct edid_data { + u8 buf_len; + u8 buf[EDID_BUF_LEN]; +}; + +struct authencr { + int result; + u8 buf_len; + u8 buf[AUTH_BUF_LEN]; +}; + +struct hdmi_register { + unsigned char value; + unsigned char offset; +}; + +struct hdcp_loadaesone { + u8 key[AES_KEY_SIZE]; + u8 result; + u8 crc32[CRC32_SIZE]; +}; + +struct hdmi_sysfs_data { + bool store_as_hextext; + struct plug_detect plug_detect; + bool enable_cec_event; + struct edid_data edid_data; + struct cec_rw cec_read; + bool fuse_result; + int loadaes_result; + struct authencr authencr; +}; + +struct hdmi_command_register { + unsigned char cmd_id; /* input */ + unsigned char buf_len; /* input, output */ + unsigned char buf[COMMAND_BUF_LEN]; /* input, output */ + unsigned char return_status; /* output */ +}; + +enum cec_tx_status_action { + CEC_TX_SET_FREE, + CEC_TX_SET_BUSY, + CEC_TX_CHECK +}; + +/* Internal */ +#define IOC_HDMI_ENABLE_INTERRUPTS _IOWR(HDMI_IOC_MAGIC, 32, int) +#define IOC_HDMI_DOWNLOAD_FW _IOWR(HDMI_IOC_MAGIC, 33, int) +#define IOC_HDMI_ONOFF _IOWR(HDMI_IOC_MAGIC, 34, int) +#define IOC_HDMI_REGISTER_WRITE _IOWR(HDMI_IOC_MAGIC, 35, int) +#define IOC_HDMI_REGISTER_READ _IOWR(HDMI_IOC_MAGIC, 36, int) +#define IOC_HDMI_STATUS_GET _IOWR(HDMI_IOC_MAGIC, 37, int) +#define IOC_HDMI_CONFIGURATION_WRITE _IOWR(HDMI_IOC_MAGIC, 38, int) + +#endif /* __HDMI_LOC__H__ */ diff --git a/drivers/video/b2r2/Kconfig b/drivers/video/b2r2/Kconfig new file mode 100644 index 00000000000..8cc81876de7 --- /dev/null +++ b/drivers/video/b2r2/Kconfig @@ -0,0 +1,134 @@ +config FB_B2R2 + tristate "B2R2 engine support" + default n + help + B2R2 engine does various bit-blitting operations,post-processor operations + and various compositions. 
+ +config B2R2_PLUG_CONF + bool "B2R2 bus plug configuration" + depends on FB_B2R2 + default n + help + Configures how B2R2 access the memory bus. Enabling this will increase + the performance of B2R2 at the cost of using the bus more heavily. + + If this is set to 'n', the hardware defaults will be used. + +choice + prompt "Opcode size" + depends on B2R2_PLUG_CONF + default B2R2_OPSIZE_64 + + config B2R2_OPSIZE_8 + bool "8 bytes" + config B2R2_OPSIZE_16 + bool "16 bytes" + config B2R2_OPSIZE_32 + bool "32 bytes" + config B2R2_OPSIZE_64 + bool "64 bytes" + +endchoice + +choice + prompt "Chunk size" + depends on B2R2_PLUG_CONF + default B2R2_CHSIZE_128 + + config B2R2_CHSIZE_1 + bool "1 op" + config B2R2_CHSIZE_2 + bool "2 ops" + config B2R2_CHSIZE_4 + bool "4 ops" + config B2R2_CHSIZE_8 + bool "8 ops" + config B2R2_CHSIZE_16 + bool "16 ops" + config B2R2_CHSIZE_32 + bool "32 ops" + config B2R2_CHSIZE_64 + bool "64 ops" + config B2R2_CHSIZE_128 + bool "128 ops" +endchoice + +choice + prompt "Message size" + depends on B2R2_PLUG_CONF + default B2R2_MGSIZE_128 + + config B2R2_MGSIZE_1 + bool "1 chunk" + config B2R2_MGSIZE_2 + bool "2 chunks" + config B2R2_MGSIZE_4 + bool "4 chunks" + config B2R2_MGSIZE_8 + bool "8 s" + config B2R2_MGSIZE_16 + bool "16 chunks" + config B2R2_MGSIZE_32 + bool "32 chunks" + config B2R2_MGSIZE_64 + bool "64 chunks" + config B2R2_MGSIZE_128 + bool "128 chunks" +endchoice + +choice + prompt "Page size" + depends on B2R2_PLUG_CONF + default B2R2_PGSIZE_256 + + config B2R2_PGSIZE_64 + bool "64 bytes" + config B2R2_PGSIZE_128 + bool "128 bytes" + config B2R2_PGSIZE_256 + bool "256 bytes" +endchoice + +config B2R2_DEBUG + bool "B2R2 debugging" + default n + depends on FB_B2R2 + help + Enable debugging features for the B2R2 driver. + +config B2R2_PROFILER + tristate "B2R2 profiler" + default n + depends on FB_B2R2 + help + Enables the profiler for the B2R2 driver. 
+ + It is recommended to build this as a module, since the configuration + of filters etc. is done at load time. + +config B2R2_GENERIC + bool "B2R2 generic path" + default y + depends on FB_B2R2 + help + Enables support for the generic path in the B2R2 driver. This path should + be used when there is no optimized implementation for a request. + +choice + prompt "Generic usage mode" + depends on B2R2_GENERIC + default B2R2_GENERIC_FALLBACK + + config B2R2_GENERIC_FALLBACK + bool "Fallback" + help + The optimized path will be used for all supported operations, and the + generic path will be used as a fallback for the ones not implemented. + + config B2R2_GENERIC_ONLY + bool "Always" + help + The generic path will be used for all operations. + +endchoice diff --git a/drivers/video/b2r2/Makefile b/drivers/video/b2r2/Makefile new file mode 100644 index 00000000000..f271f4e7ea1 --- /dev/null +++ b/drivers/video/b2r2/Makefile @@ -0,0 +1,15 @@ +# Make file for compiling and loadable module B2R2 + +obj-$(CONFIG_FB_B2R2) += b2r2.o + +b2r2-objs = b2r2_api.o b2r2_blt_main.o b2r2_core.o b2r2_mem_alloc.o b2r2_generic.o b2r2_node_gen.o b2r2_node_split.o b2r2_profiler_socket.o b2r2_timing.o b2r2_filters.o b2r2_utils.o b2r2_input_validation.o b2r2_hw_convert.o + +ifdef CONFIG_B2R2_DEBUG +b2r2-objs += b2r2_debug.o +endif + +ifeq ($(CONFIG_FB_B2R2),m) +obj-y += b2r2_kernel_if.o +endif + +obj-$(CONFIG_B2R2_PROFILER) += b2r2_profiler/ diff --git a/drivers/video/b2r2/b2r2_api.c b/drivers/video/b2r2/b2r2_api.c new file mode 100644 index 00000000000..0361e85ebf3 --- /dev/null +++ b/drivers/video/b2r2/b2r2_api.c @@ -0,0 +1,1643 @@ +/* + * Copyright (C) ST-Ericsson SA 2010/2012 + * + * ST-Ericsson B2R2 Blitter module API + * + * Author: Jorgen Nilsson <jorgen.nilsson@stericsson.com> + * Author: Robert Fekete <robert.fekete@stericsson.com> + * Author: Paul Wannback + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/file.h> +#include <linux/poll.h> +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/miscdevice.h> +#include <linux/list.h> +#ifdef CONFIG_ANDROID_PMEM +#include <linux/android_pmem.h> +#endif +#include <linux/fb.h> +#include <linux/uaccess.h> +#ifdef CONFIG_DEBUG_FS +#include <linux/debugfs.h> +#endif +#include <asm/cacheflush.h> +#include <linux/smp.h> +#include <linux/dma-mapping.h> +#include <linux/sched.h> +#include <linux/err.h> +#include <linux/hwmem.h> +#include <linux/kref.h> + +#include "b2r2_internal.h" +#include "b2r2_control.h" +#include "b2r2_core.h" +#include "b2r2_timing.h" +#include "b2r2_utils.h" +#include "b2r2_debug.h" +#include "b2r2_input_validation.h" +#include "b2r2_profiler_socket.h" +#include "b2r2_hw.h" + +/* + * TODO: + * Implementation of query cap + * Support for user space virtual pointer to physically consecutive memory + * Support for user space virtual pointer to physically scattered memory + * Callback reads lagging behind in blt_api_stress app + * Store smaller items in the report list instead of the whole request + * Support read of many report records at once. 
+ */ + +#define DATAS_START_SIZE 10 +#define DATAS_GROW_SIZE 5 + +/** + * @miscdev: The miscdev presenting b2r2 to the system + */ +struct b2r2_blt { + spinlock_t lock; + int next_job_id; + struct miscdevice miscdev; + struct device *dev; + struct mutex datas_lock; + /** + * datas - Stores the b2r2_blt_data mapped to the cliend handle + */ + struct b2r2_blt_data **datas; + /** + * data_count - The current maximum of active datas + */ + int data_count; +}; + +struct b2r2_blt_data { + struct b2r2_control_instance *ctl_instace[B2R2_MAX_NBR_DEVICES]; +}; + +/** + * Used to keep track of coming and going b2r2 cores and + * the number of active instance references + */ +struct b2r2_control_ref { + struct b2r2_control *b2r2_control; + spinlock_t lock; +}; + +/** + * b2r2_blt - The blitter device, /dev/b2r2_blt + */ +struct kref blt_refcount; +static struct b2r2_blt *b2r2_blt; + +/** + * b2r2_control - The core controls and synchronization mechanism + */ +static struct b2r2_control_ref b2r2_controls[B2R2_MAX_NBR_DEVICES]; + +/** + * b2r2_blt_add_control - Add the b2r2 core control + */ +void b2r2_blt_add_control(struct b2r2_control *cont) +{ + unsigned long flags; + BUG_ON(cont->id < 0 || cont->id >= B2R2_MAX_NBR_DEVICES); + + spin_lock_irqsave(&b2r2_controls[cont->id].lock, flags); + if (b2r2_controls[cont->id].b2r2_control == NULL) + b2r2_controls[cont->id].b2r2_control = cont; + spin_unlock_irqrestore(&b2r2_controls[cont->id].lock, flags); +} + +/** + * b2r2_blt_remove_control - Remove the b2r2 core control + */ +void b2r2_blt_remove_control(struct b2r2_control *cont) +{ + unsigned long flags; + BUG_ON(cont->id < 0 || cont->id >= B2R2_MAX_NBR_DEVICES); + + spin_lock_irqsave(&b2r2_controls[cont->id].lock, flags); + b2r2_controls[cont->id].b2r2_control = NULL; + spin_unlock_irqrestore(&b2r2_controls[cont->id].lock, flags); +} + +/** + * b2r2_blt_get_control - Lock control for writing/removal + */ +static struct b2r2_control *b2r2_blt_get_control(int i) +{ + struct 
b2r2_control *cont; + unsigned long flags; + BUG_ON(i < 0 || i >= B2R2_MAX_NBR_DEVICES); + + spin_lock_irqsave(&b2r2_controls[i].lock, flags); + cont = (struct b2r2_control *) b2r2_controls[i].b2r2_control; + if (cont != NULL) { + if (!cont->enabled) + cont = NULL; + else + kref_get(&cont->ref); + } + spin_unlock_irqrestore(&b2r2_controls[i].lock, flags); + + return cont; +} + +/** + * b2r2_blt_release_control - Unlock control for writing/removal + */ +static void b2r2_blt_release_control(int i) +{ + struct b2r2_control *cont; + unsigned long flags; + BUG_ON(i < 0 || i >= B2R2_MAX_NBR_DEVICES); + + spin_lock_irqsave(&b2r2_controls[i].lock, flags); + cont = (struct b2r2_control *) b2r2_controls[i].b2r2_control; + spin_unlock_irqrestore(&b2r2_controls[i].lock, flags); + if (cont != NULL) + kref_put(&cont->ref, b2r2_core_release); +} + +/** + * Increase size of array containing b2r2 handles + */ +static int grow_datas(void) +{ + struct b2r2_blt_data **new_datas = NULL; + int new_data_count = b2r2_blt->data_count + DATAS_GROW_SIZE; + int ret = 0; + + new_datas = kzalloc(new_data_count * sizeof(*new_datas), GFP_KERNEL); + if (new_datas == NULL) { + ret = -ENOMEM; + goto exit; + } + + memcpy(new_datas, b2r2_blt->datas, + b2r2_blt->data_count * sizeof(*b2r2_blt->datas)); + + kfree(b2r2_blt->datas); + + b2r2_blt->data_count = new_data_count; + b2r2_blt->datas = new_datas; +exit: + return ret; +} + +/** + * Allocate and/or reserve a b2r2 handle + */ +static int alloc_handle(struct b2r2_blt_data *blt_data) +{ + int handle; + int ret; + + mutex_lock(&b2r2_blt->datas_lock); + + if (b2r2_blt->datas == NULL) { + b2r2_blt->datas = kzalloc( + DATAS_START_SIZE * sizeof(*b2r2_blt->datas), + GFP_KERNEL); + if (b2r2_blt->datas == NULL) { + ret = -ENOMEM; + goto exit; + } + b2r2_blt->data_count = DATAS_START_SIZE; + } + + for (handle = 0; handle < b2r2_blt->data_count; handle++) { + if (b2r2_blt->datas[handle] == NULL) { + b2r2_blt->datas[handle] = blt_data; + break; + } + + if (handle 
== b2r2_blt->data_count - 1) { + ret = grow_datas(); + if (ret < 0) + goto exit; + } + } + ret = handle; +exit: + mutex_unlock(&b2r2_blt->datas_lock); + + return ret; +} + +/** + * Get b2r2 data from b2r2 handle + */ +static struct b2r2_blt_data *get_data(int handle) +{ + if (handle >= b2r2_blt->data_count || handle < 0) + return NULL; + else + return b2r2_blt->datas[handle]; +} + +/** + * Unreserve b2r2 handle + */ +static void free_handle(int handle) +{ + if (handle < b2r2_blt->data_count && handle >= 0) + b2r2_blt->datas[handle] = NULL; +} + +/** + * Get the next job number. This is the one returned to the client + * if the blit request was successful. + */ +static int get_next_job_id(void) +{ + int job_id; + unsigned long flags; + + spin_lock_irqsave(&b2r2_blt->lock, flags); + if (b2r2_blt->next_job_id < 1) + b2r2_blt->next_job_id = 1; + job_id = b2r2_blt->next_job_id++; + spin_unlock_irqrestore(&b2r2_blt->lock, flags); + + return job_id; +} + +/** + * Limit the number of cores used in some "easy" and impossible cases + */ +static int limit_blits(int n_split, struct b2r2_blt_req *user_req) +{ + if (n_split <= 1) + return n_split; + + if (user_req->dst_rect.width < 24 && user_req->dst_rect.height < 24) + return 1; + + if (user_req->src_rect.width < n_split && + user_req->src_rect.height < n_split) + return 1; + + return n_split; +} + +/** + * Check if the format inherently requires the b2r2 scaling engine to be active + */ +static bool is_scaling_fmt(enum b2r2_blt_fmt fmt) +{ + /* Plane separated formats must be treated as scaling */ + switch (fmt) { + case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV444_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: + case 
B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: + case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: + return true; + default: + return false; + } +} + +/** + * Check for macroblock formats + */ +static bool is_mb_fmt(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: + case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: + return true; + default: + return false; + } +} + +/** + * Split a request rectangle on available cores + */ +static int b2r2_blt_split_request(struct b2r2_blt_data *blt_data, + struct b2r2_blt_req *user_req, + struct b2r2_blt_request **split_requests, + struct b2r2_control_instance **ctl, + int *n_split) +{ + int sstep_x, sstep_y, dstep_x, dstep_y; + int dstart_x, dstart_y; + int bstart_x, bstart_y; + int dpos_x, dpos_y; + int bpos_x, bpos_y; + int dso_x = 1; + int dso_y = 1; + int sf_x, sf_y; + int i; + int srw, srh; + int drw, drh; + bool ssplit_x = true; + bool dsplit_x = true; + enum b2r2_blt_transform transform; + bool is_rotation = false; + bool is_scaling = false; + bool bg_blend = false; + u32 core_mask = 0; + + srw = user_req->src_rect.width; + srh = user_req->src_rect.height; + drw = user_req->dst_rect.width; + drh = user_req->dst_rect.height; + transform = user_req->transform; + + /* Early exit in the basic cases */ + if (*n_split == 0) { + return -ENOSYS; + } else if (*n_split == 1 || + (srw < *n_split && srh < *n_split) || + (drw < *n_split && drh < *n_split) || + is_mb_fmt(user_req->src_img.fmt)) { + /* Handle macroblock formats with one + * core for now since there seems to be some bug + * related to macroblock access patterns + */ + memcpy(&split_requests[0]->user_req, + user_req, + sizeof(*user_req)); + split_requests[0]->core_mask = 1; + *n_split = 1; + return 0; + } + + /* + * TODO: fix the load balancing algorithm + */ + + is_rotation = (transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) != 0; + + /* Check for scaling */ + if (is_rotation) { + is_scaling = (user_req->src_rect.width != + 
user_req->dst_rect.height) || + (user_req->src_rect.height != + user_req->dst_rect.width); + } else { + is_scaling = (user_req->src_rect.width != + user_req->dst_rect.width) || + (user_req->src_rect.height != + user_req->dst_rect.height); + } + + is_scaling = is_scaling || + is_scaling_fmt(user_req->src_img.fmt) || + is_scaling_fmt(user_req->dst_img.fmt); + + bg_blend = ((user_req->flags & B2R2_BLT_FLAG_BG_BLEND) != 0); + + /* + * Split the request + */ + + b2r2_log_info(b2r2_blt->dev, "%s: In (t:0x%08X, f:0x%08X):\n" + "\tsrc_rect x:%d, y:%d, w:%d, h:%d src fmt:0x%x\n" + "\tdst_rect x:%d, y:%d, w:%d, h:%d dst fmt:0x%x\n", + __func__, + user_req->transform, + user_req->flags, + user_req->src_rect.x, + user_req->src_rect.y, + user_req->src_rect.width, + user_req->src_rect.height, + user_req->src_img.fmt, + user_req->dst_rect.x, + user_req->dst_rect.y, + user_req->dst_rect.width, + user_req->dst_rect.height, + user_req->dst_img.fmt); + + /* TODO: We need sub pixel precision here, + * or a better way to split rects */ + dstart_x = user_req->dst_rect.x; + dstart_y = user_req->dst_rect.y; + if (bg_blend) { + bstart_x = user_req->bg_rect.x; + bstart_y = user_req->bg_rect.y; + } + + if (srw && srh) { + if ((srw < srh) && !is_scaling) { + ssplit_x = false; + sstep_y = srh / *n_split; + /* Round up */ + if (srh % (*n_split)) + sstep_y++; + + if (srh > 16) + sstep_y = ((sstep_y + 16) >> 4) << 4; + + if (transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) { + sf_y = (drw << 10) / srh; + dstep_x = (sf_y * sstep_y) >> 10; + } else { + dsplit_x = false; + sf_y = (drh << 10) / srh; + dstep_y = (sf_y * sstep_y) >> 10; + } + } else { + sstep_x = srw / *n_split; + /* Round up */ + if (srw % (*n_split)) + sstep_x++; + + if (is_scaling) { + int scale_step_size = + B2R2_RESCALE_MAX_WIDTH - 1; + int pad = (scale_step_size - + (sstep_x % scale_step_size)); + if ((sstep_x + pad) < srw) + sstep_x += pad; + } else { + /* Aim for even 16px multiples */ + if ((sstep_x & 0xF) && ((sstep_x + 16) < srw)) 
+ sstep_x = ((sstep_x + 16) >> 4) << 4; + } + + if (transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) { + dsplit_x = false; + sf_x = (drh << 10) / srw; + dstep_y = (sf_x * sstep_x) >> 10; + } else { + sf_x = (drw << 10) / srw; + dstep_x = (sf_x * sstep_x) >> 10; + } + } + + } else { + sstep_x = sstep_y = 0; + + if (drw < drh) { + dsplit_x = false; + dstep_y = drh / *n_split; + /* Round up */ + if (drh % *n_split) + dstep_y++; + + /* Aim for even 16px multiples */ + if ((dstep_y & 0xF) && ((dstep_y + 16) < drh)) + dstep_y = ((dstep_y + 16) >> 4) << 4; + } else { + dstep_x = drw / *n_split; + /* Round up */ + if (drw % *n_split) + dstep_x++; + + /* Aim for even 16px multiples */ + if ((dstep_x & 0xF) && ((dstep_x + 16) < drw)) + dstep_x = ((dstep_x + 16) >> 4) << 4; + } + } + + /* Check for flip and rotate to establish destination + * step order */ + if (transform & B2R2_BLT_TRANSFORM_FLIP_H) { + dstart_x += drw; + if (bg_blend) + bstart_x += drw; + dso_x = -1; + } + if ((transform & B2R2_BLT_TRANSFORM_FLIP_V) || + (transform & B2R2_BLT_TRANSFORM_CCW_ROT_90)) { + dstart_y += drh; + if (bg_blend) + bstart_y += drh; + dso_y = -1; + } + + /* Set scan starting position */ + dpos_x = dstart_x; + dpos_y = dstart_y; + if (bg_blend) { + bpos_x = bstart_x; + bpos_y = bstart_y; + } + + for (i = 0; i < *n_split; i++) { + struct b2r2_blt_req *sreq = + &split_requests[i]->user_req; + + /* First mimic all */ + memcpy(sreq, user_req, sizeof(*user_req)); + + /* Then change the rects */ + if (srw && srh) { + if (ssplit_x) { + if (sstep_x > 0) { + sreq->src_rect.width = + min(sstep_x, srw); + sreq->src_rect.x += i*sstep_x; + srw -= sstep_x; + } else { + sreq->src_rect.width = srw; + } + } else { + if (sstep_y > 0) { + sreq->src_rect.y += i*sstep_y; + sreq->src_rect.height = + min(sstep_y, srh); + srh -= sstep_y; + } else { + sreq->src_rect.height = srh; + } + } + } + + if (dsplit_x) { + int sx = min(dstep_x, drw); + if (dso_x < 0) { + dpos_x += dso_x * sx; + if (bg_blend) + bpos_x += dso_x * 
sx; + } + sreq->dst_rect.width = sx; + sreq->dst_rect.x = dpos_x; + if (bg_blend) { + sreq->bg_rect.width = sx; + sreq->bg_rect.x = bpos_x; + } + if (dso_x > 0) { + dpos_x += dso_x * sx; + if (bg_blend) + bpos_x += dso_x * sx; + } + drw -= sx; + } else { + int sy = min(dstep_y, drh); + if (dso_y < 0) { + dpos_y += dso_y * sy; + if (bg_blend) + bpos_y += dso_y * sy; + } + sreq->dst_rect.height = sy; + sreq->dst_rect.y = dpos_y; + if (bg_blend) { + sreq->bg_rect.height = sy; + sreq->bg_rect.y = bpos_y; + } + if (dso_y > 0) { + dpos_y += dso_y * sy; + if (bg_blend) + bpos_y += dso_y * sy; + } + drh -= sy; + } + + b2r2_log_info(b2r2_blt->dev, "%s: Out:\n" + "\tsrc_rect x:%d, y:%d, w:%d, h:%d\n" + "\tdst_rect x:%d, y:%d, w:%d, h:%d\n" + "\tbg_rect x:%d, y:%d, w:%d, h:%d\n", + __func__, + sreq->src_rect.x, + sreq->src_rect.y, + sreq->src_rect.width, + sreq->src_rect.height, + sreq->dst_rect.x, + sreq->dst_rect.y, + sreq->dst_rect.width, + sreq->dst_rect.height, + sreq->bg_rect.x, + sreq->bg_rect.y, + sreq->bg_rect.width, + sreq->bg_rect.height); + + core_mask |= (1 << i); + } + + for (i = 0; i < *n_split; i++) + split_requests[i]->core_mask = core_mask; + + return 0; +} + +/** + * Get available b2r2 control instances. It will be limited + * to the number of cores available at the current point in time. + * It will also cause the cores to stay active during the time until + * release_control_instances is called. + */ +static void get_control_instances(struct b2r2_blt_data *blt_data, + struct b2r2_control_instance **ctl, int max_size, + int *count) +{ + int i; + + *count = 0; + for (i = 0; i < max_size; i++) { + struct b2r2_control_instance *ci = blt_data->ctl_instace[i]; + if (ci) { + struct b2r2_control *cont = + b2r2_blt_get_control(ci->control_id); + if (cont) { + ctl[*count] = ci; + *count += 1; + } + } + } +} + +/** + * Release b2r2 control instances. The cores allocated for the request + * are given back. 
+ */ +static void release_control_instances(struct b2r2_control_instance **ctl, + int count) +{ + int i; + + /* Release the handles to the core controls */ + for (i = 0; i < count; i++) { + if (ctl[i]) + b2r2_blt_release_control(ctl[i]->control_id); + } +} + +/** + * Free b2r2 request + */ +static void b2r2_free_request(struct b2r2_blt_request *request) +{ + if (request) { + /* Free requests in split_requests */ + if (request->clut) + dma_free_coherent(b2r2_blt->dev, + CLUT_SIZE, + request->clut, + request->clut_phys_addr); + request->clut = NULL; + request->clut_phys_addr = 0; + kfree(request); + } +} + +/** + * Allocate internal b2r2 request based on user input. + */ +static int b2r2_alloc_request(struct b2r2_blt_req *user_req, + bool us_req, struct b2r2_blt_request **request_out) +{ + int ret = 0; + struct b2r2_blt_request *request = + kzalloc(sizeof(*request), GFP_KERNEL); + if (!request) + return -ENOMEM; + + /* Initialize the structure */ + INIT_LIST_HEAD(&request->list); + + /* + * If the user specified a color look-up table, + * make a copy that the HW can use. + */ + if ((user_req->flags & + B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) != 0) { + request->clut = dma_alloc_coherent( + b2r2_blt->dev, + CLUT_SIZE, + &(request->clut_phys_addr), + GFP_DMA | GFP_KERNEL); + if (request->clut == NULL) { + b2r2_log_err(b2r2_blt->dev, + "%s CLUT allocation " + "failed.\n", __func__); + ret = -ENOMEM; + goto exit; + } + + if (us_req) { + if (copy_from_user(request->clut, + user_req->clut, CLUT_SIZE)) { + b2r2_log_err(b2r2_blt->dev, "%s: CLUT " + "copy_from_user failed\n", + __func__); + ret = -EFAULT; + goto exit; + } + } else { + memcpy(request->clut, user_req->clut, + CLUT_SIZE); + } + } + + request->profile = is_profiler_registered_approx(); + + *request_out = request; +exit: + if (ret != 0) + b2r2_free_request(request); + + return ret; +} + +/** + * Do the blit job split on available cores. 
+ */ +static int b2r2_blt_blit_internal(int handle, + struct b2r2_blt_req *user_req, + bool us_req) +{ + int request_id; + int i; + int n_instance = 0; + int n_blit = 0; + int ret = 0; + struct b2r2_blt_data *blt_data; + struct b2r2_blt_req ureq; + + /* The requests and the designated workers */ + struct b2r2_blt_request *split_requests[B2R2_MAX_NBR_DEVICES]; + struct b2r2_control_instance *ctl[B2R2_MAX_NBR_DEVICES]; + + blt_data = get_data(handle); + if (blt_data == NULL) { + b2r2_log_warn(b2r2_blt->dev, + "%s, blitter instance not found (handle=%d)\n", + __func__, handle); + return -ENOSYS; + } + + /* Get the b2r2 core controls for the job */ + get_control_instances(blt_data, ctl, B2R2_MAX_NBR_DEVICES, &n_instance); + if (n_instance == 0) { + b2r2_log_err(b2r2_blt->dev, "%s: No b2r2 cores available.\n", + __func__); + return -ENOSYS; + } + + /* Get the user data */ + if (us_req) { + if (copy_from_user(&ureq, user_req, sizeof(ureq))) { + b2r2_log_err(b2r2_blt->dev, + "%s: copy_from_user failed\n", + __func__); + ret = -EFAULT; + goto exit; + } + } else { + memcpy(&ureq, user_req, sizeof(ureq)); + } + + /* + * B2R2 cannot handle destination clipping on buffers + * allocated close to 64MiB bank boundaries. + * recalculate src_ and dst_rect to avoid clipping. 
+ * + * Also this is needed to ensure the request split + * operates on visible areas + */ + b2r2_recalculate_rects(b2r2_blt->dev, &ureq); + + if (!b2r2_validate_user_req(b2r2_blt->dev, &ureq)) { + b2r2_log_warn(b2r2_blt->dev, + "%s: b2r2_validate_user_req failed.\n", + __func__); + ret = -EINVAL; + goto exit; + } + + /* Don't split small requests */ + n_blit = limit_blits(n_instance, &ureq); + + /* The id needs to be universal on + * all cores */ + request_id = get_next_job_id(); + +#ifdef CONFIG_B2R2_GENERIC_ONLY + /* Limit the generic only solution to one core (for now) */ + n_blit = 1; +#endif + + for (i = 0; i < n_blit; i++) { + ret = b2r2_alloc_request(&ureq, us_req, &split_requests[i]); + if (ret < 0 || !split_requests[i]) { + b2r2_log_err(b2r2_blt->dev, "%s: Failed to alloc mem\n", + __func__); + ret = -ENOMEM; + break; + } + split_requests[i]->instance = ctl[i]; + split_requests[i]->job.job_id = request_id; + split_requests[i]->job.data = (int) ctl[i]->control->data; + } + + /* Split the request */ + if (ret >= 0) + ret = b2r2_blt_split_request(blt_data, &ureq, + &split_requests[0], &ctl[0], &n_blit); + + /* If anything failed, clean up allocated memory */ + if (ret < 0) { + for (i = 0; i < n_blit; i++) + b2r2_free_request(split_requests[i]); + b2r2_log_err(b2r2_blt->dev, + "%s: b2r2_blt_split_request failed.\n", + __func__); + goto exit; + } + +#ifdef CONFIG_B2R2_GENERIC_ONLY + if (ureq.flags & B2R2_BLT_FLAG_BG_BLEND) { + /* No support for BG BLEND in generic + * implementation yet */ + b2r2_log_warn(b2r2_blt->dev, "%s: Unsupported: " + "Background blend in b2r2_generic_blt\n", + __func__); + ret = -ENOSYS; + b2r2_free_request(split_requests[0]); + goto exit; + } + /* Use the generic path for all operations */ + ret = b2r2_generic_blt(split_requests[0]); +#else + /* Call each blitter control */ + for (i = 0; i < n_blit; i++) { + ret = b2r2_control_blt(split_requests[i]); + if (ret < 0) { + b2r2_log_warn(b2r2_blt->dev, + "%s: b2r2_control_blt failed.\n", 
__func__); + break; + } + } + if (ret != -ENOSYS) { + int j; + /* TODO: if one blitter fails then cancel the jobs added */ + + /* Call waitjob for successful jobs + * (synchs if specified in request) */ + if (ureq.flags & B2R2_BLT_FLAG_DRY_RUN) + goto exit; + + for (j = 0; j < i; j++) { + int rtmp; + + rtmp = b2r2_control_waitjob(split_requests[j]); + if (rtmp < 0) { + b2r2_log_err(b2r2_blt->dev, + "%s: b2r2_control_waitjob failed.\n", + __func__); + } + + /* Save just the one error */ + ret = (ret >= 0) ? rtmp : ret; + } + } +#endif +#ifdef CONFIG_B2R2_GENERIC_FALLBACK + if (ret == -ENOSYS) { + struct b2r2_blt_request *request_gen = NULL; + if (ureq.flags & B2R2_BLT_FLAG_BG_BLEND) { + /* No support for BG BLEND in generic + * implementation yet */ + b2r2_log_warn(b2r2_blt->dev, "%s: Unsupported: " + "Background blend in b2r2_generic_blt\n", + __func__); + goto exit; + } + + b2r2_log_info(b2r2_blt->dev, + "b2r2_blt=%d Going generic.\n", ret); + ret = b2r2_alloc_request(&ureq, us_req, &request_gen); + if (ret < 0 || !request_gen) { + b2r2_log_err(b2r2_blt->dev, + "%s: Failed to alloc mem for " + "request_gen\n", __func__); + ret = -ENOMEM; + goto exit; + } + + /* Initialize the structure */ + request_gen->instance = ctl[0]; + memcpy(&request_gen->user_req, &ureq, + sizeof(request_gen->user_req)); + request_gen->core_mask = 1; + request_gen->job.job_id = request_id; + request_gen->job.data = (int) ctl[0]->control->data; + + ret = b2r2_generic_blt(request_gen); + b2r2_log_info(b2r2_blt->dev, "\nb2r2_generic_blt=%d " + "Generic done.\n", ret); + } +#endif +exit: + release_control_instances(ctl, n_instance); + + ret = ret >= 0 ? 
request_id : ret; + + return ret; +} + +/** + * Free the memory used for the b2r2_blt device + */ +static void b2r2_blt_release(struct kref *ref) +{ + BUG_ON(b2r2_blt == NULL); + if (b2r2_blt == NULL) + return; + kfree(b2r2_blt->datas); + kfree(b2r2_blt); + b2r2_blt = NULL; +} + +int b2r2_blt_open(void) +{ + int ret = 0; + struct b2r2_blt_data *blt_data = NULL; + int i; + + if (!atomic_inc_not_zero(&blt_refcount.refcount)) + return -ENOSYS; + + /* Allocate blitter instance data structure */ + blt_data = (struct b2r2_blt_data *) + kzalloc(sizeof(*blt_data), GFP_KERNEL); + if (!blt_data) { + b2r2_log_err(b2r2_blt->dev, "%s: Failed to alloc\n", __func__); + ret = -ENOMEM; + goto err; + } + + for (i = 0; i < B2R2_MAX_NBR_DEVICES; i++) { + struct b2r2_control *control = b2r2_blt_get_control(i); + if (control != NULL) { + struct b2r2_control_instance *ci; + + /* Allocate and initialize the control instance */ + ci = kzalloc(sizeof(*ci), GFP_KERNEL); + if (!ci) { + b2r2_log_err(b2r2_blt->dev, + "%s: Failed to alloc\n", + __func__); + ret = -ENOMEM; + b2r2_blt_release_control(i); + goto err; + } + ci->control_id = i; + ci->control = control; + ret = b2r2_control_open(ci); + if (ret < 0) { + b2r2_log_err(b2r2_blt->dev, + "%s: Failed to open b2r2 control %d\n", + __func__, i); + kfree(ci); + b2r2_blt_release_control(i); + goto err; + } + blt_data->ctl_instace[i] = ci; + b2r2_blt_release_control(i); + } else { + blt_data->ctl_instace[i] = NULL; + } + } + + /* TODO: Create kernel worker kthread */ + + ret = alloc_handle(blt_data); + if (ret < 0) + goto err; + + kref_put(&blt_refcount, b2r2_blt_release); + + return ret; + +err: + /* Destroy the blitter instance data structure */ + if (blt_data) { + for (i = 0; i < B2R2_MAX_NBR_DEVICES; i++) + kfree(blt_data->ctl_instace[i]); + kfree(blt_data); + } + + kref_put(&blt_refcount, b2r2_blt_release); + + return ret; +} +EXPORT_SYMBOL(b2r2_blt_open); + +int b2r2_blt_close(int handle) +{ + int i; + struct b2r2_blt_data *blt_data; + int 
ret = 0; + + if (!atomic_inc_not_zero(&blt_refcount.refcount)) + return -ENOSYS; + + b2r2_log_info(b2r2_blt->dev, "%s\n", __func__); + + blt_data = get_data(handle); + if (blt_data == NULL) { + b2r2_log_warn(b2r2_blt->dev, + "%s, blitter data not found (handle=%d)\n", + __func__, handle); + ret = -ENOSYS; + goto exit; + } + free_handle(handle); + + for (i = 0; i < B2R2_MAX_NBR_DEVICES; i++) { + struct b2r2_control_instance *ci = + blt_data->ctl_instace[i]; + if (ci != NULL) { + struct b2r2_control *cont = + b2r2_blt_get_control(ci->control_id); + if (cont) { + /* Release the instance */ + b2r2_control_release(ci); + b2r2_blt_release_control(ci->control_id); + } + kfree(ci); + } + } + kfree(blt_data); + +exit: + kref_put(&blt_refcount, b2r2_blt_release); + + return ret; +} +EXPORT_SYMBOL(b2r2_blt_close); + +int b2r2_blt_request(int handle, + struct b2r2_blt_req *user_req) +{ + int ret = 0; + + if (!atomic_inc_not_zero(&blt_refcount.refcount)) + return -ENOSYS; + + /* Exclude some currently unsupported cases */ + if ((user_req->flags & B2R2_BLT_FLAG_REPORT_WHEN_DONE) || + (user_req->flags & B2R2_BLT_FLAG_REPORT_PERFORMANCE) || + (user_req->report1 != 0)) { + b2r2_log_err(b2r2_blt->dev, + "%s No callback support in the kernel API\n", + __func__); + ret = -ENOSYS; + goto exit; + } + + ret = b2r2_blt_blit_internal(handle, user_req, false); + +exit: + kref_put(&blt_refcount, b2r2_blt_release); + + return ret; +} +EXPORT_SYMBOL(b2r2_blt_request); + +int b2r2_blt_synch(int handle, int request_id) +{ + int ret = 0; + int i; + int n_synch = 0; + struct b2r2_control_instance *ctl[B2R2_MAX_NBR_DEVICES]; + struct b2r2_blt_data *blt_data; + + if (!atomic_inc_not_zero(&blt_refcount.refcount)) + return -ENOSYS; + + b2r2_log_info(b2r2_blt->dev, "%s\n", __func__); + + blt_data = get_data(handle); + if (blt_data == NULL) { + b2r2_log_warn(b2r2_blt->dev, + "%s, blitter data not found (handle=%d)\n", + __func__, handle); + ret = -ENOSYS; + goto exit; + } + + /* Get the b2r2 core 
controls for the job */ + get_control_instances(blt_data, ctl, B2R2_MAX_NBR_DEVICES, &n_synch); + if (n_synch == 0) { + b2r2_log_err(b2r2_blt->dev, "%s: No b2r2 cores available.\n", + __func__); + ret = -ENOSYS; + goto exit; + } + + for (i = 0; i < n_synch; i++) { + ret = b2r2_control_synch(ctl[i], request_id); + if (ret != 0) { + b2r2_log_err(b2r2_blt->dev, + "%s: b2r2_control_synch failed.\n", + __func__); + break; + } + } + + /* Release the handles to the core controls */ + release_control_instances(ctl, n_synch); + +exit: + kref_put(&blt_refcount, b2r2_blt_release); + + b2r2_log_info(b2r2_blt->dev, + "%s, request_id=%d, returns %d\n", __func__, request_id, ret); + + return ret; +} +EXPORT_SYMBOL(b2r2_blt_synch); + +/** + * The user space API + */ + +/** + * b2r2_blt_open_us - Implements file open on the b2r2_blt device + * + * @inode: File system inode + * @filp: File pointer + * + * A b2r2_blt_data handle is created and stored in the file structure. + */ +static int b2r2_blt_open_us(struct inode *inode, struct file *filp) +{ + int ret = 0; + int handle; + + handle = b2r2_blt_open(); + if (handle < 0) { + b2r2_log_err(b2r2_blt->dev, "%s: Failed to open handle\n", + __func__); + ret = handle; + goto exit; + } + filp->private_data = (void *) handle; +exit: + return ret; +} + +/** + * b2r2_blt_release_us - Implements last close on an instance of + * the b2r2_blt device + * + * @inode: File system inode + * @filp: File pointer + * + * All active jobs are finished or cancelled and allocated data + * is released. 
+ */ +static int b2r2_blt_release_us(struct inode *inode, struct file *filp) +{ + int ret; + ret = b2r2_blt_close((int) filp->private_data); + return ret; +} + +/** + * Query B2R2 capabilities + * + * @blt_data: The B2R2 BLT instance + * @query_cap: The structure receiving the capabilities + */ +static int b2r2_blt_query_cap(struct b2r2_blt_data *blt_data, + struct b2r2_blt_query_cap *query_cap) +{ + /* FIXME: Not implemented yet */ + return -ENOSYS; +} + +/** + * b2r2_blt_ioctl_us - This routine implements b2r2_blt ioctl interface + * + * @file: file pointer. + * @cmd :ioctl command. + * @arg: input argument for ioctl. + * + * Returns 0 if OK else negative error code + */ +static long b2r2_blt_ioctl_us(struct file *file, + unsigned int cmd, unsigned long arg) +{ + int ret = 0; + int handle = (int) file->private_data; + + /** Process actual ioctl */ + b2r2_log_info(b2r2_blt->dev, "%s\n", __func__); + + /* Get the instance from the file structure */ + switch (cmd) { + case B2R2_BLT_IOC: { + /* arg is user pointer to struct b2r2_blt_request */ + ret = b2r2_blt_blit_internal(handle, + (struct b2r2_blt_req *) arg, true); + break; + } + + case B2R2_BLT_SYNCH_IOC: + /* arg is request_id */ + ret = b2r2_blt_synch(handle, (int) arg); + break; + + case B2R2_BLT_QUERY_CAP_IOC: { + /* Arg is struct b2r2_blt_query_cap */ + struct b2r2_blt_query_cap query_cap; + struct b2r2_blt_data *blt_data = get_data(handle); + + /* Get the user data */ + if (copy_from_user(&query_cap, (void *)arg, + sizeof(query_cap))) { + b2r2_log_err(b2r2_blt->dev, + "%s: copy_from_user failed\n", + __func__); + ret = -EFAULT; + goto exit; + } + + /* Fill in our capabilities */ + ret = b2r2_blt_query_cap(blt_data, &query_cap); + + /* Return data to user */ + if (copy_to_user((void *)arg, &query_cap, + sizeof(query_cap))) { + b2r2_log_err(b2r2_blt->dev, + "%s: copy_to_user failed\n", + __func__); + ret = -EFAULT; + goto exit; + } + break; + } + + default: + /* Unknown command */ + 
b2r2_log_err(b2r2_blt->dev, "%s: Unknown cmd %d\n", + __func__, cmd); + ret = -EINVAL; + break; + + } + +exit: + if (ret < 0) + b2r2_log_err(b2r2_blt->dev, "%s: Return with error %d!\n", + __func__, -ret); + + return ret; +} + +/** + * b2r2_blt_poll - Support for user-space poll, select & epoll. + * Used for user-space callback + * + * @filp: File to poll on + * @wait: Poll table to wait on + * + * This function checks if there are anything to read + */ +static unsigned b2r2_blt_poll_us(struct file *filp, poll_table *wait) +{ + struct b2r2_blt_data *blt_data = + (struct b2r2_blt_data *) filp->private_data; + struct b2r2_control_instance *ctl[B2R2_MAX_NBR_DEVICES]; + unsigned int ret = POLLIN | POLLRDNORM; + int n_poll = 0; + int i; + + b2r2_log_info(b2r2_blt->dev, "%s\n", __func__); + + /* Get the b2r2 core controls for the job */ + get_control_instances(blt_data, ctl, B2R2_MAX_NBR_DEVICES, &n_poll); + if (n_poll == 0) { + b2r2_log_err(b2r2_blt->dev, "%s: No b2r2 cores available.\n", + __func__); + ret = -ENOSYS; + goto exit; + } + + /* Poll each core control instance */ + for (i = 0; i < n_poll && ret != 0; i++) { + poll_wait(filp, &ctl[i]->report_list_waitq, wait); + mutex_lock(&ctl[i]->lock); + if (list_empty(&ctl[i]->report_list)) + ret = 0; /* No reports */ + mutex_unlock(&ctl[i]->lock); + } + + /* Release the handles to the core controls */ + release_control_instances(ctl, n_poll); + +exit: + b2r2_log_info(b2r2_blt->dev, "%s: returns %d, n_poll: %d\n", + __func__, ret, n_poll); + + return ret; +} + +/** + * b2r2_blt_read - Read report data, user for user-space callback + * + * @filp: File pointer + * @buf: User space buffer + * @count: Number of bytes to read + * @f_pos: File position + * + * Returns number of bytes read or negative error code + */ +static ssize_t b2r2_blt_read_us(struct file *filp, + char __user *buf, size_t count, loff_t *f_pos) +{ + int ret = 0; + int n_read = 0; + int i; + int first_index = 0; + struct b2r2_blt_report report; + struct 
b2r2_blt_request *requests[B2R2_MAX_NBR_DEVICES]; + struct b2r2_blt_data *blt_data = + (struct b2r2_blt_data *) filp->private_data; + struct b2r2_control_instance *ctl[B2R2_MAX_NBR_DEVICES]; + struct b2r2_control_instance *first = NULL; + bool block = ((filp->f_flags & O_NONBLOCK) == 0); + u32 core_mask = 0; + + b2r2_log_info(b2r2_blt->dev, "%s\n", __func__); + + /* + * We return only complete report records, one at a time. + * Might be more efficient to support read of many. + */ + count = (count / sizeof(struct b2r2_blt_report)) * + sizeof(struct b2r2_blt_report); + if (count > sizeof(struct b2r2_blt_report)) + count = sizeof(struct b2r2_blt_report); + if (count == 0) + return count; + + memset(ctl, 0, sizeof(*ctl) * B2R2_MAX_NBR_DEVICES); + /* Get the b2r2 core controls for the job */ + for (i = 0; i < B2R2_MAX_NBR_DEVICES; i++) { + struct b2r2_control_instance *ci = blt_data->ctl_instace[i]; + if (ci) { + struct b2r2_control *cont = + b2r2_blt_get_control(ci->control_id); + if (cont) { + ctl[i] = ci; + n_read++; + } + } + } + if (n_read == 0) { + b2r2_log_err(b2r2_blt->dev, "%s: No b2r2 cores available.\n", + __func__); + return -ENOSYS; + } + + /* Find which control to ask for a report first */ + for (i = 0; i < B2R2_MAX_NBR_DEVICES; i++) { + if (ctl[i] != NULL) { + first = ctl[i]; + first_index = i; + break; + } + } + if (!first) { + b2r2_log_err(b2r2_blt->dev, "%s: Internal error.\n", + __func__); + return -ENOSYS; + } + + memset(requests, 0, sizeof(*requests) * B2R2_MAX_NBR_DEVICES); + /* Read report from core 0 */ + ret = b2r2_control_read(first, &requests[first_index], block); + if (ret <= 0 || requests[0] == NULL) { + b2r2_log_err(b2r2_blt->dev, "%s: b2r2_control_read failed.\n", + __func__); + ret = -EFAULT; + goto exit; + } + core_mask = requests[first_index]->core_mask >> 1; + core_mask &= ~(1 << first_index); + + /* If there are any more cores, try reading the report + * with the specific ID from the other cores */ + for (i = 0; i < 
B2R2_MAX_NBR_DEVICES; i++) { + if ((core_mask & 1) && (ctl[i] != NULL)) { + /* TODO: Do we need to wait here? */ + ret = b2r2_control_read_id(ctl[i], &requests[i], block, + requests[first_index]->request_id); + if (ret <= 0 || requests[i] == NULL) { + b2r2_log_err(b2r2_blt->dev, + "%s: b2r2_control_read failed.\n", + __func__); + break; + } + } + core_mask = core_mask >> 1; + } + + if (ret > 0) { + /* Construct a report and copy to userland */ + report.request_id = requests[0]->request_id; + report.report1 = requests[0]->user_req.report1; + report.report2 = requests[0]->user_req.report2; + report.usec_elapsed = 0; /* TBD */ + + if (copy_to_user(buf, &report, sizeof(report))) { + b2r2_log_err(b2r2_blt->dev, + "%s: copy_to_user failed.\n", + __func__); + ret = -EFAULT; + } + } + + if (ret > 0) { + for (i = 0; i < B2R2_MAX_NBR_DEVICES; i++) + /* + * Release matching the addref when the job was put + * into the report list + */ + if (requests[i] != NULL) + b2r2_core_job_release(&requests[i]->job, + __func__); + } else { + /* We failed at one core or copy to user failed */ + for (i = 0; i < B2R2_MAX_NBR_DEVICES; i++) + if (requests[i] != NULL) + list_add(&requests[i]->list, + &ctl[i]->report_list); + goto exit; + } + + ret = count; + +exit: + /* Release the handles to the core controls */ + release_control_instances(ctl, n_read); + + return ret; +} + +/** + * b2r2_blt_fops - File operations for b2r2_blt + */ +static const struct file_operations b2r2_blt_fops = { + .owner = THIS_MODULE, + .open = b2r2_blt_open_us, + .release = b2r2_blt_release_us, + .unlocked_ioctl = b2r2_blt_ioctl_us, + .poll = b2r2_blt_poll_us, + .read = b2r2_blt_read_us, +}; + + +/** + * b2r2_probe() - This routine loads the B2R2 core driver + * + * @pdev: platform device. 
+ */ +static int b2r2_blt_probe(struct platform_device *pdev) +{ + int ret = 0; + int i; + + BUG_ON(pdev == NULL); + + dev_info(&pdev->dev, "%s start.\n", __func__); + + if (!b2r2_blt) { + b2r2_blt = kzalloc(sizeof(*b2r2_blt), GFP_KERNEL); + if (!b2r2_blt) { + dev_err(&pdev->dev, "b2r2_blt alloc failed\n"); + ret = -EINVAL; + goto error_exit; + } + + /* Init b2r2 core control reference counters */ + for (i = 0; i < B2R2_MAX_NBR_DEVICES; i++) + spin_lock_init(&b2r2_controls[i].lock); + } + + mutex_init(&b2r2_blt->datas_lock); + spin_lock_init(&b2r2_blt->lock); + b2r2_blt->dev = &pdev->dev; + + /* Register b2r2 driver */ + b2r2_blt->miscdev.parent = b2r2_blt->dev; + b2r2_blt->miscdev.minor = MISC_DYNAMIC_MINOR; + b2r2_blt->miscdev.name = "b2r2_blt"; + b2r2_blt->miscdev.fops = &b2r2_blt_fops; + + ret = misc_register(&b2r2_blt->miscdev); + if (ret != 0) { + printk(KERN_WARNING "%s: registering misc device fails\n", + __func__); + goto error_exit; + } + + b2r2_blt->dev = b2r2_blt->miscdev.this_device; + b2r2_blt->dev->coherent_dma_mask = 0xFFFFFFFF; + + kref_init(&blt_refcount); + + dev_info(&pdev->dev, "%s done.\n", __func__); + + return ret; + +/** Recover from any error if something fails */ +error_exit: + + kfree(b2r2_blt); + + dev_info(&pdev->dev, "%s done with errors (%d).\n", __func__, ret); + + return ret; +} + +/** + * b2r2_blt_remove - This routine unloads b2r2_blt driver + * + * @pdev: platform device. + */ +static int b2r2_blt_remove(struct platform_device *pdev) +{ + BUG_ON(pdev == NULL); + dev_info(&pdev->dev, "%s started.\n", __func__); + misc_deregister(&b2r2_blt->miscdev); + kref_put(&blt_refcount, b2r2_blt_release); + return 0; +} + +/** + * b2r2_blt_suspend() - This routine puts the B2R2 blitter in to sustend state. + * @pdev: platform device. + * + * This routine stores the current state of the b2r2 device and puts in to + * suspend state. 
+ * + */ +int b2r2_blt_suspend(struct platform_device *pdev, pm_message_t state) +{ + return 0; +} + +/** + * b2r2_blt_resume() - This routine resumes the B2R2 blitter from sustend state. + * @pdev: platform device. + * + * This routine restore back the current state of the b2r2 device resumes. + * + */ +int b2r2_blt_resume(struct platform_device *pdev) +{ + return 0; +} + +/** + * struct platform_b2r2_driver - Platform driver configuration for the + * B2R2 core driver + */ +static struct platform_driver platform_b2r2_blt_driver = { + .remove = b2r2_blt_remove, + .driver = { + .name = "b2r2_blt", + }, + .suspend = b2r2_blt_suspend, + .resume = b2r2_blt_resume, +}; + +/** + * b2r2_init() - Module init function for the B2R2 core module + */ +static int __init b2r2_blt_init(void) +{ + printk(KERN_INFO "%s\n", __func__); + return platform_driver_probe(&platform_b2r2_blt_driver, b2r2_blt_probe); +} +module_init(b2r2_blt_init); + +/** + * b2r2_exit() - Module exit function for the B2R2 core module + */ +static void __exit b2r2_blt_exit(void) +{ + printk(KERN_INFO "%s\n", __func__); + platform_driver_unregister(&platform_b2r2_blt_driver); + return; +} +module_exit(b2r2_blt_exit); + +MODULE_AUTHOR("Robert Fekete <robert.fekete@stericsson.com>"); +MODULE_DESCRIPTION("ST-Ericsson B2R2 Blitter module"); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/b2r2/b2r2_blt_main.c b/drivers/video/b2r2/b2r2_blt_main.c new file mode 100644 index 00000000000..3727f742bf1 --- /dev/null +++ b/drivers/video/b2r2/b2r2_blt_main.c @@ -0,0 +1,2840 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 Blitter module + * + * Author: Robert Fekete <robert.fekete@stericsson.com> + * Author: Paul Wannback + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/file.h> +#include <linux/poll.h> +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/miscdevice.h> +#include <linux/list.h> +#ifdef CONFIG_ANDROID_PMEM +#include <linux/android_pmem.h> +#endif +#include <linux/fb.h> +#include <linux/uaccess.h> +#ifdef CONFIG_DEBUG_FS +#include <linux/debugfs.h> +#endif +#include <asm/cacheflush.h> +#include <linux/smp.h> +#include <linux/dma-mapping.h> +#include <linux/sched.h> +#include <linux/err.h> +#include <linux/hwmem.h> + +#include "b2r2_internal.h" +#include "b2r2_control.h" +#include "b2r2_node_split.h" +#include "b2r2_generic.h" +#include "b2r2_mem_alloc.h" +#include "b2r2_profiler_socket.h" +#include "b2r2_timing.h" +#include "b2r2_debug.h" +#include "b2r2_utils.h" +#include "b2r2_input_validation.h" +#include "b2r2_core.h" +#include "b2r2_filters.h" + +#define B2R2_HEAP_SIZE (4 * PAGE_SIZE) +#define MAX_TMP_BUF_SIZE (128 * PAGE_SIZE) + +/* + * TODO: + * Implementation of query cap + * Support for user space virtual pointer to physically consecutive memory + * Support for user space virtual pointer to physically scattered memory + * Callback reads lagging behind in blt_api_stress app + * Store smaller items in the report list instead of the whole request + * Support read of many report records at once. 
+ */ + +/* Local functions */ +static void inc_stat(struct b2r2_control *cont, unsigned long *stat); +static void dec_stat(struct b2r2_control *cont, unsigned long *stat); + +#ifndef CONFIG_B2R2_GENERIC_ONLY +static void job_callback(struct b2r2_core_job *job); +static void job_release(struct b2r2_core_job *job); +static int job_acquire_resources(struct b2r2_core_job *job, bool atomic); +static void job_release_resources(struct b2r2_core_job *job, bool atomic); +#endif + +#ifdef CONFIG_B2R2_GENERIC +static void job_callback_gen(struct b2r2_core_job *job); +static void job_release_gen(struct b2r2_core_job *job); +static int job_acquire_resources_gen(struct b2r2_core_job *job, bool atomic); +static void job_release_resources_gen(struct b2r2_core_job *job, bool atomic); +static void tile_job_callback_gen(struct b2r2_core_job *job); +static void tile_job_release_gen(struct b2r2_core_job *job); +#endif + + +static int resolve_buf(struct b2r2_control *cont, + struct b2r2_blt_img *img, struct b2r2_blt_rect *rect_2b_used, + bool is_dst, struct b2r2_resolved_buf *resolved); +static void unresolve_buf(struct b2r2_control *cont, + struct b2r2_blt_buf *buf, struct b2r2_resolved_buf *resolved); +static void sync_buf(struct b2r2_control *cont, struct b2r2_blt_img *img, + struct b2r2_resolved_buf *resolved, bool is_dst, + struct b2r2_blt_rect *rect); +static bool is_report_list_empty(struct b2r2_control_instance *instance); +static bool is_synching(struct b2r2_control_instance *instance); +static void get_actual_dst_rect(struct b2r2_blt_req *req, + struct b2r2_blt_rect *actual_dst_rect); +static void set_up_hwmem_region(struct b2r2_control *cont, + struct b2r2_blt_img *img, struct b2r2_blt_rect *rect, + struct hwmem_region *region); +static int resolve_hwmem(struct b2r2_control *cont, struct b2r2_blt_img *img, + struct b2r2_blt_rect *rect_2b_used, bool is_dst, + struct b2r2_resolved_buf *resolved_buf); +static void unresolve_hwmem(struct b2r2_resolved_buf *resolved_buf); + +/** + 
* struct sync_args - Data for clean/flush + * + * @start: Virtual start address + * @end: Virtual end address + */ +struct sync_args { + unsigned long start; + unsigned long end; +}; +/** + * flush_l1_cache_range_curr_cpu() - Cleans and invalidates L1 cache on the + * current CPU + * + * @arg: Pointer to sync_args structure + */ +static inline void flush_l1_cache_range_curr_cpu(void *arg) +{ + struct sync_args *sa = (struct sync_args *)arg; + + dmac_flush_range((void *)sa->start, (void *)sa->end); +} + +#ifdef CONFIG_SMP +/** + * inv_l1_cache_range_all_cpus() - Cleans and invalidates L1 cache on all CPU:s + * + * @sa: Pointer to sync_args structure + */ +static void flush_l1_cache_range_all_cpus(struct sync_args *sa) +{ + on_each_cpu(flush_l1_cache_range_curr_cpu, sa, 1); +} +#endif + +/** + * clean_l1_cache_range_curr_cpu() - Cleans L1 cache on current CPU + * + * Ensures that data is written out from the CPU:s L1 cache, + * it will still be in the cache. + * + * @arg: Pointer to sync_args structure + */ +static inline void clean_l1_cache_range_curr_cpu(void *arg) +{ + struct sync_args *sa = (struct sync_args *)arg; + + dmac_map_area((void *)sa->start, + (void *)sa->end - (void *)sa->start, + DMA_TO_DEVICE); +} + +#ifdef CONFIG_SMP +/** + * clean_l1_cache_range_all_cpus() - Cleans L1 cache on all CPU:s + * + * Ensures that data is written out from all CPU:s L1 cache, + * it will still be in the cache. + * + * @sa: Pointer to sync_args structure + */ +static void clean_l1_cache_range_all_cpus(struct sync_args *sa) +{ + on_each_cpu(clean_l1_cache_range_curr_cpu, sa, 1); +} +#endif + +/** + * b2r2_blt_open - Implements file open on the b2r2_blt device + * + * @inode: File system inode + * @filp: File pointer + * + * A B2R2 BLT instance is created and stored in the file structure. 
+ */ +int b2r2_control_open(struct b2r2_control_instance *instance) +{ + int ret = 0; + struct b2r2_control *cont = instance->control; + + b2r2_log_info(cont->dev, "%s\n", __func__); + inc_stat(cont, &cont->stat_n_in_open); + + INIT_LIST_HEAD(&instance->report_list); + mutex_init(&instance->lock); + init_waitqueue_head(&instance->report_list_waitq); + init_waitqueue_head(&instance->synch_done_waitq); + dec_stat(cont, &cont->stat_n_in_open); + + return ret; +} + +/** + * b2r2_blt_release - Implements last close on an instance of + * the b2r2_blt device + * + * @inode: File system inode + * @filp: File pointer + * + * All active jobs are finished or cancelled and allocated data + * is released. + */ +int b2r2_control_release(struct b2r2_control_instance *instance) +{ + int ret; + struct b2r2_control *cont = instance->control; + + b2r2_log_info(cont->dev, "%s\n", __func__); + + inc_stat(cont, &cont->stat_n_in_release); + + /* Finish all outstanding requests */ + ret = b2r2_control_synch(instance, 0); + if (ret < 0) + b2r2_log_warn(cont->dev, "%s: b2r2_blt_sync failed with %d\n", + __func__, ret); + + /* Now cancel any remaining outstanding request */ + if (instance->no_of_active_requests) { + struct b2r2_core_job *job; + + b2r2_log_warn(cont->dev, "%s: %d active requests\n", __func__, + instance->no_of_active_requests); + + /* Find and cancel all jobs belonging to us */ + job = b2r2_core_job_find_first_with_tag(cont, + (int) instance); + while (job) { + b2r2_core_job_cancel(job); + /* Matches addref in b2r2_core_job_find... 
*/ + b2r2_core_job_release(job, __func__); + job = b2r2_core_job_find_first_with_tag(cont, + (int) instance); + } + + b2r2_log_warn(cont->dev, "%s: %d active requests after " + "cancel\n", __func__, instance->no_of_active_requests); + } + + /* Release jobs in report list */ + mutex_lock(&instance->lock); + while (!list_empty(&instance->report_list)) { + struct b2r2_blt_request *request = list_first_entry( + &instance->report_list, + struct b2r2_blt_request, + list); + list_del_init(&request->list); + mutex_unlock(&instance->lock); + /* + * This release matches the addref when the job was put into + * the report list + */ + b2r2_core_job_release(&request->job, __func__); + mutex_lock(&instance->lock); + } + mutex_unlock(&instance->lock); + + dec_stat(cont, &cont->stat_n_in_release); + + return 0; +} + +size_t b2r2_control_read(struct b2r2_control_instance *instance, + struct b2r2_blt_request **request_out, bool block) +{ + struct b2r2_blt_request *request = NULL; +#ifdef CONFIG_B2R2_DEBUG + struct b2r2_control *cont = instance->control; +#endif + + b2r2_log_info(cont->dev, "%s\n", __func__); + + /* + * Loop and wait here until we have anything to return or + * until interrupted + */ + mutex_lock(&instance->lock); + while (list_empty(&instance->report_list)) { + mutex_unlock(&instance->lock); + + /* Return if non blocking read */ + if (!block) + return -EAGAIN; + + b2r2_log_info(cont->dev, "%s - Going to sleep\n", __func__); + if (wait_event_interruptible( + instance->report_list_waitq, + !is_report_list_empty(instance))) + /* signal: tell the fs layer to handle it */ + return -ERESTARTSYS; + + /* Otherwise loop, but first reaquire the lock */ + mutex_lock(&instance->lock); + } + + if (!list_empty(&instance->report_list)) + request = list_first_entry( + &instance->report_list, struct b2r2_blt_request, list); + + if (request) { + /* Remove from list to avoid reading twice */ + list_del_init(&request->list); + + *request_out = request; + } + 
mutex_unlock(&instance->lock); + + if (request) + return 1; + + /* No report returned */ + return 0; +} + +size_t b2r2_control_read_id(struct b2r2_control_instance *instance, + struct b2r2_blt_request **request_out, bool block, + int request_id) +{ + struct b2r2_blt_request *request = NULL; +#ifdef CONFIG_B2R2_DEBUG + struct b2r2_control *cont = instance->control; +#endif + + b2r2_log_info(cont->dev, "%s\n", __func__); + + /* + * Loop and wait here until we have anything to return or + * until interrupted + */ + mutex_lock(&instance->lock); + while (list_empty(&instance->report_list)) { + mutex_unlock(&instance->lock); + + /* Return if non blocking read */ + if (!block) + return -EAGAIN; + + b2r2_log_info(cont->dev, "%s - Going to sleep\n", __func__); + if (wait_event_interruptible( + instance->report_list_waitq, + !is_report_list_empty(instance))) + /* signal: tell the fs layer to handle it */ + return -ERESTARTSYS; + + /* Otherwise loop, but first reaquire the lock */ + mutex_lock(&instance->lock); + } + + if (!list_empty(&instance->report_list)) { + struct b2r2_blt_request *pos; + list_for_each_entry(pos, &instance->report_list, list) { + if (pos->request_id) + request = pos; + } + } + + if (request) { + /* Remove from list to avoid reading twice */ + list_del_init(&request->list); + *request_out = request; + } + mutex_unlock(&instance->lock); + + if (request) + return 1; + + /* No report returned */ + return 0; +} + +#ifndef CONFIG_B2R2_GENERIC_ONLY +/** + * b2r2_blt - Implementation of the B2R2 blit request + * + * @instance: The B2R2 BLT instance + * @request; The request to perform + */ +int b2r2_control_blt(struct b2r2_blt_request *request) +{ + int ret = 0; + struct b2r2_blt_rect actual_dst_rect; + int request_id = 0; + struct b2r2_node *last_node = request->first_node; + int node_count; + struct b2r2_control_instance *instance = request->instance; + struct b2r2_control *cont = instance->control; + + u32 thread_runtime_at_start = 0; + + if 
(request->profile) { + request->start_time_nsec = b2r2_get_curr_nsec(); + thread_runtime_at_start = (u32)task_sched_runtime(current); + } + + b2r2_log_info(cont->dev, "%s\n", __func__); + + inc_stat(cont, &cont->stat_n_in_blt); + + /* Debug prints of incoming request */ + b2r2_log_info(cont->dev, + "src.fmt=%#010x src.buf={%d,%d,%d} " + "src.w,h={%d,%d} src.rect={%d,%d,%d,%d}\n", + request->user_req.src_img.fmt, + request->user_req.src_img.buf.type, + request->user_req.src_img.buf.fd, + request->user_req.src_img.buf.offset, + request->user_req.src_img.width, + request->user_req.src_img.height, + request->user_req.src_rect.x, + request->user_req.src_rect.y, + request->user_req.src_rect.width, + request->user_req.src_rect.height); + + if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND) + b2r2_log_info(cont->dev, + "bg.fmt=%#010x bg.buf={%d,%d,%d} " + "bg.w,h={%d,%d} bg.rect={%d,%d,%d,%d}\n", + request->user_req.bg_img.fmt, + request->user_req.bg_img.buf.type, + request->user_req.bg_img.buf.fd, + request->user_req.bg_img.buf.offset, + request->user_req.bg_img.width, + request->user_req.bg_img.height, + request->user_req.bg_rect.x, + request->user_req.bg_rect.y, + request->user_req.bg_rect.width, + request->user_req.bg_rect.height); + + b2r2_log_info(cont->dev, + "dst.fmt=%#010x dst.buf={%d,%d,%d} " + "dst.w,h={%d,%d} dst.rect={%d,%d,%d,%d}\n", + request->user_req.dst_img.fmt, + request->user_req.dst_img.buf.type, + request->user_req.dst_img.buf.fd, + request->user_req.dst_img.buf.offset, + request->user_req.dst_img.width, + request->user_req.dst_img.height, + request->user_req.dst_rect.x, + request->user_req.dst_rect.y, + request->user_req.dst_rect.width, + request->user_req.dst_rect.height); + + inc_stat(cont, &cont->stat_n_in_blt_synch); + + /* Wait here if synch is ongoing */ + ret = wait_event_interruptible(instance->synch_done_waitq, + !is_synching(instance)); + if (ret) { + b2r2_log_warn(cont->dev, "%s: Sync wait interrupted, %d\n", + __func__, ret); + ret = 
-EAGAIN; + dec_stat(cont, &cont->stat_n_in_blt_synch); + goto synch_interrupted; + } + + dec_stat(cont, &cont->stat_n_in_blt_synch); + + /* Resolve the buffers */ + + /* Source buffer */ + ret = resolve_buf(cont, &request->user_req.src_img, + &request->user_req.src_rect, + false, &request->src_resolved); + if (ret < 0) { + b2r2_log_warn(cont->dev, "%s: Resolve src buf failed, %d\n", + __func__, ret); + ret = -EAGAIN; + goto resolve_src_buf_failed; + } + + /* Background buffer */ + if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND) { + ret = resolve_buf(cont, &request->user_req.bg_img, + &request->user_req.bg_rect, + false, &request->bg_resolved); + if (ret < 0) { + b2r2_log_warn(cont->dev, "%s: Resolve bg buf failed," + " %d\n", __func__, ret); + ret = -EAGAIN; + goto resolve_bg_buf_failed; + } + } + + /* Source mask buffer */ + ret = resolve_buf(cont, &request->user_req.src_mask, + &request->user_req.src_rect, false, + &request->src_mask_resolved); + if (ret < 0) { + b2r2_log_warn(cont->dev, "%s: Resolve src mask buf failed," + " %d\n", __func__, ret); + ret = -EAGAIN; + goto resolve_src_mask_buf_failed; + } + + /* Destination buffer */ + get_actual_dst_rect(&request->user_req, &actual_dst_rect); + ret = resolve_buf(cont, &request->user_req.dst_img, &actual_dst_rect, + true, &request->dst_resolved); + if (ret < 0) { + b2r2_log_warn(cont->dev, "%s: Resolve dst buf failed, %d\n", + __func__, ret); + ret = -EAGAIN; + goto resolve_dst_buf_failed; + } + + /* Debug prints of resolved buffers */ + b2r2_log_info(cont->dev, "src.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n", + request->src_resolved.physical_address, + request->src_resolved.virtual_address, + request->src_resolved.is_pmem, + request->src_resolved.filep, + request->src_resolved.file_physical_start, + request->src_resolved.file_virtual_start, + request->src_resolved.file_len); + + if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND) + b2r2_log_info(cont->dev, "bg.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n", + 
request->bg_resolved.physical_address, + request->bg_resolved.virtual_address, + request->bg_resolved.is_pmem, + request->bg_resolved.filep, + request->bg_resolved.file_physical_start, + request->bg_resolved.file_virtual_start, + request->bg_resolved.file_len); + + b2r2_log_info(cont->dev, "dst.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n", + request->dst_resolved.physical_address, + request->dst_resolved.virtual_address, + request->dst_resolved.is_pmem, + request->dst_resolved.filep, + request->dst_resolved.file_physical_start, + request->dst_resolved.file_virtual_start, + request->dst_resolved.file_len); + + /* Calculate the number of nodes (and resources) needed for this job */ + ret = b2r2_node_split_analyze(request, MAX_TMP_BUF_SIZE, &node_count, + &request->bufs, &request->buf_count, + &request->node_split_job); + if (ret == -ENOSYS) { + /* There was no optimized path for this request */ + b2r2_log_info(cont->dev, "%s: No optimized path for request\n", + __func__); + goto no_optimized_path; + + } else if (ret < 0) { + b2r2_log_warn(cont->dev, "%s: Failed to analyze request," + " ret = %d\n", __func__, ret); +#ifdef CONFIG_DEBUG_FS + { + /* Failed, dump job to dmesg */ + char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL); + + b2r2_log_info(cont->dev, "%s: Analyze failed for:\n", + __func__); + if (Buf != NULL) { + sprintf_req(request, Buf, sizeof(char) * 4096); + b2r2_log_info(cont->dev, "%s", Buf); + kfree(Buf); + } else { + b2r2_log_info(cont->dev, "Unable to print the" + " request. 
Message buffer" + " allocation failed.\n"); + } + } +#endif + goto generate_nodes_failed; + } + + /* Allocate the nodes needed */ +#ifdef B2R2_USE_NODE_GEN + request->first_node = b2r2_blt_alloc_nodes(cont, + node_count); + if (request->first_node == NULL) { + b2r2_log_warn(cont->dev, "%s: Failed to allocate nodes," + " ret = %d\n", __func__, ret); + goto generate_nodes_failed; + } +#else + ret = b2r2_node_alloc(cont, node_count, &(request->first_node)); + if (ret < 0 || request->first_node == NULL) { + b2r2_log_warn(cont->dev, + "%s: Failed to allocate nodes, ret = %d\n", + __func__, ret); + goto generate_nodes_failed; + } +#endif + + /* Build the B2R2 node list */ + ret = b2r2_node_split_configure(cont, &request->node_split_job, + request->first_node); + + if (ret < 0) { + b2r2_log_warn(cont->dev, "%s:" + " Failed to perform node split, ret = %d\n", + __func__, ret); + goto generate_nodes_failed; + } + + /* Exit here if dry run */ + if (request->user_req.flags & B2R2_BLT_FLAG_DRY_RUN) + goto exit_dry_run; + + /* Configure the request */ + last_node = request->first_node; + while (last_node && last_node->next) + last_node = last_node->next; + + request->job.tag = (int) instance; + request->job.data = (int) cont->data; + request->job.prio = request->user_req.prio; + request->job.first_node_address = + request->first_node->physical_address; + request->job.last_node_address = + last_node->physical_address; + request->job.callback = job_callback; + request->job.release = job_release; + request->job.acquire_resources = job_acquire_resources; + request->job.release_resources = job_release_resources; + + /* Synchronize memory occupied by the buffers */ + + /* Source buffer */ + if (!(request->user_req.flags & + B2R2_BLT_FLAG_SRC_NO_CACHE_FLUSH) && + (request->user_req.src_img.buf.type != + B2R2_BLT_PTR_PHYSICAL) && + !b2r2_is_mb_fmt(request->user_req.src_img.fmt)) + /* MB formats are never touched by SW */ + sync_buf(cont, &request->user_req.src_img, + 
&request->src_resolved, false, + &request->user_req.src_rect); + + /* Background buffer */ + if ((request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND) && + !(request->user_req.flags & + B2R2_BLT_FLAG_BG_NO_CACHE_FLUSH) && + (request->user_req.bg_img.buf.type != + B2R2_BLT_PTR_PHYSICAL) && + !b2r2_is_mb_fmt(request->user_req.bg_img.fmt)) + /* MB formats are never touched by SW */ + sync_buf(cont, &request->user_req.bg_img, + &request->bg_resolved, false, + &request->user_req.bg_rect); + + /* Source mask buffer */ + if (!(request->user_req.flags & + B2R2_BLT_FLAG_SRC_MASK_NO_CACHE_FLUSH) && + (request->user_req.src_mask.buf.type != + B2R2_BLT_PTR_PHYSICAL) && + !b2r2_is_mb_fmt(request->user_req.src_mask.fmt)) + /* MB formats are never touched by SW */ + sync_buf(cont, &request->user_req.src_mask, + &request->src_mask_resolved, false, NULL); + + /* Destination buffer */ + if (!(request->user_req.flags & + B2R2_BLT_FLAG_DST_NO_CACHE_FLUSH) && + (request->user_req.dst_img.buf.type != + B2R2_BLT_PTR_PHYSICAL) && + !b2r2_is_mb_fmt(request->user_req.dst_img.fmt)) + /* MB formats are never touched by SW */ + sync_buf(cont, &request->user_req.dst_img, + &request->dst_resolved, true, + &request->user_req.dst_rect); + +#ifdef CONFIG_DEBUG_FS + /* Remember latest request for debugfs */ + cont->debugfs_latest_request = *request; +#endif + + /* Submit the job */ + b2r2_log_info(cont->dev, "%s: Submitting job\n", __func__); + + inc_stat(cont, &cont->stat_n_in_blt_add); + + if (request->profile) + request->nsec_active_in_cpu = + (s32)((u32)task_sched_runtime(current) - + thread_runtime_at_start); + + mutex_lock(&instance->lock); + + /* Add the job to b2r2_core */ + request_id = b2r2_core_job_add(cont, &request->job); + request->request_id = request_id; + + dec_stat(cont, &cont->stat_n_in_blt_add); + + if (request_id < 0) { + b2r2_log_warn(cont->dev, "%s: Failed to add job, ret = %d\n", + __func__, request_id); + ret = request_id; + mutex_unlock(&instance->lock); + goto job_add_failed; 
+ } + + inc_stat(cont, &cont->stat_n_jobs_added); + + instance->no_of_active_requests++; + mutex_unlock(&instance->lock); + + return ret >= 0 ? request_id : ret; + +job_add_failed: +exit_dry_run: +no_optimized_path: +generate_nodes_failed: + unresolve_buf(cont, &request->user_req.dst_img.buf, + &request->dst_resolved); +resolve_dst_buf_failed: + unresolve_buf(cont, &request->user_req.src_mask.buf, + &request->src_mask_resolved); +resolve_src_mask_buf_failed: + if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND) + unresolve_buf(cont, &request->user_req.bg_img.buf, + &request->bg_resolved); +resolve_bg_buf_failed: + unresolve_buf(cont, &request->user_req.src_img.buf, + &request->src_resolved); +resolve_src_buf_failed: +synch_interrupted: + if ((request->user_req.flags & B2R2_BLT_FLAG_DRY_RUN) == 0 || ret) + b2r2_log_warn(cont->dev, "%s returns with error %d\n", + __func__, ret); + job_release(&request->job); + dec_stat(cont, &cont->stat_n_jobs_released); + + dec_stat(cont, &cont->stat_n_in_blt); + + return ret; +} + +int b2r2_control_waitjob(struct b2r2_blt_request *request) +{ + int ret = 0; + struct b2r2_control_instance *instance = request->instance; + struct b2r2_control *cont = instance->control; + + /* Wait for the job to be done if synchronous */ + if ((request->user_req.flags & B2R2_BLT_FLAG_ASYNCH) == 0) { + b2r2_log_info(cont->dev, "%s: Synchronous, waiting\n", + __func__); + + inc_stat(cont, &cont->stat_n_in_blt_wait); + + ret = b2r2_core_job_wait(&request->job); + + dec_stat(cont, &cont->stat_n_in_blt_wait); + + if (ret < 0 && ret != -ENOENT) + b2r2_log_warn(cont->dev, "%s: Failed to wait job," + " ret = %d\n", __func__, ret); + else + b2r2_log_info(cont->dev, "%s: Synchronous wait done\n", + __func__); + } + + /* + * Release matching the addref in b2r2_core_job_add, + * the request must not be accessed after this call + */ + b2r2_core_job_release(&request->job, __func__); + dec_stat(cont, &cont->stat_n_in_blt); + + return ret; +} + +/** + * Called when 
job is done or cancelled + * + * @job: The job + */ +static void job_callback(struct b2r2_core_job *job) +{ + struct b2r2_blt_request *request = NULL; + struct b2r2_core *core = NULL; + struct b2r2_control *cont = NULL; + + request = container_of(job, struct b2r2_blt_request, job); + core = (struct b2r2_core *) job->data; + cont = core->control; + + if (cont->dev) + b2r2_log_info(cont->dev, "%s\n", __func__); + + /* Local addref / release within this func */ + b2r2_core_job_addref(job, __func__); + + /* Unresolve the buffers */ + unresolve_buf(cont, &request->user_req.src_img.buf, + &request->src_resolved); + unresolve_buf(cont, &request->user_req.src_mask.buf, + &request->src_mask_resolved); + unresolve_buf(cont, &request->user_req.dst_img.buf, + &request->dst_resolved); + if (request->user_req.flags & B2R2_BLT_FLAG_BG_BLEND) + unresolve_buf(cont, &request->user_req.bg_img.buf, + &request->bg_resolved); + + /* Move to report list if the job shall be reported */ + /* FIXME: Use a smaller struct? 
*/ + /* TODO: In the case of kernel API call, feed an asynch task to the + * instance worker (kthread) instead of polling for a report */ + mutex_lock(&request->instance->lock); + if (request->user_req.flags & B2R2_BLT_FLAG_REPORT_WHEN_DONE) { + /* Move job to report list */ + list_add_tail(&request->list, + &request->instance->report_list); + inc_stat(cont, &cont->stat_n_jobs_in_report_list); + + /* Wake up poll */ + wake_up_interruptible( + &request->instance->report_list_waitq); + + /* Add a reference because we put the job in the report list */ + b2r2_core_job_addref(job, __func__); + } + + /* + * Decrease number of active requests and wake up + * synching threads if active requests reaches zero + */ + BUG_ON(request->instance->no_of_active_requests == 0); + request->instance->no_of_active_requests--; + if (request->instance->synching && + request->instance->no_of_active_requests == 0) { + request->instance->synching = false; + /* Wake up all syncing */ + + wake_up_interruptible_all( + &request->instance->synch_done_waitq); + } + mutex_unlock(&request->instance->lock); + +#ifdef CONFIG_DEBUG_FS + /* Dump job if cancelled */ + if (job->job_state == B2R2_CORE_JOB_CANCELED) { + char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL); + + b2r2_log_info(cont->dev, "%s: Job cancelled:\n", __func__); + if (Buf != NULL) { + sprintf_req(request, Buf, sizeof(char) * 4096); + b2r2_log_info(cont->dev, "%s", Buf); + kfree(Buf); + } else { + b2r2_log_info(cont->dev, "Unable to print the request." + " Message buffer allocation failed.\n"); + } + } +#endif + + if (request->profile) { + request->total_time_nsec = + (s32)(b2r2_get_curr_nsec() - request->start_time_nsec); + b2r2_call_profiler_blt_done(request); + } + + /* Local addref / release within this func */ + b2r2_core_job_release(job, __func__); +} + +/** + * Called when job should be released (free memory etc.) 
+ * + * @job: The job + */ +static void job_release(struct b2r2_core_job *job) +{ + struct b2r2_blt_request *request = NULL; + struct b2r2_core *core = NULL; + struct b2r2_control *cont = NULL; + + request = container_of(job, struct b2r2_blt_request, job); + core = (struct b2r2_core *) job->data; + cont = core->control; + + inc_stat(cont, &cont->stat_n_jobs_released); + + b2r2_log_info(cont->dev, "%s, first_node=%p, ref_count=%d\n", + __func__, request->first_node, request->job.ref_count); + + b2r2_node_split_cancel(cont, &request->node_split_job); + + if (request->first_node) { + b2r2_debug_job_done(cont, request->first_node); +#ifdef B2R2_USE_NODE_GEN + b2r2_blt_free_nodes(cont, request->first_node); +#else + b2r2_node_free(cont, request->first_node); +#endif + } + + /* Release memory for the request */ + if (request->clut != NULL) { + dma_free_coherent(cont->dev, CLUT_SIZE, request->clut, + request->clut_phys_addr); + request->clut = NULL; + request->clut_phys_addr = 0; + } + kfree(request); +} + +/** + * Tells the job to try to allocate the resources needed to execute the job. + * Called just before execution of a job. + * + * @job: The job + * @atomic: true if called from atomic (i.e. interrupt) context. If function + * can't allocate in atomic context it should return error, it + * will then be called later from non-atomic context. 
+ */ +static int job_acquire_resources(struct b2r2_core_job *job, bool atomic) +{ + struct b2r2_blt_request *request = + container_of(job, struct b2r2_blt_request, job); + struct b2r2_core *core = (struct b2r2_core *) job->data; + struct b2r2_control *cont = core->control; + int ret; + int i; + + b2r2_log_info(cont->dev, "%s\n", __func__); + + if (request->buf_count == 0) + return 0; + + if (request->buf_count > MAX_TMP_BUFS_NEEDED) { + b2r2_log_err(cont->dev, + "%s: request->buf_count > MAX_TMP_BUFS_NEEDED\n", + __func__); + return -ENOMSG; + } + + /* + * 1 to 1 mapping between request temp buffers and temp buffers + * (request temp buf 0 is always temp buf 0, request temp buf 1 is + * always temp buf 1 and so on) to avoid starvation of jobs that + * require multiple temp buffers. Not optimal in terms of memory + * usage but we avoid get into a situation where lower prio jobs can + * delay higher prio jobs that require more temp buffers. + */ + if (cont->tmp_bufs[0].in_use) + return -EAGAIN; + + for (i = 0; i < request->buf_count; i++) { + if (cont->tmp_bufs[i].buf.size < request->bufs[i].size) { + b2r2_log_err(cont->dev, "%s: " + "cont->tmp_bufs[i].buf.size < " + "request->bufs[i].size\n", __func__); + ret = -ENOMSG; + goto error; + } + + cont->tmp_bufs[i].in_use = true; + request->bufs[i].phys_addr = cont->tmp_bufs[i].buf.phys_addr; + request->bufs[i].virt_addr = cont->tmp_bufs[i].buf.virt_addr; + + b2r2_log_info(cont->dev, "%s: phys=%p, virt=%p\n", + __func__, (void *)request->bufs[i].phys_addr, + request->bufs[i].virt_addr); + + ret = b2r2_node_split_assign_buffers(cont, + &request->node_split_job, + request->first_node, request->bufs, + request->buf_count); + if (ret < 0) + goto error; + } + + return 0; + +error: + for (i = 0; i < request->buf_count; i++) + cont->tmp_bufs[i].in_use = false; + + return ret; +} + +/** + * Tells the job to free the resources needed to execute the job. + * Called after execution of a job. 
+ * + * @job: The job + * @atomic: true if called from atomic (i.e. interrupt) context. If function + * can't allocate in atomic context it should return error, it + * will then be called later from non-atomic context. + */ +static void job_release_resources(struct b2r2_core_job *job, bool atomic) +{ + struct b2r2_blt_request *request = + container_of(job, struct b2r2_blt_request, job); + struct b2r2_core *core = (struct b2r2_core *) job->data; + struct b2r2_control *cont = core->control; + int i; + + b2r2_log_info(cont->dev, "%s\n", __func__); + + /* Free any temporary buffers */ + for (i = 0; i < request->buf_count; i++) { + + b2r2_log_info(cont->dev, "%s: freeing %d bytes\n", + __func__, request->bufs[i].size); + cont->tmp_bufs[i].in_use = false; + memset(&request->bufs[i], 0, sizeof(request->bufs[i])); + } + request->buf_count = 0; + + /* + * Early release of nodes + * FIXME: If nodes are to be reused we don't want to release here + */ + if (!atomic && request->first_node) { + b2r2_debug_job_done(cont, request->first_node); + +#ifdef B2R2_USE_NODE_GEN + b2r2_blt_free_nodes(cont, request->first_node); +#else + b2r2_node_free(cont, request->first_node); +#endif + request->first_node = NULL; + } +} + +#endif /* !CONFIG_B2R2_GENERIC_ONLY */ + +#ifdef CONFIG_B2R2_GENERIC +/** + * Called when job for one tile is done or cancelled + * in the generic path. 
+ * + * @job: The job + */ +static void tile_job_callback_gen(struct b2r2_core_job *job) +{ +#ifdef CONFIG_B2R2_DEBUG + struct b2r2_core *core = + (struct b2r2_core *) job->data; + struct b2r2_control *cont = core->control; +#endif + + b2r2_log_info(cont->dev, "%s\n", __func__); + + /* Local addref / release within this func */ + b2r2_core_job_addref(job, __func__); + +#ifdef CONFIG_DEBUG_FS + /* Notify if a tile job is cancelled */ + if (job->job_state == B2R2_CORE_JOB_CANCELED) + b2r2_log_info(cont->dev, "%s: Tile job cancelled:\n", + __func__); +#endif + + /* Local addref / release within this func */ + b2r2_core_job_release(job, __func__); +} + +/** + * Called when job is done or cancelled. + * Used for the last tile in the generic path + * to notify waiting clients. + * + * @job: The job + */ +static void job_callback_gen(struct b2r2_core_job *job) +{ + struct b2r2_blt_request *request = + container_of(job, struct b2r2_blt_request, job); + struct b2r2_core *core = (struct b2r2_core *) job->data; + struct b2r2_control *cont = core->control; + + b2r2_log_info(cont->dev, "%s\n", __func__); + + /* Local addref / release within this func */ + b2r2_core_job_addref(job, __func__); + + /* Unresolve the buffers */ + unresolve_buf(cont, &request->user_req.src_img.buf, + &request->src_resolved); + unresolve_buf(cont, &request->user_req.src_mask.buf, + &request->src_mask_resolved); + unresolve_buf(cont, &request->user_req.dst_img.buf, + &request->dst_resolved); + + /* Move to report list if the job shall be reported */ + /* FIXME: Use a smaller struct? 
*/ + /* TODO: In the case of kernel API call, feed an asynch task to the + * instance worker (kthread) instead of polling for a report */ + mutex_lock(&request->instance->lock); + if (request->user_req.flags & B2R2_BLT_FLAG_REPORT_WHEN_DONE) { + /* Move job to report list */ + list_add_tail(&request->list, + &request->instance->report_list); + inc_stat(cont, &cont->stat_n_jobs_in_report_list); + + /* Wake up poll */ + wake_up_interruptible( + &request->instance->report_list_waitq); + + /* + * Add a reference because we put the + * job in the report list + */ + b2r2_core_job_addref(job, __func__); + } + + /* + * Decrease number of active requests and wake up + * synching threads if active requests reaches zero + */ + BUG_ON(request->instance->no_of_active_requests == 0); + request->instance->no_of_active_requests--; + if (request->instance->synching && + request->instance->no_of_active_requests == 0) { + request->instance->synching = false; + /* Wake up all syncing */ + + wake_up_interruptible_all( + &request->instance->synch_done_waitq); + } + mutex_unlock(&request->instance->lock); + +#ifdef CONFIG_DEBUG_FS + /* Dump job if cancelled */ + if (job->job_state == B2R2_CORE_JOB_CANCELED) { + char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL); + + b2r2_log_info(cont->dev, "%s: Job cancelled:\n", __func__); + if (Buf != NULL) { + sprintf_req(request, Buf, sizeof(char) * 4096); + b2r2_log_info(cont->dev, "%s", Buf); + kfree(Buf); + } else { + b2r2_log_info(cont->dev, "Unable to print the request." + " Message buffer allocation failed.\n"); + } + } +#endif + + /* Local addref / release within this func */ + b2r2_core_job_release(job, __func__); +} + +/** + * Called when tile job should be released (free memory etc.) + * Should be used only for tile jobs. Tile jobs should only be used + * by b2r2_core, thus making ref_count trigger their release. 
+ * + * @job: The job + */ + +static void tile_job_release_gen(struct b2r2_core_job *job) +{ + struct b2r2_core *core = + (struct b2r2_core *) job->data; + struct b2r2_control *cont = core->control; + + inc_stat(cont, &cont->stat_n_jobs_released); + + b2r2_log_info(cont->dev, "%s, first_node_address=0x%.8x, ref_count=" + "%d\n", __func__, job->first_node_address, + job->ref_count); + + /* Release memory for the job */ + kfree(job); +} + +/** + * Called when job should be released (free memory etc.) + * + * @job: The job + */ + +static void job_release_gen(struct b2r2_core_job *job) +{ + struct b2r2_blt_request *request = + container_of(job, struct b2r2_blt_request, job); + struct b2r2_core *core = (struct b2r2_core *) job->data; + struct b2r2_control *cont = core->control; + + inc_stat(cont, &cont->stat_n_jobs_released); + + b2r2_log_info(cont->dev, "%s, first_node=%p, ref_count=%d\n", + __func__, request->first_node, request->job.ref_count); + + if (request->first_node) { + b2r2_debug_job_done(cont, request->first_node); + + /* Free nodes */ +#ifdef B2R2_USE_NODE_GEN + b2r2_blt_free_nodes(cont, request->first_node); +#else + b2r2_node_free(cont, request->first_node); +#endif + } + + /* Release memory for the request */ + if (request->clut != NULL) { + dma_free_coherent(cont->dev, CLUT_SIZE, request->clut, + request->clut_phys_addr); + request->clut = NULL; + request->clut_phys_addr = 0; + } + kfree(request); +} + +static int job_acquire_resources_gen(struct b2r2_core_job *job, bool atomic) +{ + /* Nothing so far. Temporary buffers are pre-allocated */ + return 0; +} +static void job_release_resources_gen(struct b2r2_core_job *job, bool atomic) +{ + /* Nothing so far. 
Temporary buffers are pre-allocated */ +} + +/** + * b2r2_generic_blt - Generic implementation of the B2R2 blit request + * + * @request; The request to perform + */ +int b2r2_generic_blt(struct b2r2_blt_request *request) +{ + int ret = 0; + struct b2r2_blt_rect actual_dst_rect; + int request_id = 0; + struct b2r2_node *last_node = request->first_node; + int node_count; + s32 tmp_buf_width = 0; + s32 tmp_buf_height = 0; + u32 tmp_buf_count = 0; + s32 x; + s32 y; + const struct b2r2_blt_rect *dst_rect = &(request->user_req.dst_rect); + const s32 dst_img_width = request->user_req.dst_img.width; + const s32 dst_img_height = request->user_req.dst_img.height; + const enum b2r2_blt_flag flags = request->user_req.flags; + /* Descriptors for the temporary buffers */ + struct b2r2_work_buf work_bufs[4]; + struct b2r2_blt_rect dst_rect_tile; + int i; + struct b2r2_control_instance *instance = request->instance; + struct b2r2_control *cont = instance->control; + + u32 thread_runtime_at_start = 0; + s32 nsec_active_in_b2r2 = 0; + + /* + * Early exit if zero blt. + * dst_rect outside of dst_img or + * dst_clip_rect outside of dst_img. 
+ */ + if (dst_rect->x + dst_rect->width <= 0 || + dst_rect->y + dst_rect->height <= 0 || + dst_img_width <= dst_rect->x || + dst_img_height <= dst_rect->y || + ((flags & B2R2_BLT_FLAG_DESTINATION_CLIP) != 0 && + (dst_img_width <= request->user_req.dst_clip_rect.x || + dst_img_height <= request->user_req.dst_clip_rect.y || + request->user_req.dst_clip_rect.x + + request->user_req.dst_clip_rect.width <= 0 || + request->user_req.dst_clip_rect.y + + request->user_req.dst_clip_rect.height <= 0))) { + goto zero_blt; + } + + if (request->profile) { + request->start_time_nsec = b2r2_get_curr_nsec(); + thread_runtime_at_start = (u32)task_sched_runtime(current); + } + + memset(work_bufs, 0, sizeof(work_bufs)); + + b2r2_log_info(cont->dev, "%s\n", __func__); + + inc_stat(cont, &cont->stat_n_in_blt); + + /* Debug prints of incoming request */ + b2r2_log_info(cont->dev, + "src.fmt=%#010x flags=0x%.8x src.buf={%d,%d,0x%.8x}\n" + "src.w,h={%d,%d} src.rect={%d,%d,%d,%d}\n", + request->user_req.src_img.fmt, + request->user_req.flags, + request->user_req.src_img.buf.type, + request->user_req.src_img.buf.fd, + request->user_req.src_img.buf.offset, + request->user_req.src_img.width, + request->user_req.src_img.height, + request->user_req.src_rect.x, + request->user_req.src_rect.y, + request->user_req.src_rect.width, + request->user_req.src_rect.height); + b2r2_log_info(cont->dev, + "dst.fmt=%#010x dst.buf={%d,%d,0x%.8x}\n" + "dst.w,h={%d,%d} dst.rect={%d,%d,%d,%d}\n" + "dst_clip_rect={%d,%d,%d,%d}\n", + request->user_req.dst_img.fmt, + request->user_req.dst_img.buf.type, + request->user_req.dst_img.buf.fd, + request->user_req.dst_img.buf.offset, + request->user_req.dst_img.width, + request->user_req.dst_img.height, + request->user_req.dst_rect.x, + request->user_req.dst_rect.y, + request->user_req.dst_rect.width, + request->user_req.dst_rect.height, + request->user_req.dst_clip_rect.x, + request->user_req.dst_clip_rect.y, + request->user_req.dst_clip_rect.width, + 
request->user_req.dst_clip_rect.height); + + inc_stat(cont, &cont->stat_n_in_blt_synch); + + /* Wait here if synch is ongoing */ + ret = wait_event_interruptible(instance->synch_done_waitq, + !is_synching(instance)); + if (ret) { + b2r2_log_warn(cont->dev, "%s: Sync wait interrupted, %d\n", + __func__, ret); + ret = -EAGAIN; + dec_stat(cont, &cont->stat_n_in_blt_synch); + goto synch_interrupted; + } + + dec_stat(cont, &cont->stat_n_in_blt_synch); + + /* Resolve the buffers */ + + /* Source buffer */ + ret = resolve_buf(cont, &request->user_req.src_img, + &request->user_req.src_rect, false, &request->src_resolved); + if (ret < 0) { + b2r2_log_warn(cont->dev, "%s: Resolve src buf failed, %d\n", + __func__, ret); + ret = -EAGAIN; + goto resolve_src_buf_failed; + } + + /* Source mask buffer */ + ret = resolve_buf(cont, &request->user_req.src_mask, + &request->user_req.src_rect, false, + &request->src_mask_resolved); + if (ret < 0) { + b2r2_log_warn(cont->dev, + "%s: Resolve src mask buf failed, %d\n", + __func__, ret); + ret = -EAGAIN; + goto resolve_src_mask_buf_failed; + } + + /* Destination buffer */ + get_actual_dst_rect(&request->user_req, &actual_dst_rect); + ret = resolve_buf(cont, &request->user_req.dst_img, &actual_dst_rect, + true, &request->dst_resolved); + if (ret < 0) { + b2r2_log_warn(cont->dev, "%s: Resolve dst buf failed, %d\n", + __func__, ret); + ret = -EAGAIN; + goto resolve_dst_buf_failed; + } + + /* Debug prints of resolved buffers */ + b2r2_log_info(cont->dev, "src.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n", + request->src_resolved.physical_address, + request->src_resolved.virtual_address, + request->src_resolved.is_pmem, + request->src_resolved.filep, + request->src_resolved.file_physical_start, + request->src_resolved.file_virtual_start, + request->src_resolved.file_len); + + b2r2_log_info(cont->dev, "dst.rbuf={%X,%p,%d} {%p,%X,%X,%d}\n", + request->dst_resolved.physical_address, + request->dst_resolved.virtual_address, + request->dst_resolved.is_pmem, + 
request->dst_resolved.filep, + request->dst_resolved.file_physical_start, + request->dst_resolved.file_virtual_start, + request->dst_resolved.file_len); + + /* Calculate the number of nodes (and resources) needed for this job */ + ret = b2r2_generic_analyze(request, &tmp_buf_width, + &tmp_buf_height, &tmp_buf_count, &node_count); + if (ret < 0) { + b2r2_log_warn(cont->dev, + "%s: Failed to analyze request, ret = %d\n", + __func__, ret); +#ifdef CONFIG_DEBUG_FS + { + /* Failed, dump job to dmesg */ + char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL); + + b2r2_log_info(cont->dev, + "%s: Analyze failed for:\n", __func__); + if (Buf != NULL) { + sprintf_req(request, Buf, sizeof(char) * 4096); + b2r2_log_info(cont->dev, "%s", Buf); + kfree(Buf); + } else { + b2r2_log_info(cont->dev, + "Unable to print the request. " + "Message buffer allocation failed.\n"); + } + } +#endif + goto generate_nodes_failed; + } + + /* Allocate the nodes needed */ +#ifdef B2R2_USE_NODE_GEN + request->first_node = b2r2_blt_alloc_nodes(cont, node_count); + if (request->first_node == NULL) { + b2r2_log_warn(cont->dev, + "%s: Failed to allocate nodes, ret = %d\n", + __func__, ret); + goto generate_nodes_failed; + } +#else + ret = b2r2_node_alloc(cont, node_count, &(request->first_node)); + if (ret < 0 || request->first_node == NULL) { + b2r2_log_warn(cont->dev, + "%s: Failed to allocate nodes, ret = %d\n", + __func__, ret); + goto generate_nodes_failed; + } +#endif + + /* Allocate the temporary buffers */ + for (i = 0; i < tmp_buf_count; i++) { + void *virt; + work_bufs[i].size = tmp_buf_width * tmp_buf_height * 4; + + virt = dma_alloc_coherent(cont->dev, + work_bufs[i].size, + &(work_bufs[i].phys_addr), + GFP_DMA | GFP_KERNEL); + if (virt == NULL) { + ret = -ENOMEM; + goto alloc_work_bufs_failed; + } + + work_bufs[i].virt_addr = virt; + memset(work_bufs[i].virt_addr, 0xff, work_bufs[i].size); + } + ret = b2r2_generic_configure(request, + request->first_node, &work_bufs[0], tmp_buf_count); + + 
if (ret < 0) { + b2r2_log_warn(cont->dev, + "%s: Failed to perform generic configure, ret = %d\n", + __func__, ret); + goto generic_conf_failed; + } + + /* Exit here if dry run */ + if (flags & B2R2_BLT_FLAG_DRY_RUN) + goto exit_dry_run; + + /* + * Configure the request and make sure + * that its job is run only for the LAST tile. + * This is when the request is complete + * and waiting clients should be notified. + */ + last_node = request->first_node; + while (last_node && last_node->next) + last_node = last_node->next; + + request->job.tag = (int) instance; + request->job.data = (int) cont->data; + request->job.prio = request->user_req.prio; + request->job.first_node_address = + request->first_node->physical_address; + request->job.last_node_address = + last_node->physical_address; + request->job.callback = job_callback_gen; + request->job.release = job_release_gen; + /* Work buffers and nodes are pre-allocated */ + request->job.acquire_resources = job_acquire_resources_gen; + request->job.release_resources = job_release_resources_gen; + + /* Flush the L1/L2 cache for the buffers */ + + /* Source buffer */ + if (!(flags & B2R2_BLT_FLAG_SRC_NO_CACHE_FLUSH) && + (request->user_req.src_img.buf.type != + B2R2_BLT_PTR_PHYSICAL) && + !b2r2_is_mb_fmt(request->user_req.src_img.fmt)) + /* MB formats are never touched by SW */ + sync_buf(cont, &request->user_req.src_img, + &request->src_resolved, + false, /*is_dst*/ + &request->user_req.src_rect); + + /* Source mask buffer */ + if (!(flags & B2R2_BLT_FLAG_SRC_MASK_NO_CACHE_FLUSH) && + (request->user_req.src_mask.buf.type != + B2R2_BLT_PTR_PHYSICAL) && + !b2r2_is_mb_fmt(request->user_req.src_mask.fmt)) + /* MB formats are never touched by SW */ + sync_buf(cont, &request->user_req.src_mask, + &request->src_mask_resolved, + false, /*is_dst*/ + NULL); + + /* Destination buffer */ + if (!(flags & B2R2_BLT_FLAG_DST_NO_CACHE_FLUSH) && + (request->user_req.dst_img.buf.type != + B2R2_BLT_PTR_PHYSICAL) && + 
!b2r2_is_mb_fmt(request->user_req.dst_img.fmt)) + /* MB formats are never touched by SW */ + sync_buf(cont, &request->user_req.dst_img, + &request->dst_resolved, + true, /*is_dst*/ + &request->user_req.dst_rect); + +#ifdef CONFIG_DEBUG_FS + /* Remember latest request */ + cont->debugfs_latest_request = *request; +#endif + + /* + * Same nodes are reused for all the jobs needed to complete the blit. + * Nodes are NOT released together with associated job, + * as is the case with optimized b2r2_blt() path. + */ + mutex_lock(&instance->lock); + instance->no_of_active_requests++; + mutex_unlock(&instance->lock); + /* + * Process all but the last row in the destination rectangle. + * Consider only the tiles that will actually end up inside + * the destination image. + * dst_rect->height - tmp_buf_height being <=0 is allright. + * The loop will not be entered since y will always be equal to or + * greater than zero. + * Early exit check at the beginning handles the cases when nothing + * at all should be processed. + */ + y = 0; + if (dst_rect->y < 0) + y = -dst_rect->y; + + for (; y < dst_rect->height - tmp_buf_height && + y + dst_rect->y < dst_img_height - tmp_buf_height; + y += tmp_buf_height) { + /* Tile in the destination rectangle being processed */ + struct b2r2_blt_rect dst_rect_tile; + dst_rect_tile.y = y; + dst_rect_tile.width = tmp_buf_width; + dst_rect_tile.height = tmp_buf_height; + + x = 0; + if (dst_rect->x < 0) + x = -dst_rect->x; + + for (; x < dst_rect->width && x + dst_rect->x < dst_img_width; + x += tmp_buf_width) { + /* + * Tile jobs are freed by the supplied release function + * when ref_count on a tile_job reaches zero. + */ + struct b2r2_core_job *tile_job = + kmalloc(sizeof(*tile_job), GFP_KERNEL); + if (tile_job == NULL) { + /* + * Skip this tile. Do not abort, + * just hope for better luck + * with rest of the tiles. + * Memory might become available. + */ + b2r2_log_info(cont->dev, "%s: Failed to alloc " + "job. 
Skipping tile at (x, y)=" + "(%d, %d)\n", __func__, x, y); + continue; + } + tile_job->job_id = request->job.job_id; + tile_job->tag = request->job.tag; + tile_job->data = request->job.data; + tile_job->prio = request->job.prio; + tile_job->first_node_address = + request->job.first_node_address; + tile_job->last_node_address = + request->job.last_node_address; + tile_job->callback = tile_job_callback_gen; + tile_job->release = tile_job_release_gen; + /* Work buffers and nodes are pre-allocated */ + tile_job->acquire_resources = + job_acquire_resources_gen; + tile_job->release_resources = + job_release_resources_gen; + + dst_rect_tile.x = x; + if (x + dst_rect->x + tmp_buf_width > dst_img_width) { + /* + * Only a part of the tile can be written. + * Limit imposed by buffer size. + */ + dst_rect_tile.width = + dst_img_width - (x + dst_rect->x); + } else if (x + tmp_buf_width > dst_rect->width) { + /* + * Only a part of the tile can be written. + * In this case limit imposed by dst_rect size. + */ + dst_rect_tile.width = dst_rect->width - x; + } else { + /* Whole tile can be written. */ + dst_rect_tile.width = tmp_buf_width; + } + /* + * Where applicable, calculate area in src buffer + * that is needed to generate the specified part + * of destination rectangle. 
+ */ + b2r2_generic_set_areas(request, + request->first_node, &dst_rect_tile); + /* Submit the job */ + b2r2_log_info(cont->dev, + "%s: Submitting job\n", __func__); + + inc_stat(cont, &cont->stat_n_in_blt_add); + + mutex_lock(&instance->lock); + + request_id = b2r2_core_job_add(cont, tile_job); + + dec_stat(cont, &cont->stat_n_in_blt_add); + + if (request_id < 0) { + b2r2_log_warn(cont->dev, "%s: " + "Failed to add tile job, ret = %d\n", + __func__, request_id); + ret = request_id; + mutex_unlock(&instance->lock); + goto job_add_failed; + } + + inc_stat(cont, &cont->stat_n_jobs_added); + + mutex_unlock(&instance->lock); + + /* Wait for the job to be done */ + b2r2_log_info(cont->dev, "%s: Synchronous, waiting\n", + __func__); + + inc_stat(cont, &cont->stat_n_in_blt_wait); + + ret = b2r2_core_job_wait(tile_job); + + dec_stat(cont, &cont->stat_n_in_blt_wait); + + if (ret < 0 && ret != -ENOENT) + b2r2_log_warn(cont->dev, + "%s: Failed to wait job, ret = %d\n", + __func__, ret); + else { + b2r2_log_info(cont->dev, + "%s: Synchronous wait done\n", + __func__); + + nsec_active_in_b2r2 += + tile_job->nsec_active_in_hw; + } + /* Release matching the addref in b2r2_core_job_add */ + b2r2_core_job_release(tile_job, __func__); + } + } + + x = 0; + if (dst_rect->x < 0) + x = -dst_rect->x; + + for (; x < dst_rect->width && + x + dst_rect->x < dst_img_width; x += tmp_buf_width) { + struct b2r2_core_job *tile_job = NULL; + if (x + tmp_buf_width < dst_rect->width && + x + dst_rect->x + tmp_buf_width < + dst_img_width) { + /* + * Tile jobs are freed by the supplied release function + * when ref_count on a tile_job reaches zero. + * Do NOT allocate a tile_job for the last tile. + * Send the job from the request. This way clients + * will be notified when the whole blit is complete + * and not just part of it. + */ + tile_job = kmalloc(sizeof(*tile_job), GFP_KERNEL); + if (tile_job == NULL) { + b2r2_log_info(cont->dev, "%s: Failed to alloc " + "job. 
Skipping tile at (x, y)=" + "(%d, %d)\n", __func__, x, y); + continue; + } + tile_job->job_id = request->job.job_id; + tile_job->tag = request->job.tag; + tile_job->data = request->job.data; + tile_job->prio = request->job.prio; + tile_job->first_node_address = + request->job.first_node_address; + tile_job->last_node_address = + request->job.last_node_address; + tile_job->callback = tile_job_callback_gen; + tile_job->release = tile_job_release_gen; + tile_job->acquire_resources = + job_acquire_resources_gen; + tile_job->release_resources = + job_release_resources_gen; + } + + dst_rect_tile.x = x; + if (x + dst_rect->x + tmp_buf_width > dst_img_width) { + /* + * Only a part of the tile can be written. + * Limit imposed by buffer size. + */ + dst_rect_tile.width = dst_img_width - (x + dst_rect->x); + } else if (x + tmp_buf_width > dst_rect->width) { + /* + * Only a part of the tile can be written. + * In this case limit imposed by dst_rect size. + */ + dst_rect_tile.width = dst_rect->width - x; + } else { + /* Whole tile can be written. */ + dst_rect_tile.width = tmp_buf_width; + } + /* + * y is now the last row. Either because the whole dst_rect + * has been processed, or because the last row that will be + * written to dst_img has been reached. Limits imposed in + * the same way as for width. 
+ */ + dst_rect_tile.y = y; + if (y + dst_rect->y + tmp_buf_height > dst_img_height) + dst_rect_tile.height = + dst_img_height - (y + dst_rect->y); + else if (y + tmp_buf_height > dst_rect->height) + dst_rect_tile.height = dst_rect->height - y; + else + dst_rect_tile.height = tmp_buf_height; + + b2r2_generic_set_areas(request, + request->first_node, &dst_rect_tile); + + b2r2_log_info(cont->dev, "%s: Submitting job\n", __func__); + inc_stat(cont, &cont->stat_n_in_blt_add); + + mutex_lock(&instance->lock); + if (x + tmp_buf_width < dst_rect->width && + x + dst_rect->x + tmp_buf_width < + dst_img_width) { + request_id = b2r2_core_job_add(cont, tile_job); + } else { + /* + * Last tile. Send the job-struct from the request. + * Clients will be notified once it completes. + */ + request_id = b2r2_core_job_add(cont, &request->job); + } + + dec_stat(cont, &cont->stat_n_in_blt_add); + + if (request_id < 0) { + b2r2_log_warn(cont->dev, "%s: Failed to add tile job, " + "ret = %d\n", __func__, request_id); + ret = request_id; + mutex_unlock(&instance->lock); + if (tile_job != NULL) + kfree(tile_job); + goto job_add_failed; + } + + inc_stat(cont, &cont->stat_n_jobs_added); + mutex_unlock(&instance->lock); + + b2r2_log_info(cont->dev, "%s: Synchronous, waiting\n", + __func__); + + inc_stat(cont, &cont->stat_n_in_blt_wait); + if (x + tmp_buf_width < dst_rect->width && + x + dst_rect->x + tmp_buf_width < + dst_img_width) { + ret = b2r2_core_job_wait(tile_job); + } else { + /* + * This is the last tile. Wait for the job-struct from + * the request. 
+ */ + ret = b2r2_core_job_wait(&request->job); + } + dec_stat(cont, &cont->stat_n_in_blt_wait); + + if (ret < 0 && ret != -ENOENT) + b2r2_log_warn(cont->dev, + "%s: Failed to wait job, ret = %d\n", + __func__, ret); + else { + b2r2_log_info(cont->dev, + "%s: Synchronous wait done\n", __func__); + + if (x + tmp_buf_width < dst_rect->width && + x + dst_rect->x + tmp_buf_width < + dst_img_width) + nsec_active_in_b2r2 += + tile_job->nsec_active_in_hw; + else + nsec_active_in_b2r2 += + request->job.nsec_active_in_hw; + } + + /* + * Release matching the addref in b2r2_core_job_add. + * Make sure that the correct job-struct is released + * when the last tile is processed. + */ + if (x + tmp_buf_width < dst_rect->width && + x + dst_rect->x + tmp_buf_width < + dst_img_width) { + b2r2_core_job_release(tile_job, __func__); + } else { + /* + * Update profiling information before + * the request is released together with + * its core_job. + */ + if (request->profile) { + request->nsec_active_in_cpu = + (s32)((u32)task_sched_runtime(current) - + thread_runtime_at_start); + request->total_time_nsec = + (s32)(b2r2_get_curr_nsec() - + request->start_time_nsec); + request->job.nsec_active_in_hw = + nsec_active_in_b2r2; + + b2r2_call_profiler_blt_done(request); + } + + b2r2_core_job_release(&request->job, __func__); + } + } + + dec_stat(cont, &cont->stat_n_in_blt); + + for (i = 0; i < tmp_buf_count; i++) { + dma_free_coherent(cont->dev, + work_bufs[i].size, + work_bufs[i].virt_addr, + work_bufs[i].phys_addr); + memset(&(work_bufs[i]), 0, sizeof(work_bufs[i])); + } + + return request_id; + +job_add_failed: +exit_dry_run: +generic_conf_failed: +alloc_work_bufs_failed: + for (i = 0; i < 4; i++) { + if (work_bufs[i].virt_addr != 0) { + dma_free_coherent(cont->dev, + work_bufs[i].size, + work_bufs[i].virt_addr, + work_bufs[i].phys_addr); + memset(&(work_bufs[i]), 0, sizeof(work_bufs[i])); + } + } + +generate_nodes_failed: + unresolve_buf(cont, &request->user_req.dst_img.buf, + 
&request->dst_resolved); +resolve_dst_buf_failed: + unresolve_buf(cont, &request->user_req.src_mask.buf, + &request->src_mask_resolved); +resolve_src_mask_buf_failed: + unresolve_buf(cont, &request->user_req.src_img.buf, + &request->src_resolved); +resolve_src_buf_failed: +synch_interrupted: +zero_blt: + job_release_gen(&request->job); + dec_stat(cont, &cont->stat_n_jobs_released); + dec_stat(cont, &cont->stat_n_in_blt); + + b2r2_log_info(cont->dev, "b2r2:%s ret=%d", __func__, ret); + return ret; +} +#endif /* CONFIG_B2R2_GENERIC */ + +/** + * b2r2_blt_synch - Implements wait for all or a specified job + * + * @instance: The B2R2 BLT instance + * @request_id: If 0, wait for all requests on this instance to finish. + * Else wait for request with given request id to finish. + */ +int b2r2_control_synch(struct b2r2_control_instance *instance, + int request_id) +{ + int ret = 0; + struct b2r2_control *cont = instance->control; + + b2r2_log_info(cont->dev, "%s, request_id=%d\n", __func__, request_id); + + if (request_id == 0) { + /* Wait for all requests */ + inc_stat(cont, &cont->stat_n_in_synch_0); + + /* Enter state "synching" if we have any active request */ + mutex_lock(&instance->lock); + if (instance->no_of_active_requests) + instance->synching = true; + mutex_unlock(&instance->lock); + + /* Wait until no longer in state synching */ + ret = wait_event_interruptible(instance->synch_done_waitq, + !is_synching(instance)); + dec_stat(cont, &cont->stat_n_in_synch_0); + } else { + struct b2r2_core_job *job; + + inc_stat(cont, &cont->stat_n_in_synch_job); + + /* Wait for specific job */ + job = b2r2_core_job_find(cont, request_id); + if (job) { + /* Wait on find job */ + ret = b2r2_core_job_wait(job); + /* Release matching the addref in b2r2_core_job_find */ + b2r2_core_job_release(job, __func__); + } + + /* If job not found we assume that is has been run */ + dec_stat(cont, &cont->stat_n_in_synch_job); + } + + b2r2_log_info(cont->dev, + "%s, request_id=%d, returns 
%d\n", __func__, request_id, ret); + + return ret; +} + +static void get_actual_dst_rect(struct b2r2_blt_req *req, + struct b2r2_blt_rect *actual_dst_rect) +{ + struct b2r2_blt_rect dst_img_bounds; + + b2r2_get_img_bounding_rect(&req->dst_img, &dst_img_bounds); + + b2r2_intersect_rects(&req->dst_rect, &dst_img_bounds, actual_dst_rect); + + if (req->flags & B2R2_BLT_FLAG_DESTINATION_CLIP) + b2r2_intersect_rects(actual_dst_rect, &req->dst_clip_rect, + actual_dst_rect); +} + +static void set_up_hwmem_region(struct b2r2_control *cont, + struct b2r2_blt_img *img, struct b2r2_blt_rect *rect, + struct hwmem_region *region) +{ + s32 img_size; + + memset(region, 0, sizeof(*region)); + + if (b2r2_is_zero_area_rect(rect)) + return; + + img_size = b2r2_get_img_size(cont->dev, img); + + if (b2r2_is_single_plane_fmt(img->fmt) && + b2r2_is_independent_pixel_fmt(img->fmt)) { + int img_fmt_bpp = b2r2_get_fmt_bpp(cont->dev, img->fmt); + u32 img_pitch = b2r2_get_img_pitch(cont->dev, img); + + region->offset = (u32)(img->buf.offset + (rect->y * + img_pitch)); + region->count = (u32)rect->height; + region->start = (u32)((rect->x * img_fmt_bpp) / 8); + region->end = (u32)b2r2_div_round_up( + (rect->x + rect->width) * img_fmt_bpp, 8); + region->size = img_pitch; + } else { + /* + * TODO: Locking entire buffer as a quick safe solution. In the + * future we should lock less to avoid unecessary cache + * synching. Pixel interleaved YCbCr formats should be quite + * easy, just align start and stop points on 2. 
+ */ + region->offset = (u32)img->buf.offset; + region->count = 1; + region->start = 0; + region->end = (u32)img_size; + region->size = (u32)img_size; + } +} + +static int resolve_hwmem(struct b2r2_control *cont, + struct b2r2_blt_img *img, + struct b2r2_blt_rect *rect_2b_used, + bool is_dst, + struct b2r2_resolved_buf *resolved_buf) +{ + int return_value = 0; + enum hwmem_mem_type mem_type; + enum hwmem_access access; + enum hwmem_access required_access; + struct hwmem_mem_chunk mem_chunk; + size_t mem_chunk_length = 1; + struct hwmem_region region; + + resolved_buf->hwmem_alloc = + hwmem_resolve_by_name(img->buf.hwmem_buf_name); + if (IS_ERR(resolved_buf->hwmem_alloc)) { + return_value = PTR_ERR(resolved_buf->hwmem_alloc); + b2r2_log_info(cont->dev, "%s: hwmem_resolve_by_name failed, " + "error code: %i\n", __func__, return_value); + goto resolve_failed; + } + + hwmem_get_info(resolved_buf->hwmem_alloc, &resolved_buf->file_len, + &mem_type, &access); + + required_access = (is_dst ? HWMEM_ACCESS_WRITE : HWMEM_ACCESS_READ) | + HWMEM_ACCESS_IMPORT; + if ((required_access & access) != required_access) { + b2r2_log_info(cont->dev, + "%s: Insufficient access to hwmem (%d, requires %d)" + "buffer.\n", __func__, access, required_access); + return_value = -EACCES; + goto access_check_failed; + } + + if (mem_type != HWMEM_MEM_CONTIGUOUS_SYS) { + b2r2_log_info(cont->dev, "%s: Hwmem buffer is scattered.\n", + __func__); + return_value = -EINVAL; + goto buf_scattered; + } + + if (resolved_buf->file_len < + img->buf.offset + + (__u32)b2r2_get_img_size(cont->dev, img)) { + b2r2_log_info(cont->dev, "%s: Hwmem buffer too small. 
(%d < " + "%d)\n", __func__, resolved_buf->file_len, + img->buf.offset + + (__u32)b2r2_get_img_size(cont->dev, img)); + return_value = -EINVAL; + goto size_check_failed; + } + + return_value = hwmem_pin(resolved_buf->hwmem_alloc, &mem_chunk, + &mem_chunk_length); + if (return_value < 0) { + b2r2_log_info(cont->dev, "%s: hwmem_pin failed, " + "error code: %i\n", __func__, return_value); + goto pin_failed; + } + resolved_buf->file_physical_start = mem_chunk.paddr; + + set_up_hwmem_region(cont, img, rect_2b_used, ®ion); + return_value = hwmem_set_domain(resolved_buf->hwmem_alloc, + required_access, HWMEM_DOMAIN_SYNC, ®ion); + if (return_value < 0) { + b2r2_log_info(cont->dev, "%s: hwmem_set_domain failed, " + "error code: %i\n", __func__, return_value); + goto set_domain_failed; + } + + resolved_buf->physical_address = + resolved_buf->file_physical_start + img->buf.offset; + + goto out; + +set_domain_failed: + hwmem_unpin(resolved_buf->hwmem_alloc); +pin_failed: +size_check_failed: +buf_scattered: +access_check_failed: + hwmem_release(resolved_buf->hwmem_alloc); +resolve_failed: + +out: + return return_value; +} + +static void unresolve_hwmem(struct b2r2_resolved_buf *resolved_buf) +{ + hwmem_unpin(resolved_buf->hwmem_alloc); + hwmem_release(resolved_buf->hwmem_alloc); +} + +/** + * unresolve_buf() - Must be called after resolve_buf + * + * @buf: The buffer specification as supplied from user space + * @resolved: Gathered information about the buffer + * + * Returns 0 if OK else negative error code + */ +static void unresolve_buf(struct b2r2_control *cont, + struct b2r2_blt_buf *buf, + struct b2r2_resolved_buf *resolved) +{ +#ifdef CONFIG_ANDROID_PMEM + if (resolved->is_pmem && resolved->filep) + put_pmem_file(resolved->filep); +#endif + if (resolved->hwmem_alloc != NULL) + unresolve_hwmem(resolved); +} + +/** + * get_fb_info() - Fill buf with framebuffer info + * + * @file: The framebuffer file + * @buf: Gathered information about the buffer + * @img_offset: Image 
offset info frame buffer + * + * Returns 0 if OK else negative error code + */ +static int get_fb_info(struct file *file, + struct b2r2_resolved_buf *buf, + __u32 img_offset) +{ +#ifdef CONFIG_FB + if (file && buf && + MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) { + int i; + /* + * (OK to do it like this, no locking???) + */ + for (i = 0; i < num_registered_fb; i++) { + struct fb_info *info = registered_fb[i]; + + if (info && info->dev && + MINOR(info->dev->devt) == + MINOR(file->f_dentry->d_inode->i_rdev)) { + buf->file_physical_start = info->fix.smem_start; + buf->file_virtual_start = (u32)info->screen_base; + buf->file_len = info->fix.smem_len; + buf->physical_address = buf->file_physical_start + + img_offset; + buf->virtual_address = + (void *) (buf->file_virtual_start + + img_offset); + return 0; + } + } + } +#endif + return -EINVAL; +} + +/** + * resolve_buf() - Returns the physical & virtual addresses of a B2R2 blt buffer + * + * @img: The image specification as supplied from user space + * @rect_2b_used: The part of the image b2r2 will use. + * @usage: Specifies how the buffer will be used. + * @resolved: Gathered information about the buffer + * + * Returns 0 if OK else negative error code + */ +static int resolve_buf(struct b2r2_control *cont, + struct b2r2_blt_img *img, + struct b2r2_blt_rect *rect_2b_used, + bool is_dst, + struct b2r2_resolved_buf *resolved) +{ + int ret = 0; + + memset(resolved, 0, sizeof(*resolved)); + + switch (img->buf.type) { + case B2R2_BLT_PTR_NONE: + break; + + case B2R2_BLT_PTR_PHYSICAL: + resolved->physical_address = img->buf.offset; + resolved->file_len = img->buf.len; + break; + + /* FD + OFFSET type */ + case B2R2_BLT_PTR_FD_OFFSET: { + /* + * TODO: Do we need to check if the process is allowed to + * read/write (depending on if it's dst or src) to the file? 
+ */ +#ifdef CONFIG_ANDROID_PMEM + if (!get_pmem_file( + img->buf.fd, + (unsigned long *) &resolved->file_physical_start, + (unsigned long *) &resolved->file_virtual_start, + (unsigned long *) &resolved->file_len, + &resolved->filep)) { + resolved->physical_address = + resolved->file_physical_start + + img->buf.offset; + resolved->virtual_address = (void *) + (resolved->file_virtual_start + + img->buf.offset); + resolved->is_pmem = true; + } else +#endif + { + int fput_needed; + struct file *file; + + file = fget_light(img->buf.fd, &fput_needed); + if (file == NULL) + return -EINVAL; + + ret = get_fb_info(file, resolved, + img->buf.offset); + fput_light(file, fput_needed); + if (ret < 0) + return ret; + } + + /* Check bounds */ + if (img->buf.offset + img->buf.len > + resolved->file_len) { + ret = -ESPIPE; + unresolve_buf(cont, &img->buf, resolved); + } + + break; + } + + case B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET: + ret = resolve_hwmem(cont, img, rect_2b_used, is_dst, resolved); + break; + + default: + b2r2_log_warn(cont->dev, "%s: Failed to resolve buf type %d\n", + __func__, img->buf.type); + + ret = -EINVAL; + break; + + } + + return ret; +} + +/** + * sync_buf - Synchronizes the memory occupied by an image buffer. + * + * @buf: User buffer specification + * @resolved_buf: Gathered info (physical address etc.) about buffer + * @is_dst: true if the buffer is a destination buffer, false if the buffer is a + * source buffer. + * @rect: rectangle in the image buffer that should be synced. + * NULL if the buffer is a source mask. 
+ * @img_width: width of the complete image buffer + * @fmt: buffer format +*/ +static void sync_buf(struct b2r2_control *cont, + struct b2r2_blt_img *img, + struct b2r2_resolved_buf *resolved, + bool is_dst, + struct b2r2_blt_rect *rect) +{ + struct sync_args sa; + u32 start_phys, end_phys; + + if (B2R2_BLT_PTR_NONE == img->buf.type || + B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET == img->buf.type) + return; + + start_phys = resolved->physical_address; + end_phys = resolved->physical_address + img->buf.len; + + /* + * TODO: Very ugly. We should find out whether the memory is coherent in + * some generic way but cache handling will be rewritten soon so there + * is no use spending time on it. In the new design this will probably + * not be a problem. + */ + /* Frame buffer is coherent, at least now. */ + if (!resolved->is_pmem) { + /* + * Drain the write buffers as they are not always part of the + * coherent concept. + */ + wmb(); + + return; + } + + /* + * src_mask does not have rect. + * Also flush full buffer for planar and semiplanar YUV formats + */ + if (rect == NULL || + (img->fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR) || + (img->fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR) || + (img->fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR) || + (img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR) || + (img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR) || + (img->fmt == + B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE) || + (img->fmt == + B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE)) { + sa.start = (unsigned long)resolved->virtual_address; + sa.end = (unsigned long)resolved->virtual_address + + img->buf.len; + start_phys = resolved->physical_address; + end_phys = resolved->physical_address + img->buf.len; + } else { + /* + * buffer is not a src_mask so make use of rect when + * clean & flush caches + */ + u32 bpp; /* Bits per pixel */ + u32 pitch; + + switch (img->fmt) { + case B2R2_BLT_FMT_16_BIT_ARGB4444: /* Fall through */ + case B2R2_BLT_FMT_16_BIT_ARGB1555: /* Fall through */ + 
case B2R2_BLT_FMT_16_BIT_RGB565: /* Fall through */ + case B2R2_BLT_FMT_Y_CB_Y_CR: /* Fall through */ + case B2R2_BLT_FMT_CB_Y_CR_Y: + bpp = 16; + break; + case B2R2_BLT_FMT_24_BIT_RGB888: /* Fall through */ + case B2R2_BLT_FMT_24_BIT_ARGB8565: /* Fall through */ + case B2R2_BLT_FMT_24_BIT_YUV888: + case B2R2_BLT_FMT_24_BIT_VUY888: + bpp = 24; + break; + case B2R2_BLT_FMT_32_BIT_ARGB8888: /* Fall through */ + case B2R2_BLT_FMT_32_BIT_ABGR8888: /* Fall through */ + case B2R2_BLT_FMT_32_BIT_AYUV8888: + case B2R2_BLT_FMT_32_BIT_VUYA8888: + bpp = 32; + break; + default: + bpp = 12; + } + if (img->pitch == 0) + pitch = (img->width * bpp) / 8; + else + pitch = img->pitch; + + /* + * For 422I formats 2 horizontal pixels share color data. + * Thus, the x position must be aligned down to closest even + * number and width must be aligned up. + */ + { + s32 x; + s32 width; + + switch (img->fmt) { + case B2R2_BLT_FMT_Y_CB_Y_CR: /* Fall through */ + case B2R2_BLT_FMT_CB_Y_CR_Y: + x = (rect->x / 2) * 2; + width = ((rect->width + 1) / 2) * 2; + break; + default: + x = rect->x; + width = rect->width; + break; + } + + sa.start = (unsigned long)resolved->virtual_address + + rect->y * pitch + (x * bpp) / 8; + sa.end = (unsigned long)sa.start + + (rect->height - 1) * pitch + + (width * bpp) / 8; + + start_phys = resolved->physical_address + + rect->y * pitch + (x * bpp) / 8; + end_phys = start_phys + + (rect->height - 1) * pitch + + (width * bpp) / 8; + } + } + + /* + * The virtual address to a pmem buffer is retrieved from ioremap, not + * sure if it's ok to use such an address as a kernel virtual address. + * When doing it at a higher level such as dma_map_single it triggers an + * error but at lower levels such as dmac_clean_range it seems to work, + * hence the low level stuff. + */ + + if (is_dst) { + /* + * According to ARM's docs you must clean before invalidating + * (ie flush) to avoid loosing data. 
+ */ + + /* Flush L1 cache */ +#ifdef CONFIG_SMP + flush_l1_cache_range_all_cpus(&sa); +#else + flush_l1_cache_range_curr_cpu(&sa); +#endif + + /* Flush L2 cache */ + outer_flush_range(start_phys, end_phys); + } else { + /* Clean L1 cache */ +#ifdef CONFIG_SMP + clean_l1_cache_range_all_cpus(&sa); +#else + clean_l1_cache_range_curr_cpu(&sa); +#endif + + /* Clean L2 cache */ + outer_clean_range(start_phys, end_phys); + } +} + +/** + * is_report_list_empty() - Spin lock protected check of report list + * + * @instance: The B2R2 BLT instance + */ +static bool is_report_list_empty(struct b2r2_control_instance *instance) +{ + bool is_empty; + + mutex_lock(&instance->lock); + is_empty = list_empty(&instance->report_list); + mutex_unlock(&instance->lock); + + return is_empty; +} + +/** + * is_synching() - Spin lock protected check if synching + * + * @instance: The B2R2 BLT instance + */ +static bool is_synching(struct b2r2_control_instance *instance) +{ + bool is_synching; + + mutex_lock(&instance->lock); + is_synching = instance->synching; + mutex_unlock(&instance->lock); + + return is_synching; +} + +/** + * inc_stat() - Spin lock protected increment of statistics variable + * + * @stat: Pointer to statistics variable that should be incremented + */ +static void inc_stat(struct b2r2_control *cont, unsigned long *stat) +{ + mutex_lock(&cont->stat_lock); + (*stat)++; + mutex_unlock(&cont->stat_lock); +} + +/** + * inc_stat() - Spin lock protected decrement of statistics variable + * + * @stat: Pointer to statistics variable that should be decremented + */ +static void dec_stat(struct b2r2_control *cont, unsigned long *stat) +{ + mutex_lock(&cont->stat_lock); + (*stat)--; + mutex_unlock(&cont->stat_lock); +} + + +#ifdef CONFIG_DEBUG_FS +/** + * debugfs_b2r2_blt_request_read() - Implements debugfs read for B2R2 register + * + * @filp: File pointer + * @buf: User space buffer + * @count: Number of bytes to read + * @f_pos: File position + * + * Returns number of bytes read 
or negative error code + */ +static int debugfs_b2r2_blt_request_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos) +{ + size_t dev_size = 0; + int ret = 0; + char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL); + struct b2r2_control *cont = filp->f_dentry->d_inode->i_private; + + if (Buf == NULL) { + ret = -ENOMEM; + goto out; + } + + dev_size = sprintf_req(&cont->debugfs_latest_request, Buf, + sizeof(char) * 4096); + + /* No more to read if offset != 0 */ + if (*f_pos > dev_size) + goto out; + + if (*f_pos + count > dev_size) + count = dev_size - *f_pos; + + if (copy_to_user(buf, Buf, count)) + ret = -EINVAL; + *f_pos += count; + ret = count; + +out: + if (Buf != NULL) + kfree(Buf); + return ret; +} + +/** + * debugfs_b2r2_blt_request_fops - File operations for B2R2 request debugfs + */ +static const struct file_operations debugfs_b2r2_blt_request_fops = { + .owner = THIS_MODULE, + .read = debugfs_b2r2_blt_request_read, +}; + +/** + * struct debugfs_reg - Represents a B2R2 node "register" + * + * @name: Register name + * @offset: Offset within the node + */ +struct debugfs_reg { + const char name[30]; + u32 offset; +}; + +/** + * debugfs_node_regs - Array with all the registers in a B2R2 node, for debug + */ +static const struct debugfs_reg debugfs_node_regs[] = { + {"GROUP0.B2R2_NIP", offsetof(struct b2r2_link_list, GROUP0.B2R2_NIP)}, + {"GROUP0.B2R2_CIC", offsetof(struct b2r2_link_list, GROUP0.B2R2_CIC)}, + {"GROUP0.B2R2_INS", offsetof(struct b2r2_link_list, GROUP0.B2R2_INS)}, + {"GROUP0.B2R2_ACK", offsetof(struct b2r2_link_list, GROUP0.B2R2_ACK)}, + + {"GROUP1.B2R2_TBA", offsetof(struct b2r2_link_list, GROUP1.B2R2_TBA)}, + {"GROUP1.B2R2_TTY", offsetof(struct b2r2_link_list, GROUP1.B2R2_TTY)}, + {"GROUP1.B2R2_TXY", offsetof(struct b2r2_link_list, GROUP1.B2R2_TXY)}, + {"GROUP1.B2R2_TSZ", offsetof(struct b2r2_link_list, GROUP1.B2R2_TSZ)}, + + {"GROUP2.B2R2_S1CF", offsetof(struct b2r2_link_list, GROUP2.B2R2_S1CF)}, + {"GROUP2.B2R2_S2CF", 
offsetof(struct b2r2_link_list, GROUP2.B2R2_S2CF)}, + + {"GROUP3.B2R2_SBA", offsetof(struct b2r2_link_list, GROUP3.B2R2_SBA)}, + {"GROUP3.B2R2_STY", offsetof(struct b2r2_link_list, GROUP3.B2R2_STY)}, + {"GROUP3.B2R2_SXY", offsetof(struct b2r2_link_list, GROUP3.B2R2_SXY)}, + {"GROUP3.B2R2_SSZ", offsetof(struct b2r2_link_list, GROUP3.B2R2_SSZ)}, + + {"GROUP4.B2R2_SBA", offsetof(struct b2r2_link_list, GROUP4.B2R2_SBA)}, + {"GROUP4.B2R2_STY", offsetof(struct b2r2_link_list, GROUP4.B2R2_STY)}, + {"GROUP4.B2R2_SXY", offsetof(struct b2r2_link_list, GROUP4.B2R2_SXY)}, + {"GROUP4.B2R2_SSZ", offsetof(struct b2r2_link_list, GROUP4.B2R2_SSZ)}, + + {"GROUP5.B2R2_SBA", offsetof(struct b2r2_link_list, GROUP5.B2R2_SBA)}, + {"GROUP5.B2R2_STY", offsetof(struct b2r2_link_list, GROUP5.B2R2_STY)}, + {"GROUP5.B2R2_SXY", offsetof(struct b2r2_link_list, GROUP5.B2R2_SXY)}, + {"GROUP5.B2R2_SSZ", offsetof(struct b2r2_link_list, GROUP5.B2R2_SSZ)}, + + {"GROUP6.B2R2_CWO", offsetof(struct b2r2_link_list, GROUP6.B2R2_CWO)}, + {"GROUP6.B2R2_CWS", offsetof(struct b2r2_link_list, GROUP6.B2R2_CWS)}, + + {"GROUP7.B2R2_CCO", offsetof(struct b2r2_link_list, GROUP7.B2R2_CCO)}, + {"GROUP7.B2R2_CML", offsetof(struct b2r2_link_list, GROUP7.B2R2_CML)}, + + {"GROUP8.B2R2_FCTL", offsetof(struct b2r2_link_list, GROUP8.B2R2_FCTL)}, + {"GROUP8.B2R2_PMK", offsetof(struct b2r2_link_list, GROUP8.B2R2_PMK)}, + + {"GROUP9.B2R2_RSF", offsetof(struct b2r2_link_list, GROUP9.B2R2_RSF)}, + {"GROUP9.B2R2_RZI", offsetof(struct b2r2_link_list, GROUP9.B2R2_RZI)}, + {"GROUP9.B2R2_HFP", offsetof(struct b2r2_link_list, GROUP9.B2R2_HFP)}, + {"GROUP9.B2R2_VFP", offsetof(struct b2r2_link_list, GROUP9.B2R2_VFP)}, + + {"GROUP10.B2R2_RSF", offsetof(struct b2r2_link_list, GROUP10.B2R2_RSF)}, + {"GROUP10.B2R2_RZI", offsetof(struct b2r2_link_list, GROUP10.B2R2_RZI)}, + {"GROUP10.B2R2_HFP", offsetof(struct b2r2_link_list, GROUP10.B2R2_HFP)}, + {"GROUP10.B2R2_VFP", offsetof(struct b2r2_link_list, GROUP10.B2R2_VFP)}, + + 
{"GROUP11.B2R2_FF0", offsetof(struct b2r2_link_list, + GROUP11.B2R2_FF0)}, + {"GROUP11.B2R2_FF1", offsetof(struct b2r2_link_list, + GROUP11.B2R2_FF1)}, + {"GROUP11.B2R2_FF2", offsetof(struct b2r2_link_list, + GROUP11.B2R2_FF2)}, + {"GROUP11.B2R2_FF3", offsetof(struct b2r2_link_list, + GROUP11.B2R2_FF3)}, + + {"GROUP12.B2R2_KEY1", offsetof(struct b2r2_link_list, + GROUP12.B2R2_KEY1)}, + {"GROUP12.B2R2_KEY2", offsetof(struct b2r2_link_list, + GROUP12.B2R2_KEY2)}, + + {"GROUP13.B2R2_XYL", offsetof(struct b2r2_link_list, GROUP13.B2R2_XYL)}, + {"GROUP13.B2R2_XYP", offsetof(struct b2r2_link_list, GROUP13.B2R2_XYP)}, + + {"GROUP14.B2R2_SAR", offsetof(struct b2r2_link_list, GROUP14.B2R2_SAR)}, + {"GROUP14.B2R2_USR", offsetof(struct b2r2_link_list, GROUP14.B2R2_USR)}, + + {"GROUP15.B2R2_VMX0", offsetof(struct b2r2_link_list, + GROUP15.B2R2_VMX0)}, + {"GROUP15.B2R2_VMX1", offsetof(struct b2r2_link_list, + GROUP15.B2R2_VMX1)}, + {"GROUP15.B2R2_VMX2", offsetof(struct b2r2_link_list, + GROUP15.B2R2_VMX2)}, + {"GROUP15.B2R2_VMX3", offsetof(struct b2r2_link_list, + GROUP15.B2R2_VMX3)}, + + {"GROUP16.B2R2_VMX0", offsetof(struct b2r2_link_list, + GROUP16.B2R2_VMX0)}, + {"GROUP16.B2R2_VMX1", offsetof(struct b2r2_link_list, + GROUP16.B2R2_VMX1)}, + {"GROUP16.B2R2_VMX2", offsetof(struct b2r2_link_list, + GROUP16.B2R2_VMX2)}, + {"GROUP16.B2R2_VMX3", offsetof(struct b2r2_link_list, + GROUP16.B2R2_VMX3)}, +}; + +/** + * debugfs_b2r2_blt_stat_read() - Implements debugfs read for B2R2 BLT + * statistics + * + * @filp: File pointer + * @buf: User space buffer + * @count: Number of bytes to read + * @f_pos: File position + * + * Returns number of bytes read or negative error code + */ +static int debugfs_b2r2_blt_stat_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos) +{ + size_t dev_size = 0; + int ret = 0; + char *Buf = kmalloc(sizeof(char) * 4096, GFP_KERNEL); + struct b2r2_control *cont = filp->f_dentry->d_inode->i_private; + + if (Buf == NULL) { + ret = -ENOMEM; + 
goto out; + } + + mutex_lock(&cont->stat_lock); + dev_size += sprintf(Buf + dev_size, "Added jobs : %lu\n", + cont->stat_n_jobs_added); + dev_size += sprintf(Buf + dev_size, "Released jobs : %lu\n", + cont->stat_n_jobs_released); + dev_size += sprintf(Buf + dev_size, "Jobs in report list : %lu\n", + cont->stat_n_jobs_in_report_list); + dev_size += sprintf(Buf + dev_size, "Clients in open : %lu\n", + cont->stat_n_in_open); + dev_size += sprintf(Buf + dev_size, "Clients in release : %lu\n", + cont->stat_n_in_release); + dev_size += sprintf(Buf + dev_size, "Clients in blt : %lu\n", + cont->stat_n_in_blt); + dev_size += sprintf(Buf + dev_size, " synch : %lu\n", + cont->stat_n_in_blt_synch); + dev_size += sprintf(Buf + dev_size, " add : %lu\n", + cont->stat_n_in_blt_add); + dev_size += sprintf(Buf + dev_size, " wait : %lu\n", + cont->stat_n_in_blt_wait); + dev_size += sprintf(Buf + dev_size, "Clients in synch 0 : %lu\n", + cont->stat_n_in_synch_0); + dev_size += sprintf(Buf + dev_size, "Clients in synch job : %lu\n", + cont->stat_n_in_synch_job); + dev_size += sprintf(Buf + dev_size, "Clients in query_cap : %lu\n", + cont->stat_n_in_query_cap); + mutex_unlock(&cont->stat_lock); + + /* No more to read if offset != 0 */ + if (*f_pos > dev_size) + goto out; + + if (*f_pos + count > dev_size) + count = dev_size - *f_pos; + + if (copy_to_user(buf, Buf, count)) + ret = -EINVAL; + *f_pos += count; + ret = count; + +out: + if (Buf != NULL) + kfree(Buf); + return ret; +} + +/** + * debugfs_b2r2_blt_stat_fops() - File operations for B2R2 BLT + * statistics debugfs + */ +static const struct file_operations debugfs_b2r2_blt_stat_fops = { + .owner = THIS_MODULE, + .read = debugfs_b2r2_blt_stat_read, +}; +#endif + +static void init_tmp_bufs(struct b2r2_control *cont) +{ + int i = 0; + + for (i = 0; i < (sizeof(cont->tmp_bufs) / sizeof(struct tmp_buf)); + i++) { + cont->tmp_bufs[i].buf.virt_addr = dma_alloc_coherent( + cont->dev, MAX_TMP_BUF_SIZE, + &cont->tmp_bufs[i].buf.phys_addr, 
GFP_DMA); + if (cont->tmp_bufs[i].buf.virt_addr != NULL) + cont->tmp_bufs[i].buf.size = MAX_TMP_BUF_SIZE; + else { + b2r2_log_err(cont->dev, "%s: Failed to allocate temp " + "buffer %i\n", __func__, i); + cont->tmp_bufs[i].buf.size = 0; + } + } +} + +static void destroy_tmp_bufs(struct b2r2_control *cont) +{ + int i = 0; + + for (i = 0; i < MAX_TMP_BUFS_NEEDED; i++) { + if (cont->tmp_bufs[i].buf.size != 0) { + dma_free_coherent(cont->dev, + cont->tmp_bufs[i].buf.size, + cont->tmp_bufs[i].buf.virt_addr, + cont->tmp_bufs[i].buf.phys_addr); + + cont->tmp_bufs[i].buf.size = 0; + } + } +} + +/** + * b2r2_blt_module_init() - Module init function + * + * Returns 0 if OK else negative error code + */ +int b2r2_control_init(struct b2r2_control *cont) +{ + int ret; + + mutex_init(&cont->stat_lock); + +#ifdef CONFIG_B2R2_GENERIC + /* Initialize generic path */ + b2r2_generic_init(cont); +#endif + /* Initialize node splitter */ + ret = b2r2_node_split_init(cont); + if (ret) { + printk(KERN_WARNING "%s: node split init fails\n", __func__); + goto b2r2_node_split_init_fail; + } + + b2r2_log_info(cont->dev, "%s: device registered\n", __func__); + + cont->dev->coherent_dma_mask = 0xFFFFFFFF; + init_tmp_bufs(cont); + ret = b2r2_filters_init(cont); + if (ret) { + b2r2_log_warn(cont->dev, "%s: failed to init filters\n", + __func__); + goto b2r2_filter_init_fail; + } + + /* Initialize memory allocator */ + ret = b2r2_mem_init(cont, B2R2_HEAP_SIZE, + 4, sizeof(struct b2r2_node)); + if (ret) { + printk(KERN_WARNING "%s: initializing B2R2 memhandler fails\n", + __func__); + goto b2r2_mem_init_fail; + } + +#ifdef CONFIG_DEBUG_FS + /* Register debug fs */ + if (!IS_ERR_OR_NULL(cont->debugfs_root_dir)) { + debugfs_create_file("last_request", 0666, + cont->debugfs_root_dir, + cont, &debugfs_b2r2_blt_request_fops); + debugfs_create_file("stats", 0666, + cont->debugfs_root_dir, + cont, &debugfs_b2r2_blt_stat_fops); + } +#endif + + b2r2_log_info(cont->dev, "%s: done\n", __func__); + + return 
ret; + +b2r2_mem_init_fail: + b2r2_filters_exit(cont); +b2r2_filter_init_fail: + b2r2_node_split_exit(cont); +b2r2_node_split_init_fail: +#ifdef CONFIG_B2R2_GENERIC + b2r2_generic_exit(cont); +#endif + return ret; +} + +/** + * b2r2_control_exit() - Module exit function + */ +void b2r2_control_exit(struct b2r2_control *cont) +{ + if (cont) { + b2r2_log_info(cont->dev, "%s\n", __func__); +#ifdef CONFIG_DEBUG_FS + if (!IS_ERR_OR_NULL(cont->debugfs_root_dir)) { + debugfs_remove_recursive(cont->debugfs_root_dir); + cont->debugfs_root_dir = NULL; + } +#endif + b2r2_mem_exit(cont); + destroy_tmp_bufs(cont); + b2r2_node_split_exit(cont); +#if defined(CONFIG_B2R2_GENERIC) + b2r2_generic_exit(cont); +#endif + b2r2_filters_exit(cont); + } +} + +MODULE_AUTHOR("Robert Fekete <robert.fekete@stericsson.com>"); +MODULE_DESCRIPTION("ST-Ericsson B2R2 Blitter module"); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/b2r2/b2r2_control.h b/drivers/video/b2r2/b2r2_control.h new file mode 100644 index 00000000000..d13d2188618 --- /dev/null +++ b/drivers/video/b2r2/b2r2_control.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) ST-Ericsson SA 2012 + * + * ST-Ericsson B2R2 internal definitions + * + * Author: Jorgen Nilsson <jorgen.nilsson@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#ifndef _LINUX_DRIVERS_VIDEO_B2R2_CONTROL_H_ +#define _LINUX_DRIVERS_VIDEO_B2R2_CONTROL_H_ + +#include "b2r2_internal.h" + +int b2r2_control_init(struct b2r2_control *cont); +void b2r2_control_exit(struct b2r2_control *cont); +int b2r2_control_open(struct b2r2_control_instance *instance); +int b2r2_control_release(struct b2r2_control_instance *instance); + +int b2r2_control_blt(struct b2r2_blt_request *request); +int b2r2_generic_blt(struct b2r2_blt_request *request); +int b2r2_control_waitjob(struct b2r2_blt_request *request); +int b2r2_control_synch(struct b2r2_control_instance *instance, + int request_id); +size_t b2r2_control_read(struct b2r2_control_instance *instance, + struct b2r2_blt_request **request_out, bool block); +size_t b2r2_control_read_id(struct b2r2_control_instance *instance, + struct b2r2_blt_request **request_out, bool block, + int request_id); + +#endif diff --git a/drivers/video/b2r2/b2r2_core.c b/drivers/video/b2r2/b2r2_core.c new file mode 100644 index 00000000000..02071d5f989 --- /dev/null +++ b/drivers/video/b2r2/b2r2_core.c @@ -0,0 +1,2763 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 core driver + * + * Author: Robert Fekete <robert.fekete@stericsson.com> + * Author: Paul Wannback + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +/* + * TODO: Clock address from platform data + * Platform data should have string id instead of numbers + * b2r2_remove, some type of runtime problem when kernel hacking + * debug features on + * + * Is there already a priority list in kernel? + * Is it possible to handle clock using clock framework? + * uTimeOut, use mdelay instead? + * Measure performance + * + * Exchange our home-cooked ref count with kernel kref? See + * http://lwn.net/Articles/336224/ + * + * B2R2: + * Source fill 2 bug + * Check with Symbian? 
+ */ + +/* include file */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <linux/uaccess.h> +#include <linux/platform_device.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#ifdef CONFIG_DEBUG_FS +#include <linux/debugfs.h> +#endif +#include <linux/jiffies.h> +#include <linux/timer.h> +#include <linux/clk.h> +#include <linux/regulator/consumer.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/kref.h> + +#include "b2r2_internal.h" +#include "b2r2_core.h" +#include "b2r2_global.h" +#include "b2r2_structures.h" +#include "b2r2_control.h" +#include "b2r2_profiler_api.h" +#include "b2r2_timing.h" +#include "b2r2_debug.h" + +/** + * B2R2 Hardware defines below + */ + +/* - BLT_AQ_CTL */ +#define B2R2_AQ_Enab (0x80000000) +#define B2R2_AQ_PRIOR_0 (0x0) +#define B2R2_AQ_PRIOR_1 (0x1) +#define B2R2_AQ_PRIOR_2 (0x2) +#define B2R2_AQ_PRIOR_3 (0x3) +#define B2R2_AQ_NODE_REPEAT_INT (0x100000) +#define B2R2_AQ_STOP_INT (0x200000) +#define B2R2_AQ_LNA_REACH_INT (0x400000) +#define B2R2_AQ_COMPLETED_INT (0x800000) + +/* - BLT_CTL */ +#define B2R2BLT_CTLGLOBAL_soft_reset (0x80000000) +#define B2R2BLT_CTLStep_By_Step (0x20000000) +#define B2R2BLT_CTLBig_not_little (0x10000000) +#define B2R2BLT_CTLMask (0xb0000000) +#define B2R2BLT_CTLTestMask (0xb0000000) +#define B2R2BLT_CTLInitialValue (0x0) +#define B2R2BLT_CTLAccessType (INITIAL_TEST) +#define B2R2BLT_CTL (0xa00) + +/* - BLT_ITS */ +#define B2R2BLT_ITSRLD_ERROR (0x80000000) +#define B2R2BLT_ITSAQ4_Node_Notif (0x8000000) +#define B2R2BLT_ITSAQ4_Node_repeat (0x4000000) +#define B2R2BLT_ITSAQ4_Stopped (0x2000000) +#define B2R2BLT_ITSAQ4_LNA_Reached (0x1000000) +#define B2R2BLT_ITSAQ3_Node_Notif (0x800000) +#define B2R2BLT_ITSAQ3_Node_repeat (0x400000) +#define B2R2BLT_ITSAQ3_Stopped (0x200000) +#define B2R2BLT_ITSAQ3_LNA_Reached (0x100000) +#define B2R2BLT_ITSAQ2_Node_Notif (0x80000) +#define 
B2R2BLT_ITSAQ2_Node_repeat (0x40000) +#define B2R2BLT_ITSAQ2_Stopped (0x20000) +#define B2R2BLT_ITSAQ2_LNA_Reached (0x10000) +#define B2R2BLT_ITSAQ1_Node_Notif (0x8000) +#define B2R2BLT_ITSAQ1_Node_repeat (0x4000) +#define B2R2BLT_ITSAQ1_Stopped (0x2000) +#define B2R2BLT_ITSAQ1_LNA_Reached (0x1000) +#define B2R2BLT_ITSCQ2_Repaced (0x80) +#define B2R2BLT_ITSCQ2_Node_Notif (0x40) +#define B2R2BLT_ITSCQ2_retriggered (0x20) +#define B2R2BLT_ITSCQ2_completed (0x10) +#define B2R2BLT_ITSCQ1_Repaced (0x8) +#define B2R2BLT_ITSCQ1_Node_Notif (0x4) +#define B2R2BLT_ITSCQ1_retriggered (0x2) +#define B2R2BLT_ITSCQ1_completed (0x1) +#define B2R2BLT_ITSMask (0x8ffff0ff) +#define B2R2BLT_ITSTestMask (0x8ffff0ff) +#define B2R2BLT_ITSInitialValue (0x0) +#define B2R2BLT_ITSAccessType (INITIAL_TEST) +#define B2R2BLT_ITS (0xa04) + +/* - BLT_STA1 */ +#define B2R2BLT_STA1BDISP_IDLE (0x1) +#define B2R2BLT_STA1Mask (0x1) +#define B2R2BLT_STA1TestMask (0x1) +#define B2R2BLT_STA1InitialValue (0x1) +#define B2R2BLT_STA1AccessType (INITIAL_TEST) +#define B2R2BLT_STA1 (0xa08) + +/** + * b2r2_core - Quick link to administration data for B2R2 + */ +static struct b2r2_core *b2r2_core[B2R2_MAX_NBR_DEVICES]; + +/* Local functions */ +static void check_prio_list(struct b2r2_core *core, bool atomic); +static void clear_interrupts(struct b2r2_core *core); +static void trigger_job(struct b2r2_core *core, struct b2r2_core_job *job); +static void exit_job_list(struct b2r2_core *core, + struct list_head *job_list); +static void job_work_function(struct work_struct *ptr); +static void init_job(struct b2r2_core_job *job); +static void insert_into_prio_list(struct b2r2_core *core, + struct b2r2_core_job *job); +static struct b2r2_core_job *find_job_in_list(int job_id, + struct list_head *list); +static struct b2r2_core_job *find_job_in_active_jobs(struct b2r2_core *core, + int job_id); +static struct b2r2_core_job *find_tag_in_list(struct b2r2_core *core, + int tag, struct list_head *list); +static struct 
b2r2_core_job *find_tag_in_active_jobs(struct b2r2_core *core, + int tag); + +static int domain_enable(struct b2r2_core *core); +static void domain_disable(struct b2r2_core *core); + +static void stop_queue(enum b2r2_core_queue queue); + +#ifdef HANDLE_TIMEOUTED_JOBS +static void printk_regs(struct b2r2_core *core); +static int hw_reset(struct b2r2_core *core); +static void timeout_work_function(struct work_struct *ptr); +#endif + +static void reset_hw_timer(struct b2r2_core_job *job); +static void start_hw_timer(struct b2r2_core_job *job); +static void stop_hw_timer(struct b2r2_core *core, + struct b2r2_core_job *job); + +static int init_hw(struct b2r2_core *core); +static void exit_hw(struct b2r2_core *core); + +/* Tracking release bug... */ +#ifdef DEBUG_CHECK_ADDREF_RELEASE +/** + * ar_add() - Adds an addref or a release to the array + * + * @core: The b2r2 core entity + * @job: The job that has been referenced + * @caller: The caller of addref / release + * @addref: true if it is an addref else false for release + */ +static void ar_add(struct b2r2_core *core, struct b2r2_core_job *job, + const char *caller, bool addref) +{ + core->ar[core->ar_write].addref = addref; + core->ar[core->ar_write].job = job; + core->ar[core->ar_write].caller = caller; + core->ar[core->ar_write].ref_count = job->ref_count; + core->ar_write = (core->ar_write + 1) % + ARRAY_SIZE(core->ar); + if (core->ar_write == core->ar_read) + core->ar_read = (core->ar_read + 1) % + ARRAY_SIZE(core->ar); +} + +/** + * sprintf_ar() - Writes all addref / release to a string buffer + * + * @core: The b2r2 core entity + * @buf: Receiving character bufefr + * @job: Which job to write or NULL for all + * + * NOTE! No buffer size check!! 
+ */ +static char *sprintf_ar(struct b2r2_core *core, char *buf, + struct b2r2_core_job *job) +{ + int i; + int size = 0; + + for (i = core->ar_read; i != core->ar_write; + i = (i + 1) % ARRAY_SIZE(core->ar)) { + struct addref_release *ar = &core->ar[i]; + if (!job || job == ar->job) + size += sprintf(buf + size, + "%s on %p from %s, ref = %d\n", + ar->addref ? "addref" : "release", + ar->job, ar->caller, ar->ref_count); + } + + return buf; +} + +/** + * printk_ar() - Writes all addref / release using dev_info + * + * @core: The b2r2 core entity + * @job: Which job to write or NULL for all + */ +static void printk_ar(struct b2r2_core *core, struct b2r2_core_job *job) +{ + int i; + + for (i = core->ar_read; i != core->ar_write; + i = (i + 1) % ARRAY_SIZE(core->ar)) { + struct addref_release *ar = &core->ar[i]; + if (!job || job == ar->job) + b2r2_log_info(core->dev, "%s on %p from %s," + " ref = %d\n", + ar->addref ? "addref" : "release", + ar->job, ar->caller, ar->ref_count); + } +} +#endif + +/** + * internal_job_addref() - Increments the reference count for a job + * + * @core: The b2r2 core entity + * @job: Which job to increment reference count for + * @caller: Name of function calling addref (for debug) + * + * Note that core->lock _must_ be held + */ +static void internal_job_addref(struct b2r2_core *core, + struct b2r2_core_job *job, const char *caller) +{ + u32 ref_count; + + /* Sanity checks */ + BUG_ON(core == NULL); + BUG_ON(job == NULL); + + b2r2_log_info(core->dev, "%s (core: %p, job: %p) (from %s)\n", + __func__, core, job, caller); + + + if (job->start_sentinel != START_SENTINEL || + job->end_sentinel != END_SENTINEL || + job->ref_count == 0 || job->ref_count > 10) { + b2r2_log_info(core->dev, "%s: (core: %p, job: %p) " + "start=%X end=%X ref_count=%d\n", + __func__, core, job, job->start_sentinel, + job->end_sentinel, job->ref_count); + + /* Something is wrong, print the addref / release array */ +#ifdef DEBUG_CHECK_ADDREF_RELEASE + printk_ar(core, 
NULL); +#endif + } + + + BUG_ON(job->start_sentinel != START_SENTINEL); + BUG_ON(job->end_sentinel != END_SENTINEL); + + /* Do the actual reference count increment */ + ref_count = ++job->ref_count; + +#ifdef DEBUG_CHECK_ADDREF_RELEASE + /* Keep track of addref / release */ + ar_add(core, job, caller, true); +#endif + + b2r2_log_info(core->dev, "%s called from %s (core: %p, job: %p): Ref " + "Count is %d\n", __func__, caller, core, job, job->ref_count); +} + +/** + * internal_job_release() - Decrements the reference count for a job + * + * @core: The b2r2 core entity + * @job: Which job to decrement reference count for + * @caller: Name of function calling release (for debug) + * + * Returns true if job_release should be called by caller + * (reference count reached zero). + * + * Note that core->lock _must_ be held + */ +static bool internal_job_release(struct b2r2_core *core, + struct b2r2_core_job *job, const char *caller) +{ + u32 ref_count; + bool call_release = false; + + /* Sanity checks */ + BUG_ON(job == NULL); + + b2r2_log_info(core->dev, "%s (core: %p, job: %p) (from %s)\n", + __func__, core, job, caller); + + if (job->start_sentinel != START_SENTINEL || + job->end_sentinel != END_SENTINEL || + job->ref_count == 0 || job->ref_count > 10) { + b2r2_log_info(core->dev, "%s: (core: %p, job: %p) start=%X " + "end=%X ref_count=%d\n", __func__, core, job, + job->start_sentinel, job->end_sentinel, + job->ref_count); + +#ifdef DEBUG_CHECK_ADDREF_RELEASE + printk_ar(core, NULL); +#endif + } + + BUG_ON(job->start_sentinel != START_SENTINEL); + BUG_ON(job->end_sentinel != END_SENTINEL); + BUG_ON(job->ref_count == 0 || job->ref_count > 10); + + /* Do the actual decrement */ + ref_count = --job->ref_count; +#ifdef DEBUG_CHECK_ADDREF_RELEASE + ar_add(core, job, caller, false); +#endif + b2r2_log_info(core->dev, "%s called from %s (core: %p, job: %p) " + "Ref Count is %d\n", __func__, caller, core, job, ref_count); + + if (!ref_count && job->release) { + call_release = 
true; + /* Job will now cease to exist */ + job->start_sentinel = 0xFFFFFFFF; + job->end_sentinel = 0xFFFFFFFF; + } + return call_release; +} + + + +/* Exported functions */ + +/** + * core->lock _must_ _NOT_ be held when calling this function + */ +void b2r2_core_job_addref(struct b2r2_core_job *job, const char *caller) +{ + unsigned long flags; + struct b2r2_core *core; + + BUG_ON(job == NULL || job->data == 0); + core = (struct b2r2_core *) job->data; + + spin_lock_irqsave(&core->lock, flags); + internal_job_addref(core, job, caller); + spin_unlock_irqrestore(&core->lock, flags); +} + +/** + * core->lock _must_ _NOT_ be held when calling this function + */ +void b2r2_core_job_release(struct b2r2_core_job *job, const char *caller) +{ + unsigned long flags; + bool call_release = false; + struct b2r2_core *core; + + BUG_ON(job == NULL || job->data == 0); + core = (struct b2r2_core *) job->data; + + spin_lock_irqsave(&core->lock, flags); + call_release = internal_job_release(core, job, caller); + spin_unlock_irqrestore(&core->lock, flags); + + if (call_release) + job->release(job); +} + +/** + * core->lock _must_ _NOT_ be held when calling this function + */ +int b2r2_core_job_add(struct b2r2_control *control, + struct b2r2_core_job *job) +{ + unsigned long flags; + struct b2r2_core *core = control->data; + + b2r2_log_info(core->dev, "%s (core: %p, job: %p)\n", + __func__, core, job); + + /* Enable B2R2 */ + domain_enable(core); + + spin_lock_irqsave(&core->lock, flags); + /* Check that we have not been powered down */ + if (!core->domain_enabled) { + spin_unlock_irqrestore(&core->lock, flags); + return -ENOSYS; + } + + core->stat_n_jobs_added++; + + /* Initialise internal job data */ + init_job(job); + + /* Initial reference, should be released by caller of this function */ + job->ref_count = 1; + + /* Insert job into prio list */ + insert_into_prio_list(core, job); + + /* Check if we can dispatch job */ + check_prio_list(core, false); + 
spin_unlock_irqrestore(&core->lock, flags); + + return job->job_id; +} + +/** + * core->lock _must_ _NOT_ be held when calling this function + */ +struct b2r2_core_job *b2r2_core_job_find(struct b2r2_control *control, + int job_id) +{ + unsigned long flags; + struct b2r2_core_job *job; + struct b2r2_core *core = control->data; + + b2r2_log_info(core->dev, "%s (core: %p, job_id: %d)\n", + __func__, core, job_id); + + spin_lock_irqsave(&core->lock, flags); + /* Look through prio queue */ + job = find_job_in_list(job_id, &core->prio_queue); + + if (!job) + job = find_job_in_active_jobs(core, job_id); + + spin_unlock_irqrestore(&core->lock, flags); + + return job; +} + +/** + * core->lock _must_ _NOT_ be held when calling this function + */ +struct b2r2_core_job *b2r2_core_job_find_first_with_tag( + struct b2r2_control *control, int tag) +{ + unsigned long flags; + struct b2r2_core_job *job; + struct b2r2_core *core = control->data; + + b2r2_log_info(core->dev, + "%s (core: %p, tag: %d)\n", __func__, core, tag); + + spin_lock_irqsave(&core->lock, flags); + /* Look through prio queue */ + job = find_tag_in_list(core, tag, &core->prio_queue); + + if (!job) + job = find_tag_in_active_jobs(core, tag); + + spin_unlock_irqrestore(&core->lock, flags); + + return job; +} + +/** + * is_job_done() - Spin lock protected check if job is done + * + * @job: Job to check + * + * Returns true if job is done or cancelled + * + * core->lock must _NOT_ be held when calling this function + */ +static bool is_job_done(struct b2r2_core_job *job) +{ + unsigned long flags; + bool job_is_done; + struct b2r2_core *core = (struct b2r2_core *) job->data; + + spin_lock_irqsave(&core->lock, flags); + job_is_done = + job->job_state != B2R2_CORE_JOB_QUEUED && + job->job_state != B2R2_CORE_JOB_RUNNING; + spin_unlock_irqrestore(&core->lock, flags); + + return job_is_done; +} + +/** + * b2r2_core_job_wait() + * + * @job: + * + * core->lock _must_ _NOT_ be held when calling this function + */ +int 
b2r2_core_job_wait(struct b2r2_core_job *job) +{ + int ret = 0; +#ifdef CONFIG_B2R2_DEBUG + struct b2r2_core *core = (struct b2r2_core *) job->data; +#endif + + b2r2_log_info(core->dev, "%s (core: %p, job: %p)\n", + __func__, core, job); + /* Check that we have the job */ + if (job->job_state == B2R2_CORE_JOB_IDLE) { + /* Never or not queued */ + b2r2_log_info(core->dev, "%s: Job not queued\n", __func__); + return -ENOENT; + } + + /* Wait for the job to be done */ + ret = wait_event_interruptible( + job->event, + is_job_done(job)); + + if (ret) + b2r2_log_warn(core->dev, + "%s: wait_event_interruptible returns %d state is %d", + __func__, ret, job->job_state); + return ret; +} + +/** + * cancel_job() - Cancels a job (removes it from prio list or active jobs) and + * calls the job callback + * + * @job: Job to cancel + * + * Returns true if the job was found and cancelled + * + * core->lock must be held when calling this function + */ +static bool cancel_job(struct b2r2_core *core, struct b2r2_core_job *job) +{ + bool found_job = false; + bool job_was_active = false; + + /* Remove from prio list */ + if (job->job_state == B2R2_CORE_JOB_QUEUED) { + list_del_init(&job->list); + found_job = true; + } + + /* Remove from active jobs */ + if (!found_job && core->n_active_jobs > 0) { + int i; + + /* Look for timeout:ed jobs and put them in tmp list */ + for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++) { + if (core->active_jobs[i] == job) { + stop_queue((enum b2r2_core_queue)i); + stop_hw_timer(core, job); + core->active_jobs[i] = NULL; + core->n_active_jobs--; + found_job = true; + job_was_active = true; + } + } + } + + /* Handle done list & callback */ + if (found_job) { + /* Job is canceled */ + job->job_state = B2R2_CORE_JOB_CANCELED; + + queue_work(core->work_queue, &job->work); + + /* Statistics */ + if (!job_was_active) + core->stat_n_jobs_in_prio_list--; + + } + + return found_job; +} + +/* core->lock _must_ _NOT_ be held when calling this function */ +int 
b2r2_core_job_cancel(struct b2r2_core_job *job) +{ + unsigned long flags; + int ret = 0; + struct b2r2_core *core = (struct b2r2_core *) job->data; + + b2r2_log_info(core->dev, "%s (core: %p, job: %p) (st: %d)\n", + __func__, core, job, job->job_state); + /* Check that we have the job */ + if (job->job_state == B2R2_CORE_JOB_IDLE) { + /* Never or not queued */ + b2r2_log_info(core->dev, "%s: Job not queued\n", __func__); + return -ENOENT; + } + + /* Remove from prio list */ + spin_lock_irqsave(&core->lock, flags); + cancel_job(core, job); + spin_unlock_irqrestore(&core->lock, flags); + + return ret; +} + +/* LOCAL FUNCTIONS BELOW */ + +/** + * domain_disable_work_function() + * + * @core: The b2r2 core entity + */ +static void domain_disable_work_function(struct work_struct *work) +{ + struct delayed_work *twork = to_delayed_work(work); + struct b2r2_core *core = container_of( + twork, struct b2r2_core, domain_disable_work); + + if (!mutex_trylock(&core->domain_lock)) + return; + + if (core->domain_request_count == 0) { + core->valid = false; + exit_hw(core); + clk_disable(core->b2r2_clock); + regulator_disable(core->b2r2_reg); + core->domain_enabled = false; + } + + mutex_unlock(&core->domain_lock); +} + +/** + * domain_enable() + * + * @core: The b2r2 core entity + */ +static int domain_enable(struct b2r2_core *core) +{ + mutex_lock(&core->domain_lock); + core->domain_request_count++; + + if (!core->domain_enabled) { + int retry = 0; + int ret; +again: + /* + * Since regulator_enable() may sleep we have to handle + * interrupts. 
+ */ + ret = regulator_enable(core->b2r2_reg); + if ((ret == -EAGAIN) && + ((retry++) < B2R2_REGULATOR_RETRY_COUNT)) + goto again; + else if (ret < 0) + goto regulator_enable_failed; + + ret = clk_enable(core->b2r2_clock); + if (ret < 0) { + b2r2_log_err(core->dev, + "%s: Could not enable clock\n", __func__); + goto enable_clk_failed; + } + if (init_hw(core) < 0) + goto init_hw_failed; + core->domain_enabled = true; + core->valid = true; + } + + mutex_unlock(&core->domain_lock); + + return 0; + +init_hw_failed: + b2r2_log_err(core->dev, + "%s: Could not initialize hardware!\n", __func__); + clk_disable(core->b2r2_clock); + +enable_clk_failed: + if (regulator_disable(core->b2r2_reg) < 0) + b2r2_log_err(core->dev, "%s: regulator_disable failed!\n", + __func__); + +regulator_enable_failed: + core->domain_request_count--; + mutex_unlock(&core->domain_lock); + + return -EFAULT; +} + +/** + * domain_disable() + * + * @core: The b2r2 core entity + */ +static void domain_disable(struct b2r2_core *core) +{ + mutex_lock(&core->domain_lock); + + if (core->domain_request_count == 0) { + b2r2_log_err(core->dev, + "%s: Unbalanced domain_disable()\n", __func__); + } else { + core->domain_request_count--; + + /* Cancel any existing work */ + cancel_delayed_work_sync(&core->domain_disable_work); + + /* Add a work to disable the power and clock after a delay */ + queue_delayed_work(core->work_queue, &core->domain_disable_work, + B2R2_DOMAIN_DISABLE_TIMEOUT); + } + + mutex_unlock(&core->domain_lock); +} + +/** + * stop_queue() - Stops the specified queue. + */ +static void stop_queue(enum b2r2_core_queue queue) +{ + /* TODO: Implement! If this function is not implemented canceled jobs + * will use b2r2 which is a waste of resources. Not stopping jobs will + * also screw up the hardware timing, the job the canceled job + * intrerrupted (if any) will be billed for the time between the point + * where the job is cancelled and when it stops. 
*/ +} + +/** + * exit_job_list() - Empties a job queue by canceling the jobs + * + * @core: The b2r2 core entity + * + * core->lock _must_ be held when calling this function + */ +static void exit_job_list(struct b2r2_core *core, + struct list_head *job_queue) +{ + while (!list_empty(job_queue)) { + struct b2r2_core_job *job = + list_entry(job_queue->next, + struct b2r2_core_job, + list); + /* Add reference to prevent job from disappearing + in the middle of our work, released below */ + internal_job_addref(core, job, __func__); + + cancel_job(core, job); + + /* Matching release to addref above */ + internal_job_release(core, job, __func__); + + } +} + +/** + * job_work_function() - Work queue function that calls callback(s) and + * checks if B2R2 can accept a new job + * + * @ptr: Pointer to work struct (embedded in struct b2r2_core_job) + */ +static void job_work_function(struct work_struct *ptr) +{ + unsigned long flags; + struct b2r2_core_job *job = + container_of(ptr, struct b2r2_core_job, work); + struct b2r2_core *core = (struct b2r2_core *) job->data; + + /* Disable B2R2 */ + domain_disable(core); + + /* Release resources */ + if (job->release_resources) + job->release_resources(job, false); + + spin_lock_irqsave(&core->lock, flags); + + /* Dispatch a new job if possible */ + check_prio_list(core, false); + + spin_unlock_irqrestore(&core->lock, flags); + + /* Tell the client */ + if (job->callback) + job->callback(job); + + /* Drop our reference, matches the + addref in handle_queue_event or b2r2_core_job_cancel */ + b2r2_core_job_release(job, __func__); +} + +#ifdef HANDLE_TIMEOUTED_JOBS +/** + * timeout_work_function() - Work queue function that checks for + * timeout:ed jobs. B2R2 might silently refuse + * to execute some jobs, i.e. 
SRC2 fill + * + * @ptr: Pointer to work struct (embedded in struct b2r2_core) + * + */ +static void timeout_work_function(struct work_struct *ptr) +{ + unsigned long flags; + struct list_head job_list; + struct delayed_work *twork = to_delayed_work(ptr); + struct b2r2_core *core = container_of(twork, struct b2r2_core, + timeout_work); + + INIT_LIST_HEAD(&job_list); + + /* Cancel all jobs if too long time since last irq */ + spin_lock_irqsave(&core->lock, flags); + if (core->n_active_jobs > 0) { + unsigned long diff = + (long) jiffies - (long) core->jiffies_last_irq; + if (diff > JOB_TIMEOUT) { + /* Active jobs and more than a second since last irq! */ + int i; + + b2r2_core_print_stats(core); + + /* Look for timeout:ed jobs and put them in tmp list. + * It's important that the application queues are + * killed in order of decreasing priority */ + for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++) { + struct b2r2_core_job *job = + core->active_jobs[i]; + + if (job) { + stop_hw_timer(core, job); + core->active_jobs[i] = NULL; + core->n_active_jobs--; + list_add_tail(&job->list, &job_list); + } + } + + /* Print the B2R2 register and reset B2R2 */ + printk_regs(core); + hw_reset(core); + } + } + spin_unlock_irqrestore(&core->lock, flags); + + /* Handle timeout:ed jobs */ + spin_lock_irqsave(&core->lock, flags); + while (!list_empty(&job_list)) { + struct b2r2_core_job *job = + list_entry(job_list.next, + struct b2r2_core_job, + list); + + b2r2_log_warn(core->dev, "%s: Job timeout\n", __func__); + + list_del_init(&job->list); + + /* Job is cancelled */ + job->job_state = B2R2_CORE_JOB_CANCELED; + + /* Handle done */ + wake_up_interruptible(&job->event); + + /* Job callbacks handled via work queue */ + queue_work(core->work_queue, &job->work); + } + + /* Requeue delayed work */ + if (core->n_active_jobs) + queue_delayed_work( + core->work_queue, + &core->timeout_work, JOB_TIMEOUT); + + spin_unlock_irqrestore(&core->lock, flags); +} +#endif + +/** + * reset_hw_timer() - 
Resets a job's hardware timer. Must be called before + * the timer is used. + * + * @job: Pointer to job struct + * + * core->lock _must_ be held when calling this function + */ +static void reset_hw_timer(struct b2r2_core_job *job) +{ + job->nsec_active_in_hw = 0; +} + +/** + * start_hw_timer() - Times how long a job spends in hardware (active). + * Should be called immediatly before starting the + * hardware. + * + * @job: Pointer to job struct + * + * core->lock _must_ be held when calling this function + */ +static void start_hw_timer(struct b2r2_core_job *job) +{ + job->hw_start_time = b2r2_get_curr_nsec(); +} + +/** + * stop_hw_timer() - Times how long a job spends in hardware (active). + * Should be called immediatly after the hardware has + * finished. + * + * @core: The b2r2 core entity + * @job: Pointer to job struct + * + * core->lock _must_ be held when calling this function + */ +static void stop_hw_timer(struct b2r2_core *core, struct b2r2_core_job *job) +{ + /* Assumes only app queues are used, which is the case right now. */ + /* Not 100% accurate. When a higher prio job interrupts a lower prio job it does + so after the current node of the low prio job has finished. Currently we can not + sense when the actual switch takes place so the time reported for a job that + interrupts a lower prio job will on average contain the time it takes to process + half a node in the lower prio job in addition to the time it takes to process the + job's own nodes. This could possibly be solved by adding node notifications but + that would involve a significant amount of work and consume system resources due + to the extra interrupts. */ + /* If a job takes more than ~2s (absolute time, including idleing in the hardware) + the state of the hardware timer will be corrupted and it will not report valid + values until b2r2 becomes idle (no active jobs on any queues). The maximum length + can possibly be increased by using 64 bit integers. 
*/ + + int i; + + u32 stop_time_raw = b2r2_get_curr_nsec(); + /* We'll add an offset to all positions in time to make the current time equal to + 0xFFFFFFFF. This way we can compare positions in time to each other without having + to wory about wrapping (so long as all positions in time are in the past). */ + u32 stop_time = 0xFFFFFFFF; + u32 time_pos_offset = 0xFFFFFFFF - stop_time_raw; + u32 nsec_in_hw = stop_time - (job->hw_start_time + time_pos_offset); + job->nsec_active_in_hw += (s32)nsec_in_hw; + + /* Check if we have delayed the start of higher prio jobs. Can happen as queue + switching only can be done between nodes. */ + for (i = (int)job->queue - 1; i >= (int)B2R2_CORE_QUEUE_AQ1; i--) { + struct b2r2_core_job *queue_active_job = core->active_jobs[i]; + if (NULL == queue_active_job) + continue; + + queue_active_job->hw_start_time = stop_time_raw; + } + + /* Check if the job has stolen time from lower prio jobs */ + for (i = (int)job->queue + 1; i < B2R2_NUM_APPLICATIONS_QUEUES; i++) { + struct b2r2_core_job *queue_active_job = core->active_jobs[i]; + u32 queue_active_job_hw_start_time; + + if (NULL == queue_active_job) + continue; + + queue_active_job_hw_start_time = + queue_active_job->hw_start_time + + time_pos_offset; + + if (queue_active_job_hw_start_time < stop_time) { + u32 queue_active_job_nsec_in_hw = stop_time - + queue_active_job_hw_start_time; + u32 num_stolen_nsec = min(queue_active_job_nsec_in_hw, + nsec_in_hw); + + queue_active_job->nsec_active_in_hw -= (s32)num_stolen_nsec; + + nsec_in_hw -= num_stolen_nsec; + stop_time -= num_stolen_nsec; + } + + if (0 == nsec_in_hw) + break; + } +} + +/** + * init_job() - Initializes a job structure from filled in client data. 
+ * Reference count will be set to 1 + * + * @job: Job to initialize + */ +static void init_job(struct b2r2_core_job *job) +{ + + job->start_sentinel = START_SENTINEL; + job->end_sentinel = END_SENTINEL; + + /* Job is idle, never queued */ + job->job_state = B2R2_CORE_JOB_IDLE; + + /* Initialize internal data */ + INIT_LIST_HEAD(&job->list); + init_waitqueue_head(&job->event); + INIT_WORK(&job->work, job_work_function); + + /* Map given prio to B2R2 queues */ + if (job->prio < B2R2_CORE_LOWEST_PRIO) + job->prio = B2R2_CORE_LOWEST_PRIO; + else if (job->prio > B2R2_CORE_HIGHEST_PRIO) + job->prio = B2R2_CORE_HIGHEST_PRIO; + + if (job->prio > 10) { + job->queue = B2R2_CORE_QUEUE_AQ1; + job->interrupt_context = + (B2R2BLT_ITSAQ1_LNA_Reached); + job->control = (B2R2_AQ_Enab | B2R2_AQ_PRIOR_3); + } else if (job->prio > 0) { + job->queue = B2R2_CORE_QUEUE_AQ2; + job->interrupt_context = + (B2R2BLT_ITSAQ2_LNA_Reached); + job->control = (B2R2_AQ_Enab | B2R2_AQ_PRIOR_2); + } else if (job->prio > -10) { + job->queue = B2R2_CORE_QUEUE_AQ3; + job->interrupt_context = + (B2R2BLT_ITSAQ3_LNA_Reached); + job->control = (B2R2_AQ_Enab | B2R2_AQ_PRIOR_1); + } else { + job->queue = B2R2_CORE_QUEUE_AQ4; + job->interrupt_context = + (B2R2BLT_ITSAQ4_LNA_Reached); + job->control = (B2R2_AQ_Enab | B2R2_AQ_PRIOR_0); + } +} + +/** + * clear_interrupts() - Disables all interrupts + * + * core->lock _must_ be held + */ +static void clear_interrupts(struct b2r2_core *core) +{ + writel(0x0, &core->hw->BLT_ITM0); + writel(0x0, &core->hw->BLT_ITM1); + writel(0x0, &core->hw->BLT_ITM2); + writel(0x0, &core->hw->BLT_ITM3); +} + +/** + * insert_into_prio_list() - Inserts the job into the sorted list of jobs. + * The list is sorted by priority. 
+ * + * @core: The b2r2 core entity + * @job: Job to insert + * + * core->lock _must_ be held + */ +static void insert_into_prio_list(struct b2r2_core *core, + struct b2r2_core_job *job) +{ + /* Ref count is increased when job put in list, + should be released when job is removed from list */ + internal_job_addref(core, job, __func__); + + core->stat_n_jobs_in_prio_list++; + + /* Sort in the job */ + if (list_empty(&core->prio_queue)) + list_add_tail(&job->list, &core->prio_queue); + else { + struct b2r2_core_job *first_job = list_entry( + core->prio_queue.next, + struct b2r2_core_job, list); + struct b2r2_core_job *last_job = list_entry( + core->prio_queue.prev, + struct b2r2_core_job, list); + + if (job->prio > first_job->prio) + list_add(&job->list, &core->prio_queue); + else if (job->prio <= last_job->prio) + list_add_tail(&job->list, &core->prio_queue); + else { + /* We need to find where to put it */ + struct list_head *ptr; + + list_for_each(ptr, &core->prio_queue) { + struct b2r2_core_job *list_job = + list_entry(ptr, struct b2r2_core_job, + list); + if (job->prio > list_job->prio) { + list_add_tail(&job->list, + &list_job->list); + break; + } + } + } + } + /* The job is now queued */ + job->job_state = B2R2_CORE_JOB_QUEUED; +} + +/** + * check_prio_list() - Checks if the first job(s) in the prio list can + * be dispatched to B2R2 + * + * @core: The b2r2 core entity + * @atomic: true if in atomic context (i.e. interrupt context) + * + * core->lock _must_ be held + */ +static void check_prio_list(struct b2r2_core *core, bool atomic) +{ + bool dispatched_job; + int n_dispatched = 0; + struct b2r2_core_job *job; + + do { + dispatched_job = false; + + /* Do we have anything in our prio list? */ + if (list_empty(&core->prio_queue)) + break; + + /* The first job waiting */ + job = list_first_entry(&core->prio_queue, + struct b2r2_core_job, list); + + /* Is the B2R2 queue available? 
*/ + if (core->active_jobs[job->queue] != NULL) + break; + + /* Can we acquire resources? */ + if (!job->acquire_resources || + job->acquire_resources(job, atomic) == 0) { + /* Ok to dispatch job */ + + /* Remove from list */ + list_del_init(&job->list); + + /* The job is now active */ + core->active_jobs[job->queue] = job; + core->n_active_jobs++; + job->jiffies = jiffies; + core->jiffies_last_active = jiffies; + + /* Kick off B2R2 */ + trigger_job(core, job); + dispatched_job = true; + n_dispatched++; + +#ifdef HANDLE_TIMEOUTED_JOBS + /* Check in one half second if it hangs */ + queue_delayed_work(core->work_queue, + &core->timeout_work, JOB_TIMEOUT); +#endif + } else { + /* No resources */ + if (!atomic && core->n_active_jobs == 0) { + b2r2_log_warn(core->dev, + "%s: No resource", __func__); + cancel_job(core, job); + } + } + } while (dispatched_job); + + core->stat_n_jobs_in_prio_list -= n_dispatched; +} + +/** + * find_job_in_list() - Finds job with job_id in list + * + * @jobid: Job id to find + * @list: List to find job id in + * + * Reference count will be incremented for found job. + * + * core->lock _must_ be held + */ +static struct b2r2_core_job *find_job_in_list(int job_id, + struct list_head *list) +{ + struct list_head *ptr; + + list_for_each(ptr, list) { + struct b2r2_core_job *job = list_entry( + ptr, struct b2r2_core_job, list); + if (job->job_id == job_id) { + struct b2r2_core *core = (struct b2r2_core *) job->data; + /* Increase reference count, should be released by + the caller of b2r2_core_job_find */ + internal_job_addref(core, job, __func__); + return job; + } + } + return NULL; +} + +/** + * find_job_in_active_jobs() - Finds job in active job queues + * + * @core: The b2r2 core entity + * @job_id: Job id to find + * + * Reference count will be incremented for found job. 
+ * + * core->lock _must_ be held + */ +static struct b2r2_core_job *find_job_in_active_jobs(struct b2r2_core *core, + int job_id) +{ + int i; + struct b2r2_core_job *found_job = NULL; + + if (core->n_active_jobs) { + for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++) { + struct b2r2_core_job *job = core->active_jobs[i]; + + if (job && job->job_id == job_id) { + internal_job_addref(core, job, __func__); + found_job = job; + break; + } + } + } + return found_job; +} + +/** + * find_tag_in_list() - Finds first job with tag in list + * + * @tag: Tag to find + * @list: List to find job id in + * + * Reference count will be incremented for found job. + * + * core->lock must be held + */ +static struct b2r2_core_job *find_tag_in_list(struct b2r2_core *core, + int tag, struct list_head *list) +{ + struct list_head *ptr; + + list_for_each(ptr, list) { + struct b2r2_core_job *job = + list_entry(ptr, struct b2r2_core_job, list); + if (job->tag == tag) { + /* Increase reference count, should be released by + the caller of b2r2_core_job_find */ + internal_job_addref(core, job, __func__); + return job; + } + } + return NULL; +} + +/** + * find_tag_in_active_jobs() - Finds job with tag in active job queues + * + * @tag: Tag to find + * + * Reference count will be incremented for found job. 
+ * + * core->lock must be held + */ +static struct b2r2_core_job *find_tag_in_active_jobs(struct b2r2_core *core, + int tag) +{ + int i; + struct b2r2_core_job *found_job = NULL; + + if (core->n_active_jobs) { + for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++) { + struct b2r2_core_job *job = core->active_jobs[i]; + + if (job && job->tag == tag) { + internal_job_addref(core, job, __func__); + found_job = job; + break; + } + } + } + return found_job; +} + + +#ifdef HANDLE_TIMEOUTED_JOBS +/** + * hw_reset() - Resets B2R2 hardware + * + * core->lock must be held + */ +static int hw_reset(struct b2r2_core *core) +{ + u32 uTimeOut = B2R2_RESET_TIMEOUT_VALUE; + + /* Tell B2R2 to reset */ + writel(readl(&core->hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset, + &core->hw->BLT_CTL); + writel(0x00000000, &core->hw->BLT_CTL); + + b2r2_log_info(core->dev, "wait for B2R2 to be idle..\n"); + + /** Wait for B2R2 to be idle (on a timeout rather than while loop) */ + while ((uTimeOut > 0) && + ((readl(&core->hw->BLT_STA1) & + B2R2BLT_STA1BDISP_IDLE) == 0x0)) + uTimeOut--; + + if (uTimeOut == 0) { + b2r2_log_warn(core->dev, + "error-> after software reset B2R2 is not idle\n"); + return -EAGAIN; + } + + return 0; + +} +#endif + +/** + * trigger_job() - Put job in B2R2 HW queue + * + * @job: Job to trigger + * + * core->lock must be held + */ +static void trigger_job(struct b2r2_core *core, struct b2r2_core_job *job) +{ + /* Debug prints */ + b2r2_log_info(core->dev, "queue 0x%x\n", job->queue); + b2r2_log_info(core->dev, "BLT TRIG_IP 0x%x (first node)\n", + job->first_node_address); + b2r2_log_info(core->dev, "BLT LNA_CTL 0x%x (last node)\n", + job->last_node_address); + b2r2_log_info(core->dev, "BLT TRIG_CTL 0x%x\n", job->control); + b2r2_log_info(core->dev, "BLT PACE_CTL 0x%x\n", job->pace_control); + + reset_hw_timer(job); + job->job_state = B2R2_CORE_JOB_RUNNING; + + /* Enable interrupt */ + writel(readl(&core->hw->BLT_ITM0) | job->interrupt_context, + &core->hw->BLT_ITM0); + + 
writel(min_t(u8, max_t(u8, core->op_size, B2R2_PLUG_OPCODE_SIZE_8), + B2R2_PLUG_OPCODE_SIZE_64), &core->hw->PLUGS1_OP2); + writel(min_t(u8, core->ch_size, B2R2_PLUG_CHUNK_SIZE_128), + &core->hw->PLUGS1_CHZ); + writel(min_t(u8, core->mg_size, B2R2_PLUG_MESSAGE_SIZE_128) | + (core->min_req_time << 16), &core->hw->PLUGS1_MSZ); + writel(min_t(u8, core->pg_size, B2R2_PLUG_PAGE_SIZE_256), + &core->hw->PLUGS1_PGZ); + + writel(min_t(u8, max_t(u8, core->op_size, B2R2_PLUG_OPCODE_SIZE_8), + B2R2_PLUG_OPCODE_SIZE_64), &core->hw->PLUGS2_OP2); + writel(min_t(u8, core->ch_size, B2R2_PLUG_CHUNK_SIZE_128), + &core->hw->PLUGS2_CHZ); + writel(min_t(u8, core->mg_size, B2R2_PLUG_MESSAGE_SIZE_128) | + (core->min_req_time << 16), &core->hw->PLUGS2_MSZ); + writel(min_t(u8, core->pg_size, B2R2_PLUG_PAGE_SIZE_256), + &core->hw->PLUGS2_PGZ); + + writel(min_t(u8, max_t(u8, core->op_size, B2R2_PLUG_OPCODE_SIZE_8), + B2R2_PLUG_OPCODE_SIZE_64), &core->hw->PLUGS3_OP2); + writel(min_t(u8, core->ch_size, B2R2_PLUG_CHUNK_SIZE_128), + &core->hw->PLUGS3_CHZ); + writel(min_t(u8, core->mg_size, B2R2_PLUG_MESSAGE_SIZE_128) | + (core->min_req_time << 16), &core->hw->PLUGS3_MSZ); + writel(min_t(u8, core->pg_size, B2R2_PLUG_PAGE_SIZE_256), + &core->hw->PLUGS3_PGZ); + + writel(min_t(u8, max_t(u8, core->op_size, B2R2_PLUG_OPCODE_SIZE_8), + B2R2_PLUG_OPCODE_SIZE_64), &core->hw->PLUGT_OP2); + writel(min_t(u8, core->ch_size, B2R2_PLUG_CHUNK_SIZE_128), + &core->hw->PLUGT_CHZ); + writel(min_t(u8, core->mg_size, B2R2_PLUG_MESSAGE_SIZE_128) | + (core->min_req_time << 16), &core->hw->PLUGT_MSZ); + writel(min_t(u8, core->pg_size, B2R2_PLUG_PAGE_SIZE_256), + &core->hw->PLUGT_PGZ); + + /* B2R2 kicks off when LNA is written, LNA write must be last! 
*/ + switch (job->queue) { + case B2R2_CORE_QUEUE_CQ1: + writel(job->first_node_address, &core->hw->BLT_CQ1_TRIG_IP); + writel(job->control, &core->hw->BLT_CQ1_TRIG_CTL); + writel(job->pace_control, &core->hw->BLT_CQ1_PACE_CTL); + break; + + case B2R2_CORE_QUEUE_CQ2: + writel(job->first_node_address, &core->hw->BLT_CQ2_TRIG_IP); + writel(job->control, &core->hw->BLT_CQ2_TRIG_CTL); + writel(job->pace_control, &core->hw->BLT_CQ2_PACE_CTL); + break; + + case B2R2_CORE_QUEUE_AQ1: + writel(job->control, &core->hw->BLT_AQ1_CTL); + writel(job->first_node_address, &core->hw->BLT_AQ1_IP); + wmb(); + start_hw_timer(job); + writel(job->last_node_address, &core->hw->BLT_AQ1_LNA); + break; + + case B2R2_CORE_QUEUE_AQ2: + writel(job->control, &core->hw->BLT_AQ2_CTL); + writel(job->first_node_address, &core->hw->BLT_AQ2_IP); + wmb(); + start_hw_timer(job); + writel(job->last_node_address, &core->hw->BLT_AQ2_LNA); + break; + + case B2R2_CORE_QUEUE_AQ3: + writel(job->control, &core->hw->BLT_AQ3_CTL); + writel(job->first_node_address, &core->hw->BLT_AQ3_IP); + wmb(); + start_hw_timer(job); + writel(job->last_node_address, &core->hw->BLT_AQ3_LNA); + break; + + case B2R2_CORE_QUEUE_AQ4: + writel(job->control, &core->hw->BLT_AQ4_CTL); + writel(job->first_node_address, &core->hw->BLT_AQ4_IP); + wmb(); + start_hw_timer(job); + writel(job->last_node_address, &core->hw->BLT_AQ4_LNA); + break; + + /** Handle the default case */ + default: + break; + + } /* end switch */ + +} + +/** + * handle_queue_event() - Handles interrupt event for specified B2R2 queue + * + * @queue: Queue to handle event for + * + * core->lock must be held + */ +static void handle_queue_event(struct b2r2_core *core, + enum b2r2_core_queue queue) +{ + struct b2r2_core_job *job; + + job = core->active_jobs[queue]; + if (job) { + if (job->job_state != B2R2_CORE_JOB_RUNNING) + /* Should be running + Severe error. 
TBD */ + b2r2_log_warn(core->dev, + "%s: Job is not running", __func__); + + stop_hw_timer(core, job); + + /* Remove from queue */ + BUG_ON(core->n_active_jobs == 0); + core->active_jobs[queue] = NULL; + core->n_active_jobs--; + } + + if (!job) { + /* No job, error? */ + b2r2_log_warn(core->dev, "%s: No job", __func__); + return; + } + + + /* Atomic context release resources, release resources will + be called again later from process context (work queue) */ + if (job->release_resources) + job->release_resources(job, true); + + /* Job is done */ + job->job_state = B2R2_CORE_JOB_DONE; + + /* Handle done */ + wake_up_interruptible(&job->event); + + /* Dispatch to work queue to handle callbacks */ + queue_work(core->work_queue, &job->work); +} + +/** + * process_events() - Handles interrupt events + * + * @status: Contents of the B2R2 ITS register + */ +static void process_events(struct b2r2_core *core, u32 status) +{ + u32 mask = 0xF; + u32 disable_itm_mask = 0; + + b2r2_log_info(core->dev, "Enters process_events\n"); + b2r2_log_info(core->dev, "status 0x%x\n", status); + + /* Composition queue 1 */ + if (status & mask) { + handle_queue_event(core, B2R2_CORE_QUEUE_CQ1); + disable_itm_mask |= mask; + } + mask <<= 4; + + /* Composition queue 2 */ + if (status & mask) { + handle_queue_event(core, B2R2_CORE_QUEUE_CQ2); + disable_itm_mask |= mask; + } + mask <<= 8; + + /* Application queue 1 */ + if (status & mask) { + handle_queue_event(core, B2R2_CORE_QUEUE_AQ1); + disable_itm_mask |= mask; + } + mask <<= 4; + + /* Application queue 2 */ + if (status & mask) { + handle_queue_event(core, B2R2_CORE_QUEUE_AQ2); + disable_itm_mask |= mask; + } + mask <<= 4; + + /* Application queue 3 */ + if (status & mask) { + handle_queue_event(core, B2R2_CORE_QUEUE_AQ3); + disable_itm_mask |= mask; + } + mask <<= 4; + + /* Application queue 4 */ + if (status & mask) { + handle_queue_event(core, B2R2_CORE_QUEUE_AQ4); + disable_itm_mask |= mask; + } + + /* Clear received interrupt flags */ 
+ writel(status, &core->hw->BLT_ITS); + /* Disable handled interrupts */ + writel(readl(&core->hw->BLT_ITM0) & ~disable_itm_mask, + &core->hw->BLT_ITM0); + + b2r2_log_info(core->dev, "Returns process_events\n"); +} + +/** + * b2r2_irq_handler() - B2R2 interrupt handler + * + * @irq: Interrupt number (not used) + * @dev_id: A pointer to the b2r2 core entity + */ +static irqreturn_t b2r2_irq_handler(int irq, void *dev_id) +{ + unsigned long flags; + struct b2r2_core* core = (struct b2r2_core *) dev_id; + + /* Spin lock is need in irq handler (SMP) */ + spin_lock_irqsave(&core->lock, flags); + + /* Make a quick exit if this device was not interrupting */ + if (!core->valid || + ((readl(&core->hw->BLT_ITS) & B2R2_ITS_MASK) == 0)) { + core->stat_n_irq_skipped++; + spin_unlock_irqrestore(&core->lock, flags); + return IRQ_NONE; + } + + /* Remember time for last irq (for timeout mgmt) */ + core->jiffies_last_irq = jiffies; + core->stat_n_irq++; + + /* Handle the interrupt(s) */ + process_events(core, readl(&core->hw->BLT_ITS)); + + /* Check if we can dispatch new jobs */ + check_prio_list(core, true); + + core->stat_n_irq_exit++; + + spin_unlock_irqrestore(&core->lock, flags); + + return IRQ_HANDLED; +} + + +#ifdef CONFIG_DEBUG_FS +/** + * struct debugfs_reg - Represents one B2R2 register in debugfs + * + * @name: Register name + * @offset: Byte offset in B2R2 for register + */ +struct debugfs_reg { + const char name[30]; + u32 offset; +}; + +/** + * debugfs_regs - Array of B2R2 debugfs registers + */ +static const struct debugfs_reg debugfs_regs[] = { + {"BLT_SSBA17", offsetof(struct b2r2_memory_map, BLT_SSBA17)}, + {"BLT_SSBA18", offsetof(struct b2r2_memory_map, BLT_SSBA18)}, + {"BLT_SSBA19", offsetof(struct b2r2_memory_map, BLT_SSBA19)}, + {"BLT_SSBA20", offsetof(struct b2r2_memory_map, BLT_SSBA20)}, + {"BLT_SSBA21", offsetof(struct b2r2_memory_map, BLT_SSBA21)}, + {"BLT_SSBA22", offsetof(struct b2r2_memory_map, BLT_SSBA22)}, + {"BLT_SSBA23", offsetof(struct 
b2r2_memory_map, BLT_SSBA23)}, + {"BLT_SSBA24", offsetof(struct b2r2_memory_map, BLT_SSBA24)}, + {"BLT_STBA5", offsetof(struct b2r2_memory_map, BLT_STBA5)}, + {"BLT_STBA6", offsetof(struct b2r2_memory_map, BLT_STBA6)}, + {"BLT_STBA7", offsetof(struct b2r2_memory_map, BLT_STBA7)}, + {"BLT_STBA8", offsetof(struct b2r2_memory_map, BLT_STBA8)}, + {"BLT_CTL", offsetof(struct b2r2_memory_map, BLT_CTL)}, + {"BLT_ITS", offsetof(struct b2r2_memory_map, BLT_ITS)}, + {"BLT_STA1", offsetof(struct b2r2_memory_map, BLT_STA1)}, + {"BLT_SSBA1", offsetof(struct b2r2_memory_map, BLT_SSBA1)}, + {"BLT_SSBA2", offsetof(struct b2r2_memory_map, BLT_SSBA2)}, + {"BLT_SSBA3", offsetof(struct b2r2_memory_map, BLT_SSBA3)}, + {"BLT_SSBA4", offsetof(struct b2r2_memory_map, BLT_SSBA4)}, + {"BLT_SSBA5", offsetof(struct b2r2_memory_map, BLT_SSBA5)}, + {"BLT_SSBA6", offsetof(struct b2r2_memory_map, BLT_SSBA6)}, + {"BLT_SSBA7", offsetof(struct b2r2_memory_map, BLT_SSBA7)}, + {"BLT_SSBA8", offsetof(struct b2r2_memory_map, BLT_SSBA8)}, + {"BLT_STBA1", offsetof(struct b2r2_memory_map, BLT_STBA1)}, + {"BLT_STBA2", offsetof(struct b2r2_memory_map, BLT_STBA2)}, + {"BLT_STBA3", offsetof(struct b2r2_memory_map, BLT_STBA3)}, + {"BLT_STBA4", offsetof(struct b2r2_memory_map, BLT_STBA4)}, + {"BLT_CQ1_TRIG_IP", offsetof(struct b2r2_memory_map, BLT_CQ1_TRIG_IP)}, + {"BLT_CQ1_TRIG_CTL", offsetof(struct b2r2_memory_map, + BLT_CQ1_TRIG_CTL)}, + {"BLT_CQ1_PACE_CTL", offsetof(struct b2r2_memory_map, + BLT_CQ1_PACE_CTL)}, + {"BLT_CQ1_IP", offsetof(struct b2r2_memory_map, BLT_CQ1_IP)}, + {"BLT_CQ2_TRIG_IP", offsetof(struct b2r2_memory_map, BLT_CQ2_TRIG_IP)}, + {"BLT_CQ2_TRIG_CTL", offsetof(struct b2r2_memory_map, + BLT_CQ2_TRIG_CTL)}, + {"BLT_CQ2_PACE_CTL", offsetof(struct b2r2_memory_map, + BLT_CQ2_PACE_CTL)}, + {"BLT_CQ2_IP", offsetof(struct b2r2_memory_map, BLT_CQ2_IP)}, + {"BLT_AQ1_CTL", offsetof(struct b2r2_memory_map, BLT_AQ1_CTL)}, + {"BLT_AQ1_IP", offsetof(struct b2r2_memory_map, BLT_AQ1_IP)}, + {"BLT_AQ1_LNA", 
offsetof(struct b2r2_memory_map, BLT_AQ1_LNA)}, + {"BLT_AQ1_STA", offsetof(struct b2r2_memory_map, BLT_AQ1_STA)}, + {"BLT_AQ2_CTL", offsetof(struct b2r2_memory_map, BLT_AQ2_CTL)}, + {"BLT_AQ2_IP", offsetof(struct b2r2_memory_map, BLT_AQ2_IP)}, + {"BLT_AQ2_LNA", offsetof(struct b2r2_memory_map, BLT_AQ2_LNA)}, + {"BLT_AQ2_STA", offsetof(struct b2r2_memory_map, BLT_AQ2_STA)}, + {"BLT_AQ3_CTL", offsetof(struct b2r2_memory_map, BLT_AQ3_CTL)}, + {"BLT_AQ3_IP", offsetof(struct b2r2_memory_map, BLT_AQ3_IP)}, + {"BLT_AQ3_LNA", offsetof(struct b2r2_memory_map, BLT_AQ3_LNA)}, + {"BLT_AQ3_STA", offsetof(struct b2r2_memory_map, BLT_AQ3_STA)}, + {"BLT_AQ4_CTL", offsetof(struct b2r2_memory_map, BLT_AQ4_CTL)}, + {"BLT_AQ4_IP", offsetof(struct b2r2_memory_map, BLT_AQ4_IP)}, + {"BLT_AQ4_LNA", offsetof(struct b2r2_memory_map, BLT_AQ4_LNA)}, + {"BLT_AQ4_STA", offsetof(struct b2r2_memory_map, BLT_AQ4_STA)}, + {"BLT_SSBA9", offsetof(struct b2r2_memory_map, BLT_SSBA9)}, + {"BLT_SSBA10", offsetof(struct b2r2_memory_map, BLT_SSBA10)}, + {"BLT_SSBA11", offsetof(struct b2r2_memory_map, BLT_SSBA11)}, + {"BLT_SSBA12", offsetof(struct b2r2_memory_map, BLT_SSBA12)}, + {"BLT_SSBA13", offsetof(struct b2r2_memory_map, BLT_SSBA13)}, + {"BLT_SSBA14", offsetof(struct b2r2_memory_map, BLT_SSBA14)}, + {"BLT_SSBA15", offsetof(struct b2r2_memory_map, BLT_SSBA15)}, + {"BLT_SSBA16", offsetof(struct b2r2_memory_map, BLT_SSBA16)}, + {"BLT_SGA1", offsetof(struct b2r2_memory_map, BLT_SGA1)}, + {"BLT_SGA2", offsetof(struct b2r2_memory_map, BLT_SGA2)}, + {"BLT_ITM0", offsetof(struct b2r2_memory_map, BLT_ITM0)}, + {"BLT_ITM1", offsetof(struct b2r2_memory_map, BLT_ITM1)}, + {"BLT_ITM2", offsetof(struct b2r2_memory_map, BLT_ITM2)}, + {"BLT_ITM3", offsetof(struct b2r2_memory_map, BLT_ITM3)}, + {"BLT_DFV2", offsetof(struct b2r2_memory_map, BLT_DFV2)}, + {"BLT_DFV1", offsetof(struct b2r2_memory_map, BLT_DFV1)}, + {"BLT_PRI", offsetof(struct b2r2_memory_map, BLT_PRI)}, + {"PLUGS1_OP2", offsetof(struct b2r2_memory_map, 
PLUGS1_OP2)}, + {"PLUGS1_CHZ", offsetof(struct b2r2_memory_map, PLUGS1_CHZ)}, + {"PLUGS1_MSZ", offsetof(struct b2r2_memory_map, PLUGS1_MSZ)}, + {"PLUGS1_PGZ", offsetof(struct b2r2_memory_map, PLUGS1_PGZ)}, + {"PLUGS2_OP2", offsetof(struct b2r2_memory_map, PLUGS2_OP2)}, + {"PLUGS2_CHZ", offsetof(struct b2r2_memory_map, PLUGS2_CHZ)}, + {"PLUGS2_MSZ", offsetof(struct b2r2_memory_map, PLUGS2_MSZ)}, + {"PLUGS2_PGZ", offsetof(struct b2r2_memory_map, PLUGS2_PGZ)}, + {"PLUGS3_OP2", offsetof(struct b2r2_memory_map, PLUGS3_OP2)}, + {"PLUGS3_CHZ", offsetof(struct b2r2_memory_map, PLUGS3_CHZ)}, + {"PLUGS3_MSZ", offsetof(struct b2r2_memory_map, PLUGS3_MSZ)}, + {"PLUGS3_PGZ", offsetof(struct b2r2_memory_map, PLUGS3_PGZ)}, + {"PLUGT_OP2", offsetof(struct b2r2_memory_map, PLUGT_OP2)}, + {"PLUGT_CHZ", offsetof(struct b2r2_memory_map, PLUGT_CHZ)}, + {"PLUGT_MSZ", offsetof(struct b2r2_memory_map, PLUGT_MSZ)}, + {"PLUGT_PGZ", offsetof(struct b2r2_memory_map, PLUGT_PGZ)}, + {"BLT_NIP", offsetof(struct b2r2_memory_map, BLT_NIP)}, + {"BLT_CIC", offsetof(struct b2r2_memory_map, BLT_CIC)}, + {"BLT_INS", offsetof(struct b2r2_memory_map, BLT_INS)}, + {"BLT_ACK", offsetof(struct b2r2_memory_map, BLT_ACK)}, + {"BLT_TBA", offsetof(struct b2r2_memory_map, BLT_TBA)}, + {"BLT_TTY", offsetof(struct b2r2_memory_map, BLT_TTY)}, + {"BLT_TXY", offsetof(struct b2r2_memory_map, BLT_TXY)}, + {"BLT_TSZ", offsetof(struct b2r2_memory_map, BLT_TSZ)}, + {"BLT_S1CF", offsetof(struct b2r2_memory_map, BLT_S1CF)}, + {"BLT_S2CF", offsetof(struct b2r2_memory_map, BLT_S2CF)}, + {"BLT_S1BA", offsetof(struct b2r2_memory_map, BLT_S1BA)}, + {"BLT_S1TY", offsetof(struct b2r2_memory_map, BLT_S1TY)}, + {"BLT_S1XY", offsetof(struct b2r2_memory_map, BLT_S1XY)}, + {"BLT_S2BA", offsetof(struct b2r2_memory_map, BLT_S2BA)}, + {"BLT_S2TY", offsetof(struct b2r2_memory_map, BLT_S2TY)}, + {"BLT_S2XY", offsetof(struct b2r2_memory_map, BLT_S2XY)}, + {"BLT_S2SZ", offsetof(struct b2r2_memory_map, BLT_S2SZ)}, + {"BLT_S3BA", 
offsetof(struct b2r2_memory_map, BLT_S3BA)}, + {"BLT_S3TY", offsetof(struct b2r2_memory_map, BLT_S3TY)}, + {"BLT_S3XY", offsetof(struct b2r2_memory_map, BLT_S3XY)}, + {"BLT_S3SZ", offsetof(struct b2r2_memory_map, BLT_S3SZ)}, + {"BLT_CWO", offsetof(struct b2r2_memory_map, BLT_CWO)}, + {"BLT_CWS", offsetof(struct b2r2_memory_map, BLT_CWS)}, + {"BLT_CCO", offsetof(struct b2r2_memory_map, BLT_CCO)}, + {"BLT_CML", offsetof(struct b2r2_memory_map, BLT_CML)}, + {"BLT_FCTL", offsetof(struct b2r2_memory_map, BLT_FCTL)}, + {"BLT_PMK", offsetof(struct b2r2_memory_map, BLT_PMK)}, + {"BLT_RSF", offsetof(struct b2r2_memory_map, BLT_RSF)}, + {"BLT_RZI", offsetof(struct b2r2_memory_map, BLT_RZI)}, + {"BLT_HFP", offsetof(struct b2r2_memory_map, BLT_HFP)}, + {"BLT_VFP", offsetof(struct b2r2_memory_map, BLT_VFP)}, + {"BLT_Y_RSF", offsetof(struct b2r2_memory_map, BLT_Y_RSF)}, + {"BLT_Y_RZI", offsetof(struct b2r2_memory_map, BLT_Y_RZI)}, + {"BLT_Y_HFP", offsetof(struct b2r2_memory_map, BLT_Y_HFP)}, + {"BLT_Y_VFP", offsetof(struct b2r2_memory_map, BLT_Y_VFP)}, + {"BLT_KEY1", offsetof(struct b2r2_memory_map, BLT_KEY1)}, + {"BLT_KEY2", offsetof(struct b2r2_memory_map, BLT_KEY2)}, + {"BLT_SAR", offsetof(struct b2r2_memory_map, BLT_SAR)}, + {"BLT_USR", offsetof(struct b2r2_memory_map, BLT_USR)}, + {"BLT_IVMX0", offsetof(struct b2r2_memory_map, BLT_IVMX0)}, + {"BLT_IVMX1", offsetof(struct b2r2_memory_map, BLT_IVMX1)}, + {"BLT_IVMX2", offsetof(struct b2r2_memory_map, BLT_IVMX2)}, + {"BLT_IVMX3", offsetof(struct b2r2_memory_map, BLT_IVMX3)}, + {"BLT_OVMX0", offsetof(struct b2r2_memory_map, BLT_OVMX0)}, + {"BLT_OVMX1", offsetof(struct b2r2_memory_map, BLT_OVMX1)}, + {"BLT_OVMX2", offsetof(struct b2r2_memory_map, BLT_OVMX2)}, + {"BLT_OVMX3", offsetof(struct b2r2_memory_map, BLT_OVMX3)}, + {"BLT_VC1R", offsetof(struct b2r2_memory_map, BLT_VC1R)}, + {"BLT_Y_HFC0", offsetof(struct b2r2_memory_map, BLT_Y_HFC0)}, + {"BLT_Y_HFC1", offsetof(struct b2r2_memory_map, BLT_Y_HFC1)}, + {"BLT_Y_HFC2", 
offsetof(struct b2r2_memory_map, BLT_Y_HFC2)}, + {"BLT_Y_HFC3", offsetof(struct b2r2_memory_map, BLT_Y_HFC3)}, + {"BLT_Y_HFC4", offsetof(struct b2r2_memory_map, BLT_Y_HFC4)}, + {"BLT_Y_HFC5", offsetof(struct b2r2_memory_map, BLT_Y_HFC5)}, + {"BLT_Y_HFC6", offsetof(struct b2r2_memory_map, BLT_Y_HFC6)}, + {"BLT_Y_HFC7", offsetof(struct b2r2_memory_map, BLT_Y_HFC7)}, + {"BLT_Y_HFC8", offsetof(struct b2r2_memory_map, BLT_Y_HFC8)}, + {"BLT_Y_HFC9", offsetof(struct b2r2_memory_map, BLT_Y_HFC9)}, + {"BLT_Y_HFC10", offsetof(struct b2r2_memory_map, BLT_Y_HFC10)}, + {"BLT_Y_HFC11", offsetof(struct b2r2_memory_map, BLT_Y_HFC11)}, + {"BLT_Y_HFC12", offsetof(struct b2r2_memory_map, BLT_Y_HFC12)}, + {"BLT_Y_HFC13", offsetof(struct b2r2_memory_map, BLT_Y_HFC13)}, + {"BLT_Y_HFC14", offsetof(struct b2r2_memory_map, BLT_Y_HFC14)}, + {"BLT_Y_HFC15", offsetof(struct b2r2_memory_map, BLT_Y_HFC15)}, + {"BLT_Y_VFC0", offsetof(struct b2r2_memory_map, BLT_Y_VFC0)}, + {"BLT_Y_VFC1", offsetof(struct b2r2_memory_map, BLT_Y_VFC1)}, + {"BLT_Y_VFC2", offsetof(struct b2r2_memory_map, BLT_Y_VFC2)}, + {"BLT_Y_VFC3", offsetof(struct b2r2_memory_map, BLT_Y_VFC3)}, + {"BLT_Y_VFC4", offsetof(struct b2r2_memory_map, BLT_Y_VFC4)}, + {"BLT_Y_VFC5", offsetof(struct b2r2_memory_map, BLT_Y_VFC5)}, + {"BLT_Y_VFC6", offsetof(struct b2r2_memory_map, BLT_Y_VFC6)}, + {"BLT_Y_VFC7", offsetof(struct b2r2_memory_map, BLT_Y_VFC7)}, + {"BLT_Y_VFC8", offsetof(struct b2r2_memory_map, BLT_Y_VFC8)}, + {"BLT_Y_VFC9", offsetof(struct b2r2_memory_map, BLT_Y_VFC9)}, + {"BLT_HFC0", offsetof(struct b2r2_memory_map, BLT_HFC0)}, + {"BLT_HFC1", offsetof(struct b2r2_memory_map, BLT_HFC1)}, + {"BLT_HFC2", offsetof(struct b2r2_memory_map, BLT_HFC2)}, + {"BLT_HFC3", offsetof(struct b2r2_memory_map, BLT_HFC3)}, + {"BLT_HFC4", offsetof(struct b2r2_memory_map, BLT_HFC4)}, + {"BLT_HFC5", offsetof(struct b2r2_memory_map, BLT_HFC5)}, + {"BLT_HFC6", offsetof(struct b2r2_memory_map, BLT_HFC6)}, + {"BLT_HFC7", offsetof(struct b2r2_memory_map, 
BLT_HFC7)}, + {"BLT_HFC8", offsetof(struct b2r2_memory_map, BLT_HFC8)}, + {"BLT_HFC9", offsetof(struct b2r2_memory_map, BLT_HFC9)}, + {"BLT_HFC10", offsetof(struct b2r2_memory_map, BLT_HFC10)}, + {"BLT_HFC11", offsetof(struct b2r2_memory_map, BLT_HFC11)}, + {"BLT_HFC12", offsetof(struct b2r2_memory_map, BLT_HFC12)}, + {"BLT_HFC13", offsetof(struct b2r2_memory_map, BLT_HFC13)}, + {"BLT_HFC14", offsetof(struct b2r2_memory_map, BLT_HFC14)}, + {"BLT_HFC15", offsetof(struct b2r2_memory_map, BLT_HFC15)}, + {"BLT_VFC0", offsetof(struct b2r2_memory_map, BLT_VFC0)}, + {"BLT_VFC1", offsetof(struct b2r2_memory_map, BLT_VFC1)}, + {"BLT_VFC2", offsetof(struct b2r2_memory_map, BLT_VFC2)}, + {"BLT_VFC3", offsetof(struct b2r2_memory_map, BLT_VFC3)}, + {"BLT_VFC4", offsetof(struct b2r2_memory_map, BLT_VFC4)}, + {"BLT_VFC5", offsetof(struct b2r2_memory_map, BLT_VFC5)}, + {"BLT_VFC6", offsetof(struct b2r2_memory_map, BLT_VFC6)}, + {"BLT_VFC7", offsetof(struct b2r2_memory_map, BLT_VFC7)}, + {"BLT_VFC8", offsetof(struct b2r2_memory_map, BLT_VFC8)}, + {"BLT_VFC9", offsetof(struct b2r2_memory_map, BLT_VFC9)}, +}; + +#ifdef HANDLE_TIMEOUTED_JOBS +/** + * printk_regs() - Print B2R2 registers to printk + */ +static void printk_regs(struct b2r2_core *core) +{ +#ifdef CONFIG_B2R2_DEBUG + int i; + + for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { + unsigned long value = readl( + (unsigned long *) (((u8 *) core->hw) + + debugfs_regs[i].offset)); + b2r2_log_regdump(core->dev, "%s: %08lX\n", + debugfs_regs[i].name, + value); + } +#endif +} +#endif + +/** + * debugfs_b2r2_reg_read() - Implements debugfs read for B2R2 register + * + * @filp: File pointer + * @buf: User space buffer + * @count: Number of bytes to read + * @f_pos: File position + * + * Returns number of bytes read or negative error code + */ +static int debugfs_b2r2_reg_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos) +{ + size_t dev_size; + int ret = 0; + unsigned long value; + char *tmpbuf = 
kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+
+	if (tmpbuf == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* Read from B2R2 */
+	value = readl((unsigned long *)
+		filp->f_dentry->d_inode->i_private);
+
+	/* Build the string */
+	dev_size = sprintf(tmpbuf, "%8lX\n", value);
+
+	/* No more to read if offset != 0 */
+	if (*f_pos > dev_size)
+		goto out;
+
+	if (*f_pos + count > dev_size)
+		count = dev_size - *f_pos;
+
+	/* Return it to user space; report the fault instead of
+	   letting "ret = count" below overwrite the error code */
+	if (copy_to_user(buf, tmpbuf, count)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	*f_pos += count;
+	ret = count;
+
+out:
+	/* kfree(NULL) is a no-op, no need to guard */
+	kfree(tmpbuf);
+	return ret;
+}
+
+/**
+ * debugfs_b2r2_reg_write() - Implements debugfs write for B2R2 register
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to write
+ * @f_pos: File position
+ *
+ * Returns number of bytes written or negative error code
+ */
+static int debugfs_b2r2_reg_write(struct file *filp, const char __user *buf,
+				size_t count, loff_t *f_pos)
+{
+	char tmpbuf[80];
+	u32 reg_value;
+	int ret = 0;
+
+	/* Adjust count */
+	if (count >= sizeof(tmpbuf))
+		count = sizeof(tmpbuf) - 1;
+	/* Get it from user space */
+	if (copy_from_user(tmpbuf, buf, count))
+		return -EFAULT;
+	tmpbuf[count] = 0;
+	/* Convert from hex string. Scan directly into the u32: scanning
+	   "%lX" through a casted unsigned long pointer would store
+	   sizeof(long) bytes and corrupt the stack on 64-bit builds */
+	if (sscanf(tmpbuf, "%8x", &reg_value) != 1)
+		return -EINVAL;
+
+	writel(reg_value, (u32 *)
+		filp->f_dentry->d_inode->i_private);
+
+	*f_pos += count;
+	ret = count;
+
+	return ret;
+}
+
+/**
+ * debugfs_b2r2_reg_fops() - File operations for B2R2 register debugfs
+ */
+static const struct file_operations debugfs_b2r2_reg_fops = {
+	.owner = THIS_MODULE,
+	.read = debugfs_b2r2_reg_read,
+	.write = debugfs_b2r2_reg_write,
+};
+
+/**
+ * debugfs_b2r2_regs_read() - Implements debugfs read for B2R2 register dump
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes written or negative error code
+ */
+static int 
debugfs_b2r2_regs_read(struct file *filp, char __user *buf,
+				size_t count, loff_t *f_pos)
+{
+	size_t dev_size = 0;
+	int ret = 0;
+	int i;
+	char *tmpbuf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+
+	if (tmpbuf == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* Build a giant string containing all registers */
+	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+		unsigned long value =
+			readl((u32 *) (((u8 *)
+				filp->f_dentry->d_inode->i_private) +
+				debugfs_regs[i].offset));
+		dev_size += sprintf(tmpbuf + dev_size, "%s: %08lX\n",
+				debugfs_regs[i].name,
+				value);
+	}
+
+	/* No more to read if offset != 0 */
+	if (*f_pos > dev_size)
+		goto out;
+
+	if (*f_pos + count > dev_size)
+		count = dev_size - *f_pos;
+
+	/* Report the fault instead of letting "ret = count"
+	   below overwrite the error code */
+	if (copy_to_user(buf, tmpbuf, count)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	*f_pos += count;
+	ret = count;
+
+out:
+	/* kfree(NULL) is a no-op, no need to guard */
+	kfree(tmpbuf);
+	return ret;
+}
+
+/**
+ * debugfs_b2r2_regs_fops() - File operations for B2R2 register dump debugfs
+ */
+static const struct file_operations debugfs_b2r2_regs_fops = {
+	.owner = THIS_MODULE,
+	.read = debugfs_b2r2_regs_read,
+};
+
+/**
+ * debugfs_b2r2_stat_read() - Implements debugfs read for B2R2 statistics
+ *
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to read
+ * @f_pos: File position
+ *
+ * Returns number of bytes read or negative error code
+ */
+static int debugfs_b2r2_stat_read(struct file *filp, char __user *buf,
+				size_t count, loff_t *f_pos)
+{
+	size_t dev_size = 0;
+	int ret = 0;
+	int i = 0;
+	char *tmpbuf = kmalloc(sizeof(char) * 4096, GFP_KERNEL);
+	struct b2r2_core *core = filp->f_dentry->d_inode->i_private;
+
+	if (tmpbuf == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* Build a string containing all statistics */
+	dev_size += sprintf(tmpbuf + dev_size, "Interrupts : %lu\n",
+			core->stat_n_irq);
+	dev_size += sprintf(tmpbuf + dev_size, "Added jobs : %lu\n",
+			core->stat_n_jobs_added);
+	dev_size += sprintf(tmpbuf + dev_size, "Removed jobs : %lu\n",
+			
core->stat_n_jobs_removed); + dev_size += sprintf(tmpbuf + dev_size, "Jobs in prio list : %lu\n", + core->stat_n_jobs_in_prio_list); + dev_size += sprintf(tmpbuf + dev_size, "Active jobs : %lu\n", + core->n_active_jobs); + for (i = 0; i < ARRAY_SIZE(core->active_jobs); i++) + dev_size += sprintf(tmpbuf + dev_size, + " Job in queue %d : 0x%08lx\n", + i, (unsigned long) core->active_jobs[i]); + dev_size += sprintf(tmpbuf + dev_size, "Clock requests : %lu\n", + core->clock_request_count); + + /* No more to read if offset != 0 */ + if (*f_pos > dev_size) + goto out; + + if (*f_pos + count > dev_size) + count = dev_size - *f_pos; + + if (copy_to_user(buf, tmpbuf, count)) + ret = -EINVAL; + *f_pos += count; + ret = count; + +out: + if (tmpbuf != NULL) + kfree(tmpbuf); + return ret; +} + +/** + * debugfs_b2r2_stat_fops() - File operations for B2R2 statistics debugfs + */ +static const struct file_operations debugfs_b2r2_stat_fops = { + .owner = THIS_MODULE, + .read = debugfs_b2r2_stat_read, +}; + + +/** + * debugfs_b2r2_clock_read() - Implements debugfs read for + * PMU B2R2 clock register + * @filp: File pointer + * @buf: User space buffer + * @count: Number of bytes to read + * @f_pos: File position + * + * Returns number of bytes read or negative error code + */ +static int debugfs_b2r2_clock_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos) +{ + /* 10 characters hex number + newline + string terminator; */ + char tmpbuf[10+2]; + size_t dev_size; + int ret = 0; + struct b2r2_core *core = filp->f_dentry->d_inode->i_private; + + unsigned long value = clk_get_rate(core->b2r2_clock); + + dev_size = sprintf(tmpbuf, "%#010lx\n", value); + + /* No more to read if offset != 0 */ + if (*f_pos > dev_size) + goto out; + + if (*f_pos + count > dev_size) + count = dev_size - *f_pos; + + if (copy_to_user(buf, tmpbuf, count)) + ret = -EINVAL; + *f_pos += count; + ret = count; + +out: + return ret; +} + +/** + * debugfs_b2r2_clock_write() - Implements debugfs 
write for + * PMU B2R2 clock register + * @filp: File pointer + * @buf: User space buffer + * @count: Number of bytes to write + * @f_pos: File position + * + * Returns number of bytes written or negative error code + */ +static int debugfs_b2r2_clock_write(struct file *filp, const char __user *buf, + size_t count, loff_t *f_pos) +{ + char tmpbuf[80]; + u32 reg_value; + int ret = 0; + + if (count >= sizeof(tmpbuf)) + count = sizeof(tmpbuf) - 1; + if (copy_from_user(tmpbuf, buf, count)) + return -EINVAL; + tmpbuf[count] = 0; + if (sscanf(tmpbuf, "%8lX", (unsigned long *) ®_value) != 1) + return -EINVAL; + + /*not working yet*/ + /*clk_set_rate(b2r2_core.b2r2_clock, (unsigned long) reg_value);*/ + + *f_pos += count; + ret = count; + + return ret; +} + +/** + * debugfs_b2r2_clock_fops() - File operations for PMU B2R2 clock debugfs + */ +static const struct file_operations debugfs_b2r2_clock_fops = { + .owner = THIS_MODULE, + .read = debugfs_b2r2_clock_read, + .write = debugfs_b2r2_clock_write, +}; + +/** + * debugfs_b2r2_enabled_read() - Implements debugfs read for + * B2R2 Core Enable/Disable + * @filp: File pointer + * @buf: User space buffer + * @count: Number of bytes to read + * @f_pos: File position + * + * Returns number of bytes read or negative error code + */ +static int debugfs_b2r2_enabled_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos) +{ + /* 4 characters hex number + newline + string terminator; */ + char tmpbuf[4+2]; + size_t dev_size; + int ret = 0; + struct b2r2_core *core = filp->f_dentry->d_inode->i_private; + + dev_size = sprintf(tmpbuf, "%02X\n", core->control->enabled); + + /* No more to read if offset != 0 */ + if (*f_pos > dev_size) + goto out; + + if (*f_pos + count > dev_size) + count = dev_size - *f_pos; + + if (copy_to_user(buf, tmpbuf, count)) + ret = -EINVAL; + *f_pos += count; + ret = count; +out: + return ret; +} + +/** + * debugfs_b2r2_enabled_write() - Implements debugfs write for + * B2R2 Core Enable/Disable 
+ * @filp: File pointer
+ * @buf: User space buffer
+ * @count: Number of bytes to write
+ * @f_pos: File position
+ *
+ * Returns number of bytes written or negative error code
+ */
+static int debugfs_b2r2_enabled_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ char tmpbuf[80];
+ unsigned int enable;
+ int ret = 0;
+ struct b2r2_core *core = filp->f_dentry->d_inode->i_private;
+
+ if (count >= sizeof(tmpbuf))
+ count = sizeof(tmpbuf) - 1;
+ if (copy_from_user(tmpbuf, buf, count))
+ return -EINVAL;
+ tmpbuf[count] = 0;
+ if (sscanf(tmpbuf, "%02X", &enable) != 1)
+ return -EINVAL;
+
+ if (enable)
+ core->control->enabled = true;
+ else
+ core->control->enabled = false;
+
+ *f_pos += count;
+ ret = count;
+
+ return ret;
+}
+
+/**
+ * debugfs_b2r2_enabled_fops() - File operations for B2R2 Core Enable/Disable debugfs
+ */
+static const struct file_operations debugfs_b2r2_enabled_fops = {
+ .owner = THIS_MODULE,
+ .read = debugfs_b2r2_enabled_read,
+ .write = debugfs_b2r2_enabled_write,
+};
+
+#endif
+
+/**
+ *
+ * init_hw() - B2R2 Hardware reset & initialize
+ *
+ * @core: B2R2 core instance
+ *
+ * 1)Register interrupt handler
+ *
+ * 2)B2R2 Register map
+ *
+ * 3)For resetting B2R2 hardware, write B2R2BLT_CTLGLOBAL_soft_reset to the
+ * B2R2 Control register and then poll the
+ * B2R2 status register for the B2R2BLT_STA1BDISP_IDLE flag.
+ *
+ * 4)Wait for B2R2 hardware to be idle (on a timeout rather than while loop)
+ *
+ * 5)Driver status reset
+ *
+ * 6)Recover from any error without any leaks.
+ * + */ +static int init_hw(struct b2r2_core *core) +{ + int result = 0; + u32 uTimeOut = B2R2_RESET_TIMEOUT_VALUE; + + /* Put B2R2 into reset */ + clear_interrupts(core); + + writel(readl(&core->hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset, + &core->hw->BLT_CTL); + + /* Set up interrupt handler */ + result = request_irq(core->irq, b2r2_irq_handler, IRQF_SHARED, + "b2r2-interrupt", core); + if (result) { + b2r2_log_err(core->dev, + "%s: failed to register IRQ for B2R2\n", __func__); + goto b2r2_init_request_irq_failed; + } + + b2r2_log_info(core->dev, "do a global reset..\n"); + + /* Release reset */ + writel(0x00000000, &core->hw->BLT_CTL); + + b2r2_log_info(core->dev, "wait for B2R2 to be idle..\n"); + + /** Wait for B2R2 to be idle (on a timeout rather than while loop) */ + while ((uTimeOut > 0) && + ((readl(&core->hw->BLT_STA1) & + B2R2BLT_STA1BDISP_IDLE) == 0x0)) + uTimeOut--; + if (uTimeOut == 0) { + b2r2_log_err(core->dev, + "%s: B2R2 not idle after SW reset\n", __func__); + result = -EAGAIN; + goto b2r2_core_init_hw_timeout; + } + +#ifdef CONFIG_DEBUG_FS + /* Register debug fs files for register access */ + if (!IS_ERR_OR_NULL(core->debugfs_core_root_dir) && + IS_ERR_OR_NULL(core->debugfs_regs_dir)) { + core->debugfs_regs_dir = debugfs_create_dir("regs", + core->debugfs_core_root_dir); + } + if (!IS_ERR_OR_NULL(core->debugfs_regs_dir)) { + int i; + debugfs_create_file("all", 0666, core->debugfs_regs_dir, + (void *)core->hw, &debugfs_b2r2_regs_fops); + /* Create debugfs entries for all static registers */ + for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) + debugfs_create_file(debugfs_regs[i].name, 0666, + core->debugfs_regs_dir, + (void *)(((u8 *) core->hw) + + debugfs_regs[i].offset), + &debugfs_b2r2_reg_fops); + } +#endif + + b2r2_log_info(core->dev, "%s ended..\n", __func__); + return result; + +/** Recover from any error without any leaks */ +b2r2_core_init_hw_timeout: + /** Free B2R2 interrupt handler */ + free_irq(core->irq, core); + 
+b2r2_init_request_irq_failed: + if (core->hw) + iounmap(core->hw); + core->hw = NULL; + + return result; +} + + +/** + * exit_hw() - B2R2 Hardware exit + * + * core->lock _must_ NOT be held + */ +static void exit_hw(struct b2r2_core *core) +{ + unsigned long flags; + + b2r2_log_info(core->dev, "%s started..\n", __func__); + +#ifdef CONFIG_DEBUG_FS + /* Unregister our debugfs entries */ + if (!IS_ERR_OR_NULL(core->debugfs_regs_dir)) { + debugfs_remove_recursive(core->debugfs_regs_dir); + core->debugfs_regs_dir = NULL; + } +#endif + b2r2_log_debug(core->dev, "%s: locking core->lock\n", __func__); + spin_lock_irqsave(&core->lock, flags); + + /* Cancel all pending jobs */ + b2r2_log_debug(core->dev, "%s: canceling pending jobs\n", __func__); + exit_job_list(core, &core->prio_queue); + + /* Soft reset B2R2 (Close all DMA, + reset all state to idle, reset regs)*/ + b2r2_log_debug(core->dev, "%s: putting b2r2 in reset\n", __func__); + writel(readl(&core->hw->BLT_CTL) | B2R2BLT_CTLGLOBAL_soft_reset, + &core->hw->BLT_CTL); + + b2r2_log_debug(core->dev, "%s: clearing interrupts\n", __func__); + clear_interrupts(core); + + /** Free B2R2 interrupt handler */ + b2r2_log_debug(core->dev, "%s: freeing interrupt handler\n", __func__); + free_irq(core->irq, core); + + b2r2_log_debug(core->dev, "%s: unlocking core->lock\n", __func__); + spin_unlock_irqrestore(&core->lock, flags); + + b2r2_log_info(core->dev, "%s ended...\n", __func__); +} + +/** + * b2r2_probe() - This routine loads the B2R2 core driver + * + * @pdev: platform device. 
+ */ +static int b2r2_probe(struct platform_device *pdev) +{ + int ret = 0; + struct resource *res = NULL; + struct b2r2_core *core = NULL; + struct b2r2_control *control = NULL; + struct b2r2_platform_data *pdata = NULL; + int debug_init = 0; + + BUG_ON(pdev == NULL); + BUG_ON(pdev->id < 0 || pdev->id >= B2R2_MAX_NBR_DEVICES); + + pdata = pdev->dev.platform_data; + + core = kzalloc(sizeof(*core), GFP_KERNEL); + if (!core) { + dev_err(&pdev->dev, "b2r2 core alloc failed\n"); + ret = -EINVAL; + goto error_exit; + } + + core->dev = &pdev->dev; + dev_set_drvdata(core->dev, core); + if (pdev->id) + snprintf(core->name, sizeof(core->name), "b2r2_%d", pdev->id); + else + snprintf(core->name, sizeof(core->name), "b2r2"); + + dev_info(&pdev->dev, "init started.\n"); + + /* Init spin locks */ + spin_lock_init(&core->lock); + + /* Init job queues */ + INIT_LIST_HEAD(&core->prio_queue); + +#ifdef HANDLE_TIMEOUTED_JOBS + /* Create work queue for callbacks & timeout */ + INIT_DELAYED_WORK(&core->timeout_work, timeout_work_function); +#endif + + /* Work queue for callbacks and timeout management */ + core->work_queue = create_workqueue("B2R2"); + if (!core->work_queue) { + ret = -ENOMEM; + goto error_exit; + } + + /* Get the clock for B2R2 */ + core->b2r2_clock = clk_get(core->dev, pdata->clock_id); + if (IS_ERR(core->b2r2_clock)) { + ret = PTR_ERR(core->b2r2_clock); + dev_err(&pdev->dev, "clk_get %s failed\n", pdata->clock_id); + goto error_exit; + } + + /* Get the B2R2 regulator */ + core->b2r2_reg = regulator_get(core->dev, pdata->regulator_id); + if (IS_ERR(core->b2r2_reg)) { + ret = PTR_ERR(core->b2r2_reg); + dev_err(&pdev->dev, "regulator_get %s failed " + "(dev_name=%s)\n", pdata->regulator_id, + dev_name(core->dev)); + goto error_exit; + } + + /* Init power management */ + mutex_init(&core->domain_lock); + INIT_DELAYED_WORK_DEFERRABLE(&core->domain_disable_work, + domain_disable_work_function); + core->domain_enabled = false; + core->valid = false; + + /* Map B2R2 into 
kernel virtual memory space */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) + goto error_exit; + + /* Hook up irq */ + core->irq = platform_get_irq(pdev, 0); + if (core->irq <= 0) { + dev_err(&pdev->dev, "%s: Failed to request irq (irq=%d)\n", + __func__, core->irq); + goto error_exit; + } + + core->hw = (struct b2r2_memory_map *) ioremap(res->start, + res->end - res->start + 1); + if (core->hw == NULL) { + dev_err(&pdev->dev, "%s: ioremap failed\n", __func__); + ret = -ENOMEM; + goto error_exit; + } + + dev_dbg(core->dev, "b2r2 structure address %p\n", core->hw); + + control = kzalloc(sizeof(*control), GFP_KERNEL); + if (!control) { + dev_err(&pdev->dev, "b2r2 control alloc failed\n"); + ret = -EINVAL; + goto error_exit; + } + + control->data = (void *)core; + control->id = pdev->id; + control->dev = &pdev->dev; /* Temporary device */ + + core->op_size = B2R2_PLUG_OPCODE_SIZE_DEFAULT; + core->ch_size = B2R2_PLUG_CHUNK_SIZE_DEFAULT; + core->pg_size = B2R2_PLUG_PAGE_SIZE_DEFAULT; + core->mg_size = B2R2_PLUG_MESSAGE_SIZE_DEFAULT; + core->min_req_time = 0; + +#ifdef CONFIG_DEBUG_FS + core->debugfs_root_dir = debugfs_create_dir(core->name, NULL); + if (!IS_ERR_OR_NULL(core->debugfs_root_dir)) { + core->debugfs_core_root_dir = debugfs_create_dir("core", + core->debugfs_root_dir); + control->debugfs_debug_root_dir = debugfs_create_dir("debug", + core->debugfs_root_dir); + control->mem_heap.debugfs_root_dir = debugfs_create_dir("mem", + core->debugfs_root_dir); + control->debugfs_root_dir = debugfs_create_dir("blt", + core->debugfs_root_dir); + } + + if (!IS_ERR_OR_NULL(core->debugfs_core_root_dir)) { + debugfs_create_file("stats", 0666, core->debugfs_core_root_dir, + core, &debugfs_b2r2_stat_fops); + debugfs_create_file("clock", 0666, core->debugfs_core_root_dir, + core, &debugfs_b2r2_clock_fops); + debugfs_create_file("enabled", 0666, + core->debugfs_core_root_dir, + core, &debugfs_b2r2_enabled_fops); + debugfs_create_u8("op_size", 0666, 
core->debugfs_core_root_dir, + &core->op_size); + debugfs_create_u8("ch_size", 0666, core->debugfs_core_root_dir, + &core->ch_size); + debugfs_create_u8("pg_size", 0666, core->debugfs_core_root_dir, + &core->pg_size); + debugfs_create_u8("mg_size", 0666, core->debugfs_core_root_dir, + &core->mg_size); + debugfs_create_u16("min_req_time", 0666, + core->debugfs_core_root_dir, &core->min_req_time); + } +#endif + + ret = b2r2_debug_init(control); + if (ret < 0) { + dev_err(&pdev->dev, "b2r2_debug_init failed\n"); + goto error_exit; + } + debug_init = 1; + + /* Initialize b2r2_control */ + ret = b2r2_control_init(control); + if (ret < 0) { + b2r2_log_err(&pdev->dev, "b2r2_control_init failed\n"); + goto error_exit; + } + core->control = control; + + /* Add the control to the blitter */ + kref_init(&control->ref); + control->enabled = true; + b2r2_blt_add_control(control); + + b2r2_core[pdev->id] = core; + dev_info(&pdev->dev, "%s done.\n", __func__); + + return ret; + +/** Recover from any error if something fails */ +error_exit: + kfree(control); + + if (!IS_ERR_OR_NULL(core->b2r2_reg)) + regulator_put(core->b2r2_reg); + + if (!IS_ERR_OR_NULL(core->b2r2_clock)) + clk_put(core->b2r2_clock); + + if (!IS_ERR_OR_NULL(core->work_queue)) + destroy_workqueue(core->work_queue); + + if (debug_init) + b2r2_debug_exit(); + +#ifdef CONFIG_DEBUG_FS + if (!IS_ERR_OR_NULL(core->debugfs_root_dir)) { + debugfs_remove_recursive(core->debugfs_root_dir); + core->debugfs_root_dir = NULL; + } +#endif + kfree(core); + + dev_info(&pdev->dev, "%s done with errors (%d).\n", __func__, ret); + + return ret; +} + +void b2r2_core_release(struct kref *control_ref) +{ + struct b2r2_control *control = container_of( + control_ref, struct b2r2_control, ref); + struct b2r2_core *core = control->data; + int id = control->id; + unsigned long flags; +#ifdef CONFIG_B2R2_DEBUG + struct device *dev = core->dev; +#endif + + b2r2_log_info(dev, "%s: enter\n", __func__); + + /* Exit b2r2 control module */ + 
b2r2_control_exit(control);
+ kfree(control);
+ b2r2_debug_exit();
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+ cancel_delayed_work(&core->timeout_work);
+#endif
+
+ /* Flush B2R2 work queue (call all callbacks for
+ cancelled jobs) */
+ flush_workqueue(core->work_queue);
+
+ /* Make sure the power is turned off */
+ cancel_delayed_work_sync(&core->domain_disable_work);
+
+ /** Unmap B2R2 registers */
+ b2r2_log_info(dev, "%s: unmap b2r2 registers..\n", __func__);
+ if (core->hw) {
+ iounmap(core->hw);
+ core->hw = NULL;
+ }
+
+ destroy_workqueue(core->work_queue);
+
+ spin_lock_irqsave(&core->lock, flags);
+ core->work_queue = NULL;
+ spin_unlock_irqrestore(&core->lock, flags);
+
+ /* Return the clock */
+ clk_put(core->b2r2_clock);
+ regulator_put(core->b2r2_reg);
+
+ core->dev = NULL;
+ kfree(core);
+ b2r2_core[id] = NULL;
+
+ b2r2_log_info(dev, "%s: exit\n", __func__);
+}
+
+
+/**
+ * b2r2_remove - This routine unloads b2r2 driver
+ *
+ * @pdev: platform device.
+ */
+static int b2r2_remove(struct platform_device *pdev)
+{
+ struct b2r2_core *core;
+
+ BUG_ON(pdev == NULL);
+
+ core = dev_get_drvdata(&pdev->dev);
+ BUG_ON(core == NULL);
+ b2r2_log_info(&pdev->dev, "%s: Started\n", __func__);
+
+#ifdef CONFIG_DEBUG_FS
+ if (!IS_ERR_OR_NULL(core->debugfs_root_dir)) {
+ debugfs_remove_recursive(core->debugfs_root_dir);
+ core->debugfs_root_dir = NULL;
+ }
+#endif
+
+ /* Flush B2R2 work queue (call all callbacks) */
+ flush_workqueue(core->work_queue);
+
+ /* Remove control from blitter */
+ core->control->enabled = false;
+ b2r2_blt_remove_control(core->control);
+ kref_put(&core->control->ref, b2r2_core_release);
+
+ b2r2_log_info(&pdev->dev, "%s: Ended\n", __func__);
+
+ return 0;
+}
+/**
+ * b2r2_suspend() - This routine puts the B2R2 device in to suspend state.
+ * @pdev: platform device.
+ *
+ * This routine stores the current state of the b2r2 device and puts in to
+ * suspend state.
+ *
+ */
+int b2r2_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct b2r2_core *core;
+
+ BUG_ON(pdev == NULL);
+ core = dev_get_drvdata(&pdev->dev);
+ BUG_ON(core == NULL);
+ b2r2_log_info(core->dev, "%s\n", __func__);
+
+ /* Flush B2R2 work queue (call all callbacks) */
+ flush_workqueue(core->work_queue);
+
+#ifdef HANDLE_TIMEOUTED_JOBS
+ cancel_delayed_work(&core->timeout_work);
+#endif
+
+ /* Flush B2R2 work queue (call all callbacks for
+ cancelled jobs) */
+ flush_workqueue(core->work_queue);
+
+ /* Make sure power is turned off */
+ cancel_delayed_work_sync(&core->domain_disable_work);
+
+ return 0;
+}
+
+
+/**
+ * b2r2_resume() - This routine resumes the B2R2 device from suspend state.
+ * @pdev: platform device.
+ *
+ * This routine restores the state of the b2r2 device on resume.
+ *
+ */
+int b2r2_resume(struct platform_device *pdev)
+{
+ struct b2r2_core *core;
+
+ BUG_ON(pdev == NULL);
+ core = dev_get_drvdata(&pdev->dev);
+ BUG_ON(core == NULL);
+ b2r2_log_info(core->dev, "%s\n", __func__);
+
+ return 0;
+}
+
+void b2r2_core_print_stats(struct b2r2_core *core)
+{
+ b2r2_log_info(core->dev,
+ "%s: n_irq %ld, n_irq_exit %ld, n_irq_skipped %ld,\n"
+ "n_jobs_added %ld, n_active_jobs %ld, "
+ "n_jobs_in_prio_list %ld,\n"
+ "n_jobs_removed %ld\n",
+ __func__,
+ core->stat_n_irq,
+ core->stat_n_irq_exit,
+ core->stat_n_irq_skipped,
+ core->stat_n_jobs_added,
+ core->n_active_jobs,
+ core->stat_n_jobs_in_prio_list,
+ core->stat_n_jobs_removed);
+}
+
+/**
+ * struct platform_b2r2_driver - Platform driver configuration for the
+ * B2R2 core driver
+ */
+static struct platform_driver platform_b2r2_driver = {
+ .remove = b2r2_remove,
+ .driver = {
+ .name = "b2r2",
+ },
+ /** TODO implement power mgmt functions */
+ .suspend = b2r2_suspend,
+ .resume = b2r2_resume,
+};
+
+
+/**
+ * b2r2_init() - Module init function for the B2R2 core module
+ */
+static int __init b2r2_init(void)
+{
+ printk(KERN_INFO "%s\n", __func__);
+ return
platform_driver_probe(&platform_b2r2_driver, b2r2_probe);
+}
+module_init(b2r2_init);
+
+/**
+ * b2r2_exit() - Module exit function for the B2R2 core module
+ */
+static void __exit b2r2_exit(void)
+{
+ printk(KERN_INFO "%s\n", __func__);
+ platform_driver_unregister(&platform_b2r2_driver);
+ return;
+}
+module_exit(b2r2_exit);
+
+
+/** Module has a GPL license */
+
+MODULE_LICENSE("GPL");
+
+/** Module author & description */
+
+MODULE_AUTHOR("Robert Fekete (robert.fekete@stericsson.com)");
+MODULE_DESCRIPTION("B2R2 Core driver");
diff --git a/drivers/video/b2r2/b2r2_core.h b/drivers/video/b2r2/b2r2_core.h
new file mode 100644
index 00000000000..5b9fdcdc2bb
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_core.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 core driver
+ *
+ * Author: Robert Fekete <robert.fekete@stericsson.com>
+ * Author: Paul Wannback
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __B2R2_CORE_H__
+#define __B2R2_CORE_H__
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+/**
+ * B2R2_RESET_TIMEOUT_VALUE - The number of times to read the status register
+ * waiting for b2r2 to go idle after soft reset.
+ */
+#define B2R2_RESET_TIMEOUT_VALUE (1500)
+
+/**
+ * B2R2_CLK_FLAG - Value to write into clock reg to turn clock on
+ */
+#define B2R2_CLK_FLAG (0x125)
+
+/**
+ * DEBUG_CHECK_ADDREF_RELEASE - Define this to enable addref / release debug
+ */
+#define DEBUG_CHECK_ADDREF_RELEASE 1
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * HANDLE_TIMEOUTED_JOBS - Define this to check jobs for timeout and cancel them
+ */
+#define HANDLE_TIMEOUTED_JOBS
+#define JOB_TIMEOUT (HZ/2)
+#endif
+
+/**
+ * B2R2_CLOCK_ALWAYS_ON - Define this to disable power save clock turn off
+ */
+/* #define B2R2_CLOCK_ALWAYS_ON 1 */
+
+/**
+ * START_SENTINEL - Watch guard to detect job overwrites
+ */
+#define START_SENTINEL 0xBABEDEEA
+
+/**
+ * END_SENTINEL - Watch guard to detect job overwrites
+ */
+#define END_SENTINEL 0xDADBDCDD
+
+/**
+ * B2R2_CORE_LOWEST_PRIO - Lowest prio allowed
+ */
+#define B2R2_CORE_LOWEST_PRIO -19
+/**
+ * B2R2_CORE_HIGHEST_PRIO - Highest prio allowed
+ */
+#define B2R2_CORE_HIGHEST_PRIO 20
+
+/**
+ * B2R2_DOMAIN_DISABLE -
+ */
+#define B2R2_DOMAIN_DISABLE_TIMEOUT (HZ/100)
+
+/**
+ * B2R2_REGULATOR_RETRY_COUNT -
+ */
+#define B2R2_REGULATOR_RETRY_COUNT 10
+
+
+#ifdef DEBUG_CHECK_ADDREF_RELEASE
+
+/**
+ * struct addref_release - Represents one addref or release. Used
+ * to debug addref / release problems
+ *
+ * @addref: true if this represents an addref else it represents
+ * a release.
+ * @job: The job that was referenced
+ * @caller: The caller of the addref or release
+ * @ref_count: The job reference count after addref / release
+ */
+struct addref_release {
+ bool addref;
+ struct b2r2_core_job *job;
+ const char *caller;
+ int ref_count;
+};
+
+#endif
+
+/**
+ * struct b2r2_core - Administration data for B2R2 core
+ *
+ * @lock: Spin lock protecting the b2r2_core structure and the B2R2 HW
+ * @hw: B2R2 registers memory mapped
+ * @pmu_b2r2_clock: Control of B2R2 clock
+ * @log_dev: Device used for logging via dev_...
functions + * + * @prio_queue: Queue of jobs sorted in priority order + * @active_jobs: Array containing pointer to zero or one job per queue + * @n_active_jobs: Number of active jobs + * @jiffies_last_active: jiffie value when adding last active job + * @jiffies_last_irq: jiffie value when last irq occured + * @timeout_work: Work structure for timeout work + * + * @next_job_id: Contains the job id that will be assigned to the next + * added job. + * + * @clock_request_count: When non-zero, clock is on + * @clock_off_timer: Kernel timer to handle delayed turn off of clock + * + * @work_queue: Work queue to handle done jobs (callbacks) and timeouts in + * non-interrupt context. + * + * @stat_n_irq: Number of interrupts (statistics) + * @stat_n_jobs_added: Number of jobs added (statistics) + * @stat_n_jobs_removed: Number of jobs removed (statistics) + * @stat_n_jobs_in_prio_list: Number of jobs in prio list (statistics) + * + * @debugfs_root_dir: Root directory for B2R2 debugfs + * + * @ar: Circular array of addref / release debug structs + * @ar_write: Where next write will occur + * @ar_read: First valid place to read. When ar_read == ar_write then + * the array is empty. 
+ */ +struct b2r2_core { + spinlock_t lock; + + struct b2r2_memory_map *hw; + + u8 op_size; + u8 ch_size; + u8 pg_size; + u8 mg_size; + u16 min_req_time; + int irq; + + char name[16]; + struct device *dev; + + struct list_head prio_queue; + + struct b2r2_core_job *active_jobs[B2R2_CORE_QUEUE_NO_OF]; + unsigned long n_active_jobs; + + unsigned long jiffies_last_active; + unsigned long jiffies_last_irq; +#ifdef HANDLE_TIMEOUTED_JOBS + struct delayed_work timeout_work; +#endif + int next_job_id; + + unsigned long clock_request_count; + struct timer_list clock_off_timer; + + struct workqueue_struct *work_queue; + + /* Statistics */ + unsigned long stat_n_irq_exit; + unsigned long stat_n_irq_skipped; + unsigned long stat_n_irq; + unsigned long stat_n_jobs_added; + unsigned long stat_n_jobs_removed; + + unsigned long stat_n_jobs_in_prio_list; + +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_root_dir; + struct dentry *debugfs_core_root_dir; + struct dentry *debugfs_regs_dir; +#endif + +#ifdef DEBUG_CHECK_ADDREF_RELEASE + /* Tracking release bug...*/ + struct addref_release ar[100]; + int ar_write; + int ar_read; +#endif + + /* Power management variables */ + struct mutex domain_lock; + struct delayed_work domain_disable_work; + + /* + * We need to keep track of both the number of domain_enable/disable() + * calls and whether the power was actually turned off, since the + * power off is done in a delayed job. + */ + bool domain_enabled; + volatile bool valid; + int domain_request_count; + + struct clk *b2r2_clock; + struct regulator *b2r2_reg; + + struct b2r2_control *control; +}; + +/** + * b2r2_core_job_add() - Adds a job to B2R2 job queues + * + * The job reference count will be increased after this function + * has been called and b2r2_core_job_release() must be called to + * release the reference. The job callback function will be always + * be called after the job is done or cancelled. 
+ * + * @control: The b2r2 control entity + * @job: Job to be added + * + * Returns 0 if OK else negative error code + * + */ +int b2r2_core_job_add(struct b2r2_control *control, + struct b2r2_core_job *job); + +/** + * b2r2_core_job_wait() - Waits for an added job to be done. + * + * @job: Job to wait for + * + * Returns 0 if job done else negative error code + * + */ +int b2r2_core_job_wait(struct b2r2_core_job *job); + +/** + * b2r2_core_job_cancel() - Cancel an already added job. + * + * @job: Job to cancel + * + * Returns 0 if job cancelled or done else negative error code + * + */ +int b2r2_core_job_cancel(struct b2r2_core_job *job); + +/** + * b2r2_core_job_find() - Finds job with given job id + * + * Reference count will be increased for the found job + * + * @control: The b2r2 control entity + * @job_id: Job id to find + * + * Returns job if found, else NULL + * + */ +struct b2r2_core_job *b2r2_core_job_find(struct b2r2_control *control, + int job_id); + +/** + * b2r2_core_job_find_first_with_tag() - Finds first job with given tag + * + * Reference count will be increased for the found job. + * This function can be used to find all jobs for a client, i.e. + * when cancelling all jobs for a client. + * + * @control: The b2r2 control entity + * @tag: Tag to find + * + * Returns job if found, else NULL + * + */ +struct b2r2_core_job *b2r2_core_job_find_first_with_tag( + struct b2r2_control *control, int tag); + +/** + * b2r2_core_job_addref() - Increase the job reference count. + * + * @job: Job to increase reference count for. + * @caller: The function calling this function (for debug) + */ +void b2r2_core_job_addref(struct b2r2_core_job *job, const char *caller); + +/** + * b2r2_core_job_release() - Decrease the job reference count. The + * job will be released (the release() function + * will be called) when the reference count + * reaches zero. + * + * @job: Job to decrease reference count for. 
+ * @caller: The function calling this function (for debug)
+ */
+void b2r2_core_job_release(struct b2r2_core_job *job, const char *caller);
+
+void b2r2_core_print_stats(struct b2r2_core *core);
+
+void b2r2_core_release(struct kref *control_ref);
+
+#endif /* !defined(__B2R2_CORE_H__) */
diff --git a/drivers/video/b2r2/b2r2_debug.c b/drivers/video/b2r2/b2r2_debug.c
new file mode 100644
index 00000000000..934ba938ee5
--- /dev/null
+++ b/drivers/video/b2r2/b2r2_debug.c
@@ -0,0 +1,340 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * ST-Ericsson B2R2 dynamic debug
+ *
+ * Author: Fredrik Allansson <fredrik.allansson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#include "b2r2_debug.h"
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+int b2r2_log_levels[B2R2_LOG_LEVEL_COUNT];
+static struct dentry *log_lvl_dir;
+static int module_init;
+
+#define CHARS_IN_NODE_DUMP 1544
+#define DUMPED_NODE_SIZE (CHARS_IN_NODE_DUMP * sizeof(char) + 1)
+
+static void dump_node(char *dst, struct b2r2_node *node)
+{
+ dst += sprintf(dst, "node 0x%08x ------------------\n",
+ (unsigned int)node);
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_NIP:", node->node.GROUP0.B2R2_NIP);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_CIC:", node->node.GROUP0.B2R2_CIC);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_INS:", node->node.GROUP0.B2R2_INS);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_ACK:", node->node.GROUP0.B2R2_ACK);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_TBA:", node->node.GROUP1.B2R2_TBA);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_TTY:", node->node.GROUP1.B2R2_TTY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_TXY:", node->node.GROUP1.B2R2_TXY);
+ dst += sprintf(dst, "%s\t0x%08x\n",
+ "B2R2_TSZ:", node->node.GROUP1.B2R2_TSZ);
+ dst += sprintf(dst, "--\n");
+
+ dst += sprintf(dst, "%s\t0x%08x\n",
+
"B2R2_S1CF:", node->node.GROUP2.B2R2_S1CF); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_S2CF:", node->node.GROUP2.B2R2_S2CF); + dst += sprintf(dst, "--\n"); + + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_S1BA:", node->node.GROUP3.B2R2_SBA); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_S1TY:", node->node.GROUP3.B2R2_STY); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_S1XY:", node->node.GROUP3.B2R2_SXY); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_S1SZ:", node->node.GROUP3.B2R2_SSZ); + dst += sprintf(dst, "--\n"); + + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_S2BA:", node->node.GROUP4.B2R2_SBA); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_S2TY:", node->node.GROUP4.B2R2_STY); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_S2XY:", node->node.GROUP4.B2R2_SXY); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_S2SZ:", node->node.GROUP4.B2R2_SSZ); + dst += sprintf(dst, "--\n"); + + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_S3BA:", node->node.GROUP5.B2R2_SBA); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_S3TY:", node->node.GROUP5.B2R2_STY); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_S3XY:", node->node.GROUP5.B2R2_SXY); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_S3SZ:", node->node.GROUP5.B2R2_SSZ); + dst += sprintf(dst, "--\n"); + + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_CWO:", node->node.GROUP6.B2R2_CWO); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_CWS:", node->node.GROUP6.B2R2_CWS); + dst += sprintf(dst, "--\n"); + + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_CCO:", node->node.GROUP7.B2R2_CCO); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_CML:", node->node.GROUP7.B2R2_CML); + dst += sprintf(dst, "--\n"); + + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_PMK:", node->node.GROUP8.B2R2_PMK); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_FCTL:", node->node.GROUP8.B2R2_FCTL); + dst += sprintf(dst, "--\n"); + + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_RSF:", node->node.GROUP9.B2R2_RSF); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_RZI:", 
node->node.GROUP9.B2R2_RZI); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_HFP:", node->node.GROUP9.B2R2_HFP); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_VFP:", node->node.GROUP9.B2R2_VFP); + dst += sprintf(dst, "--\n"); + + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_Y_RSF:", node->node.GROUP10.B2R2_RSF); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_Y_RZI:", node->node.GROUP10.B2R2_RZI); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_Y_HFP:", node->node.GROUP10.B2R2_HFP); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_Y_VFP:", node->node.GROUP10.B2R2_VFP); + dst += sprintf(dst, "--\n"); + + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_FF0:", node->node.GROUP11.B2R2_FF0); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_FF1:", node->node.GROUP11.B2R2_FF1); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_FF2:", node->node.GROUP11.B2R2_FF2); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_FF3:", node->node.GROUP11.B2R2_FF3); + dst += sprintf(dst, "--\n"); + + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_KEY1:", node->node.GROUP12.B2R2_KEY1); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_KEY2:", node->node.GROUP12.B2R2_KEY2); + dst += sprintf(dst, "--\n"); + + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_XYL:", node->node.GROUP13.B2R2_XYL); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_XYP:", node->node.GROUP13.B2R2_XYP); + dst += sprintf(dst, "--\n"); + + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_SAR:", node->node.GROUP14.B2R2_SAR); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_USR:", node->node.GROUP14.B2R2_USR); + dst += sprintf(dst, "--\n"); + + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_IVMX0:", node->node.GROUP15.B2R2_VMX0); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_IVMX1:", node->node.GROUP15.B2R2_VMX1); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_IVMX2:", node->node.GROUP15.B2R2_VMX2); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_IVMX3:", node->node.GROUP15.B2R2_VMX3); + dst += sprintf(dst, "--\n"); + + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_OVMX0:", 
node->node.GROUP16.B2R2_VMX0); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_OVMX1:", node->node.GROUP16.B2R2_VMX1); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_OVMX2:", node->node.GROUP16.B2R2_VMX2); + dst += sprintf(dst, "%s\t0x%08x\n", + "B2R2_OVMX3:", node->node.GROUP16.B2R2_VMX3); + dst += sprintf(dst, "--\n"); + +} + +void b2r2_debug_job_done(struct b2r2_control *cont, + struct b2r2_node *first_node) +{ + struct b2r2_node *node = first_node; + struct b2r2_node **dst_node; + unsigned int node_count = 0; + + while (node != NULL) { + node_count++; + node = node->next; + } + + mutex_lock(&cont->last_job_lock); + + if (cont->last_job) { + node = cont->last_job; + while (node != NULL) { + struct b2r2_node *tmp = node->next; + kfree(node); + node = tmp; + } + cont->last_job = NULL; + } + + node = first_node; + dst_node = &cont->last_job; + while (node != NULL) { + *dst_node = kzalloc(sizeof(**dst_node), GFP_KERNEL); + if (!(*dst_node)) + goto last_job_alloc_failed; + + memcpy(*dst_node, node, sizeof(**dst_node)); + + dst_node = &((*dst_node)->next); + node = node->next; + } + + mutex_unlock(&cont->last_job_lock); + + return; + +last_job_alloc_failed: + mutex_unlock(&cont->last_job_lock); + + while (cont->last_job != NULL) { + struct b2r2_node *tmp = cont->last_job->next; + kfree(cont->last_job); + cont->last_job = tmp; + } + + return; +} + +static ssize_t last_job_read(struct file *filp, char __user *buf, + size_t bytes, loff_t *off) +{ + struct b2r2_control *cont = filp->f_dentry->d_inode->i_private; + struct b2r2_node *node = cont->last_job; + int node_count = 0; + int i; + + size_t size; + size_t count; + loff_t offs = *off; + + for (; node != NULL; node = node->next) + node_count++; + + size = node_count * DUMPED_NODE_SIZE; + + if (node_count != cont->prev_node_count) { + kfree(cont->last_job_chars); + + cont->last_job_chars = kzalloc(size, GFP_KERNEL); + if (!cont->last_job_chars) + return 0; + cont->prev_node_count = node_count; + } + + 
mutex_lock(&cont->last_job_lock); + node = cont->last_job; + for (i = 0; i < node_count; i++) { + BUG_ON(node == NULL); + dump_node(cont->last_job_chars + + i * DUMPED_NODE_SIZE/sizeof(char), + node); + node = node->next; + } + mutex_unlock(&cont->last_job_lock); + + if (offs > size) + return 0; + + if (offs + bytes > size) + count = size - offs; + else + count = bytes; + + if (copy_to_user(buf, cont->last_job_chars + offs, count)) + return -EFAULT; + + *off = offs + count; + return count; +} + +static const struct file_operations last_job_fops = { + .read = last_job_read, +}; + +int b2r2_debug_init(struct b2r2_control *cont) +{ + int i; + + if (!module_init) { + for (i = 0; i < B2R2_LOG_LEVEL_COUNT; i++) + b2r2_log_levels[i] = 0; + +#if !defined(CONFIG_DYNAMIC_DEBUG) && defined(CONFIG_DEBUG_FS) + /* + * If dynamic debug is disabled we need some other way to + * control the log prints + */ + log_lvl_dir = debugfs_create_dir("b2r2_log", NULL); + + /* No need to save the files, + * they will be removed recursively */ + if (!IS_ERR_OR_NULL(log_lvl_dir)) { + (void)debugfs_create_bool("warnings", 0644, log_lvl_dir, + &b2r2_log_levels[B2R2_LOG_LEVEL_WARN]); + (void)debugfs_create_bool("info", 0644, log_lvl_dir, + &b2r2_log_levels[B2R2_LOG_LEVEL_INFO]); + (void)debugfs_create_bool("debug", 0644, log_lvl_dir, + &b2r2_log_levels[B2R2_LOG_LEVEL_DEBUG]); + (void)debugfs_create_bool("regdumps", 0644, log_lvl_dir, + &b2r2_log_levels[B2R2_LOG_LEVEL_REGDUMP]); + } + +#elif defined(CONFIG_DYNAMIC_DEBUG) + /* log_lvl_dir is never used */ + (void)log_lvl_dir; +#endif + module_init++; + } + + if (!IS_ERR_OR_NULL(cont->debugfs_debug_root_dir)) { + /* No need to save the file, + * it will be removed recursively */ + (void)debugfs_create_file("last_job", 0444, + cont->debugfs_debug_root_dir, cont, + &last_job_fops); + } + + mutex_init(&cont->last_job_lock); + + return 0; +} + +void b2r2_debug_exit(void) +{ +#if !defined(CONFIG_DYNAMIC_DEBUG) && defined(CONFIG_DEBUG_FS) + module_init--; 
+ if (!module_init && !IS_ERR_OR_NULL(log_lvl_dir)) { + debugfs_remove_recursive(log_lvl_dir); + log_lvl_dir = NULL; + } +#endif +} diff --git a/drivers/video/b2r2/b2r2_debug.h b/drivers/video/b2r2/b2r2_debug.h new file mode 100644 index 00000000000..1b1ac83f6cb --- /dev/null +++ b/drivers/video/b2r2/b2r2_debug.h @@ -0,0 +1,102 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 dynamic debug + * + * Author: Fredrik Allansson <fredrik.allansson@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#ifndef _LINUX_DRIVERS_VIDEO_B2R2_DEBUG_H_ +#define _LINUX_DRIVERS_VIDEO_B2R2_DEBUG_H_ + +#include <linux/device.h> + +#include "b2r2_internal.h" + +#ifdef CONFIG_B2R2_DEBUG + +/* Log macros */ +enum b2r2_log_levels { + B2R2_LOG_LEVEL_WARN, + B2R2_LOG_LEVEL_INFO, + B2R2_LOG_LEVEL_DEBUG, + B2R2_LOG_LEVEL_REGDUMP, + B2R2_LOG_LEVEL_COUNT, +}; + +/* + * Booleans controlling the different log levels. The different log levels are + * enabled separately (i.e. you can have info prints without the warn prints). + */ +extern int b2r2_log_levels[B2R2_LOG_LEVEL_COUNT]; + +#define b2r2_log_err(b2r2_log_dev, ...) do { \ + dev_err(b2r2_log_dev, __VA_ARGS__); \ + } while (0) + +/* If dynamic debug is enabled it should be used instead of loglevels */ +#ifdef CONFIG_DYNAMIC_DEBUG +# define b2r2_log_warn(b2r2_log_dev, ...) do { \ + dev_dbg(b2r2_log_dev, "WARN " __VA_ARGS__); \ + } while (0) +# define b2r2_log_info(b2r2_log_dev, ...) do { \ + dev_dbg(b2r2_log_dev, "INFO " __VA_ARGS__); \ + } while (0) +# define b2r2_log_debug(b2r2_log_dev, ...) do { \ + dev_dbg(b2r2_log_dev, "DEBUG " __VA_ARGS__); \ + } while (0) +# define b2r2_log_regdump(b2r2_log_dev, ...) do { \ + dev_dbg(b2r2_log_dev, "REGD " __VA_ARGS__); \ + } while (0) +#else +# define b2r2_log_warn(b2r2_log_dev, ...) 
do { \ + if (b2r2_log_levels[B2R2_LOG_LEVEL_WARN]) \ + dev_warn(b2r2_log_dev, "WARN " __VA_ARGS__); \ + } while (0) +# define b2r2_log_info(b2r2_log_dev, ...) do { \ + if (b2r2_log_levels[B2R2_LOG_LEVEL_INFO]) \ + dev_info(b2r2_log_dev, "INFO " __VA_ARGS__); \ + } while (0) +# define b2r2_log_debug(b2r2_log_dev, ...) do { \ + if (b2r2_log_levels[B2R2_LOG_LEVEL_DEBUG]) \ + dev_dbg(b2r2_log_dev, "DEBUG " __VA_ARGS__); \ + } while (0) +# define b2r2_log_regdump(b2r2_log_dev, ...) do { \ + if (b2r2_log_levels[B2R2_LOG_LEVEL_REGDUMP]) \ + dev_vdbg(b2r2_log_dev, "REGD " __VA_ARGS__); \ + } while (0) +#endif + +int b2r2_debug_init(struct b2r2_control *cont); +void b2r2_debug_exit(void); +void b2r2_debug_job_done(struct b2r2_control *cont, + struct b2r2_node *node); + +#else + +#define b2r2_log_err(...) +#define b2r2_log_warn(...) +#define b2r2_log_info(...) +#define b2r2_log_debug(...) +#define b2r2_log_regdump(...) + +static inline int b2r2_debug_init(struct b2r2_control *cont) +{ + return 0; +} +static inline void b2r2_debug_exit(void) +{ + return; +} +static inline void b2r2_debug_job_done(struct b2r2_control *cont, + struct b2r2_node *node) +{ + return; +} + +#endif + +#endif diff --git a/drivers/video/b2r2/b2r2_filters.c b/drivers/video/b2r2/b2r2_filters.c new file mode 100644 index 00000000000..a93eb60d91f --- /dev/null +++ b/drivers/video/b2r2/b2r2_filters.c @@ -0,0 +1,377 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 filters. + * + * Author: Fredrik Allansson <fredrik.allansson@stericsson.com> for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#include <linux/dma-mapping.h> + +#include "b2r2_filters.h" +#include "b2r2_internal.h" +#include "b2r2_debug.h" + +/** + * struct b2r2_filter_spec filters[] - Filter lookup table + * + * Lookup table for filters for different scale factors. A filter + * will be selected according to "min < scale_factor <= max". 
+ */ +static struct b2r2_filter_spec filters[] = { + { + .min = 1024, + .max = 1433, + .h_coeffs = { + 0x00, 0x00, 0xFA, 0x09, 0x34, 0x09, 0x00, 0x00, + 0x00, 0x00, 0xF9, 0x10, 0x32, 0x06, 0xFF, 0x00, + 0x00, 0x00, 0xF8, 0x17, 0x2F, 0x02, 0x00, 0x00, + 0x00, 0x00, 0xF7, 0x20, 0x2A, 0xFF, 0x00, 0x00, + 0x00, 0x00, 0xF8, 0x27, 0x25, 0xFC, 0x00, 0x00, + 0x00, 0x00, 0xFA, 0x2D, 0x1D, 0xFC, 0x00, 0x00, + 0x00, 0x00, 0xFE, 0x31, 0x15, 0xFC, 0x00, 0x00, + 0x00, 0x00, 0x02, 0x35, 0x0D, 0xFC, 0x00, 0x00 + }, + .v_coeffs = { + 0x00, 0x02, 0x3C, 0x02, 0x00, + 0x00, 0x08, 0x3B, 0xFD, 0x00, + 0x00, 0x10, 0x35, 0xFB, 0x00, + 0x00, 0x18, 0x30, 0xF8, 0x00, + 0x00, 0x1F, 0x27, 0xFA, 0x00, + 0x00, 0x27, 0x1E, 0xFB, 0x00, + 0x00, 0x2E, 0x16, 0xFC, 0x00, + 0x00, 0x34, 0x0D, 0xFF, 0x00 + }, + }, + { + .min = 1433, + .max = 1536, + .h_coeffs = { + 0xfe, 0x06, 0xf8, 0x0b, 0x30, 0x0b, 0xf8, 0x06, + 0xff, 0x06, 0xf7, 0x12, 0x2d, 0x05, 0xfa, 0x06, + 0x00, 0x04, 0xf6, 0x18, 0x2c, 0x00, 0xfc, 0x06, + 0x01, 0x02, 0xf7, 0x1f, 0x27, 0xfd, 0xff, 0x04, + 0x03, 0x00, 0xf9, 0x24, 0x24, 0xf9, 0x00, 0x03, + 0x04, 0xff, 0xfd, 0x29, 0x1d, 0xf7, 0x02, 0x01, + 0x06, 0xfc, 0x00, 0x2d, 0x17, 0xf6, 0x04, 0x00, + 0x06, 0xfa, 0x05, 0x30, 0x0f, 0xf7, 0x06, 0xff + }, + .v_coeffs = { + 0xf6, 0x0e, 0x38, 0x0e, 0xf6, + 0xf5, 0x15, 0x38, 0x06, 0xf8, + 0xf5, 0x1d, 0x33, 0x00, 0xfb, + 0xf6, 0x23, 0x2d, 0xfc, 0xfe, + 0xf9, 0x28, 0x26, 0xf9, 0x00, + 0xfc, 0x2c, 0x1e, 0xf7, 0x03, + 0x00, 0x2e, 0x18, 0xf6, 0x04, + 0x05, 0x2e, 0x11, 0xf7, 0x05 + }, + }, + { + .min = 1536, + .max = 3072, + .h_coeffs = { + 0xfc, 0xfd, 0x06, 0x13, 0x18, 0x13, 0x06, 0xfd, + 0xfc, 0xfe, 0x08, 0x15, 0x17, 0x12, 0x04, 0xfc, + 0xfb, 0xfe, 0x0a, 0x16, 0x18, 0x10, 0x03, 0xfc, + 0xfb, 0x00, 0x0b, 0x18, 0x17, 0x0f, 0x01, 0xfb, + 0xfb, 0x00, 0x0d, 0x19, 0x17, 0x0d, 0x00, 0xfb, + 0xfb, 0x01, 0x0f, 0x19, 0x16, 0x0b, 0x00, 0xfb, + 0xfc, 0x03, 0x11, 0x19, 0x15, 0x09, 0xfe, 0xfb, + 0xfc, 0x04, 0x12, 0x1a, 0x12, 0x08, 0xfe, 0xfc + }, + .v_coeffs = { + 0x05, 
0x10, 0x16, 0x10, 0x05, + 0x06, 0x11, 0x16, 0x0f, 0x04, + 0x08, 0x13, 0x15, 0x0e, 0x02, + 0x09, 0x14, 0x16, 0x0c, 0x01, + 0x0b, 0x15, 0x15, 0x0b, 0x00, + 0x0d, 0x16, 0x13, 0x0a, 0x00, + 0x0f, 0x17, 0x13, 0x08, 0xff, + 0x11, 0x18, 0x12, 0x07, 0xfe + }, + }, + { + .min = 3072, + .max = 4096, + .h_coeffs = { + 0xfe, 0x02, 0x09, 0x0f, 0x0e, 0x0f, 0x09, 0x02, + 0xff, 0x02, 0x09, 0x0f, 0x10, 0x0e, 0x08, 0x01, + 0xff, 0x03, 0x0a, 0x10, 0x10, 0x0d, 0x07, 0x00, + 0x00, 0x04, 0x0b, 0x10, 0x0f, 0x0c, 0x06, 0x00, + 0x00, 0x05, 0x0c, 0x10, 0x0e, 0x0c, 0x05, 0x00, + 0x00, 0x06, 0x0c, 0x11, 0x0e, 0x0b, 0x04, 0x00, + 0x00, 0x07, 0x0d, 0x11, 0x0f, 0x0a, 0x03, 0xff, + 0x01, 0x08, 0x0e, 0x11, 0x0e, 0x09, 0x02, 0xff + }, + .v_coeffs = { + 0x09, 0x0f, 0x10, 0x0f, 0x09, + 0x09, 0x0f, 0x12, 0x0e, 0x08, + 0x0a, 0x10, 0x11, 0x0e, 0x07, + 0x0b, 0x11, 0x11, 0x0d, 0x06, + 0x0c, 0x11, 0x12, 0x0c, 0x05, + 0x0d, 0x12, 0x11, 0x0c, 0x04, + 0x0e, 0x12, 0x11, 0x0b, 0x04, + 0x0f, 0x13, 0x11, 0x0a, 0x03 + }, + }, + { + .min = 4096, + .max = 5120, + .h_coeffs = { + 0x00, 0x04, 0x09, 0x0c, 0x0e, 0x0c, 0x09, 0x04, + 0x01, 0x05, 0x09, 0x0c, 0x0d, 0x0c, 0x08, 0x04, + 0x01, 0x05, 0x0a, 0x0c, 0x0e, 0x0b, 0x08, 0x03, + 0x02, 0x06, 0x0a, 0x0d, 0x0c, 0x0b, 0x07, 0x03, + 0x02, 0x07, 0x0a, 0x0d, 0x0d, 0x0a, 0x07, 0x02, + 0x03, 0x07, 0x0b, 0x0d, 0x0c, 0x0a, 0x06, 0x02, + 0x03, 0x08, 0x0b, 0x0d, 0x0d, 0x0a, 0x05, 0x01, + 0x04, 0x08, 0x0c, 0x0d, 0x0c, 0x09, 0x05, 0x01 + }, + .v_coeffs = { + 0x0a, 0x0e, 0x10, 0x0e, 0x0a, + 0x0b, 0x0e, 0x0f, 0x0e, 0x0a, + 0x0b, 0x0f, 0x10, 0x0d, 0x09, + 0x0c, 0x0f, 0x10, 0x0d, 0x08, + 0x0d, 0x0f, 0x0f, 0x0d, 0x08, + 0x0d, 0x10, 0x10, 0x0c, 0x07, + 0x0e, 0x10, 0x0f, 0x0c, 0x07, + 0x0f, 0x10, 0x10, 0x0b, 0x06 + }, + }, +}; +static const size_t filters_size = sizeof(filters)/sizeof(filters[0]); + +/** + * struct b2r2_filter_spec bilinear_filter - A bilinear filter + * + * The bilinear filter will be used if no custom filters are specified, or + * for upscales not matching any filter in 
the lookup table. + */ +static struct b2r2_filter_spec bilinear_filter = { + .min = 0, + .max = 0xffff, + .h_coeffs = { + 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xff, 0x08, 0x3e, 0xfb, 0x00, 0x00, + 0x00, 0x00, 0xfb, 0x13, 0x3b, 0xf7, 0x00, 0x00, + 0x00, 0x00, 0xf8, 0x1f, 0x34, 0xf5, 0x00, 0x00, + 0x00, 0x00, 0xf6, 0x2b, 0x2a, 0xf5, 0x00, 0x00, + 0x00, 0x00, 0xf6, 0x35, 0x1e, 0xf7, 0x00, 0x00, + 0x00, 0x00, 0xf9, 0x3c, 0x12, 0xf9, 0x00, 0x00, + 0x00, 0x00, 0xfd, 0x3f, 0x07, 0xfd, 0x00, 0x00 + }, + .v_coeffs = { + 0x00, 0x00, 0x40, 0x00, 0x00, + 0x00, 0x09, 0x3d, 0xfa, 0x00, + 0x00, 0x13, 0x39, 0xf4, 0x00, + 0x00, 0x1e, 0x31, 0xf1, 0x00, + 0x00, 0x27, 0x28, 0xf1, 0x00, + 0x00, 0x31, 0x1d, 0xf2, 0x00, + 0x00, 0x38, 0x12, 0xf6, 0x00, + 0x00, 0x3d, 0x07, 0xfc, 0x00 + }, +}; + +/** + * struct b2r2_filter_spec default_downscale_filter - Default filter for downscale + * + * The default downscale filter will be used for downscales not matching any + * filter in the lookup table. + */ +static struct b2r2_filter_spec default_downscale_filter = { + .min = 1 << 10, + .max = 0xffff, + .h_coeffs = { + 0x03, 0x06, 0x09, 0x0b, 0x09, 0x0b, 0x09, 0x06, + 0x03, 0x06, 0x09, 0x0b, 0x0c, 0x0a, 0x08, 0x05, + 0x03, 0x06, 0x09, 0x0b, 0x0c, 0x0a, 0x08, 0x05, + 0x04, 0x07, 0x09, 0x0b, 0x0b, 0x0a, 0x08, 0x04, + 0x04, 0x07, 0x0a, 0x0b, 0x0b, 0x0a, 0x07, 0x04, + 0x04, 0x08, 0x0a, 0x0b, 0x0b, 0x09, 0x07, 0x04, + 0x05, 0x08, 0x0a, 0x0b, 0x0c, 0x09, 0x06, 0x03, + 0x05, 0x08, 0x0a, 0x0b, 0x0c, 0x09, 0x06, 0x03 + }, + .v_coeffs = { + 0x0b, 0x0e, 0x0e, 0x0e, 0x0b, + 0x0b, 0x0e, 0x0f, 0x0d, 0x0b, + 0x0c, 0x0e, 0x0f, 0x0d, 0x0a, + 0x0c, 0x0e, 0x0f, 0x0d, 0x0a, + 0x0d, 0x0f, 0x0e, 0x0d, 0x09, + 0x0d, 0x0f, 0x0f, 0x0c, 0x09, + 0x0e, 0x0f, 0x0e, 0x0c, 0x09, + 0x0e, 0x0f, 0x0f, 0x0c, 0x08 + }, +}; + +/** + * struct b2r2_filter_spec blur_filter - Blur filter + * + * Filter for blurring an image. 
+ */ +static struct b2r2_filter_spec blur_filter = { + .min = 0, + .max = 0xffff, + .h_coeffs = { + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08 + }, + .v_coeffs = { + 0x0c, 0x0c, 0x10, 0x0c, 0x0c, + 0x0c, 0x0c, 0x10, 0x0c, 0x0c, + 0x0c, 0x0c, 0x10, 0x0c, 0x0c, + 0x0c, 0x0c, 0x10, 0x0c, 0x0c, + 0x0c, 0x0c, 0x10, 0x0c, 0x0c, + 0x0c, 0x0c, 0x10, 0x0c, 0x0c, + 0x0c, 0x0c, 0x10, 0x0c, 0x0c, + 0x0c, 0x0c, 0x10, 0x0c, 0x0c + }, +}; + +/* Private function declarations */ +static int alloc_filter_coeffs(struct device *dev, + struct b2r2_filter_spec *filter); +static void free_filter_coeffs(struct device *dev, + struct b2r2_filter_spec *filter); + +/* Public functions */ + +int b2r2_filters_init(struct b2r2_control *cont) +{ + int i; + + if (cont->filters_initialized) + return 0; + + for (i = 0; i < filters_size; i++) { + alloc_filter_coeffs(cont->dev, &filters[i]); + } + + alloc_filter_coeffs(cont->dev, &bilinear_filter); + alloc_filter_coeffs(cont->dev, &default_downscale_filter); + alloc_filter_coeffs(cont->dev, &blur_filter); + + cont->filters_initialized = 1; + + return 0; +} + +void b2r2_filters_exit(struct b2r2_control *cont) +{ + int i; + + if (!cont->filters_initialized) + return; + + for (i = 0; i < filters_size; i++) { + free_filter_coeffs(cont->dev, &filters[i]); + } + + free_filter_coeffs(cont->dev, &bilinear_filter); + free_filter_coeffs(cont->dev, &default_downscale_filter); + free_filter_coeffs(cont->dev, &blur_filter); + + cont->filters_initialized = 0; +} + +struct b2r2_filter_spec *b2r2_filter_find(u16 scale_factor) +{ + int i; + struct b2r2_filter_spec *filter = NULL; + + for (i = 0; i < filters_size; i++) { 
+ if ((filters[i].min < scale_factor) && + (scale_factor <= filters[i].max) && + filters[i].h_coeffs_dma_addr && + filters[i].v_coeffs_dma_addr) { + filter = &filters[i]; + break; + } + } + + if (filter == NULL) { + /* + * No suitable filter has been found. Use default filters, + * bilinear for any upscale. + */ + if (scale_factor < (1 << 10)) + filter = &bilinear_filter; + else + filter = &default_downscale_filter; + } + + /* + * Check so that the coefficients were successfully allocated for this + * filter. + */ + if (!filter->h_coeffs_dma_addr || !filter->v_coeffs_dma_addr) + return NULL; + else + return filter; +} + +struct b2r2_filter_spec *b2r2_filter_blur() +{ + return &blur_filter; +} + +/* Private functions */ +static int alloc_filter_coeffs(struct device *dev, + struct b2r2_filter_spec *filter) +{ + int ret; + + filter->h_coeffs_dma_addr = dma_alloc_coherent(dev, + B2R2_HF_TABLE_SIZE, &(filter->h_coeffs_phys_addr), + GFP_DMA | GFP_KERNEL); + if (filter->h_coeffs_dma_addr == NULL) { + ret = -ENOMEM; + goto error; + } + + filter->v_coeffs_dma_addr = dma_alloc_coherent(dev, + B2R2_VF_TABLE_SIZE, &(filter->v_coeffs_phys_addr), + GFP_DMA | GFP_KERNEL); + if (filter->v_coeffs_dma_addr == NULL) { + ret = -ENOMEM; + goto error; + } + + memcpy(filter->h_coeffs_dma_addr, filter->h_coeffs, + B2R2_HF_TABLE_SIZE); + memcpy(filter->v_coeffs_dma_addr, filter->v_coeffs, + B2R2_VF_TABLE_SIZE); + + return 0; + +error: + free_filter_coeffs(dev, filter); + return ret; + +} + +static void free_filter_coeffs(struct device *dev, + struct b2r2_filter_spec *filter) +{ + if (filter->h_coeffs_dma_addr != NULL) + dma_free_coherent(dev, B2R2_HF_TABLE_SIZE, + filter->h_coeffs_dma_addr, + filter->h_coeffs_phys_addr); + if (filter->v_coeffs_dma_addr != NULL) + dma_free_coherent(dev, B2R2_VF_TABLE_SIZE, + filter->v_coeffs_dma_addr, + filter->v_coeffs_phys_addr); + + filter->h_coeffs_dma_addr = NULL; + filter->h_coeffs_phys_addr = 0; + filter->v_coeffs_dma_addr = NULL; + 
filter->v_coeffs_phys_addr = 0; +} diff --git a/drivers/video/b2r2/b2r2_filters.h b/drivers/video/b2r2/b2r2_filters.h new file mode 100644 index 00000000000..790c9ec8ee9 --- /dev/null +++ b/drivers/video/b2r2/b2r2_filters.h @@ -0,0 +1,73 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 filters. + * + * Author: Fredrik Allansson <fredrik.allansson@stericsson.com> for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#ifndef _LINUX_VIDEO_B2R2_FILTERS_H +#define _LINUX_VIDEO_B2R2_FILTERS_H + +#include <linux/kernel.h> + +#include "b2r2_internal.h" + +#define B2R2_HF_TABLE_SIZE 64 +#define B2R2_VF_TABLE_SIZE 40 + +/** + * @struct b2r2_filter_spec - Filter specification structure + * + * @param min - Minimum scale factor for this filter (in 6.10 fixed point) + * @param max - Maximum scale factor for this filter (in 6.10 fixed point) + * @param h_coeffs - Horizontal filter coefficients + * @param v_coeffs - Vertical filter coefficients + * @param h_coeffs_dma_addr - Virtual DMA address for horizontal coefficients + * @param v_coeffs_dma_addr - Virtual DMA address for vertical coefficients + * @param h_coeffs_phys_addr - Physical address for horizontal coefficients + * @param v_coeffs_phys_addr - Physical address for vertical coefficients + */ +struct b2r2_filter_spec { + const u16 min; + const u16 max; + + const u8 h_coeffs[B2R2_HF_TABLE_SIZE]; + const u8 v_coeffs[B2R2_VF_TABLE_SIZE]; + + void *h_coeffs_dma_addr; + u32 h_coeffs_phys_addr; + + void *v_coeffs_dma_addr; + u32 v_coeffs_phys_addr; +}; + +/** + * b2r2_filters_init() - Initilizes the B2R2 filters + */ +int b2r2_filters_init(struct b2r2_control *control); + +/** + * b2r2_filters_init() - De-initilizes the B2R2 filters + */ +void b2r2_filters_exit(struct b2r2_control *control); + +/** + * b2r2_filter_find() - Find a filter matching the given scale factor + * + * @param scale_factor - Scale factor to find a filter for + * + * Returns NULL if no filter 
could be found. + */ +struct b2r2_filter_spec *b2r2_filter_find(u16 scale_factor); + +/** + * b2r2_filter_blur() - Returns the blur filter + * + * Returns NULL if no blur filter is available. + */ +struct b2r2_filter_spec *b2r2_filter_blur(void); + +#endif /* _LINUX_VIDEO_B2R2_FILTERS_H */ diff --git a/drivers/video/b2r2/b2r2_generic.c b/drivers/video/b2r2/b2r2_generic.c new file mode 100644 index 00000000000..4191e497e13 --- /dev/null +++ b/drivers/video/b2r2/b2r2_generic.c @@ -0,0 +1,3206 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 generic. Full coverage of user interface but + * non optimized implementation. For Fallback purposes. + * + * Author: Maciej Socha <maciej.socha@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#include <linux/kernel.h> +#include <linux/debugfs.h> + +#include "b2r2_generic.h" +#include "b2r2_internal.h" +#include "b2r2_global.h" +#include "b2r2_debug.h" +#include "b2r2_filters.h" +#include "b2r2_hw_convert.h" + +/* + * Debug printing + */ +#define B2R2_GENERIC_DEBUG_AREAS 0 +#define B2R2_GENERIC_DEBUG + +#define B2R2_GENERIC_WORK_BUF_WIDTH 16 +#define B2R2_GENERIC_WORK_BUF_HEIGHT 16 +#define B2R2_GENERIC_WORK_BUF_PITCH (16 * 4) +#define B2R2_GENERIC_WORK_BUF_FMT B2R2_NATIVE_ARGB8888 + +/* + * Private functions + */ + +/** + * reset_nodes() - clears the node list + */ +static void reset_nodes(struct b2r2_control *cont, + struct b2r2_node *node) +{ + b2r2_log_info(cont->dev, "%s ENTRY\n", __func__); + + while (node != NULL) { + memset(&(node->node), 0, sizeof(node->node)); + + /* TODO: Implement support for short linked lists */ + node->node.GROUP0.B2R2_CIC = 0x7fffc; + + if (node->next == NULL) + break; + + node->node.GROUP0.B2R2_NIP = node->next->physical_address; + + node = node->next; + } + b2r2_log_info(cont->dev, "%s DONE\n", __func__); +} + +/** + * dump_nodes() - prints the node list + */ +static void dump_nodes(struct b2r2_control *cont, + 
struct b2r2_node *first, bool dump_all) +{ + struct b2r2_node *node = first; + b2r2_log_info(cont->dev, "%s ENTRY\n", __func__); + do { + b2r2_log_debug(cont->dev, "\nNODE START:\n=============\n"); + b2r2_log_debug(cont->dev, "B2R2_ACK: \t0x%.8x\n", + node->node.GROUP0.B2R2_ACK); + b2r2_log_debug(cont->dev, "B2R2_INS: \t0x%.8x\n", + node->node.GROUP0.B2R2_INS); + b2r2_log_debug(cont->dev, "B2R2_CIC: \t0x%.8x\n", + node->node.GROUP0.B2R2_CIC); + b2r2_log_debug(cont->dev, "B2R2_NIP: \t0x%.8x\n", + node->node.GROUP0.B2R2_NIP); + + b2r2_log_debug(cont->dev, "B2R2_TSZ: \t0x%.8x\n", + node->node.GROUP1.B2R2_TSZ); + b2r2_log_debug(cont->dev, "B2R2_TXY: \t0x%.8x\n", + node->node.GROUP1.B2R2_TXY); + b2r2_log_debug(cont->dev, "B2R2_TTY: \t0x%.8x\n", + node->node.GROUP1.B2R2_TTY); + b2r2_log_debug(cont->dev, "B2R2_TBA: \t0x%.8x\n", + node->node.GROUP1.B2R2_TBA); + + b2r2_log_debug(cont->dev, "B2R2_S2CF: \t0x%.8x\n", + node->node.GROUP2.B2R2_S2CF); + b2r2_log_debug(cont->dev, "B2R2_S1CF: \t0x%.8x\n", + node->node.GROUP2.B2R2_S1CF); + + b2r2_log_debug(cont->dev, "B2R2_S1SZ: \t0x%.8x\n", + node->node.GROUP3.B2R2_SSZ); + b2r2_log_debug(cont->dev, "B2R2_S1XY: \t0x%.8x\n", + node->node.GROUP3.B2R2_SXY); + b2r2_log_debug(cont->dev, "B2R2_S1TY: \t0x%.8x\n", + node->node.GROUP3.B2R2_STY); + b2r2_log_debug(cont->dev, "B2R2_S1BA: \t0x%.8x\n", + node->node.GROUP3.B2R2_SBA); + + b2r2_log_debug(cont->dev, "B2R2_S2SZ: \t0x%.8x\n", + node->node.GROUP4.B2R2_SSZ); + b2r2_log_debug(cont->dev, "B2R2_S2XY: \t0x%.8x\n", + node->node.GROUP4.B2R2_SXY); + b2r2_log_debug(cont->dev, "B2R2_S2TY: \t0x%.8x\n", + node->node.GROUP4.B2R2_STY); + b2r2_log_debug(cont->dev, "B2R2_S2BA: \t0x%.8x\n", + node->node.GROUP4.B2R2_SBA); + + b2r2_log_debug(cont->dev, "B2R2_S3SZ: \t0x%.8x\n", + node->node.GROUP5.B2R2_SSZ); + b2r2_log_debug(cont->dev, "B2R2_S3XY: \t0x%.8x\n", + node->node.GROUP5.B2R2_SXY); + b2r2_log_debug(cont->dev, "B2R2_S3TY: \t0x%.8x\n", + node->node.GROUP5.B2R2_STY); + b2r2_log_debug(cont->dev, 
"B2R2_S3BA: \t0x%.8x\n", + node->node.GROUP5.B2R2_SBA); + + b2r2_log_debug(cont->dev, "B2R2_CWS: \t0x%.8x\n", + node->node.GROUP6.B2R2_CWS); + b2r2_log_debug(cont->dev, "B2R2_CWO: \t0x%.8x\n", + node->node.GROUP6.B2R2_CWO); + + b2r2_log_debug(cont->dev, "B2R2_FCTL: \t0x%.8x\n", + node->node.GROUP8.B2R2_FCTL); + b2r2_log_debug(cont->dev, "B2R2_RSF: \t0x%.8x\n", + node->node.GROUP9.B2R2_RSF); + b2r2_log_debug(cont->dev, "B2R2_RZI: \t0x%.8x\n", + node->node.GROUP9.B2R2_RZI); + b2r2_log_debug(cont->dev, "B2R2_HFP: \t0x%.8x\n", + node->node.GROUP9.B2R2_HFP); + b2r2_log_debug(cont->dev, "B2R2_VFP: \t0x%.8x\n", + node->node.GROUP9.B2R2_VFP); + b2r2_log_debug(cont->dev, "B2R2_LUMA_RSF: \t0x%.8x\n", + node->node.GROUP10.B2R2_RSF); + b2r2_log_debug(cont->dev, "B2R2_LUMA_RZI: \t0x%.8x\n", + node->node.GROUP10.B2R2_RZI); + b2r2_log_debug(cont->dev, "B2R2_LUMA_HFP: \t0x%.8x\n", + node->node.GROUP10.B2R2_HFP); + b2r2_log_debug(cont->dev, "B2R2_LUMA_VFP: \t0x%.8x\n", + node->node.GROUP10.B2R2_VFP); + + + b2r2_log_debug(cont->dev, "B2R2_IVMX0: \t0x%.8x\n", + node->node.GROUP15.B2R2_VMX0); + b2r2_log_debug(cont->dev, "B2R2_IVMX1: \t0x%.8x\n", + node->node.GROUP15.B2R2_VMX1); + b2r2_log_debug(cont->dev, "B2R2_IVMX2: \t0x%.8x\n", + node->node.GROUP15.B2R2_VMX2); + b2r2_log_debug(cont->dev, "B2R2_IVMX3: \t0x%.8x\n", + node->node.GROUP15.B2R2_VMX3); + b2r2_log_debug(cont->dev, "\n=============\nNODE END\n"); + + node = node->next; + } while (node != NULL && dump_all); + + b2r2_log_info(cont->dev, "%s DONE\n", __func__); +} + +/** + * to_native_fmt() - returns the native B2R2 format + */ +static inline enum b2r2_native_fmt to_native_fmt(struct b2r2_control *cont, + enum b2r2_blt_fmt fmt) +{ + + switch (fmt) { + case B2R2_BLT_FMT_UNUSED: + return B2R2_NATIVE_RGB565; + case B2R2_BLT_FMT_1_BIT_A1: + return B2R2_NATIVE_A1; + case B2R2_BLT_FMT_8_BIT_A8: + return B2R2_NATIVE_A8; + case B2R2_BLT_FMT_16_BIT_RGB565: + return B2R2_NATIVE_RGB565; + case B2R2_BLT_FMT_16_BIT_ARGB4444: + return 
B2R2_NATIVE_ARGB4444; + case B2R2_BLT_FMT_16_BIT_ARGB1555: + return B2R2_NATIVE_ARGB1555; + case B2R2_BLT_FMT_24_BIT_ARGB8565: + return B2R2_NATIVE_ARGB8565; + case B2R2_BLT_FMT_24_BIT_RGB888: + return B2R2_NATIVE_RGB888; + case B2R2_BLT_FMT_24_BIT_VUY888: + case B2R2_BLT_FMT_24_BIT_YUV888: + return B2R2_NATIVE_YCBCR888; + case B2R2_BLT_FMT_32_BIT_ABGR8888: /* Not actually supported by HW */ + case B2R2_BLT_FMT_32_BIT_ARGB8888: + return B2R2_NATIVE_ARGB8888; + case B2R2_BLT_FMT_32_BIT_VUYA8888: /* fall through */ + case B2R2_BLT_FMT_32_BIT_AYUV8888: + return B2R2_NATIVE_AYCBCR8888; + case B2R2_BLT_FMT_CB_Y_CR_Y: + return B2R2_NATIVE_YCBCR422R; + case B2R2_BLT_FMT_Y_CB_Y_CR: + return B2R2_NATIVE_YCBCR422R; + case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: + return B2R2_NATIVE_YCBCR42X_R2B; + case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: + case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: + return B2R2_NATIVE_YCBCR42X_MBN; + case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV444_PACKED_PLANAR: + return B2R2_NATIVE_YUV; + default: + /* Should never ever happen */ + return B2R2_NATIVE_BYTE; + } +} + +/** + * get_alpha_range() - returns the alpha range of the given format + */ +static inline enum b2r2_ty get_alpha_range(struct b2r2_control *cont, + enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_24_BIT_ARGB8565: + case B2R2_BLT_FMT_32_BIT_ARGB8888: + case B2R2_BLT_FMT_32_BIT_AYUV8888: + case B2R2_BLT_FMT_32_BIT_VUYA8888: + case B2R2_BLT_FMT_8_BIT_A8: + case B2R2_BLT_FMT_32_BIT_ABGR8888: + return B2R2_TY_ALPHA_RANGE_255; /* 0 - 255 */ + break; + default: + break; + } + + return B2R2_TY_ALPHA_RANGE_128; /* 0 - 128 */ +} + +static unsigned int get_pitch(struct b2r2_control *cont, + 
enum b2r2_blt_fmt format, u32 width) +{ + switch (format) { + case B2R2_BLT_FMT_1_BIT_A1: { + int pitch = width >> 3; + /* Check for remainder */ + if (width & 7) + pitch++; + return pitch; + break; + } + case B2R2_BLT_FMT_8_BIT_A8: + return width; + break; + case B2R2_BLT_FMT_16_BIT_RGB565: /* all 16 bits/pixel RGB formats */ + case B2R2_BLT_FMT_16_BIT_ARGB1555: + case B2R2_BLT_FMT_16_BIT_ARGB4444: + return width * 2; + break; + case B2R2_BLT_FMT_24_BIT_RGB888: /* all 24 bits/pixel raster formats */ + case B2R2_BLT_FMT_24_BIT_ARGB8565: + case B2R2_BLT_FMT_24_BIT_YUV888: + case B2R2_BLT_FMT_24_BIT_VUY888: + return width * 3; + break; + case B2R2_BLT_FMT_32_BIT_ARGB8888: /* all 32 bits/pixel formats */ + case B2R2_BLT_FMT_32_BIT_ABGR8888: + case B2R2_BLT_FMT_32_BIT_VUYA8888: + case B2R2_BLT_FMT_32_BIT_AYUV8888: + return width * 4; + break; + case B2R2_BLT_FMT_Y_CB_Y_CR: + case B2R2_BLT_FMT_CB_Y_CR_Y: + /* width of the buffer must be a multiple of 4 */ + if (width & 3) { + b2r2_log_warn(cont->dev, "%s: Illegal width " + "for fmt=%#010x width=%d\n", __func__, + format, width); + return 0; + } + return width * 2; + break; + case B2R2_BLT_FMT_YUV444_PACKED_PLANAR: + return width; + break; + /* fall through, same pitch and pointers */ + case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: + /* width of the buffer must be a multiple of 2 */ + if (width & 1) { + b2r2_log_warn(cont->dev, "%s: Illegal width " + "for fmt=%#010x width=%d\n", __func__, + format, width); + return 0; + } + /* + * return pitch of the Y-buffer. + * U and V pitch can be derived from it. 
+ */ + return width; + break; + case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: + case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: + /* width of the buffer must be a multiple of 16. */ + if (width & 15) { + b2r2_log_warn(cont->dev, "%s: Illegal width " + "for fmt=%#010x width=%d\n", __func__, + format, width); + return 0; + } + /* + * return pitch of the Y-buffer. + * U and V pitch can be derived from it. + */ + return width; + break; + default: + b2r2_log_warn(cont->dev, "%s: Unable to determine pitch " + "for fmt=%#010x width=%d\n", __func__, + format, width); + return 0; + } +} + +static s32 validate_buf(struct b2r2_control *cont, + const struct b2r2_blt_img *image, + const struct b2r2_resolved_buf *buf) +{ + u32 expect_buf_size; + u32 pitch; + + if (image->width <= 0 || image->height <= 0) { + b2r2_log_warn(cont->dev, "%s: width=%d or height=%d negative" + ".\n", __func__, image->width, image->height); + return -EINVAL; + } + + if (image->pitch == 0) { + /* autodetect pitch based on format and width */ + pitch = get_pitch(cont, image->fmt, image->width); + } else + pitch = image->pitch; + + expect_buf_size = pitch * image->height; + + if (pitch == 0) { + b2r2_log_warn(cont->dev, "%s: Unable to detect pitch. " + "fmt=%#010x, width=%d\n", + __func__, + image->fmt, image->width); + return -EINVAL; + } + + /* format specific adjustments */ + switch (image->fmt) { + case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + /* + * Use ceil(height/2) in case buffer height + * is not divisible by 2. 
+ */ + expect_buf_size += + (pitch >> 1) * ((image->height + 1) >> 1) * 2; + break; + case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: + expect_buf_size += (pitch >> 1) * image->height * 2; + break; + case B2R2_BLT_FMT_YUV444_PACKED_PLANAR: + expect_buf_size += pitch * image->height * 2; + break; + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: + /* + * include space occupied by U and V data. + * U and V interleaved, half resolution, which makes + * the UV pitch equal to luma pitch. + * Use ceil(height/2) in case buffer height + * is not divisible by 2. + */ + expect_buf_size += pitch * ((image->height + 1) >> 1); + break; + case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: + /* + * include space occupied by U and V data. + * U and V interleaved, half resolution, which makes + * the UV pitch equal to luma pitch. + */ + expect_buf_size += pitch * image->height; + break; + case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: + /* Height must be a multiple of 16 for macro-block format.*/ + if (image->height & 15) { + b2r2_log_warn(cont->dev, "%s: Illegal height " + "for fmt=%#010x height=%d\n", __func__, + image->fmt, image->height); + return -EINVAL; + } + expect_buf_size += pitch * (image->height >> 1); + break; + case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: + /* Height must be a multiple of 16 for macro-block format.*/ + if (image->height & 15) { + b2r2_log_warn(cont->dev, "%s: Illegal height " + "for fmt=%#010x height=%d\n", __func__, + image->fmt, image->height); + return -EINVAL; + } + expect_buf_size += pitch * image->height; + break; + default: + break; + } + + if (buf->file_len < expect_buf_size) { + b2r2_log_warn(cont->dev, "%s: Invalid buffer size:\n" + "fmt=%#010x w=%d h=%d buf.len=%d expect_buf_size=%d\n", + __func__, + image->fmt, image->width, image->height, buf->file_len, + expect_buf_size); + return -EINVAL; + } + + if 
(image->buf.type == B2R2_BLT_PTR_VIRTUAL) {
		b2r2_log_warn(cont->dev, "%s: Virtual pointers not supported"
				" yet.\n", __func__);
		return -EINVAL;
	}
	return 0;
}

/*
 * Bit-expand the color from fmt to RGB888 with blue at LSB.
 * Copy MSBs into missing LSBs.
 * Formats not listed in the switch (including pure-alpha and YUV
 * formats) yield 0.
 */
static u32 to_RGB888(struct b2r2_control *cont, u32 color,
		const enum b2r2_blt_fmt fmt)
{
	u32 out_color = 0;
	u32 r = 0;
	u32 g = 0;
	u32 b = 0;
	switch (fmt) {
	case B2R2_BLT_FMT_16_BIT_ARGB4444:
		/* Each 4-bit channel is replicated into the low nibble */
		r = ((color & 0xf00) << 12) | ((color & 0xf00) << 8);
		g = ((color & 0xf0) << 8) | ((color & 0xf0) << 4);
		b = ((color & 0xf) << 4) | (color & 0xf);
		out_color = r | g | b;
		break;
	case B2R2_BLT_FMT_16_BIT_ARGB1555:
		/* 5-bit channels: top 3 bits are copied into the low 3 */
		r = ((color & 0x7c00) << 9) | ((color & 0x7000) << 4);
		g = ((color & 0x3e0) << 6) | ((color & 0x380) << 1);
		b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
		out_color = r | g | b;
		break;
	case B2R2_BLT_FMT_16_BIT_RGB565:
		r = ((color & 0xf800) << 8) | ((color & 0xe000) << 3);
		g = ((color & 0x7e0) << 5) | ((color & 0x600) >> 1);
		b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
		out_color = r | g | b;
		break;
	case B2R2_BLT_FMT_24_BIT_RGB888:
	case B2R2_BLT_FMT_32_BIT_ARGB8888:
		/* Already 8 bits per channel; alpha (if any) is dropped */
		out_color = color & 0xffffff;
		break;
	case B2R2_BLT_FMT_32_BIT_ABGR8888:
		/* Swap R and B to get blue at LSB */
		r = (color & 0xff) << 16;
		g = color & 0xff00;
		b = (color & 0xff0000) >> 16;
		out_color = r | g | b;
		break;
	case B2R2_BLT_FMT_24_BIT_ARGB8565:
		/* RGB565 part expanded as above; alpha byte is dropped */
		r = ((color & 0xf800) << 8) | ((color & 0xe000) << 3);
		g = ((color & 0x7e0) << 5) | ((color & 0x600) >> 1);
		b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2);
		out_color = r | g | b;
		break;
	default:
		break;
	}

	return out_color;
}

/*
 * Configure @node to produce the request's solid fill color into the
 * intermediate work buffer @out_buf.
 *
 * The fill color is emitted through the SRC2 color-fill register
 * (GROUP2.B2R2_S2CF).  The register format of the color depends on the
 * destination image format and on whether SOURCE_FILL (color given as
 * ARGB8888/AYUV8888) or SOURCE_FILL_RAW (color given in the destination's
 * own format) was requested; an input VMX conversion is programmed where
 * the fill color is not already RGB, since the work buffer is RGB.
 */
static void setup_fill_input_stage(const struct b2r2_blt_request *req,
		struct b2r2_node *node,
		struct b2r2_work_buf *out_buf)
{
	enum b2r2_native_fmt fill_fmt = 0;
	u32 src_color = req->user_req.src_color;
	const struct b2r2_blt_img *dst_img = &(req->user_req.dst_img);
	struct b2r2_control *cont = req->instance->control;
	bool fullrange = (req->user_req.flags &
		B2R2_BLT_FLAG_FULL_RANGE_YUV) != 0;

	b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);

	/* Determine format in src_color */
	switch (dst_img->fmt) {
	/* ARGB formats */
	case B2R2_BLT_FMT_16_BIT_ARGB4444:
	case B2R2_BLT_FMT_16_BIT_ARGB1555:
	case B2R2_BLT_FMT_16_BIT_RGB565:
	case B2R2_BLT_FMT_24_BIT_RGB888:
	case B2R2_BLT_FMT_32_BIT_ARGB8888:
	case B2R2_BLT_FMT_32_BIT_ABGR8888:
	case B2R2_BLT_FMT_24_BIT_ARGB8565:
	case B2R2_BLT_FMT_1_BIT_A1:
	case B2R2_BLT_FMT_8_BIT_A8:
		if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_FILL) != 0) {
			fill_fmt = B2R2_NATIVE_ARGB8888;
		} else {
			/* SOURCE_FILL_RAW */
			fill_fmt = to_native_fmt(cont, dst_img->fmt);
			if (dst_img->fmt == B2R2_BLT_FMT_32_BIT_ABGR8888) {
				/*
				 * Color is read from a register,
				 * where it is stored in ABGR format.
				 * Set up IVMX.
				 */
				b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_BGR);
			}
		}
		break;
	/* YUV formats */
	case B2R2_BLT_FMT_Y_CB_Y_CR:
	case B2R2_BLT_FMT_CB_Y_CR_Y:
	case B2R2_BLT_FMT_24_BIT_YUV888:
	case B2R2_BLT_FMT_32_BIT_AYUV8888:
	case B2R2_BLT_FMT_24_BIT_VUY888:
	case B2R2_BLT_FMT_32_BIT_VUYA8888:
	case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
	case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
	case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
	case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
	case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
	case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
	case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
	case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
	case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
	case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
	case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
		if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_FILL) != 0) {
			fill_fmt = B2R2_NATIVE_AYCBCR8888;
			/*
			 * Set up IVMX
			 * The destination format is in fact YUV,
			 * but the input stage stores the data in
			 * an intermediate buffer which is RGB.
			 * Hence the conversion from YUV to RGB.
			 * Format of the supplied src_color is
			 * B2R2_BLT_FMT_32_BIT_AYUV8888.
			 */
			if (fullrange)
				b2r2_setup_ivmx(node,
					B2R2_CC_BLT_YUV888_FULL_TO_RGB);
			else
				b2r2_setup_ivmx(node,
					B2R2_CC_BLT_YUV888_TO_RGB);
		} else {
			/* SOURCE_FILL_RAW */
			bool dst_yuv_planar =
				B2R2_BLT_FMT_YUV420_PACKED_PLANAR ==
					dst_img->fmt ||
				B2R2_BLT_FMT_YUV422_PACKED_PLANAR ==
					dst_img->fmt ||
				B2R2_BLT_FMT_YVU420_PACKED_PLANAR ==
					dst_img->fmt ||
				B2R2_BLT_FMT_YVU422_PACKED_PLANAR ==
					dst_img->fmt ||
				B2R2_BLT_FMT_YUV444_PACKED_PLANAR ==
					dst_img->fmt;

			bool dst_yuv_semi_planar =
				B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ==
					dst_img->fmt ||
				B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ==
					dst_img->fmt ||
				B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ==
					dst_img->fmt ||
				B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ==
					dst_img->fmt ||
				B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ==
					dst_img->fmt ||
				B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE ==
					dst_img->fmt;

			if (dst_yuv_planar || dst_yuv_semi_planar) {
				/*
				 * SOURCE_FILL_RAW cannot be supported
				 * with multi-buffer formats.
				 * Force a legal format to prevent B2R2
				 * from misbehaving.
				 */
				fill_fmt = B2R2_NATIVE_AYCBCR8888;
			} else {
				fill_fmt = to_native_fmt(cont, dst_img->fmt);
			}

			switch (dst_img->fmt) {
			case B2R2_BLT_FMT_24_BIT_YUV888:
			case B2R2_BLT_FMT_32_BIT_AYUV8888:
			case B2R2_BLT_FMT_24_BIT_VUY888:
			case B2R2_BLT_FMT_32_BIT_VUYA8888:
				if (fullrange)
					b2r2_setup_ivmx(node,
						B2R2_CC_BLT_YUV888_FULL_TO_RGB);
				else
					b2r2_setup_ivmx(node,
						B2R2_CC_BLT_YUV888_TO_RGB);
				/*
				 * Re-arrange the color components from
				 * VUY(A) to (A)YUV
				 */
				if (dst_img->fmt ==
						B2R2_BLT_FMT_24_BIT_VUY888) {
					u32 Y = src_color & 0xff;
					u32 U = src_color & 0xff00;
					u32 V = src_color & 0xff0000;
					src_color = (Y << 16) | U | (V >> 16);
				} else if (dst_img->fmt ==
						B2R2_BLT_FMT_32_BIT_VUYA8888) {
					u32 A = src_color & 0xff;
					u32 Y = src_color & 0xff00;
					u32 U = src_color & 0xff0000;
					u32 V = src_color & 0xff000000;
					src_color = (A << 24) |
							(Y << 8) |
							(U >> 8) |
							(V >> 24);
				}
				break;
			case B2R2_BLT_FMT_Y_CB_Y_CR:
				/*
				 * Setup input VMX to convert YVU to
				 * RGB 601 VIDEO
				 * Chroma components are swapped so
				 * it is YVU and not YUV.
				 */
				if (fullrange)
					b2r2_setup_ivmx(node,
						B2R2_CC_YVU_FULL_TO_RGB);
				else
					b2r2_setup_ivmx(node,
						B2R2_CC_YVU_TO_RGB);
				break;
			default:
				/*
				 * Set up IVMX
				 * The destination format is in fact YUV,
				 * but the input stage stores the data in
				 * an intermediate buffer which is RGB.
				 * Hence the conversion from YUV to RGB.
				 */
				if (fullrange)
					b2r2_setup_ivmx(node,
						B2R2_CC_YUV_FULL_TO_RGB);
				else
					b2r2_setup_ivmx(node,
						B2R2_CC_YUV_TO_RGB);
				break;
			}
		}
		break;
	default:
		/* Unknown destination format: fill with transparent black */
		src_color = 0;
		fill_fmt = B2R2_NATIVE_ARGB8888;
		break;
	}

	node->node.GROUP1.B2R2_TBA = out_buf->phys_addr;
	node->node.GROUP1.B2R2_TTY =
		(B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
		B2R2_GENERIC_WORK_BUF_FMT |
		B2R2_TY_ALPHA_RANGE_255 |
		B2R2_TY_HSO_LEFT_TO_RIGHT |
		B2R2_TY_VSO_TOP_TO_BOTTOM;
	/* Set color fill on SRC2 channel */
	node->node.GROUP4.B2R2_SBA = 0;
	node->node.GROUP4.B2R2_STY =
		(0 << B2R2_TY_BITMAP_PITCH_SHIFT) |
		fill_fmt |
		get_alpha_range(cont, dst_img->fmt) |
		B2R2_TY_HSO_LEFT_TO_RIGHT |
		B2R2_TY_VSO_TOP_TO_BOTTOM;

	node->node.GROUP0.B2R2_INS |=
		B2R2_INS_SOURCE_2_COLOR_FILL_REGISTER;
	node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_FILL;
	node->node.GROUP2.B2R2_S2CF = src_color;

	node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
	b2r2_log_info(cont->dev, "%s DONE\n", __func__);
}

/*
 * Configure the input stage node: fetch the source image (or delegate to
 * setup_fill_input_stage() for fill requests), program rescaling and
 * filtering, convert to RGB where the source is YUV/BGR, and write the
 * result to the intermediate work buffer @out_buf.  Flips are realized
 * here via the target scan order; rotation is left to the transform stage.
 */
static void setup_input_stage(const struct b2r2_blt_request *req,
		struct b2r2_node *node,
		struct b2r2_work_buf *out_buf)
{
	/* Horizontal and vertical scaling factors in 6.10 fixed point format */
	s32 h_scf = 1 << 10;
	s32 v_scf = 1 << 10;
	const struct b2r2_blt_rect *src_rect = &(req->user_req.src_rect);
	const struct b2r2_blt_rect *dst_rect = &(req->user_req.dst_rect);
	const struct b2r2_blt_img *src_img = &(req->user_req.src_img);
	u32 src_pitch = 0;
	/* horizontal and vertical scan order for out_buf */
	enum b2r2_ty dst_hso = B2R2_TY_HSO_LEFT_TO_RIGHT;
	enum b2r2_ty dst_vso = B2R2_TY_VSO_TOP_TO_BOTTOM;
	u32 endianness = 0;
	u32 fctl = 0;
	u32 rsf = 0;
	u32 rzi = 0;
	bool yuv_semi_planar =
		src_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
		src_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
		src_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
		src_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
src_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
		src_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;

	bool yuv_planar =
		src_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
		src_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
		src_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
		src_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
		src_img->fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR;

	struct b2r2_filter_spec *hf;
	struct b2r2_filter_spec *vf;

	bool use_h_filter = false;
	bool use_v_filter = false;

	struct b2r2_control *cont = req->instance->control;
	bool fullrange = (req->user_req.flags &
		B2R2_BLT_FLAG_FULL_RANGE_YUV) != 0;

	b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);

	/* Fill requests are handled by a dedicated stage */
	if (((B2R2_BLT_FLAG_SOURCE_FILL | B2R2_BLT_FLAG_SOURCE_FILL_RAW) &
			req->user_req.flags) != 0) {
		setup_fill_input_stage(req, node, out_buf);
		b2r2_log_info(cont->dev, "%s DONE\n", __func__);
		return;
	}

	if (src_img->pitch == 0) {
		/* Determine pitch based on format and width of the image. */
		src_pitch = get_pitch(cont, src_img->fmt, src_img->width);
	} else {
		src_pitch = src_img->pitch;
	}

	b2r2_log_info(cont->dev, "%s transform=%#010x\n",
			__func__, req->user_req.transform);
	/*
	 * A 90-degree rotation swaps the roles of destination width and
	 * height when computing the 6.10 fixed-point scale factors.
	 */
	if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
		h_scf = (src_rect->width << 10) / dst_rect->height;
		v_scf = (src_rect->height << 10) / dst_rect->width;
	} else {
		h_scf = (src_rect->width << 10) / dst_rect->width;
		v_scf = (src_rect->height << 10) / dst_rect->height;
	}

	hf = b2r2_filter_find(h_scf);
	vf = b2r2_filter_find(v_scf);

	use_h_filter = h_scf != (1 << 10);
	use_v_filter = v_scf != (1 << 10);

	/* B2R2_BLT_FLAG_BLUR overrides any scaling filter. */
	if (req->user_req.flags & B2R2_BLT_FLAG_BLUR) {
		use_h_filter = true;
		use_v_filter = true;
		hf = b2r2_filter_blur();
		vf = b2r2_filter_blur();
	}

	/*
	 * Configure horizontal rescale.  Note that the resizer is enabled
	 * unconditionally below (outside the if), the conditional only
	 * guards the log message.
	 */
	if (h_scf != (1 << 10)) {
		b2r2_log_info(cont->dev, "%s: Scaling horizontally by 0x%.8x"
			"\ns(%d, %d)->d(%d, %d)\n", __func__,
			h_scf, src_rect->width, src_rect->height,
			dst_rect->width, dst_rect->height);
	}
	fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER;
	rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
	rsf |= h_scf << B2R2_RSF_HSRC_INC_SHIFT;
	rzi |= B2R2_RZI_DEFAULT_HNB_REPEAT;

	/* Configure vertical rescale */
	if (v_scf != (1 << 10)) {
		b2r2_log_info(cont->dev, "%s: Scaling vertically by 0x%.8x"
			"\ns(%d, %d)->d(%d, %d)\n", __func__,
			v_scf, src_rect->width, src_rect->height,
			dst_rect->width, dst_rect->height);
	}
	fctl |= B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;
	rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
	rsf |= v_scf << B2R2_RSF_VSRC_INC_SHIFT;
	rzi |= 2 << B2R2_RZI_VNB_REPEAT_SHIFT;

	node->node.GROUP0.B2R2_INS |= B2R2_INS_RESCALE2D_ENABLED;
	node->node.GROUP0.B2R2_CIC |= B2R2_CIC_RESIZE_CHROMA;

	/* Adjustments that depend on the source format */
	switch (src_img->fmt) {
	case B2R2_BLT_FMT_32_BIT_ABGR8888:
		b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_BGR);
		break;
	case B2R2_BLT_FMT_Y_CB_Y_CR:
		/*
		 * Setup input VMX to convert YVU to RGB 601 VIDEO
		 * Chroma components are swapped so
		 * it is YVU and not YUV.
		 */
		if (fullrange)
			b2r2_setup_ivmx(node, B2R2_CC_YVU_FULL_TO_RGB);
		else
			b2r2_setup_ivmx(node, B2R2_CC_YVU_TO_RGB);
		break;
	case B2R2_BLT_FMT_CB_Y_CR_Y:
		if (fullrange)
			b2r2_setup_ivmx(node, B2R2_CC_YUV_FULL_TO_RGB);
		else
			b2r2_setup_ivmx(node, B2R2_CC_YUV_TO_RGB);
		break;
	case B2R2_BLT_FMT_24_BIT_YUV888:
	case B2R2_BLT_FMT_32_BIT_AYUV8888:
	case B2R2_BLT_FMT_24_BIT_VUY888:
	case B2R2_BLT_FMT_32_BIT_VUYA8888:
		/*
		 * Set up IVMX.
		 * For B2R2_BLT_FMT_32_BIT_YUV888 and
		 * B2R2_BLT_FMT_32_BIT_AYUV8888
		 * the color components are laid out in memory as V, U, Y, (A)
		 * with V at the first byte (due to little endian addressing).
		 * B2R2 expects them to be as U, Y, V, (A)
		 * with U at the first byte.
		 */
		if (fullrange)
			b2r2_setup_ivmx(node, B2R2_CC_BLT_YUV888_FULL_TO_RGB);
		else
			b2r2_setup_ivmx(node, B2R2_CC_BLT_YUV888_TO_RGB);

		/*
		 * Re-arrange color components from VUY(A) to (A)YUV
		 * for input VMX to work on them further.
		 */
		if (src_img->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
				src_img->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
			endianness = B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
		break;
	case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
	case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
	case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
	case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
	case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
	case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
	case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
	case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
	case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
	case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
	case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: {
		/*
		 * Luma handled in the same way
		 * for all YUV multi-buffer formats.
		 * Set luma rescale registers.
		 */
		u32 rsf_luma = 0;
		u32 rzi_luma = 0;

		node->node.GROUP0.B2R2_INS |= B2R2_INS_RESCALE2D_ENABLED;
		node->node.GROUP0.B2R2_CIC |= B2R2_CIC_RESIZE_LUMA;

		if (src_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
				src_img->fmt ==
					B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR) {
			if (fullrange)
				b2r2_setup_ivmx(node, B2R2_CC_YVU_FULL_TO_RGB);
			else
				b2r2_setup_ivmx(node, B2R2_CC_YVU_TO_RGB);
		} else {
			if (fullrange)
				b2r2_setup_ivmx(node, B2R2_CC_YUV_FULL_TO_RGB);
			else
				b2r2_setup_ivmx(node, B2R2_CC_YUV_TO_RGB);
		}

		fctl |= B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_RESIZER |
			B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER;

		/* Luma filter coefficients live in GROUP10 */
		if (use_h_filter && hf) {
			fctl |= B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_FILTER;
			node->node.GROUP10.B2R2_HFP = hf->h_coeffs_phys_addr;
		}

		if (use_v_filter && vf) {
			fctl |= B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_FILTER;
			node->node.GROUP10.B2R2_VFP = vf->v_coeffs_phys_addr;
		}

		rsf_luma |= h_scf << B2R2_RSF_HSRC_INC_SHIFT;
		rzi_luma |= B2R2_RZI_DEFAULT_HNB_REPEAT;

		rsf_luma |= v_scf << B2R2_RSF_VSRC_INC_SHIFT;
		rzi_luma |= 2 << B2R2_RZI_VNB_REPEAT_SHIFT;

		node->node.GROUP10.B2R2_RSF = rsf_luma;
		node->node.GROUP10.B2R2_RZI = rzi_luma;

		switch (src_img->fmt) {
		case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
		case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
		case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
		case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
		case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
			/*
			 * Chrominance is always half the luminance size
			 * so chrominance resizer is always active.
			 */
			fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
				B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;

			rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
			rsf |= (h_scf >> 1) << B2R2_RSF_HSRC_INC_SHIFT;
			rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
			rsf |= (v_scf >> 1) << B2R2_RSF_VSRC_INC_SHIFT;
			/* Select suitable filter for chroma */
			hf = b2r2_filter_find(h_scf >> 1);
			vf = b2r2_filter_find(v_scf >> 1);
			use_h_filter = true;
			use_v_filter = true;
			break;
		case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
		case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
		case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
		case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
		case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
			/*
			 * Chrominance is always half the luminance size
			 * only in horizontal direction.
			 */
			fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
				B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;

			rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
			rsf |= (h_scf >> 1) << B2R2_RSF_HSRC_INC_SHIFT;
			rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
			rsf |= v_scf << B2R2_RSF_VSRC_INC_SHIFT;
			/* Select suitable filter for chroma */
			hf = b2r2_filter_find(h_scf >> 1);
			use_h_filter = true;
			break;
		case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
			/* Chrominance is the same size as luminance.*/
			fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
				B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;

			rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
			rsf |= h_scf << B2R2_RSF_HSRC_INC_SHIFT;
			rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
			rsf |= v_scf << B2R2_RSF_VSRC_INC_SHIFT;
			/* Select suitable filter for chroma */
			hf = b2r2_filter_find(h_scf);
			vf = b2r2_filter_find(v_scf);
			use_h_filter = true;
			use_v_filter = true;
			break;
		default:
			break;
		}
		break;
	}
	default:
		break;
	}

	/*
	 * Set the filter control and rescale registers.
	 * GROUP9 registers are used for all single-buffer formats
	 * or for chroma in case of multi-buffer YUV formats.
	 * h/v_filter is now appropriately selected for chroma scaling,
	 * be it YUV multi-buffer, or single-buffer raster format.
	 * B2R2_BLT_FLAG_BLUR overrides any scaling filter.
	 */
	if (req->user_req.flags & B2R2_BLT_FLAG_BLUR) {
		use_h_filter = true;
		use_v_filter = true;
		hf = b2r2_filter_blur();
		vf = b2r2_filter_blur();
	}

	if (use_h_filter && hf) {
		fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER;
		node->node.GROUP9.B2R2_HFP = hf->h_coeffs_phys_addr;
	}

	if (use_v_filter && vf) {
		fctl |= B2R2_FCTL_VF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER;
		node->node.GROUP9.B2R2_VFP = vf->v_coeffs_phys_addr;
	}

	node->node.GROUP8.B2R2_FCTL |= fctl;
	node->node.GROUP9.B2R2_RSF |= rsf;
	node->node.GROUP9.B2R2_RZI |= rzi;
	node->node.GROUP0.B2R2_CIC |= B2R2_CIC_FILTER_CONTROL;

	/*
	 * Flip transform is done before potential rotation.
	 * This can be achieved with appropriate scan order.
	 * Transform stage will only do rotation.
	 */
	if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_H)
		dst_hso = B2R2_TY_HSO_RIGHT_TO_LEFT;

	if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_V)
		dst_vso = B2R2_TY_VSO_BOTTOM_TO_TOP;

	/* Set target buffer */
	node->node.GROUP1.B2R2_TBA = out_buf->phys_addr;
	node->node.GROUP1.B2R2_TTY =
		(B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
		B2R2_GENERIC_WORK_BUF_FMT |
		B2R2_TY_ALPHA_RANGE_255 |
		dst_hso | dst_vso;

	if (yuv_planar) {
		/*
		 * Set up chrominance buffers on source 1 and 2,
		 * luminance on source 3.
		 * src_pitch and physical_address apply to luminance,
		 * corresponding chrominance values have to be derived.
		 */
		u32 cb_addr = 0;
		u32 cr_addr = 0;
		u32 chroma_pitch = 0;
		bool swapped_chroma =
			src_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
			src_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR;
		enum b2r2_native_fmt src_fmt =
			to_native_fmt(cont, src_img->fmt);

		/* First chroma plane follows the luma plane in memory */
		if (swapped_chroma)
			cr_addr = req->src_resolved.physical_address +
				src_pitch * src_img->height;
		else
			cb_addr = req->src_resolved.physical_address +
				src_pitch * src_img->height;

		switch (src_img->fmt) {
		case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
		case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
			chroma_pitch = src_pitch >> 1;
			if (swapped_chroma)
				cb_addr = cr_addr + chroma_pitch *
					(src_img->height >> 1);
			else
				cr_addr = cb_addr + chroma_pitch *
					(src_img->height >> 1);
			break;
		case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
		case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
			chroma_pitch = src_pitch >> 1;
			if (swapped_chroma)
				cb_addr = cr_addr + chroma_pitch *
					src_img->height;
			else
				cr_addr = cb_addr + chroma_pitch *
					src_img->height;
			break;
		case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
			/* Chrominance has full resolution, same as luminance.*/
			chroma_pitch = src_pitch;
			cr_addr =
				cb_addr + chroma_pitch * src_img->height;
			break;
		default:
			break;
		}

		node->node.GROUP3.B2R2_SBA = cr_addr;
		node->node.GROUP3.B2R2_STY =
			(chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
			src_fmt |
			B2R2_TY_HSO_LEFT_TO_RIGHT |
			B2R2_TY_VSO_TOP_TO_BOTTOM;

		node->node.GROUP4.B2R2_SBA = cb_addr;
		node->node.GROUP4.B2R2_STY = node->node.GROUP3.B2R2_STY;

		node->node.GROUP5.B2R2_SBA = req->src_resolved.physical_address;
		node->node.GROUP5.B2R2_STY =
			(src_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
			src_fmt |
			B2R2_TY_HSO_LEFT_TO_RIGHT |
			B2R2_TY_VSO_TOP_TO_BOTTOM;

		node->node.GROUP0.B2R2_INS |=
			B2R2_INS_SOURCE_1_FETCH_FROM_MEM |
			B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
			B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
		node->node.GROUP0.B2R2_CIC |=
			B2R2_CIC_SOURCE_1 |
			B2R2_CIC_SOURCE_2 |
			B2R2_CIC_SOURCE_3;
	} else if (yuv_semi_planar) {
		/*
		 * Set up chrominance buffer on source 2, luminance on source 3.
		 * src_pitch and physical_address apply to luminance,
		 * corresponding chrominance values have to be derived.
		 * U and V are interleaved at half the luminance resolution,
		 * which makes the pitch of the UV plane equal
		 * to luminance pitch.
		 */
		u32 chroma_addr = req->src_resolved.physical_address +
			src_pitch * src_img->height;
		u32 chroma_pitch = src_pitch;

		enum b2r2_native_fmt src_fmt =
			to_native_fmt(cont, src_img->fmt);

		node->node.GROUP4.B2R2_SBA = chroma_addr;
		node->node.GROUP4.B2R2_STY =
			(chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
			src_fmt |
			B2R2_TY_HSO_LEFT_TO_RIGHT |
			B2R2_TY_VSO_TOP_TO_BOTTOM;

		node->node.GROUP5.B2R2_SBA = req->src_resolved.physical_address;
		node->node.GROUP5.B2R2_STY =
			(src_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
			src_fmt |
			B2R2_TY_HSO_LEFT_TO_RIGHT |
			B2R2_TY_VSO_TOP_TO_BOTTOM;

		node->node.GROUP0.B2R2_INS |=
			B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
			B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
		node->node.GROUP0.B2R2_CIC |=
			B2R2_CIC_SOURCE_2 | B2R2_CIC_SOURCE_3;
	} else {
		/* single buffer format */
		node->node.GROUP4.B2R2_SBA = req->src_resolved.physical_address;
		node->node.GROUP4.B2R2_STY =
			(src_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
			to_native_fmt(cont, src_img->fmt) |
			get_alpha_range(cont, src_img->fmt) |
			B2R2_TY_HSO_LEFT_TO_RIGHT |
			B2R2_TY_VSO_TOP_TO_BOTTOM |
			endianness;

		node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
		node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
	}

	/* Optional CLUT-based color correction on the input */
	if ((req->user_req.flags &
			B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) != 0) {
		node->node.GROUP0.B2R2_INS |= B2R2_INS_CLUTOP_ENABLED;
		node->node.GROUP0.B2R2_CIC |= B2R2_CIC_CLUT;
		node->node.GROUP7.B2R2_CCO = B2R2_CCO_CLUT_COLOR_CORRECTION |
			B2R2_CCO_CLUT_UPDATE;
		node->node.GROUP7.B2R2_CML = req->clut_phys_addr;
	}

	node->node.GROUP0.B2R2_ACK |=
		B2R2_ACK_MODE_BYPASS_S2_S3;

	b2r2_log_info(cont->dev, "%s DONE\n", __func__);
}

/*
 * Configure @node to rotate the intermediate buffer @in_buf by 90 degrees
 * counter-clockwise (when requested) into @out_buf.  Flips are not done
 * here; they are handled by the input stage's scan order.
 */
static void setup_transform_stage(const struct b2r2_blt_request *req,
		struct b2r2_node *node,
		struct b2r2_work_buf *out_buf,
		struct b2r2_work_buf *in_buf)
{
	/* vertical scan order for out_buf */
	enum b2r2_ty dst_vso = B2R2_TY_VSO_TOP_TO_BOTTOM;
	enum b2r2_blt_transform transform = req->user_req.transform;
#ifdef CONFIG_B2R2_DEBUG
	/*
	 * NOTE(review): cont is only defined when CONFIG_B2R2_DEBUG is set;
	 * b2r2_log_info() presumably compiles to a no-op otherwise -- confirm.
	 */
	struct b2r2_control *cont = req->instance->control;
#endif

	b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);

	if (transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) {
		/*
		 * Scan order must be flipped otherwise contents will
		 * be mirrored vertically. Leftmost column of in_buf
		 * would become top instead of bottom row of out_buf.
		 */
		dst_vso = B2R2_TY_VSO_BOTTOM_TO_TOP;
		node->node.GROUP0.B2R2_INS |= B2R2_INS_ROTATION_ENABLED;
	}

	/* Set target buffer */
	node->node.GROUP1.B2R2_TBA = out_buf->phys_addr;
	node->node.GROUP1.B2R2_TTY =
		(B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
		B2R2_GENERIC_WORK_BUF_FMT |
		B2R2_TY_ALPHA_RANGE_255 |
		B2R2_TY_HSO_LEFT_TO_RIGHT | dst_vso;

	/* Set source buffer on SRC2 channel */
	node->node.GROUP4.B2R2_SBA = in_buf->phys_addr;
	node->node.GROUP4.B2R2_STY =
		(B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
		B2R2_GENERIC_WORK_BUF_FMT |
		B2R2_TY_ALPHA_RANGE_255 |
		B2R2_TY_HSO_LEFT_TO_RIGHT |
		B2R2_TY_VSO_TOP_TO_BOTTOM;

	node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
	node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
	node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;

	b2r2_log_info(cont->dev, "%s DONE\n", __func__);
}

/*
static void setup_mask_stage(const struct b2r2_blt_request req,
	struct b2r2_node *node,
	struct b2r2_work_buf *out_buf,
	struct b2r2_work_buf *in_buf);
*/

/*
 * Configure @node to read the destination image back into the RGB work
 * buffer @out_buf (needed as the background for blending), converting
 * from the destination's format to RGB and upsampling chroma for
 * multi-buffer YUV destinations.
 */
static void setup_dst_read_stage(const struct b2r2_blt_request *req,
		struct b2r2_node *node,
		struct b2r2_work_buf *out_buf)
{
	const
struct b2r2_blt_img *dst_img = &(req->user_req.dst_img);
	u32 fctl = 0;
	u32 rsf = 0;
	u32 endianness = 0;
	bool yuv_semi_planar =
		dst_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
		dst_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
		dst_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
		dst_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
		dst_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE ||
		dst_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE;

	bool yuv_planar =
		dst_img->fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
		dst_img->fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
		dst_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
		dst_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
		dst_img->fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR;

	u32 dst_pitch = 0;
	struct b2r2_control *cont = req->instance->control;
	bool fullrange = (req->user_req.flags &
		B2R2_BLT_FLAG_FULL_RANGE_YUV) != 0;

	if (dst_img->pitch == 0) {
		/* Determine pitch based on format and width of the image. */
		dst_pitch = get_pitch(cont, dst_img->fmt, dst_img->width);
	} else {
		dst_pitch = dst_img->pitch;
	}

	b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);

	/* Adjustments that depend on the destination format */
	switch (dst_img->fmt) {
	case B2R2_BLT_FMT_32_BIT_ABGR8888:
		b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_BGR);
		break;
	case B2R2_BLT_FMT_Y_CB_Y_CR:
		/*
		 * Setup input VMX to convert YVU to RGB 601 VIDEO
		 * Chroma components are swapped
		 * so it is YVU and not YUV.
		 */
		if (fullrange)
			b2r2_setup_ivmx(node, B2R2_CC_YVU_FULL_TO_RGB);
		else
			b2r2_setup_ivmx(node, B2R2_CC_YVU_TO_RGB);
		break;
	case B2R2_BLT_FMT_CB_Y_CR_Y:
		if (fullrange)
			b2r2_setup_ivmx(node, B2R2_CC_YUV_FULL_TO_RGB);
		else
			b2r2_setup_ivmx(node, B2R2_CC_YUV_TO_RGB);
		break;
	case B2R2_BLT_FMT_24_BIT_YUV888:
	case B2R2_BLT_FMT_32_BIT_AYUV8888:
	case B2R2_BLT_FMT_24_BIT_VUY888:
	case B2R2_BLT_FMT_32_BIT_VUYA8888:
		/*
		 * Set up IVMX.
		 * For B2R2_BLT_FMT_32_BIT_YUV888 and
		 * B2R2_BLT_FMT_32_BIT_AYUV8888
		 * the color components are laid out in memory as V, U, Y, (A)
		 * with V at the first byte (due to little endian addressing).
		 * B2R2 expects them to be as U, Y, V, (A)
		 * with U at the first byte.
		 */
		if (fullrange)
			b2r2_setup_ivmx(node, B2R2_CC_BLT_YUV888_FULL_TO_RGB);
		else
			b2r2_setup_ivmx(node, B2R2_CC_BLT_YUV888_TO_RGB);

		/*
		 * Re-arrange color components from VUY(A) to (A)YUV
		 * for input VMX to work on them further.
		 */
		if (dst_img->fmt == B2R2_BLT_FMT_24_BIT_VUY888 ||
				dst_img->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888)
			endianness = B2R2_TY_ENDIAN_BIG_NOT_LITTLE;
		break;
	case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
	case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
	case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
	case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
	case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
	case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
	case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
	case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
	case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
	case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
	case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: {
		if (dst_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
				dst_img->fmt ==
					B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR) {
			if (fullrange)
				b2r2_setup_ivmx(node, B2R2_CC_YVU_FULL_TO_RGB);
			else
				b2r2_setup_ivmx(node, B2R2_CC_YVU_TO_RGB);
		} else {
			if (fullrange)
				b2r2_setup_ivmx(node, B2R2_CC_YUV_FULL_TO_RGB);
			else
				b2r2_setup_ivmx(node, B2R2_CC_YUV_TO_RGB);
		}

		/*
		 * Chroma is read at a fixed 2x (1 << 9 in 6.10 fixed point)
		 * or 1x (1 << 10) upscale depending on subsampling.
		 */
		switch (dst_img->fmt) {
		case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
		case B2R2_BLT_FMT_YVU420_PACKED_PLANAR:
		case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR:
		case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR:
		case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE:
			/*
			 * Chrominance is always half the luminance size
			 * so chrominance resizer is always active.
			 */
			fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
				B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;

			rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
			rsf |= (1 << 9) << B2R2_RSF_HSRC_INC_SHIFT;
			rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
			rsf |= (1 << 9) << B2R2_RSF_VSRC_INC_SHIFT;
			break;
		case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
		case B2R2_BLT_FMT_YVU422_PACKED_PLANAR:
		case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR:
		case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR:
		case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE:
			/*
			 * Chrominance is always half the luminance size
			 * only in horizontal direction.
			 */
			fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER;

			rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
			rsf |= (1 << 9) << B2R2_RSF_HSRC_INC_SHIFT;
			rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
			rsf |= (1 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
			break;
		case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
			/* Chrominance is the same size as luminance.*/
			fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER |
				B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER;

			rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT);
			rsf |= (1 << 10) << B2R2_RSF_HSRC_INC_SHIFT;
			rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT);
			rsf |= (1 << 10) << B2R2_RSF_VSRC_INC_SHIFT;
			break;
		default:
			break;
		}
		/* Set the filter control and rescale registers for chroma */
		node->node.GROUP8.B2R2_FCTL |= fctl;
		node->node.GROUP9.B2R2_RSF |= rsf;
		node->node.GROUP9.B2R2_RZI =
			B2R2_RZI_DEFAULT_HNB_REPEAT |
			(2 << B2R2_RZI_VNB_REPEAT_SHIFT);
		node->node.GROUP0.B2R2_INS |= B2R2_INS_RESCALE2D_ENABLED;
		node->node.GROUP0.B2R2_CIC |=
			B2R2_CIC_FILTER_CONTROL | B2R2_CIC_RESIZE_CHROMA;
		break;
	}
	default:
		break;
	}

	/* Set target buffer */
	node->node.GROUP1.B2R2_TBA = out_buf->phys_addr;
	node->node.GROUP1.B2R2_TTY =
		(B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) |
		B2R2_GENERIC_WORK_BUF_FMT |
		B2R2_TY_ALPHA_RANGE_255 |
		B2R2_TY_HSO_LEFT_TO_RIGHT |
		B2R2_TY_VSO_TOP_TO_BOTTOM;

	if (yuv_planar) {
		/*
		 * Set up chrominance buffers on source 1 and 2,
		 * luminance on source 3.
		 * dst_pitch and physical_address apply to luminance,
		 * corresponding chrominance values have to be derived.
		 */
		u32 cb_addr = 0;
		u32 cr_addr = 0;
		u32 chroma_pitch = 0;
		bool swapped_chroma =
			dst_img->fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
			dst_img->fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR;
		enum b2r2_native_fmt dst_native_fmt =
			to_native_fmt(cont, dst_img->fmt);

		if (swapped_chroma)
			cr_addr = req->dst_resolved.physical_address +
				dst_pitch * dst_img->height;
		else
			cb_addr = req->dst_resolved.physical_address +
				dst_pitch * dst_img->height;

		/*
		 * NOTE(review): unlike the source-side planar handling in
		 * setup_input_stage, this switch has no
		 * B2R2_BLT_FMT_YVU420/422_PACKED_PLANAR case labels, so those
		 * formats fall through to default and chroma_pitch stays 0
		 * even though swapped_chroma selects them above -- verify
		 * whether this is intentional.
		 */
		switch (dst_img->fmt) {
		case B2R2_BLT_FMT_YUV420_PACKED_PLANAR:
			chroma_pitch = dst_pitch >> 1;
			if (swapped_chroma)
				cb_addr = cr_addr + chroma_pitch *
					(dst_img->height >> 1);
			else
				cr_addr = cb_addr + chroma_pitch *
					(dst_img->height >> 1);
			break;
		case B2R2_BLT_FMT_YUV422_PACKED_PLANAR:
			chroma_pitch = dst_pitch >> 1;
			if (swapped_chroma)
				cb_addr = cr_addr + chroma_pitch *
					dst_img->height;
			else
				cr_addr = cb_addr + chroma_pitch *
					dst_img->height;
			break;
		case B2R2_BLT_FMT_YUV444_PACKED_PLANAR:
			/* Chrominance has full resolution, same as luminance */
			chroma_pitch = dst_pitch;
			cr_addr =
				cb_addr + chroma_pitch * dst_img->height;
			break;
		default:
			break;
		}

		node->node.GROUP3.B2R2_SBA = cr_addr;
		node->node.GROUP3.B2R2_STY =
			(chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
			dst_native_fmt |
			B2R2_TY_HSO_LEFT_TO_RIGHT |
			B2R2_TY_VSO_TOP_TO_BOTTOM;

		node->node.GROUP4.B2R2_SBA = cb_addr;
		node->node.GROUP4.B2R2_STY = node->node.GROUP3.B2R2_STY;

		node->node.GROUP5.B2R2_SBA = req->dst_resolved.physical_address;
		node->node.GROUP5.B2R2_STY =
			(dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
			dst_native_fmt |
			B2R2_TY_HSO_LEFT_TO_RIGHT |
			B2R2_TY_VSO_TOP_TO_BOTTOM;

		node->node.GROUP0.B2R2_INS |=
			B2R2_INS_SOURCE_1_FETCH_FROM_MEM |
			B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
			B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
		node->node.GROUP0.B2R2_CIC |=
			B2R2_CIC_SOURCE_1 |
			B2R2_CIC_SOURCE_2 |
			B2R2_CIC_SOURCE_3;
	} else if (yuv_semi_planar) {
		/*
		 * Set up chrominance buffer on source 2, luminance on source 3.
		 * dst_pitch and physical_address apply to luminance,
		 * corresponding chrominance values have to be derived.
		 * U and V are interleaved at half the luminance resolution,
		 * which makes the pitch of the UV plane equal
		 * to luminance pitch.
		 */
		u32 chroma_addr = req->dst_resolved.physical_address +
			dst_pitch * dst_img->height;
		u32 chroma_pitch = dst_pitch;

		enum b2r2_native_fmt dst_native_fmt =
			to_native_fmt(cont, dst_img->fmt);

		node->node.GROUP4.B2R2_SBA = chroma_addr;
		node->node.GROUP4.B2R2_STY =
			(chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
			dst_native_fmt |
			B2R2_TY_HSO_LEFT_TO_RIGHT |
			B2R2_TY_VSO_TOP_TO_BOTTOM;

		node->node.GROUP5.B2R2_SBA = req->dst_resolved.physical_address;
		node->node.GROUP5.B2R2_STY =
			(dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
			dst_native_fmt |
			B2R2_TY_HSO_LEFT_TO_RIGHT |
			B2R2_TY_VSO_TOP_TO_BOTTOM;

		node->node.GROUP0.B2R2_INS |=
			B2R2_INS_SOURCE_2_FETCH_FROM_MEM |
			B2R2_INS_SOURCE_3_FETCH_FROM_MEM;
		node->node.GROUP0.B2R2_CIC |=
			B2R2_CIC_SOURCE_2 | B2R2_CIC_SOURCE_3;
	} else {
		/* single buffer format */
		node->node.GROUP4.B2R2_SBA = req->dst_resolved.physical_address;
		node->node.GROUP4.B2R2_STY =
			(dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) |
			to_native_fmt(cont, dst_img->fmt) |
			get_alpha_range(cont, dst_img->fmt) |
			B2R2_TY_HSO_LEFT_TO_RIGHT |
			B2R2_TY_VSO_TOP_TO_BOTTOM |
			endianness;

		node->node.GROUP0.B2R2_INS |=
			B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
		node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;
	}

	node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;

	b2r2_log_info(cont->dev, "%s DONE\n", __func__);
}

/*
 * Configure @node to blend the foreground work buffer @fg_buf over the
 * background work buffer @bg_buf (in place, target is @bg_buf).  With no
 * blend flags set, the foreground is simply copied through SRC2.
 */
static void setup_blend_stage(const struct b2r2_blt_request *req,
		struct b2r2_node *node,
		struct b2r2_work_buf *bg_buf,
		struct b2r2_work_buf *fg_buf)
{
u32 global_alpha = req->user_req.global_alpha;
#ifdef CONFIG_B2R2_DEBUG
	/*
	 * NOTE(review): cont is only defined when CONFIG_B2R2_DEBUG is set;
	 * b2r2_log_info() presumably compiles to a no-op otherwise -- confirm.
	 */
	struct b2r2_control *cont = req->instance->control;
#endif

	b2r2_log_info(cont->dev, "%s ENTRY\n", __func__);

	/* ACK is rebuilt from scratch for this node */
	node->node.GROUP0.B2R2_ACK = 0;

	if (req->user_req.flags &
			(B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND |
			B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND)) {
		/* Some kind of blending needs to be done. */
		if (req->user_req.flags & B2R2_BLT_FLAG_SRC_IS_NOT_PREMULT)
			node->node.GROUP0.B2R2_ACK |=
				B2R2_ACK_MODE_BLEND_NOT_PREMULT;
		else
			node->node.GROUP0.B2R2_ACK |=
				B2R2_ACK_MODE_BLEND_PREMULT;

		/*
		 * global_alpha register accepts 0..128 range,
		 * global_alpha in the request is 0..255, remap needed.
		 */
		if (req->user_req.flags & B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND) {
			if (global_alpha == 255)
				global_alpha = 128;
			else
				global_alpha >>= 1;
		} else {
			/*
			 * Use solid global_alpha
			 * if global alpha blending is not set.
			 */
			global_alpha = 128;
		}

		node->node.GROUP0.B2R2_ACK |=
			global_alpha << (B2R2_ACK_GALPHA_ROPID_SHIFT);

		/* Set background on SRC1 channel */
		node->node.GROUP3.B2R2_SBA = bg_buf->phys_addr;
		node->node.GROUP3.B2R2_STY =
			(B2R2_GENERIC_WORK_BUF_PITCH <<
			B2R2_TY_BITMAP_PITCH_SHIFT) |
			B2R2_GENERIC_WORK_BUF_FMT |
			B2R2_TY_ALPHA_RANGE_255 |
			B2R2_TY_HSO_LEFT_TO_RIGHT |
			B2R2_TY_VSO_TOP_TO_BOTTOM;

		/* Set foreground on SRC2 channel */
		node->node.GROUP4.B2R2_SBA = fg_buf->phys_addr;
		node->node.GROUP4.B2R2_STY =
			(B2R2_GENERIC_WORK_BUF_PITCH <<
			B2R2_TY_BITMAP_PITCH_SHIFT) |
			B2R2_GENERIC_WORK_BUF_FMT |
			B2R2_TY_ALPHA_RANGE_255 |
			B2R2_TY_HSO_LEFT_TO_RIGHT |
			B2R2_TY_VSO_TOP_TO_BOTTOM;

		/* Set target buffer */
		node->node.GROUP1.B2R2_TBA = bg_buf->phys_addr;
		node->node.GROUP1.B2R2_TTY =
			(B2R2_GENERIC_WORK_BUF_PITCH <<
			B2R2_TY_BITMAP_PITCH_SHIFT) |
			B2R2_GENERIC_WORK_BUF_FMT |
			B2R2_TY_ALPHA_RANGE_255 |
			B2R2_TY_HSO_LEFT_TO_RIGHT |
			B2R2_TY_VSO_TOP_TO_BOTTOM;

		node->node.GROUP0.B2R2_INS |=
			B2R2_INS_SOURCE_1_FETCH_FROM_MEM |
			B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
		node->node.GROUP0.B2R2_CIC |=
			B2R2_CIC_SOURCE_1 |
			B2R2_CIC_SOURCE_2;
	} else {
		/*
		 * No blending, foreground goes on SRC2. No global alpha.
		 * EMACSOC TODO: The blending stage should be skipped altogether
		 * if no blending is to be done. Probably could go directly from
		 * transform to writeback.
		 */
		node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3;
		node->node.GROUP0.B2R2_INS |=
			B2R2_INS_SOURCE_2_FETCH_FROM_MEM;
		node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2;

		node->node.GROUP4.B2R2_SBA = fg_buf->phys_addr;
		node->node.GROUP4.B2R2_STY =
			(B2R2_GENERIC_WORK_BUF_PITCH <<
			B2R2_TY_BITMAP_PITCH_SHIFT) |
			B2R2_GENERIC_WORK_BUF_FMT |
			B2R2_TY_ALPHA_RANGE_255 |
			B2R2_TY_HSO_LEFT_TO_RIGHT |
			B2R2_TY_VSO_TOP_TO_BOTTOM;

		node->node.GROUP1.B2R2_TBA = bg_buf->phys_addr;
		node->node.GROUP1.B2R2_TTY =
			(B2R2_GENERIC_WORK_BUF_PITCH <<
			B2R2_TY_BITMAP_PITCH_SHIFT) |
			B2R2_GENERIC_WORK_BUF_FMT |
			B2R2_TY_ALPHA_RANGE_255 |
			B2R2_TY_HSO_LEFT_TO_RIGHT |
			B2R2_TY_VSO_TOP_TO_BOTTOM;
	}

	b2r2_log_info(cont->dev, "%s DONE\n", __func__);
}

/*
 * Configure @node to write the final work buffer @in_buf out to the
 * request's destination image, converting back to the destination format.
 */
static void setup_writeback_stage(const struct b2r2_blt_request *req,
		struct b2r2_node *node,
		struct b2r2_work_buf *in_buf)
{
	const struct b2r2_blt_img *dst_img = &(req->user_req.dst_img);
	const enum b2r2_blt_fmt dst_fmt = dst_img->fmt;
	const bool yuv_planar_dst =
		dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR ||
		dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR ||
		dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR ||
		dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR ||
		dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR;

	const bool yuv_semi_planar_dst =
		dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR ||
		dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR ||
		dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR ||
		dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR ||
		dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE
|| + dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE; + + const u32 group4_b2r2_sty = + (B2R2_GENERIC_WORK_BUF_PITCH << B2R2_TY_BITMAP_PITCH_SHIFT) | + B2R2_GENERIC_WORK_BUF_FMT | + B2R2_TY_ALPHA_RANGE_255 | + B2R2_TY_HSO_LEFT_TO_RIGHT | + B2R2_TY_VSO_TOP_TO_BOTTOM; + + u32 dst_dither = 0; + u32 dst_pitch = 0; + u32 endianness = 0; + + struct b2r2_control *cont = req->instance->control; + bool fullrange = (req->user_req.flags & + B2R2_BLT_FLAG_FULL_RANGE_YUV) != 0; + + b2r2_log_info(cont->dev, "%s ENTRY\n", __func__); + + if (dst_img->pitch == 0) { + /* Determine pitch based on format and width of the image. */ + dst_pitch = get_pitch(cont, dst_img->fmt, dst_img->width); + } else + dst_pitch = dst_img->pitch; + + if ((req->user_req.flags & B2R2_BLT_FLAG_DITHER) != 0) + dst_dither = B2R2_TTY_RGB_ROUND_DITHER; + + /* Set target buffer(s) */ + if (yuv_planar_dst) { + /* + * three nodes required to write the output. + * Luma, blue chroma and red chroma. + */ + u32 fctl = 0; + u32 rsf = 0; + const u32 group0_b2r2_ins = + B2R2_INS_SOURCE_2_FETCH_FROM_MEM | + B2R2_INS_RECT_CLIP_ENABLED; + const u32 group0_b2r2_cic = + B2R2_CIC_SOURCE_2 | + B2R2_CIC_CLIP_WINDOW; + + u32 cb_addr = 0; + u32 cr_addr = 0; + u32 chroma_pitch = 0; + bool swapped_chroma = + dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR || + dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR; + enum b2r2_native_fmt dst_native_fmt = + to_native_fmt(cont, dst_img->fmt); + enum b2r2_ty alpha_range = get_alpha_range(cont, dst_img->fmt); + + if (swapped_chroma) + cr_addr = req->dst_resolved.physical_address + + dst_pitch * dst_img->height; + else + cb_addr = req->dst_resolved.physical_address + + dst_pitch * dst_img->height; + + switch (dst_fmt) { + case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + chroma_pitch = dst_pitch >> 1; + if (swapped_chroma) + cb_addr = cr_addr + chroma_pitch * + (dst_img->height >> 1); + else + cr_addr = cb_addr + chroma_pitch * + (dst_img->height >> 1); + 
/* + * Chrominance is always half the luminance size + * so chrominance resizer is always active. + */ + fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER | + B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER; + + rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT); + rsf |= (2 << 10) << B2R2_RSF_HSRC_INC_SHIFT; + rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT); + rsf |= (2 << 10) << B2R2_RSF_VSRC_INC_SHIFT; + break; + case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: + chroma_pitch = dst_pitch >> 1; + if (swapped_chroma) + cb_addr = cr_addr + chroma_pitch * + dst_img->height; + else + cr_addr = cb_addr + chroma_pitch * + dst_img->height; + /* + * YUV422 or YVU422 + * Chrominance is always half the luminance size + * only in horizontal direction. + */ + fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER; + + rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT); + rsf |= (2 << 10) << B2R2_RSF_HSRC_INC_SHIFT; + rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT); + rsf |= (1 << 10) << B2R2_RSF_VSRC_INC_SHIFT; + break; + case B2R2_BLT_FMT_YUV444_PACKED_PLANAR: + chroma_pitch = dst_pitch; + cr_addr = + cb_addr + chroma_pitch * dst_img->height; + /* + * No scaling required since + * chrominance is not subsampled. + */ + default: + break; + } + + /* Luma (Y-component) */ + node->node.GROUP1.B2R2_TBA = req->dst_resolved.physical_address; + node->node.GROUP1.B2R2_TTY = + (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) | + dst_native_fmt | alpha_range | + B2R2_TY_HSO_LEFT_TO_RIGHT | + B2R2_TY_VSO_TOP_TO_BOTTOM | + dst_dither; + + if (fullrange) + b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YUV_FULL); + else + b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YUV); + + /* bypass ALU, no blending here. Handled in its own stage. 
*/ + node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3; + node->node.GROUP0.B2R2_INS = group0_b2r2_ins; + node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic; + + /* Set source buffer on SRC2 channel */ + node->node.GROUP4.B2R2_SBA = in_buf->phys_addr; + node->node.GROUP4.B2R2_STY = group4_b2r2_sty; + + /* Blue chroma (U-component)*/ + node = node->next; + node->node.GROUP1.B2R2_TBA = cb_addr; + node->node.GROUP1.B2R2_TTY = + (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) | + dst_native_fmt | alpha_range | + B2R2_TY_HSO_LEFT_TO_RIGHT | + B2R2_TY_VSO_TOP_TO_BOTTOM | + dst_dither | + B2R2_TTY_CHROMA_NOT_LUMA; + + if (fullrange) + b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YUV_FULL); + else + b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YUV); + + node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3; + node->node.GROUP0.B2R2_INS = group0_b2r2_ins; + node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic; + if (dst_fmt != B2R2_BLT_FMT_YUV444_PACKED_PLANAR) { + node->node.GROUP0.B2R2_INS |= + B2R2_INS_RESCALE2D_ENABLED; + node->node.GROUP0.B2R2_CIC |= + B2R2_CIC_FILTER_CONTROL | + B2R2_CIC_RESIZE_CHROMA; + /* Set the filter control and rescale registers */ + node->node.GROUP8.B2R2_FCTL = fctl; + node->node.GROUP9.B2R2_RSF = rsf; + node->node.GROUP9.B2R2_RZI = + B2R2_RZI_DEFAULT_HNB_REPEAT | + (2 << B2R2_RZI_VNB_REPEAT_SHIFT); + } + + node->node.GROUP4.B2R2_SBA = in_buf->phys_addr; + node->node.GROUP4.B2R2_STY = group4_b2r2_sty; + + + /* + * Red chroma (V-component) + * The flag B2R2_TTY_CB_NOT_CR actually works + * the other way around, i.e. as if it was + * CR_NOT_CB. 
+ */ + node = node->next; + node->node.GROUP1.B2R2_TBA = cr_addr; + node->node.GROUP1.B2R2_TTY = + (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) | + dst_native_fmt | alpha_range | + B2R2_TTY_CB_NOT_CR | + B2R2_TY_HSO_LEFT_TO_RIGHT | + B2R2_TY_VSO_TOP_TO_BOTTOM | + dst_dither | + B2R2_TTY_CHROMA_NOT_LUMA; + + if (fullrange) + b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YUV_FULL); + else + b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YUV); + + node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3; + node->node.GROUP0.B2R2_INS = group0_b2r2_ins; + node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic; + if (dst_fmt != B2R2_BLT_FMT_YUV444_PACKED_PLANAR) { + node->node.GROUP0.B2R2_INS |= + B2R2_INS_RESCALE2D_ENABLED; + node->node.GROUP0.B2R2_CIC |= + B2R2_CIC_FILTER_CONTROL | + B2R2_CIC_RESIZE_CHROMA; + /* Set the filter control and rescale registers */ + node->node.GROUP8.B2R2_FCTL = fctl; + node->node.GROUP9.B2R2_RSF = rsf; + node->node.GROUP9.B2R2_RZI = + B2R2_RZI_DEFAULT_HNB_REPEAT | + (2 << B2R2_RZI_VNB_REPEAT_SHIFT); + } + + node->node.GROUP4.B2R2_SBA = in_buf->phys_addr; + node->node.GROUP4.B2R2_STY = group4_b2r2_sty; + } else if (yuv_semi_planar_dst) { + /* + * two nodes required to write the output. + * One node for luma and one for interleaved chroma + * components. 
+ */ + u32 fctl = 0; + u32 rsf = 0; + const u32 group0_b2r2_ins = + B2R2_INS_SOURCE_2_FETCH_FROM_MEM | + B2R2_INS_RECT_CLIP_ENABLED; + const u32 group0_b2r2_cic = + B2R2_CIC_SOURCE_2 | + B2R2_CIC_CLIP_WINDOW; + + u32 chroma_addr = req->dst_resolved.physical_address + + dst_pitch * dst_img->height; + u32 chroma_pitch = dst_pitch; + enum b2r2_native_fmt dst_native_fmt = + to_native_fmt(cont, dst_img->fmt); + enum b2r2_ty alpha_range = get_alpha_range(cont, dst_img->fmt); + + if (dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR || + dst_fmt == + B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE || + dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR) { + /* + * Chrominance is always half the luminance size + * so chrominance resizer is always active. + */ + fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER | + B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER; + + rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT); + rsf |= (2 << 10) << B2R2_RSF_HSRC_INC_SHIFT; + rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT); + rsf |= (2 << 10) << B2R2_RSF_VSRC_INC_SHIFT; + } else { + /* + * YUV422 + * Chrominance is always half the luminance size + * only in horizontal direction. 
+ */ + fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER; + + rsf &= ~(0xffff << B2R2_RSF_HSRC_INC_SHIFT); + rsf |= (2 << 10) << B2R2_RSF_HSRC_INC_SHIFT; + rsf &= ~(0xffff << B2R2_RSF_VSRC_INC_SHIFT); + rsf |= (1 << 10) << B2R2_RSF_VSRC_INC_SHIFT; + } + + /* Luma (Y-component) */ + node->node.GROUP1.B2R2_TBA = req->dst_resolved.physical_address; + node->node.GROUP1.B2R2_TTY = + (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) | + dst_native_fmt | alpha_range | + B2R2_TY_HSO_LEFT_TO_RIGHT | + B2R2_TY_VSO_TOP_TO_BOTTOM | + dst_dither; + + if (dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR || + dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR) { + if (fullrange) + b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YVU_FULL); + else + b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YVU); + } else { + if (fullrange) + b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YUV_FULL); + else + b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YUV); + } + + /* bypass ALU, no blending here. Handled in its own stage. */ + node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3; + node->node.GROUP0.B2R2_INS = group0_b2r2_ins; + node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic; + + /* Set source buffer on SRC2 channel */ + node->node.GROUP4.B2R2_SBA = in_buf->phys_addr; + node->node.GROUP4.B2R2_STY = group4_b2r2_sty; + + /* Chroma (UV-components)*/ + node = node->next; + node->node.GROUP1.B2R2_TBA = chroma_addr; + node->node.GROUP1.B2R2_TTY = + (chroma_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) | + dst_native_fmt | alpha_range | + B2R2_TY_HSO_LEFT_TO_RIGHT | + B2R2_TY_VSO_TOP_TO_BOTTOM | + dst_dither | + B2R2_TTY_CHROMA_NOT_LUMA; + + if (dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR || + dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR) { + if (fullrange) + b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YVU_FULL); + else + b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YVU); + } else { + if (fullrange) + b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YUV_FULL); + else + b2r2_setup_ivmx(node, B2R2_CC_RGB_TO_YUV); + } + + node->node.GROUP0.B2R2_ACK = 
B2R2_ACK_MODE_BYPASS_S2_S3; + node->node.GROUP0.B2R2_INS = + group0_b2r2_ins | B2R2_INS_RESCALE2D_ENABLED; + node->node.GROUP0.B2R2_CIC |= group0_b2r2_cic | + B2R2_CIC_FILTER_CONTROL | + B2R2_CIC_RESIZE_CHROMA; + + /* Set the filter control and rescale registers */ + node->node.GROUP8.B2R2_FCTL = fctl; + node->node.GROUP9.B2R2_RSF = rsf; + node->node.GROUP9.B2R2_RZI = + B2R2_RZI_DEFAULT_HNB_REPEAT | + (2 << B2R2_RZI_VNB_REPEAT_SHIFT); + + node->node.GROUP4.B2R2_SBA = in_buf->phys_addr; + node->node.GROUP4.B2R2_STY = group4_b2r2_sty; + } else { + /* single buffer target */ + + switch (dst_fmt) { + case B2R2_BLT_FMT_32_BIT_ABGR8888: + b2r2_setup_ovmx(node, B2R2_CC_RGB_TO_BGR); + break; + case B2R2_BLT_FMT_Y_CB_Y_CR: + if (fullrange) + b2r2_setup_ovmx(node, B2R2_CC_RGB_TO_YVU_FULL); + else + b2r2_setup_ovmx(node, B2R2_CC_RGB_TO_YVU); + break; + case B2R2_BLT_FMT_24_BIT_YUV888: /* fall through */ + case B2R2_BLT_FMT_32_BIT_AYUV8888: /* fall through */ + case B2R2_BLT_FMT_24_BIT_VUY888: /* fall through */ + case B2R2_BLT_FMT_32_BIT_VUYA8888: + if (fullrange) + b2r2_setup_ovmx(node, B2R2_CC_RGB_TO_BLT_YUV888_FULL); + else + b2r2_setup_ovmx(node, B2R2_CC_RGB_TO_BLT_YUV888); + /* + * Re-arrange color components from (A)YUV to VUY(A) + * when bytes are stored in memory. 
+ */ + if (dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 || + dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888) + endianness = B2R2_TY_ENDIAN_BIG_NOT_LITTLE; + break; + default: + break; + } + + node->node.GROUP1.B2R2_TBA = req->dst_resolved.physical_address; + node->node.GROUP1.B2R2_TTY = + (dst_pitch << B2R2_TY_BITMAP_PITCH_SHIFT) | + to_native_fmt(cont, dst_img->fmt) | + get_alpha_range(cont, dst_img->fmt) | + B2R2_TY_HSO_LEFT_TO_RIGHT | + B2R2_TY_VSO_TOP_TO_BOTTOM | + dst_dither | + endianness; + + node->node.GROUP0.B2R2_ACK = B2R2_ACK_MODE_BYPASS_S2_S3; + node->node.GROUP0.B2R2_INS |= + B2R2_INS_SOURCE_2_FETCH_FROM_MEM | + B2R2_INS_RECT_CLIP_ENABLED; + node->node.GROUP0.B2R2_CIC |= + B2R2_CIC_SOURCE_2 | B2R2_CIC_CLIP_WINDOW; + + if (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY) { + u32 key_color = 0; + + node->node.GROUP0.B2R2_ACK |= + B2R2_ACK_CKEY_SEL_SRC_AFTER_CLUT | + B2R2_ACK_CKEY_RED_MATCH_IF_BETWEEN | + B2R2_ACK_CKEY_GREEN_MATCH_IF_BETWEEN | + B2R2_ACK_CKEY_BLUE_MATCH_IF_BETWEEN; + node->node.GROUP0.B2R2_INS |= B2R2_INS_CKEY_ENABLED; + node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_KEY; + + key_color = to_RGB888(cont, req->user_req.src_color, + req->user_req.src_img.fmt); + node->node.GROUP12.B2R2_KEY1 = key_color; + node->node.GROUP12.B2R2_KEY2 = key_color; + } + + /* Set source buffer on SRC2 channel */ + node->node.GROUP4.B2R2_SBA = in_buf->phys_addr; + node->node.GROUP4.B2R2_STY = group4_b2r2_sty; + } + /* + * Writeback is the last stage. Terminate the program chain + * to prevent out-of-control B2R2 execution. 
+ */ + node->node.GROUP0.B2R2_NIP = 0; + + b2r2_log_info(cont->dev, "%s DONE\n", __func__); +} + +/* + * Public functions + */ +void b2r2_generic_init(struct b2r2_control *cont) +{ + +} + +void b2r2_generic_exit(struct b2r2_control *cont) +{ + +} + +int b2r2_generic_analyze(const struct b2r2_blt_request *req, + s32 *work_buf_width, + s32 *work_buf_height, + u32 *work_buf_count, + u32 *node_count) +{ + /* + * Need at least 4 nodes, read or fill input, read dst, blend + * and write back the result */ + u32 n_nodes = 4; + /* Need at least 2 bufs, 1 for blend output and 1 for input */ + u32 n_work_bufs = 2; + /* Horizontal and vertical scaling factors in 6.10 fixed point format */ + s32 h_scf = 1 << 10; + s32 v_scf = 1 << 10; + enum b2r2_blt_fmt dst_fmt = 0; + bool is_src_fill = false; + bool yuv_planar_dst; + bool yuv_semi_planar_dst; + struct b2r2_blt_rect src_rect; + struct b2r2_blt_rect dst_rect; + struct b2r2_control *cont = req->instance->control; + + if (req == NULL || work_buf_width == NULL || work_buf_height == NULL || + work_buf_count == NULL || node_count == NULL) { + b2r2_log_warn(cont->dev, "%s: Invalid in or out pointers:\n" + "req=0x%p\n" + "work_buf_width=0x%p work_buf_height=0x%p " + "work_buf_count=0x%p\n" + "node_count=0x%p.\n", + __func__, + req, + work_buf_width, work_buf_height, + work_buf_count, + node_count); + return -EINVAL; + } + + dst_fmt = req->user_req.dst_img.fmt; + + is_src_fill = (req->user_req.flags & + (B2R2_BLT_FLAG_SOURCE_FILL | + B2R2_BLT_FLAG_SOURCE_FILL_RAW)) != 0; + + yuv_planar_dst = + dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR || + dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR || + dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR || + dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR || + dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR; + yuv_semi_planar_dst = + dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR || + dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR || + dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR || + dst_fmt == 
B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR || + dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE || + dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE; + + *node_count = 0; + *work_buf_width = 0; + *work_buf_height = 0; + *work_buf_count = 0; + + if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) { + n_nodes++; + n_work_bufs++; + } + + if ((yuv_planar_dst || yuv_semi_planar_dst) && + (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW)) { + b2r2_log_warn(cont->dev, + "%s: Invalid combination: source_fill_raw" + " and multi-buffer destination.\n", + __func__); + return -EINVAL; + } + + if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY) != 0 && + (req->user_req.flags & B2R2_BLT_FLAG_DEST_COLOR_KEY)) { + b2r2_log_warn(cont->dev, + "%s: Invalid combination: source and " + "destination color keying.\n", __func__); + return -EINVAL; + } + + if ((req->user_req.flags & + (B2R2_BLT_FLAG_SOURCE_FILL | + B2R2_BLT_FLAG_SOURCE_FILL_RAW)) && + (req->user_req.flags & + (B2R2_BLT_FLAG_SOURCE_COLOR_KEY | + B2R2_BLT_FLAG_DEST_COLOR_KEY))) { + b2r2_log_warn(cont->dev, "%s: Invalid combination: " + "source_fill and color keying.\n", + __func__); + return -EINVAL; + } + + if ((req->user_req.flags & + (B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND | + B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND)) && + (req->user_req.flags & + (B2R2_BLT_FLAG_DEST_COLOR_KEY | + B2R2_BLT_FLAG_SOURCE_COLOR_KEY))) { + b2r2_log_warn(cont->dev, "%s: Invalid combination: " + "blending and color keying.\n", + __func__); + return -EINVAL; + } + + if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_MASK) && + (req->user_req.flags & + (B2R2_BLT_FLAG_DEST_COLOR_KEY | + B2R2_BLT_FLAG_SOURCE_COLOR_KEY))) { + b2r2_log_warn(cont->dev, "%s: Invalid combination: source mask" + "and color keying.\n", + __func__); + return -EINVAL; + } + + if (req->user_req.flags & + (B2R2_BLT_FLAG_DEST_COLOR_KEY | + B2R2_BLT_FLAG_SOURCE_MASK)) { + b2r2_log_warn(cont->dev, "%s: Unsupported: source mask, " + "destination color 
keying.\n", + __func__); + return -ENOSYS; + } + + if ((req->user_req.flags & B2R2_BLT_FLAG_SOURCE_MASK)) { + enum b2r2_blt_fmt src_fmt = req->user_req.src_img.fmt; + bool yuv_src = + src_fmt == B2R2_BLT_FMT_Y_CB_Y_CR || + src_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR || + src_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR || + src_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR || + src_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR || + src_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR || + src_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR || + src_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR || + src_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR || + src_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR || + src_fmt == + B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE || + src_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE; + if (yuv_src || src_fmt == B2R2_BLT_FMT_1_BIT_A1 || + src_fmt == B2R2_BLT_FMT_8_BIT_A8) { + b2r2_log_warn(cont->dev, "%s: Unsupported: source " + "color keying with YUV or pure alpha " + "formats.\n", __func__); + return -ENOSYS; + } + } + + /* Check for invalid dimensions that would hinder scale calculations */ + src_rect = req->user_req.src_rect; + dst_rect = req->user_req.dst_rect; + /* Check for invalid src_rect unless src_fill is enabled */ + if (!is_src_fill && (src_rect.x < 0 || src_rect.y < 0 || + src_rect.x + src_rect.width > req->user_req.src_img.width || + src_rect.y + src_rect.height > req->user_req.src_img.height)) { + b2r2_log_warn(cont->dev, "%s: src_rect outside src_img:\n" + "src(x,y,w,h)=(%d, %d, %d, %d) " + "src_img(w,h)=(%d, %d).\n", + __func__, + src_rect.x, src_rect.y, src_rect.width, src_rect.height, + req->user_req.src_img.width, + req->user_req.src_img.height); + return -EINVAL; + } + + if (!is_src_fill && (src_rect.width <= 0 || src_rect.height <= 0)) { + b2r2_log_warn(cont->dev, "%s: Invalid source dimensions:\n" + "src(w,h)=(%d, %d).\n", + __func__, + src_rect.width, src_rect.height); + return -EINVAL; + } + + if (dst_rect.width <= 0 || 
dst_rect.height <= 0) { + b2r2_log_warn(cont->dev, "%s: Invalid dest dimensions:\n" + "dst(w,h)=(%d, %d).\n", + __func__, + dst_rect.width, dst_rect.height); + return -EINVAL; + } + + if ((req->user_req.flags & B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) && + req->user_req.clut == NULL) { + b2r2_log_warn(cont->dev, "%s: Invalid request: no table " + "specified for CLUT color correction.\n", + __func__); + return -EINVAL; + } + + /* Check for invalid image params */ + if (!is_src_fill && validate_buf(cont, &(req->user_req.src_img), + &(req->src_resolved))) + return -EINVAL; + + if (validate_buf(cont, &(req->user_req.dst_img), &(req->dst_resolved))) + return -EINVAL; + + if (is_src_fill) { + /* + * Params correct for a source fill operation. + * No need for further checking. + */ + if (yuv_planar_dst) + n_nodes += 2; + else if (yuv_semi_planar_dst) + n_nodes++; + + *work_buf_width = B2R2_GENERIC_WORK_BUF_WIDTH; + *work_buf_height = B2R2_GENERIC_WORK_BUF_HEIGHT; + *work_buf_count = n_work_bufs; + *node_count = n_nodes; + b2r2_log_info(cont->dev, "%s DONE buf_w=%d buf_h=%d " + "buf_count=%d node_count=%d\n", __func__, + *work_buf_width, *work_buf_height, + *work_buf_count, *node_count); + return 0; + } + + /* + * Calculate scaling factors, all transform enum values + * that include rotation have the CCW_ROT_90 bit set. + */ + if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) { + h_scf = (src_rect.width << 10) / dst_rect.height; + v_scf = (src_rect.height << 10) / dst_rect.width; + } else { + h_scf = (src_rect.width << 10) / dst_rect.width; + v_scf = (src_rect.height << 10) / dst_rect.height; + } + + /* Check for degenerate/out_of_range scaling factors. 
*/ + if (h_scf <= 0 || v_scf <= 0 || h_scf > 0x7C00 || v_scf > 0x7C00) { + b2r2_log_warn(cont->dev, + "%s: Dimensions result in degenerate or " + "out of range scaling:\n" + "src(w,h)=(%d, %d) " + "dst(w,h)=(%d,%d).\n" + "h_scf=0x%.8x, v_scf=0x%.8x\n", + __func__, + src_rect.width, src_rect.height, + dst_rect.width, dst_rect.height, + h_scf, v_scf); + return -EINVAL; + } + + if (yuv_planar_dst) + n_nodes += 2; + else if (yuv_semi_planar_dst) + n_nodes++; + + *work_buf_width = B2R2_GENERIC_WORK_BUF_WIDTH; + *work_buf_height = B2R2_GENERIC_WORK_BUF_HEIGHT; + *work_buf_count = n_work_bufs; + *node_count = n_nodes; + b2r2_log_info(cont->dev, "%s DONE buf_w=%d buf_h=%d buf_count=%d " + "node_count=%d\n", __func__, *work_buf_width, + *work_buf_height, *work_buf_count, *node_count); + return 0; +} + +/* + * + */ +int b2r2_generic_configure(const struct b2r2_blt_request *req, + struct b2r2_node *first, + struct b2r2_work_buf *tmp_bufs, + u32 buf_count) +{ + struct b2r2_node *node = NULL; + struct b2r2_work_buf *in_buf = NULL; + struct b2r2_work_buf *out_buf = NULL; + struct b2r2_work_buf *empty_buf = NULL; + struct b2r2_control *cont = req->instance->control; + +#ifdef B2R2_GENERIC_DEBUG + u32 needed_bufs = 0; + u32 needed_nodes = 0; + s32 work_buf_width = 0; + s32 work_buf_height = 0; + u32 n_nodes = 0; + int invalid_req = b2r2_generic_analyze(req, &work_buf_width, + &work_buf_height, &needed_bufs, + &needed_nodes); + if (invalid_req < 0) { + b2r2_log_warn(cont->dev, + "%s: Invalid request supplied, ec=%d\n", + __func__, invalid_req); + return -EINVAL; + } + + node = first; + + while (node != NULL) { + n_nodes++; + node = node->next; + } + if (n_nodes < needed_nodes) { + b2r2_log_warn(cont->dev, "%s: Not enough nodes %d < %d.\n", + __func__, n_nodes, needed_nodes); + return -EINVAL; + } + + if (buf_count < needed_bufs) { + b2r2_log_warn(cont->dev, "%s: Not enough buffers %d < %d.\n", + __func__, buf_count, needed_bufs); + return -EINVAL; + } + +#endif + + 
reset_nodes(cont, first); + node = first; + empty_buf = tmp_bufs; + out_buf = empty_buf; + empty_buf++; + /* Prepare input tile. Color_fill or read from src */ + setup_input_stage(req, node, out_buf); + in_buf = out_buf; + out_buf = empty_buf; + empty_buf++; + node = node->next; + + if ((req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) != 0) { + setup_transform_stage(req, node, out_buf, in_buf); + node = node->next; + in_buf = out_buf; + out_buf = empty_buf++; + } + /* EMACSOC TODO: mask */ + /* + if (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_MASK) { + setup_mask_stage(req, node, out_buf, in_buf); + node = node->next; + in_buf = out_buf; + out_buf = empty_buf++; + } + */ + /* Read the part of destination that will be updated */ + setup_dst_read_stage(req, node, out_buf); + node = node->next; + setup_blend_stage(req, node, out_buf, in_buf); + node = node->next; + in_buf = out_buf; + setup_writeback_stage(req, node, in_buf); + return 0; +} + +void b2r2_generic_set_areas(const struct b2r2_blt_request *req, + struct b2r2_node *first, + struct b2r2_blt_rect *dst_rect_area) +{ + /* + * Nodes come in the following order: <input stage>, [transform], + * [src_mask], <dst_read>, <blend>, <writeback> + */ + struct b2r2_node *node = first; + const struct b2r2_blt_rect *dst_rect = &(req->user_req.dst_rect); + const struct b2r2_blt_rect *src_rect = &(req->user_req.src_rect); + const enum b2r2_blt_fmt src_fmt = req->user_req.src_img.fmt; + bool yuv_multi_buffer_src = + src_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR || + src_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR || + src_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR || + src_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR || + src_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR || + src_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR || + src_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR || + src_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR || + src_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR || + src_fmt == 
B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE || + src_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE; + const enum b2r2_blt_fmt dst_fmt = req->user_req.dst_img.fmt; + const bool yuv_multi_buffer_dst = + dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR || + dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR || + dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR || + dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR || + dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR || + dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR || + dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR || + dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR || + dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR || + dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE || + dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE; + s32 h_scf = 1 << 10; + s32 v_scf = 1 << 10; + s32 src_x = 0; + s32 src_y = 0; + s32 src_w = 0; + s32 src_h = 0; + u32 b2r2_rzi = 0; + s32 clip_top = 0; + s32 clip_left = 0; + s32 clip_bottom = req->user_req.dst_img.height - 1; + s32 clip_right = req->user_req.dst_img.width - 1; + /* Dst coords inside the dst_rect, not the buffer */ + s32 dst_x = dst_rect_area->x; + s32 dst_y = dst_rect_area->y; + struct b2r2_control *cont = req->instance->control; + + b2r2_log_info(cont->dev, "%s ENTRY\n", __func__); + + if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) { + h_scf = (src_rect->width << 10) / dst_rect->height; + v_scf = (src_rect->height << 10) / dst_rect->width; + } else { + h_scf = (src_rect->width << 10) / dst_rect->width; + v_scf = (src_rect->height << 10) / dst_rect->height; + } + + if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) { + /* + * Normally the inverse transform for 90 degree rotation + * is given by: + * | 0 1| |x| | y| + * | | X | | = | | + * |-1 0| |y| |-x| + * but screen coordinates are flipped in y direction + * (compared to usual Cartesian coordinates), hence the offsets. 
+ */ + src_x = (dst_rect->height - dst_y - dst_rect_area->height) * + h_scf; + src_y = dst_x * v_scf; + src_w = dst_rect_area->height * h_scf; + src_h = dst_rect_area->width * v_scf; + } else { + src_x = dst_x * h_scf; + src_y = dst_y * v_scf; + src_w = dst_rect_area->width * h_scf; + src_h = dst_rect_area->height * v_scf; + } + + b2r2_rzi |= ((src_x & 0x3ff) << B2R2_RZI_HSRC_INIT_SHIFT) | + ((src_y & 0x3ff) << B2R2_RZI_VSRC_INIT_SHIFT); + + /* + * src_w must contain all the pixels that contribute + * to a particular tile. + * ((x + 0x3ff) >> 10) is equivalent to ceiling(x), + * expressed in 6.10 fixed point format. + * Every destination tile, maps to a certain area in the source + * rectangle. The area in source will most likely not be a rectangle + * with exact integer dimensions whenever arbitrary scaling is involved. + * Consider the following example. + * Suppose, that width of the current destination tile maps + * to 1.7 pixels in source, starting at x == 5.4, as calculated + * using the scaling factor. + * This means that while the destination tile is written, + * the source should be read from x == 5.4 up to x == 5.4 + 1.7 == 7.1 + * Consequently, color from 3 pixels (x == 5, 6 and 7) + * needs to be read from source. + * The formula below the comment yields: + * ceil(0.4 + 1.7) == ceil(2.1) == 3 + * (src_x & 0x3ff) is the fractional part of src_x, + * which is expressed in 6.10 fixed point format. + * Thus, width of the source area should be 3 pixels wide, + * starting at x == 5. + * However, the reading should not start at x == 5.0 + * but a bit inside, namely x == 5.4 + * The B2R2_RZI register is used to instruct the HW to do so. + * It contains the fractional part that will be added to + * the first pixel coordinate, before incrementing the current source + * coordinate with the step specified in B2R2_RSF register. + * The same applies to scaling in vertical direction. 
+ */ + src_w = ((src_x & 0x3ff) + src_w + 0x3ff) >> 10; + src_h = ((src_y & 0x3ff) + src_h + 0x3ff) >> 10; + + /* + * EMACSOC TODO: Remove this debug clamp, once tile size + * is taken into account in generic_analyze() + */ + if (src_w > 128) + src_w = 128; + + src_x >>= 10; + src_y >>= 10; + + if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_H) + src_x = src_rect->width - src_x - src_w; + + if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_V) + src_y = src_rect->height - src_y - src_h; + + /* + * Translate the src/dst_rect coordinates into true + * src/dst_buffer coordinates + */ + src_x += src_rect->x; + src_y += src_rect->y; + + dst_x += dst_rect->x; + dst_y += dst_rect->y; + + /* + * Clamp the src coords to buffer dimensions + * to prevent illegal reads. + */ + if (src_x < 0) + src_x = 0; + + if (src_y < 0) + src_y = 0; + + if ((src_x + src_w) > req->user_req.src_img.width) + src_w = req->user_req.src_img.width - src_x; + + if ((src_y + src_h) > req->user_req.src_img.height) + src_h = req->user_req.src_img.height - src_y; + + + /* The input node */ + if (yuv_multi_buffer_src) { + /* Luma on SRC3 */ + node->node.GROUP5.B2R2_SXY = + ((src_x & 0xffff) << B2R2_XY_X_SHIFT) | + ((src_y & 0xffff) << B2R2_XY_Y_SHIFT); + node->node.GROUP5.B2R2_SSZ = + ((src_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) | + ((src_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT); + + /* Clear and set only the SRC_INIT bits */ + node->node.GROUP10.B2R2_RZI &= + ~((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) | + (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT)); + node->node.GROUP10.B2R2_RZI |= b2r2_rzi; + + node->node.GROUP9.B2R2_RZI &= + ~((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) | + (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT)); + switch (src_fmt) { + case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: + /* + * Chroma goes on SRC2 and potentially on SRC1. 
+ * Chroma is half the size of luma. Must round up + * the chroma size to handle cases when luma size is not + * divisible by 2. + * E.g. luma width==7 requires chroma width==4. + * Chroma width==7/2==3 is only enough + * for luma width==6. + */ + node->node.GROUP4.B2R2_SXY = + (((src_x & 0xffff) >> 1) << B2R2_XY_X_SHIFT) | + (((src_y & 0xffff) >> 1) << B2R2_XY_Y_SHIFT); + node->node.GROUP4.B2R2_SSZ = + ((((src_w + 1) & 0xfff) >> 1) << + B2R2_SZ_WIDTH_SHIFT) | + ((((src_h + 1) & 0xfff) >> 1) << + B2R2_SZ_HEIGHT_SHIFT); + if (src_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR || + src_fmt == + B2R2_BLT_FMT_YVU420_PACKED_PLANAR) { + node->node.GROUP3.B2R2_SXY = + node->node.GROUP4.B2R2_SXY; + node->node.GROUP3.B2R2_SSZ = + node->node.GROUP4.B2R2_SSZ; + } + node->node.GROUP9.B2R2_RZI |= (b2r2_rzi >> 1) & + ((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) | + (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT)); + break; + case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: + /* + * Chroma goes on SRC2 and potentially on SRC1. + * Now chroma is half the size of luma + * only in horizontal direction. + * Same rounding applies as for 420 formats above, + * except it is only done horizontally. 
+ */ + node->node.GROUP4.B2R2_SXY = + (((src_x & 0xffff) >> 1) << B2R2_XY_X_SHIFT) | + ((src_y & 0xffff) << B2R2_XY_Y_SHIFT); + node->node.GROUP4.B2R2_SSZ = + ((((src_w + 1) & 0xfff) >> 1) << + B2R2_SZ_WIDTH_SHIFT) | + ((src_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT); + if (src_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR || + src_fmt == + B2R2_BLT_FMT_YVU422_PACKED_PLANAR) { + node->node.GROUP3.B2R2_SXY = + node->node.GROUP4.B2R2_SXY; + node->node.GROUP3.B2R2_SSZ = + node->node.GROUP4.B2R2_SSZ; + } + node->node.GROUP9.B2R2_RZI |= + (((src_x & 0x3ff) >> 1) << + B2R2_RZI_HSRC_INIT_SHIFT) | + ((src_y & 0x3ff) << B2R2_RZI_VSRC_INIT_SHIFT); + break; + case B2R2_BLT_FMT_YUV444_PACKED_PLANAR: + /* + * Chroma goes on SRC2 and SRC1. + * It is the same size as luma. + */ + node->node.GROUP4.B2R2_SXY = + ((src_x & 0xffff) << B2R2_XY_X_SHIFT) | + ((src_y & 0xffff) << B2R2_XY_Y_SHIFT); + node->node.GROUP4.B2R2_SSZ = + ((src_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) | + ((src_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT); + node->node.GROUP3.B2R2_SXY = node->node.GROUP4.B2R2_SXY; + node->node.GROUP3.B2R2_SSZ = node->node.GROUP4.B2R2_SSZ; + + /* Clear and set only the SRC_INIT bits */ + node->node.GROUP9.B2R2_RZI &= + ~((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) | + (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT)); + node->node.GROUP9.B2R2_RZI |= b2r2_rzi; + break; + default: + break; + } + } else { + node->node.GROUP4.B2R2_SXY = + ((src_x & 0xffff) << B2R2_XY_X_SHIFT) | + ((src_y & 0xffff) << B2R2_XY_Y_SHIFT); + node->node.GROUP4.B2R2_SSZ = + ((src_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) | + ((src_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT); + + /* Clear and set only the SRC_INIT bits */ + node->node.GROUP9.B2R2_RZI &= + ~((0x3ff << B2R2_RZI_HSRC_INIT_SHIFT) | + (0x3ff << B2R2_RZI_VSRC_INIT_SHIFT)); + node->node.GROUP9.B2R2_RZI |= b2r2_rzi; + } + + node->node.GROUP1.B2R2_TXY = 0; + if (req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) { + /* + * dst_rect_area coordinates are specified + * after potential rotation. 
+ * Input is read before rotation, hence the width and height + * need to be swapped. + * Horizontal and vertical flips are accomplished with + * suitable scanning order while writing + * to the temporary buffer. + */ + if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_H) { + node->node.GROUP1.B2R2_TXY |= + ((dst_rect_area->height - 1) & 0xffff) << + B2R2_XY_X_SHIFT; + } + + if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_V) { + node->node.GROUP1.B2R2_TXY |= + ((dst_rect_area->width - 1) & 0xffff) << + B2R2_XY_Y_SHIFT; + } + + node->node.GROUP1.B2R2_TSZ = + ((dst_rect_area->height & 0xfff) << + B2R2_SZ_WIDTH_SHIFT) | + ((dst_rect_area->width & 0xfff) << + B2R2_SZ_HEIGHT_SHIFT); + } else { + if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_H) { + node->node.GROUP1.B2R2_TXY |= + ((dst_rect_area->width - 1) & 0xffff) << + B2R2_XY_X_SHIFT; + } + + if (req->user_req.transform & B2R2_BLT_TRANSFORM_FLIP_V) { + node->node.GROUP1.B2R2_TXY |= + ((dst_rect_area->height - 1) & 0xffff) << + B2R2_XY_Y_SHIFT; + } + + node->node.GROUP1.B2R2_TSZ = + ((dst_rect_area->width & 0xfff) << + B2R2_SZ_WIDTH_SHIFT) | + ((dst_rect_area->height & 0xfff) << + B2R2_SZ_HEIGHT_SHIFT); + } + + if (req->user_req.flags & + (B2R2_BLT_FLAG_SOURCE_FILL | B2R2_BLT_FLAG_SOURCE_FILL_RAW)) { + /* + * Scan order for source fill should always be left-to-right + * and top-to-bottom. Fill the input tile from top left. + */ + node->node.GROUP1.B2R2_TXY = 0; + node->node.GROUP4.B2R2_SSZ = node->node.GROUP1.B2R2_TSZ; + } + + if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 && + dst_rect_area->y == 0) { + dump_nodes(cont, node, false); + b2r2_log_debug(cont->dev, "%s Input node done.\n", __func__); + } + + /* Transform */ + if ((req->user_req.transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) != 0) { + /* + * Transform node operates on temporary buffers. + * Content always at top left, but scanning order + * has to be flipped during rotation. 
+ * Width and height need to be considered as well, since + * a tile may not necessarily be filled completely. + * dst_rect_area dimensions are specified + * after potential rotation. + * Input is read before rotation, hence the width and height + * need to be swapped on src. + */ + node = node->next; + + node->node.GROUP4.B2R2_SXY = 0; + node->node.GROUP4.B2R2_SSZ = + ((dst_rect_area->height & 0xfff) << + B2R2_SZ_WIDTH_SHIFT) | + ((dst_rect_area->width & 0xfff) << + B2R2_SZ_HEIGHT_SHIFT); + /* Bottom line written first */ + node->node.GROUP1.B2R2_TXY = + ((dst_rect_area->height - 1) & 0xffff) << + B2R2_XY_Y_SHIFT; + + node->node.GROUP1.B2R2_TSZ = + ((dst_rect_area->width & 0xfff) << + B2R2_SZ_WIDTH_SHIFT) | + ((dst_rect_area->height & 0xfff) << + B2R2_SZ_HEIGHT_SHIFT); + + if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 && + dst_rect_area->y == 0) { + dump_nodes(cont, node, false); + b2r2_log_debug(cont->dev, + "%s Tranform node done.\n", __func__); + } + } + + /* Source mask */ + if (req->user_req.flags & B2R2_BLT_FLAG_SOURCE_MASK) { + node = node->next; + /* + * Same coords for mask as for the input stage. + * Should the mask be transformed together with source? + * EMACSOC TODO: Apply mask before any + * transform/scaling is done. + * Otherwise it will be dst_ not src_mask. 
+ */ + if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 && + dst_rect_area->y == 0) { + dump_nodes(cont, node, false); + b2r2_log_debug(cont->dev, + "%s Source mask node done.\n", __func__); + } + } + + /* dst_read */ + if (yuv_multi_buffer_dst) { + s32 dst_w = dst_rect_area->width; + s32 dst_h = dst_rect_area->height; + bool yuv420_dst = + dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR || + dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR || + dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR || + dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR || + dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE; + + bool yuv422_dst = + dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR || + dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR || + dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR || + dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR || + dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE; + node = node->next; + /* Luma on SRC3 */ + node->node.GROUP5.B2R2_SXY = + ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) | + ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT); + node->node.GROUP5.B2R2_SSZ = + ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) | + ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT); + + if (yuv420_dst) { + /* + * Chroma goes on SRC2 and potentially on SRC1. + * Chroma is half the size of luma. Must round up + * the chroma size to handle cases when luma size is not + * divisible by 2. + * E.g. luma width==7 requires chroma width==4. + * Chroma width==7/2==3 is only enough + * for luma width==6. 
+ */ + node->node.GROUP4.B2R2_SXY = + (((dst_x & 0xffff) >> 1) << B2R2_XY_X_SHIFT) | + (((dst_y & 0xffff) >> 1) << B2R2_XY_Y_SHIFT); + node->node.GROUP4.B2R2_SSZ = + ((((dst_w + 1) & 0xfff) >> 1) << + B2R2_SZ_WIDTH_SHIFT) | + ((((dst_h + 1) & 0xfff) >> 1) << + B2R2_SZ_HEIGHT_SHIFT); + + if (dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR || + dst_fmt == + B2R2_BLT_FMT_YVU420_PACKED_PLANAR) { + node->node.GROUP3.B2R2_SXY = + node->node.GROUP4.B2R2_SXY; + node->node.GROUP3.B2R2_SSZ = + node->node.GROUP4.B2R2_SSZ; + } + } else if (yuv422_dst) { + /* + * Chroma goes on SRC2 and potentially on SRC1. + * Now chroma is half the size of luma + * only in horizontal direction. + * Same rounding applies as for 420 formats above, + * except it is only done horizontally. + */ + node->node.GROUP4.B2R2_SXY = + (((dst_x & 0xffff) >> 1) << B2R2_XY_X_SHIFT) | + ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT); + node->node.GROUP4.B2R2_SSZ = + ((((dst_w + 1) & 0xfff) >> 1) << + B2R2_SZ_WIDTH_SHIFT) | + ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT); + + if (dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR || + dst_fmt == + B2R2_BLT_FMT_YVU422_PACKED_PLANAR) { + node->node.GROUP3.B2R2_SXY = + node->node.GROUP4.B2R2_SXY; + node->node.GROUP3.B2R2_SSZ = + node->node.GROUP4.B2R2_SSZ; + } + } else if (dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR) { + /* + * Chroma goes on SRC2 and SRC1. + * It is the same size as luma. 
+ */ + node->node.GROUP4.B2R2_SXY = node->node.GROUP5.B2R2_SXY; + node->node.GROUP4.B2R2_SSZ = node->node.GROUP5.B2R2_SSZ; + node->node.GROUP3.B2R2_SXY = node->node.GROUP5.B2R2_SXY; + node->node.GROUP3.B2R2_SSZ = node->node.GROUP5.B2R2_SSZ; + } + + node->node.GROUP1.B2R2_TXY = 0; + node->node.GROUP1.B2R2_TSZ = + ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) | + ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT); + } else { + node = node->next; + node->node.GROUP4.B2R2_SXY = + ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) | + ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT); + node->node.GROUP4.B2R2_SSZ = + ((dst_rect_area->width & 0xfff) << + B2R2_SZ_WIDTH_SHIFT) | + ((dst_rect_area->height & 0xfff) << + B2R2_SZ_HEIGHT_SHIFT); + node->node.GROUP1.B2R2_TXY = 0; + node->node.GROUP1.B2R2_TSZ = + ((dst_rect_area->width & 0xfff) << + B2R2_SZ_WIDTH_SHIFT) | + ((dst_rect_area->height & 0xfff) << + B2R2_SZ_HEIGHT_SHIFT); + } + + if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 && + dst_rect_area->y == 0) { + dump_nodes(cont, node, false); + b2r2_log_debug(cont->dev, "%s dst_read node done.\n", __func__); + } + + /* blend */ + node = node->next; + node->node.GROUP3.B2R2_SXY = 0; + node->node.GROUP3.B2R2_SSZ = + ((dst_rect_area->width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) | + ((dst_rect_area->height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT); + /* contents of the foreground temporary buffer always at top left */ + node->node.GROUP4.B2R2_SXY = 0; + node->node.GROUP4.B2R2_SSZ = + ((dst_rect_area->width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) | + ((dst_rect_area->height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT); + + node->node.GROUP1.B2R2_TXY = 0; + node->node.GROUP1.B2R2_TSZ = + ((dst_rect_area->width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) | + ((dst_rect_area->height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT); + + if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 && + dst_rect_area->y == 0) { + dump_nodes(cont, node, false); + b2r2_log_debug(cont->dev, "%s Blend node done.\n", __func__); + } + + /* writeback */ + node = node->next; + if 
((req->user_req.flags & B2R2_BLT_FLAG_DESTINATION_CLIP) != 0) { + clip_left = req->user_req.dst_clip_rect.x; + clip_top = req->user_req.dst_clip_rect.y; + clip_right = clip_left + req->user_req.dst_clip_rect.width - 1; + clip_bottom = clip_top + req->user_req.dst_clip_rect.height - 1; + } + /* + * Clamp the dst clip rectangle to buffer dimensions to prevent + * illegal writes. An illegal clip rectangle, e.g. outside the + * buffer will be ignored, resulting in nothing being clipped. + */ + if (clip_left < 0 || req->user_req.dst_img.width <= clip_left) + clip_left = 0; + + if (clip_top < 0 || req->user_req.dst_img.height <= clip_top) + clip_top = 0; + + if (clip_right < 0 || req->user_req.dst_img.width <= clip_right) + clip_right = req->user_req.dst_img.width - 1; + + if (clip_bottom < 0 || req->user_req.dst_img.height <= clip_bottom) + clip_bottom = req->user_req.dst_img.height - 1; + + /* + * Only allow writing inside the clip rect. + * INTNL bit in B2R2_CWO should be zero. + */ + node->node.GROUP6.B2R2_CWO = + ((clip_top & 0x7fff) << B2R2_CWO_Y_SHIFT) | + ((clip_left & 0x7fff) << B2R2_CWO_X_SHIFT); + node->node.GROUP6.B2R2_CWS = + ((clip_bottom & 0x7fff) << B2R2_CWS_Y_SHIFT) | + ((clip_right & 0x7fff) << B2R2_CWS_X_SHIFT); + + if (yuv_multi_buffer_dst) { + const s32 dst_w = dst_rect_area->width; + const s32 dst_h = dst_rect_area->height; + int i = 0; + /* Number of nodes required to write chroma output */ + int n_nodes = 1; + if (dst_fmt == B2R2_BLT_FMT_YUV420_PACKED_PLANAR || + dst_fmt == B2R2_BLT_FMT_YUV422_PACKED_PLANAR || + dst_fmt == B2R2_BLT_FMT_YVU420_PACKED_PLANAR || + dst_fmt == B2R2_BLT_FMT_YVU422_PACKED_PLANAR || + dst_fmt == B2R2_BLT_FMT_YUV444_PACKED_PLANAR) + n_nodes = 2; + + node->node.GROUP4.B2R2_SXY = 0; + node->node.GROUP4.B2R2_SSZ = + ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) | + ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT); + + /* Luma (Y-component) */ + node->node.GROUP1.B2R2_TXY = + ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) | + ((dst_y & 0xffff) << 
B2R2_XY_Y_SHIFT); + node->node.GROUP1.B2R2_TSZ = + ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) | + ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT); + + node->node.GROUP6.B2R2_CWO = + ((clip_top & 0x7fff) << B2R2_CWO_Y_SHIFT) | + ((clip_left & 0x7fff) << B2R2_CWO_X_SHIFT); + node->node.GROUP6.B2R2_CWS = + ((clip_bottom & 0x7fff) << B2R2_CWS_Y_SHIFT) | + ((clip_right & 0x7fff) << B2R2_CWS_X_SHIFT); + + if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 && + dst_rect_area->y == 0) { + dump_nodes(cont, node, false); + b2r2_log_debug(cont->dev, + "%s Writeback luma node done.\n", __func__); + } + + node = node->next; + + /* + * Chroma components. 1 or 2 nodes + * for semi-planar or planar buffer respectively. + */ + for (i = 0; i < n_nodes && node != NULL; ++i) { + + node->node.GROUP4.B2R2_SXY = 0; + node->node.GROUP4.B2R2_SSZ = + ((dst_w & 0xfff) << B2R2_SZ_WIDTH_SHIFT) | + ((dst_h & 0xfff) << B2R2_SZ_HEIGHT_SHIFT); + + switch (dst_fmt) { + case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: + /* + * Chroma is half the size of luma. + * Must round up the chroma size to handle + * cases when luma size is not divisible by 2. + * E.g. luma_width==7 requires chroma_width==4. + * Chroma_width==7/2==3 is only enough + * for luma_width==6. 
+ */ + node->node.GROUP1.B2R2_TXY = + (((dst_x & 0xffff) >> 1) << + B2R2_XY_X_SHIFT) | + (((dst_y & 0xffff) >> 1) << + B2R2_XY_Y_SHIFT); + node->node.GROUP1.B2R2_TSZ = + ((((dst_w + 1) & 0xfff) >> 1) << + B2R2_SZ_WIDTH_SHIFT) | + ((((dst_h + 1) & 0xfff) >> 1) << + B2R2_SZ_HEIGHT_SHIFT); + break; + case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: + /* + * Now chroma is half the size of luma only + * in horizontal direction. + * Same rounding applies as + * for 420 formats above, except it is only + * done horizontally. + */ + node->node.GROUP1.B2R2_TXY = + (((dst_x & 0xffff) >> 1) << + B2R2_XY_X_SHIFT) | + ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT); + node->node.GROUP1.B2R2_TSZ = + ((((dst_w + 1) & 0xfff) >> 1) << + B2R2_SZ_WIDTH_SHIFT) | + ((dst_h & 0xfff) << + B2R2_SZ_HEIGHT_SHIFT); + break; + case B2R2_BLT_FMT_YUV444_PACKED_PLANAR: + /* + * Chroma has the same resolution as luma. 
+ */ + node->node.GROUP1.B2R2_TXY = + ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) | + ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT); + node->node.GROUP1.B2R2_TSZ = + ((dst_w & 0xfff) << + B2R2_SZ_WIDTH_SHIFT) | + ((dst_h & 0xfff) << + B2R2_SZ_HEIGHT_SHIFT); + break; + default: + break; + } + + node->node.GROUP6.B2R2_CWO = + ((clip_top & 0x7fff) << B2R2_CWO_Y_SHIFT) | + ((clip_left & 0x7fff) << B2R2_CWO_X_SHIFT); + node->node.GROUP6.B2R2_CWS = + ((clip_bottom & 0x7fff) << B2R2_CWS_Y_SHIFT) | + ((clip_right & 0x7fff) << B2R2_CWS_X_SHIFT); + + if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 && + dst_rect_area->y == 0) { + dump_nodes(cont, node, false); + b2r2_log_debug(cont->dev, "%s Writeback chroma " + "node %d of %d done.\n", + __func__, i + 1, n_nodes); + } + + node = node->next; + } + } else { + node->node.GROUP4.B2R2_SXY = 0; + node->node.GROUP4.B2R2_SSZ = + ((dst_rect_area->width & 0xfff) << + B2R2_SZ_WIDTH_SHIFT) | + ((dst_rect_area->height & 0xfff) << + B2R2_SZ_HEIGHT_SHIFT); + node->node.GROUP1.B2R2_TXY = + ((dst_x & 0xffff) << B2R2_XY_X_SHIFT) | + ((dst_y & 0xffff) << B2R2_XY_Y_SHIFT); + node->node.GROUP1.B2R2_TSZ = + ((dst_rect_area->width & 0xfff) << + B2R2_SZ_WIDTH_SHIFT) | + ((dst_rect_area->height & 0xfff) << + B2R2_SZ_HEIGHT_SHIFT); + + if (B2R2_GENERIC_DEBUG_AREAS && dst_rect_area->x == 0 && + dst_rect_area->y == 0) { + dump_nodes(cont, node, false); + b2r2_log_debug(cont->dev, "%s Writeback node done.\n", + __func__); + } + } + + b2r2_log_info(cont->dev, "%s DONE\n", __func__); +} diff --git a/drivers/video/b2r2/b2r2_generic.h b/drivers/video/b2r2/b2r2_generic.h new file mode 100644 index 00000000000..3b22f654deb --- /dev/null +++ b/drivers/video/b2r2/b2r2_generic.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 generic. Full coverage of user interface but + * non optimized implementation. For Fallback purposes. + * + * Author: Maciej Socha <maciej.socha@stericsson.com> + * for ST-Ericsson. 
+ * + * License terms: GNU General Public License (GPL), version 2. + */ + +#ifndef _LINUX_VIDEO_B2R2_GENERIC_H +#define _LINUX_VIDEO_B2R2_GENERIC_H + +#include <video/b2r2_blt.h> + +#include "b2r2_internal.h" + +/** + * b2r2_generic_init() + */ +void b2r2_generic_init(struct b2r2_control *cont); + +/** + * b2r2_generic_exit() + */ +void b2r2_generic_exit(struct b2r2_control *cont); + +/** + * b2r2_generic_analyze() + */ +int b2r2_generic_analyze(const struct b2r2_blt_request *req, + s32 *work_buf_width, + s32 *work_buf_height, + u32 *work_buf_count, + u32 *node_count); +/** + * b2r2_generic_configure() + */ +int b2r2_generic_configure(const struct b2r2_blt_request *req, + struct b2r2_node *first, + struct b2r2_work_buf *tmp_bufs, + u32 buf_count); +/** + * b2r2_generic_set_areas() + */ +void b2r2_generic_set_areas(const struct b2r2_blt_request *req, + struct b2r2_node *first, + struct b2r2_blt_rect *dst_rect_area); +#endif diff --git a/drivers/video/b2r2/b2r2_global.h b/drivers/video/b2r2/b2r2_global.h new file mode 100644 index 00000000000..38cf74bb753 --- /dev/null +++ b/drivers/video/b2r2/b2r2_global.h @@ -0,0 +1,119 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 global definitions + * + * Author: Robert Fekete <robert.fekete@stericsson.com> + * Author: Paul Wannback + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#ifndef __B2R2_GLOBAL_H +#define __B2R2_GLOBAL_H + +/** Sources involved */ + +struct b2r2_system { + unsigned int B2R2_NIP; + unsigned int B2R2_CIC; + unsigned int B2R2_INS; + unsigned int B2R2_ACK; +}; + +struct b2r2_target { + unsigned int B2R2_TBA; + unsigned int B2R2_TTY; + unsigned int B2R2_TXY; + unsigned int B2R2_TSZ; +}; + +struct b2r2_color_fill { + unsigned int B2R2_S1CF; + unsigned int B2R2_S2CF; +}; + +struct b2r2_src_config { + unsigned int B2R2_SBA; + unsigned int B2R2_STY; + unsigned int B2R2_SXY; + unsigned int B2R2_SSZ; +}; + +struct b2r2_clip { + unsigned int B2R2_CWO; + unsigned int B2R2_CWS; +}; + +struct b2r2_color_key { + unsigned int B2R2_KEY1; + unsigned int B2R2_KEY2; +}; + +struct b2r2_clut { + unsigned int B2R2_CCO; + unsigned int B2R2_CML; +}; + +struct b2r2_rsz_pl_mask { + unsigned int B2R2_FCTL; + unsigned int B2R2_PMK; +}; + +struct b2r2_Cr_luma_rsz { + unsigned int B2R2_RSF; + unsigned int B2R2_RZI; + unsigned int B2R2_HFP; + unsigned int B2R2_VFP; +}; + +struct b2r2_flikr_filter { + unsigned int B2R2_FF0; + unsigned int B2R2_FF1; + unsigned int B2R2_FF2; + unsigned int B2R2_FF3; +}; + +struct b2r2_xyl { + unsigned int B2R2_XYL; + unsigned int B2R2_XYP; +}; + +struct b2r2_sau { + unsigned int B2R2_SAR; + unsigned int B2R2_USR; +}; + +struct b2r2_vm { + unsigned int B2R2_VMX0; + unsigned int B2R2_VMX1; + unsigned int B2R2_VMX2; + unsigned int B2R2_VMX3; +}; + +struct b2r2_link_list { + + struct b2r2_system GROUP0; + struct b2r2_target GROUP1; + struct b2r2_color_fill GROUP2; + struct b2r2_src_config GROUP3; + struct b2r2_src_config GROUP4; + struct b2r2_src_config GROUP5; + struct b2r2_clip GROUP6; + struct b2r2_clut GROUP7; + struct b2r2_rsz_pl_mask GROUP8; + struct b2r2_Cr_luma_rsz GROUP9; + struct b2r2_Cr_luma_rsz GROUP10; + struct b2r2_flikr_filter GROUP11; + struct b2r2_color_key GROUP12; + struct b2r2_xyl GROUP13; + struct b2r2_sau GROUP14; + struct b2r2_vm GROUP15; + struct b2r2_vm GROUP16; + + unsigned int 
B2R2_RESERVED[2]; +}; + + +#endif /* !defined(__B2R2_GLOBAL_H) */ diff --git a/drivers/video/b2r2/b2r2_hw.h b/drivers/video/b2r2/b2r2_hw.h new file mode 100644 index 00000000000..9739912d78c --- /dev/null +++ b/drivers/video/b2r2/b2r2_hw.h @@ -0,0 +1,458 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 hw definitions + * + * Author: Fredrik Allansson <fredrik.allansson@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#ifndef B2R2_HW_H__ +#define B2R2_HW_H__ + +#include <linux/bitops.h> + +/* Scaling works in strips 128 pixels wide */ +#define B2R2_RESCALE_MAX_WIDTH 128 + +/* Rotation works in strips 16 pixels wide */ +#define B2R2_ROTATE_MAX_WIDTH 16 + +/* B2R2 color formats */ +#define B2R2_COLOR_FORMAT_SHIFT 16 +enum b2r2_native_fmt { + /* RGB formats */ + B2R2_NATIVE_RGB565 = 0x00 << B2R2_COLOR_FORMAT_SHIFT, + B2R2_NATIVE_RGB888 = 0x01 << B2R2_COLOR_FORMAT_SHIFT, + B2R2_NATIVE_ARGB8565 = 0x04 << B2R2_COLOR_FORMAT_SHIFT, + B2R2_NATIVE_ARGB8888 = 0x05 << B2R2_COLOR_FORMAT_SHIFT, + B2R2_NATIVE_ARGB1555 = 0x06 << B2R2_COLOR_FORMAT_SHIFT, + B2R2_NATIVE_ARGB4444 = 0x07 << B2R2_COLOR_FORMAT_SHIFT, + + /* YCbCr formats */ + B2R2_NATIVE_YCBCR888 = 0x10 << B2R2_COLOR_FORMAT_SHIFT, + B2R2_NATIVE_YCBCR422R = 0x12 << B2R2_COLOR_FORMAT_SHIFT, + B2R2_NATIVE_AYCBCR8888 = 0x15 << B2R2_COLOR_FORMAT_SHIFT, + B2R2_NATIVE_YCBCR42X_MB = 0x14 << B2R2_COLOR_FORMAT_SHIFT, + B2R2_NATIVE_YCBCR42X_R2B = 0x16 << B2R2_COLOR_FORMAT_SHIFT, + B2R2_NATIVE_YCBCR42X_MBN = 0x0e << B2R2_COLOR_FORMAT_SHIFT, + + /* CLUT formats */ + B2R2_NATIVE_CLUT2 = 0x09 << B2R2_COLOR_FORMAT_SHIFT, + B2R2_NATIVE_CLUT8 = 0x0b << B2R2_COLOR_FORMAT_SHIFT, + B2R2_NATIVE_ACLUT44 = 0x0c << B2R2_COLOR_FORMAT_SHIFT, + B2R2_NATIVE_ACLUT88 = 0x0d << B2R2_COLOR_FORMAT_SHIFT, + + /* Misc. 
formats */ + B2R2_NATIVE_A1 = 0x18 << B2R2_COLOR_FORMAT_SHIFT, + B2R2_NATIVE_A8 = 0x19 << B2R2_COLOR_FORMAT_SHIFT, + B2R2_NATIVE_YUV = 0x1e << B2R2_COLOR_FORMAT_SHIFT, + B2R2_NATIVE_BYTE = 0x1f << B2R2_COLOR_FORMAT_SHIFT, +}; + +/* B2R2_CIC register values */ +enum b2r2_cic { + B2R2_CIC_COLOR_FILL = BIT(1),/*0x00000002*/ + B2R2_CIC_SOURCE_1 = BIT(2),/*0x00000004*/ + B2R2_CIC_SOURCE_2 = BIT(3),/*0x00000008*/ + B2R2_CIC_SOURCE_3 = BIT(4),/*0x00000010*/ + B2R2_CIC_CLIP_WINDOW = BIT(5),/*0x00000020*/ + B2R2_CIC_CLUT = BIT(6),/*0x00000040*/ + B2R2_CIC_FILTER_CONTROL = BIT(7),/*0x00000080*/ + B2R2_CIC_RESIZE_CHROMA = BIT(8),/*0x00000100*/ + B2R2_CIC_RESIZE_LUMA = BIT(9),/*0x00000200*/ + B2R2_CIC_FLICKER_COEFF = BIT(10),/*0x00000400*/ + B2R2_CIC_COLOR_KEY = BIT(11),/*0x00000800*/ + B2R2_CIC_XYL = BIT(12),/*0x00001000*/ + B2R2_CIC_SAU = BIT(13),/*0x00002000*/ + B2R2_CIC_IVMX = BIT(14),/*0x00004000*/ + B2R2_CIC_OVMX = BIT(15),/*0x00008000*/ + B2R2_CIC_PACEDOT = BIT(16),/*0x00010000*/ + B2R2_CIC_VC1 = BIT(17)/*0x00020000*/ +}; + +/* B2R2_INS register values */ +#define B2R2_INS_SOURCE_1_SHIFT 0 +#define B2R2_INS_SOURCE_2_SHIFT 3 +#define B2R2_INS_SOURCE_3_SHIFT 5 +#define B2R2_INS_IVMX_SHIFT 6 +#define B2R2_INS_CLUTOP_SHIFT 7 +#define B2R2_INS_RESCALE2D_SHIFT 8 +#define B2R2_INS_FLICK_FILT_SHIFT 9 +#define B2R2_INS_RECT_CLIP_SHIFT 10 +#define B2R2_INS_CKEY_SHIFT 11 +#define B2R2_INS_OVMX_SHIFT 12 +#define B2R2_INS_DEI_SHIFT 13 +#define B2R2_INS_PLANE_MASK_SHIFT 14 +#define B2R2_INS_XYL_SHIFT 15 +#define B2R2_INS_DOT_SHIFT 16 +#define B2R2_INS_VC1R_SHIFT 17 +#define B2R2_INS_ROTATION_SHIFT 18 +#define B2R2_INS_PACE_DOWN_SHIFT 30 +#define B2R2_INS_BLITCOMPIRQ_SHIFT 31 +enum b2r2_ins { + /* Source 1 config */ + B2R2_INS_SOURCE_1_FETCH_FROM_MEM = 0x1 << B2R2_INS_SOURCE_1_SHIFT, + B2R2_INS_SOURCE_1_COLOR_FILL_REGISTER = 0x3 << B2R2_INS_SOURCE_1_SHIFT, + B2R2_INS_SOURCE_1_DIRECT_COPY = 0x4 << B2R2_INS_SOURCE_1_SHIFT, + B2R2_INS_SOURCE_1_DIRECT_FILL = 0x7 << 
B2R2_INS_SOURCE_1_SHIFT, + + /* Source 2 config */ + B2R2_INS_SOURCE_2_FETCH_FROM_MEM = 0x1 << B2R2_INS_SOURCE_2_SHIFT, + B2R2_INS_SOURCE_2_COLOR_FILL_REGISTER = 0x3 << B2R2_INS_SOURCE_2_SHIFT, + + /* Source 3 config */ + B2R2_INS_SOURCE_3_FETCH_FROM_MEM = 0x1 << B2R2_INS_SOURCE_3_SHIFT, + + /* Other configs */ + B2R2_INS_IVMX_ENABLED = 0x1 << B2R2_INS_IVMX_SHIFT, + B2R2_INS_CLUTOP_ENABLED = 0x1 << B2R2_INS_CLUTOP_SHIFT, + B2R2_INS_RESCALE2D_ENABLED = 0x1 << B2R2_INS_RESCALE2D_SHIFT, + B2R2_INS_FLICK_FILT_ENABLED = 0x1 << B2R2_INS_FLICK_FILT_SHIFT, + B2R2_INS_RECT_CLIP_ENABLED = 0x1 << B2R2_INS_RECT_CLIP_SHIFT, + B2R2_INS_CKEY_ENABLED = 0x1 << B2R2_INS_CKEY_SHIFT, + B2R2_INS_OVMX_ENABLED = 0x1 << B2R2_INS_OVMX_SHIFT, + B2R2_INS_DEI_ENABLED = 0x1 << B2R2_INS_DEI_SHIFT, + B2R2_INS_PLANE_MASK_ENABLED = 0x1 << B2R2_INS_PLANE_MASK_SHIFT, + B2R2_INS_XYL_ENABLED = 0x1 << B2R2_INS_XYL_SHIFT, + B2R2_INS_DOT_ENABLED = 0x1 << B2R2_INS_DOT_SHIFT, + B2R2_INS_VC1R_ENABLED = 0x1 << B2R2_INS_VC1R_SHIFT, + B2R2_INS_ROTATION_ENABLED = 0x1 << B2R2_INS_ROTATION_SHIFT, + B2R2_INS_PACE_DOWN_ENABLED = 0x1 << B2R2_INS_PACE_DOWN_SHIFT, + B2R2_INS_BLITCOMPIRQ_ENABLED = 0x1 << B2R2_INS_BLITCOMPIRQ_SHIFT, + +}; + +/* B2R2_ACK register values */ +#define B2R2_ACK_MODE_SHIFT 0 +#define B2R2_ACK_SWAP_FG_BG_SHIFT 4 +#define B2R2_ACK_GALPHA_ROPID_SHIFT 8 +#define B2R2_ACK_CKEY_BLUE_SHIFT 16 +#define B2R2_ACK_CKEY_GREEN_SHIFT 18 +#define B2R2_ACK_CKEY_RED_SHIFT 20 +#define B2R2_ACK_CKEY_SEL_SHIFT 22 +enum b2r2_ack { + /* ALU operation modes */ + B2R2_ACK_MODE_LOGICAL_OPERATION = 0x1 << B2R2_ACK_MODE_SHIFT, + B2R2_ACK_MODE_BLEND_NOT_PREMULT = 0x2 << B2R2_ACK_MODE_SHIFT, + B2R2_ACK_MODE_BLEND_PREMULT = 0x3 << B2R2_ACK_MODE_SHIFT, + B2R2_ACK_MODE_CLIPMASK_LOGICAL_FIRST_PASS = 0x4 << B2R2_ACK_MODE_SHIFT, + B2R2_ACK_MODE_CLIPMASK_BLEND = 0x5 << B2R2_ACK_MODE_SHIFT, + B2R2_ACK_MODE_BYPASS_S2_S3 = 0x7 << B2R2_ACK_MODE_SHIFT, + B2R2_ACK_MODE_CLIPMASK_LOGICAL_SECOND_PASS = 0x8 << B2R2_ACK_MODE_SHIFT, + 
B2R2_ACK_MODE_CLIPMASK_XYL_LOGICAL = 0x9 << B2R2_ACK_MODE_SHIFT, + B2R2_ACK_MODE_CLIPMASK_XYL_BLEND_NOT_PREMULT = + 0xa << B2R2_ACK_MODE_SHIFT, + B2R2_ACK_MODE_CLIPMASK_XYL_BLEND_PREMULT = 0xb << B2R2_ACK_MODE_SHIFT, + + /* ALU channel selection */ + B2R2_ACK_SWAP_FG_BG = 0x1 << B2R2_ACK_SWAP_FG_BG_SHIFT, + + /* Global alpha and ROP IDs */ + B2R2_ACK_ROP_CLEAR = 0x0 << B2R2_ACK_GALPHA_ROPID_SHIFT, + B2R2_ACK_ROP_AND = 0x1 << B2R2_ACK_GALPHA_ROPID_SHIFT, + B2R2_ACK_ROP_AND_REV = 0x2 << B2R2_ACK_GALPHA_ROPID_SHIFT, + B2R2_ACK_ROP_COPY = 0x3 << B2R2_ACK_GALPHA_ROPID_SHIFT, + B2R2_ACK_ROP_AND_INV = 0x4 << B2R2_ACK_GALPHA_ROPID_SHIFT, + B2R2_ACK_ROP_NOOP = 0x5 << B2R2_ACK_GALPHA_ROPID_SHIFT, + B2R2_ACK_ROP_XOR = 0x6 << B2R2_ACK_GALPHA_ROPID_SHIFT, + B2R2_ACK_ROP_OR = 0x7 << B2R2_ACK_GALPHA_ROPID_SHIFT, + B2R2_ACK_ROP_NOR = 0x8 << B2R2_ACK_GALPHA_ROPID_SHIFT, + B2R2_ACK_ROP_EQUIV = 0x9 << B2R2_ACK_GALPHA_ROPID_SHIFT, + B2R2_ACK_ROP_INVERT = 0xa << B2R2_ACK_GALPHA_ROPID_SHIFT, + B2R2_ACK_ROP_OR_REV = 0xb << B2R2_ACK_GALPHA_ROPID_SHIFT, + B2R2_ACK_ROP_COPY_INV = 0xc << B2R2_ACK_GALPHA_ROPID_SHIFT, + B2R2_ACK_ROP_OR_INV = 0xd << B2R2_ACK_GALPHA_ROPID_SHIFT, + B2R2_ACK_ROP_NAND = 0xe << B2R2_ACK_GALPHA_ROPID_SHIFT, + B2R2_ACK_ROP_SET = 0xf << B2R2_ACK_GALPHA_ROPID_SHIFT, + + /* Color key configuration bits */ + B2R2_ACK_CKEY_BLUE_MATCH_IF_BETWEEN = 0x1 << B2R2_ACK_CKEY_BLUE_SHIFT, + B2R2_ACK_CKEY_BLUE_MATCH_IF_LT_OR_GT = 0x2 << B2R2_ACK_CKEY_BLUE_SHIFT, + B2R2_ACK_CKEY_RED_MATCH_IF_BETWEEN = 0x1 << B2R2_ACK_CKEY_GREEN_SHIFT, + B2R2_ACK_CKEY_RED_MATCH_IF_LT_OR_GT = 0x2 << B2R2_ACK_CKEY_GREEN_SHIFT, + B2R2_ACK_CKEY_GREEN_MATCH_IF_BETWEEN = 0x1 << B2R2_ACK_CKEY_RED_SHIFT, + B2R2_ACK_CKEY_GREEN_MATCH_IF_LT_OR_GT = 0x2 << B2R2_ACK_CKEY_RED_SHIFT, + + /* Color key input selection */ + B2R2_ACK_CKEY_SEL_DEST = 0x0 << B2R2_ACK_CKEY_SEL_SHIFT, + B2R2_ACK_CKEY_SEL_SRC_BEFORE_CLUT = 0x1 << B2R2_ACK_CKEY_SEL_SHIFT, + B2R2_ACK_CKEY_SEL_SRC_AFTER_CLUT = 0x2 << B2R2_ACK_CKEY_SEL_SHIFT, + 
B2R2_ACK_CKEY_SEL_BLANKING_S2_ALPHA = 0x3 << B2R2_ACK_CKEY_SEL_SHIFT, +}; + +/* Common <S/T>TY defines */ +#define B2R2_TY_BITMAP_PITCH_SHIFT 0 +#define B2R2_TY_COLOR_FORM_SHIFT 16 +#define B2R2_TY_ALPHA_RANGE_SHIFT 21 +#define B2R2_TY_MB_ACCESS_MODE_SHIFT 23 +#define B2R2_TY_HSO_SHIFT 24 +#define B2R2_TY_VSO_SHIFT 25 +#define B2R2_TY_SUBBYTE_SHIFT 28 +#define B2R2_TY_ENDIAN_SHIFT 30 +#define B2R2_TY_SECURE_SHIFT 31 + +/* Dummy enum for generalization of <S/T>TY registers */ +enum b2r2_ty { + /* Alpha range */ + B2R2_TY_ALPHA_RANGE_128 = 0x0 << B2R2_TY_ALPHA_RANGE_SHIFT, + B2R2_TY_ALPHA_RANGE_255 = 0x1 << B2R2_TY_ALPHA_RANGE_SHIFT, + + /* Access mode in macro-block organized frame buffers */ + B2R2_TY_MB_ACCESS_MODE_FRAME = 0x0 << B2R2_TY_MB_ACCESS_MODE_SHIFT, + B2R2_TY_MB_ACCESS_MODE_FIELD = 0x1 << B2R2_TY_MB_ACCESS_MODE_SHIFT, + + /* Horizontal scan order */ + B2R2_TY_HSO_LEFT_TO_RIGHT = 0x0 << B2R2_TY_HSO_SHIFT, + B2R2_TY_HSO_RIGHT_TO_LEFT = 0x1 << B2R2_TY_HSO_SHIFT, + + /* Vertical scan order */ + B2R2_TY_VSO_TOP_TO_BOTTOM = 0x0 << B2R2_TY_VSO_SHIFT, + B2R2_TY_VSO_BOTTOM_TO_TOP = 0x1 << B2R2_TY_VSO_SHIFT, + + /* Pixel ordering for sub-byte formats (position of right-most pixel) */ + B2R2_TY_SUBBYTE_MSB = 0x0 << B2R2_TY_SUBBYTE_SHIFT, + B2R2_TY_SUBBYTE_LSB = 0x1 << B2R2_TY_SUBBYTE_SHIFT, + + /* Bitmap endianess */ + B2R2_TY_ENDIAN_BIG_NOT_LITTLE = 0x1 << B2R2_TY_ENDIAN_SHIFT, + + /* Secureness of the target memory region */ + B2R2_TY_SECURE_UNSECURE = 0x0 << B2R2_TY_SECURE_SHIFT, + B2R2_TY_SECURE_SECURE = 0x1 << B2R2_TY_SECURE_SHIFT, + + /* Dummy to make sure the data type is large enough */ + B2R2_TY_DUMMY = 0xffffffff, +}; + +/* B2R2_TTY register values */ +#define B2R2_TTY_CB_NOT_CR_SHIFT 22 +#define B2R2_TTY_RGB_ROUND_SHIFT 26 +#define B2R2_TTY_CHROMA_NOT_LUMA_SHIFT 27 +enum b2r2_tty { + + /* Chroma component selection */ + B2R2_TTY_CB_NOT_CR = 0x1 << B2R2_TTY_CB_NOT_CR_SHIFT, + + /* RGB rounding mode */ + B2R2_TTY_RGB_ROUND_NORMAL = 0x0 << 
B2R2_TTY_RGB_ROUND_SHIFT, + B2R2_TTY_RGB_ROUND_DITHER = 0x1 << B2R2_TTY_RGB_ROUND_SHIFT, + + /* Component selection for splitted frame buffer formats */ + B2R2_TTY_CHROMA_NOT_LUMA = 0x1 << B2R2_TTY_CHROMA_NOT_LUMA_SHIFT, +}; + +/* B2R2_S1TY register values */ +#define B2R2_S1TY_A1_SUBST_SHIFT 22 +#define B2R2_S1TY_ROTATION_SHIFT 27 +#define B2R2_S1TY_RGB_EXPANSION_SHIFT 29 +enum b2r2_s1ty { + + /* Alpha bit substitution mode for ARGB1555 */ + B2R2_S1TY_A1_SUBST_KEY_MODE = 0x1 << B2R2_S1TY_A1_SUBST_SHIFT, + + /* Input rectangle rotation (NOT YET IMPLEMENTED) */ + B2R2_S1TY_ENABLE_ROTATION = 0x1 << B2R2_S1TY_ROTATION_SHIFT, + + /* RGB expansion mode */ + B2R2_S1TY_RGB_EXPANSION_MSB_DUP = 0x0 << B2R2_S1TY_RGB_EXPANSION_SHIFT, + B2R2_S1TY_RGB_EXPANSION_LSP_ZERO = 0x1 << B2R2_S1TY_RGB_EXPANSION_SHIFT, +}; + +/* B2R2_S1TY register values */ +#define B2R2_S2TY_A1_SUBST_SHIFT 22 +#define B2R2_S2TY_CHROMA_LEFT_SHIFT 26 +#define B2R2_S2TY_RGB_EXPANSION_SHIFT 29 +enum b2r2_s2ty { + + /* Alpha bit substitution mode for ARGB1555 */ + B2R2_S2TY_A1_SUBST_KEY_MODE = 0x1 << B2R2_S2TY_A1_SUBST_SHIFT, + + /* Chroma left extension */ + B2R2_S2TY_CHROMA_LEFT_EXT_FOLLOWING_PIXEL = 0x0 + << B2R2_S2TY_CHROMA_LEFT_SHIFT, + B2R2_S2TY_CHROMA_LEFT_EXT_AVERAGE = 0x1 << B2R2_S2TY_CHROMA_LEFT_SHIFT, + + /* RGB expansion mode */ + B2R2_S2TY_RGB_EXPANSION_MSB_DUP = 0x0 << B2R2_S2TY_RGB_EXPANSION_SHIFT, + B2R2_S2TY_RGB_EXPANSION_LSP_ZERO = 0x1 << B2R2_S2TY_RGB_EXPANSION_SHIFT, +}; + +/* B2R2_S1TY register values */ +#define B2R2_S3TY_BLANK_ACC_SHIFT 26 +enum b2r2_s3ty { + /* Enables "blank" access on this source (nothing will be fetched from + memory) */ + B2R2_S3TY_ENABLE_BLANK_ACCESS = 0x1 << B2R2_S3TY_BLANK_ACC_SHIFT, +}; + +/* B2R2_<S or T>XY register values */ +#define B2R2_XY_X_SHIFT 0 +#define B2R2_XY_Y_SHIFT 16 + +/* B2R2_<S or T>SZ register values */ +#define B2R2_SZ_WIDTH_SHIFT 0 +#define B2R2_SZ_HEIGHT_SHIFT 16 + +/* Clip window offset (top left coordinates) */ +#define B2R2_CWO_X_SHIFT 
0 +#define B2R2_CWO_Y_SHIFT 16 + +/* Clip window stop (bottom right coordinates) */ +#define B2R2_CWS_X_SHIFT 0 +#define B2R2_CWS_Y_SHIFT 16 + +/* Color look-up table */ +enum b2r2_cco { + B2R2_CCO_CLUT_COLOR_CORRECTION = (1 << 16), + B2R2_CCO_CLUT_UPDATE = (1 << 18), + B2R2_CCO_CLUT_ON_S1 = (1 << 15) +}; + +/* Filter control (2D resize control) */ +enum b2r2_fctl { + /* Horizontal 2D filter mode */ + B2R2_FCTL_HF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER = BIT(0), + B2R2_FCTL_HF2D_MODE_ENABLE_ALPHA_CHANNEL_FILTER = BIT(1), + B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER = BIT(2), + + /* Vertical 2D filter mode */ + B2R2_FCTL_VF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER = BIT(4), + B2R2_FCTL_VF2D_MODE_ENABLE_ALPHA_CHANNEL_FILTER = BIT(5), + B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER = BIT(6), + + /* Alpha borders */ + B2R2_FCTL_ENABLE_ALPHA_BORDER_RIGHT = BIT(12), + B2R2_FCTL_ENABLE_ALPHA_BORDER_LEFT = BIT(13), + B2R2_FCTL_ENABLE_ALPHA_BORDER_BOTTOM = BIT(14), + B2R2_FCTL_ENABLE_ALPHA_BORDER_TOP = BIT(15), + + /* Luma path horizontal 2D filter mode */ + B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_FILTER = BIT(24), + B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_RESIZER = BIT(25), + + /* Luma path vertical 2D filter mode */ + B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_FILTER = BIT(28), + B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER = BIT(29), +}; + +/* Resize scaling factor */ +#define B2R2_RSF_HSRC_INC_SHIFT 0 +#define B2R2_RSF_VSRC_INC_SHIFT 16 + +/* Resizer initialization */ +#define B2R2_RZI_HSRC_INIT_SHIFT 0 +#define B2R2_RZI_HNB_REPEAT_SHIFT 12 +#define B2R2_RZI_VSRC_INIT_SHIFT 16 +#define B2R2_RZI_VNB_REPEAT_SHIFT 28 + +/* Default values for the resizer */ +#define B2R2_RZI_DEFAULT_HNB_REPEAT (3 << B2R2_RZI_HNB_REPEAT_SHIFT) +#define B2R2_RZI_DEFAULT_VNB_REPEAT (3 << B2R2_RZI_VNB_REPEAT_SHIFT) + + +/* Bus plug configuration registers */ +enum b2r2_plug_opcode_size { + B2R2_PLUG_OPCODE_SIZE_8 = 0x3, + B2R2_PLUG_OPCODE_SIZE_16 = 0x4, + B2R2_PLUG_OPCODE_SIZE_32 = 0x5, + B2R2_PLUG_OPCODE_SIZE_64 = 0x6, +}; + +enum b2r2_plug_chunk_size { 
+ B2R2_PLUG_CHUNK_SIZE_1 = 0x0, + B2R2_PLUG_CHUNK_SIZE_2 = 0x1, + B2R2_PLUG_CHUNK_SIZE_4 = 0x2, + B2R2_PLUG_CHUNK_SIZE_8 = 0x3, + B2R2_PLUG_CHUNK_SIZE_16 = 0x4, + B2R2_PLUG_CHUNK_SIZE_32 = 0x5, + B2R2_PLUG_CHUNK_SIZE_64 = 0x6, + B2R2_PLUG_CHUNK_SIZE_128 = 0x7, +}; + +enum b2r2_plug_message_size { + B2R2_PLUG_MESSAGE_SIZE_1 = 0x0, + B2R2_PLUG_MESSAGE_SIZE_2 = 0x1, + B2R2_PLUG_MESSAGE_SIZE_4 = 0x2, + B2R2_PLUG_MESSAGE_SIZE_8 = 0x3, + B2R2_PLUG_MESSAGE_SIZE_16 = 0x4, + B2R2_PLUG_MESSAGE_SIZE_32 = 0x5, + B2R2_PLUG_MESSAGE_SIZE_64 = 0x6, + B2R2_PLUG_MESSAGE_SIZE_128 = 0x7, +}; + +enum b2r2_plug_page_size { + B2R2_PLUG_PAGE_SIZE_64 = 0x0, + B2R2_PLUG_PAGE_SIZE_128 = 0x1, + B2R2_PLUG_PAGE_SIZE_256 = 0x2, +}; + +/* Default opcode size */ +#if defined(CONFIG_B2R2_OPSIZE_8) +# define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_8 +#elif defined(CONFIG_B2R2_OPSIZE_16) +# define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_16 +#elif defined(CONFIG_B2R2_OPSIZE_32) +# define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_32 +#elif defined(CONFIG_B2R2_OPSIZE_64) +# define B2R2_PLUG_OPCODE_SIZE_DEFAULT B2R2_PLUG_OPCODE_SIZE_64 +#else +# define B2R2_PLUG_OPCODE_SIZE_DEFAULT 0 +#endif + +/* Default chunk size */ +#if defined(CONFIG_B2R2_CHSIZE_1) +# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_1 +#elif defined(CONFIG_B2R2_CHSIZE_2) +# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_2 +#elif defined(CONFIG_B2R2_CHSIZE_4) +# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_4 +#elif defined(CONFIG_B2R2_CHSIZE_8) +# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_8 +#elif defined(CONFIG_B2R2_CHSIZE_16) +# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_16 +#elif defined(CONFIG_B2R2_CHSIZE_32) +# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_32 +#elif defined(CONFIG_B2R2_CHSIZE_64) +# define B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_64 +#elif defined(CONFIG_B2R2_CHSIZE_128) +# define 
B2R2_PLUG_CHUNK_SIZE_DEFAULT B2R2_PLUG_CHUNK_SIZE_128 +#else +# define B2R2_PLUG_CHUNK_SIZE_DEFAULT 0 +#endif + +/* Default message size */ +#if defined(CONFIG_B2R2_MGSIZE_1) +# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_1 +#elif defined(CONFIG_B2R2_MGSIZE_2) +# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_2 +#elif defined(CONFIG_B2R2_MGSIZE_4) +# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_4 +#elif defined(CONFIG_B2R2_MGSIZE_8) +# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_8 +#elif defined(CONFIG_B2R2_MGSIZE_16) +# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_16 +#elif defined(CONFIG_B2R2_MGSIZE_32) +# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_32 +#elif defined(CONFIG_B2R2_MGSIZE_64) +# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_64 +#elif defined(CONFIG_B2R2_MGSIZE_128) +# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT B2R2_PLUG_MESSAGE_SIZE_128 +#else +# define B2R2_PLUG_MESSAGE_SIZE_DEFAULT 0 +#endif + +/* Default page size */ +#if defined(CONFIG_B2R2_PGSIZE_64) +# define B2R2_PLUG_PAGE_SIZE_DEFAULT B2R2_PLUG_PAGE_SIZE_64 +#elif defined(CONFIG_B2R2_PGSIZE_128) +# define B2R2_PLUG_PAGE_SIZE_DEFAULT B2R2_PLUG_PAGE_SIZE_128 +#elif defined(CONFIG_B2R2_PGSIZE_256) +# define B2R2_PLUG_PAGE_SIZE_DEFAULT B2R2_PLUG_PAGE_SIZE_256 +#else +# define B2R2_PLUG_PAGE_SIZE_DEFAULT 0 +#endif + +#endif /* B2R2_HW_H__ */ diff --git a/drivers/video/b2r2/b2r2_hw_convert.c b/drivers/video/b2r2/b2r2_hw_convert.c new file mode 100644 index 00000000000..d1b44db79c3 --- /dev/null +++ b/drivers/video/b2r2/b2r2_hw_convert.c @@ -0,0 +1,747 @@ +/* + * Copyright (C) ST-Ericsson SA 2012 + * + * ST-Ericsson B2R2 node splitter + * + * Author: Jorgen Nilsson <jorgen.nilsson@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include "b2r2_hw_convert.h" +#include "b2r2_internal.h" +#include "b2r2_utils.h" + +/* + * Macros and constants + */ + +/* VMX register values for RGB to YUV color conversion */ +/* Magic numbers from 27.11 in DB8500_DesignSpecification_v2.5.pdf */ + +/* 601 Full range conversion matrix */ +#define B2R2_VMX0_RGB_TO_YUV_601_FULL_RANGE 0x107e4beb +#define B2R2_VMX1_RGB_TO_YUV_601_FULL_RANGE 0x0982581d +#define B2R2_VMX2_RGB_TO_YUV_601_FULL_RANGE 0xfa9ea483 +#define B2R2_VMX3_RGB_TO_YUV_601_FULL_RANGE 0x08000080 + +/* 601 Standard (clamped) conversion matrix */ +#define B2R2_VMX0_RGB_TO_YUV_601_STANDARD 0x0e1e8bee +#define B2R2_VMX1_RGB_TO_YUV_601_STANDARD 0x08420419 +#define B2R2_VMX2_RGB_TO_YUV_601_STANDARD 0xfb5ed471 +#define B2R2_VMX3_RGB_TO_YUV_601_STANDARD 0x08004080 + +/* 709 Full range conversion matrix */ +#define B2R2_VMX0_RGB_TO_YUV_709_FULL_RANGE 0x107e27f4 +#define B2R2_VMX1_RGB_TO_YUV_709_FULL_RANGE 0x06e2dc13 +#define B2R2_VMX2_RGB_TO_YUV_709_FULL_RANGE 0xfc5e6c83 +#define B2R2_VMX3_RGB_TO_YUV_709_FULL_RANGE 0x08000080 + +/* 709 Standard (clamped) conversion matrix */ +#define B2R2_VMX0_RGB_TO_YUV_709_STANDARD 0x0e3e6bf5 +#define B2R2_VMX1_RGB_TO_YUV_709_STANDARD 0x05e27410 +#define B2R2_VMX2_RGB_TO_YUV_709_STANDARD 0xfcdea471 +#define B2R2_VMX3_RGB_TO_YUV_709_STANDARD 0x08004080 + +/* VMX register values for YUV to RGB color conversion */ + +/* 601 Full range conversion matrix */ +#define B2R2_VMX0_YUV_TO_RGB_601_FULL_RANGE 0x2c440000 +#define B2R2_VMX1_YUV_TO_RGB_601_FULL_RANGE 0xe9a403aa +#define B2R2_VMX2_YUV_TO_RGB_601_FULL_RANGE 0x0004013f +#define B2R2_VMX3_YUV_TO_RGB_601_FULL_RANGE 0x34f21322 + +/* 601 Standard (clamped) conversion matrix */ +#define B2R2_VMX0_YUV_TO_RGB_601_STANDARD 0x3324a800 +#define B2R2_VMX1_YUV_TO_RGB_601_STANDARD 0xe604ab9c +#define B2R2_VMX2_YUV_TO_RGB_601_STANDARD 0x0004a957 +#define B2R2_VMX3_YUV_TO_RGB_601_STANDARD 0x32121eeb + +/* 709 Full range conversion matrix */ +#define 
B2R2_VMX0_YUV_TO_RGB_709_FULL_RANGE 0x31440000 +#define B2R2_VMX1_YUV_TO_RGB_709_FULL_RANGE 0xf16403d1 +#define B2R2_VMX2_YUV_TO_RGB_709_FULL_RANGE 0x00040145 +#define B2R2_VMX3_YUV_TO_RGB_709_FULL_RANGE 0x33b14b18 + +/* 709 Standard (clamped) conversion matrix */ +#define B2R2_VMX0_YUV_TO_RGB_709_STANDARD 0x3964a800 +#define B2R2_VMX1_YUV_TO_RGB_709_STANDARD 0xef04abc9 +#define B2R2_VMX2_YUV_TO_RGB_709_STANDARD 0x0004a95f +#define B2R2_VMX3_YUV_TO_RGB_709_STANDARD 0x307132df + +/* VMX register values for RGB to BGR conversion */ +#define B2R2_VMX0_RGB_TO_BGR 0x00000100 +#define B2R2_VMX1_RGB_TO_BGR 0x00040000 +#define B2R2_VMX2_RGB_TO_BGR 0x20000000 +#define B2R2_VMX3_RGB_TO_BGR 0x00000000 + +/* VMX register values for BGR to YUV color conversion */ +/* Note: All BGR -> YUV values are calculated by multiplying + * the RGB -> YUV matrices [A], with [S] to form [A]x[S] where + * |0 0 1| + * S = |0 1 0| + * |1 0 0| + * Essentially swapping first and third columns in + * the matrices (VMX0, VMX1 and VMX2 values). + * The offset vector VMX3 remains untouched. + * Put another way, the value of bits 0 through 9 + * is swapped with the value of + * bits 20 through 31 in VMX0, VMX1 and VMX2, + * taking into consideration the compression + * that is used on bits 0 through 9. Bit 0 being LSB. 
+ */ + +/* 601 Full range conversion matrix */ +#define B2R2_VMX0_BGR_TO_YUV_601_FULL_RANGE 0xfd7e4883 +#define B2R2_VMX1_BGR_TO_YUV_601_FULL_RANGE 0x03a2584c +#define B2R2_VMX2_BGR_TO_YUV_601_FULL_RANGE 0x107ea7d4 +#define B2R2_VMX3_BGR_TO_YUV_601_FULL_RANGE 0x08000080 + +/* 601 Standard (clamped) conversion matrix */ +#define B2R2_VMX0_BGR_TO_YUV_601_STANDARD 0xfdde8870 +#define B2R2_VMX1_BGR_TO_YUV_601_STANDARD 0x03220442 +#define B2R2_VMX2_BGR_TO_YUV_601_STANDARD 0x0e3ed7da +#define B2R2_VMX3_BGR_TO_YUV_601_STANDARD 0x08004080 + +/* 709 Full range conversion matrix */ +#define B2R2_VMX0_BGR_TO_YUV_709_FULL_RANGE 0xfe9e2483 +#define B2R2_VMX1_BGR_TO_YUV_709_FULL_RANGE 0x0262dc37 +#define B2R2_VMX2_BGR_TO_YUV_709_FULL_RANGE 0x107e6fe2 +#define B2R2_VMX3_BGR_TO_YUV_709_FULL_RANGE 0x08000080 + +/* 709 Standard (clamped) conversion matrix */ +#define B2R2_VMX0_BGR_TO_YUV_709_STANDARD 0xfebe6871 +#define B2R2_VMX1_BGR_TO_YUV_709_STANDARD 0x0202742f +#define B2R2_VMX2_BGR_TO_YUV_709_STANDARD 0x0e3ea7e6 +#define B2R2_VMX3_BGR_TO_YUV_709_STANDARD 0x08004080 + + +/* VMX register values for YUV to BGR conversion */ +/* Note: All YUV -> BGR values are constructed + * from the YUV -> RGB ones, by swapping + * first and third rows in the matrix + * (VMX0 and VMX2 values). Further, the first and + * third values in the offset vector need to be + * swapped as well, i.e. bits 0 through 9 are swapped + * with bits 20 through 29 in the VMX3 value. + * Bit 0 being LSB. 
+ */ + +/* 601 Full range conversion matrix */ +#define B2R2_VMX0_YUV_TO_BGR_601_FULL_RANGE (B2R2_VMX2_YUV_TO_RGB_601_FULL_RANGE) +#define B2R2_VMX1_YUV_TO_BGR_601_FULL_RANGE (B2R2_VMX1_YUV_TO_RGB_601_FULL_RANGE) +#define B2R2_VMX2_YUV_TO_BGR_601_FULL_RANGE (B2R2_VMX0_YUV_TO_RGB_601_FULL_RANGE) +#define B2R2_VMX3_YUV_TO_BGR_601_FULL_RANGE 0x3222134f + +/* 601 Standard (clamped) conversion matrix */ +#define B2R2_VMX0_YUV_TO_BGR_601_STANDARD (B2R2_VMX2_YUV_TO_RGB_601_STANDARD) +#define B2R2_VMX1_YUV_TO_BGR_601_STANDARD (B2R2_VMX1_YUV_TO_RGB_601_STANDARD) +#define B2R2_VMX2_YUV_TO_BGR_601_STANDARD (B2R2_VMX0_YUV_TO_RGB_601_STANDARD) +#define B2R2_VMX3_YUV_TO_BGR_601_STANDARD 0x2eb21f21 + +/* 709 Full range conversion matrix */ +#define B2R2_VMX0_YUV_TO_BGR_709_FULL_RANGE (B2R2_VMX2_YUV_TO_RGB_709_FULL_RANGE) +#define B2R2_VMX1_YUV_TO_BGR_709_FULL_RANGE (B2R2_VMX1_YUV_TO_RGB_709_FULL_RANGE) +#define B2R2_VMX2_YUV_TO_BGR_709_FULL_RANGE (B2R2_VMX0_YUV_TO_RGB_709_FULL_RANGE) +#define B2R2_VMX3_YUV_TO_BGR_709_FULL_RANGE 0x31814b3b + +/* 709 Standard (clamped) conversion matrix */ +#define B2R2_VMX0_YUV_TO_BGR_709_STANDARD (B2R2_VMX2_YUV_TO_RGB_709_STANDARD) +#define B2R2_VMX1_YUV_TO_BGR_709_STANDARD (B2R2_VMX1_YUV_TO_RGB_709_STANDARD) +#define B2R2_VMX2_YUV_TO_BGR_709_STANDARD (B2R2_VMX0_YUV_TO_RGB_709_STANDARD) +#define B2R2_VMX3_YUV_TO_BGR_709_STANDARD 0x2df13307 + + +/* VMX register values for YVU to RGB conversion */ + +/* 601 Full range conversion matrix */ +#define B2R2_VMX0_YVU_TO_RGB_601_FULL_RANGE 0x00040120 +#define B2R2_VMX1_YVU_TO_RGB_601_FULL_RANGE 0xF544034D +#define B2R2_VMX2_YVU_TO_RGB_601_FULL_RANGE 0x37840000 +#define B2R2_VMX3_YVU_TO_RGB_601_FULL_RANGE (B2R2_VMX3_YUV_TO_RGB_601_FULL_RANGE) + +/* 601 Standard (clamped) conversion matrix */ +#define B2R2_VMX0_YVU_TO_RGB_601_STANDARD 0x0004A999 +#define B2R2_VMX1_YVU_TO_RGB_601_STANDARD 0xF384AB30 +#define B2R2_VMX2_YVU_TO_RGB_601_STANDARD 0x40A4A800 +#define B2R2_VMX3_YVU_TO_RGB_601_STANDARD 
(B2R2_VMX3_YUV_TO_RGB_601_STANDARD) + +/* VMX register values for RGB to YVU conversion */ + +/* 601 Full range conversion matrix */ +#define B2R2_VMX0_RGB_TO_YVU_601_FULL_RANGE (B2R2_VMX2_RGB_TO_YUV_601_FULL_RANGE) +#define B2R2_VMX1_RGB_TO_YVU_601_FULL_RANGE (B2R2_VMX1_RGB_TO_YUV_601_FULL_RANGE) +#define B2R2_VMX2_RGB_TO_YVU_601_FULL_RANGE (B2R2_VMX0_RGB_TO_YUV_601_FULL_RANGE) +#define B2R2_VMX3_RGB_TO_YVU_601_FULL_RANGE (B2R2_VMX3_RGB_TO_YUV_601_FULL_RANGE) + +/* 601 Standard (clamped) conversion matrix */ +#define B2R2_VMX0_RGB_TO_YVU_601_STANDARD (B2R2_VMX2_RGB_TO_YUV_601_STANDARD) +#define B2R2_VMX1_RGB_TO_YVU_601_STANDARD (B2R2_VMX1_RGB_TO_YUV_601_STANDARD) +#define B2R2_VMX2_RGB_TO_YVU_601_STANDARD (B2R2_VMX0_RGB_TO_YUV_601_STANDARD) +#define B2R2_VMX3_RGB_TO_YVU_601_STANDARD (B2R2_VMX3_RGB_TO_YUV_601_STANDARD) + +/* VMX register values for YVU to BGR conversion */ + +/* 601 Full range conversion matrix */ +#define B2R2_VMX0_YVU_TO_BGR_601_FULL_RANGE (B2R2_VMX2_YVU_TO_RGB_601_FULL_RANGE) +#define B2R2_VMX1_YVU_TO_BGR_601_FULL_RANGE (B2R2_VMX1_YVU_TO_RGB_601_FULL_RANGE) +#define B2R2_VMX2_YVU_TO_BGR_601_FULL_RANGE (B2R2_VMX0_YVU_TO_RGB_601_FULL_RANGE) +#define B2R2_VMX3_YVU_TO_BGR_601_FULL_RANGE 0x3222134F + +/* 601 Standard (clamped) conversion matrix */ +#define B2R2_VMX0_YVU_TO_BGR_601_STANDARD (B2R2_VMX2_YVU_TO_RGB_601_STANDARD) +#define B2R2_VMX1_YVU_TO_BGR_601_STANDARD (B2R2_VMX1_YVU_TO_RGB_601_STANDARD) +#define B2R2_VMX2_YVU_TO_BGR_601_STANDARD (B2R2_VMX0_YVU_TO_RGB_601_STANDARD) +#define B2R2_VMX3_YVU_TO_BGR_601_STANDARD 0x3222134F + +/* VMX register values for BGR to YVU conversion */ + +/* 601 Full range conversion matrix */ +#define B2R2_VMX0_BGR_TO_YVU_601_FULL_RANGE (B2R2_VMX2_BGR_TO_YUV_601_FULL_RANGE) +#define B2R2_VMX1_BGR_TO_YVU_601_FULL_RANGE (B2R2_VMX1_BGR_TO_YUV_601_FULL_RANGE) +#define B2R2_VMX2_BGR_TO_YVU_601_FULL_RANGE (B2R2_VMX0_BGR_TO_YUV_601_FULL_RANGE) +#define B2R2_VMX3_BGR_TO_YVU_601_FULL_RANGE 
(B2R2_VMX3_BGR_TO_YUV_601_FULL_RANGE) + +/* 601 Standard (clamped) conversion matrix */ +#define B2R2_VMX0_BGR_TO_YVU_601_STANDARD (B2R2_VMX2_BGR_TO_YUV_601_STANDARD) +#define B2R2_VMX1_BGR_TO_YVU_601_STANDARD (B2R2_VMX1_BGR_TO_YUV_601_STANDARD) +#define B2R2_VMX2_BGR_TO_YVU_601_STANDARD (B2R2_VMX0_BGR_TO_YUV_601_STANDARD) +#define B2R2_VMX3_BGR_TO_YVU_601_STANDARD (B2R2_VMX3_BGR_TO_YUV_601_STANDARD) + +/* VMX register values for YVU to YUV conversion */ + +/* 601 Video Matrix (standard 601 conversion) */ +/* Internally, the components are in fact stored + * with luma in the middle, i.e. UYV, which is why + * the values are just like for RGB->BGR conversion. + */ +#define B2R2_VMX0_YVU_TO_YUV 0x00000100 +#define B2R2_VMX1_YVU_TO_YUV 0x00040000 +#define B2R2_VMX2_YVU_TO_YUV 0x20000000 +#define B2R2_VMX3_YVU_TO_YUV 0x00000000 + +/* VMX register values for RGB to BLT_YUV888 conversion */ + +/* + * BLT_YUV888 has color components laid out in memory as V, U, Y, (Alpha) + * with V at the first byte (due to little endian addressing). + * B2R2 expects them to be as U, Y, V, (A) + * with U at the first byte. + * Note: RGB -> BLT_YUV888 values are calculated by multiplying + * the RGB -> YUV matrix [A], with [S] to form [S]x[A] where + * |0 1 0| + * S = |0 0 1| + * |1 0 0| + * Essentially changing the order of rows in the original + * matrix [A]. + * row1 -> row3 + * row2 -> row1 + * row3 -> row2 + * Values in the offset vector are swapped in the same manner. 
+ */ +/* 601 Full range conversion matrix */ +#define B2R2_VMX0_RGB_TO_BLT_YUV888_601_FULL_RANGE (B2R2_VMX1_RGB_TO_YUV_601_FULL_RANGE) +#define B2R2_VMX1_RGB_TO_BLT_YUV888_601_FULL_RANGE (B2R2_VMX2_RGB_TO_YUV_601_FULL_RANGE) +#define B2R2_VMX2_RGB_TO_BLT_YUV888_601_FULL_RANGE (B2R2_VMX0_RGB_TO_YUV_601_FULL_RANGE) +#define B2R2_VMX3_RGB_TO_BLT_YUV888_601_FULL_RANGE 0x00020080 + +/* 601 Standard (clamped) conversion matrix */ +#define B2R2_VMX0_RGB_TO_BLT_YUV888_601_STANDARD (B2R2_VMX1_RGB_TO_YUV_601_STANDARD) +#define B2R2_VMX1_RGB_TO_BLT_YUV888_601_STANDARD (B2R2_VMX2_RGB_TO_YUV_601_STANDARD) +#define B2R2_VMX2_RGB_TO_BLT_YUV888_601_STANDARD (B2R2_VMX0_RGB_TO_YUV_601_STANDARD) +#define B2R2_VMX3_RGB_TO_BLT_YUV888_601_STANDARD 0x00020080 + +/* VMX register values for BLT_YUV888 to RGB conversion */ + +/* + * Note: BLT_YUV888 -> RGB values are calculated by multiplying + * the YUV -> RGB matrix [A], with [S] to form [A]x[S] where + * |0 0 1| + * S = |1 0 0| + * |0 1 0| + * Essentially changing the order of columns in the original + * matrix [A]. + * col1 -> col3 + * col2 -> col1 + * col3 -> col2 + * Values in the offset vector remain unchanged. 
+ */ +/* 601 Full range conversion matrix */ +#define B2R2_VMX0_BLT_YUV888_TO_RGB_601_FULL_RANGE 0x20000121 +#define B2R2_VMX1_BLT_YUV888_TO_RGB_601_FULL_RANGE 0x201ea74c +#define B2R2_VMX2_BLT_YUV888_TO_RGB_601_FULL_RANGE 0x2006f000 +#define B2R2_VMX3_BLT_YUV888_TO_RGB_601_FULL_RANGE (B2R2_VMX3_YUV_TO_RGB_601_FULL_RANGE) + +/* 601 Standard (clamped) conversion matrix */ +#define B2R2_VMX0_BLT_YUV888_TO_RGB_601_STANDARD 0x25400133 +#define B2R2_VMX1_BLT_YUV888_TO_RGB_601_STANDARD 0x255E7330 +#define B2R2_VMX2_BLT_YUV888_TO_RGB_601_STANDARD 0x25481400 +#define B2R2_VMX3_BLT_YUV888_TO_RGB_601_STANDARD (B2R2_VMX3_YUV_TO_RGB_601_FULL_RANGE) + +/* VMX register values for YUV to BLT_YUV888 conversion */ +#define B2R2_VMX0_YUV_TO_BLT_YUV888 0x00040000 +#define B2R2_VMX1_YUV_TO_BLT_YUV888 0x00000100 +#define B2R2_VMX2_YUV_TO_BLT_YUV888 0x20000000 +#define B2R2_VMX3_YUV_TO_BLT_YUV888 0x00000000 + +/* VMX register values for BLT_YUV888 to YUV conversion */ +#define B2R2_VMX0_BLT_YUV888_TO_YUV 0x00000100 +#define B2R2_VMX1_BLT_YUV888_TO_YUV 0x20000000 +#define B2R2_VMX2_BLT_YUV888_TO_YUV 0x00040000 +#define B2R2_VMX3_BLT_YUV888_TO_YUV 0x00000000 + +/* VMX register values for YVU to BLT_YUV888 conversion */ +#define B2R2_VMX0_YVU_TO_BLT_YUV888 0x00040000 +#define B2R2_VMX1_YVU_TO_BLT_YUV888 0x20000000 +#define B2R2_VMX2_YVU_TO_BLT_YUV888 0x00000100 +#define B2R2_VMX3_YVU_TO_BLT_YUV888 0x00000000 + +/* VMX register values for BLT_YUV888 to YVU conversion */ +#define B2R2_VMX0_BLT_YUV888_TO_YVU 0x00040000 +#define B2R2_VMX1_BLT_YUV888_TO_YVU 0x20000000 +#define B2R2_VMX2_BLT_YUV888_TO_YVU 0x00000100 +#define B2R2_VMX3_BLT_YUV888_TO_YVU 0x00000000 + +/* + * Internal types + */ + +/* + * Global variables + */ + + /** + * VMx values for color space conversion + * (component swap) + */ +static const u32 vmx_yuv_to_blt_yuv888[] = { + B2R2_VMX0_YUV_TO_BLT_YUV888, + B2R2_VMX1_YUV_TO_BLT_YUV888, + B2R2_VMX2_YUV_TO_BLT_YUV888, + B2R2_VMX3_YUV_TO_BLT_YUV888, +}; + +static const u32 
vmx_blt_yuv888_to_yuv[] = { + B2R2_VMX0_BLT_YUV888_TO_YUV, + B2R2_VMX1_BLT_YUV888_TO_YUV, + B2R2_VMX2_BLT_YUV888_TO_YUV, + B2R2_VMX3_BLT_YUV888_TO_YUV, +}; + +static const u32 vmx_yvu_to_blt_yuv888[] = { + B2R2_VMX0_YVU_TO_BLT_YUV888, + B2R2_VMX1_YVU_TO_BLT_YUV888, + B2R2_VMX2_YVU_TO_BLT_YUV888, + B2R2_VMX3_YVU_TO_BLT_YUV888, +}; + +static const u32 vmx_blt_yuv888_to_yvu[] = { + B2R2_VMX0_BLT_YUV888_TO_YVU, + B2R2_VMX1_BLT_YUV888_TO_YVU, + B2R2_VMX2_BLT_YUV888_TO_YVU, + B2R2_VMX3_BLT_YUV888_TO_YVU, +}; + +static const u32 vmx_rgb_to_bgr[] = { + B2R2_VMX0_RGB_TO_BGR, + B2R2_VMX1_RGB_TO_BGR, + B2R2_VMX2_RGB_TO_BGR, + B2R2_VMX3_RGB_TO_BGR, +}; + +static const u32 vmx_yvu_to_yuv[] = { + B2R2_VMX0_YVU_TO_YUV, + B2R2_VMX1_YVU_TO_YUV, + B2R2_VMX2_YVU_TO_YUV, + B2R2_VMX3_YVU_TO_YUV, +}; + +/** + * VMx values for color space conversions + * (standard 601 conversions) + */ +static const u32 vmx_rgb_to_yuv[] = { + B2R2_VMX0_RGB_TO_YUV_601_STANDARD, + B2R2_VMX1_RGB_TO_YUV_601_STANDARD, + B2R2_VMX2_RGB_TO_YUV_601_STANDARD, + B2R2_VMX3_RGB_TO_YUV_601_STANDARD, +}; + +static const u32 vmx_yuv_to_rgb[] = { + B2R2_VMX0_YUV_TO_RGB_601_STANDARD, + B2R2_VMX1_YUV_TO_RGB_601_STANDARD, + B2R2_VMX2_YUV_TO_RGB_601_STANDARD, + B2R2_VMX3_YUV_TO_RGB_601_STANDARD, +}; + +static const u32 vmx_rgb_to_blt_yuv888[] = { + B2R2_VMX0_RGB_TO_BLT_YUV888_601_STANDARD, + B2R2_VMX1_RGB_TO_BLT_YUV888_601_STANDARD, + B2R2_VMX2_RGB_TO_BLT_YUV888_601_STANDARD, + B2R2_VMX3_RGB_TO_BLT_YUV888_601_STANDARD, +}; + +static const u32 vmx_blt_yuv888_to_rgb[] = { + B2R2_VMX0_BLT_YUV888_TO_RGB_601_STANDARD, + B2R2_VMX1_BLT_YUV888_TO_RGB_601_STANDARD, + B2R2_VMX2_BLT_YUV888_TO_RGB_601_STANDARD, + B2R2_VMX3_BLT_YUV888_TO_RGB_601_STANDARD, +}; + +static const u32 vmx_rgb_to_yvu[] = { + B2R2_VMX0_RGB_TO_YVU_601_STANDARD, + B2R2_VMX1_RGB_TO_YVU_601_STANDARD, + B2R2_VMX2_RGB_TO_YVU_601_STANDARD, + B2R2_VMX3_RGB_TO_YVU_601_STANDARD, +}; + +static const u32 vmx_yvu_to_rgb[] = { + B2R2_VMX0_YVU_TO_RGB_601_STANDARD, + 
B2R2_VMX1_YVU_TO_RGB_601_STANDARD, + B2R2_VMX2_YVU_TO_RGB_601_STANDARD, + B2R2_VMX3_YVU_TO_RGB_601_STANDARD, +}; + +static const u32 vmx_bgr_to_yuv[] = { + B2R2_VMX0_BGR_TO_YUV_601_STANDARD, + B2R2_VMX1_BGR_TO_YUV_601_STANDARD, + B2R2_VMX2_BGR_TO_YUV_601_STANDARD, + B2R2_VMX3_BGR_TO_YUV_601_STANDARD, +}; + +static const u32 vmx_yuv_to_bgr[] = { + B2R2_VMX0_YUV_TO_BGR_601_STANDARD, + B2R2_VMX1_YUV_TO_BGR_601_STANDARD, + B2R2_VMX2_YUV_TO_BGR_601_STANDARD, + B2R2_VMX3_YUV_TO_BGR_601_STANDARD, +}; + +static const u32 vmx_bgr_to_yvu[] = { + B2R2_VMX0_BGR_TO_YVU_601_STANDARD, + B2R2_VMX1_BGR_TO_YVU_601_STANDARD, + B2R2_VMX2_BGR_TO_YVU_601_STANDARD, + B2R2_VMX3_BGR_TO_YVU_601_STANDARD, +}; + +static const u32 vmx_yvu_to_bgr[] = { + B2R2_VMX0_YVU_TO_BGR_601_STANDARD, + B2R2_VMX1_YVU_TO_BGR_601_STANDARD, + B2R2_VMX2_YVU_TO_BGR_601_STANDARD, + B2R2_VMX3_YVU_TO_BGR_601_STANDARD, +}; + +/** + * VMx values for color space conversions + * (full range conversions) + */ + +static const u32 vmx_full_rgb_to_yuv[] = { + B2R2_VMX0_RGB_TO_YUV_601_FULL_RANGE, + B2R2_VMX1_RGB_TO_YUV_601_FULL_RANGE, + B2R2_VMX2_RGB_TO_YUV_601_FULL_RANGE, + B2R2_VMX3_RGB_TO_YUV_601_FULL_RANGE, +}; + +static const u32 vmx_full_yuv_to_rgb[] = { + B2R2_VMX0_YUV_TO_RGB_601_FULL_RANGE, + B2R2_VMX1_YUV_TO_RGB_601_FULL_RANGE, + B2R2_VMX2_YUV_TO_RGB_601_FULL_RANGE, + B2R2_VMX3_YUV_TO_RGB_601_FULL_RANGE, +}; + +static const u32 vmx_full_rgb_to_blt_yuv888[] = { + B2R2_VMX0_RGB_TO_BLT_YUV888_601_FULL_RANGE, + B2R2_VMX1_RGB_TO_BLT_YUV888_601_FULL_RANGE, + B2R2_VMX2_RGB_TO_BLT_YUV888_601_FULL_RANGE, + B2R2_VMX3_RGB_TO_BLT_YUV888_601_FULL_RANGE, +}; + +static const u32 vmx_full_blt_yuv888_to_rgb[] = { + B2R2_VMX0_BLT_YUV888_TO_RGB_601_FULL_RANGE, + B2R2_VMX1_BLT_YUV888_TO_RGB_601_FULL_RANGE, + B2R2_VMX2_BLT_YUV888_TO_RGB_601_FULL_RANGE, + B2R2_VMX3_BLT_YUV888_TO_RGB_601_FULL_RANGE, +}; + +static const u32 vmx_full_yvu_to_rgb[] = { + B2R2_VMX0_YVU_TO_RGB_601_FULL_RANGE, + B2R2_VMX1_YVU_TO_RGB_601_FULL_RANGE, + 
B2R2_VMX2_YVU_TO_RGB_601_FULL_RANGE, + B2R2_VMX3_YVU_TO_RGB_601_FULL_RANGE, +}; + +static const u32 vmx_full_rgb_to_yvu[] = { + B2R2_VMX0_RGB_TO_YVU_601_FULL_RANGE, + B2R2_VMX1_RGB_TO_YVU_601_FULL_RANGE, + B2R2_VMX2_RGB_TO_YVU_601_FULL_RANGE, + B2R2_VMX3_RGB_TO_YVU_601_FULL_RANGE, +}; + +static const u32 vmx_full_bgr_to_yuv[] = { + B2R2_VMX0_BGR_TO_YUV_601_FULL_RANGE, + B2R2_VMX1_BGR_TO_YUV_601_FULL_RANGE, + B2R2_VMX2_BGR_TO_YUV_601_FULL_RANGE, + B2R2_VMX3_BGR_TO_YUV_601_FULL_RANGE, +}; + +static const u32 vmx_full_yuv_to_bgr[] = { + B2R2_VMX0_YUV_TO_BGR_601_FULL_RANGE, + B2R2_VMX1_YUV_TO_BGR_601_FULL_RANGE, + B2R2_VMX2_YUV_TO_BGR_601_FULL_RANGE, + B2R2_VMX3_YUV_TO_BGR_601_FULL_RANGE, +}; + +static const u32 vmx_full_bgr_to_yvu[] = { + B2R2_VMX0_BGR_TO_YVU_601_FULL_RANGE, + B2R2_VMX1_BGR_TO_YVU_601_FULL_RANGE, + B2R2_VMX2_BGR_TO_YVU_601_FULL_RANGE, + B2R2_VMX3_BGR_TO_YVU_601_FULL_RANGE, +}; + +static const u32 vmx_full_yvu_to_bgr[] = { + B2R2_VMX0_YVU_TO_BGR_601_FULL_RANGE, + B2R2_VMX1_YVU_TO_BGR_601_FULL_RANGE, + B2R2_VMX2_YVU_TO_BGR_601_FULL_RANGE, + B2R2_VMX3_YVU_TO_BGR_601_FULL_RANGE, +}; + +/* + * Forward declaration of private functions + */ + +/* + * Public functions + */ + +/** + * Setup input versatile matrix for color space conversion + */ +int b2r2_setup_ivmx(struct b2r2_node *node, enum b2r2_color_conversion cc) +{ + const u32 *vmx = NULL; + + if (b2r2_get_vmx(cc, &vmx) < 0 || vmx == NULL) + return -1; + + node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED; + node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX; + + node->node.GROUP15.B2R2_VMX0 = vmx[0]; + node->node.GROUP15.B2R2_VMX1 = vmx[1]; + node->node.GROUP15.B2R2_VMX2 = vmx[2]; + node->node.GROUP15.B2R2_VMX3 = vmx[3]; + + return 0; +} + +/** + * Setup output versatile matrix for color space conversion + */ +int b2r2_setup_ovmx(struct b2r2_node *node, enum b2r2_color_conversion cc) +{ + const u32 *vmx = NULL; + + if (b2r2_get_vmx(cc, &vmx) < 0 || vmx == NULL) + return -1; + + node->node.GROUP0.B2R2_INS |= 
B2R2_INS_OVMX_ENABLED; + node->node.GROUP0.B2R2_CIC |= B2R2_CIC_OVMX; + + node->node.GROUP16.B2R2_VMX0 = vmx[0]; + node->node.GROUP16.B2R2_VMX1 = vmx[1]; + node->node.GROUP16.B2R2_VMX2 = vmx[2]; + node->node.GROUP16.B2R2_VMX3 = vmx[3]; + + return 0; +} + +enum b2r2_color_conversion b2r2_get_color_conversion(enum b2r2_blt_fmt src_fmt, + enum b2r2_blt_fmt dst_fmt, bool fullrange) +{ + if (b2r2_is_rgb_fmt(src_fmt)) { + if (b2r2_is_yvu_fmt(dst_fmt)) + return fullrange ? B2R2_CC_RGB_TO_YVU_FULL : + B2R2_CC_RGB_TO_YVU; + else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 || + dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 || + dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 || + dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888) + /* + * (A)YUV/VUY(A) formats differ only in component + * order. This is handled by the endianness bit + * in B2R2_STY/TTY registers when src/target are set. + */ + return fullrange ? B2R2_CC_RGB_TO_BLT_YUV888_FULL : + B2R2_CC_RGB_TO_BLT_YUV888; + else if (b2r2_is_yuv_fmt(dst_fmt)) + return fullrange ? B2R2_CC_RGB_TO_YUV_FULL : + B2R2_CC_RGB_TO_YUV; + else if (b2r2_is_bgr_fmt(dst_fmt)) + return B2R2_CC_RGB_TO_BGR; + } else if (b2r2_is_yvu_fmt(src_fmt)) { + if (b2r2_is_rgb_fmt(dst_fmt)) + return fullrange ? B2R2_CC_YVU_FULL_TO_RGB : + B2R2_CC_YVU_TO_RGB; + else if (b2r2_is_bgr_fmt(dst_fmt)) + return fullrange ? B2R2_CC_YVU_FULL_TO_BGR : + B2R2_CC_YVU_TO_BGR; + else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 || + dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 || + dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 || + dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888) + return B2R2_CC_YVU_TO_BLT_YUV888; + else if (b2r2_is_yuv_fmt(dst_fmt) && + !b2r2_is_yvu_fmt(dst_fmt)) + return B2R2_CC_YVU_TO_YUV; + } else if (src_fmt == B2R2_BLT_FMT_24_BIT_YUV888 || + src_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 || + src_fmt == B2R2_BLT_FMT_24_BIT_VUY888 || + src_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888) { + /* + * (A)YUV/VUY(A) formats differ only in component + * order. 
This is handled by the endianness bit + * in B2R2_STY/TTY registers when src/target are set. + */ + if (b2r2_is_rgb_fmt(dst_fmt)) + return fullrange ? B2R2_CC_BLT_YUV888_FULL_TO_RGB : + B2R2_CC_BLT_YUV888_TO_RGB; + else if (b2r2_is_yvu_fmt(dst_fmt)) + return B2R2_CC_BLT_YUV888_TO_YVU; + else if (b2r2_is_yuv_fmt(dst_fmt)) { + switch (dst_fmt) { + case B2R2_BLT_FMT_24_BIT_YUV888: + case B2R2_BLT_FMT_32_BIT_AYUV8888: + case B2R2_BLT_FMT_24_BIT_VUY888: + case B2R2_BLT_FMT_32_BIT_VUYA8888: + return B2R2_CC_NOTHING; + default: + return B2R2_CC_BLT_YUV888_TO_YUV; + } + } + } else if (b2r2_is_yuv_fmt(src_fmt)) { + if (b2r2_is_rgb_fmt(dst_fmt)) + return fullrange ? B2R2_CC_YUV_FULL_TO_RGB : + B2R2_CC_YUV_TO_RGB; + else if (b2r2_is_bgr_fmt(dst_fmt)) + return fullrange ? B2R2_CC_YUV_FULL_TO_BGR : + B2R2_CC_YUV_TO_BGR; + else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 || + dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 || + dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 || + dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888) + return B2R2_CC_YUV_TO_BLT_YUV888; + else if (b2r2_is_yvu_fmt(dst_fmt)) + return B2R2_CC_YVU_TO_YUV; + } else if (b2r2_is_bgr_fmt(src_fmt)) { + if (b2r2_is_rgb_fmt(dst_fmt)) + return B2R2_CC_RGB_TO_BGR; + else if (b2r2_is_yvu_fmt(dst_fmt)) + return fullrange ? B2R2_CC_BGR_TO_YVU_FULL : + B2R2_CC_BGR_TO_YVU; + else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 || + dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 || + dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 || + dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888) + BUG_ON(1); + else if (b2r2_is_yuv_fmt(dst_fmt)) + return fullrange ? 
B2R2_CC_BGR_TO_YUV_FULL : + B2R2_CC_BGR_TO_YUV; + } + + return B2R2_CC_NOTHING; +} + +int b2r2_get_vmx(enum b2r2_color_conversion cc, const u32 **vmx) +{ + if (vmx == NULL) + return -1; + + switch (cc) { + case B2R2_CC_RGB_TO_BGR: + *vmx = &vmx_rgb_to_bgr[0]; + break; + case B2R2_CC_BLT_YUV888_TO_YVU: + *vmx = &vmx_blt_yuv888_to_yvu[0]; + break; + case B2R2_CC_BLT_YUV888_TO_YUV: + *vmx = &vmx_blt_yuv888_to_yuv[0]; + break; + case B2R2_CC_YVU_TO_YUV: + *vmx = &vmx_yvu_to_yuv[0]; + break; + case B2R2_CC_YVU_TO_BLT_YUV888: + *vmx = &vmx_yvu_to_blt_yuv888[0]; + break; + case B2R2_CC_YUV_TO_BLT_YUV888: + *vmx = &vmx_yuv_to_blt_yuv888[0]; + break; + case B2R2_CC_RGB_TO_YUV: + *vmx = &vmx_rgb_to_yuv[0]; + break; + case B2R2_CC_RGB_TO_YUV_FULL: + *vmx = &vmx_full_rgb_to_yuv[0]; + break; + case B2R2_CC_RGB_TO_YVU: + *vmx = &vmx_rgb_to_yvu[0]; + break; + case B2R2_CC_RGB_TO_YVU_FULL: + *vmx = &vmx_full_rgb_to_yvu[0]; + break; + case B2R2_CC_RGB_TO_BLT_YUV888: + *vmx = &vmx_rgb_to_blt_yuv888[0]; + break; + case B2R2_CC_RGB_TO_BLT_YUV888_FULL: + *vmx = &vmx_full_rgb_to_blt_yuv888[0]; + break; + case B2R2_CC_BGR_TO_YVU: + *vmx = &vmx_bgr_to_yvu[0]; + break; + case B2R2_CC_BGR_TO_YVU_FULL: + *vmx = &vmx_full_bgr_to_yvu[0]; + break; + case B2R2_CC_BGR_TO_YUV: + *vmx = &vmx_bgr_to_yuv[0]; + break; + case B2R2_CC_BGR_TO_YUV_FULL: + *vmx = &vmx_full_bgr_to_yuv[0]; + break; + case B2R2_CC_YUV_TO_RGB: + *vmx = &vmx_yuv_to_rgb[0]; + break; + case B2R2_CC_YUV_FULL_TO_RGB: + *vmx = &vmx_full_yuv_to_rgb[0]; + break; + case B2R2_CC_YUV_TO_BGR: + *vmx = &vmx_yuv_to_bgr[0]; + break; + case B2R2_CC_YUV_FULL_TO_BGR: + *vmx = &vmx_full_yuv_to_bgr[0]; + break; + case B2R2_CC_YVU_TO_RGB: + *vmx = &vmx_yvu_to_rgb[0]; + break; + case B2R2_CC_YVU_FULL_TO_RGB: + *vmx = &vmx_full_yvu_to_rgb[0]; + break; + case B2R2_CC_YVU_TO_BGR: + *vmx = &vmx_yvu_to_bgr[0]; + break; + case B2R2_CC_YVU_FULL_TO_BGR: + *vmx = &vmx_full_yvu_to_bgr[0]; + break; + case B2R2_CC_BLT_YUV888_TO_RGB: + *vmx = 
&vmx_blt_yuv888_to_rgb[0]; + break; + case B2R2_CC_BLT_YUV888_FULL_TO_RGB: + *vmx = &vmx_full_blt_yuv888_to_rgb[0]; + break; + case B2R2_CC_NOTHING: + default: + break; + } + + return 0; +} + + diff --git a/drivers/video/b2r2/b2r2_hw_convert.h b/drivers/video/b2r2/b2r2_hw_convert.h new file mode 100644 index 00000000000..e173e898e44 --- /dev/null +++ b/drivers/video/b2r2/b2r2_hw_convert.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) ST-Ericsson SA 2012 + * + * ST-Ericsson B2R2 hw color conversion definitions + * + * Author: Jorgen Nilsson <jorgen.nilsson@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#ifndef B2R2_HW_CONVERT_H__ +#define B2R2_HW_CONVERT_H__ + +#include "b2r2_internal.h" + +enum b2r2_color_conversion { + B2R2_CC_NOTHING = 0, + B2R2_CC_RGB_TO_BGR, + B2R2_CC_BLT_YUV888_TO_YVU, + B2R2_CC_BLT_YUV888_TO_YUV, + B2R2_CC_YUV_TO_BLT_YUV888, + B2R2_CC_YVU_TO_YUV, + B2R2_CC_YVU_TO_BLT_YUV888, + B2R2_CC_RGB_TO_YUV, + B2R2_CC_RGB_TO_YUV_FULL, + B2R2_CC_RGB_TO_YVU, + B2R2_CC_RGB_TO_YVU_FULL, + B2R2_CC_RGB_TO_BLT_YUV888, + B2R2_CC_RGB_TO_BLT_YUV888_FULL, + B2R2_CC_BGR_TO_YVU, + B2R2_CC_BGR_TO_YVU_FULL, + B2R2_CC_BGR_TO_YUV, + B2R2_CC_BGR_TO_YUV_FULL, + B2R2_CC_YUV_TO_RGB, + B2R2_CC_YUV_FULL_TO_RGB, + B2R2_CC_YUV_TO_BGR, + B2R2_CC_YUV_FULL_TO_BGR, + B2R2_CC_YVU_TO_RGB, + B2R2_CC_YVU_FULL_TO_RGB, + B2R2_CC_YVU_TO_BGR, + B2R2_CC_YVU_FULL_TO_BGR, + B2R2_CC_BLT_YUV888_TO_RGB, + B2R2_CC_BLT_YUV888_FULL_TO_RGB, +}; + +int b2r2_setup_ivmx(struct b2r2_node *node, enum b2r2_color_conversion cc); +int b2r2_setup_ovmx(struct b2r2_node *node, enum b2r2_color_conversion cc); +enum b2r2_color_conversion b2r2_get_color_conversion(enum b2r2_blt_fmt src_fmt, + enum b2r2_blt_fmt dst_fmt, bool fullrange); +int b2r2_get_vmx(enum b2r2_color_conversion cc, const u32 **vmx); + +#endif /* B2R2_HW_CONVERT_H__ */ diff --git a/drivers/video/b2r2/b2r2_input_validation.c b/drivers/video/b2r2/b2r2_input_validation.c new file mode 100644 
index 00000000000..c8eb2f7b025 --- /dev/null +++ b/drivers/video/b2r2/b2r2_input_validation.c @@ -0,0 +1,496 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * + * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free Software + * Foundation; either version 2 of the License, or (at your option) any later + * version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place, Suite 330, Boston, MA 02111-1307 USA + */ + + +#include "b2r2_internal.h" +#include "b2r2_input_validation.h" +#include "b2r2_debug.h" +#include "b2r2_utils.h" + +#include <video/b2r2_blt.h> +#include <linux/kernel.h> +#include <linux/errno.h> + + +static bool is_valid_format(enum b2r2_blt_fmt fmt); +static bool is_valid_bg_format(enum b2r2_blt_fmt fmt); + +static bool is_valid_pitch_for_fmt(struct device *dev, + u32 pitch, s32 width, enum b2r2_blt_fmt fmt); + +static bool is_aligned_width_for_fmt(s32 width, enum b2r2_blt_fmt fmt); +static s32 width_2_complete_width(s32 width, enum b2r2_blt_fmt fmt); +static bool is_complete_width_for_fmt(s32 width, enum b2r2_blt_fmt fmt); +static bool is_valid_height_for_fmt(s32 height, enum b2r2_blt_fmt fmt); + +static bool validate_img(struct device *dev, + struct b2r2_blt_img *img); +static bool validate_rect(struct device *dev, + struct b2r2_blt_rect *rect); + + +static bool is_valid_format(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_1_BIT_A1: + case B2R2_BLT_FMT_8_BIT_A8: + case 
B2R2_BLT_FMT_YUV420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: + case B2R2_BLT_FMT_16_BIT_ARGB4444: + case B2R2_BLT_FMT_16_BIT_ARGB1555: + case B2R2_BLT_FMT_16_BIT_RGB565: + case B2R2_BLT_FMT_Y_CB_Y_CR: + case B2R2_BLT_FMT_CB_Y_CR_Y: + case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: + case B2R2_BLT_FMT_24_BIT_RGB888: + case B2R2_BLT_FMT_24_BIT_ARGB8565: + case B2R2_BLT_FMT_24_BIT_YUV888: + case B2R2_BLT_FMT_32_BIT_ARGB8888: + case B2R2_BLT_FMT_32_BIT_ABGR8888: + case B2R2_BLT_FMT_32_BIT_AYUV8888: + case B2R2_BLT_FMT_YUV444_PACKED_PLANAR: + case B2R2_BLT_FMT_24_BIT_VUY888: + case B2R2_BLT_FMT_32_BIT_VUYA8888: + return true; + + default: + return false; + } +} + +static bool is_valid_bg_format(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV444_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: + case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: + return false; + default: + return true; + } +} + + +static bool is_valid_pitch_for_fmt(struct device *dev, + u32 pitch, s32 width, enum b2r2_blt_fmt fmt) +{ + s32 complete_width; + u32 pitch_derived_from_width; + + complete_width = width_2_complete_width(width, fmt); + + pitch_derived_from_width = b2r2_calc_pitch_from_width(dev, + complete_width, fmt); + + if (pitch < pitch_derived_from_width) + return 
false; + + switch (fmt) { + case B2R2_BLT_FMT_16_BIT_ARGB4444: + case B2R2_BLT_FMT_16_BIT_ARGB1555: + case B2R2_BLT_FMT_16_BIT_RGB565: + if (!b2r2_is_aligned(pitch, 2)) + return false; + + break; + + case B2R2_BLT_FMT_Y_CB_Y_CR: + case B2R2_BLT_FMT_CB_Y_CR_Y: + case B2R2_BLT_FMT_24_BIT_RGB888: + case B2R2_BLT_FMT_24_BIT_ARGB8565: + case B2R2_BLT_FMT_24_BIT_YUV888: + case B2R2_BLT_FMT_24_BIT_VUY888: + case B2R2_BLT_FMT_32_BIT_ARGB8888: + case B2R2_BLT_FMT_32_BIT_ABGR8888: + case B2R2_BLT_FMT_32_BIT_AYUV8888: + case B2R2_BLT_FMT_32_BIT_VUYA8888: + if (!b2r2_is_aligned(pitch, 4)) + return false; + + break; + + default: + break; + } + + return true; +} + + +static bool is_aligned_width_for_fmt(s32 width, enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_24_BIT_RGB888: + case B2R2_BLT_FMT_24_BIT_ARGB8565: + case B2R2_BLT_FMT_24_BIT_YUV888: + case B2R2_BLT_FMT_24_BIT_VUY888: + if (!b2r2_is_aligned(width, 4)) + return false; + + break; + + case B2R2_BLT_FMT_1_BIT_A1: + if (!b2r2_is_aligned(width, 8)) + return false; + + break; + + case B2R2_BLT_FMT_Y_CB_Y_CR: + case B2R2_BLT_FMT_CB_Y_CR_Y: + if (!b2r2_is_aligned(width, 2)) + return false; + + break; + + default: + break; + } + + return true; +} + +static s32 width_2_complete_width(s32 width, enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_Y_CB_Y_CR: + case B2R2_BLT_FMT_CB_Y_CR_Y: + case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: + return b2r2_align_up(width, 2); + + case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: + case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: + return b2r2_align_up(width, 16); + + default: + return width; + } +} + +static bool is_complete_width_for_fmt(s32 width, enum 
b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_Y_CB_Y_CR: + case B2R2_BLT_FMT_CB_Y_CR_Y: + case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: + if (!b2r2_is_aligned(width, 2)) + return false; + + break; + + case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: + case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: + if (!b2r2_is_aligned(width, 16)) + return false; + + break; + + default: + break; + } + + return true; +} + +static bool is_valid_height_for_fmt(s32 height, enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: + if (!b2r2_is_aligned(height, 2)) + return false; + + break; + + case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: + case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: + if (!b2r2_is_aligned(height, 16)) + return false; + + break; + + default: + break; + } + + return true; +} + +static bool validate_img(struct device *dev, + struct b2r2_blt_img *img) +{ + /* + * So that we always can do width * height * bpp without overflowing a + * 32 bit signed integer. isqrt(s32_max / max_bpp) was used to + * calculate the value. 
+ */ + static const s32 max_img_width_height = 8191; + + s32 img_size; + + if (!is_valid_format(img->fmt)) { + b2r2_log_info(dev, "Validation Error: " + "!is_valid_format(img->fmt)\n"); + return false; + } + + if (img->width < 0 || img->width > max_img_width_height || + img->height < 0 || img->height > max_img_width_height) { + b2r2_log_info(dev, "Validation Error: " + "img->width < 0 || " + "img->width > max_img_width_height || " + "img->height < 0 || " + "img->height > max_img_width_height\n"); + return false; + } + + if (b2r2_is_mb_fmt(img->fmt)) { + if (!is_complete_width_for_fmt(img->width, img->fmt)) { + b2r2_log_info(dev, "Validation Error: " + "!is_complete_width_for_fmt(img->width," + " img->fmt)\n"); + return false; + } + } else { + if (0 == img->pitch && + (!is_aligned_width_for_fmt(img->width, img->fmt) || + !is_complete_width_for_fmt(img->width, img->fmt))) { + b2r2_log_info(dev, + "Validation Error: " + "0 == img->pitch && " + "(!is_aligned_width_for_fmt(img->width," + " img->fmt) || " + "!is_complete_width_for_fmt(img->width," + " img->fmt))\n"); + return false; + } + + if (img->pitch != 0 && + !is_valid_pitch_for_fmt(dev, img->pitch, img->width, + img->fmt)) { + b2r2_log_info(dev, + "Validation Error: " + "img->pitch != 0 && " + "!is_valid_pitch_for_fmt(dev, " + "img->pitch, img->width, img->fmt)\n"); + return false; + } + } + + if (!is_valid_height_for_fmt(img->width, img->fmt)) { + b2r2_log_info(dev, "Validation Error: " + "!is_valid_height_for_fmt(img->width, img->fmt)\n"); + return false; + } + + img_size = b2r2_get_img_size(dev, img); + + /* + * To keep the entire image inside s32 range. 
+ */ + if ((B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET == img->buf.type || + B2R2_BLT_PTR_FD_OFFSET == img->buf.type) && + img->buf.offset > (u32)b2r2_s32_max - (u32)img_size) { + b2r2_log_info(dev, "Validation Error: " + "(B2R2_BLT_PTR_HWMEM_BUF_NAME_OFFSET == " + "img->buf.type || B2R2_BLT_PTR_FD_OFFSET == " + "img->buf.type) && img->buf.offset > " + "(u32)B2R2_MAX_S32 - (u32)img_size\n"); + return false; + } + + return true; +} + +static bool validate_rect(struct device *dev, + struct b2r2_blt_rect *rect) +{ + if (rect->width < 0 || rect->height < 0) { + b2r2_log_info(dev, "Validation Error: " + "rect->width < 0 || rect->height < 0\n"); + return false; + } + + return true; +} + +bool b2r2_validate_user_req(struct device *dev, + struct b2r2_blt_req *req) +{ + bool is_src_img_used; + bool is_bg_img_used; + bool is_src_mask_used; + bool is_dst_clip_rect_used; + + if (req->size != sizeof(struct b2r2_blt_req)) { + b2r2_log_err(dev, "Validation Error: " + "req->size != sizeof(struct b2r2_blt_req)\n"); + return false; + } + + is_src_img_used = !(req->flags & B2R2_BLT_FLAG_SOURCE_FILL || + req->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW); + is_bg_img_used = (req->flags & B2R2_BLT_FLAG_BG_BLEND); + is_src_mask_used = req->flags & B2R2_BLT_FLAG_SOURCE_MASK; + is_dst_clip_rect_used = req->flags & B2R2_BLT_FLAG_DESTINATION_CLIP; + + if (is_src_img_used || is_src_mask_used) { + if (!validate_rect(dev, &req->src_rect)) { + b2r2_log_info(dev, "Validation Error: " + "!validate_rect(dev, &req->src_rect)\n"); + return false; + } + } + + if (!validate_rect(dev, &req->dst_rect)) { + b2r2_log_info(dev, "Validation Error: " + "!validate_rect(dev, &req->dst_rect)\n"); + return false; + } + + if (is_bg_img_used) { + if (!validate_rect(dev, &req->bg_rect)) { + b2r2_log_info(dev, "Validation Error: " + "!validate_rect(dev, &req->bg_rect)\n"); + return false; + } + } + + if (is_dst_clip_rect_used) { + if (!validate_rect(dev, &req->dst_clip_rect)) { + b2r2_log_info(dev, "Validation Error: " + 
"!validate_rect(dev, &req->dst_clip_rect)\n"); + return false; + } + } + + if (is_src_img_used) { + struct b2r2_blt_rect src_img_bounding_rect; + + if (!validate_img(dev, &req->src_img)) { + b2r2_log_info(dev, "Validation Error: " + "!validate_img(dev, &req->src_img)\n"); + return false; + } + + b2r2_get_img_bounding_rect(&req->src_img, + &src_img_bounding_rect); + if (!b2r2_is_rect_inside_rect(&req->src_rect, + &src_img_bounding_rect)) { + b2r2_log_info(dev, "Validation Error: " + "!b2r2_is_rect_inside_rect(&req->src_rect, " + "&src_img_bounding_rect)\n"); + return false; + } + } + + if (is_bg_img_used) { + struct b2r2_blt_rect bg_img_bounding_rect; + + if (!validate_img(dev, &req->bg_img)) { + b2r2_log_info(dev, "Validation Error: " + "!validate_img(dev, &req->bg_img)\n"); + return false; + } + + if (!is_valid_bg_format(req->bg_img.fmt)) { + b2r2_log_info(dev, "Validation Error: " + "!is_valid_bg_format(req->bg_img->fmt)\n"); + return false; + } + + b2r2_get_img_bounding_rect(&req->bg_img, + &bg_img_bounding_rect); + if (!b2r2_is_rect_inside_rect(&req->bg_rect, + &bg_img_bounding_rect)) { + b2r2_log_info(dev, "Validation Error: " + "!b2r2_is_rect_inside_rect(&req->bg_rect, " + "&bg_img_bounding_rect)\n"); + return false; + } + } + + if (is_src_mask_used) { + struct b2r2_blt_rect src_mask_bounding_rect; + + if (!validate_img(dev, &req->src_mask)) { + b2r2_log_info(dev, "Validation Error: " + "!validate_img(dev, &req->src_mask)\n"); + return false; + } + + b2r2_get_img_bounding_rect(&req->src_mask, + &src_mask_bounding_rect); + if (!b2r2_is_rect_inside_rect(&req->src_rect, + &src_mask_bounding_rect)) { + b2r2_log_info(dev, "Validation Error: " + "!b2r2_is_rect_inside_rect(&req->src_rect, " + "&src_mask_bounding_rect)\n"); + return false; + } + } + + if (!validate_img(dev, &req->dst_img)) { + b2r2_log_info(dev, "Validation Error: " + "!validate_img(dev, &req->dst_img)\n"); + return false; + } + + if (is_bg_img_used) { + if (!b2r2_is_rect_gte_rect(&req->bg_rect, 
&req->dst_rect)) { + b2r2_log_info(dev, "Validation Error: " + "!b2r2_is_rect_gte_rect(&req->bg_rect, " + "&req->dst_rect)\n"); + return false; + } + } + + return true; +} diff --git a/drivers/video/b2r2/b2r2_input_validation.h b/drivers/video/b2r2/b2r2_input_validation.h new file mode 100644 index 00000000000..25f022a45ab --- /dev/null +++ b/drivers/video/b2r2/b2r2_input_validation.h @@ -0,0 +1,31 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * + * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free Software + * Foundation; either version 2 of the License, or (at your option) any later + * version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _LINUX_DRIVERS_VIDEO_B2R2_INPUT_VALIDATION_H_ +#define _LINUX_DRIVERS_VIDEO_B2R2_INPUT_VALIDATION_H_ + +#include <video/b2r2_blt.h> + +#include "b2r2_internal.h" + +bool b2r2_validate_user_req(struct device *dev, + struct b2r2_blt_req *req); + +#endif diff --git a/drivers/video/b2r2/b2r2_internal.h b/drivers/video/b2r2/b2r2_internal.h new file mode 100644 index 00000000000..329d644e5e1 --- /dev/null +++ b/drivers/video/b2r2/b2r2_internal.h @@ -0,0 +1,610 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 internal definitions + * + * Author: Robert Fekete <robert.fekete@stericsson.com> + * Author: Paul Wannback + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#ifndef _LINUX_DRIVERS_VIDEO_B2R2_INTERNAL_H_ +#define _LINUX_DRIVERS_VIDEO_B2R2_INTERNAL_H_ + +#include <linux/device.h> +#include <linux/miscdevice.h> +#include <video/b2r2_blt.h> + +#include "b2r2_global.h" +#include "b2r2_hw.h" + +/** + * B2R2_MAX_NBR_DEVICES - The maximum number of B2R2s handled + */ +#define B2R2_MAX_NBR_DEVICES 2 + +/* The maximum possible number of temporary buffers needed */ +#define MAX_TMP_BUFS_NEEDED 2 + +/* Size of the color look-up table */ +#define CLUT_SIZE 1024 + +/* The defined bits of the Interrupt Status Register */ +#define B2R2_ITS_MASK 0x0FFFF0FF + +/** + * b2r2_op_type - the type of B2R2 operation to configure + */ +enum b2r2_op_type { + B2R2_DIRECT_COPY, + B2R2_DIRECT_FILL, + B2R2_COPY, + B2R2_FILL, + B2R2_SCALE, + B2R2_ROTATE, + B2R2_SCALE_AND_ROTATE, + B2R2_FLIP, +}; + +/** + * b2r2_fmt_type - the type of buffer for a given format + */ +enum b2r2_fmt_type { + B2R2_FMT_TYPE_RASTER, + B2R2_FMT_TYPE_SEMI_PLANAR, + B2R2_FMT_TYPE_PLANAR, +}; + +/** + * b2r2_fmt_conv - the type of format conversion to do + */ +enum b2r2_fmt_conv { + B2R2_FMT_CONV_NONE, + B2R2_FMT_CONV_RGB_TO_YUV, + B2R2_FMT_CONV_YUV_TO_RGB, + B2R2_FMT_CONV_YUV_TO_YUV, + B2R2_FMT_CONV_RGB_TO_BGR, + B2R2_FMT_CONV_BGR_TO_RGB, + B2R2_FMT_CONV_YUV_TO_BGR, + B2R2_FMT_CONV_BGR_TO_YUV, +}; + +/** + * enum b2r2_core_queue - Indicates the B2R2 queue that the job belongs to + * + * @B2R2_CORE_QUEUE_AQ1: Application queue 1 + * @B2R2_CORE_QUEUE_AQ2: Application queue 2 + * @B2R2_CORE_QUEUE_AQ3: Application queue 3 + * @B2R2_CORE_QUEUE_AQ4: Application queue 4 + * @B2R2_CORE_QUEUE_CQ1: Composition queue 1 + * @B2R2_CORE_QUEUE_CQ2: Composition queue 2 + * @B2R2_CORE_QUEUE_NO_OF: Number of queues + */ +enum b2r2_core_queue { + B2R2_CORE_QUEUE_AQ1 = 0, + B2R2_CORE_QUEUE_AQ2, + B2R2_CORE_QUEUE_AQ3, + B2R2_CORE_QUEUE_AQ4, + B2R2_CORE_QUEUE_CQ1, + B2R2_CORE_QUEUE_CQ2, + B2R2_CORE_QUEUE_NO_OF, +}; + +#define B2R2_NUM_APPLICATIONS_QUEUES 4 + +/** + * enum 
b2r2_core_job_state - Indicates the current state of the job + * + * @B2R2_CORE_JOB_IDLE: Never queued + * @B2R2_CORE_JOB_QUEUED: In queue but not started yet + * @B2R2_CORE_JOB_RUNNING: Running, executed by B2R2 + * @B2R2_CORE_JOB_DONE: Completed + * @B2R2_CORE_JOB_CANCELED: Canceled + */ +enum b2r2_core_job_state { + B2R2_CORE_JOB_IDLE = 0, + B2R2_CORE_JOB_QUEUED, + B2R2_CORE_JOB_RUNNING, + B2R2_CORE_JOB_DONE, + B2R2_CORE_JOB_CANCELED, +}; + +/** + * b2r2_work_buf - specification for a temporary work buffer + * + * @size - the size of the buffer (set by b2r2_node_split) + * @phys_addr - the physical address of the buffer (set by b2r2_blt_main) + */ +struct b2r2_work_buf { + u32 size; + u32 phys_addr; + void *virt_addr; + u32 mem_handle; +}; + +struct tmp_buf { + struct b2r2_work_buf buf; + bool in_use; +}; + +/** + * struct b2r2_control_instance - Represents the B2R2 instance + * (one per open and blitter core) + * + * @lock: Lock to protect the instance + * @control_id: The b2r2 core core control identifier + * @control: The b2r2 core control entity + * + * @report_list: Ready requests that should be reported, + * @report_list_waitq: Wait queue for report list + * @no_of_active_requests: Number of requests added but not reported + * in callback. + * @synching: true if any client is waiting for b2r2_blt_synch(0) + * @synch_done_waitq: Wait queue to handle synching on request_id 0 + */ +struct b2r2_control_instance { + struct mutex lock; + int control_id; + struct b2r2_control *control; + + /* Requests to be reported */ + struct list_head report_list; + wait_queue_head_t report_list_waitq; + + /* Below for synching */ + u32 no_of_active_requests; + bool synching; + wait_queue_head_t synch_done_waitq; +}; + +/** + * struct b2r2_node - Represents a B2R2 node with reqister values, executed + * by B2R2. Should be allocated non-cached. 
+ * + * @next: Next node + * @physical_address: Physical address to be given to B2R2 + * (physical address of "node" member below) + * @node: The B2R2 node with register settings. This is the data + * that B2R2 will use. + * + */ +struct b2r2_node { + struct b2r2_node *next; + u32 physical_address; + + int src_tmp_index; + int dst_tmp_index; + + int src_index; + + /* B2R2 regs comes here */ + struct b2r2_link_list node; +}; + +/** + * struct b2r2_resolved_buf - Contains calculated information about + * image buffers. + * + * @physical_address: Physical address of the buffer + * @virtual_address: Virtual address of the buffer + * @is_pmem: true if buffer is from pmem + * @hwmem_session: Hwmem session + * @hwmem_alloc: Hwmem alloc + * @filep: File pointer of mapped file (like pmem device, frame buffer device) + * @file_physical_start: Physical address of file start + * @file_virtual_start: Virtual address of file start + * @file_len: File len + * + */ +struct b2r2_resolved_buf { + u32 physical_address; + void *virtual_address; + bool is_pmem; + struct hwmem_alloc *hwmem_alloc; + /* Data for validation below */ + struct file *filep; + u32 file_physical_start; + u32 file_virtual_start; + u32 file_len; +}; + +/** + * b2r2_node_split_buf - information about a source or destination buffer + * + * @addr - the physical base address + * @chroma_addr - the physical address of the chroma plane + * @chroma_cr_addr - the physical address of the Cr chroma plane + * @fmt - the buffer format + * @fmt_type - the buffer format type + * @rect - the rectangle of the buffer to use + * @color - the color value to use is case of a fill operation + * @pitch - the pixmap byte pitch + * @height - the pixmap height + * @alpha_range - the alpha range of the buffer (0-128 or 0-255) + * @hso - the horizontal scan order + * @vso - the vertical scan order + * @endian - the endianess of the buffer + * @plane_selection - the plane to write if buffer is planar or semi-planar + */ +struct 
b2r2_node_split_buf { + u32 addr; + u32 chroma_addr; + u32 chroma_cr_addr; + + enum b2r2_blt_fmt fmt; + enum b2r2_fmt_type type; + + struct b2r2_blt_rect rect; + struct b2r2_blt_rect win; + + s32 dx; + s32 dy; + + u32 color; + u16 pitch; + u16 width; + u16 height; + + enum b2r2_ty alpha_range; + enum b2r2_ty hso; + enum b2r2_ty vso; + enum b2r2_ty endian; + enum b2r2_tty dither; + + /* Plane selection (used when writing to a multibuffer format) */ + enum b2r2_tty plane_selection; + + /* Chroma plane selection (used when writing planar formats) */ + enum b2r2_tty chroma_selection; + + int tmp_buf_index; +}; + +/** + * b2r2_node_split_job - an instance of a node split job + * + * @type - the type of operation + * @ivmx - the ivmx matrix to use for color conversion + * @blend - determines if blending is enabled + * @clip - determines if destination clipping is enabled + * @rotation - determines if rotation is requested + * @fullrange - determines YUV<->RGB conversion matrix (iVMx) + * @swap_fg_bg - determines if FG and BG should be swapped when blending + * @flags - the flags passed in the blt request + * @flag_param - parameter required by certain flags, + * e.g. color for source color keying. 
+ * @transform - the transforms passed in the blt request + * @global_alpha - the global alpha + * @clip_rect - the clipping rectangle to use + * @horiz_rescale - determmines if horizontal rescaling is enabled + * @horiz_sf - the horizontal scale factor + * @vert_rescale - determines if vertical rescale is enabled + * @vert_sf - the vertical scale factor + * @src - the incoming source buffer + * @bg - the incoming background buffer + * @dst - the outgoing destination buffer + * @work_bufs - work buffer specifications + * @tmp_bufs - temporary buffers + * @buf_count - the number of temporary buffers used for the job + * @node_count - the number of nodes used for the job + * @max_buf_size - the maximum size of temporary buffers + * @nbr_rows - the number of tile rows in the blit operation + * @nbr_cols - the number of time columns in the blit operation + */ +struct b2r2_node_split_job { + enum b2r2_op_type type; + + const u32 *ivmx; + + bool blend; + bool clip; + bool rotation; + bool fullrange; + + bool swap_fg_bg; + + u32 flags; + u32 flag_param; + u32 transform; + u32 global_alpha; + + struct b2r2_blt_rect clip_rect; + + bool h_rescale; + u16 h_rsf; + + bool v_rescale; + u16 v_rsf; + + struct b2r2_node_split_buf src; + struct b2r2_node_split_buf bg; + struct b2r2_node_split_buf dst; + + struct b2r2_work_buf work_bufs[MAX_TMP_BUFS_NEEDED]; + struct b2r2_node_split_buf tmp_bufs[MAX_TMP_BUFS_NEEDED]; + + u32 buf_count; + u32 node_count; + u32 max_buf_size; +}; + +/** + * struct b2r2_core_job - Represents a B2R2 core job + * + * @start_sentinel: Memory overwrite guard + * + * @tag: Client value. Used by b2r2_core_job_find_first_with_tag(). + * @prio: Job priority, from -19 up to 20. Mapped to the + * B2R2 application queues. Filled in by the client. + * @first_node_address: Physical address of the first node. Filled + * in by the client. + * @last_node_address: Physical address of the last node. Filled + * in by the client. 
+ * + * @callback: Function that will be called when the job is done. + * @acquire_resources: Function that allocates the resources needed + * to execute the job (i.e. SRAM alloc). Must not + * sleep if atomic, should fail with negative error code + * if resources not available. + * @release_resources: Function that releases the resources previously + * allocated by acquire_resources (i.e. SRAM alloc). + * @release: Function that will be called when the reference count reaches + * zero. + * + * @job_id: Unique id for this job, assigned by B2R2 core + * @job_state: The current state of the job + * @jiffies: Number of jiffies needed for this request + * + * @list: List entry element for internal list management + * @event: Wait queue event to wait for job done + * @work: Work queue structure, for callback implementation + * + * @queue: The queue that this job shall be submitted to + * @control: B2R2 Queue control + * @pace_control: For composition queue only + * @interrupt_context: Context for interrupt + * @hw_start_time: The point when the b2r2 HW queue is activated for this job + * @nsec_active_in_hw: Time spent on the b2r2 HW queue for this job + * + * @end_sentinel: Memory overwrite guard + */ +struct b2r2_core_job { + u32 start_sentinel; + + /* Data to be filled in by client */ + int tag; + int data; + int prio; + u32 first_node_address; + u32 last_node_address; + void (*callback)(struct b2r2_core_job *); + int (*acquire_resources)(struct b2r2_core_job *, + bool atomic); + void (*release_resources)(struct b2r2_core_job *, + bool atomic); + void (*release)(struct b2r2_core_job *); + + /* Output data, do not modify */ + int job_id; + enum b2r2_core_job_state job_state; + unsigned long jiffies; + + /* Data below is internal to b2r2_core, do not modify */ + + /* Reference counting */ + u32 ref_count; + + /* Internal data */ + struct list_head list; + wait_queue_head_t event; + struct work_struct work; + + /* B2R2 HW data */ + enum b2r2_core_queue queue; + u32 
control; + u32 pace_control; + u32 interrupt_context; + + /* Timing data */ + u32 hw_start_time; + s32 nsec_active_in_hw; + + u32 end_sentinel; +}; + +/** + * struct b2r2_blt_request - Represents one B2R2 blit request + * + * @instance: Back pointer to the instance structure + * @list: List item to keep track of requests per instance + * @user_req: The request received from userspace + * @job: The administration structure for the B2R2 job, + * consisting of one or more nodes + * @node_split_job: The administration structure for the B2R2 node split job + * @first_node: Pointer to the first B2R2 node + * @request_id: Request id for this job + * @core_mask: Bit mask with the cores doing part of the job + * @node_split_handle: Handle of the node split + * @src_resolved: Calculated info about the source buffer + * @src_mask_resolved: Calculated info about the source mask buffer + * @bg_resolved: Calculated info about the background buffer + * @dst_resolved: Calculated info about the destination buffer + * @profile: True if the blit shall be profiled, false otherwise + */ +struct b2r2_blt_request { + struct b2r2_control_instance *instance; + struct list_head list; + struct b2r2_blt_req user_req; + struct b2r2_core_job job; + struct b2r2_node_split_job node_split_job; + struct b2r2_node *first_node; + int request_id; + u32 core_mask; + + /* Resolved buffer addresses */ + struct b2r2_resolved_buf src_resolved; + struct b2r2_resolved_buf src_mask_resolved; + struct b2r2_resolved_buf bg_resolved; + struct b2r2_resolved_buf dst_resolved; + + /* TBD: Info about SRAM usage & needs */ + struct b2r2_work_buf *bufs; + u32 buf_count; + + /* color look-up table */ + void *clut; + u32 clut_phys_addr; + + /* Profiling stuff */ + bool profile; + + s32 nsec_active_in_cpu; + + u32 start_time_nsec; + s32 total_time_nsec; +}; + +/** + * struct b2r2_mem_heap - The memory heap + * + * @start_phys_addr: Physical memory start address + * @start_virt_ptr: Virtual pointer to start + * @size: 
Memory size + * @align: Alignment + * @blocks: List of all blocks + * @heap_lock: Protection for the heap + * @node_size: Size of each B2R2 node + * @node_heap: Heap for B2R2 node allocations + * @debugfs_root_dir: Debugfs B2R2 mem root dir + * @debugfs_heap_stats: Debugfs B2R2 memory status + * @debugfs_dir_blocks: Debugfs B2R2 free blocks dir + */ +struct b2r2_mem_heap { + dma_addr_t start_phys_addr; + void *start_virt_ptr; + u32 size; + u32 align; + struct list_head blocks; + spinlock_t heap_lock; + u32 node_size; + struct dma_pool *node_heap; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_root_dir; + struct dentry *debugfs_heap_stats; + struct dentry *debugfs_dir_blocks; +#endif +}; + +/** + * + * @dev: The device handle of the b2r2 instance + * @id: The id of the b2r2 instance + * @name: The name of the b2r2 instance + * @data: Used to store a reference to b2r2_core + * @tmp_bufs: Temporary buffers needed in the node splitter + * @filters_initialized: Indicating of filters has been + * initialized for this b2r2 instance + * @mem_heap: The b2r2 heap, e.g. used to allocate nodes + * @debugfs_latest_request: Copy of the latest request issued + * @debugfs_root_dir: The debugfs root directory, e.g. /debugfs/b2r2 + * @debugfs_debug_root_dir: The b2r2 debug root directory, + * e.g. 
/debugfs/b2r2/debug + * @stat_lock: Spin lock protecting the statistics + * @stat_n_jobs_added: Number of jobs added to b2r2_core + * @stat_n_jobs_released: Number of jobs released (job_release called) + * @stat_n_jobs_in_report_list: Number of jobs currently in the report list + * @stat_n_in_blt: Number of client threads currently exec inside b2r2_blt() + * @stat_n_in_blt_synch: Number of client threads currently waiting for synch + * @stat_n_in_blt_add: Number of client threads currenlty adding in b2r2_blt + * @stat_n_in_blt_wait: Number of client threads currently waiting in b2r2_blt + * @stat_n_in_synch_0: Number of client threads currently in b2r2_blt_sync + * waiting for all client jobs to finish + * @stat_n_in_synch_job: Number of client threads currently in b2r2_blt_sync + * waiting specific job to finish + * @stat_n_in_query_cap: Number of clients currently in query cap + * @stat_n_in_open: Number of clients currently in b2r2_blt_open + * @stat_n_in_release: Number of clients currently in b2r2_blt_release + * @last_job_lock: Mutex protecting last_job + * @last_job: The last running job on this b2r2 instance + * @last_job_chars: Temporary buffer used in printing last_job + * @prev_node_count: Node cound of last_job + */ +struct b2r2_control { + struct device *dev; + void *data; + int id; + struct kref ref; + bool enabled; + struct tmp_buf tmp_bufs[MAX_TMP_BUFS_NEEDED]; + int filters_initialized; + struct b2r2_mem_heap mem_heap; +#ifdef CONFIG_DEBUG_FS + struct b2r2_blt_request debugfs_latest_request; + struct dentry *debugfs_root_dir; + struct dentry *debugfs_debug_root_dir; +#endif + struct mutex stat_lock; + unsigned long stat_n_jobs_added; + unsigned long stat_n_jobs_released; + unsigned long stat_n_jobs_in_report_list; + unsigned long stat_n_in_blt; + unsigned long stat_n_in_blt_synch; + unsigned long stat_n_in_blt_add; + unsigned long stat_n_in_blt_wait; + unsigned long stat_n_in_synch_0; + unsigned long stat_n_in_synch_job; + unsigned long 
stat_n_in_query_cap; + unsigned long stat_n_in_open; + unsigned long stat_n_in_release; + struct mutex last_job_lock; + struct b2r2_node *last_job; + char *last_job_chars; + int prev_node_count; +}; + +/* FIXME: The functions below should be removed when we are + switching to the new Robert Lind allocator */ + +/** + * b2r2_blt_alloc_nodes() - Allocate nodes + * + * @node_count: Number of nodes to allocate + * + * Return: + * Returns a pointer to the first node in the node list. + */ +struct b2r2_node *b2r2_blt_alloc_nodes(struct b2r2_control *cont, + int node_count); + +/** + * b2r2_blt_free_nodes() - Release nodes previously allocated via + * b2r2_generate_nodes + * + * @first_node: First node in linked list of nodes + */ +void b2r2_blt_free_nodes(struct b2r2_control *cont, + struct b2r2_node *first_node); + +/** + * b2r2_blt_module_init() - Initialize the B2R2 blt module + */ +int b2r2_blt_module_init(struct b2r2_control *cont); + +/** + * b2r2_blt_module_exit() - Un-initialize the B2R2 blt module + */ +void b2r2_blt_module_exit(struct b2r2_control *cont); + +/** + * b2r2_blt_add_control() - Add the b2r2 core control + */ +void b2r2_blt_add_control(struct b2r2_control *cont); + +/** + * b2r2_blt_remove_control() - Remove the b2r2 core control + */ +void b2r2_blt_remove_control(struct b2r2_control *cont); + +#endif diff --git a/drivers/video/b2r2/b2r2_kernel_if.c b/drivers/video/b2r2/b2r2_kernel_if.c new file mode 100644 index 00000000000..373311ccca5 --- /dev/null +++ b/drivers/video/b2r2/b2r2_kernel_if.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 kernel interface for beeing a separate module + * + * Author: Robert Fekete <robert.fekete@stericsson.com> + * Author: Paul Wannback + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/file.h> +#include <linux/poll.h> +#include <linux/device.h> +#include <linux/miscdevice.h> +#include <linux/list.h> +#ifdef CONFIG_ANDROID_PMEM +#include <linux/android_pmem.h> +#endif +#include <linux/fb.h> +#include <linux/sched.h> +#include <asm/uaccess.h> +#include <asm/cacheflush.h> + +EXPORT_SYMBOL(fget_light); +EXPORT_SYMBOL(fput_light); +EXPORT_SYMBOL(flush_cache_range); +EXPORT_SYMBOL(task_sched_runtime); +#ifdef CONFIG_ANDROID_PMEM +EXPORT_SYMBOL(get_pmem_file); +EXPORT_SYMBOL(put_pmem_file); +EXPORT_SYMBOL(flush_pmem_file); +#endif diff --git a/drivers/video/b2r2/b2r2_mem_alloc.c b/drivers/video/b2r2/b2r2_mem_alloc.c new file mode 100644 index 00000000000..584b324b8fe --- /dev/null +++ b/drivers/video/b2r2/b2r2_mem_alloc.c @@ -0,0 +1,669 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 internal Memory allocator + * + * Author: Robert Lind <robert.lind@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/debugfs.h> +#include <linux/uaccess.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> + +#include "b2r2_internal.h" +#include "b2r2_mem_alloc.h" + +/* Forward declarations */ +static struct b2r2_mem_block *b2r2_mem_block_alloc( + struct b2r2_control *cont, u32 offset, u32 size, bool free); +static void b2r2_mem_block_free(struct b2r2_mem_block *mem_block); +static int b2r2_mem_heap_status(struct b2r2_mem_heap *mem_heap, + struct b2r2_mem_heap_status *mem_heap_status); + +/* Align value down to specified alignment */ +static inline u32 align_down(u32 align, u32 value) +{ + return value & ~(align - 1); +} + +/* Align value up to specified alignment */ +static inline u32 align_up(u32 align, u32 value) +{ + return (value + align - 1) & ~(align - 1); +} + + +#ifdef CONFIG_DEBUG_FS +/* About debugfs: + * debugfs is a mountable debug file system. 
+ * + * Mount like this: + * mkdir /debug + * mount -t debugfs none /debug + * ls /debug/b2r2/mem + * + * ls -al /debug/b2r2/mem/blocks + * cat /debug/b2r2/mem/stats + */ + + +/* Create string containing memory heap status */ +static char *get_b2r2_mem_stats(struct b2r2_mem_heap *mem_heap, char *buf) +{ + struct b2r2_mem_heap_status mem_heap_status; + + if (b2r2_mem_heap_status(mem_heap, &mem_heap_status) != 0) { + strcpy(buf, "Error, failed to get status\n"); + return buf; + } + + sprintf(buf, + "Handle : 0x%lX\n" + "Physical start address : 0x%lX\n" + "Size : %lu\n" + "Align : %lu\n" + "No of blocks allocated : %lu\n" + "Allocated size : %lu\n" + "No of free blocks : %lu\n" + "Free size : %lu\n" + "No of locks : %lu\n" + "No of locked : %lu\n" + "No of nodes : %lu\n", + (unsigned long) mem_heap, + (unsigned long) mem_heap_status.start_phys_addr, + (unsigned long) mem_heap_status.size, + (unsigned long) mem_heap_status.align, + (unsigned long) mem_heap_status.num_alloc, + (unsigned long) mem_heap_status.allocated_size, + (unsigned long) mem_heap_status.num_free, + (unsigned long) mem_heap_status.free_size, + (unsigned long) mem_heap_status.num_locks, + (unsigned long) mem_heap_status.num_locked, + (unsigned long) mem_heap_status.num_nodes); + + return buf; +} + +/* + * Print memory heap status on file + * (Use like "cat /debug/b2r2/mem/stats") + */ +static int debugfs_b2r2_mem_stats_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos) +{ + struct b2r2_mem_heap *mem_heap = filp->f_dentry->d_inode->i_private; + char Buf[400]; + size_t dev_size; + int ret = 0; + + get_b2r2_mem_stats(mem_heap, Buf); + dev_size = strlen(Buf); + + /* No more to read if offset != 0 */ + if (*f_pos > dev_size) + goto out; + + if (*f_pos + count > dev_size) + count = dev_size - *f_pos; + + if (copy_to_user(buf, Buf, count)) + ret = -EINVAL; + *f_pos += count; + ret = count; + +out: + return ret; +} + +/* debugfs file operations for the "stats" file */ +static const 
struct file_operations debugfs_b2r2_mem_stats_fops = { + .owner = THIS_MODULE, + .read = debugfs_b2r2_mem_stats_read, +}; + +/* read function for file in the "blocks" sub directory */ +static int debugfs_b2r2_mem_block_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos) +{ + struct b2r2_mem_block *mem_block = filp->f_dentry->d_inode->i_private; + char Buf[200]; + size_t dev_size; + int ret = 0; + + dev_size = sprintf(Buf, "offset: %08lX %s size: %8d " + "lock_count: %2d\n", + (unsigned long) mem_block->offset, + mem_block->free ? "free" : "allc", + mem_block->size, + mem_block->lock_count); + + /* No more to read if offset != 0 */ + if (*f_pos > dev_size) + goto out; + + if (*f_pos + count > dev_size) + count = dev_size - *f_pos; + + if (copy_to_user(buf, Buf, count)) + ret = -EINVAL; + *f_pos += count; + ret = count; + +out: + return ret; +} + +/* debugfs file operations for files in the "blocks" directory */ +static const struct file_operations debugfs_b2r2_mem_block_fops = { + .owner = THIS_MODULE, + .read = debugfs_b2r2_mem_block_read, +}; + +/* + * Create or update the debugfs directory entry for a file in the + * "blocks" directory (a memory allocation) + */ +void debugfs_create_mem_block_entry(struct b2r2_mem_block *mem_block, + struct dentry *parent) +{ + struct timespec tm = current_kernel_time(); + struct timespec atime = tm; + struct timespec mtime = tm; + struct timespec ctime = tm; + + if (!IS_ERR_OR_NULL(mem_block->debugfs_block)) { + atime = mem_block->debugfs_block->d_inode->i_atime; + ctime = mem_block->debugfs_block->d_inode->i_ctime; + debugfs_remove(mem_block->debugfs_block); + mem_block->debugfs_block = NULL; + } + + /* Add the block in debugfs */ + if (mem_block->free) + sprintf(mem_block->debugfs_fname, "%08lX free", + (unsigned long) mem_block->offset); + else { + sprintf(mem_block->debugfs_fname, "%08lX allc h:%08lX " + "lck:%d ", + (unsigned long) mem_block->offset, + (unsigned long) mem_block, + 
mem_block->lock_count); + } + + mem_block->debugfs_block = debugfs_create_file( + mem_block->debugfs_fname, + 0444, parent, mem_block, + &debugfs_b2r2_mem_block_fops); + if (!IS_ERR_OR_NULL(mem_block->debugfs_block)) { + mem_block->debugfs_block->d_inode->i_size = mem_block->size; + mem_block->debugfs_block->d_inode->i_atime = atime; + mem_block->debugfs_block->d_inode->i_mtime = mtime; + mem_block->debugfs_block->d_inode->i_ctime = ctime; + } +} +#endif /* CONFIG_DEBUG_FS */ + +/* Module initialization function */ +int b2r2_mem_init(struct b2r2_control *cont, + u32 heap_size, u32 align, u32 node_size) +{ + struct b2r2_mem_block *mem_block; + u32 aligned_size; + + dev_info(cont->dev, "%s: Creating heap for size %d bytes\n", + __func__, (int) heap_size); + + /* Align size */ + aligned_size = align_down(align, heap_size); + if (aligned_size == 0) + return -EINVAL; + + cont->mem_heap.start_virt_ptr = dma_alloc_coherent(cont->dev, + aligned_size, &(cont->mem_heap.start_phys_addr), GFP_KERNEL); + if (!cont->mem_heap.start_phys_addr || !cont->mem_heap.start_virt_ptr) { + printk(KERN_ERR "B2R2_MEM: Failed to allocate memory\n"); + return -ENOMEM; + } + + /* Initialize the heap */ + cont->mem_heap.size = aligned_size; + cont->mem_heap.align = align; + + INIT_LIST_HEAD(&cont->mem_heap.blocks); + +#ifdef CONFIG_DEBUG_FS + /* Register debugfs */ + if (!IS_ERR_OR_NULL(cont->mem_heap.debugfs_root_dir)) { + cont->mem_heap.debugfs_heap_stats = debugfs_create_file( + "stats", 0444, cont->mem_heap.debugfs_root_dir, + &cont->mem_heap, &debugfs_b2r2_mem_stats_fops); + cont->mem_heap.debugfs_dir_blocks = debugfs_create_dir( + "blocks", cont->mem_heap.debugfs_root_dir); + } +#endif + + /* Create the first _free_ memory block */ + mem_block = b2r2_mem_block_alloc(cont, 0, aligned_size, true); + if (!mem_block) { + dma_free_coherent(cont->dev, aligned_size, + cont->mem_heap.start_virt_ptr, + cont->mem_heap.start_phys_addr); + printk(KERN_ERR "B2R2_MEM: Failed to allocate memory\n"); + 
return -ENOMEM; + } + + /* Add the free block to the blocks list */ + list_add(&mem_block->list, &cont->mem_heap.blocks); + + /* Allocate separate heap for B2R2 nodes */ + cont->mem_heap.node_size = node_size; + cont->mem_heap.node_heap = dma_pool_create("b2r2_node_cache", + cont->dev, node_size, align, 4096); + if (!cont->mem_heap.node_heap) { + b2r2_mem_block_free(mem_block); + dma_free_coherent(cont->dev, aligned_size, + cont->mem_heap.start_virt_ptr, + cont->mem_heap.start_phys_addr); + printk(KERN_ERR "B2R2_MEM: Failed to allocate memory\n"); + return -ENOMEM; + } + + return 0; +} +EXPORT_SYMBOL(b2r2_mem_init); + +/* Module exit function */ +void b2r2_mem_exit(struct b2r2_control *cont) +{ + struct list_head *ptr; + + /* Free B2R2 node heap */ + dma_pool_destroy(cont->mem_heap.node_heap); + + list_for_each(ptr, &cont->mem_heap.blocks) { + struct b2r2_mem_block *mem_block = + list_entry(ptr, struct b2r2_mem_block, list); + + b2r2_mem_block_free(mem_block); + } + + dma_free_coherent(cont->dev, cont->mem_heap.size, + cont->mem_heap.start_virt_ptr, + cont->mem_heap.start_phys_addr); +} +EXPORT_SYMBOL(b2r2_mem_exit); + +/* Return status of the heap */ +static int b2r2_mem_heap_status(struct b2r2_mem_heap *mheap, + struct b2r2_mem_heap_status *mem_heap_status) +{ + struct list_head *ptr; + + if (!mheap || !mem_heap_status) + return -EINVAL; + memset(mem_heap_status, 0, sizeof(*mem_heap_status)); + + /* Lock the heap */ + spin_lock(&mheap->heap_lock); + + /* Fill in static info */ + mem_heap_status->start_phys_addr = mheap->start_phys_addr; + mem_heap_status->size = mheap->size; + mem_heap_status->align = mheap->align; + + list_for_each(ptr, &mheap->blocks) { + struct b2r2_mem_block *mem_block = + list_entry(ptr, struct b2r2_mem_block, list); + + if (mem_block->free) { + mem_heap_status->num_free++; + mem_heap_status->free_size += mem_block->size; + } else { + if (mem_block->lock_count) { + mem_heap_status->num_locked++; + mem_heap_status->num_locks += + 
mem_block->lock_count; + } + mem_heap_status->num_alloc++; + mem_heap_status->allocated_size += mem_block->size; + } + } + + spin_unlock(&mheap->heap_lock); + + return 0; +} +EXPORT_SYMBOL(b2r2_mem_heap_status); + +/* Internal: Allocate a housekeeping structure + * for an allocated or free memory block + */ +static struct b2r2_mem_block *b2r2_mem_block_alloc( + struct b2r2_control *cont, u32 offset, u32 size, bool free) +{ + struct b2r2_mem_block *mem_block = kmalloc( + sizeof(struct b2r2_mem_block), GFP_KERNEL); + + if (mem_block) { + mem_block->offset = offset; + mem_block->size = size; + mem_block->free = free; + mem_block->lock_count = 0; + + INIT_LIST_HEAD(&mem_block->list); + +#ifdef CONFIG_DEBUG_FS + mem_block->debugfs_block = NULL; + /* Add the block in debugfs */ + debugfs_create_mem_block_entry(mem_block, + cont->mem_heap.debugfs_dir_blocks); +#endif + } + + return mem_block; +} + +/* Internal: Release housekeeping structure */ +static void b2r2_mem_block_free(struct b2r2_mem_block *mem_block) +{ + if (mem_block) { +#ifdef CONFIG_DEBUG_FS + debugfs_remove(mem_block->debugfs_block); +#endif + kfree(mem_block); + } +} + +/* Allocate a block from the heap */ +int b2r2_mem_alloc(struct b2r2_control *cont, u32 requested_size, + u32 *returned_size, u32 *mem_handle) +{ + int ret = 0; + struct list_head *ptr; + struct b2r2_mem_block *found_mem_block = NULL; + u32 aligned_size; + + if (!mem_handle) + return -EINVAL; + + printk(KERN_INFO "%s: size=%d\n", __func__, requested_size); + + *mem_handle = 0; + + /* Lock the heap */ + spin_lock(&cont->mem_heap.heap_lock); + + aligned_size = align_up(cont->mem_heap.align, requested_size); + /* Try to find the best matching free block of suitable size */ + list_for_each(ptr, &cont->mem_heap.blocks) { + struct b2r2_mem_block *mem_block = + list_entry(ptr, struct b2r2_mem_block, list); + + if (mem_block->free && mem_block->size >= aligned_size && + (!found_mem_block || + mem_block->size < found_mem_block->size)) { + 
found_mem_block = mem_block; + if (found_mem_block->size == aligned_size) + break; + } + } + + if (found_mem_block) { + struct b2r2_mem_block *new_block + = b2r2_mem_block_alloc(cont, + found_mem_block->offset, + requested_size, false); + + if (new_block) { + /* Insert the new block before the found block */ + list_add_tail(&new_block->list, + &found_mem_block->list); + + /* Split the free block */ + found_mem_block->offset += aligned_size; + found_mem_block->size -= aligned_size; + + if (found_mem_block->size == 0) + b2r2_mem_block_free(found_mem_block); + else { +#ifdef CONFIG_DEBUG_FS + debugfs_create_mem_block_entry( + found_mem_block, + cont->mem_heap.debugfs_dir_blocks); +#endif + } + + *mem_handle = (u32) new_block; + *returned_size = aligned_size; + } else { + ret = -ENOMEM; + } + } else + ret = -ENOMEM; + + if (ret != 0) { + *returned_size = 0; + *mem_handle = (u32) 0; + } + + /* Unlock */ + spin_unlock(&cont->mem_heap.heap_lock); + + return ret; +} +EXPORT_SYMBOL(b2r2_mem_alloc); + +/* Free the allocated block */ +int b2r2_mem_free(struct b2r2_control *cont, u32 mem_handle) +{ + int ret = 0; + struct b2r2_mem_block *mem_block = (struct b2r2_mem_block *) mem_handle; + + if (!mem_block) + return -EINVAL; + + /* Lock the heap */ + spin_lock(&cont->mem_heap.heap_lock); + + if (!ret && mem_block->free) + ret = -EINVAL; + + if (!ret) { + printk(KERN_INFO "%s: freeing block 0x%p\n", __func__, mem_block); + /* Release the block */ + + mem_block->free = true; + mem_block->size = align_up(cont->mem_heap.align, + mem_block->size); + + /* Join with previous block if possible */ + if (mem_block->list.prev != &cont->mem_heap.blocks) { + struct b2r2_mem_block *prev_block = + list_entry(mem_block->list.prev, + struct b2r2_mem_block, list); + + if (prev_block->free && + (prev_block->offset + prev_block->size) == + mem_block->offset) { + mem_block->offset = prev_block->offset; + mem_block->size += prev_block->size; + + b2r2_mem_block_free(prev_block); + } + } + + /* Join 
with next block if possible */ + if (mem_block->list.next != &cont->mem_heap.blocks) { + struct b2r2_mem_block *next_block + = list_entry(mem_block->list.next, + struct b2r2_mem_block, + list); + + if (next_block->free && + (mem_block->offset + mem_block->size) == + next_block->offset) { + mem_block->size += next_block->size; + + b2r2_mem_block_free(next_block); + } + } +#ifdef CONFIG_DEBUG_FS + debugfs_create_mem_block_entry(mem_block, + cont->mem_heap.debugfs_dir_blocks); +#endif + } + + /* Unlock */ + spin_unlock(&cont->mem_heap.heap_lock); + + return ret; +} +EXPORT_SYMBOL(b2r2_mem_free); + +/* Lock the allocated block in memory */ +int b2r2_mem_lock(struct b2r2_control *cont, u32 mem_handle, + u32 *phys_addr, void **virt_ptr, u32 *size) +{ + struct b2r2_mem_block *mem_block = + (struct b2r2_mem_block *) mem_handle; + + if (!mem_block) + return -EINVAL; + + /* Lock the heap */ + spin_lock(&cont->mem_heap.heap_lock); + + mem_block->lock_count++; + + if (phys_addr) + *phys_addr = cont->mem_heap.start_phys_addr + mem_block->offset; + if (virt_ptr) + *virt_ptr = (char *) cont->mem_heap.start_virt_ptr + + mem_block->offset; + if (size) + *size = align_up(cont->mem_heap.align, mem_block->size); +#ifdef CONFIG_DEBUG_FS + debugfs_create_mem_block_entry(mem_block, + cont->mem_heap.debugfs_dir_blocks); +#endif + + spin_unlock(&cont->mem_heap.heap_lock); + + return 0; +} +EXPORT_SYMBOL(b2r2_mem_lock); + +/* Unlock the allocated block in memory */ +int b2r2_mem_unlock(struct b2r2_control *cont, u32 mem_handle) +{ + struct b2r2_mem_block *mem_block = + (struct b2r2_mem_block *) mem_handle; + + if (!mem_block) + return -EINVAL; + + /* Lock the heap */ + spin_lock(&cont->mem_heap.heap_lock); + + mem_block->lock_count--; + + spin_unlock(&cont->mem_heap.heap_lock); + + /* debugfs will be updated in release */ + return 0; +/* return b2r2_mem_free(mem_handle);*/ +} +EXPORT_SYMBOL(b2r2_mem_unlock); + +/* Allocate one or more b2r2 nodes from DMA pool */ +int b2r2_node_alloc(struct 
b2r2_control *cont, u32 num_nodes, + struct b2r2_node **first_node) +{ + int i; + int ret = 0; + u32 physical_address; + struct b2r2_node *first_node_ptr; + struct b2r2_node *node_ptr; + + /* Check input parameters */ + if ((num_nodes <= 0) || !first_node) { + dev_err(cont->dev, + "B2R2_MEM: Invalid parameter for b2r2_node_alloc, " + "num_nodes=%d, first_node=%ld\n", + (int) num_nodes, (long) first_node); + return -EINVAL; + } + + /* Allocate the first node */ + first_node_ptr = dma_pool_alloc(cont->mem_heap.node_heap, + GFP_DMA | GFP_KERNEL, &physical_address); + if (!first_node_ptr) { + dev_err(cont->dev, + "B2R2_MEM: Failed to allocate memory for node\n"); + return -ENOMEM; + } + + /* Initialize first node */ + first_node_ptr->next = NULL; + first_node_ptr->physical_address = physical_address + + offsetof(struct b2r2_node, node); + + /* Allocate and initialize remaining nodes, */ + /* and link them into a list */ + for (i = 1, node_ptr = first_node_ptr; i < num_nodes; i++) { + node_ptr->next = dma_pool_alloc(cont->mem_heap.node_heap, + GFP_DMA | GFP_KERNEL, &physical_address); + if (node_ptr->next) { + node_ptr = node_ptr->next; + node_ptr->next = NULL; + node_ptr->physical_address = physical_address + + offsetof(struct b2r2_node, node); + } else { + printk(KERN_ERR "B2R2_MEM: Failed to allocate memory for node\n"); + ret = -ENOMEM; + break; + } + } + + /* If all nodes were allocated successfully, */ + /* return the first node */ + if (!ret) + *first_node = first_node_ptr; + else + b2r2_node_free(cont, first_node_ptr); + + return ret; +} +EXPORT_SYMBOL(b2r2_node_alloc); + +/* Free a linked list of b2r2 nodes */ +void b2r2_node_free(struct b2r2_control *cont, struct b2r2_node *first_node) +{ + struct b2r2_node *current_node = first_node; + struct b2r2_node *next_node = NULL; + + /* Traverse the linked list and free the nodes */ + while (current_node != NULL) { + next_node = current_node->next; + dma_pool_free(cont->mem_heap.node_heap, current_node, + 
current_node->physical_address - + offsetof(struct b2r2_node, node)); + current_node = next_node; + } +} +EXPORT_SYMBOL(b2r2_node_free); + +MODULE_AUTHOR("Robert Lind <robert.lind@ericsson.com"); +MODULE_DESCRIPTION("Ericsson AB B2R2 physical memory driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/b2r2/b2r2_mem_alloc.h b/drivers/video/b2r2/b2r2_mem_alloc.h new file mode 100644 index 00000000000..4fd1e66abca --- /dev/null +++ b/drivers/video/b2r2/b2r2_mem_alloc.h @@ -0,0 +1,161 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 internal Memory allocator + * + * Author: Robert Lind <robert.lind@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#ifndef __B2R2_MEM_ALLOC_H +#define __B2R2_MEM_ALLOC_H + +#include "b2r2_internal.h" + + +/** + * struct b2r2_mem_heap_status - Information about current state of the heap + * + * @start_phys_addr: Physical address of the the memory area + * @size: Size of the memory area + * @align: Alignment of start and allocation sizes (in bytes). 
+ * @num_alloc: Number of memory allocations + * @allocated_size: Size allocated (sum of requested sizes) + * @num_free: Number of free blocks (fragments) + * @free_size: Free size available for allocation + * @num_locks: Sum of number of number of locks on memory allocations + * @num_locked: Number of locked memory allocations + * @num_nodes: Number of node allocations + * + **/ +struct b2r2_mem_heap_status { + u32 start_phys_addr; + u32 size; + u32 align; + u32 num_alloc; + u32 allocated_size; + u32 num_free; + u32 free_size; + u32 num_locks; + u32 num_locked; + u32 num_nodes; +}; + +/** + * struct b2r2_mem_block - Represents one block of b2r2 + * physical memory, free or allocated + * + * @list: For membership in list + * @offset: Offset in b2r2 physical memory area (aligned) + * @size: Size of the object (requested size if busy, else actual) + * @free: True if the block is free + * @lock_count: Lock count + * @debugfs_fname: Debugfs file name + * @debugfs_block: Debugfs dir entry for the block + */ +struct b2r2_mem_block { + struct list_head list; + u32 offset; + u32 size; + bool free; + u32 lock_count; +#ifdef CONFIG_DEBUG_FS + char debugfs_fname[80]; + struct dentry *debugfs_block; +#endif +}; + + +/* B2R2 memory API (kernel) */ + +/** + * b2r2_mem_init() - Initializes the B2R2 memory manager + * @dev: Pointer to device to use for allocating the memory heap + * @heap_size: Size of the heap (in bytes) + * @align: Alignment to use for memory allocations on heap (in bytes) + * @node_size: Size of each B2R2 node (in bytes) + * + * Returns 0 if success, else negative error code + **/ +int b2r2_mem_init(struct b2r2_control *cont, + u32 heap_size, u32 align, u32 node_size); + +/** + * b2r2_mem_exit() - Cleans up the B2R2 memory manager + * + **/ +void b2r2_mem_exit(struct b2r2_control *cont); + +/** + * b2r2_mem_alloc() - Allocates memory block from physical memory heap + * @requested_size: Requested size + * @returned_size: Actual size of memory block. 
Might be adjusted due to + * alignment but is always >= requested size if function + * succeeds + * @mem_handle: Returned memory handle + * + * All memory allocations are movable when not locked. + * Returns 0 if OK else negative error value + **/ +int b2r2_mem_alloc(struct b2r2_control *cont, u32 requested_size, + u32 *returned_size, u32 *mem_handle); + +/** + * b2r2_mem_free() - Frees an allocation + * @mem_handle: Memory handle + * + * Returns 0 if OK else negative error value + **/ +int b2r2_mem_free(struct b2r2_control *cont, u32 mem_handle); + +/** + * b2r2_mem_lock() - Lock memory in memory and return physical address + * @mem_handle: Memory handle + * @phys_addr: Returned physical address to start of memory allocation. + * May be NULL. + * @virt_ptr: Returned virtual address pointer to start of memory allocation. + * May be NULL. + * @size: Returned size of memory allocation. May be NULL. + * + * The adress of the memory allocation is locked and the physical address + * is returned. + * The lock count is incremented by one. + * You need to call b2r2_mem_unlock once for each call to + * b2r2_mem_lock. + * Returns 0 if OK else negative error value + **/ +int b2r2_mem_lock(struct b2r2_control *cont, u32 mem_handle, + u32 *phys_addr, void **virt_ptr, u32 *size); + +/** + * b2r2_mem_unlock() - Unlock previously locked memory + * @mem_handle: Memory handle + * + * Decrements lock count. When lock count reaches 0 the + * memory area is movable again. 
+ * Returns 0 if OK else negative error value + **/ +int b2r2_mem_unlock(struct b2r2_control *cont, u32 mem_handle); + +/** + * b2r2_node_alloc() - Allocates B2R2 node from physical memory heap + * @num_nodes: Number of linked nodes to allocate + * @first_node: Returned pointer to first node in linked list + * + * Returns 0 if OK else negative error value + **/ +int b2r2_node_alloc(struct b2r2_control *cont, u32 num_nodes, + struct b2r2_node **first_node); + +/** + * b2r2_node_free() - Frees a linked list of allocated B2R2 nodes + * @first_node: Pointer to first node in linked list + * + * Returns 0 if OK else negative error value + **/ +void b2r2_node_free(struct b2r2_control *cont, struct b2r2_node *first_node); + + +#endif /* __B2R2_MEM_ALLOC_H */ diff --git a/drivers/video/b2r2/b2r2_node_gen.c b/drivers/video/b2r2/b2r2_node_gen.c new file mode 100644 index 00000000000..1f48bac6fe7 --- /dev/null +++ b/drivers/video/b2r2/b2r2_node_gen.c @@ -0,0 +1,83 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 node generator + * + * Author: Robert Fekete <robert.fekete@stericsson.com> + * Author: Paul Wannback + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <asm/dma-mapping.h> +#include "b2r2_internal.h" + +static void free_nodes(struct b2r2_control *cont, + struct b2r2_node *first_node) +{ + struct b2r2_node *node = first_node; + int no_of_nodes = 0; + + while (node) { + no_of_nodes++; + node = node->next; + } + + dma_free_coherent(cont->dev, + no_of_nodes * sizeof(struct b2r2_node), + first_node, + first_node->physical_address - + offsetof(struct b2r2_node, node)); +} + +struct b2r2_node *b2r2_blt_alloc_nodes(struct b2r2_control *cont, + int no_of_nodes) +{ + u32 physical_address; + struct b2r2_node *nodes; + struct b2r2_node *tmpnode; + + if (no_of_nodes <= 0) { + dev_err(cont->dev, "%s: Wrong number of nodes (%d)", + __func__, no_of_nodes); + return NULL; + } + + /* Allocate the memory */ + nodes = (struct b2r2_node *) dma_alloc_coherent(cont->dev, + no_of_nodes * sizeof(struct b2r2_node), + &physical_address, GFP_DMA | GFP_KERNEL); + + if (nodes == NULL) { + dev_err(cont->dev, + "%s: Failed to alloc memory for nodes", + __func__); + return NULL; + } + + /* Build the linked list */ + tmpnode = nodes; + physical_address += offsetof(struct b2r2_node, node); + while (no_of_nodes--) { + tmpnode->physical_address = physical_address; + if (no_of_nodes) + tmpnode->next = tmpnode + 1; + else + tmpnode->next = NULL; + + tmpnode++; + physical_address += sizeof(struct b2r2_node); + } + + return nodes; +} + +void b2r2_blt_free_nodes(struct b2r2_control *cont, + struct b2r2_node *first_node) +{ + free_nodes(cont, first_node); +} + diff --git a/drivers/video/b2r2/b2r2_node_split.c b/drivers/video/b2r2/b2r2_node_split.c new file mode 100644 index 00000000000..b2fb07580ca --- /dev/null +++ b/drivers/video/b2r2/b2r2_node_split.c @@ -0,0 +1,3033 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 node splitter + * + * Author: Fredrik Allansson <fredrik.allansson@stericsson.com> + * for ST-Ericsson. 
+ * + * License terms: GNU General Public License (GPL), version 2. + */ + +#include "b2r2_debug.h" +#include "b2r2_node_split.h" +#include "b2r2_internal.h" +#include "b2r2_hw_convert.h" +#include "b2r2_filters.h" +#include "b2r2_utils.h" + +#include <linux/kernel.h> + +/* + * Macros and constants + */ + +#define INSTANCES_DEFAULT_SIZE 10 +#define INSTANCES_GROW_SIZE 5 + +/* + * Internal types + */ + + +/* + * Global variables + */ + + +/* + * Forward declaration of private functions + */ +static int analyze_fmt_conv(struct b2r2_control *cont, + struct b2r2_node_split_buf *src, + struct b2r2_node_split_buf *dst, + const u32 **vmx, u32 *node_count, + bool fullrange); +static int analyze_color_fill(struct b2r2_node_split_job *this, + const struct b2r2_blt_request *req, u32 *node_count); +static int analyze_copy(struct b2r2_node_split_job *this, + const struct b2r2_blt_request *req, u32 *node_count, + u32 *buf_count); +static int analyze_scaling(struct b2r2_node_split_job *this, + const struct b2r2_blt_request *req, u32 *node_count, + u32 *buf_count); +static int analyze_rotate(struct b2r2_node_split_job *this, + const struct b2r2_blt_request *req, u32 *node_count, + u32 *buf_count); +static int analyze_transform(struct b2r2_node_split_job *this, + const struct b2r2_blt_request *req, u32 *node_count, + u32 *buf_count); +static int analyze_rot_scale(struct b2r2_node_split_job *this, + const struct b2r2_blt_request *req, u32 *node_count, + u32 *buf_count); +static int analyze_scale_factors(struct b2r2_control *cont, + struct b2r2_node_split_job *this); + +static void configure_src(struct b2r2_control *cont, struct b2r2_node *node, + struct b2r2_node_split_buf *src, const u32 *ivmx); +static void configure_bg(struct b2r2_control *cont, struct b2r2_node *node, + struct b2r2_node_split_buf *bg, bool swap_fg_bg); +static int configure_dst(struct b2r2_control *cont, struct b2r2_node *node, + struct b2r2_node_split_buf *dst, const u32 *ivmx, + struct b2r2_node **next); 
+static void configure_blend(struct b2r2_control *cont, struct b2r2_node *node, + u32 flags, u32 global_alpha); +static void configure_clip(struct b2r2_control *cont, struct b2r2_node *node, + struct b2r2_blt_rect *clip_rect); + +static int configure_tile(struct b2r2_control *cont, + struct b2r2_node_split_job *this, struct b2r2_node *node, + struct b2r2_node **next); +static void configure_direct_fill(struct b2r2_control *cont, + struct b2r2_node *node, u32 color, + struct b2r2_node_split_buf *dst, + struct b2r2_node **next); +static int configure_fill(struct b2r2_control *cont, + struct b2r2_node *node, u32 color, enum b2r2_blt_fmt fmt, + struct b2r2_node_split_buf *dst, const u32 *ivmx, + struct b2r2_node **next); +static void configure_direct_copy(struct b2r2_control *cont, + struct b2r2_node *node, struct b2r2_node_split_buf *src, + struct b2r2_node_split_buf *dst, struct b2r2_node **next); +static int configure_copy(struct b2r2_control *cont, + struct b2r2_node *node, struct b2r2_node_split_buf *src, + struct b2r2_node_split_buf *dst, const u32 *ivmx, + struct b2r2_node **next, + struct b2r2_node_split_job *this); +static int configure_rotate(struct b2r2_control *cont, + struct b2r2_node *node, struct b2r2_node_split_buf *src, + struct b2r2_node_split_buf *dst, const u32 *ivmx, + struct b2r2_node **next, + struct b2r2_node_split_job *this); +static int configure_scale(struct b2r2_control *cont, + struct b2r2_node *node, struct b2r2_node_split_buf *src, + struct b2r2_node_split_buf *dst, u16 h_rsf, u16 v_rsf, + const u32 *ivmx, struct b2r2_node **next, + struct b2r2_node_split_job *this); +static int configure_rot_scale(struct b2r2_control *cont, + struct b2r2_node_split_job *this, struct b2r2_node *node, + struct b2r2_node **next); + +static int check_rect(struct b2r2_control *cont, + const struct b2r2_blt_img *img, + const struct b2r2_blt_rect *rect, + const struct b2r2_blt_rect *clip); +static void set_buf(struct b2r2_control *cont, + struct 
b2r2_node_split_buf *buf, + u32 addr, const struct b2r2_blt_img *img, + const struct b2r2_blt_rect *rect, bool color_fill, u32 color); +static int setup_tmp_buf(struct b2r2_control *cont, + struct b2r2_node_split_buf *this, u32 max_size, + enum b2r2_blt_fmt pref_fmt, u32 pref_width, u32 pref_height); + +static bool is_transform(const struct b2r2_blt_request *req); +static s32 rescale(struct b2r2_control *cont, s32 dim, u16 sf); +static s32 inv_rescale(s32 dim, u16 sf); + +static void set_target(struct b2r2_node *node, u32 addr, + struct b2r2_node_split_buf *buf); +static void set_src(struct b2r2_src_config *src, u32 addr, + struct b2r2_node_split_buf *buf); +static void set_src_1(struct b2r2_node *node, u32 addr, + struct b2r2_node_split_buf *buf); +static void set_src_2(struct b2r2_node *node, u32 addr, + struct b2r2_node_split_buf *buf); +static void set_src_3(struct b2r2_node *node, u32 addr, + struct b2r2_node_split_buf *buf); +static void set_ivmx(struct b2r2_node *node, const u32 *vmx_values); + +static void reset_nodes(struct b2r2_node *node); + +static bool bg_format_require_ivmx(enum b2r2_blt_fmt bg_fmt, + enum b2r2_blt_fmt dst_fmt); + +/* + * Public functions + */ + +/** + * b2r2_node_split_analyze() - analyzes the request + */ +int b2r2_node_split_analyze(const struct b2r2_blt_request *req, + u32 max_buf_size, u32 *node_count, struct b2r2_work_buf **bufs, + u32 *buf_count, struct b2r2_node_split_job *this) +{ + int ret; + bool color_fill; + struct b2r2_control *cont = req->instance->control; + + b2r2_log_info(cont->dev, "%s\n", __func__); + + memset(this, 0, sizeof(*this)); + + /* Copy parameters */ + this->flags = req->user_req.flags; + this->transform = req->user_req.transform; + this->max_buf_size = max_buf_size; + this->global_alpha = req->user_req.global_alpha; + this->buf_count = 0; + this->node_count = 0; + + if (this->flags & B2R2_BLT_FLAG_BLUR) { + ret = -ENOSYS; + goto unsupported; + } + + /* Unsupported formats on src */ + switch 
(req->user_req.src_img.fmt) { + case B2R2_BLT_FMT_24_BIT_YUV888: + case B2R2_BLT_FMT_32_BIT_AYUV8888: + case B2R2_BLT_FMT_24_BIT_VUY888: + case B2R2_BLT_FMT_32_BIT_VUYA8888: + if (b2r2_is_bgr_fmt(req->user_req.dst_img.fmt)) { + ret = -ENOSYS; + goto unsupported; + } + break; + default: + break; + } + + /* Unsupported formats on dst */ + switch (req->user_req.dst_img.fmt) { + case B2R2_BLT_FMT_24_BIT_YUV888: + case B2R2_BLT_FMT_32_BIT_AYUV8888: + case B2R2_BLT_FMT_24_BIT_VUY888: + case B2R2_BLT_FMT_32_BIT_VUYA8888: + if (b2r2_is_bgr_fmt(req->user_req.src_img.fmt)) { + ret = -ENOSYS; + goto unsupported; + } + break; + default: + break; + } + + /* Unsupported formats on bg */ + if (this->flags & B2R2_BLT_FLAG_BG_BLEND) + /* + * There are no ivmx on source 1, so check that there is no + * such requirement on the background to destination format + * conversion. This check is sufficient since the node splitter + * currently does not support destination ivmx. That fact also + * removes the source format as a parameter when checking the + * background format. 
+ */ + if (bg_format_require_ivmx(req->user_req.bg_img.fmt, + req->user_req.dst_img.fmt)) { + ret = -ENOSYS; + goto unsupported; + } + + if ((this->flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY) && + (b2r2_is_yuv_fmt(req->user_req.src_img.fmt) || + req->user_req.src_img.fmt == B2R2_BLT_FMT_1_BIT_A1 || + req->user_req.src_img.fmt == B2R2_BLT_FMT_8_BIT_A8)) { + b2r2_log_warn(cont->dev, "%s: Unsupported: source color keying " + "with YUV or pure alpha formats.\n", __func__); + ret = -ENOSYS; + goto unsupported; + } + + if (this->flags & (B2R2_BLT_FLAG_DEST_COLOR_KEY | + B2R2_BLT_FLAG_SOURCE_MASK)) { + b2r2_log_warn(cont->dev, "%s: Unsupported: source mask, " + "destination color keying.\n", __func__); + ret = -ENOSYS; + goto unsupported; + } + + if ((req->user_req.flags & B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) && + req->user_req.clut == NULL) { + b2r2_log_warn(cont->dev, "%s: Invalid request: no table " + "specified for CLUT color correction.\n", + __func__); + return -EINVAL; + } + + /* Check for color fill */ + color_fill = (this->flags & (B2R2_BLT_FLAG_SOURCE_FILL | + B2R2_BLT_FLAG_SOURCE_FILL_RAW)) != 0; + + /* Configure the source and destination buffers */ + set_buf(cont, &this->src, req->src_resolved.physical_address, + &req->user_req.src_img, &req->user_req.src_rect, + color_fill, req->user_req.src_color); + + if (this->flags & B2R2_BLT_FLAG_BG_BLEND) { + set_buf(cont, &this->bg, req->bg_resolved.physical_address, + &req->user_req.bg_img, &req->user_req.bg_rect, + false, 0); + } + + set_buf(cont, &this->dst, req->dst_resolved.physical_address, + &req->user_req.dst_img, &req->user_req.dst_rect, false, + 0); + + b2r2_log_info(cont->dev, "%s:\n" + "\t\tsrc.rect=(%4d, %4d, %4d, %4d)\t" + "bg.rect=(%4d, %4d, %4d, %4d)\t" + "dst.rect=(%4d, %4d, %4d, %4d)\n", __func__, this->src.rect.x, + this->src.rect.y, this->src.rect.width, this->src.rect.height, + this->bg.rect.x, this->bg.rect.y, this->bg.rect.width, + this->bg.rect.height, this->dst.rect.x, this->dst.rect.y, + 
this->dst.rect.width, this->dst.rect.height); + + if (this->flags & B2R2_BLT_FLAG_DITHER) + this->dst.dither = B2R2_TTY_RGB_ROUND_DITHER; + + if (this->flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY) + this->flag_param = req->user_req.src_color; + + /* Check for blending */ + if ((this->flags & B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND) && + (this->global_alpha != 255)) + this->blend = true; + else if (this->flags & B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND) + this->blend = (color_fill && b2r2_fmt_has_alpha(this->dst.fmt)) || + b2r2_fmt_has_alpha(this->src.fmt); + else if (this->flags & B2R2_BLT_FLAG_BG_BLEND) + this->blend = true; + + /* Check for full range YUV conversion */ + if (this->flags & B2R2_BLT_FLAG_FULL_RANGE_YUV) + this->fullrange = true; + + if (this->blend && this->src.type == B2R2_FMT_TYPE_PLANAR) { + b2r2_log_warn(cont->dev, "%s: Unsupported: blend with planar" + " source\n", __func__); + ret = -ENOSYS; + goto unsupported; + } + + /* Check for clipping */ + this->clip = (this->flags & B2R2_BLT_FLAG_DESTINATION_CLIP) != 0; + if (this->clip) { + s32 l = req->user_req.dst_clip_rect.x; + s32 r = l + req->user_req.dst_clip_rect.width; + s32 t = req->user_req.dst_clip_rect.y; + s32 b = t + req->user_req.dst_clip_rect.height; + + /* Intersect the clip and buffer rects */ + if (l < 0) + l = 0; + if (r > req->user_req.dst_img.width) + r = req->user_req.dst_img.width; + if (t < 0) + t = 0; + if (b > req->user_req.dst_img.height) + b = req->user_req.dst_img.height; + + this->clip_rect.x = l; + this->clip_rect.y = t; + this->clip_rect.width = r - l; + this->clip_rect.height = b - t; + } else { + /* Set the clip rectangle to the buffer bounds */ + this->clip_rect.x = 0; + this->clip_rect.y = 0; + this->clip_rect.width = req->user_req.dst_img.width; + this->clip_rect.height = req->user_req.dst_img.height; + } + + /* Validate the destination */ + ret = check_rect(cont, &req->user_req.dst_img, &req->user_req.dst_rect, + &this->clip_rect); + if (ret < 0) + goto error; + + /* Validate the 
source (if not color fill) */ + if (!color_fill) { + ret = check_rect(cont, &req->user_req.src_img, + &req->user_req.src_rect, NULL); + if (ret < 0) + goto error; + } + + /* Validate the background source */ + if (this->flags & B2R2_BLT_FLAG_BG_BLEND) { + ret = check_rect(cont, &req->user_req.bg_img, + &req->user_req.bg_rect, NULL); + if (ret < 0) + goto error; + } + + /* Do the analysis depending on the type of operation */ + if (color_fill) { + ret = analyze_color_fill(this, req, &this->node_count); + } else { + + bool upsample; + bool downsample; + + /* + * YUV formats that are non-raster, non-yuv444 needs to be + * up (or down) sampled using the resizer. + * + * NOTE: The resizer needs to be enabled for YUV444 as well, + * even though there is no upsampling. This is most + * likely a bug in the hardware. + */ + upsample = this->src.type != B2R2_FMT_TYPE_RASTER && + b2r2_is_yuv_fmt(this->src.fmt); + downsample = this->dst.type != B2R2_FMT_TYPE_RASTER && + b2r2_is_yuv_fmt(this->dst.fmt); + + if (is_transform(req) || upsample || downsample) + ret = analyze_transform(this, req, &this->node_count, + &this->buf_count); + else + ret = analyze_copy(this, req, &this->node_count, + &this->buf_count); + } + + if (ret == -ENOSYS) { + goto unsupported; + } else if (ret < 0) { + b2r2_log_warn(cont->dev, "%s: Analysis failed!\n", __func__); + goto error; + } + + /* Setup the origin and movement of the destination window */ + if (this->dst.hso == B2R2_TY_HSO_RIGHT_TO_LEFT) { + this->dst.dx = -this->dst.win.width; + this->dst.win.x = this->dst.rect.x + this->dst.rect.width - 1; + } else { + this->dst.dx = this->dst.win.width; + this->dst.win.x = this->dst.rect.x; + } + if (this->dst.vso == B2R2_TY_VSO_BOTTOM_TO_TOP) { + this->dst.dy = -this->dst.win.height; + this->dst.win.y = this->dst.rect.y + this->dst.rect.height - 1; + } else { + this->dst.dy = this->dst.win.height; + this->dst.win.y = this->dst.rect.y; + } + + *buf_count = this->buf_count; + *node_count = 
this->node_count; + + if (this->buf_count > 0) + *bufs = &this->work_bufs[0]; + + b2r2_log_info(cont->dev, "%s: dst.win=(%d, %d, %d, %d), " + "dst.dx=%d, dst.dy=%d\n", __func__, this->dst.win.x, + this->dst.win.y, this->dst.win.width, this->dst.win.height, + this->dst.dx, this->dst.dy); + if (this->buf_count > 0) + b2r2_log_info(cont->dev, "%s: buf_count=%d, buf_size=%d, " + "node_count=%d\n", __func__, *buf_count, + bufs[0]->size, *node_count); + else + b2r2_log_info(cont->dev, "%s: buf_count=%d, node_count=%d\n", + __func__, *buf_count, *node_count); + + return 0; + +error: + b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__); +unsupported: + return ret; +} + +/** + * b2r2_node_split_configure() - configures the node list + */ +int b2r2_node_split_configure(struct b2r2_control *cont, + struct b2r2_node_split_job *this, struct b2r2_node *first) +{ + int ret; + + struct b2r2_node_split_buf *dst = &this->dst; + struct b2r2_node *node = first; + + u32 x_pixels = 0; + u32 y_pixels = 0; + + reset_nodes(node); + + while (y_pixels < dst->rect.height) { + s32 dst_x = dst->win.x; + s32 dst_w = dst->win.width; + + /* Clamp window height */ + if (dst->win.height > dst->rect.height - y_pixels) + dst->win.height = dst->rect.height - y_pixels; + + while (x_pixels < dst->rect.width) { + + /* Clamp window width */ + if (dst_w > dst->rect.width - x_pixels) + dst->win.width = dst->rect.width - x_pixels; + + ret = configure_tile(cont, this, node, &node); + if (ret < 0) + goto error; + + dst->win.x += dst->dx; + x_pixels += max(dst->dx, -dst->dx); + b2r2_log_info(cont->dev, "%s: x_pixels=%d\n", + __func__, x_pixels); + } + + dst->win.y += dst->dy; + y_pixels += max(dst->dy, -dst->dy); + + dst->win.x = dst_x; + dst->win.width = dst_w; + x_pixels = 0; + + b2r2_log_info(cont->dev, "%s: y_pixels=%d\n", + __func__, y_pixels); + } + + return 0; + +error: + b2r2_log_warn(cont->dev, "%s: error!\n", __func__); + return ret; +} + +/** + * b2r2_node_split_assign_buffers() - assigns temporary 
buffers to the node list + */ +int b2r2_node_split_assign_buffers(struct b2r2_control *cont, + struct b2r2_node_split_job *this, struct b2r2_node *first, + struct b2r2_work_buf *bufs, u32 buf_count) +{ + struct b2r2_node *node = first; + + while (node != NULL) { + /* The indices are offset by one */ + if (node->dst_tmp_index) { + BUG_ON(node->dst_tmp_index > buf_count); + + b2r2_log_info(cont->dev, "%s: assigning buf %d as " + "dst\n", __func__, node->dst_tmp_index); + + node->node.GROUP1.B2R2_TBA = + bufs[node->dst_tmp_index - 1].phys_addr; + } + if (node->src_tmp_index) { + u32 addr = bufs[node->src_tmp_index - 1].phys_addr; + + b2r2_log_info(cont->dev, "%s: assigning buf %d as src " + "%d ", __func__, node->src_tmp_index, + node->src_index); + + BUG_ON(node->src_tmp_index > buf_count); + + switch (node->src_index) { + case 1: + b2r2_log_info(cont->dev, "1\n"); + node->node.GROUP3.B2R2_SBA = addr; + break; + case 2: + b2r2_log_info(cont->dev, "2\n"); + node->node.GROUP4.B2R2_SBA = addr; + break; + case 3: + b2r2_log_info(cont->dev, "3\n"); + node->node.GROUP5.B2R2_SBA = addr; + break; + default: + BUG_ON(1); + break; + } + } + + b2r2_log_info(cont->dev, "%s: tba=%p\tsba=%p\n", __func__, + (void *)node->node.GROUP1.B2R2_TBA, + (void *)node->node.GROUP4.B2R2_SBA); + + node = node->next; + } + + return 0; +} + +/** + * b2r2_node_split_unassign_buffers() - releases temporary buffers + */ +void b2r2_node_split_unassign_buffers(struct b2r2_control *cont, + struct b2r2_node_split_job *this, struct b2r2_node *first) +{ + return; +} + +/** + * b2r2_node_split_cancel() - cancels and releases a job instance + */ +void b2r2_node_split_cancel(struct b2r2_control *cont, + struct b2r2_node_split_job *this) +{ + memset(this, 0, sizeof(*this)); + + return; +} + +static int check_rect(struct b2r2_control *cont, + const struct b2r2_blt_img *img, + const struct b2r2_blt_rect *rect, + const struct b2r2_blt_rect *clip) +{ + int ret; + + s32 l, r, b, t; + + /* Check rectangle 
dimensions*/ + if ((rect->width <= 0) || (rect->height <= 0)) { + b2r2_log_warn(cont->dev, "%s: Illegal rect (%d, %d, %d, %d)\n", + __func__, rect->x, rect->y, rect->width, + rect->height); + ret = -EINVAL; + goto error; + } + + /* If we are using clip we should only look at the intersection of the + rects */ + if (clip) { + l = max(rect->x, clip->x); + t = max(rect->y, clip->y); + r = min(rect->x + rect->width, clip->x + clip->width); + b = min(rect->y + rect->height, clip->y + clip->height); + } else { + l = rect->x; + t = rect->y; + r = rect->x + rect->width; + b = rect->y + rect->height; + } + + /* Check so that the rect isn't outside the buffer */ + if ((l < 0) || (t < 0) || (l >= img->width) || (t >= img->height)) { + b2r2_log_warn(cont->dev, "%s: rect origin outside buffer\n", + __func__); + ret = -EINVAL; + goto error; + } + + if ((r > img->width) || (b > img->height)) { + b2r2_log_warn(cont->dev, "%s: rect ends outside buffer\n", + __func__); + ret = -EINVAL; + goto error; + } + + /* Check so the intersected rectangle isn't empty */ + if ((l == r) || (t == b)) { + b2r2_log_warn(cont->dev, + "%s: rect is empty (width or height zero)\n", + __func__); + ret = -EINVAL; + goto error; + } + + return 0; +error: + b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__); + return ret; +} + +/** + * bg_format_require_ivmx() + * + * Check if there are any color space conversion needed for the + * background to the destination format. 
+ */ +static bool bg_format_require_ivmx(enum b2r2_blt_fmt bg_fmt, + enum b2r2_blt_fmt dst_fmt) +{ + if (b2r2_is_rgb_fmt(bg_fmt)) { + if (b2r2_is_yvu_fmt(dst_fmt)) + return true; + else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 || + dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 || + dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 || + dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888) + return true; + else if (b2r2_is_yuv_fmt(dst_fmt)) + return true; + else if (b2r2_is_bgr_fmt(dst_fmt)) + return true; + } else if (b2r2_is_yvu_fmt(bg_fmt)) { + if (b2r2_is_rgb_fmt(dst_fmt)) + return true; + else if (b2r2_is_bgr_fmt(dst_fmt)) + return true; + else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 || + dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 || + dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 || + dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888) + return true; + else if (b2r2_is_yuv_fmt(dst_fmt) && + !b2r2_is_yvu_fmt(dst_fmt)) + return true; + } else if (bg_fmt == B2R2_BLT_FMT_24_BIT_YUV888 || + bg_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 || + bg_fmt == B2R2_BLT_FMT_24_BIT_VUY888 || + bg_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888) { + if (b2r2_is_rgb_fmt(dst_fmt)) { + return true; + } else if (b2r2_is_yvu_fmt(dst_fmt)) { + return true; + } else if (b2r2_is_yuv_fmt(dst_fmt)) { + switch (dst_fmt) { + case B2R2_BLT_FMT_24_BIT_YUV888: + case B2R2_BLT_FMT_32_BIT_AYUV8888: + case B2R2_BLT_FMT_24_BIT_VUY888: + case B2R2_BLT_FMT_32_BIT_VUYA8888: + break; + default: + return true; + } + } + } else if (b2r2_is_yuv_fmt(bg_fmt)) { + if (b2r2_is_rgb_fmt(dst_fmt)) + return true; + else if (b2r2_is_bgr_fmt(dst_fmt)) + return true; + else if (dst_fmt == B2R2_BLT_FMT_24_BIT_YUV888 || + dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 || + dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 || + dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888) + return true; + else if (b2r2_is_yvu_fmt(dst_fmt)) + return true; + } else if (b2r2_is_bgr_fmt(bg_fmt)) { + if (b2r2_is_rgb_fmt(dst_fmt)) + return true; + else if (b2r2_is_yvu_fmt(dst_fmt)) + return true; + else if (dst_fmt == 
B2R2_BLT_FMT_24_BIT_YUV888 || + dst_fmt == B2R2_BLT_FMT_32_BIT_AYUV8888 || + dst_fmt == B2R2_BLT_FMT_24_BIT_VUY888 || + dst_fmt == B2R2_BLT_FMT_32_BIT_VUYA8888) + return true; + else if (b2r2_is_yuv_fmt(dst_fmt)) + return true; + } + + return false; +} + +/** + * analyze_fmt_conv() - analyze the format conversions needed for a job + */ +static int analyze_fmt_conv(struct b2r2_control *cont, + struct b2r2_node_split_buf *src, + struct b2r2_node_split_buf *dst, + const u32 **vmx, u32 *node_count, bool fullrange) +{ + enum b2r2_color_conversion cc = + b2r2_get_color_conversion(src->fmt, dst->fmt, fullrange); + + b2r2_get_vmx(cc, vmx); + + if (dst->type == B2R2_FMT_TYPE_RASTER) { + *node_count = 1; + } else if (dst->type == B2R2_FMT_TYPE_SEMI_PLANAR) { + *node_count = 2; + } else if (dst->type == B2R2_FMT_TYPE_PLANAR) { + *node_count = 3; + } else { + /* That's strange... */ + BUG_ON(1); + } + + return 0; +} + +/** + * analyze_color_fill() - analyze a color fill operation + */ +static int analyze_color_fill(struct b2r2_node_split_job *this, + const struct b2r2_blt_request *req, u32 *node_count) +{ + int ret; + struct b2r2_control *cont = req->instance->control; + + /* Destination must be raster for raw fill to work */ + if (this->dst.type != B2R2_FMT_TYPE_RASTER) { + b2r2_log_warn(cont->dev, + "%s: fill requires raster destination\n", + __func__); + ret = -EINVAL; + goto error; + } + + /* We will try to fill the entire rectangle in one go */ + memcpy(&this->dst.win, &this->dst.rect, sizeof(this->dst.win)); + + /* Check if this is a direct fill */ + if ((!this->blend) && ((this->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW) || + (this->dst.fmt == B2R2_BLT_FMT_32_BIT_ARGB8888) || + (this->dst.fmt == B2R2_BLT_FMT_32_BIT_ABGR8888) || + (this->dst.fmt == B2R2_BLT_FMT_32_BIT_AYUV8888) || + (this->dst.fmt == B2R2_BLT_FMT_32_BIT_VUYA8888))) { + this->type = B2R2_DIRECT_FILL; + + /* The color format will be the same as the dst fmt */ + this->src.fmt = this->dst.fmt; + + /* The entire 
destination rectangle will be */ + memcpy(&this->dst.win, &this->dst.rect, + sizeof(this->dst.win)); + *node_count = 1; + } else { + this->type = B2R2_FILL; + + /* Determine the fill color format */ + if (this->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW) { + /* The color format will be the same as the dst fmt */ + this->src.fmt = this->dst.fmt; + } else { + /* If the dst fmt is YUV the fill fmt will be as well */ + if (b2r2_is_yuv_fmt(this->dst.fmt)) { + this->src.fmt = B2R2_BLT_FMT_32_BIT_AYUV8888; + } else if (b2r2_is_rgb_fmt(this->dst.fmt)) { + this->src.fmt = B2R2_BLT_FMT_32_BIT_ARGB8888; + } else if (b2r2_is_bgr_fmt(this->dst.fmt)) { + /* Color will still be ARGB, we will translate + using IVMX (configured later) */ + this->src.fmt = B2R2_BLT_FMT_32_BIT_ARGB8888; + } else { + /* Wait, what? */ + b2r2_log_warn(cont->dev, "%s: " + "Illegal destination format for fill", + __func__); + ret = -EINVAL; + goto error; + } + } + + /* Also, B2R2 seems to ignore the pixel alpha value */ + if (((this->flags & B2R2_BLT_FLAG_PER_PIXEL_ALPHA_BLEND) + != 0) && + ((this->flags & B2R2_BLT_FLAG_SOURCE_FILL_RAW) + == 0) && b2r2_fmt_has_alpha(this->src.fmt)) { + u8 pixel_alpha = b2r2_get_alpha(this->src.fmt, + this->src.color); + u32 new_global = pixel_alpha * this->global_alpha / 255; + + this->global_alpha = (u8)new_global; + + /* Set the pixel alpha to full opaque so we don't get + any nasty surprises */ + this->src.color = b2r2_set_alpha(this->src.fmt, 0xFF, + this->src.color); + } + + ret = analyze_fmt_conv( + cont, &this->src, &this->dst, &this->ivmx, + node_count, this->fullrange); + if (ret < 0) + goto error; + } + + return 0; + +error: + b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__); + return ret; + +} + +/** + * analyze_transform() - analyze a transform operation (rescale, rotate, etc.) 
+ */ +static int analyze_transform(struct b2r2_node_split_job *this, + const struct b2r2_blt_request *req, u32 *node_count, + u32 *buf_count) +{ + int ret; + bool is_scaling; +#ifdef CONFIG_B2R2_DEBUG + struct b2r2_control *cont = req->instance->control; +#endif + + b2r2_log_info(cont->dev, "%s\n", __func__); + + /* + * The transform enum is defined so that all rotation transforms are + * masked with the rotation flag + */ + this->rotation = (this->transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) != 0; + + /* B2R2 cannot do rotations if the destination is not raster, or 422R */ + if (this->rotation && (this->dst.type != B2R2_FMT_TYPE_RASTER || + this->dst.fmt == B2R2_BLT_FMT_Y_CB_Y_CR || + this->dst.fmt == B2R2_BLT_FMT_CB_Y_CR_Y)) { + b2r2_log_warn(cont->dev, + "%s: Unsupported operation " + "(rot && (!dst_raster || dst==422R))", + __func__); + ret = -ENOSYS; + goto unsupported; + } + + /* Flip the image by changing the scan order of the destination */ + if (this->transform & B2R2_BLT_TRANSFORM_FLIP_H) + this->dst.hso = B2R2_TY_HSO_RIGHT_TO_LEFT; + if (this->transform & B2R2_BLT_TRANSFORM_FLIP_V) + this->dst.vso = B2R2_TY_VSO_BOTTOM_TO_TOP; + + /* Check for scaling */ + if (this->rotation) { + is_scaling = (this->src.rect.width != this->dst.rect.height) || + (this->src.rect.height != this->dst.rect.width); + } else { + is_scaling = (this->src.rect.width != this->dst.rect.width) || + (this->src.rect.height != this->dst.rect.height); + } + + /* Plane separated formats must be treated as scaling */ + is_scaling = is_scaling || + (this->src.type == B2R2_FMT_TYPE_SEMI_PLANAR) || + (this->src.type == B2R2_FMT_TYPE_PLANAR) || + (this->dst.type == B2R2_FMT_TYPE_SEMI_PLANAR) || + (this->dst.type == B2R2_FMT_TYPE_PLANAR); + + if (is_scaling && this->rotation && this->blend) { + /* TODO: This is unsupported. Fix it! 
*/ + b2r2_log_info(cont->dev, "%s: Unsupported operation " + "(rot+rescale+blend)\n", __func__); + ret = -ENOSYS; + goto unsupported; + } + + /* Check which type of transform */ + if (is_scaling && this->rotation) { + ret = analyze_rot_scale(this, req, node_count, buf_count); + if (ret < 0) + goto error; + } else if (is_scaling) { + ret = analyze_scaling(this, req, node_count, buf_count); + if (ret < 0) + goto error; + } else if (this->rotation) { + ret = analyze_rotate(this, req, node_count, buf_count); + if (ret < 0) + goto error; + } else { + /* No additional nodes needed for a flip */ + ret = analyze_copy(this, req, node_count, buf_count); + if (ret < 0) + goto error; + this->type = B2R2_FLIP; + } + + return 0; + +error: + b2r2_log_warn(cont->dev, "%s: error!\n", __func__); +unsupported: + return ret; +} + +/** + * analyze_copy() - analyze a copy operation + */ +static int analyze_copy(struct b2r2_node_split_job *this, + const struct b2r2_blt_request *req, u32 *node_count, + u32 *buf_count) +{ + int ret; + struct b2r2_control *cont = req->instance->control; + + memcpy(&this->dst.win, &this->dst.rect, sizeof(this->dst.win)); + + if (!this->blend && + !(this->flags & B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) && + (this->src.fmt == this->dst.fmt) && + (this->src.type == B2R2_FMT_TYPE_RASTER) && + (this->dst.rect.x >= this->clip_rect.x) && + (this->dst.rect.y >= this->clip_rect.y) && + (this->dst.rect.x + this->dst.rect.width <= + this->clip_rect.x + this->clip_rect.width) && + (this->dst.rect.y + this->dst.rect.height <= + this->clip_rect.y + this->clip_rect.height)) { + this->type = B2R2_DIRECT_COPY; + *node_count = 1; + } else { + u32 copy_count; + + this->type = B2R2_COPY; + + ret = analyze_fmt_conv(cont, &this->src, &this->dst, + &this->ivmx, ©_count, this->fullrange); + if (ret < 0) + goto error; + + *node_count = copy_count; + } + + return 0; + +error: + b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__); + return ret; +} + +static int calc_rot_count(u32 width, 
u32 height) +{ + int count; + + count = width / B2R2_ROTATE_MAX_WIDTH; + if (width % B2R2_ROTATE_MAX_WIDTH) + count++; + if (height > B2R2_ROTATE_MAX_WIDTH && + height % B2R2_ROTATE_MAX_WIDTH) + count *= 2; + + return count; +} + +static int analyze_rot_scale_downscale(struct b2r2_node_split_job *this, + const struct b2r2_blt_request *req, u32 *node_count, + u32 *buf_count) +{ + int ret; + struct b2r2_control *cont = req->instance->control; + struct b2r2_node_split_buf *src = &this->src; + struct b2r2_node_split_buf *dst = &this->dst; + struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0]; + + u32 num_rows; + u32 num_cols; + u32 rot_count; + u32 rescale_count; + u32 nodes_per_rot; + u32 nodes_per_rescale; + u32 right_width; + u32 bottom_height; + const u32 *dummy_vmx; + + b2r2_log_info(cont->dev, "%s\n", __func__); + + /* Calculate the desired tmp buffer size */ + tmp->win.width = rescale(cont, B2R2_RESCALE_MAX_WIDTH - 1, this->h_rsf); + tmp->win.width >>= 10; + tmp->win.width = min(tmp->win.width, dst->rect.height); + tmp->win.height = dst->rect.width; + + setup_tmp_buf(cont, tmp, this->max_buf_size, dst->fmt, tmp->win.width, + tmp->win.height); + tmp->tmp_buf_index = 1; + this->work_bufs[0].size = tmp->pitch * tmp->height; + + tmp->win.width = tmp->rect.width; + tmp->win.height = tmp->rect.height; + + tmp->dither = dst->dither; + dst->dither = 0; + + /* Update the dst window with the actual tmp buffer dimensions */ + dst->win.width = tmp->win.height; + dst->win.height = tmp->win.width; + + /* The rotated stripes are written to the destination bottom-up */ + if (this->dst.vso == B2R2_TY_VSO_TOP_TO_BOTTOM) + this->dst.vso = B2R2_TY_VSO_BOTTOM_TO_TOP; + else + this->dst.vso = B2R2_TY_VSO_TOP_TO_BOTTOM; + + /* + * Calculate how many nodes are required to copy to and from the tmp + * buffer + */ + ret = analyze_fmt_conv(cont, src, tmp, &this->ivmx, &nodes_per_rescale, + this->fullrange); + if (ret < 0) + goto error; + + /* We will not do any format conversion in the 
rotation stage */ + ret = analyze_fmt_conv(cont, tmp, dst, &dummy_vmx, &nodes_per_rot, + this->fullrange); + if (ret < 0) + goto error; + + /* Calculate node count for the inner tiles */ + num_cols = dst->rect.width / dst->win.width; + num_rows = dst->rect.height / dst->win.height; + + rescale_count = num_cols * num_rows; + rot_count = calc_rot_count(dst->win.height, dst->win.width) * + num_cols * num_rows; + + right_width = dst->rect.width % dst->win.width; + bottom_height = dst->rect.height % dst->win.height; + + /* Calculate node count for the rightmost tiles */ + if (right_width) { + u32 count = calc_rot_count(dst->win.height, right_width); + + rot_count += count * num_rows; + rescale_count += num_rows; + b2r2_log_info(cont->dev, "%s: rightmost: %d nodes\n", __func__, + count*num_rows); + } + + /* Calculate node count for the bottom tiles */ + if (bottom_height) { + u32 count = calc_rot_count(bottom_height, dst->win.width); + + rot_count += count * num_cols; + rescale_count += num_cols; + b2r2_log_info(cont->dev, "%s: bottom: %d nodes\n", __func__, + count * num_cols); + + } + + /* And finally for the bottom right corner */ + if (right_width && bottom_height) { + u32 count = calc_rot_count(bottom_height, right_width); + + rot_count += count; + rescale_count++; + b2r2_log_info(cont->dev, "%s: bottom right: %d nodes\n", + __func__, count); + + } + + *node_count = rot_count * nodes_per_rot; + *node_count += rescale_count * nodes_per_rescale; + *buf_count = 1; + + return 0; + +error: + b2r2_log_warn(cont->dev, "%s: error!\n", __func__); + return ret; +} + +static int analyze_rot_scale_upscale(struct b2r2_node_split_job *this, + const struct b2r2_blt_request *req, u32 *node_count, + u32 *buf_count) +{ + /* TODO: When upscaling we should optimally to the rotation first... 
*/ + return analyze_rot_scale_downscale(this, req, node_count, buf_count); +} + +/** + * analyze_rot_scaling() - analyzes a combined rotation and scaling op + */ +static int analyze_rot_scale(struct b2r2_node_split_job *this, + const struct b2r2_blt_request *req, u32 *node_count, + u32 *buf_count) +{ + int ret; + bool upscale; + struct b2r2_control *cont = req->instance->control; + + ret = analyze_scale_factors(cont, this); + if (ret < 0) + goto error; + + upscale = (u32)this->h_rsf * (u32)this->v_rsf < (1 << 20); + + if (upscale) + ret = analyze_rot_scale_upscale(this, req, node_count, + buf_count); + else + ret = analyze_rot_scale_downscale(this, req, node_count, + buf_count); + + if (ret < 0) + goto error; + + this->type = B2R2_SCALE_AND_ROTATE; + + return 0; + +error: + return ret; +} + +/** + * analyze_scaling() - analyze a rescale operation + */ +static int analyze_scaling(struct b2r2_node_split_job *this, + const struct b2r2_blt_request *req, u32 *node_count, + u32 *buf_count) +{ + int ret; + u32 copy_count; + u32 nbr_cols; + s32 dst_w; + struct b2r2_control *cont = req->instance->control; + + b2r2_log_info(cont->dev, "%s\n", __func__); + + ret = analyze_scale_factors(cont, this); + if (ret < 0) + goto error; + + /* Find out how many nodes a simple copy would require */ + ret = analyze_fmt_conv(cont, &this->src, &this->dst, &this->ivmx, + ©_count, this->fullrange); + if (ret < 0) + goto error; + + memcpy(&this->dst.win, &this->dst.rect, sizeof(this->dst.win)); + + /* + * We need to subtract from the actual maximum rescale width since the + * start of the stripe will be floored and the end ceiled. This could in + * some cases cause the stripe to be one pixel more than the maximum + * width. + * + * Example: + * x = 127.8, w = 127.8 + * + * The stripe will touch pixels 127.8 through 255.6, i.e. 129 pixels. 
+ */ + dst_w = rescale(cont, B2R2_RESCALE_MAX_WIDTH - 1, this->h_rsf); + if (dst_w < (1 << 10)) + dst_w = 1; + else + dst_w >>= 10; + + b2r2_log_info(cont->dev, "%s: dst_w=%d dst.rect.width=%d\n", + __func__, dst_w, this->dst.rect.width); + + this->dst.win.width = min(dst_w, this->dst.rect.width); + + b2r2_log_info(cont->dev, "%s: dst.win.width=%d\n", + __func__, this->dst.win.width); + + nbr_cols = this->dst.rect.width / this->dst.win.width; + if (this->dst.rect.width % this->dst.win.width) + nbr_cols++; + + *node_count = copy_count * nbr_cols; + + this->type = B2R2_SCALE; + + b2r2_log_info(cont->dev, "%s exit\n", __func__); + + return 0; + +error: + b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__); + return ret; + +} + +/** + * analyze_rotate() - analyze a rotate operation + */ +static int analyze_rotate(struct b2r2_node_split_job *this, + const struct b2r2_blt_request *req, u32 *node_count, + u32 *buf_count) +{ + int ret; + u32 nodes_per_tile; + struct b2r2_control *cont = req->instance->control; + + /* Find out how many nodes a simple copy would require */ + ret = analyze_fmt_conv(cont, &this->src, &this->dst, &this->ivmx, + &nodes_per_tile, this->fullrange); + if (ret < 0) + goto error; + + this->type = B2R2_ROTATE; + + /* The rotated stripes are written to the destination bottom-up */ + if (this->dst.vso == B2R2_TY_VSO_TOP_TO_BOTTOM) + this->dst.vso = B2R2_TY_VSO_BOTTOM_TO_TOP; + else + this->dst.vso = B2R2_TY_VSO_TOP_TO_BOTTOM; + + memcpy(&this->dst.win, &this->dst.rect, sizeof(this->dst.win)); + + this->dst.win.height = min(this->dst.win.height, B2R2_ROTATE_MAX_WIDTH); + + /* + * B2R2 cannot do rotations on stripes that are not a multiple of 16 + * pixels high (if larger than 16 pixels). 
+ */ + if (this->dst.win.width > 16) + this->dst.win.width -= (this->dst.win.width % 16); + + /* Blending cannot be combined with rotation */ + if (this->blend) { + struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0]; + enum b2r2_blt_fmt tmp_fmt; + + if (b2r2_is_yuv_fmt(this->dst.fmt)) + tmp_fmt = B2R2_BLT_FMT_32_BIT_AYUV8888; + else if (b2r2_is_bgr_fmt(this->dst.fmt)) + tmp_fmt = B2R2_BLT_FMT_32_BIT_ABGR8888; + else + tmp_fmt = B2R2_BLT_FMT_32_BIT_ARGB8888; + + setup_tmp_buf(cont, tmp, this->max_buf_size, tmp_fmt, + this->dst.win.width, this->dst.win.height); + + tmp->tmp_buf_index = 1; + + tmp->vso = B2R2_TY_VSO_BOTTOM_TO_TOP; + + this->dst.win.width = tmp->rect.width; + this->dst.win.height = tmp->rect.height; + + memcpy(&tmp->win, &tmp->rect, sizeof(tmp->win)); + + *buf_count = 1; + this->work_bufs[0].size = tmp->pitch * tmp->height; + + /* + * One more node per tile is required to rotate to the temp + * buffer. + */ + nodes_per_tile++; + } + + /* Finally, calculate the node count */ + *node_count = nodes_per_tile * + calc_rot_count(this->src.rect.width, this->src.rect.height); + + return 0; + +error: + b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__); + return ret; +} + +/** + * analyze_scale_factors() - determines the scale factors for the op + */ +static int analyze_scale_factors(struct b2r2_control *cont, + struct b2r2_node_split_job *this) +{ + int ret; + + u16 hsf; + u16 vsf; + + if (this->rotation) { + ret = calculate_scale_factor(cont->dev, this->src.rect.width, + this->dst.rect.height, &hsf); + if (ret < 0) + goto error; + + ret = calculate_scale_factor(cont->dev, this->src.rect.height, + this->dst.rect.width, &vsf); + if (ret < 0) + goto error; + } else { + ret = calculate_scale_factor(cont->dev, this->src.rect.width, + this->dst.rect.width, &hsf); + if (ret < 0) + goto error; + + ret = calculate_scale_factor(cont->dev, this->src.rect.height, + this->dst.rect.height, &vsf); + if (ret < 0) + goto error; + } + + this->h_rescale = hsf != (1 << 10); + 
this->v_rescale = vsf != (1 << 10); + + this->h_rsf = hsf; + this->v_rsf = vsf; + + b2r2_log_info(cont->dev, "%s: h_rsf=%.4x\n", __func__, this->h_rsf); + b2r2_log_info(cont->dev, "%s: v_rsf=%.4x\n", __func__, this->v_rsf); + + return 0; +error: + b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__); + return ret; +} + +/** + * configure_tile() - configures one tile of a blit operation + */ +static int configure_tile(struct b2r2_control *cont, + struct b2r2_node_split_job *this, struct b2r2_node *node, + struct b2r2_node **next) +{ + int ret = 0; + + struct b2r2_node *last; + struct b2r2_node_split_buf *src = &this->src; + struct b2r2_node_split_buf *dst = &this->dst; + struct b2r2_node_split_buf *bg = &this->bg; + + struct b2r2_blt_rect dst_norm; + struct b2r2_blt_rect src_norm; + struct b2r2_blt_rect bg_norm; + + /* Normalize the dest coords to the dest rect coordinate space */ + dst_norm.x = dst->win.x - dst->rect.x; + dst_norm.y = dst->win.y - dst->rect.y; + dst_norm.width = dst->win.width; + dst_norm.height = dst->win.height; + + if (dst->vso == B2R2_TY_VSO_BOTTOM_TO_TOP) { + /* The y coord should be counted from the bottom */ + dst_norm.y = dst->rect.height - (dst_norm.y + 1); + } + if (dst->hso == B2R2_TY_HSO_RIGHT_TO_LEFT) { + /* The x coord should be counted from the right */ + dst_norm.x = dst->rect.width - (dst_norm.x + 1); + } + + /* If the destination is rotated we should swap x, y */ + if (this->rotation) { + src_norm.x = dst_norm.y; + src_norm.y = dst_norm.x; + src_norm.width = dst_norm.height; + src_norm.height = dst_norm.width; + } else { + src_norm.x = dst_norm.x; + src_norm.y = dst_norm.y; + src_norm.width = dst_norm.width; + src_norm.height = dst_norm.height; + } + + /* Convert to src coordinate space */ + src->win.x = src_norm.x + src->rect.x; + src->win.y = src_norm.y + src->rect.y; + src->win.width = src_norm.width; + src->win.height = src_norm.height; + + /* Set bg norm */ + bg_norm.x = dst->win.x - dst->rect.x; + bg_norm.y = dst->win.y - 
dst->rect.y; + bg_norm.width = dst->win.width; + bg_norm.height = dst->win.height; + + /* Convert to bg coordinate space */ + bg->win.x = bg_norm.x + bg->rect.x; + bg->win.y = bg_norm.y + bg->rect.y; + bg->win.width = bg_norm.width; + bg->win.height = bg_norm.height; + bg->vso = dst->vso; + bg->hso = dst->hso; + + /* Do the configuration depending on operation type */ + switch (this->type) { + case B2R2_DIRECT_FILL: + configure_direct_fill(cont, node, this->src.color, dst, &last); + break; + + case B2R2_DIRECT_COPY: + configure_direct_copy(cont, node, src, dst, &last); + break; + + case B2R2_FILL: + ret = configure_fill(cont, node, src->color, src->fmt, + dst, this->ivmx, &last); + break; + + case B2R2_FLIP: /* FLIP is just a copy with different VSO/HSO */ + case B2R2_COPY: + ret = configure_copy( + cont, node, src, dst, this->ivmx, &last, this); + break; + + case B2R2_ROTATE: + { + struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0]; + + if (this->blend) { + b2r2_log_info(cont->dev, "%s: rotation + " + "blend\n", __func__); + + tmp->win.x = 0; + tmp->win.y = tmp->win.height - 1; + tmp->win.width = dst->win.width; + tmp->win.height = dst->win.height; + + /* Rotate to the temp buf */ + ret = configure_rotate(cont, node, src, tmp, + this->ivmx, &node, NULL); + if (ret < 0) + goto error; + + /* Then do a copy to the destination */ + ret = configure_copy(cont, node, tmp, dst, NULL, + &last, this); + } else { + /* Just do a rotation */ + ret = configure_rotate(cont, node, src, dst, + this->ivmx, &last, this); + } + } + break; + + case B2R2_SCALE: + ret = configure_scale(cont, node, src, dst, this->h_rsf, + this->v_rsf, this->ivmx, &last, this); + break; + + case B2R2_SCALE_AND_ROTATE: + ret = configure_rot_scale(cont, this, node, &last); + break; + + default: + b2r2_log_warn(cont->dev, "%s: Unsupported request\n", __func__); + ret = -ENOSYS; + goto error; + break; + + } + + if (ret < 0) + goto error; + + /* Scale and rotate will configure its own blending and clipping 
*/ + if (this->type != B2R2_SCALE_AND_ROTATE) { + + /* Configure blending and clipping */ + do { + if (node == NULL) { + b2r2_log_warn(cont->dev, "%s: " + "Internal error! Out of nodes!\n", + __func__); + ret = -ENOMEM; + goto error; + } + + if (this->blend) { + if (this->flags & B2R2_BLT_FLAG_BG_BLEND) + configure_bg(cont, node, bg, + this->swap_fg_bg); + else + configure_bg(cont, node, dst, + this->swap_fg_bg); + configure_blend(cont, node, this->flags, + this->global_alpha); + } + if (this->clip) + configure_clip(cont, node, &this->clip_rect); + + node = node->next; + + } while (node != last); + } + + /* Consume the nodes */ + *next = last; + + return 0; + +error: + b2r2_log_warn(cont->dev, "%s: Error!\n", __func__); + return ret; +} + +/* + * configure_sub_rot() - configure a sub-rotation + * + * This functions configures a set of nodes for rotation using the destination + * window instead of the rectangle for calculating tiles. + */ +static int configure_sub_rot(struct b2r2_control *cont, + struct b2r2_node *node, + struct b2r2_node_split_buf *src, + struct b2r2_node_split_buf *dst, + const u32 *ivmx, struct b2r2_node **next, + struct b2r2_node_split_job *job) +{ + int ret; + + struct b2r2_blt_rect src_win; + struct b2r2_blt_rect dst_win; + + u32 y_pixels = 0; + u32 x_pixels = 0; + + memcpy(&src_win, &src->win, sizeof(src_win)); + memcpy(&dst_win, &dst->win, sizeof(dst_win)); + + b2r2_log_info(cont->dev, "%s: src_win=(%d, %d, %d, %d) " + "dst_win=(%d, %d, %d, %d)\n", __func__, + src_win.x, src_win.y, src_win.width, src_win.height, + dst_win.x, dst_win.y, dst_win.width, dst_win.height); + + dst->win.height = B2R2_ROTATE_MAX_WIDTH; + if (dst->win.width % B2R2_ROTATE_MAX_WIDTH) + dst->win.width -= dst->win.width % B2R2_ROTATE_MAX_WIDTH; + + while (x_pixels < dst_win.width) { + u32 src_x = src->win.x; + u32 src_w = src->win.width; + u32 dst_y = dst->win.y; + u32 dst_h = dst->win.height; + + dst->win.width = min(dst->win.width, dst_win.width - + (int)x_pixels); + 
src->win.height = dst->win.width; + + b2r2_log_info(cont->dev, "%s: x_pixels=%d\n", + __func__, x_pixels); + + while (y_pixels < dst_win.height) { + dst->win.height = min(dst->win.height, + dst_win.height - (int)y_pixels); + src->win.width = dst->win.height; + + b2r2_log_info(cont->dev, "%s: y_pixels=%d\n", + __func__, y_pixels); + + ret = configure_rotate(cont, node, src, dst, + ivmx, &node, job); + if (ret < 0) + goto error; + + src->win.x += (src->hso == B2R2_TY_HSO_LEFT_TO_RIGHT) ? + src->win.width : -src->win.width; + dst->win.y += (dst->vso == B2R2_TY_VSO_TOP_TO_BOTTOM) ? + dst->win.height : -dst->win.height; + + y_pixels += dst->win.height; + } + + src->win.x = src_x; + src->win.y += (src->vso == B2R2_TY_VSO_TOP_TO_BOTTOM) ? + src->win.height : -src->win.height; + src->win.width = src_w; + + dst->win.x += (dst->hso == B2R2_TY_HSO_LEFT_TO_RIGHT) ? + dst->win.width : -dst->win.width; + dst->win.y = dst_y; + dst->win.height = dst_h; + + x_pixels += dst->win.width; + y_pixels = 0; + + } + + memcpy(&src->win, &src_win, sizeof(src->win)); + memcpy(&dst->win, &dst_win, sizeof(dst->win)); + + *next = node; + + return 0; + +error: + b2r2_log_warn(cont->dev, "%s: error!\n", __func__); + return ret; +} + +/** + * configure_rot_downscale() - configures a combined rotate and downscale + * + * When doing a downscale it is better to do the rotation last. 
+ */ +static int configure_rot_downscale(struct b2r2_control *cont, + struct b2r2_node_split_job *this, + struct b2r2_node *node, struct b2r2_node **next) +{ + int ret; + + struct b2r2_node_split_buf *src = &this->src; + struct b2r2_node_split_buf *dst = &this->dst; + struct b2r2_node_split_buf *tmp = &this->tmp_bufs[0]; + + tmp->win.x = 0; + tmp->win.y = 0; + tmp->win.width = dst->win.height; + tmp->win.height = dst->win.width; + + ret = configure_scale(cont, node, src, tmp, this->h_rsf, this->v_rsf, + this->ivmx, &node, this); + if (ret < 0) + goto error; + + ret = configure_sub_rot(cont, node, tmp, dst, NULL, &node, this); + if (ret < 0) + goto error; + + *next = node; + + return 0; + +error: + b2r2_log_info(cont->dev, "%s: error!\n", __func__); + return ret; +} + +/** + * configure_rot_upscale() - configures a combined rotate and upscale + * + * When doing an upscale it is better to do the rotation first. + */ +static int configure_rot_upscale(struct b2r2_control *cont, + struct b2r2_node_split_job *this, struct b2r2_node *node, + struct b2r2_node **next) +{ + /* TODO: Implement a optimal upscale (rotation first) */ + return configure_rot_downscale(cont, this, node, next); +} + +/** + * configure_rot_scale() - configures a combined rotation and scaling op + */ +static int configure_rot_scale(struct b2r2_control *cont, + struct b2r2_node_split_job *this, struct b2r2_node *node, + struct b2r2_node **next) +{ + int ret; + + bool upscale = (u32)this->h_rsf * (u32)this->v_rsf < (1 << 10); + + if (upscale) + ret = configure_rot_upscale(cont, this, node, next); + else + ret = configure_rot_downscale(cont, this, node, next); + + if (ret < 0) + goto error; + + return 0; + +error: + b2r2_log_warn(cont->dev, "%s: error!\n", __func__); + return ret; +} + +/** + * configure_direct_fill() - configures the given node for direct fill + * + * @node - the node to configure + * @color - the fill color + * @dst - the destination buffer + * @next - the next empty node in the node 
list + * + * This operation will always consume one node only. + */ +static void configure_direct_fill( + struct b2r2_control *cont, + struct b2r2_node *node, + u32 color, + struct b2r2_node_split_buf *dst, + struct b2r2_node **next) +{ + node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_FILL | B2R2_CIC_SOURCE_1; + node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_1_DIRECT_FILL; + + /* Target setup */ + set_target(node, dst->addr, dst); + + /* Source setup */ + + /* It seems B2R2 checks so that source and dest has the same format */ + node->node.GROUP3.B2R2_STY = b2r2_to_native_fmt(dst->fmt); + node->node.GROUP2.B2R2_S1CF = color; + node->node.GROUP2.B2R2_S2CF = 0; + + /* Consume the node */ + *next = node->next; +} + +/** + * configure_direct_copy() - configures the node for direct copy + * + * @node - the node to configure + * @src - the source buffer + * @dst - the destination buffer + * @next - the next empty node in the node list + * + * This operation will always consume one node only. + */ +static void configure_direct_copy( + struct b2r2_control *cont, + struct b2r2_node *node, + struct b2r2_node_split_buf *src, + struct b2r2_node_split_buf *dst, + struct b2r2_node **next) +{ + node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_1; + node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_1_DIRECT_COPY; + + /* Source setup, use the base function to avoid altering the INS */ + set_src(&node->node.GROUP3, src->addr, src); + + /* Target setup */ + set_target(node, dst->addr, dst); + + /* Consume the node */ + *next = node->next; +} + +/** + * configure_fill() - configures the given node for color fill + * + * @node - the node to configure + * @color - the fill color + * @fmt - the source color format + * @dst - the destination buffer + * @next - the next empty node in the node list + * + * A normal fill operation can be combined with any other per pixel operations + * such as blend. 
+ * + * This operation will consume as many nodes as are required to write to the + * destination format. + */ +static int configure_fill( + struct b2r2_control *cont, + struct b2r2_node *node, + u32 color, + enum b2r2_blt_fmt fmt, + struct b2r2_node_split_buf *dst, + const u32 *ivmx, + struct b2r2_node **next) +{ + int ret; + struct b2r2_node *last; + + /* Configure the destination */ + ret = configure_dst(cont, node, dst, ivmx, &last); + if (ret < 0) + goto error; + + do { + if (node == NULL) { + b2r2_log_warn(cont->dev, "%s: " + "Internal error! Out of nodes!\n", __func__); + ret = -ENOMEM; + goto error; + } + + node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2 | + B2R2_CIC_COLOR_FILL; + node->node.GROUP0.B2R2_INS |= + B2R2_INS_SOURCE_2_COLOR_FILL_REGISTER; + node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3; + + /* B2R2 has a bug that disables color fill from S2. As a + workaround we use S1 for the color. */ + node->node.GROUP2.B2R2_S1CF = 0; + node->node.GROUP2.B2R2_S2CF = color; + + /* TO BE REMOVED: */ + set_src_2(node, dst->addr, dst); + node->node.GROUP4.B2R2_STY = b2r2_to_native_fmt(fmt); + + /* Setup the iVMX for color conversion */ + if (ivmx != NULL) + set_ivmx(node, ivmx); + + if ((dst->type == B2R2_FMT_TYPE_PLANAR) || + (dst->type == B2R2_FMT_TYPE_SEMI_PLANAR)) { + + node->node.GROUP0.B2R2_INS |= + B2R2_INS_RESCALE2D_ENABLED; + node->node.GROUP8.B2R2_FCTL = + B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER | + B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER | + B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_RESIZER | + B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER; + node->node.GROUP9.B2R2_RSF = + (1 << (B2R2_RSF_HSRC_INC_SHIFT + 10)) | + (1 << (B2R2_RSF_VSRC_INC_SHIFT + 10)); + node->node.GROUP9.B2R2_RZI = + B2R2_RZI_DEFAULT_HNB_REPEAT | + (2 << B2R2_RZI_VNB_REPEAT_SHIFT); + + node->node.GROUP10.B2R2_RSF = + (1 << (B2R2_RSF_HSRC_INC_SHIFT + 10)) | + (1 << (B2R2_RSF_VSRC_INC_SHIFT + 10)); + node->node.GROUP10.B2R2_RZI = + B2R2_RZI_DEFAULT_HNB_REPEAT | + (2 << B2R2_RZI_VNB_REPEAT_SHIFT); + } 
+ + node = node->next; + + } while (node != last); + + /* Consume the nodes */ + *next = node; + + return 0; +error: + b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__); + return ret; +} + +/** + * configure_copy() - configures the given node for a copy operation + * + * @node - the node to configure + * @src - the source buffer + * @dst - the destination buffer + * @ivmx - the iVMX to use for color conversion + * @next - the next empty node in the node list + * + * This operation will consume as many nodes as are required to write to the + * destination format. + */ +static int configure_copy( + struct b2r2_control *cont, + struct b2r2_node *node, + struct b2r2_node_split_buf *src, + struct b2r2_node_split_buf *dst, + const u32 *ivmx, + struct b2r2_node **next, + struct b2r2_node_split_job *this) +{ + int ret; + + struct b2r2_node *last; + + ret = configure_dst(cont, node, dst, ivmx, &last); + if (ret < 0) + goto error; + + /* Configure the source for each node */ + do { + if (node == NULL) { + b2r2_log_warn(cont->dev, "%s: " + " Internal error! 
Out of nodes!\n", + __func__); + ret = -ENOMEM; + goto error; + } + + node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BYPASS_S2_S3; + if (this != NULL && + (this->flags & B2R2_BLT_FLAG_SOURCE_COLOR_KEY) + != 0) { + u32 key_color = 0; + + node->node.GROUP0.B2R2_ACK |= + B2R2_ACK_CKEY_SEL_SRC_AFTER_CLUT | + B2R2_ACK_CKEY_RED_MATCH_IF_BETWEEN | + B2R2_ACK_CKEY_GREEN_MATCH_IF_BETWEEN | + B2R2_ACK_CKEY_BLUE_MATCH_IF_BETWEEN; + node->node.GROUP0.B2R2_INS |= B2R2_INS_CKEY_ENABLED; + node->node.GROUP0.B2R2_CIC |= B2R2_CIC_COLOR_KEY; + + key_color = b2r2_to_RGB888(this->flag_param, src->fmt); + node->node.GROUP12.B2R2_KEY1 = key_color; + node->node.GROUP12.B2R2_KEY2 = key_color; + } + + if (this != NULL && + (this->flags & + B2R2_BLT_FLAG_CLUT_COLOR_CORRECTION) != 0) { + struct b2r2_blt_request *request = + container_of(this, struct b2r2_blt_request, + node_split_job); + node->node.GROUP0.B2R2_INS |= B2R2_INS_CLUTOP_ENABLED; + node->node.GROUP0.B2R2_CIC |= B2R2_CIC_CLUT; + node->node.GROUP7.B2R2_CCO = + B2R2_CCO_CLUT_COLOR_CORRECTION | + B2R2_CCO_CLUT_UPDATE; + node->node.GROUP7.B2R2_CML = request->clut_phys_addr; + } + /* Configure the source(s) */ + configure_src(cont, node, src, ivmx); + + node = node->next; + } while (node != last); + + /* Consume the nodes */ + *next = node; + + return 0; +error: + b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__); + return ret; +} + +/** + * configure_rotate() - configures the given node for rotation + * + * @node - the node to configure + * @src - the source buffer + * @dst - the destination buffer + * @ivmx - the iVMX to use for color conversion + * @next - the next empty node in the node list + * + * This operation will consume as many nodes are are required by the combination + * of rotating and writing the destination format. 
+ */ +static int configure_rotate( + struct b2r2_control *cont, + struct b2r2_node *node, + struct b2r2_node_split_buf *src, + struct b2r2_node_split_buf *dst, + const u32 *ivmx, + struct b2r2_node **next, + struct b2r2_node_split_job *this) +{ + int ret; + + struct b2r2_node *last; + + ret = configure_copy(cont, node, src, dst, ivmx, &last, this); + if (ret < 0) + goto error; + + do { + if (node == NULL) { + b2r2_log_warn(cont->dev, "%s: " + "Internal error! Out of nodes!\n", + __func__); + ret = -ENOMEM; + goto error; + } + + node->node.GROUP0.B2R2_INS |= B2R2_INS_ROTATION_ENABLED; + + b2r2_log_debug(cont->dev, "%s:\n" + "\tB2R2_TXY: %.8x\tB2R2_TSZ: %.8x\n" + "\tB2R2_S1XY: %.8x\tB2R2_S1SZ: %.8x\n" + "\tB2R2_S2XY: %.8x\tB2R2_S2SZ: %.8x\n" + "\tB2R2_S3XY: %.8x\tB2R2_S3SZ: %.8x\n" + "-----------------------------------\n", + __func__, node->node.GROUP1.B2R2_TXY, + node->node.GROUP1.B2R2_TSZ, + node->node.GROUP3.B2R2_SXY, + node->node.GROUP3.B2R2_SSZ, + node->node.GROUP4.B2R2_SXY, + node->node.GROUP4.B2R2_SSZ, + node->node.GROUP5.B2R2_SXY, + node->node.GROUP5.B2R2_SSZ); + + node = node->next; + + } while (node != last); + + /* Consume the nodes */ + *next = node; + + return 0; +error: + b2r2_log_warn(cont->dev, "%s: error!\n", __func__); + return ret; +} + +/** + * configure_scale() - configures the given node for scaling + * + * @node - the node to configure + * @src - the source buffer + * @dst - the destination buffer + * @h_rsf - the horizontal rescale factor + * @v_rsf - the vertical rescale factor + * @ivmx - the iVMX to use for color conversion + * @next - the next empty node in the node list + */ +static int configure_scale( + struct b2r2_control *cont, + struct b2r2_node *node, + struct b2r2_node_split_buf *src, + struct b2r2_node_split_buf *dst, + u16 h_rsf, u16 v_rsf, + const u32 *ivmx, struct b2r2_node **next, + struct b2r2_node_split_job *this) +{ + int ret; + + struct b2r2_node *last; + + struct b2r2_filter_spec *hf = NULL; + struct b2r2_filter_spec *vf 
= NULL; + + u32 fctl = 0; + u32 rsf = 0; + u32 rzi = 0; + u32 hsrc_init = 0; + u32 vsrc_init = 0; + u32 hfp = 0; + u32 vfp = 0; + + u16 luma_h_rsf = h_rsf; + u16 luma_v_rsf = v_rsf; + + struct b2r2_filter_spec *luma_hf = NULL; + struct b2r2_filter_spec *luma_vf = NULL; + + u32 luma_fctl = 0; + u32 luma_rsf = 0; + u32 luma_rzi = 0; + u32 luma_hsrc_init = 0; + u32 luma_vsrc_init = 0; + u32 luma_hfp = 0; + u32 luma_vfp = 0; + + s32 src_x; + s32 src_y; + s32 src_w; + s32 src_h; + + bool upsample; + bool downsample; + + struct b2r2_blt_rect tmp_win = src->win; + bool src_raster = src->type == B2R2_FMT_TYPE_RASTER; + bool dst_raster = dst->type == B2R2_FMT_TYPE_RASTER; + + /* Rescale the normalized source window */ + src_x = inv_rescale(src->win.x - src->rect.x, luma_h_rsf); + src_y = inv_rescale(src->win.y - src->rect.y, luma_v_rsf); + src_w = inv_rescale(src->win.width, luma_h_rsf); + src_h = inv_rescale(src->win.height, luma_v_rsf); + + /* Convert to src coordinate space */ + src->win.x = (src_x >> 10) + src->rect.x; + src->win.y = (src_y >> 10) + src->rect.y; + + /* + * Since the stripe might start and end on a fractional pixel + * we need to count all the touched pixels in the width. + * + * Example: + * src_x = 1.8, src_w = 2.8 + * + * The stripe touches pixels 1.8 through 4.6, i.e. 
4 pixels + */ + src->win.width = ((src_x & 0x3ff) + src_w + 0x3ff) >> 10; + src->win.height = ((src_y & 0x3ff) + src_h + 0x3ff) >> 10; + + luma_hsrc_init = src_x & 0x3ff; + luma_vsrc_init = src_y & 0x3ff; + + /* Check for upsampling of chroma */ + upsample = !src_raster && !b2r2_is_yuv444_fmt(src->fmt); + if (upsample) { + h_rsf /= 2; + + if (b2r2_is_yuv420_fmt(src->fmt)) + v_rsf /= 2; + } + + /* Check for downsampling of chroma */ + downsample = !dst_raster && !b2r2_is_yuv444_fmt(dst->fmt); + if (downsample) { + h_rsf *= 2; + + if (b2r2_is_yuv420_fmt(dst->fmt)) + v_rsf *= 2; + } + + src_x = inv_rescale(tmp_win.x - src->rect.x, h_rsf); + src_y = inv_rescale(tmp_win.y - src->rect.y, v_rsf); + hsrc_init = src_x & 0x3ff; + vsrc_init = src_y & 0x3ff; + + /* Configure resize and filters */ + fctl = B2R2_FCTL_HF2D_MODE_ENABLE_RESIZER | + B2R2_FCTL_VF2D_MODE_ENABLE_RESIZER; + luma_fctl = B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_RESIZER | + B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_RESIZER; + + rsf = (h_rsf << B2R2_RSF_HSRC_INC_SHIFT) | + (v_rsf << B2R2_RSF_VSRC_INC_SHIFT); + luma_rsf = (luma_h_rsf << B2R2_RSF_HSRC_INC_SHIFT) | + (luma_v_rsf << B2R2_RSF_VSRC_INC_SHIFT); + + rzi = B2R2_RZI_DEFAULT_HNB_REPEAT | + (2 << B2R2_RZI_VNB_REPEAT_SHIFT) | + (hsrc_init << B2R2_RZI_HSRC_INIT_SHIFT) | + (vsrc_init << B2R2_RZI_VSRC_INIT_SHIFT); + luma_rzi = B2R2_RZI_DEFAULT_HNB_REPEAT | + (2 << B2R2_RZI_VNB_REPEAT_SHIFT) | + (luma_hsrc_init << B2R2_RZI_HSRC_INIT_SHIFT) | + (luma_vsrc_init << B2R2_RZI_VSRC_INIT_SHIFT); + + /* + * We should only filter if there is an actual rescale (i.e. not when + * up or downsampling). 
+ */ + if (luma_h_rsf != (1 << 10)) { + hf = b2r2_filter_find(h_rsf); + luma_hf = b2r2_filter_find(luma_h_rsf); + } + if (luma_v_rsf != (1 << 10)) { + vf = b2r2_filter_find(v_rsf); + luma_vf = b2r2_filter_find(luma_v_rsf); + } + + if (hf) { + fctl |= B2R2_FCTL_HF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER; + hfp = hf->h_coeffs_phys_addr; + } + + if (vf) { + fctl |= B2R2_FCTL_VF2D_MODE_ENABLE_COLOR_CHANNEL_FILTER; + vfp = vf->v_coeffs_phys_addr; + } + + if (luma_hf) { + luma_fctl |= B2R2_FCTL_LUMA_HF2D_MODE_ENABLE_FILTER; + luma_hfp = luma_hf->h_coeffs_phys_addr; + } + + if (luma_vf) { + luma_fctl |= B2R2_FCTL_LUMA_VF2D_MODE_ENABLE_FILTER; + luma_vfp = luma_vf->v_coeffs_phys_addr; + } + + ret = configure_copy(cont, node, src, dst, ivmx, &last, this); + if (ret < 0) + goto error; + + do { + bool chroma_rescale = + (h_rsf != (1 << 10)) || (v_rsf != (1 << 10)); + bool luma_rescale = + (luma_h_rsf != (1 << 10)) || + (luma_v_rsf != (1 << 10)); + bool dst_chroma = node->node.GROUP1.B2R2_TTY & + B2R2_TTY_CHROMA_NOT_LUMA; + bool dst_luma = !dst_chroma; + + if (node == NULL) { + b2r2_log_warn(cont->dev, "%s: Internal error! Out " + "of nodes!\n", __func__); + ret = -ENOMEM; + goto error; + } + + node->node.GROUP0.B2R2_CIC |= B2R2_CIC_FILTER_CONTROL; + + /* + * If the source format is anything other than raster, we + * always have to enable both chroma and luma resizers. This + * could be a bug in the hardware, since it is not mentioned in + * the specification. + * + * Otherwise, we will only enable the chroma resizer when + * writing chroma and the luma resizer when writing luma + * (or both when writing raster). Also, if there is no rescale + * to be done there's no point in using the resizers. 
+ */ + + if (!src_raster || (chroma_rescale && + (dst_raster || dst_chroma))) { + /* Enable chroma resize */ + node->node.GROUP0.B2R2_INS |= + B2R2_INS_RESCALE2D_ENABLED; + node->node.GROUP0.B2R2_CIC |= B2R2_CIC_RESIZE_CHROMA; + node->node.GROUP8.B2R2_FCTL |= fctl; + + node->node.GROUP9.B2R2_RSF = rsf; + node->node.GROUP9.B2R2_RZI = rzi; + node->node.GROUP9.B2R2_HFP = hfp; + node->node.GROUP9.B2R2_VFP = vfp; + } + + if (!src_raster || (luma_rescale && + (dst_raster || dst_luma))) { + /* Enable luma resize */ + node->node.GROUP0.B2R2_INS |= + B2R2_INS_RESCALE2D_ENABLED; + node->node.GROUP0.B2R2_CIC |= B2R2_CIC_RESIZE_LUMA; + node->node.GROUP8.B2R2_FCTL |= luma_fctl; + + node->node.GROUP10.B2R2_RSF = luma_rsf; + node->node.GROUP10.B2R2_RZI = luma_rzi; + node->node.GROUP10.B2R2_HFP = luma_hfp; + node->node.GROUP10.B2R2_VFP = luma_vfp; + /* + * Scaling operation from raster to a multi-buffer + * format, requires the raster input to be scaled + * before luminance information can be extracted. + * Raster input is scaled by the chroma resizer. + * Luma resizer only handles luminance data which + * exists in a separate buffer in source image, + * as is the case with YUV planar/semi-planar formats. + */ + if (src_raster) { + /* Activate chroma scaling */ + node->node.GROUP0.B2R2_CIC |= + B2R2_CIC_RESIZE_CHROMA; + node->node.GROUP8.B2R2_FCTL |= fctl; + /* + * Color data must be scaled + * to the same size as luma. + * Use luma scaling parameters. 
+ */ + node->node.GROUP9.B2R2_RSF = luma_rsf; + node->node.GROUP9.B2R2_RZI = luma_rzi; + node->node.GROUP9.B2R2_HFP = luma_hfp; + node->node.GROUP9.B2R2_VFP = luma_vfp; + } + } + + b2r2_log_info(cont->dev, "%s:\n" + "\tB2R2_TXY: %.8x\tB2R2_TSZ: %.8x\n" + "\tB2R2_S1XY: %.8x\tB2R2_S1SZ: %.8x\n" + "\tB2R2_S2XY: %.8x\tB2R2_S2SZ: %.8x\n" + "\tB2R2_S3XY: %.8x\tB2R2_S3SZ: %.8x\n" + "----------------------------------\n", + __func__, node->node.GROUP1.B2R2_TXY, + node->node.GROUP1.B2R2_TSZ, + node->node.GROUP3.B2R2_SXY, + node->node.GROUP3.B2R2_SSZ, + node->node.GROUP4.B2R2_SXY, + node->node.GROUP4.B2R2_SSZ, + node->node.GROUP5.B2R2_SXY, + node->node.GROUP5.B2R2_SSZ); + + node = node->next; + + } while (node != last); + + + + /* Consume the nodes */ + *next = node; + + return 0; +error: + b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__); + return ret; +} + +/** + * configure_src() - configures the source registers and the iVMX + * + * @node - the node to configure + * @src - the source buffer + * @ivmx - the iVMX to use for color conversion + * + * This operation will not consume any nodes + */ +static void configure_src(struct b2r2_control *cont, + struct b2r2_node *node, + struct b2r2_node_split_buf *src, const u32 *ivmx) +{ + struct b2r2_node_split_buf tmp_buf; + + b2r2_log_info(cont->dev, + "%s: src.win=(%d, %d, %d, %d)\n", __func__, + src->win.x, src->win.y, src->win.width, + src->win.height); + + /* Configure S1 - S3 */ + switch (src->type) { + case B2R2_FMT_TYPE_RASTER: + set_src_2(node, src->addr, src); + break; + case B2R2_FMT_TYPE_SEMI_PLANAR: + memcpy(&tmp_buf, src, sizeof(tmp_buf)); + + /* + * For 420 and 422 the chroma has lower resolution than the + * luma + */ + if (!b2r2_is_yuv444_fmt(src->fmt)) { + tmp_buf.win.x >>= 1; + tmp_buf.win.width = (tmp_buf.win.width + 1) / 2; + + if (b2r2_is_yuv420_fmt(src->fmt)) { + tmp_buf.win.height = + (tmp_buf.win.height + 1) / 2; + tmp_buf.win.y >>= 1; + } + } + + set_src_3(node, src->addr, src); + set_src_2(node, 
tmp_buf.chroma_addr, &tmp_buf); + break; + case B2R2_FMT_TYPE_PLANAR: + memcpy(&tmp_buf, src, sizeof(tmp_buf)); + + if (!b2r2_is_yuv444_fmt(src->fmt)) { + /* + * Each chroma buffer will have half as many values + * per line as the luma buffer + */ + tmp_buf.pitch = (tmp_buf.pitch + 1) / 2; + + /* Horizontal resolution is half */ + tmp_buf.win.x >>= 1; + tmp_buf.win.width = (tmp_buf.win.width + 1) / 2; + + /* + * If the buffer is in YUV420 format, the vertical + * resolution is half as well + */ + if (b2r2_is_yuv420_fmt(src->fmt)) { + tmp_buf.win.height = + (tmp_buf.win.height + 1) / 2; + tmp_buf.win.y >>= 1; + } + } + + set_src_3(node, src->addr, src); /* Y */ + set_src_2(node, tmp_buf.chroma_addr, &tmp_buf); /* U */ + set_src_1(node, tmp_buf.chroma_cr_addr, &tmp_buf); /* V */ + + break; + default: + /* Should never, ever happen */ + BUG_ON(1); + break; + } + + /* Configure the iVMX for color space conversions */ + if (ivmx != NULL) + set_ivmx(node, ivmx); +} + +/** + * configure_bg() - configures a background for the given node + * + * @node - the node to configure + * @bg - the background buffer + * @swap_fg_bg - if true, fg will be on s1 instead of s2 + * + * This operation will not consume any nodes. + * + * NOTE: This method should be called _AFTER_ the destination has been + * configured. + * + * WARNING: Take care when using this with semi-planar or planar sources since + * either S1 or S2 will be overwritten! 
+ */ +static void configure_bg(struct b2r2_control *cont, + struct b2r2_node *node, + struct b2r2_node_split_buf *bg, bool swap_fg_bg) +{ + b2r2_log_info(cont->dev, + "%s: bg.win=(%d, %d, %d, %d)\n", __func__, + bg->win.x, bg->win.y, bg->win.width, + bg->win.height); + + /* Configure S1 */ + switch (bg->type) { + case B2R2_FMT_TYPE_RASTER: + if (swap_fg_bg) { + node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2; + node->node.GROUP0.B2R2_INS |= + B2R2_INS_SOURCE_2_FETCH_FROM_MEM; + node->node.GROUP0.B2R2_ACK |= B2R2_ACK_SWAP_FG_BG; + + set_src(&node->node.GROUP4, bg->addr, bg); + } else { + node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_1; + node->node.GROUP0.B2R2_INS |= + B2R2_INS_SOURCE_1_FETCH_FROM_MEM; + + set_src(&node->node.GROUP3, bg->addr, bg); + } + break; + default: + /* Should never, ever happen */ + BUG_ON(1); + break; + } +} + +/** + * configure_dst() - configures the destination registers of the given node + * + * @node - the node to configure + * @ivmx - the iVMX to use for color conversion + * @dst - the destination buffer + * + * This operation will consume as many nodes as are required to write the + * destination format. 
+ */ +static int configure_dst(struct b2r2_control *cont, struct b2r2_node *node, + struct b2r2_node_split_buf *dst, const u32 *ivmx, + struct b2r2_node **next) +{ + int ret; + int nbr_planes = 1; + int i; + + struct b2r2_node_split_buf dst_planes[3]; + + b2r2_log_info(cont->dev, + "%s: dst.win=(%d, %d, %d, %d)\n", __func__, + dst->win.x, dst->win.y, dst->win.width, + dst->win.height); + + memcpy(&dst_planes[0], dst, sizeof(dst_planes[0])); + + if (dst->type != B2R2_FMT_TYPE_RASTER) { + /* There will be at least 2 planes */ + nbr_planes = 2; + + memcpy(&dst_planes[1], dst, sizeof(dst_planes[1])); + + dst_planes[1].addr = dst->chroma_addr; + dst_planes[1].plane_selection = B2R2_TTY_CHROMA_NOT_LUMA; + + if (!b2r2_is_yuv444_fmt(dst->fmt)) { + /* Horizontal resolution is half */ + dst_planes[1].win.x /= 2; + /* + * Must round up the chroma size to handle cases when + * luma size is not divisible by 2. E.g. luma width==7 r + * equires chroma width==4. Chroma width==7/2==3 is only + * enough for luma width==6. + */ + dst_planes[1].win.width = + (dst_planes[1].win.width + 1) / 2; + + /* + * If the buffer is in YUV420 format, the vertical + * resolution is half as well. Height must be rounded in + * the same way as is done for width. + */ + if (b2r2_is_yuv420_fmt(dst->fmt)) { + dst_planes[1].win.y /= 2; + dst_planes[1].win.height = + (dst_planes[1].win.height + 1) / 2; + } + } + + if (dst->type == B2R2_FMT_TYPE_PLANAR) { + /* There will be a third plane as well */ + nbr_planes = 3; + + if (!b2r2_is_yuv444_fmt(dst->fmt)) { + /* The chroma planes have half the luma pitch */ + dst_planes[1].pitch /= 2; + } + + memcpy(&dst_planes[2], &dst_planes[1], + sizeof(dst_planes[2])); + dst_planes[2].addr = dst->chroma_cr_addr; + + /* + * The third plane will be Cr. + * The flag B2R2_TTY_CB_NOT_CR actually works + * the other way around, i.e. as if it was + * B2R2_TTY_CR_NOT_CB. 
+ */ + dst_planes[2].chroma_selection = B2R2_TTY_CB_NOT_CR; + } + + } + + /* Configure one node for each plane */ + for (i = 0; i < nbr_planes; i++) { + + if (node == NULL) { + b2r2_log_warn(cont->dev, "%s: " + "Internal error! Out of nodes!\n", __func__); + ret = -ENOMEM; + goto error; + } + + /* + * When writing chroma, there's no need to read the luma and + * vice versa. + */ + if ((node->node.GROUP3.B2R2_STY & B2R2_NATIVE_YUV) && + (nbr_planes > 1)) { + if (i != 0) { + node->node.GROUP4.B2R2_STY |= + B2R2_S3TY_ENABLE_BLANK_ACCESS; + } + if (i != 1) { + node->node.GROUP0.B2R2_INS &= + ~B2R2_INS_SOURCE_2_FETCH_FROM_MEM; + node->node.GROUP0.B2R2_INS |= + B2R2_INS_SOURCE_2_COLOR_FILL_REGISTER; + } + if (i != 2) { + node->node.GROUP0.B2R2_INS &= + ~B2R2_INS_SOURCE_1_FETCH_FROM_MEM; + node->node.GROUP0.B2R2_INS |= + B2R2_INS_SOURCE_1_COLOR_FILL_REGISTER; + } + } else if ((node->node.GROUP3.B2R2_STY & + (B2R2_NATIVE_YCBCR42X_MBN | + B2R2_NATIVE_YCBCR42X_R2B)) && + (nbr_planes > 1)) { + if (i != 0) { + node->node.GROUP4.B2R2_STY |= + B2R2_S3TY_ENABLE_BLANK_ACCESS; + } + } + + set_target(node, dst_planes[i].addr, &dst_planes[i]); + + node = node->next; + } + + /* Consume the nodes */ + *next = node; + + return 0; +error: + b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__); + return ret; + +} + +/** + * configure_blend() - configures the given node for alpha blending + * + * @node - the node to configure + * @flags - the flags passed in the blt_request + * @global_alpha - the global alpha to use (if enabled in flags) + * + * This operation will not consume any nodes. + * + * NOTE: This method should be called _AFTER_ the destination has been + * configured. + * + * WARNING: Take care when using this with semi-planar or planar sources since + * either S1 or S2 will be overwritten! 
+ */ +static void configure_blend(struct b2r2_control *cont, + struct b2r2_node *node, u32 flags, u32 global_alpha) +{ + node->node.GROUP0.B2R2_ACK &= ~(B2R2_ACK_MODE_BYPASS_S2_S3); + + /* Check if the foreground is premultiplied */ + if ((flags & B2R2_BLT_FLAG_SRC_IS_NOT_PREMULT) != 0) + node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BLEND_NOT_PREMULT; + else + node->node.GROUP0.B2R2_ACK |= B2R2_ACK_MODE_BLEND_PREMULT; + + /* Check if global alpha blend should be enabled */ + if (flags & B2R2_BLT_FLAG_GLOBAL_ALPHA_BLEND) { + + /* B2R2 expects the global alpha to be in 0...128 range */ + global_alpha = (global_alpha*128)/255; + + node->node.GROUP0.B2R2_ACK |= + global_alpha << B2R2_ACK_GALPHA_ROPID_SHIFT; + } else { + node->node.GROUP0.B2R2_ACK |= + (128 << B2R2_ACK_GALPHA_ROPID_SHIFT); + } +} + +/** + * configure_clip() - configures destination clipping for the given node + * + * @node - the node to configure + * @clip_rect - the clip rectangle + * + * This operation does not consume any nodes. 
+ */ +static void configure_clip(struct b2r2_control *cont, struct b2r2_node *node, + struct b2r2_blt_rect *clip_rect) +{ + s32 l = clip_rect->x; + s32 r = clip_rect->x + clip_rect->width - 1; + s32 t = clip_rect->y; + s32 b = clip_rect->y + clip_rect->height - 1; + + node->node.GROUP0.B2R2_CIC |= B2R2_CIC_CLIP_WINDOW; + node->node.GROUP0.B2R2_INS |= B2R2_INS_RECT_CLIP_ENABLED; + + /* Clip window setup */ + node->node.GROUP6.B2R2_CWO = + ((t & 0x7FFF) << B2R2_CWO_Y_SHIFT) | + ((l & 0x7FFF) << B2R2_CWO_X_SHIFT); + node->node.GROUP6.B2R2_CWS = + ((b & 0x7FFF) << B2R2_CWO_Y_SHIFT) | + ((r & 0x7FFF) << B2R2_CWO_X_SHIFT); +} + +/** + * set_buf() - configures the given buffer with the provided values + * + * @addr - the physical base address + * @img - the blt image to base the buffer on + * @rect - the rectangle to use + * @color_fill - determines whether the buffer should be used for color fill + * @color - the color to use in case of color fill + */ +static void set_buf(struct b2r2_control *cont, + struct b2r2_node_split_buf *buf, + u32 addr, + const struct b2r2_blt_img *img, + const struct b2r2_blt_rect *rect, + bool color_fill, + u32 color) +{ + memset(buf, 0, sizeof(*buf)); + + buf->fmt = img->fmt; + buf->type = b2r2_get_fmt_type(img->fmt); + + if (color_fill) { + buf->type = B2R2_FMT_TYPE_RASTER; + buf->color = color; + } else { + buf->addr = addr; + + buf->alpha_range = b2r2_get_alpha_range(img->fmt); + + if (img->pitch == 0) + buf->pitch = b2r2_fmt_byte_pitch(img->fmt, img->width); + else + buf->pitch = img->pitch; + + buf->height = img->height; + buf->width = img->width; + + switch (buf->type) { + case B2R2_FMT_TYPE_SEMI_PLANAR: + buf->chroma_addr = (u32)(((u8 *)addr) + + buf->pitch * buf->height); + break; + case B2R2_FMT_TYPE_PLANAR: + if (b2r2_is_yuv422_fmt(buf->fmt) || + b2r2_is_yuv420_fmt(buf->fmt)) { + buf->chroma_addr = (u32)(((u8 *)addr) + + buf->pitch * buf->height); + } else { + buf->chroma_cr_addr = (u32)(((u8 *)addr) + + buf->pitch * buf->height); + 
} + if (b2r2_is_yuv420_fmt(buf->fmt)) { + /* + * Use ceil(height/2) in case + * buffer height is not divisible by 2. + */ + buf->chroma_cr_addr = + (u32)(((u8 *)buf->chroma_addr) + + (buf->pitch >> 1) * + ((buf->height + 1) >> 1)); + } else if (b2r2_is_yuv422_fmt(buf->fmt)) { + buf->chroma_cr_addr = + (u32)(((u8 *)buf->chroma_addr) + + (buf->pitch >> 1) * buf->height); + } else if (b2r2_is_yvu420_fmt(buf->fmt)) { + buf->chroma_addr = + (u32)(((u8 *)buf->chroma_cr_addr) + + (buf->pitch >> 1) * + ((buf->height + 1) >> 1)); + } else if (b2r2_is_yvu422_fmt(buf->fmt)) { + buf->chroma_addr = + (u32)(((u8 *)buf->chroma_cr_addr) + + (buf->pitch >> 1) * buf->height); + } + break; + default: + break; + } + + memcpy(&buf->rect, rect, sizeof(buf->rect)); + } +} + +/** + * setup_tmp_buf() - configure a temporary buffer + */ +static int setup_tmp_buf(struct b2r2_control *cont, + struct b2r2_node_split_buf *tmp, + u32 max_size, + enum b2r2_blt_fmt pref_fmt, + u32 pref_width, + u32 pref_height) +{ + int ret; + + enum b2r2_blt_fmt fmt; + + u32 width; + u32 height; + u32 pitch; + u32 size; + + /* Determine what format we should use for the tmp buf */ + if (b2r2_is_rgb_fmt(pref_fmt)) { + fmt = B2R2_BLT_FMT_32_BIT_ARGB8888; + } else if (b2r2_is_bgr_fmt(pref_fmt)) { + fmt = B2R2_BLT_FMT_32_BIT_ABGR8888; + } else if (b2r2_is_yvu_fmt(pref_fmt)) { + fmt = B2R2_BLT_FMT_CB_Y_CR_Y; + } else if (b2r2_is_yuv_fmt(pref_fmt)) { + fmt = B2R2_BLT_FMT_32_BIT_AYUV8888; + } else { + /* Wait, what? 
*/ + b2r2_log_warn(cont->dev, "%s: " + "Cannot create tmp buf from this fmt (%d)\n", + __func__, pref_fmt); + ret = -EINVAL; + goto error; + } + + /* See if we can fit the entire preferred rectangle */ + width = pref_width; + height = pref_height; + pitch = b2r2_fmt_byte_pitch(fmt, width); + size = pitch * height; + + if (size > max_size) { + /* We need to limit the size, so we choose a different width */ + width = min(width, (u32) B2R2_RESCALE_MAX_WIDTH); + pitch = b2r2_fmt_byte_pitch(fmt, width); + height = min(height, max_size / pitch); + size = pitch * height; + } + + /* We should at least have enough room for one scanline */ + if (height == 0) { + b2r2_log_warn(cont->dev, "%s: Not enough tmp mem!\n", + __func__); + ret = -ENOMEM; + goto error; + } + + memset(tmp, 0, sizeof(*tmp)); + + tmp->fmt = fmt; + tmp->type = B2R2_FMT_TYPE_RASTER; + tmp->height = height; + tmp->width = width; + tmp->pitch = pitch; + + tmp->rect.width = width; + tmp->rect.height = tmp->height; + tmp->alpha_range = B2R2_TY_ALPHA_RANGE_255; + + return 0; +error: + b2r2_log_warn(cont->dev, "%s: Exit...\n", __func__); + return ret; + +} + +/** + * is_transform() - returns whether the given request is a transform operation + */ +static bool is_transform(const struct b2r2_blt_request *req) +{ + return (req->user_req.transform != B2R2_BLT_TRANSFORM_NONE) || + (req->user_req.src_rect.width != + req->user_req.dst_rect.width) || + (req->user_req.src_rect.height != + req->user_req.dst_rect.height); +} + +/** + * rescale() - rescales the given dimension + * + * Returns the rescaled dimension in 22.10 fixed point format. + */ +static s32 rescale(struct b2r2_control *cont, s32 dim, u16 sf) +{ + b2r2_log_info(cont->dev, "%s\n", __func__); + + if (sf == 0) { + b2r2_log_err(cont->dev, "%s: Scale factor is 0!\n", __func__); + BUG_ON(1); + } + + /* + * This is normally not safe to do, since it drastically decreases the + * precision of the integer part of the dimension. 
But since the B2R2 + * hardware only has 12-bit registers for these values, we are safe. + */ + return (dim << 20) / sf; +} + +/** + * inv_rescale() - does an inverted rescale of the given dimension + * + * Returns the rescaled dimension in 22.10 fixed point format. + */ +static s32 inv_rescale(s32 dim, u16 sf) +{ + if (sf == 0) + return dim; + + return dim * sf; +} + +/** + * set_target() - sets the target registers of the given node + */ +static void set_target(struct b2r2_node *node, u32 addr, + struct b2r2_node_split_buf *buf) +{ + s32 l; + s32 r; + s32 t; + s32 b; + + if (buf->tmp_buf_index) + node->dst_tmp_index = buf->tmp_buf_index; + + node->node.GROUP1.B2R2_TBA = addr; + node->node.GROUP1.B2R2_TTY = buf->pitch | b2r2_to_native_fmt(buf->fmt) | + buf->alpha_range | buf->chroma_selection | buf->hso | + buf->vso | buf->dither | buf->plane_selection; + + if (buf->fmt == B2R2_BLT_FMT_24_BIT_VUY888 || + buf->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888) + node->node.GROUP1.B2R2_TTY |= B2R2_TY_ENDIAN_BIG_NOT_LITTLE; + + node->node.GROUP1.B2R2_TSZ = + ((buf->win.width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) | + ((buf->win.height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT); + node->node.GROUP1.B2R2_TXY = + ((buf->win.x & 0xffff) << B2R2_XY_X_SHIFT) | + ((buf->win.y & 0xffff) << B2R2_XY_Y_SHIFT); + + /* Check if the rectangle is outside the buffer */ + if (buf->vso == B2R2_TY_VSO_BOTTOM_TO_TOP) + t = buf->win.y - (buf->win.height - 1); + else + t = buf->win.y; + + if (buf->hso == B2R2_TY_HSO_RIGHT_TO_LEFT) + l = buf->win.x - (buf->win.width - 1); + else + l = buf->win.x; + + r = l + buf->win.width; + b = t + buf->win.height; + + /* Clip to the destination buffer to prevent memory overwrites */ + if ((l < 0) || (r > buf->width) || (t < 0) || (b > buf->height)) { + /* The clip rectangle is including the borders */ + l = max(l, 0); + r = min(r, (s32) buf->width) - 1; + t = max(t, 0); + b = min(b, (s32) buf->height) - 1; + + node->node.GROUP0.B2R2_CIC |= B2R2_CIC_CLIP_WINDOW; + 
node->node.GROUP0.B2R2_INS |= B2R2_INS_RECT_CLIP_ENABLED; + node->node.GROUP6.B2R2_CWO = + ((l & 0x7FFF) << B2R2_CWS_X_SHIFT) | + ((t & 0x7FFF) << B2R2_CWS_Y_SHIFT); + node->node.GROUP6.B2R2_CWS = + ((r & 0x7FFF) << B2R2_CWO_X_SHIFT) | + ((b & 0x7FFF) << B2R2_CWO_Y_SHIFT); + } + +} + +/** + * set_src() - configures the given source register with the given values + */ +static void set_src(struct b2r2_src_config *src, u32 addr, + struct b2r2_node_split_buf *buf) +{ + src->B2R2_SBA = addr; + src->B2R2_STY = buf->pitch | b2r2_to_native_fmt(buf->fmt) | + buf->alpha_range | buf->hso | buf->vso; + + if (buf->fmt == B2R2_BLT_FMT_24_BIT_VUY888 || + buf->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888) + src->B2R2_STY |= B2R2_TY_ENDIAN_BIG_NOT_LITTLE; + + src->B2R2_SSZ = ((buf->win.width & 0xfff) << B2R2_SZ_WIDTH_SHIFT) | + ((buf->win.height & 0xfff) << B2R2_SZ_HEIGHT_SHIFT); + src->B2R2_SXY = ((buf->win.x & 0xffff) << B2R2_XY_X_SHIFT) | + ((buf->win.y & 0xffff) << B2R2_XY_Y_SHIFT); + +} + +/** + * set_src_1() - sets the source 1 registers of the given node + */ +static void set_src_1(struct b2r2_node *node, u32 addr, + struct b2r2_node_split_buf *buf) +{ + if (buf->tmp_buf_index) + node->src_tmp_index = buf->tmp_buf_index; + + node->src_index = 1; + + node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_1; + node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_1_FETCH_FROM_MEM; + + node->node.GROUP3.B2R2_SBA = addr; + node->node.GROUP3.B2R2_STY = buf->pitch | b2r2_to_native_fmt(buf->fmt) | + buf->alpha_range | buf->hso | buf->vso; + + if (buf->fmt == B2R2_BLT_FMT_24_BIT_VUY888 || + buf->fmt == B2R2_BLT_FMT_32_BIT_VUYA8888) + node->node.GROUP3.B2R2_STY |= B2R2_TY_ENDIAN_BIG_NOT_LITTLE; + + node->node.GROUP3.B2R2_SXY = + ((buf->win.x & 0xffff) << B2R2_XY_X_SHIFT) | + ((buf->win.y & 0xffff) << B2R2_XY_Y_SHIFT); + + /* Source 1 has no size register */ +} + +/** + * set_src_2() - sets the source 2 registers of the given node + */ +static void set_src_2(struct b2r2_node *node, u32 addr, + struct 
b2r2_node_split_buf *buf) +{ + if (buf->tmp_buf_index) + node->src_tmp_index = buf->tmp_buf_index; + + node->src_index = 2; + + node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_2; + node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_2_FETCH_FROM_MEM; + + set_src(&node->node.GROUP4, addr, buf); +} + +/** + * set_src_3() - sets the source 3 registers of the given node + */ +static void set_src_3(struct b2r2_node *node, u32 addr, + struct b2r2_node_split_buf *buf) +{ + if (buf->tmp_buf_index) + node->src_tmp_index = buf->tmp_buf_index; + + node->src_index = 3; + + node->node.GROUP0.B2R2_CIC |= B2R2_CIC_SOURCE_3; + node->node.GROUP0.B2R2_INS |= B2R2_INS_SOURCE_3_FETCH_FROM_MEM; + + set_src(&node->node.GROUP5, addr, buf); +} + +/** + * set_ivmx() - configures the iVMX registers with the given values + */ +static void set_ivmx(struct b2r2_node *node, const u32 *vmx_values) +{ + node->node.GROUP0.B2R2_CIC |= B2R2_CIC_IVMX; + node->node.GROUP0.B2R2_INS |= B2R2_INS_IVMX_ENABLED; + + node->node.GROUP15.B2R2_VMX0 = vmx_values[0]; + node->node.GROUP15.B2R2_VMX1 = vmx_values[1]; + node->node.GROUP15.B2R2_VMX2 = vmx_values[2]; + node->node.GROUP15.B2R2_VMX3 = vmx_values[3]; +} + +/** + * reset_nodes() - clears the node list + */ +static void reset_nodes(struct b2r2_node *node) +{ + while (node != NULL) { + memset(&node->node, 0, sizeof(node->node)); + + node->src_tmp_index = 0; + node->dst_tmp_index = 0; + + /* TODO: Implement support for short linked lists */ + node->node.GROUP0.B2R2_CIC = 0x7ffff; + + if (node->next != NULL) + node->node.GROUP0.B2R2_NIP = + node->next->physical_address; + node = node->next; + } +} + +int b2r2_node_split_init(struct b2r2_control *cont) +{ + return 0; +} + +void b2r2_node_split_exit(struct b2r2_control *cont) +{ + +} diff --git a/drivers/video/b2r2/b2r2_node_split.h b/drivers/video/b2r2/b2r2_node_split.h new file mode 100644 index 00000000000..a577241c31b --- /dev/null +++ b/drivers/video/b2r2/b2r2_node_split.h @@ -0,0 +1,124 @@ +/* + * Copyright (C) 
ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 node splitter + * + * Author: Fredrik Allansson <fredrik.allansson@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#ifndef __B2R2_NODE_SPLIT_H_ +#define __B2R2_NODE_SPLIT_H_ + +#include "b2r2_internal.h" +#include "b2r2_hw.h" + +/** + * b2r2_node_split_analyze() - Analyzes a B2R2 request + * + * @req - The request to analyze + * @max_buf_size - The largest size allowed for intermediate buffers + * @node_count - Number of nodes required for the job + * @buf_count - Number of intermediate buffers required for the job + * @bufs - An array of buffers needed for intermediate buffers + * + * Analyzes the request and determines how many nodes and intermediate buffers + * are required. + * + * It is the responsibility of the caller to allocate memory and assign the + * physical addresses. After that b2r2_node_split_assign_buffers should be + * called to assign the buffers to the right nodes. + * + * Returns: + * A handle identifing the analyzed request if successful, a negative + * value otherwise. + */ +int b2r2_node_split_analyze(const struct b2r2_blt_request *req, u32 max_buf_size, + u32 *node_count, struct b2r2_work_buf **bufs, u32* buf_count, + struct b2r2_node_split_job *job); + +/** + * b2r2_node_split_configure() - Performs a node split + * + * @handle - A handle for the analyzed request + * @first - The first node in the list of nodes to use + * + * Fills the supplied list of nodes with the parameters acquired by analyzing + * the request. + * + * All pointers to intermediate buffers are represented by integers to be used + * in the array returned by b2r2_node_split_analyze. + * + * Returns: + * A negative value if an error occurred, 0 otherwise. 
+ */ +int b2r2_node_split_configure(struct b2r2_control *cont, + struct b2r2_node_split_job *job, struct b2r2_node *first); + +/** + * b2r2_node_split_assign_buffers() - Assignes physical addresses + * + * @handle - The handle for the job + * @first - The first node in the node list + * @bufs - Buffers with assigned physical addresses + * @buf_count - Number of physical addresses + * + * Assigns the physical addresses where intermediate buffers are required in + * the node list. + * + * The order of the elements of 'bufs' must be maintained from the call to + * b2r2_node_split_analyze. + * + * Returns: + * A negative value if an error occurred, 0 otherwise. + */ +int b2r2_node_split_assign_buffers(struct b2r2_control *cont, + struct b2r2_node_split_job *job, + struct b2r2_node *first, struct b2r2_work_buf *bufs, + u32 buf_count); + +/** + * b2r2_node_split_unassign_buffers() - Removes all physical addresses + * + * @handle - The handle associated with the job + * @first - The first node in the node list + * + * Removes all references to intermediate buffers from the node list. + * + * This makes it possible to reuse the node list with new buffers by calling + * b2r2_node_split_assign_buffers again. Useful for caching node lists. + */ +void b2r2_node_split_unassign_buffers(struct b2r2_control *cont, + struct b2r2_node_split_job *job, + struct b2r2_node *first); + +/** + * b2r2_node_split_release() - Releases all resources for a job + * + * @handle - The handle identifying the job. This will be set to 0. + * + * Releases all resources associated with a job. + * + * This should always be called once b2r2_node_split_analyze has been called + * in order to release any resources allocated while analyzing. + */ +void b2r2_node_split_cancel(struct b2r2_control *cont, + struct b2r2_node_split_job *job); + +/** + * b2r2_node_split_init() - Initializes the node split module + * + * Initializes the node split module and creates debugfs files. 
+ */ +int b2r2_node_split_init(struct b2r2_control *cont); + +/** + * b2r2_node_split_exit() - Deinitializes the node split module + * + * Releases all resources for the node split module. + */ +void b2r2_node_split_exit(struct b2r2_control *cont); + +#endif diff --git a/drivers/video/b2r2/b2r2_profiler/Makefile b/drivers/video/b2r2/b2r2_profiler/Makefile new file mode 100644 index 00000000000..69a85524fd7 --- /dev/null +++ b/drivers/video/b2r2/b2r2_profiler/Makefile @@ -0,0 +1,3 @@ +# Make file for loadable module B2R2 Profiler + +obj-$(CONFIG_B2R2_PROFILER) += b2r2_profiler.o diff --git a/drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c b/drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c new file mode 100644 index 00000000000..e038941b4e8 --- /dev/null +++ b/drivers/video/b2r2/b2r2_profiler/b2r2_profiler.c @@ -0,0 +1,270 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * + * ST-Ericsson B2R2 profiler implementation + * + * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/jiffies.h> + +#include <video/b2r2_blt.h> +#include "../b2r2_profiler_api.h" + + +#define S32_MAX 2147483647 + + +static int src_format_filter_on = false; +module_param(src_format_filter_on, bool, S_IRUGO | S_IWUSR); +static unsigned int src_format_filter; +module_param(src_format_filter, uint, S_IRUGO | S_IWUSR); + +static int print_blts_on = 0; +module_param(print_blts_on, bool, S_IRUGO | S_IWUSR); +static int use_mpix_per_second_in_print_blts = 1; +module_param(use_mpix_per_second_in_print_blts, bool, S_IRUGO | S_IWUSR); + +static int profiler_stats_on = 1; +module_param(profiler_stats_on, bool, S_IRUGO | S_IWUSR); + +static const unsigned int profiler_stats_blts_used = 400; +static struct { + unsigned long sampling_start_time_jiffies; + + s32 min_mpix_per_second; + struct b2r2_blt_req min_blt_request; + struct b2r2_blt_profiling_info min_blt_profiling_info; + + s32 max_mpix_per_second; + struct b2r2_blt_req max_blt_request; + struct b2r2_blt_profiling_info max_blt_profiling_info; + + s32 accumulated_num_pixels; + s32 accumulated_num_usecs; + + u32 num_blts_done; +} profiler_stats; + + +static s32 nsec_2_usec(const s32 nsec); + +static int is_scale_blt(const struct b2r2_blt_req * const request); +static s32 get_blt_mpix_per_second(const struct b2r2_blt_req * const request, + const struct b2r2_blt_profiling_info * const blt_profiling_info); +static void print_blt(const struct b2r2_blt_req * const request, + const struct b2r2_blt_profiling_info * const blt_profiling_info); + +static s32 get_num_pixels_in_blt(const struct b2r2_blt_req * const request); +static s32 get_mpix_per_second(const s32 num_pixels, const s32 num_usecs); +static void print_profiler_stats(void); +static void reset_profiler_stats(void); +static void do_profiler_stats(const struct b2r2_blt_req * const request, + const struct b2r2_blt_profiling_info * const blt_profiling_info); + +static void 
blt_done(const struct b2r2_blt_req * const blt, + const s32 request_id, + const struct b2r2_blt_profiling_info * const blt_profiling_info); + + +static struct b2r2_profiler this = { + .blt_done = blt_done, +}; + + +static s32 nsec_2_usec(const s32 nsec) +{ + return nsec / 1000; +} + + +static int is_scale_blt(const struct b2r2_blt_req * const request) +{ + if ((request->transform & B2R2_BLT_TRANSFORM_CCW_ROT_90 && + (request->src_rect.width != + request->dst_rect.height || + request->src_rect.height != + request->dst_rect.width)) || + (!(request->transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) && + (request->src_rect.width != + request->dst_rect.width || + request->src_rect.height != + request->dst_rect.height))) + return 1; + else + return 0; +} + +static s32 get_blt_mpix_per_second(const struct b2r2_blt_req * const request, + const struct b2r2_blt_profiling_info * const blt_profiling_info) +{ + return get_mpix_per_second(get_num_pixels_in_blt(request), + nsec_2_usec(blt_profiling_info->nsec_active_in_cpu + + blt_profiling_info->nsec_active_in_b2r2)); +} + +static void print_blt(const struct b2r2_blt_req * const request, + const struct b2r2_blt_profiling_info * const blt_profiling_info) +{ + char tmp_str[128]; + sprintf(tmp_str, "SF: %#10x, DF: %#10x, F: %#10x, T: %#3x, S: %1i, P: %7i", + request->src_img.fmt, + request->dst_img.fmt, + request->flags, + request->transform, + is_scale_blt(request), + get_num_pixels_in_blt(request)); + if (use_mpix_per_second_in_print_blts) + printk(KERN_ALERT "%s, MPix/s: %3i\n", tmp_str, + get_blt_mpix_per_second(request, blt_profiling_info)); + else + printk(KERN_ALERT "%s, CPU: %10i, B2R2: %10i, Tot: %10i ns\n", + tmp_str, blt_profiling_info->nsec_active_in_cpu, + blt_profiling_info->nsec_active_in_b2r2, + blt_profiling_info->total_time_nsec); +} + + +static s32 get_num_pixels_in_blt(const struct b2r2_blt_req * const request) +{ + s32 num_pixels_in_src = request->src_rect.width * request->src_rect.height; + s32 num_pixels_in_dst = 
request->dst_rect.width * request->dst_rect.height; + if (request->flags & (B2R2_BLT_FLAG_SOURCE_FILL | + B2R2_BLT_FLAG_SOURCE_FILL_RAW)) + return num_pixels_in_dst; + else + return (num_pixels_in_src + num_pixels_in_dst) / 2; +} + +static s32 get_mpix_per_second(const s32 num_pixels, const s32 num_usecs) +{ + s32 num_pixels_scale_factor = num_pixels != 0 ? + S32_MAX / num_pixels : S32_MAX; + s32 num_usecs_scale_factor = num_usecs != 0 ? + S32_MAX / num_usecs : S32_MAX; + s32 scale_factor = min(num_pixels_scale_factor, num_usecs_scale_factor); + + s32 num_pixels_scaled = num_pixels * scale_factor; + s32 num_usecs_scaled = num_usecs * scale_factor; + + if (num_usecs_scaled < 1000000) + return 0; + + return (num_pixels_scaled / 1000000) / (num_usecs_scaled / 1000000); +} + +static void print_profiler_stats(void) +{ + printk(KERN_ALERT "Min: %3i, Avg: %3i, Max: %3i MPix/s\n", + profiler_stats.min_mpix_per_second, + get_mpix_per_second( + profiler_stats.accumulated_num_pixels, + profiler_stats.accumulated_num_usecs), + profiler_stats.max_mpix_per_second); + printk(KERN_ALERT "Min blit:\n"); + print_blt(&profiler_stats.min_blt_request, + &profiler_stats.min_blt_profiling_info); + printk(KERN_ALERT "Max blit:\n"); + print_blt(&profiler_stats.max_blt_request, + &profiler_stats.max_blt_profiling_info); +} + +static void reset_profiler_stats(void) +{ + profiler_stats.sampling_start_time_jiffies = jiffies; + profiler_stats.min_mpix_per_second = S32_MAX; + profiler_stats.max_mpix_per_second = 0; + profiler_stats.accumulated_num_pixels = 0; + profiler_stats.accumulated_num_usecs = 0; + profiler_stats.num_blts_done = 0; +} + +static void do_profiler_stats(const struct b2r2_blt_req * const request, + const struct b2r2_blt_profiling_info * const blt_profiling_info) +{ + s32 num_pixels_in_blt; + s32 num_usec_blt_took; + s32 blt_mpix_per_second; + + if (time_before(jiffies, profiler_stats.sampling_start_time_jiffies)) + return; + + num_pixels_in_blt = 
get_num_pixels_in_blt(request); + num_usec_blt_took = nsec_2_usec(blt_profiling_info->nsec_active_in_cpu + + blt_profiling_info->nsec_active_in_b2r2); + blt_mpix_per_second = get_mpix_per_second(num_pixels_in_blt, + num_usec_blt_took); + + if (blt_mpix_per_second <= + profiler_stats.min_mpix_per_second) { + profiler_stats.min_mpix_per_second = blt_mpix_per_second; + memcpy(&profiler_stats.min_blt_request, + request, sizeof(struct b2r2_blt_req)); + memcpy(&profiler_stats.min_blt_profiling_info, + blt_profiling_info, + sizeof(struct b2r2_blt_profiling_info)); + } + + if (blt_mpix_per_second >= profiler_stats.max_mpix_per_second) { + profiler_stats.max_mpix_per_second = blt_mpix_per_second; + memcpy(&profiler_stats.max_blt_request, request, + sizeof(struct b2r2_blt_req)); + memcpy(&profiler_stats.max_blt_profiling_info, + blt_profiling_info, sizeof(struct b2r2_blt_profiling_info)); + } + + profiler_stats.accumulated_num_pixels += num_pixels_in_blt; + profiler_stats.accumulated_num_usecs += num_usec_blt_took; + profiler_stats.num_blts_done++; + + if (profiler_stats.num_blts_done >= profiler_stats_blts_used) { + print_profiler_stats(); + reset_profiler_stats(); + /* The printouts initiated above can disturb the next measurement + so we delay it two seconds to give the printouts a chance to finish. 
*/ + profiler_stats.sampling_start_time_jiffies = jiffies + (2 * HZ); + } +} + +static void blt_done(const struct b2r2_blt_req * const request, + const s32 request_id, + const struct b2r2_blt_profiling_info * const blt_profiling_info) +{ + /* Filters */ + if (src_format_filter_on && request->src_img.fmt != src_format_filter) + return; + + /* Processors */ + if (print_blts_on) + print_blt(request, blt_profiling_info); + + if (profiler_stats_on) + do_profiler_stats(request, blt_profiling_info); +} + + +static int __init b2r2_profiler_init(void) +{ + reset_profiler_stats(); + + return b2r2_register_profiler(&this); +} +module_init(b2r2_profiler_init); + +static void __exit b2r2_profiler_exit(void) +{ + b2r2_unregister_profiler(&this); +} +module_exit(b2r2_profiler_exit); + + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Johan Mossberg (johan.xx.mossberg@stericsson.com)"); +MODULE_DESCRIPTION("B2R2 Profiler"); diff --git a/drivers/video/b2r2/b2r2_profiler_api.h b/drivers/video/b2r2/b2r2_profiler_api.h new file mode 100644 index 00000000000..5f1f9abbe49 --- /dev/null +++ b/drivers/video/b2r2/b2r2_profiler_api.h @@ -0,0 +1,66 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 profiling API + * + * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + + +#ifndef _LINUX_VIDEO_B2R2_PROFILER_API_H +#define _LINUX_VIDEO_B2R2_PROFILER_API_H + +#include <video/b2r2_blt.h> + +/** + * struct b2r2_blt_profiling_info - Profiling information for a blit + * + * @nsec_active_in_cpu: The number of nanoseconds the job was active in the CPU. + * This is an approximate value, check out the code for more + * info. + * @nsec_active_in_b2r2: The number of nanoseconds the job was active in B2R2. This + * is an approximate value, check out the code for more info. + * @total_time_nsec: The total time the job took in nano seconds. Includes ideling. 
+ */ +struct b2r2_blt_profiling_info { + s32 nsec_active_in_cpu; + s32 nsec_active_in_b2r2; + s32 total_time_nsec; +}; + +/** + * struct b2r2_profiler - B2R2 profiler. + * + * The callbacks are never run concurrently. No heavy stuff must be done in the + * callbacks as this might adversely affect the B2R2 driver. The callbacks must + * not call the B2R2 profiler API as this will cause a deadlock. If the callbacks + * call into the B2R2 driver care must be taken as deadlock situations can arise. + * + * @blt_done: Called when a blit has finished, timed out or been canceled. + */ +struct b2r2_profiler { + void (*blt_done)(const struct b2r2_blt_req * const request, const s32 request_id, const struct b2r2_blt_profiling_info * const blt_profiling_info); +}; + +/** + * b2r2_register_profiler() - Registers a profiler. + * + * Currently only one profiler can be registered at any given time. + * + * @profiler: The profiler + * + * Returns 0 on success, negative error code on failure + */ +int b2r2_register_profiler(const struct b2r2_profiler * const profiler); + +/** + * b2r2_unregister_profiler() - Unregisters a profiler. + * + * @profiler: The profiler + */ +void b2r2_unregister_profiler(const struct b2r2_profiler * const profiler); + +#endif /* #ifdef _LINUX_VIDEO_B2R2_PROFILER_API_H */ diff --git a/drivers/video/b2r2/b2r2_profiler_socket.c b/drivers/video/b2r2/b2r2_profiler_socket.c new file mode 100644 index 00000000000..cb95af9380e --- /dev/null +++ b/drivers/video/b2r2/b2r2_profiler_socket.c @@ -0,0 +1,107 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 profiler socket communication + * + * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/semaphore.h> +#include <asm/errno.h> + +#include "b2r2_profiler_api.h" +#include "b2r2_internal.h" +#include "b2r2_core.h" + +/* + * TODO: Call the profiler in a seperate thread and have a circular buffer + * between the B2R2 driver and that thread. That way the profiler can not slow + * down or kill the B2R2 driver. Seems a bit overkill right now as there is + * only one B2R2 profiler and we have full control over it but the situation + * may be different in the future. + */ + + +static const struct b2r2_profiler *b2r2_profiler; +static DEFINE_SEMAPHORE(b2r2_profiler_lock); + + +int b2r2_register_profiler(const struct b2r2_profiler * const profiler) +{ + int return_value; + + return_value = down_interruptible(&b2r2_profiler_lock); + if (return_value != 0) + return return_value; + + if (b2r2_profiler != NULL) { + return_value = -EUSERS; + + goto cleanup; + } + + b2r2_profiler = profiler; + + return_value = 0; + +cleanup: + up(&b2r2_profiler_lock); + + return return_value; +} +EXPORT_SYMBOL(b2r2_register_profiler); + +void b2r2_unregister_profiler(const struct b2r2_profiler * const profiler) +{ + down(&b2r2_profiler_lock); + + if (profiler == b2r2_profiler) + b2r2_profiler = NULL; + + up(&b2r2_profiler_lock); +} +EXPORT_SYMBOL(b2r2_unregister_profiler); + + +bool is_profiler_registered_approx(void) +{ + /* No locking by design, to make it fast, hence the approx */ + if (b2r2_profiler != NULL) + return true; + else + return false; +} + +void b2r2_call_profiler_blt_done(const struct b2r2_blt_request * const request) +{ + int return_value; + struct b2r2_blt_profiling_info blt_profiling_info; + struct b2r2_core *core = (struct b2r2_core *) request->job.data; + struct b2r2_control *cont = core->control; + + return_value = down_interruptible(&b2r2_profiler_lock); + if (return_value != 0) { + dev_err(cont->dev, + "%s: Failed to acquire semaphore, ret=%i. 
" + "Lost profiler call!\n", __func__, return_value); + + return; + } + + if (NULL == b2r2_profiler) + goto cleanup; + + blt_profiling_info.nsec_active_in_cpu = request->nsec_active_in_cpu; + blt_profiling_info.nsec_active_in_b2r2 = request->job.nsec_active_in_hw; + blt_profiling_info.total_time_nsec = request->total_time_nsec; + + b2r2_profiler->blt_done(&request->user_req, request->request_id, &blt_profiling_info); + +cleanup: + up(&b2r2_profiler_lock); +} diff --git a/drivers/video/b2r2/b2r2_profiler_socket.h b/drivers/video/b2r2/b2r2_profiler_socket.h new file mode 100644 index 00000000000..80b2c20293f --- /dev/null +++ b/drivers/video/b2r2/b2r2_profiler_socket.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 profiler socket communication + * + * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#ifndef _LINUX_VIDEO_B2R2_PROFILER_SOCKET_H +#define _LINUX_VIDEO_B2R2_PROFILER_SOCKET_H + +#include "b2r2_internal.h" + +/* Will give a correct result most of the time but can be wrong */ +bool is_profiler_registered_approx(void); + +void b2r2_call_profiler_blt_done(const struct b2r2_blt_request * const request); + +#endif /* _LINUX_VIDEO_B2R2_PROFILER_SOCKET_H */ diff --git a/drivers/video/b2r2/b2r2_structures.h b/drivers/video/b2r2/b2r2_structures.h new file mode 100644 index 00000000000..99fa7f047d3 --- /dev/null +++ b/drivers/video/b2r2/b2r2_structures.h @@ -0,0 +1,226 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 register struct + * + * Author: Robert Fekete <robert.fekete@stericsson.com> + * Author: Paul Wannback + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + + +#ifndef __B2R2_STRUCTURES_H +#define __B2R2_STRUCTURES_H + +/* C struct view */ +struct b2r2_memory_map { + unsigned char fill0[2304]; + unsigned int BLT_SSBA17; /* @2304 */ + unsigned int BLT_SSBA18; /* @2308 */ + unsigned int BLT_SSBA19; /* @2312 */ + unsigned int BLT_SSBA20; /* @2316 */ + unsigned int BLT_SSBA21; /* @2320 */ + unsigned int BLT_SSBA22; /* @2324 */ + unsigned int BLT_SSBA23; /* @2328 */ + unsigned int BLT_SSBA24; /* @2332 */ + unsigned char fill1[32]; + unsigned int BLT_STBA5; /* @2368 */ + unsigned int BLT_STBA6; /* @2372 */ + unsigned int BLT_STBA7; /* @2376 */ + unsigned int BLT_STBA8; /* @2380 */ + unsigned char fill2[176]; + unsigned int BLT_CTL; /* @2560 */ + unsigned int BLT_ITS; /* @2564 */ + unsigned int BLT_STA1; /* @2568 */ + unsigned char fill3[4]; + unsigned int BLT_SSBA1; /* @2576 */ + unsigned int BLT_SSBA2; /* @2580 */ + unsigned int BLT_SSBA3; /* @2584 */ + unsigned int BLT_SSBA4; /* @2588 */ + unsigned int BLT_SSBA5; /* @2592 */ + unsigned int BLT_SSBA6; /* @2596 */ + unsigned int BLT_SSBA7; /* @2600 */ + unsigned int BLT_SSBA8; /* @2604 */ + unsigned int BLT_STBA1; /* @2608 */ + unsigned int BLT_STBA2; /* @2612 */ + unsigned int BLT_STBA3; /* @2616 */ + unsigned int BLT_STBA4; /* @2620 */ + unsigned int BLT_CQ1_TRIG_IP; /* @2624 */ + unsigned int BLT_CQ1_TRIG_CTL; /* @2628 */ + unsigned int BLT_CQ1_PACE_CTL; /* @2632 */ + unsigned int BLT_CQ1_IP; /* @2636 */ + unsigned int BLT_CQ2_TRIG_IP; /* @2640 */ + unsigned int BLT_CQ2_TRIG_CTL; /* @2644 */ + unsigned int BLT_CQ2_PACE_CTL; /* @2648 */ + unsigned int BLT_CQ2_IP; /* @2652 */ + unsigned int BLT_AQ1_CTL; /* @2656 */ + unsigned int BLT_AQ1_IP; /* @2660 */ + unsigned int BLT_AQ1_LNA; /* @2664 */ + unsigned int BLT_AQ1_STA; /* @2668 */ + unsigned int BLT_AQ2_CTL; /* @2672 */ + unsigned int BLT_AQ2_IP; /* @2676 */ + unsigned int BLT_AQ2_LNA; /* @2680 */ + unsigned int BLT_AQ2_STA; /* @2684 */ + unsigned int BLT_AQ3_CTL; /* @2688 */ + unsigned int BLT_AQ3_IP; /* @2692 */ + 
unsigned int BLT_AQ3_LNA; /* @2696 */ + unsigned int BLT_AQ3_STA; /* @2700 */ + unsigned int BLT_AQ4_CTL; /* @2704 */ + unsigned int BLT_AQ4_IP; /* @2708 */ + unsigned int BLT_AQ4_LNA; /* @2712 */ + unsigned int BLT_AQ4_STA; /* @2716 */ + unsigned int BLT_SSBA9; /* @2720 */ + unsigned int BLT_SSBA10; /* @2724 */ + unsigned int BLT_SSBA11; /* @2728 */ + unsigned int BLT_SSBA12; /* @2732 */ + unsigned int BLT_SSBA13; /* @2736 */ + unsigned int BLT_SSBA14; /* @2740 */ + unsigned int BLT_SSBA15; /* @2744 */ + unsigned int BLT_SSBA16; /* @2748 */ + unsigned int BLT_SGA1; /* @2752 */ + unsigned int BLT_SGA2; /* @2756 */ + unsigned char fill4[8]; + unsigned int BLT_ITM0; /* @2768 */ + unsigned int BLT_ITM1; /* @2772 */ + unsigned int BLT_ITM2; /* @2776 */ + unsigned int BLT_ITM3; /* @2780 */ + unsigned char fill5[16]; + unsigned int BLT_DFV2; /* @2800 */ + unsigned int BLT_DFV1; /* @2804 */ + unsigned int BLT_PRI; /* @2808 */ + unsigned char fill6[8]; + unsigned int PLUGS1_OP2; /* @2820 */ + unsigned int PLUGS1_CHZ; /* @2824 */ + unsigned int PLUGS1_MSZ; /* @2828 */ + unsigned int PLUGS1_PGZ; /* @2832 */ + unsigned char fill7[16]; + unsigned int PLUGS2_OP2; /* @2852 */ + unsigned int PLUGS2_CHZ; /* @2856 */ + unsigned int PLUGS2_MSZ; /* @2860 */ + unsigned int PLUGS2_PGZ; /* @2864 */ + unsigned char fill8[16]; + unsigned int PLUGS3_OP2; /* @2884 */ + unsigned int PLUGS3_CHZ; /* @2888 */ + unsigned int PLUGS3_MSZ; /* @2892 */ + unsigned int PLUGS3_PGZ; /* @2896 */ + unsigned char fill9[48]; + unsigned int PLUGT_OP2; /* @2948 */ + unsigned int PLUGT_CHZ; /* @2952 */ + unsigned int PLUGT_MSZ; /* @2956 */ + unsigned int PLUGT_PGZ; /* @2960 */ + unsigned char fill10[108]; + unsigned int BLT_NIP; /* @3072 */ + unsigned int BLT_CIC; /* @3076 */ + unsigned int BLT_INS; /* @3080 */ + unsigned int BLT_ACK; /* @3084 */ + unsigned int BLT_TBA; /* @3088 */ + unsigned int BLT_TTY; /* @3092 */ + unsigned int BLT_TXY; /* @3096 */ + unsigned int BLT_TSZ; /* @3100 */ + unsigned int 
BLT_S1CF; /* @3104 */ + unsigned int BLT_S2CF; /* @3108 */ + unsigned int BLT_S1BA; /* @3112 */ + unsigned int BLT_S1TY; /* @3116 */ + unsigned int BLT_S1XY; /* @3120 */ + unsigned char fill11[4]; + unsigned int BLT_S2BA; /* @3128 */ + unsigned int BLT_S2TY; /* @3132 */ + unsigned int BLT_S2XY; /* @3136 */ + unsigned int BLT_S2SZ; /* @3140 */ + unsigned int BLT_S3BA; /* @3144 */ + unsigned int BLT_S3TY; /* @3148 */ + unsigned int BLT_S3XY; /* @3152 */ + unsigned int BLT_S3SZ; /* @3156 */ + unsigned int BLT_CWO; /* @3160 */ + unsigned int BLT_CWS; /* @3164 */ + unsigned int BLT_CCO; /* @3168 */ + unsigned int BLT_CML; /* @3172 */ + unsigned int BLT_FCTL; /* @3176 */ + unsigned int BLT_PMK; /* @3180 */ + unsigned int BLT_RSF; /* @3184 */ + unsigned int BLT_RZI; /* @3188 */ + unsigned int BLT_HFP; /* @3192 */ + unsigned int BLT_VFP; /* @3196 */ + unsigned int BLT_Y_RSF; /* @3200 */ + unsigned int BLT_Y_RZI; /* @3204 */ + unsigned int BLT_Y_HFP; /* @3208 */ + unsigned int BLT_Y_VFP; /* @3212 */ + unsigned char fill12[16]; + unsigned int BLT_KEY1; /* @3232 */ + unsigned int BLT_KEY2; /* @3236 */ + unsigned char fill13[8]; + unsigned int BLT_SAR; /* @3248 */ + unsigned int BLT_USR; /* @3252 */ + unsigned char fill14[8]; + unsigned int BLT_IVMX0; /* @3264 */ + unsigned int BLT_IVMX1; /* @3268 */ + unsigned int BLT_IVMX2; /* @3272 */ + unsigned int BLT_IVMX3; /* @3276 */ + unsigned int BLT_OVMX0; /* @3280 */ + unsigned int BLT_OVMX1; /* @3284 */ + unsigned int BLT_OVMX2; /* @3288 */ + unsigned int BLT_OVMX3; /* @3292 */ + unsigned char fill15[8]; + unsigned int BLT_VC1R; /* @3304 */ + unsigned char fill16[20]; + unsigned int BLT_Y_HFC0; /* @3328 */ + unsigned int BLT_Y_HFC1; /* @3332 */ + unsigned int BLT_Y_HFC2; /* @3336 */ + unsigned int BLT_Y_HFC3; /* @3340 */ + unsigned int BLT_Y_HFC4; /* @3344 */ + unsigned int BLT_Y_HFC5; /* @3348 */ + unsigned int BLT_Y_HFC6; /* @3352 */ + unsigned int BLT_Y_HFC7; /* @3356 */ + unsigned int BLT_Y_HFC8; /* @3360 */ + unsigned int 
BLT_Y_HFC9; /* @3364 */ + unsigned int BLT_Y_HFC10; /* @3368 */ + unsigned int BLT_Y_HFC11; /* @3372 */ + unsigned int BLT_Y_HFC12; /* @3376 */ + unsigned int BLT_Y_HFC13; /* @3380 */ + unsigned int BLT_Y_HFC14; /* @3384 */ + unsigned int BLT_Y_HFC15; /* @3388 */ + unsigned char fill17[80]; + unsigned int BLT_Y_VFC0; /* @3472 */ + unsigned int BLT_Y_VFC1; /* @3476 */ + unsigned int BLT_Y_VFC2; /* @3480 */ + unsigned int BLT_Y_VFC3; /* @3484 */ + unsigned int BLT_Y_VFC4; /* @3488 */ + unsigned int BLT_Y_VFC5; /* @3492 */ + unsigned int BLT_Y_VFC6; /* @3496 */ + unsigned int BLT_Y_VFC7; /* @3500 */ + unsigned int BLT_Y_VFC8; /* @3504 */ + unsigned int BLT_Y_VFC9; /* @3508 */ + unsigned char fill18[72]; + unsigned int BLT_HFC0; /* @3584 */ + unsigned int BLT_HFC1; /* @3588 */ + unsigned int BLT_HFC2; /* @3592 */ + unsigned int BLT_HFC3; /* @3596 */ + unsigned int BLT_HFC4; /* @3600 */ + unsigned int BLT_HFC5; /* @3604 */ + unsigned int BLT_HFC6; /* @3608 */ + unsigned int BLT_HFC7; /* @3612 */ + unsigned int BLT_HFC8; /* @3616 */ + unsigned int BLT_HFC9; /* @3620 */ + unsigned int BLT_HFC10; /* @3624 */ + unsigned int BLT_HFC11; /* @3628 */ + unsigned int BLT_HFC12; /* @3632 */ + unsigned int BLT_HFC13; /* @3636 */ + unsigned int BLT_HFC14; /* @3640 */ + unsigned int BLT_HFC15; /* @3644 */ + unsigned char fill19[80]; + unsigned int BLT_VFC0; /* @3728 */ + unsigned int BLT_VFC1; /* @3732 */ + unsigned int BLT_VFC2; /* @3736 */ + unsigned int BLT_VFC3; /* @3740 */ + unsigned int BLT_VFC4; /* @3744 */ + unsigned int BLT_VFC5; /* @3748 */ + unsigned int BLT_VFC6; /* @3752 */ + unsigned int BLT_VFC7; /* @3756 */ + unsigned int BLT_VFC8; /* @3760 */ + unsigned int BLT_VFC9; /* @3764 */ +}; + +#endif /* !defined(__B2R2_STRUCTURES_H) */ + diff --git a/drivers/video/b2r2/b2r2_timing.c b/drivers/video/b2r2/b2r2_timing.c new file mode 100644 index 00000000000..4f3e2b8b042 --- /dev/null +++ b/drivers/video/b2r2/b2r2_timing.c @@ -0,0 +1,22 @@ +/* + * Copyright (C) ST-Ericsson SA 
2010 + * + * ST-Ericsson B2R2 timing + * + * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#include <linux/time.h> + + +u32 b2r2_get_curr_nsec(void) +{ + struct timespec ts; + + getrawmonotonic(&ts); + + return (u32)timespec_to_ns(&ts); +} diff --git a/drivers/video/b2r2/b2r2_timing.h b/drivers/video/b2r2/b2r2_timing.h new file mode 100644 index 00000000000..e87113c0ec9 --- /dev/null +++ b/drivers/video/b2r2/b2r2_timing.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 timing + * + * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#ifndef _LINUX_DRIVERS_VIDEO_B2R2_TIMING_H_ +#define _LINUX_DRIVERS_VIDEO_B2R2_TIMING_H_ + +/** + * b2r2_get_curr_nsec() - Return the current nanosecond. Notice that the value + * wraps when the u32 limit is reached. + * + */ +u32 b2r2_get_curr_nsec(void); + +#endif /* _LINUX_DRIVERS_VIDEO_B2R2_TIMING_H_ */ diff --git a/drivers/video/b2r2/b2r2_utils.c b/drivers/video/b2r2/b2r2_utils.c new file mode 100644 index 00000000000..44c738b5aab --- /dev/null +++ b/drivers/video/b2r2/b2r2_utils.c @@ -0,0 +1,1324 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 utils + * + * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/kernel.h> +#include <linux/errno.h> + +#include <video/b2r2_blt.h> + +#include "b2r2_utils.h" +#include "b2r2_debug.h" +#include "b2r2_internal.h" + +const s32 b2r2_s32_max = 2147483647; + + +/** + * calculate_scale_factor() - calculates the scale factor between the given + * values + */ +int calculate_scale_factor(struct device *dev, + u32 from, u32 to, u16 *sf_out) +{ + int ret; + u32 sf; + + b2r2_log_info(dev, "%s\n", __func__); + + if (to == from) { + *sf_out = 1 << 10; + return 0; + } else if (to == 0) { + b2r2_log_err(dev, "%s: To is 0!\n", __func__); + BUG_ON(1); + } + + sf = (from << 10) / to; + + if ((sf & 0xffff0000) != 0) { + /* Overflow error */ + b2r2_log_warn(dev, "%s: " + "Scale factor too large\n", __func__); + ret = -EINVAL; + goto error; + } else if (sf == 0) { + b2r2_log_warn(dev, "%s: " + "Scale factor too small\n", __func__); + ret = -EINVAL; + goto error; + } + + *sf_out = (u16)sf; + + b2r2_log_info(dev, "%s exit\n", __func__); + + return 0; + +error: + b2r2_log_warn(dev, "%s: Exit...\n", __func__); + return ret; +} + +void b2r2_get_img_bounding_rect(struct b2r2_blt_img *img, + struct b2r2_blt_rect *bounding_rect) +{ + bounding_rect->x = 0; + bounding_rect->y = 0; + bounding_rect->width = img->width; + bounding_rect->height = img->height; +} + + +bool b2r2_is_zero_area_rect(struct b2r2_blt_rect *rect) +{ + return rect->width == 0 || rect->height == 0; +} + +bool b2r2_is_rect_inside_rect(struct b2r2_blt_rect *rect1, + struct b2r2_blt_rect *rect2) +{ + return rect1->x >= rect2->x && + rect1->y >= rect2->y && + rect1->x + rect1->width <= rect2->x + rect2->width && + rect1->y + rect1->height <= rect2->y + rect2->height; +} + +bool b2r2_is_rect_gte_rect(struct b2r2_blt_rect *rect1, + struct b2r2_blt_rect *rect2) +{ + return rect1->width >= rect2->width && + rect1->height >= rect2->height; +} + +void b2r2_intersect_rects(struct b2r2_blt_rect *rect1, + struct b2r2_blt_rect *rect2, struct b2r2_blt_rect *intersection) +{ + 
struct b2r2_blt_rect tmp_rect; + + tmp_rect.x = max(rect1->x, rect2->x); + tmp_rect.y = max(rect1->y, rect2->y); + tmp_rect.width = min(rect1->x + rect1->width, rect2->x + rect2->width) + - tmp_rect.x; + if (tmp_rect.width < 0) + tmp_rect.width = 0; + tmp_rect.height = + min(rect1->y + rect1->height, rect2->y + rect2->height) - + tmp_rect.y; + if (tmp_rect.height < 0) + tmp_rect.height = 0; + + *intersection = tmp_rect; +} + +/* + * Calculate new rectangles for the supplied + * request, so that clipping to destination imaage + * can be avoided. + * Essentially, the new destination rectangle is + * defined inside the old one. Given the transform + * and scaling, one has to calculate which part of + * the old source rectangle corresponds to + * to the new part of old destination rectangle. + */ +void b2r2_trim_rects(struct device *dev, + const struct b2r2_blt_req *req, + struct b2r2_blt_rect *new_bg_rect, + struct b2r2_blt_rect *new_dst_rect, + struct b2r2_blt_rect *new_src_rect) +{ + enum b2r2_blt_transform transform = req->transform; + struct b2r2_blt_rect *old_src_rect = + (struct b2r2_blt_rect *) &req->src_rect; + struct b2r2_blt_rect *old_dst_rect = + (struct b2r2_blt_rect *) &req->dst_rect; + struct b2r2_blt_rect *old_bg_rect = + (struct b2r2_blt_rect *) &req->bg_rect; + struct b2r2_blt_rect dst_img_bounds; + s32 src_x = 0; + s32 src_y = 0; + s32 src_w = 0; + s32 src_h = 0; + s32 dx = 0; + s32 dy = 0; + s16 hsf; + s16 vsf; + + b2r2_log_info(dev, + "%s\nold_dst_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__, + old_dst_rect->x, old_dst_rect->y, + old_dst_rect->width, old_dst_rect->height); + b2r2_log_info(dev, + "%s\nold_src_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__, + old_src_rect->x, old_src_rect->y, + old_src_rect->width, old_src_rect->height); + + b2r2_get_img_bounding_rect((struct b2r2_blt_img *) &req->dst_img, + &dst_img_bounds); + + /* dst_rect inside dst_img, no clipping necessary */ + if (b2r2_is_rect_inside_rect(old_dst_rect, &dst_img_bounds)) + goto 
keep_rects; + + b2r2_intersect_rects(old_dst_rect, &dst_img_bounds, new_dst_rect); + b2r2_log_info(dev, + "%s\nnew_dst_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__, + new_dst_rect->x, new_dst_rect->y, + new_dst_rect->width, new_dst_rect->height); + + /* dst_rect completely outside, leave it to validation */ + if (new_dst_rect->width == 0 || new_dst_rect->height == 0) + goto keep_rects; + + dx = new_dst_rect->x - old_dst_rect->x; + dy = new_dst_rect->y - old_dst_rect->y; + + if (transform & B2R2_BLT_TRANSFORM_CCW_ROT_90) { + int res = 0; + res = calculate_scale_factor(dev, old_src_rect->width, + old_dst_rect->height, &hsf); + /* invalid dimensions, leave them to validation */ + if (res < 0) + goto keep_rects; + + res = calculate_scale_factor(dev, old_src_rect->height, + old_dst_rect->width, &vsf); + if (res < 0) + goto keep_rects; + + /* + * After applying the inverse transform + * for 90 degree rotation, the top-left corner + * becomes top-right. + * src_rect origin is defined as top-left, + * so a translation between dst and src + * coordinate spaces is necessary. + */ + src_x = (old_src_rect->width << 10) - + hsf * (dy + new_dst_rect->height); + src_y = dx * vsf; + src_w = new_dst_rect->height * hsf; + src_h = new_dst_rect->width * vsf; + } else { + int res = 0; + res = calculate_scale_factor(dev, old_src_rect->width, + old_dst_rect->width, &hsf); + if (res < 0) + goto keep_rects; + + res = calculate_scale_factor(dev, old_src_rect->height, + old_dst_rect->height, &vsf); + if (res < 0) + goto keep_rects; + + src_x = dx * hsf; + src_y = dy * vsf; + src_w = new_dst_rect->width * hsf; + src_h = new_dst_rect->height * vsf; + } + + /* + * src_w must contain all the pixels that contribute + * to a particular destination rectangle. + * ((x + 0x3ff) >> 10) is equivalent to ceiling(x), + * expressed in 6.10 fixed point format. + * Every destination rectangle, maps to a certain area in the source + * rectangle. 
The area in source will most likely not be a rectangle + * with exact integer dimensions whenever arbitrary scaling is involved. + * Consider the following example. + * Suppose, that width of the current destination rectangle maps + * to 1.7 pixels in source, starting at x == 5.4, as calculated + * using the scaling factor. + * This means that while the destination rectangle is written, + * the source should be read from x == 5.4 up to x == 5.4 + 1.7 == 7.1 + * Consequently, color from 3 pixels (x == 5, 6 and 7) + * needs to be read from source. + * The formula below the comment yields: + * ceil(0.4 + 1.7) == ceil(2.1) == 3 + * (src_x & 0x3ff) is the fractional part of src_x, + * which is expressed in 6.10 fixed point format. + * Thus, width of the source area should be 3 pixels wide, + * starting at x == 5. + */ + src_w = ((src_x & 0x3ff) + src_w + 0x3ff) >> 10; + src_h = ((src_y & 0x3ff) + src_h + 0x3ff) >> 10; + + src_x >>= 10; + src_y >>= 10; + + if (transform & B2R2_BLT_TRANSFORM_FLIP_H) + src_x = old_src_rect->width - src_x - src_w; + + if (transform & B2R2_BLT_TRANSFORM_FLIP_V) + src_y = old_src_rect->height - src_y - src_h; + + /* + * Translate the src_rect coordinates into true + * src_buffer coordinates. 
+ */ + src_x += old_src_rect->x; + src_y += old_src_rect->y; + + new_src_rect->x = src_x; + new_src_rect->y = src_y; + new_src_rect->width = src_w; + new_src_rect->height = src_h; + + b2r2_log_info(dev, + "%s\nnew_src_rect(x,y,w,h)=(%d, %d, %d, %d)\n", __func__, + new_src_rect->x, new_src_rect->y, + new_src_rect->width, new_src_rect->height); + + if (req->flags & B2R2_BLT_FLAG_BG_BLEND) { + /* Modify bg_rect in the same way as dst_rect */ + s32 dw = new_dst_rect->width - old_dst_rect->width; + s32 dh = new_dst_rect->height - old_dst_rect->height; + b2r2_log_info(dev, + "%s\nold bg_rect(x,y,w,h)=(%d, %d, %d, %d)\n", + __func__, old_bg_rect->x, old_bg_rect->y, + old_bg_rect->width, old_bg_rect->height); + new_bg_rect->x = old_bg_rect->x + dx; + new_bg_rect->y = old_bg_rect->y + dy; + new_bg_rect->width = old_bg_rect->width + dw; + new_bg_rect->height = old_bg_rect->height + dh; + b2r2_log_info(dev, + "%s\nnew bg_rect(x,y,w,h)=(%d, %d, %d, %d)\n", + __func__, new_bg_rect->x, new_bg_rect->y, + new_bg_rect->width, new_bg_rect->height); + } + return; +keep_rects: + /* + * Recalculation was not possible, or not necessary. + * Do not change anything, leave it to validation. + */ + *new_src_rect = *old_src_rect; + *new_dst_rect = *old_dst_rect; + *new_bg_rect = *old_bg_rect; + b2r2_log_info(dev, "%s original rectangles preserved.\n", __func__); + return; +} + +int b2r2_get_fmt_bpp(struct device *dev, enum b2r2_blt_fmt fmt) +{ + /* + * Currently this function is not used that often but if that changes a + * lookup table could make it a lot faster. 
+ */ + switch (fmt) { + case B2R2_BLT_FMT_1_BIT_A1: + return 1; + + case B2R2_BLT_FMT_8_BIT_A8: + return 8; + + case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: + return 12; + + case B2R2_BLT_FMT_16_BIT_ARGB4444: + case B2R2_BLT_FMT_16_BIT_ARGB1555: + case B2R2_BLT_FMT_16_BIT_RGB565: + case B2R2_BLT_FMT_Y_CB_Y_CR: + case B2R2_BLT_FMT_CB_Y_CR_Y: + case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: + return 16; + + case B2R2_BLT_FMT_24_BIT_RGB888: + case B2R2_BLT_FMT_24_BIT_ARGB8565: + case B2R2_BLT_FMT_24_BIT_YUV888: + case B2R2_BLT_FMT_24_BIT_VUY888: + case B2R2_BLT_FMT_YUV444_PACKED_PLANAR: + return 24; + + case B2R2_BLT_FMT_32_BIT_ARGB8888: + case B2R2_BLT_FMT_32_BIT_ABGR8888: + case B2R2_BLT_FMT_32_BIT_AYUV8888: + case B2R2_BLT_FMT_32_BIT_VUYA8888: + return 32; + + default: + b2r2_log_err(dev, + "%s: Internal error! 
Format %#x not recognized.\n", + __func__, fmt); + return 32; + } +} + +int b2r2_get_fmt_y_bpp(struct device *dev, enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: + case B2R2_BLT_FMT_Y_CB_Y_CR: + case B2R2_BLT_FMT_CB_Y_CR_Y: + case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: + case B2R2_BLT_FMT_YUV444_PACKED_PLANAR: + case B2R2_BLT_FMT_24_BIT_YUV888: + case B2R2_BLT_FMT_32_BIT_AYUV8888: + case B2R2_BLT_FMT_24_BIT_VUY888: + case B2R2_BLT_FMT_32_BIT_VUYA8888: + return 8; + + default: + b2r2_log_err(dev, + "%s: Internal error! Non YCbCr format supplied.\n", + __func__); + return 8; + } +} + + +bool b2r2_is_single_plane_fmt(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_1_BIT_A1: + case B2R2_BLT_FMT_8_BIT_A8: + case B2R2_BLT_FMT_16_BIT_ARGB4444: + case B2R2_BLT_FMT_16_BIT_ARGB1555: + case B2R2_BLT_FMT_16_BIT_RGB565: + case B2R2_BLT_FMT_24_BIT_RGB888: + case B2R2_BLT_FMT_24_BIT_ARGB8565: + case B2R2_BLT_FMT_24_BIT_YUV888: + case B2R2_BLT_FMT_24_BIT_VUY888: + case B2R2_BLT_FMT_32_BIT_ARGB8888: + case B2R2_BLT_FMT_32_BIT_ABGR8888: + case B2R2_BLT_FMT_32_BIT_AYUV8888: + case B2R2_BLT_FMT_32_BIT_VUYA8888: + case B2R2_BLT_FMT_Y_CB_Y_CR: + case B2R2_BLT_FMT_CB_Y_CR_Y: + return true; + + default: + return false; + } +} + +bool b2r2_is_independent_pixel_fmt(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_1_BIT_A1: + case B2R2_BLT_FMT_8_BIT_A8: + case B2R2_BLT_FMT_16_BIT_ARGB4444: + case B2R2_BLT_FMT_16_BIT_ARGB1555: + case B2R2_BLT_FMT_16_BIT_RGB565: + case B2R2_BLT_FMT_24_BIT_RGB888: + case B2R2_BLT_FMT_24_BIT_ARGB8565: + case 
B2R2_BLT_FMT_24_BIT_YUV888: + case B2R2_BLT_FMT_24_BIT_VUY888: + case B2R2_BLT_FMT_32_BIT_ARGB8888: + case B2R2_BLT_FMT_32_BIT_ABGR8888: + case B2R2_BLT_FMT_32_BIT_AYUV8888: + case B2R2_BLT_FMT_32_BIT_VUYA8888: + case B2R2_BLT_FMT_YUV444_PACKED_PLANAR: + return true; + + default: + return false; + } +} + +bool b2r2_is_ycbcri_fmt(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_Y_CB_Y_CR: + case B2R2_BLT_FMT_CB_Y_CR_Y: + case B2R2_BLT_FMT_24_BIT_YUV888: + case B2R2_BLT_FMT_32_BIT_AYUV8888: + case B2R2_BLT_FMT_24_BIT_VUY888: + case B2R2_BLT_FMT_32_BIT_VUYA8888: + return true; + + default: + return false; + } +} + +bool b2r2_is_ycbcrsp_fmt(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: + return true; + + default: + return false; + } +} + +bool b2r2_is_ycbcrp_fmt(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV444_PACKED_PLANAR: + return true; + + default: + return false; + } +} + +bool b2r2_is_ycbcr420_fmt(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: + return true; + + default: + return false; + } +} + +bool b2r2_is_ycbcr422_fmt(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_Y_CB_Y_CR: + case B2R2_BLT_FMT_CB_Y_CR_Y: + case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: + return 
true; + + default: + return false; + } +} + +bool b2r2_is_ycbcr444_fmt(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_YUV444_PACKED_PLANAR: + case B2R2_BLT_FMT_32_BIT_AYUV8888: + case B2R2_BLT_FMT_24_BIT_YUV888: + case B2R2_BLT_FMT_32_BIT_VUYA8888: + case B2R2_BLT_FMT_24_BIT_VUY888: + return true; + + default: + return false; + } +} + +bool b2r2_is_mb_fmt(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: + case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: + return true; + + default: + return false; + } +} + +u32 b2r2_calc_pitch_from_width(struct device *dev, + s32 width, enum b2r2_blt_fmt fmt) +{ + if (b2r2_is_single_plane_fmt(fmt)) { + return (u32)b2r2_div_round_up(width * + b2r2_get_fmt_bpp(dev, fmt), 8); + } else if (b2r2_is_ycbcrsp_fmt(fmt) || b2r2_is_ycbcrp_fmt(fmt)) { + return (u32)b2r2_div_round_up(width * + b2r2_get_fmt_y_bpp(dev, fmt), 8); + } else { + b2r2_log_err(dev, "%s: Internal error! " + "Pitchless format supplied.\n", + __func__); + return 0; + } +} + +u32 b2r2_get_img_pitch(struct device *dev, struct b2r2_blt_img *img) +{ + if (img->pitch != 0) + return img->pitch; + else + return b2r2_calc_pitch_from_width(dev, img->width, img->fmt); +} + +s32 b2r2_get_img_size(struct device *dev, struct b2r2_blt_img *img) +{ + if (b2r2_is_single_plane_fmt(img->fmt)) { + return (s32)b2r2_get_img_pitch(dev, img) * img->height; + } else if (b2r2_is_ycbcrsp_fmt(img->fmt) || + b2r2_is_ycbcrp_fmt(img->fmt)) { + s32 y_plane_size; + + y_plane_size = (s32)b2r2_get_img_pitch(dev, img) * img->height; + + if (b2r2_is_ycbcr420_fmt(img->fmt)) { + return y_plane_size + y_plane_size / 2; + } else if (b2r2_is_ycbcr422_fmt(img->fmt)) { + return y_plane_size * 2; + } else if (b2r2_is_ycbcr444_fmt(img->fmt)) { + return y_plane_size * 3; + } else { + b2r2_log_err(dev, "%s: Internal error!" 
+ " Format %#x not recognized.\n", + __func__, img->fmt); + return 0; + } + } else if (b2r2_is_mb_fmt(img->fmt)) { + return (img->width * img->height * + b2r2_get_fmt_bpp(dev, img->fmt)) / 8; + } else { + b2r2_log_err(dev, "%s: Internal error! " + "Format %#x not recognized.\n", + __func__, img->fmt); + return 0; + } +} + + +s32 b2r2_div_round_up(s32 dividend, s32 divisor) +{ + s32 quotient = dividend / divisor; + if (dividend % divisor != 0) + quotient++; + + return quotient; +} + +bool b2r2_is_aligned(s32 value, s32 alignment) +{ + return value % alignment == 0; +} + +s32 b2r2_align_up(s32 value, s32 alignment) +{ + s32 remainder = abs(value) % abs(alignment); + s32 value_to_add; + + if (remainder > 0) { + if (value >= 0) + value_to_add = alignment - remainder; + else + value_to_add = remainder; + } else { + value_to_add = 0; + } + + return value + value_to_add; +} + +/** + * b2r2_get_alpha_range() - returns the alpha range of the given format + */ +enum b2r2_ty b2r2_get_alpha_range(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_24_BIT_ARGB8565: + case B2R2_BLT_FMT_32_BIT_ARGB8888: + case B2R2_BLT_FMT_32_BIT_AYUV8888: + case B2R2_BLT_FMT_32_BIT_VUYA8888: + case B2R2_BLT_FMT_8_BIT_A8: + case B2R2_BLT_FMT_32_BIT_ABGR8888: + return B2R2_TY_ALPHA_RANGE_255; /* 0 - 255 */ + default: + return B2R2_TY_ALPHA_RANGE_128; /* 0 - 128 */ + } +} + +/** + * b2r2_get_alpha() - returns the pixel alpha in 0...255 range + */ +u8 b2r2_get_alpha(enum b2r2_blt_fmt fmt, u32 pixel) +{ + switch (fmt) { + case B2R2_BLT_FMT_32_BIT_ARGB8888: + case B2R2_BLT_FMT_32_BIT_ABGR8888: + case B2R2_BLT_FMT_32_BIT_AYUV8888: + return (pixel >> 24) & 0xff; + case B2R2_BLT_FMT_32_BIT_VUYA8888: + return pixel & 0xff; + case B2R2_BLT_FMT_24_BIT_ARGB8565: + return (pixel & 0xfff) >> 16; + case B2R2_BLT_FMT_16_BIT_ARGB4444: + return (((pixel >> 12) & 0xf) * 255) / 15; + case B2R2_BLT_FMT_16_BIT_ARGB1555: + return (pixel >> 15) * 255; + case B2R2_BLT_FMT_1_BIT_A1: + return pixel * 255; + case 
B2R2_BLT_FMT_8_BIT_A8: + return pixel; + default: + return 255; + } +} + +/** + * b2r2_set_alpha() - returns a color value with the alpha component set + */ +u32 b2r2_set_alpha(enum b2r2_blt_fmt fmt, u8 alpha, u32 color) +{ + u32 alpha_mask; + + switch (fmt) { + case B2R2_BLT_FMT_32_BIT_ARGB8888: + case B2R2_BLT_FMT_32_BIT_ABGR8888: + case B2R2_BLT_FMT_32_BIT_AYUV8888: + color &= 0x00ffffff; + alpha_mask = alpha << 24; + break; + case B2R2_BLT_FMT_32_BIT_VUYA8888: + color &= 0xffffff00; + alpha_mask = alpha; + break; + case B2R2_BLT_FMT_24_BIT_ARGB8565: + color &= 0x00ffff; + alpha_mask = alpha << 16; + break; + case B2R2_BLT_FMT_16_BIT_ARGB4444: + color &= 0x0fff; + alpha_mask = (alpha << 8) & 0xF000; + break; + case B2R2_BLT_FMT_16_BIT_ARGB1555: + color &= 0x7fff; + alpha_mask = (alpha / 255) << 15 ; + break; + case B2R2_BLT_FMT_1_BIT_A1: + color = 0; + alpha_mask = (alpha / 255); + break; + case B2R2_BLT_FMT_8_BIT_A8: + color = 0; + alpha_mask = alpha; + break; + default: + alpha_mask = 0; + } + + return color | alpha_mask; +} + +/** + * b2r2_fmt_has_alpha() - returns whether the given format carries an alpha value + */ +bool b2r2_fmt_has_alpha(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_16_BIT_ARGB4444: + case B2R2_BLT_FMT_16_BIT_ARGB1555: + case B2R2_BLT_FMT_32_BIT_ARGB8888: + case B2R2_BLT_FMT_32_BIT_ABGR8888: + case B2R2_BLT_FMT_24_BIT_ARGB8565: + case B2R2_BLT_FMT_32_BIT_AYUV8888: + case B2R2_BLT_FMT_32_BIT_VUYA8888: + case B2R2_BLT_FMT_1_BIT_A1: + case B2R2_BLT_FMT_8_BIT_A8: + return true; + default: + return false; + } +} + +/** + * b2r2_is_rgb_fmt() - returns whether the given format is a rgb format + */ +bool b2r2_is_rgb_fmt(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_16_BIT_ARGB4444: + case B2R2_BLT_FMT_16_BIT_ARGB1555: + case B2R2_BLT_FMT_16_BIT_RGB565: + case B2R2_BLT_FMT_24_BIT_RGB888: + case B2R2_BLT_FMT_32_BIT_ARGB8888: + case B2R2_BLT_FMT_24_BIT_ARGB8565: + case B2R2_BLT_FMT_1_BIT_A1: + case 
B2R2_BLT_FMT_8_BIT_A8: + return true; + default: + return false; + } +} + +/** + * b2r2_is_bgr_fmt() - returns whether the given format is a bgr format + */ +bool b2r2_is_bgr_fmt(enum b2r2_blt_fmt fmt) +{ + return (fmt == B2R2_BLT_FMT_32_BIT_ABGR8888); +} + +/** + * b2r2_is_yuv_fmt() - returns whether the given format is a yuv format + */ +bool b2r2_is_yuv_fmt(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_24_BIT_YUV888: + case B2R2_BLT_FMT_32_BIT_AYUV8888: + case B2R2_BLT_FMT_24_BIT_VUY888: + case B2R2_BLT_FMT_32_BIT_VUYA8888: + case B2R2_BLT_FMT_Y_CB_Y_CR: + case B2R2_BLT_FMT_CB_Y_CR_Y: + case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: + case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: + case B2R2_BLT_FMT_YUV444_PACKED_PLANAR: + return true; + default: + return false; + } +} + +/** + * b2r2_is_yvu_fmt() - returns whether the given format is a yvu format + */ +bool b2r2_is_yvu_fmt(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_Y_CB_Y_CR: + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: + return true; + default: + return false; + } +} + +/** + * b2r2_is_yuv420_fmt() - returns whether the given format is a yuv420 format + */ +bool b2r2_is_yuv420_fmt(enum b2r2_blt_fmt fmt) +{ + + switch (fmt) { + case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: + return true; + default: + return 
false; + } +} + +bool b2r2_is_yuv422_fmt(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_Y_CB_Y_CR: + case B2R2_BLT_FMT_CB_Y_CR_Y: + case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: + return true; + default: + return false; + } +} + +/** + * b2r2_is_yvu420_fmt() - returns whether the given format is a yvu420 format + */ +bool b2r2_is_yvu420_fmt(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: + return true; + default: + return false; + } +} + +bool b2r2_is_yvu422_fmt(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_CB_Y_CR_Y: + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: + return true; + default: + return false; + } +} + + +/** + * b2r2_is_yuv444_fmt() - returns whether the given format is a yuv444 format + */ +bool b2r2_is_yuv444_fmt(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_24_BIT_YUV888: + case B2R2_BLT_FMT_32_BIT_AYUV8888: + case B2R2_BLT_FMT_24_BIT_VUY888: + case B2R2_BLT_FMT_32_BIT_VUYA8888: + case B2R2_BLT_FMT_YUV444_PACKED_PLANAR: + return true; + default: + return false; + } +} + +/** + * b2r2_fmt_byte_pitch() - returns the pitch of a pixmap with the given width + */ +int b2r2_fmt_byte_pitch(enum b2r2_blt_fmt fmt, u32 width) +{ + int pitch; + + switch (fmt) { + + case B2R2_BLT_FMT_1_BIT_A1: + pitch = width >> 3; /* Shift is faster than division */ + if ((width & 0x3) != 0) /* Check for remainder */ + pitch++; + return pitch; + + case B2R2_BLT_FMT_8_BIT_A8: /* Fall through */ + case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: /* Fall through */ + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: /* Fall through */ + case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: /* Fall through */ + case 
B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: /* Fall through */ + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: /* Fall through */ + case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: /* Fall through */ + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: /* Fall through */ + case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: /* Fall through */ + case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: /* Fall through */ + case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: /* Fall through */ + case B2R2_BLT_FMT_YUV444_PACKED_PLANAR: + return width; + + case B2R2_BLT_FMT_16_BIT_ARGB4444: /* Fall through */ + case B2R2_BLT_FMT_16_BIT_ARGB1555: /* Fall through */ + case B2R2_BLT_FMT_16_BIT_RGB565: /* Fall through */ + case B2R2_BLT_FMT_Y_CB_Y_CR: /* Fall through */ + case B2R2_BLT_FMT_CB_Y_CR_Y: + return width << 1; + + case B2R2_BLT_FMT_24_BIT_RGB888: /* Fall through */ + case B2R2_BLT_FMT_24_BIT_ARGB8565: /* Fall through */ + case B2R2_BLT_FMT_24_BIT_YUV888: /* Fall through */ + case B2R2_BLT_FMT_24_BIT_VUY888: + return width * 3; + + case B2R2_BLT_FMT_32_BIT_ARGB8888: /* Fall through */ + case B2R2_BLT_FMT_32_BIT_ABGR8888: /* Fall through */ + case B2R2_BLT_FMT_32_BIT_AYUV8888: /* Fall through */ + case B2R2_BLT_FMT_32_BIT_VUYA8888: + return width << 2; + + default: + /* Should never, ever happen */ + BUG_ON(1); + return 0; + } +} + +/** + * b2r2_to_native_fmt() - returns the native B2R2 format + */ +enum b2r2_native_fmt b2r2_to_native_fmt(enum b2r2_blt_fmt fmt) +{ + + switch (fmt) { + case B2R2_BLT_FMT_UNUSED: + return B2R2_NATIVE_RGB565; + case B2R2_BLT_FMT_1_BIT_A1: + return B2R2_NATIVE_A1; + case B2R2_BLT_FMT_8_BIT_A8: + return B2R2_NATIVE_A8; + case B2R2_BLT_FMT_16_BIT_RGB565: + return B2R2_NATIVE_RGB565; + case B2R2_BLT_FMT_16_BIT_ARGB4444: + return B2R2_NATIVE_ARGB4444; + case B2R2_BLT_FMT_16_BIT_ARGB1555: + return B2R2_NATIVE_ARGB1555; + case B2R2_BLT_FMT_24_BIT_ARGB8565: + return B2R2_NATIVE_ARGB8565; + case B2R2_BLT_FMT_24_BIT_RGB888: + return B2R2_NATIVE_RGB888; + case B2R2_BLT_FMT_24_BIT_YUV888: + 
case B2R2_BLT_FMT_24_BIT_VUY888: /* Not actually supported by HW */ + return B2R2_NATIVE_YCBCR888; + case B2R2_BLT_FMT_32_BIT_ABGR8888: /* Not actually supported by HW */ + case B2R2_BLT_FMT_32_BIT_ARGB8888: + return B2R2_NATIVE_ARGB8888; + case B2R2_BLT_FMT_32_BIT_AYUV8888: + case B2R2_BLT_FMT_32_BIT_VUYA8888: /* Not actually supported by HW */ + return B2R2_NATIVE_AYCBCR8888; + case B2R2_BLT_FMT_CB_Y_CR_Y: + return B2R2_NATIVE_YCBCR422R; + case B2R2_BLT_FMT_Y_CB_Y_CR: + return B2R2_NATIVE_YCBCR422R; + case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: + return B2R2_NATIVE_YCBCR42X_R2B; + case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: + case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: + return B2R2_NATIVE_YCBCR42X_MBN; + case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV444_PACKED_PLANAR: + return B2R2_NATIVE_YUV; + default: + /* Should never ever happen */ + return B2R2_NATIVE_BYTE; + } +} + +/** + * Bit-expand the color from fmt to RGB888 with blue at LSB. + * Copy MSBs into missing LSBs. 
+ */ +u32 b2r2_to_RGB888(u32 color, const enum b2r2_blt_fmt fmt) +{ + u32 out_color = 0; + u32 r = 0; + u32 g = 0; + u32 b = 0; + switch (fmt) { + case B2R2_BLT_FMT_16_BIT_ARGB4444: + r = ((color & 0xf00) << 12) | ((color & 0xf00) << 8); + g = ((color & 0xf0) << 8) | ((color & 0xf0) << 4); + b = ((color & 0xf) << 4) | (color & 0xf); + out_color = r | g | b; + break; + case B2R2_BLT_FMT_16_BIT_ARGB1555: + r = ((color & 0x7c00) << 9) | ((color & 0x7000) << 4); + g = ((color & 0x3e0) << 6) | ((color & 0x380) << 1); + b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2); + out_color = r | g | b; + break; + case B2R2_BLT_FMT_16_BIT_RGB565: + r = ((color & 0xf800) << 8) | ((color & 0xe000) << 3); + g = ((color & 0x7e0) << 5) | ((color & 0x600) >> 1); + b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2); + out_color = r | g | b; + break; + case B2R2_BLT_FMT_24_BIT_RGB888: + case B2R2_BLT_FMT_32_BIT_ARGB8888: + out_color = color & 0xffffff; + break; + case B2R2_BLT_FMT_32_BIT_ABGR8888: + r = (color & 0xff) << 16; + g = color & 0xff00; + b = (color & 0xff0000) >> 16; + out_color = r | g | b; + break; + case B2R2_BLT_FMT_24_BIT_ARGB8565: + r = ((color & 0xf800) << 8) | ((color & 0xe000) << 3); + g = ((color & 0x7e0) << 5) | ((color & 0x600) >> 1); + b = ((color & 0x1f) << 3) | ((color & 0x1c) >> 2); + out_color = r | g | b; + break; + default: + break; + } + + return out_color; +} + +/** + * b2r2_get_fmt_type() - returns the type of the given format (raster, planar, etc.) 
+ */ +enum b2r2_fmt_type b2r2_get_fmt_type(enum b2r2_blt_fmt fmt) +{ + switch (fmt) { + case B2R2_BLT_FMT_16_BIT_ARGB4444: + case B2R2_BLT_FMT_16_BIT_ARGB1555: + case B2R2_BLT_FMT_16_BIT_RGB565: + case B2R2_BLT_FMT_24_BIT_RGB888: + case B2R2_BLT_FMT_32_BIT_ARGB8888: + case B2R2_BLT_FMT_Y_CB_Y_CR: + case B2R2_BLT_FMT_CB_Y_CR_Y: + case B2R2_BLT_FMT_32_BIT_ABGR8888: + case B2R2_BLT_FMT_24_BIT_ARGB8565: + case B2R2_BLT_FMT_24_BIT_YUV888: + case B2R2_BLT_FMT_32_BIT_AYUV8888: + case B2R2_BLT_FMT_24_BIT_VUY888: + case B2R2_BLT_FMT_32_BIT_VUYA8888: + case B2R2_BLT_FMT_1_BIT_A1: + case B2R2_BLT_FMT_8_BIT_A8: + return B2R2_FMT_TYPE_RASTER; + case B2R2_BLT_FMT_YUV420_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_PLANAR: + case B2R2_BLT_FMT_YUV444_PACKED_PLANAR: + return B2R2_FMT_TYPE_PLANAR; + case B2R2_BLT_FMT_YUV420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU420_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YVU422_PACKED_SEMI_PLANAR: + case B2R2_BLT_FMT_YUV420_PACKED_SEMIPLANAR_MB_STE: + case B2R2_BLT_FMT_YUV422_PACKED_SEMIPLANAR_MB_STE: + return B2R2_FMT_TYPE_SEMI_PLANAR; + default: + return B2R2_FMT_TYPE_RASTER; + } +} + +#ifdef CONFIG_DEBUG_FS +/** + * sprintf_req() - Builds a string representing the request, for debug + * + * @request:Request that should be encoded into a string + * @buf: Receiving buffer + * @size: Size of receiving buffer + * + * Returns number of characters in string, excluding null terminator + */ +int sprintf_req(struct b2r2_blt_request *request, char *buf, int size) +{ + size_t dev_size = 0; + + /* generic request info */ + dev_size += sprintf(buf + dev_size, + "instance : 0x%08lX\n", + (unsigned long) request->instance); + dev_size += sprintf(buf + dev_size, + "size : %d bytes\n", request->user_req.size); + dev_size += sprintf(buf + dev_size, + "flags : 0x%08lX\n", + (unsigned long) request->user_req.flags); + dev_size 
+= sprintf(buf + dev_size, + "transform : %d\n", + (int) request->user_req.transform); + dev_size += sprintf(buf + dev_size, + "prio : %d\n", request->user_req.transform); + dev_size += sprintf(buf + dev_size, + "global_alpha : %d\n", + (int) request->user_req.global_alpha); + dev_size += sprintf(buf + dev_size, + "report1 : 0x%08lX\n", + (unsigned long) request->user_req.report1); + dev_size += sprintf(buf + dev_size, + "report2 : 0x%08lX\n", + (unsigned long) request->user_req.report2); + dev_size += sprintf(buf + dev_size, + "request_id : 0x%08lX\n\n", + (unsigned long) request->request_id); + + /* src info */ + dev_size += sprintf(buf + dev_size, + "src_img.fmt : %#010x\n", + request->user_req.src_img.fmt); + dev_size += sprintf(buf + dev_size, + "src_img.buf : {type=%d, hwmem_buf_name=%d, fd=%d, " + "offset=%d, len=%d}\n", + request->user_req.src_img.buf.type, + request->user_req.src_img.buf.hwmem_buf_name, + request->user_req.src_img.buf.fd, + request->user_req.src_img.buf.offset, + request->user_req.src_img.buf.len); + dev_size += sprintf(buf + dev_size, + "src_img : {width=%d, height=%d, pitch=%d}\n", + request->user_req.src_img.width, + request->user_req.src_img.height, + request->user_req.src_img.pitch); + dev_size += sprintf(buf + dev_size, + "src_mask.fmt : %#010x\n", + request->user_req.src_mask.fmt); + dev_size += sprintf(buf + dev_size, + "src_mask.buf : {type=%d, hwmem_buf_name=%d, fd=%d," + " offset=%d, len=%d}\n", + request->user_req.src_mask.buf.type, + request->user_req.src_mask.buf.hwmem_buf_name, + request->user_req.src_mask.buf.fd, + request->user_req.src_mask.buf.offset, + request->user_req.src_mask.buf.len); + dev_size += sprintf(buf + dev_size, + "src_mask : {width=%d, height=%d, pitch=%d}\n", + request->user_req.src_mask.width, + request->user_req.src_mask.height, + request->user_req.src_mask.pitch); + dev_size += sprintf(buf + dev_size, + "src_rect : {x=%d, y=%d, width=%d, height=%d}\n", + request->user_req.src_rect.x, + 
request->user_req.src_rect.y, + request->user_req.src_rect.width, + request->user_req.src_rect.height); + dev_size += sprintf(buf + dev_size, + "src_color : 0x%08lX\n\n", + (unsigned long) request->user_req.src_color); + + /* bg info */ + dev_size += sprintf(buf + dev_size, + "bg_img.fmt : %#010x\n", + request->user_req.bg_img.fmt); + dev_size += sprintf(buf + dev_size, + "bg_img.buf : {type=%d, hwmem_buf_name=%d, fd=%d," + " offset=%d, len=%d}\n", + request->user_req.bg_img.buf.type, + request->user_req.bg_img.buf.hwmem_buf_name, + request->user_req.bg_img.buf.fd, + request->user_req.bg_img.buf.offset, + request->user_req.bg_img.buf.len); + dev_size += sprintf(buf + dev_size, + "bg_img : {width=%d, height=%d, pitch=%d}\n", + request->user_req.bg_img.width, + request->user_req.bg_img.height, + request->user_req.bg_img.pitch); + dev_size += sprintf(buf + dev_size, + "bg_rect : {x=%d, y=%d, width=%d, height=%d}\n\n", + request->user_req.bg_rect.x, + request->user_req.bg_rect.y, + request->user_req.bg_rect.width, + request->user_req.bg_rect.height); + + /* dst info */ + dev_size += sprintf(buf + dev_size, + "dst_img.fmt : %#010x\n", + request->user_req.dst_img.fmt); + dev_size += sprintf(buf + dev_size, + "dst_img.buf : {type=%d, hwmem_buf_name=%d, fd=%d," + " offset=%d, len=%d}\n", + request->user_req.dst_img.buf.type, + request->user_req.dst_img.buf.hwmem_buf_name, + request->user_req.dst_img.buf.fd, + request->user_req.dst_img.buf.offset, + request->user_req.dst_img.buf.len); + dev_size += sprintf(buf + dev_size, + "dst_img : {width=%d, height=%d, pitch=%d}\n", + request->user_req.dst_img.width, + request->user_req.dst_img.height, + request->user_req.dst_img.pitch); + dev_size += sprintf(buf + dev_size, + "dst_rect : {x=%d, y=%d, width=%d, height=%d}\n", + request->user_req.dst_rect.x, + request->user_req.dst_rect.y, + request->user_req.dst_rect.width, + request->user_req.dst_rect.height); + dev_size += sprintf(buf + dev_size, + "dst_clip_rect : {x=%d, y=%d, 
width=%d, height=%d}\n", + request->user_req.dst_clip_rect.x, + request->user_req.dst_clip_rect.y, + request->user_req.dst_clip_rect.width, + request->user_req.dst_clip_rect.height); + dev_size += sprintf(buf + dev_size, + "dst_color : 0x%08lX\n\n", + (unsigned long) request->user_req.dst_color); + + dev_size += sprintf(buf + dev_size, + "src_resolved.physical : 0x%08lX\n", + (unsigned long) request->src_resolved. + physical_address); + dev_size += sprintf(buf + dev_size, + "src_resolved.virtual : 0x%08lX\n", + (unsigned long) request->src_resolved.virtual_address); + dev_size += sprintf(buf + dev_size, + "src_resolved.filep : 0x%08lX\n", + (unsigned long) request->src_resolved.filep); + dev_size += sprintf(buf + dev_size, + "src_resolved.filep_physical_start : 0x%08lX\n", + (unsigned long) request->src_resolved. + file_physical_start); + dev_size += sprintf(buf + dev_size, + "src_resolved.filep_virtual_start : 0x%08lX\n", + (unsigned long) request->src_resolved.file_virtual_start); + dev_size += sprintf(buf + dev_size, + "src_resolved.file_len : %d\n\n", + request->src_resolved.file_len); + + dev_size += sprintf(buf + dev_size, + "src_mask_resolved.physical : 0x%08lX\n", + (unsigned long) request->src_mask_resolved. + physical_address); + dev_size += sprintf(buf + dev_size, + "src_mask_resolved.virtual : 0x%08lX\n", + (unsigned long) request->src_mask_resolved.virtual_address); + dev_size += sprintf(buf + dev_size, + "src_mask_resolved.filep : 0x%08lX\n", + (unsigned long) request->src_mask_resolved.filep); + dev_size += sprintf(buf + dev_size, + "src_mask_resolved.filep_physical_start : 0x%08lX\n", + (unsigned long) request->src_mask_resolved. + file_physical_start); + dev_size += sprintf(buf + dev_size, + "src_mask_resolved.filep_virtual_start : 0x%08lX\n", + (unsigned long) request->src_mask_resolved. 
+ file_virtual_start); + dev_size += sprintf(buf + dev_size, + "src_mask_resolved.file_len : %d\n\n", + request->src_mask_resolved.file_len); + + dev_size += sprintf(buf + dev_size, + "dst_resolved.physical : 0x%08lX\n", + (unsigned long) request->dst_resolved. + physical_address); + dev_size += sprintf(buf + dev_size, + "dst_resolved.virtual : 0x%08lX\n", + (unsigned long) request->dst_resolved.virtual_address); + dev_size += sprintf(buf + dev_size, + "dst_resolved.filep : 0x%08lX\n", + (unsigned long) request->dst_resolved.filep); + dev_size += sprintf(buf + dev_size, + "dst_resolved.filep_physical_start : 0x%08lX\n", + (unsigned long) request->dst_resolved. + file_physical_start); + dev_size += sprintf(buf + dev_size, + "dst_resolved.filep_virtual_start : 0x%08lX\n", + (unsigned long) request->dst_resolved.file_virtual_start); + dev_size += sprintf(buf + dev_size, + "dst_resolved.file_len : %d\n\n", + request->dst_resolved.file_len); + + return dev_size; +} +#endif + +void b2r2_recalculate_rects(struct device *dev, + struct b2r2_blt_req *req) +{ + struct b2r2_blt_rect new_dst_rect; + struct b2r2_blt_rect new_src_rect; + struct b2r2_blt_rect new_bg_rect; + + b2r2_trim_rects(dev, + req, &new_bg_rect, &new_dst_rect, &new_src_rect); + + req->dst_rect = new_dst_rect; + req->src_rect = new_src_rect; + if (req->flags & B2R2_BLT_FLAG_BG_BLEND) + req->bg_rect = new_bg_rect; +} diff --git a/drivers/video/b2r2/b2r2_utils.h b/drivers/video/b2r2/b2r2_utils.h new file mode 100644 index 00000000000..081ac1f4848 --- /dev/null +++ b/drivers/video/b2r2/b2r2_utils.h @@ -0,0 +1,89 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson B2R2 utils + * + * Author: Johan Mossberg <johan.xx.mossberg@stericsson.com> for ST-Ericsson + * + * License terms: GNU General Public License (GPL), version 2. 
 */

#ifndef _LINUX_DRIVERS_VIDEO_B2R2_UTILS_H_
#define _LINUX_DRIVERS_VIDEO_B2R2_UTILS_H_

#include <video/b2r2_blt.h>

#include "b2r2_internal.h"

/* Largest coordinate/size value usable in the rect helpers. */
extern const s32 b2r2_s32_max;

/* Scaling and rectangle helpers. */
int calculate_scale_factor(struct device *dev,
		u32 from, u32 to, u16 *sf_out);
void b2r2_get_img_bounding_rect(struct b2r2_blt_img *img,
		struct b2r2_blt_rect *bounding_rect);

bool b2r2_is_zero_area_rect(struct b2r2_blt_rect *rect);
bool b2r2_is_rect_inside_rect(struct b2r2_blt_rect *rect1,
		struct b2r2_blt_rect *rect2);
bool b2r2_is_rect_gte_rect(struct b2r2_blt_rect *rect1,
		struct b2r2_blt_rect *rect2);
void b2r2_intersect_rects(struct b2r2_blt_rect *rect1,
		struct b2r2_blt_rect *rect2,
		struct b2r2_blt_rect *intersection);
void b2r2_trim_rects(struct device *dev,
		const struct b2r2_blt_req *req,
		struct b2r2_blt_rect *new_bg_rect,
		struct b2r2_blt_rect *new_dst_rect,
		struct b2r2_blt_rect *new_src_rect);

/* Bits-per-pixel queries (whole pixel and luma plane respectively). */
int b2r2_get_fmt_bpp(struct device *dev, enum b2r2_blt_fmt fmt);
int b2r2_get_fmt_y_bpp(struct device *dev, enum b2r2_blt_fmt fmt);

/* Format classification predicates. */
bool b2r2_is_single_plane_fmt(enum b2r2_blt_fmt fmt);
bool b2r2_is_independent_pixel_fmt(enum b2r2_blt_fmt fmt);
bool b2r2_is_ycbcri_fmt(enum b2r2_blt_fmt fmt);
bool b2r2_is_ycbcrsp_fmt(enum b2r2_blt_fmt fmt);
bool b2r2_is_ycbcrp_fmt(enum b2r2_blt_fmt fmt);
bool b2r2_is_ycbcr420_fmt(enum b2r2_blt_fmt fmt);
bool b2r2_is_ycbcr422_fmt(enum b2r2_blt_fmt fmt);
bool b2r2_is_ycbcr444_fmt(enum b2r2_blt_fmt fmt);
bool b2r2_is_mb_fmt(enum b2r2_blt_fmt fmt);

/*
 * Rounds up if an invalid width causes the pitch to be non byte aligned.
 */
u32 b2r2_calc_pitch_from_width(struct device *dev,
		s32 width, enum b2r2_blt_fmt fmt);
u32 b2r2_get_img_pitch(struct device *dev,
		struct b2r2_blt_img *img);
s32 b2r2_get_img_size(struct device *dev,
		struct b2r2_blt_img *img);

/* Small integer/alignment helpers. */
s32 b2r2_div_round_up(s32 dividend, s32 divisor);
bool b2r2_is_aligned(s32 value, s32 alignment);
s32 b2r2_align_up(s32 value, s32 alignment);

/* Alpha handling and color-space predicates. */
enum b2r2_ty b2r2_get_alpha_range(enum b2r2_blt_fmt fmt);
u8 b2r2_get_alpha(enum b2r2_blt_fmt fmt, u32 pixel);
u32 b2r2_set_alpha(enum b2r2_blt_fmt fmt, u8 alpha, u32 color);
bool b2r2_fmt_has_alpha(enum b2r2_blt_fmt fmt);
bool b2r2_is_rgb_fmt(enum b2r2_blt_fmt fmt);
bool b2r2_is_bgr_fmt(enum b2r2_blt_fmt fmt);
bool b2r2_is_yuv_fmt(enum b2r2_blt_fmt fmt);
bool b2r2_is_yvu_fmt(enum b2r2_blt_fmt fmt);
bool b2r2_is_yuv420_fmt(enum b2r2_blt_fmt fmt);
bool b2r2_is_yuv422_fmt(enum b2r2_blt_fmt fmt);
bool b2r2_is_yvu420_fmt(enum b2r2_blt_fmt fmt);
bool b2r2_is_yvu422_fmt(enum b2r2_blt_fmt fmt);
bool b2r2_is_yuv444_fmt(enum b2r2_blt_fmt fmt);
int b2r2_fmt_byte_pitch(enum b2r2_blt_fmt fmt, u32 width);
enum b2r2_native_fmt b2r2_to_native_fmt(enum b2r2_blt_fmt fmt);
u32 b2r2_to_RGB888(u32 color, const enum b2r2_blt_fmt fmt);
enum b2r2_fmt_type b2r2_get_fmt_type(enum b2r2_blt_fmt fmt);
#ifdef CONFIG_DEBUG_FS
int sprintf_req(struct b2r2_blt_request *request, char *buf, int size);
#endif
void b2r2_recalculate_rects(struct device *dev,
		struct b2r2_blt_req *req);

#endif
diff --git a/drivers/video/mcde/Kconfig b/drivers/video/mcde/Kconfig
new file mode 100644
index 00000000000..cb88a66d370
--- /dev/null
+++ b/drivers/video/mcde/Kconfig
@@ -0,0 +1,96 @@
config FB_MCDE
	tristate "MCDE support"
	depends on FB
	select FB_SYS_FILLRECT
	select FB_SYS_COPYAREA
	select FB_SYS_IMAGEBLIT
	select FB_SYS_FOPS
	select HWMEM
	---help---
	  This enables support for MCDE based frame buffer driver.

	  Please read the file <file:Documentation/fb/mcde.txt>

config FB_MCDE_DEBUG
	bool "MCDE debug messages"
	depends on FB_MCDE
	---help---
	  Say Y here if you want the MCDE driver to output debug messages

config FB_MCDE_VDEBUG
	bool "MCDE verbose debug messages"
	depends on FB_MCDE_DEBUG
	---help---
	  Say Y here if you want the MCDE driver to output more debug messages

config MCDE_FB_AVOID_REALLOC
	bool "MCDE early allocate framebuffer"
	default n
	depends on FB_MCDE
	---help---
	  If you say Y here maximum frame buffer size is allocated and
	  used for all resolutions. If you say N here, the frame buffer is
	  reallocated when resolution is changed. This reallocation might
	  fail because of fragmented memory. Note that this memory will
	  never be deallocated, while the MCDE framebuffer is used.

config MCDE_DISPLAY_DSI
	bool "Support for DSI displays within MCDE"
	depends on FB_MCDE
	default y

menu "MCDE DSI displays"
	depends on MCDE_DISPLAY_DSI

config MCDE_DISPLAY_GENERIC_DSI
	tristate "Generic DSI display driver"

config MCDE_DISPLAY_SAMSUNG_S6D16D0
	bool "Samsung S6D16D0 DSI display driver"
	---help---
	  Say Y if you have a Samsung S6D16D0 display panel.
+ +config MCDE_DISPLAY_SONY_ACX424AKP_DSI + tristate "Sony acx424akp DSI display driver" + +config MCDE_DISPLAY_AV8100 + tristate "AV8100 HDMI/CVBS display driver" + select AV8100 + +config MCDE_DISPLAY_HDMI_FB_AUTO_CREATE + bool "HDMI_FB_AUTO_CREATE" + default y + depends on MCDE_DISPLAY_AV8100 + ---help--- + Say Y if you want the HDMI frame buffer to be created on start + Say N if you want the HDMI frame buffer to be created when HDMI + cable is plugged (needs user space HDMIservice) + +endmenu + +config MCDE_DISPLAY_DPI + bool "Support for DPI displays within MCDE" + depends on FB_MCDE + default n + ---help--- + Add this option to choose which DPI display driver for MCDE to include + + DPI (Display Pixel Interface) is a MIPI Alliance standard used for + active-matrix LCDs. The DPI uses parallel data lines. + +menu "MCDE DPI displays" + depends on MCDE_DISPLAY_DPI + +config MCDE_DISPLAY_VUIB500_DPI + tristate "DPI display driver for the VUIB500 board" + ---help--- + The VUIB500 is an ST-Ericsson user interface board. 
+ +endmenu + +config MCDE_DISPLAY_AB8500_DENC + tristate "AB8500 CVBS display driver" + depends on FB_MCDE + select AB8500_DENC + + diff --git a/drivers/video/mcde/Makefile b/drivers/video/mcde/Makefile new file mode 100644 index 00000000000..82a78c2542a --- /dev/null +++ b/drivers/video/mcde/Makefile @@ -0,0 +1,23 @@ +mcde-objs += mcde_mod.o +mcde-objs += mcde_hw.o +mcde-objs += mcde_dss.o +mcde-objs += mcde_display.o +mcde-objs += mcde_bus.o +mcde-objs += mcde_fb.o +mcde-objs += mcde_debugfs.o +obj-$(CONFIG_FB_MCDE) += mcde.o + +obj-$(CONFIG_MCDE_DISPLAY_GENERIC_DSI) += display-generic_dsi.o +obj-$(CONFIG_MCDE_DISPLAY_SAMSUNG_S6D16D0) += display-samsung_s6d16d0.o +obj-$(CONFIG_MCDE_DISPLAY_SONY_ACX424AKP_DSI) += display-sony_acx424akp_dsi.o +obj-$(CONFIG_MCDE_DISPLAY_VUIB500_DPI) += display-vuib500-dpi.o +obj-$(CONFIG_MCDE_DISPLAY_AB8500_DENC) += display-ab8500.o +obj-$(CONFIG_MCDE_DISPLAY_AV8100) += display-av8100.o +obj-$(CONFIG_DISPLAY_FICTIVE) += display-fictive.o + +ifdef CONFIG_FB_MCDE_DEBUG +EXTRA_CFLAGS += -DDEBUG +endif +ifdef CONFIG_FB_MCDE_VDEBUG +EXTRA_CFLAGS += -DVERBOSE_DEBUG +endif diff --git a/drivers/video/mcde/display-ab8500.c b/drivers/video/mcde/display-ab8500.c new file mode 100644 index 00000000000..a761a5eec80 --- /dev/null +++ b/drivers/video/mcde/display-ab8500.c @@ -0,0 +1,494 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * AB8500 display driver + * + * Author: Marcel Tunnissen <marcel.tuennissen@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/regulator/consumer.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/mfd/ab8500/denc.h> +#include <video/mcde_display.h> +#include <video/mcde_display-ab8500.h> + +#define AB8500_DISP_TRACE dev_dbg(&ddev->dev, "%s\n", __func__) + +#define SDTV_PIXCLOCK 37037 + +/* + * PAL: + * Total nr of active lines: 576 + * Total nr of blanking lines: 49 + * total: 625 + */ +#define PAL_HBP 132 +#define PAL_HFP 12 +#define PAL_VBP_FIELD_1 22 +#define PAL_VBP_FIELD_2 23 +#define PAL_VFP_FIELD_1 2 +#define PAL_VFP_FIELD_2 2 + +/* + * NTSC (ITU-R BT.470-5): + * Total nr of active lines: 486 + * Total nr of blanking lines: 39 + * total: 525 + */ +#define NTSC_ORG_HBP 122 +#define NTSC_ORG_HFP 16 +#define NTSC_ORG_VBP_FIELD_1 16 +#define NTSC_ORG_VBP_FIELD_2 17 +#define NTSC_ORG_VFP_FIELD_1 3 +#define NTSC_ORG_VFP_FIELD_2 3 + +/* + * NTSC (DV variant): + * Total nr of active lines: 480 + * Total nr of blanking lines: 45 + * total: 525 + */ +#define NTSC_HBP 122 +#define NTSC_HFP 16 +#define NTSC_VBP_FIELD_1 19 +#define NTSC_VBP_FIELD_2 20 +#define NTSC_VFP_FIELD_1 3 +#define NTSC_VFP_FIELD_2 3 + +struct display_driver_data { + struct ab8500_denc_conf denc_conf; + struct platform_device *denc_dev; + int nr_regulators; + struct regulator **regulator; +}; + +static int try_video_mode(struct mcde_display_device *ddev, + struct mcde_video_mode *video_mode); +static int set_video_mode(struct mcde_display_device *ddev, + struct mcde_video_mode *video_mode); +static int set_power_mode(struct mcde_display_device *ddev, + enum mcde_display_power_mode power_mode); +static int on_first_update(struct mcde_display_device *ddev); +static int display_update(struct mcde_display_device *ddev, + bool tripple_buffer); + +static int __devinit ab8500_probe(struct mcde_display_device *ddev) +{ + int ret = 0; + int i; + struct ab8500_display_platform_data *pdata = 
ddev->dev.platform_data; + struct display_driver_data *driver_data; + + AB8500_DISP_TRACE; + + if (pdata == NULL) { + dev_err(&ddev->dev, "%s:Platform data missing\n", __func__); + return -EINVAL; + } + if (ddev->port->type != MCDE_PORTTYPE_DPI) { + dev_err(&ddev->dev, "%s:Invalid port type %d\n", __func__, + ddev->port->type); + return -EINVAL; + } + + driver_data = (struct display_driver_data *) + kzalloc(sizeof(struct display_driver_data), GFP_KERNEL); + if (!driver_data) { + dev_err(&ddev->dev, "Failed to allocate driver data\n"); + return -ENOMEM; + } + driver_data->denc_dev = ab8500_denc_get_device(); + if (!driver_data->denc_dev) { + dev_err(&ddev->dev, "Failed to get DENC device\n"); + ret = -ENODEV; + goto dev_get_failed; + } + + driver_data->regulator = kzalloc(pdata->nr_regulators * + sizeof(struct regulator *), GFP_KERNEL); + if (!driver_data->regulator) { + dev_err(&ddev->dev, "Failed to allocate regulator list\n"); + ret = -ENOMEM; + goto reg_alloc_failed; + } + for (i = 0; i < pdata->nr_regulators; i++) { + driver_data->regulator[i] = regulator_get(&ddev->dev, + pdata->regulator_id[i]); + if (IS_ERR(driver_data->regulator[i])) { + ret = PTR_ERR(driver_data->regulator[i]); + dev_warn(&ddev->dev, "%s:Failed to get regulator %s\n", + __func__, pdata->regulator_id[i]); + goto regulator_get_failed; + } + } + driver_data->nr_regulators = pdata->nr_regulators; + + dev_set_drvdata(&ddev->dev, driver_data); + + ddev->try_video_mode = try_video_mode; + ddev->set_video_mode = set_video_mode; + ddev->set_power_mode = set_power_mode; + ddev->on_first_update = on_first_update; + ddev->update = display_update; + + return 0; + +regulator_get_failed: + for (i--; i >= 0; i--) + regulator_put(driver_data->regulator[i]); + kfree(driver_data->regulator); + driver_data->regulator = NULL; +reg_alloc_failed: + ab8500_denc_put_device(driver_data->denc_dev); +dev_get_failed: + kfree(driver_data); + return ret; +} + +static int __devexit ab8500_remove(struct 
mcde_display_device *ddev) +{ + struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev); + AB8500_DISP_TRACE; + + ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF); + + if (driver_data->regulator) { + int i; + for (i = driver_data->nr_regulators - 1; i >= 0; i--) + regulator_put(driver_data->regulator[i]); + kfree(driver_data->regulator); + driver_data->regulator = NULL; + driver_data->nr_regulators = 0; + } + ab8500_denc_put_device(driver_data->denc_dev); + kfree(driver_data); + return 0; +} + +static int ab8500_resume(struct mcde_display_device *ddev) +{ + int ret = 0; + AB8500_DISP_TRACE; + + ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY); + if (ret < 0) + dev_warn(&ddev->dev, "%s: Failed to resume display\n", + __func__); + + return ret; +} + +static int ab8500_suspend(struct mcde_display_device *ddev, pm_message_t state) +{ + int ret = 0; + AB8500_DISP_TRACE; + + ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF); + if (ret < 0) + dev_warn(&ddev->dev, "%s: Failed to suspend display\n", + __func__); + + return ret; +} + + +static struct mcde_display_driver ab8500_driver = { + .probe = ab8500_probe, + .remove = ab8500_remove, + .suspend = ab8500_suspend, + .resume = ab8500_resume, + .driver = { + .name = "mcde_tv_ab8500", + }, +}; + +static void print_vmode(struct mcde_video_mode *vmode) +{ + pr_debug("resolution: %dx%d\n", vmode->xres, vmode->yres); + pr_debug(" pixclock: %d\n", vmode->pixclock); + pr_debug(" hbp: %d\n", vmode->hbp); + pr_debug(" hfp: %d\n", vmode->hfp); + pr_debug(" vbp: %d\n", vmode->vbp); + pr_debug(" vfp: %d\n", vmode->vfp); + pr_debug("interlaced: %s\n", vmode->interlaced ? 
"true" : "false"); +} + +static int try_video_mode( + struct mcde_display_device *ddev, struct mcde_video_mode *video_mode) +{ + AB8500_DISP_TRACE; + + if (ddev == NULL || video_mode == NULL) { + dev_warn(&ddev->dev, "%s:ddev = NULL or video_mode = NULL\n", + __func__); + return -EINVAL; + } + + if (video_mode->xres != 720) { + dev_warn(&ddev->dev, + "%s:Failed to find video mode x=%d, y=%d\n", + __func__, video_mode->xres, video_mode->yres); + return -EINVAL; + } + + /* TODO: move this part to MCDE: mcde_dss_try_video_mode? */ + /* check for PAL */ + switch (video_mode->yres) { + case 576: + /* set including SAV/EAV: */ + video_mode->hbp = PAL_HBP; + video_mode->hfp = PAL_HFP; + video_mode->vbp = PAL_VBP_FIELD_1 + PAL_VBP_FIELD_2; + video_mode->vfp = PAL_VFP_FIELD_1 + PAL_VFP_FIELD_2; + video_mode->interlaced = true; + video_mode->pixclock = SDTV_PIXCLOCK; + break; + case 480: + /* set including SAV/EAV */ + video_mode->hbp = NTSC_HBP; + video_mode->hfp = NTSC_HFP; + video_mode->vbp = NTSC_VBP_FIELD_1 + NTSC_VBP_FIELD_2; + video_mode->vfp = NTSC_VFP_FIELD_1 + NTSC_VFP_FIELD_2; + video_mode->interlaced = true; + video_mode->pixclock = SDTV_PIXCLOCK; + break; + case 486: + /* set including SAV/EAV */ + video_mode->hbp = NTSC_ORG_HBP; + video_mode->hfp = NTSC_ORG_HFP; + video_mode->vbp = NTSC_ORG_VBP_FIELD_1 + NTSC_ORG_VBP_FIELD_2; + video_mode->vfp = NTSC_ORG_VFP_FIELD_1 + NTSC_ORG_VFP_FIELD_2; + video_mode->interlaced = true; + video_mode->pixclock = SDTV_PIXCLOCK; + break; + default: + dev_warn(&ddev->dev, + "%s:Failed to find video mode x=%d, y=%d\n", + __func__, video_mode->xres, video_mode->yres); + return -EINVAL; + } + + print_vmode(video_mode); + + return 0; + +} + +static int set_video_mode( + struct mcde_display_device *ddev, struct mcde_video_mode *video_mode) +{ + int res; + struct ab8500_display_platform_data *pdata = ddev->dev.platform_data; + struct display_driver_data *driver_data = + (struct display_driver_data *)dev_get_drvdata(&ddev->dev); + 
AB8500_DISP_TRACE; + + if (ddev == NULL || video_mode == NULL) { + dev_warn(&ddev->dev, "%s:ddev = NULL or video_mode = NULL\n", + __func__); + return -EINVAL; + } + ddev->video_mode = *video_mode; + + if (video_mode->xres != 720) { + dev_warn(&ddev->dev, "%s:Failed to set video mode x=%d, y=%d\n", + __func__, video_mode->xres, video_mode->yres); + return -EINVAL; + } + + /* check for PAL BDGHI and N */ + switch (video_mode->yres) { + case 576: + driver_data->denc_conf.TV_std = TV_STD_PAL_BDGHI; + /* TODO: how to choose LOW DEF FILTER */ + driver_data->denc_conf.cr_filter = TV_CR_PAL_HIGH_DEF_FILTER; + /* TODO: PAL N (e.g. uses a setup of 7.5 IRE) */ + driver_data->denc_conf.black_level_setup = false; + break; + case 480: /* NTSC, PAL M DV variant */ + case 486: /* NTSC, PAL M original */ + /* TODO: PAL M */ + driver_data->denc_conf.TV_std = TV_STD_NTSC_M; + /* TODO: how to choose LOW DEF FILTER */ + driver_data->denc_conf.cr_filter = TV_CR_NTSC_HIGH_DEF_FILTER; + driver_data->denc_conf.black_level_setup = true; + break; + default: + dev_warn(&ddev->dev, "%s:Failed to set video mode x=%d, y=%d\n", + __func__, video_mode->xres, video_mode->yres); + return -EINVAL; + } + + + driver_data->denc_conf.progressive = !video_mode->interlaced; + driver_data->denc_conf.act_output = true; + driver_data->denc_conf.test_pattern = false; + driver_data->denc_conf.partial_blanking = true; + driver_data->denc_conf.blank_all = false; + driver_data->denc_conf.suppress_col = false; + driver_data->denc_conf.phase_reset_mode = TV_PHASE_RST_MOD_DISABLE; + driver_data->denc_conf.dac_enable = false; + driver_data->denc_conf.act_dc_output = true; + + set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY); + if (pdata->rgb_2_yCbCr_transform) + mcde_chnl_set_col_convert(ddev->chnl_state, + pdata->rgb_2_yCbCr_transform, + MCDE_CONVERT_RGB_2_YCBCR); + mcde_chnl_stop_flow(ddev->chnl_state); + res = mcde_chnl_set_video_mode(ddev->chnl_state, &ddev->video_mode); + if (res < 0) { + dev_warn(&ddev->dev, 
"%s:Failed to set video mode on channel\n", + __func__); + + return res; + } + ddev->update_flags |= UPDATE_FLAG_VIDEO_MODE; + + return 0; +} + +static int set_power_mode(struct mcde_display_device *ddev, + enum mcde_display_power_mode power_mode) +{ + int ret = 0; + int i; + struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev); + AB8500_DISP_TRACE; + + /* OFF -> STANDBY */ + if (ddev->power_mode == MCDE_DISPLAY_PM_OFF && + power_mode != MCDE_DISPLAY_PM_OFF) { + dev_dbg(&ddev->dev, "off -> standby\n"); + if (ddev->platform_enable) { + ret = ddev->platform_enable(ddev); + if (ret) + goto error; + } + if (driver_data->regulator) { + for (i = 0; i < driver_data->nr_regulators; i++) { + ret = regulator_enable( + driver_data->regulator[i]); + if (ret) + goto off_to_standby_failed; + dev_dbg(&ddev->dev, "regulator %d on\n", i); + } + } + ab8500_denc_power_up(driver_data->denc_dev); + ab8500_denc_reset(driver_data->denc_dev, true); + ddev->power_mode = MCDE_DISPLAY_PM_STANDBY; + } + /* STANDBY -> ON */ + if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY && + power_mode == MCDE_DISPLAY_PM_ON) { + dev_dbg(&ddev->dev, "standby -> on\n"); + ddev->power_mode = MCDE_DISPLAY_PM_ON; + } + /* ON -> STANDBY */ + if (ddev->power_mode == MCDE_DISPLAY_PM_ON && + power_mode <= MCDE_DISPLAY_PM_STANDBY) { + dev_dbg(&ddev->dev, "on -> standby\n"); + ab8500_denc_reset(driver_data->denc_dev, false); + ddev->power_mode = MCDE_DISPLAY_PM_STANDBY; + } + /* STANDBY -> OFF */ + if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY && + power_mode == MCDE_DISPLAY_PM_OFF) { + bool error = false; + dev_dbg(&ddev->dev, "standby -> off\n"); + if (driver_data->regulator) { + for (i = 0; i < driver_data->nr_regulators; i++) { + ret = regulator_disable( + driver_data->regulator[i]); + /* continue in case of an error */ + error |= (ret != 0); + dev_dbg(&ddev->dev, "regulator %d off\n", i); + } + } + if (ddev->platform_disable) { + ret = ddev->platform_disable(ddev); + error |= (ret != 0); + } + 
if (error) { + /* the latest error code is returned */ + goto error; + } + memset(&(ddev->video_mode), 0, sizeof(struct mcde_video_mode)); + ab8500_denc_power_down(driver_data->denc_dev); + ddev->power_mode = MCDE_DISPLAY_PM_OFF; + } + + return 0; + + /* In case of an error, try to leave in off-state */ +off_to_standby_failed: + for (i--; i >= 0; i--) + regulator_disable(driver_data->regulator[i]); + ddev->platform_disable(ddev); + +error: + dev_err(&ddev->dev, "Failed to set power mode"); + return ret; +} + +static int on_first_update(struct mcde_display_device *ddev) +{ + struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev); + + ab8500_denc_conf(driver_data->denc_dev, &driver_data->denc_conf); + ab8500_denc_conf_plug_detect(driver_data->denc_dev, true, false, + TV_PLUG_TIME_2S); + ab8500_denc_mask_int_plug_det(driver_data->denc_dev, false, false); + ddev->first_update = false; + return 0; +} + +static int display_update(struct mcde_display_device *ddev, bool tripple_buffer) +{ + int ret; + + if (ddev->first_update) + on_first_update(ddev); + if (ddev->power_mode != MCDE_DISPLAY_PM_ON && ddev->set_power_mode) { + ret = set_power_mode(ddev, MCDE_DISPLAY_PM_ON); + if (ret < 0) + goto error; + } + ret = mcde_chnl_update(ddev->chnl_state, &ddev->update_area, + tripple_buffer); + if (ret < 0) + goto error; +out: + return ret; +error: + dev_warn(&ddev->dev, "%s:Failed to set power mode to on\n", __func__); + goto out; +} + +/* Module init */ +static int __init mcde_display_tvout_ab8500_init(void) +{ + pr_debug("%s\n", __func__); + + return mcde_display_driver_register(&ab8500_driver); +} +late_initcall(mcde_display_tvout_ab8500_init); + +static void __exit mcde_display_tvout_ab8500_exit(void) +{ + pr_debug("%s\n", __func__); + + mcde_display_driver_unregister(&ab8500_driver); +} +module_exit(mcde_display_tvout_ab8500_exit); + +MODULE_AUTHOR("Marcel Tunnissen <marcel.tuennissen@stericsson.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ST-Ericsson 
MCDE TVout through AB8500 display driver"); diff --git a/drivers/video/mcde/display-av8100.c b/drivers/video/mcde/display-av8100.c new file mode 100644 index 00000000000..70750998824 --- /dev/null +++ b/drivers/video/mcde/display-av8100.c @@ -0,0 +1,1656 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson HDMI display driver + * + * Author: Per Persson <per-xb-persson@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/gpio.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/compdev.h> +#include <linux/clonedev.h> + +#include <video/mcde_fb.h> +#include <video/mcde_display.h> +#include <video/mcde_display-av8100.h> +#include <video/av8100.h> +#include <video/hdmi.h> + +#define SWITCH_HELPSTR ", 0=HDMI, 1=SDTV, 2=DVI\n" + +/* AVI Infoframe */ +#define AVI_INFOFRAME_DATA_SIZE 13 +#define AVI_INFOFRAME_TYPE 0x82 +#define AVI_INFOFRAME_VERSION 0x02 +#define AVI_INFOFRAME_DB1 0x10 /* Active Information present */ +#define AVI_INFOFRAME_DB2 0x08 /* Active Portion Aspect ratio */ + +#ifdef CONFIG_DISPLAY_AV8100_TRIPPLE_BUFFER +#define NUM_FB_BUFFERS 3 +#else +#define NUM_FB_BUFFERS 2 +#endif + +#define DSI_HS_FREQ_HZ 840320000 +#define DSI_LP_FREQ_HZ 19200000 + +struct cea_vesa_video_mode { + u32 cea; + u32 vesa_cea_nr; + struct mcde_video_mode *video_mode; +}; + +static int hdmi_try_video_mode( + struct mcde_display_device *ddev, struct mcde_video_mode *video_mode); +static int hdmi_set_video_mode( + struct mcde_display_device *ddev, struct mcde_video_mode *video_mode); +static int hdmi_set_pixel_format( + struct mcde_display_device *ddev, enum mcde_ovly_pix_fmt format); +static struct mcde_video_mode *video_mode_get(struct mcde_display_device *ddev, + u8 cea, u8 vesa_cea_nr); +static int ceanr_convert(struct mcde_display_device *ddev, + 
u8 cea, u8 vesa_cea_nr, u16 *w, u16 *h); + +static ssize_t show_hdmisdtvswitch(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t store_hdmisdtvswitch(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); +static ssize_t show_input_pixel_format(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t store_input_pixel_format(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); +static ssize_t show_disponoff(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t store_disponoff(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); +static ssize_t show_vesacea(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t show_timing(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t store_timing(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); +static ssize_t store_stayalive(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); +static DEVICE_ATTR(disponoff, S_IRUGO | S_IWUSR, show_disponoff, + store_disponoff); +static DEVICE_ATTR(vesacea, S_IRUGO, show_vesacea, NULL); +static DEVICE_ATTR(timing, S_IRUGO | S_IWUSR, show_timing, store_timing); +static DEVICE_ATTR(stayalive, S_IWUSR, NULL, store_stayalive); + +static DEVICE_ATTR(hdmisdtvswitch, S_IRUGO | S_IWUSR, show_hdmisdtvswitch, + store_hdmisdtvswitch); +static DEVICE_ATTR(input_pixel_format, S_IRUGO | S_IWUSR, + show_input_pixel_format, store_input_pixel_format); + +static ssize_t show_hdmisdtvswitch(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mcde_display_device *mdev = to_mcde_display_device(dev); + int index; + + dev_dbg(dev, "%s\n", __func__); + + sprintf(buf, "%1x%s", mdev->port->hdmi_sdtv_switch, SWITCH_HELPSTR); + index = 1 + strlen(SWITCH_HELPSTR) + 1; + + return index; +} + +static 
ssize_t store_hdmisdtvswitch(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct mcde_display_device *mdev = to_mcde_display_device(dev); + dev_dbg(dev, "%s\n", __func__); + + if (count > 0) { + if ((*buf == 0) || (*buf == '0')) { + dev_dbg(dev, "hdmi/sdtv switch = hdmi\n"); + mdev->port->hdmi_sdtv_switch = HDMI_SWITCH; + mdev->native_x_res = NATIVE_XRES_HDMI; + mdev->native_y_res = NATIVE_YRES_HDMI; + } else if ((*buf == 1) || (*buf == '1')) { + dev_dbg(dev, "hdmi/sdtv switch = sdtv\n"); + mdev->port->hdmi_sdtv_switch = SDTV_SWITCH; + mdev->native_x_res = NATIVE_XRES_SDTV; + mdev->native_y_res = NATIVE_YRES_SDTV; + } else if ((*buf == 2) || (*buf == '2')) { + dev_dbg(dev, "hdmi/sdtv switch = dvi\n"); + mdev->port->hdmi_sdtv_switch = DVI_SWITCH; + mdev->native_x_res = NATIVE_XRES_HDMI; + mdev->native_y_res = NATIVE_YRES_HDMI; + } + /* implicitely read by a memcmp in dss */ + mdev->video_mode.force_update = true; + } + + return count; +} + +static ssize_t show_input_pixel_format(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mcde_display_device *ddev = to_mcde_display_device(dev); + + return sprintf(buf, "%d\n", ddev->port->pixel_format); +} + +static ssize_t store_input_pixel_format(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct mcde_display_device *ddev = to_mcde_display_device(dev); + struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev); + + dev_dbg(dev, "%s\n", __func__); + if (count > 0) { + unsigned long input; + if (strict_strtoul(buf, 10, &input) != 0) + return -EINVAL; + switch (input) { + /* intentional fall through */ + case MCDE_PORTPIXFMT_DSI_16BPP: + case MCDE_PORTPIXFMT_DSI_18BPP: + case MCDE_PORTPIXFMT_DSI_18BPP_PACKED: + case MCDE_PORTPIXFMT_DSI_24BPP: + case MCDE_PORTPIXFMT_DSI_YCBCR422: + ddev->port->pixel_format = input; + break; + default: + dev_warn(&ddev->dev, "invalid format (%ld)\n", + input); + return 
-EINVAL; + break; + } + /* implicitely read by a memcmp in dss */ + ddev->video_mode.force_update = true; + driver_data->update_port_pixel_format = true; + } + + return count; +} + +static ssize_t show_disponoff(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mcde_display_device *ddev = to_mcde_display_device(dev); + struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev); + + dev_dbg(dev, "%s\n", __func__); + + if (ddev->fbi && driver_data->fbdevname) { + dev_dbg(dev, "name:%s\n", driver_data->fbdevname); + strcpy(buf, driver_data->fbdevname); + return strlen(driver_data->fbdevname) + 1; + } + return 0; +} + +static ssize_t store_disponoff(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct mcde_display_device *mdev = to_mcde_display_device(dev); + bool enable = false; + u8 cea = 0; + u8 vesa_cea_nr = 0; +#ifdef CONFIG_COMPDEV + struct mcde_fb *mfb; +#endif + + dev_dbg(dev, "%s\n", __func__); + + if ((count != DISPONOFF_SIZE) && (count != DISPONOFF_SIZE + 1)) + return -EINVAL; + + if ((*buf == '0') && (*(buf + 1) == '1')) + enable = true; + cea = (hex_to_bin(buf[2]) << 4) + hex_to_bin(buf[3]); + vesa_cea_nr = (hex_to_bin(buf[4]) << 4) + hex_to_bin(buf[5]); + dev_dbg(dev, "enable:%d cea:%d nr:%d\n", enable, cea, vesa_cea_nr); + + if (enable && !mdev->fbi) { + struct display_driver_data *driver_data = dev_get_drvdata(dev); + u16 w = mdev->native_x_res; + u16 h = mdev->native_y_res, vh; + int buffering = NUM_FB_BUFFERS; + struct fb_info *fbi; + + ceanr_convert(mdev, cea, vesa_cea_nr, &w, &h); + vh = h * buffering; + fbi = mcde_fb_create(mdev, w, h, w, vh, + mdev->default_pixel_format, FB_ROTATE_UR); + if (IS_ERR(fbi)) + dev_warn(dev, "fb create failed\n"); + else + driver_data->fbdevname = dev_name(fbi->dev); + +#ifdef CONFIG_COMPDEV + /* TODO need another way for compdev to get actual size */ + mdev->native_x_res = w; + mdev->native_y_res = h; + + mfb = to_mcde_fb(fbi); + /* Create 
a compdev overlay for this display */ + if (compdev_create(mdev, mfb->ovlys[0], false) < 0) { + dev_warn(&mdev->dev, + "Failed to create compdev for display %s\n", + mdev->name); + } else { + dev_dbg(&mdev->dev, "compdev created for (%s)\n", + mdev->name); + } +#ifdef CONFIG_CLONEDEV + if (clonedev_create()) { + dev_warn(&mdev->dev, + "Failed to create clonedev for display %s\n", + mdev->name); + } else { + dev_dbg(&mdev->dev, "clonedev created for (%s)\n", + mdev->name); + } +#endif +#endif + } else if (!enable && mdev->fbi) { +#ifdef CONFIG_CLONEDEV + clonedev_destroy(); +#endif +#ifdef CONFIG_COMPDEV + compdev_destroy(mdev); +#endif + mcde_fb_destroy(mdev); + } + + return count; +} + +static ssize_t show_timing(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mcde_display_device *ddev = to_mcde_display_device(dev); + struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev); + struct mcde_video_mode *video_mode; + int index; + + dev_dbg(dev, "%s\n", __func__); + + index = 0; + if (driver_data->video_mode) { + video_mode = driver_data->video_mode; + memcpy(buf + index, &video_mode->xres, sizeof(u32)); + index += sizeof(u32); + memcpy(buf + index, &video_mode->yres, sizeof(u32)); + index += sizeof(u32); + memcpy(buf + index, &video_mode->pixclock, sizeof(u32)); + index += sizeof(u32); + memcpy(buf + index, &video_mode->hbp, sizeof(u32)); + index += sizeof(u32); + memcpy(buf + index, &video_mode->hfp, sizeof(u32)); + index += sizeof(u32); + memcpy(buf + index, &video_mode->vbp, sizeof(u32)); + index += sizeof(u32); + memcpy(buf + index, &video_mode->vfp, sizeof(u32)); + index += sizeof(u32); + memcpy(buf + index, &video_mode->interlaced, sizeof(u32)); + index += sizeof(u32); + } + return index; +} + +static ssize_t store_timing(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct mcde_display_device *ddev = to_mcde_display_device(dev); + struct display_driver_data *driver_data = 
dev_get_drvdata(&ddev->dev); + + dev_dbg(dev, "%s\n", __func__); + + if (count != TIMING_SIZE) + return -EINVAL; + + driver_data->video_mode = video_mode_get(ddev, *buf, *(buf + 1)); + + return count; +} + +static ssize_t store_stayalive(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct mcde_display_device *ddev = to_mcde_display_device(dev); + + if (count != STAYALIVE_SIZE) + return -EINVAL; + + if ((*buf == 1) || (*buf == '1')) + ddev->stay_alive = true; + else + ddev->stay_alive = false; + + dev_dbg(dev, "%s %d\n", __func__, ddev->stay_alive); + + return count; +} + +static int ceanr_convert(struct mcde_display_device *ddev, + u8 cea, u8 vesa_cea_nr, u16 *w, u16 *h) +{ + struct mcde_video_mode *video_mode; + + dev_dbg(&ddev->dev, "%s\n", __func__); + video_mode = video_mode_get(ddev, cea, vesa_cea_nr); + if (video_mode) { + *w = video_mode->xres; + *h = video_mode->yres; + dev_dbg(&ddev->dev, "cea:%d nr:%d found\n", + cea, vesa_cea_nr); + return 0; + } + + return -EINVAL; +} + +/* Supported HDMI modes */ +static struct mcde_video_mode video_modes_supp_hdmi[] = { + /* 0 CEA #1 640_480_60_P */ + { + .xres = 640, .yres = 480, + .pixclock = 39682, + .hbp = 112, .hfp = 48, + .vbp = 33, .vfp = 12 + }, + /* 1 720_480_60_P */ + { + .xres = 720, .yres = 480, + .pixclock = 37000, + .hbp = 104, .hfp = 34, + .vbp = 30, .vfp = 15 + }, + /* 2 720_576_50_P */ + { + .xres = 720, .yres = 576, + .pixclock = 37037, + .hbp = 132, .hfp = 12, + .vbp = 44, .vfp = 5 + }, + /* 3 1280_720_60_P */ + { + .xres = 1280, .yres = 720, + .pixclock = 13468, + .hbp = 256, .hfp = 114, + .vbp = 20, .vfp = 10 + }, + /* 4 1280_720_50_P */ + { + .xres = 1280, .yres = 720, + .pixclock = 13468, + .hbp = 260, .hfp = 440, + .vbp = 25, .vfp = 5 + }, + /* 5 1280_720_30_P */ + { + .xres = 1280, .yres = 720, + .pixclock = 13468, + .hbp = 260, .hfp = 1760, + .vbp = 20, .vfp = 10 + }, + /* 6 1280_720_24_P */ + { + .xres = 1280, .yres = 720, + .pixclock = 16835, + 
.hbp = 260, .hfp = 1760, + .vbp = 20, .vfp = 10 + }, + /* 7 1280_720_25_P */ + { + .xres = 1280, .yres = 720, + .pixclock = 13468, + .hbp = 260, .hfp = 2420, + .vbp = 20, .vfp = 10 + }, + /* 8 1920_1080_30_P */ + { + .xres = 1920, .yres = 1080, + .pixclock = 13468, + .hbp = 189, .hfp = 91, + .vbp = 36, .vfp = 9 + }, + /* 9 1920_1080_24_P */ + { + .xres = 1920, .yres = 1080, + .pixclock = 13468, + .hbp = 170, .hfp = 660, + .vbp = 36, .vfp = 9 + }, + /* 10 1920_1080_25_P */ + { + .xres = 1920, .yres = 1080, + .pixclock = 13468, + .hbp = 192, .hfp = 528, + .vbp = 36, .vfp = 9 + }, + /* 11 720_480_60_I */ + { + .xres = 720, .yres = 480, + .pixclock = 74074, + .hbp = 126, .hfp = 12, + .vbp = 44, .vfp = 1, + .interlaced = true, + }, + /* 12 720_576_50_I */ + { + .xres = 720, .yres = 576, + .pixclock = 74074, + .hbp = 132, .hfp = 12, + .vbp = 44, .vfp = 5, + .interlaced = true, + }, + /* 13 1920_1080_50_I */ + { + .xres = 1920, .yres = 1080, + .pixclock = 13468, + .hbp = 192, .hfp = 528, + .vbp = 20, .vfp = 25, + .interlaced = true, + }, + /* 14 1920_1080_60_I */ + { + .xres = 1920, .yres = 1080, + .pixclock = 13468, + .hbp = 192, .hfp = 88, + .vbp = 20, .vfp = 25, + .interlaced = true, + }, + /* 15 VESA #9 800_600_60_P */ + { + .xres = 800, .yres = 600, + .pixclock = 25000, + .hbp = 168, .hfp = 88, + .vbp = 23, .vfp = 5, + .interlaced = false, + }, + /* 16 VESA #14 848_480_60_P */ + { + .xres = 848, .yres = 480, + .pixclock = 29630, + .hbp = 128, .hfp = 112, + .vbp = 23, .vfp = 14, + .interlaced = false, + }, + /* 17 VESA #16 1024_768_60_P */ + { + .xres = 1024, .yres = 768, + .pixclock = 15385, + .hbp = 160, .hfp = 160, + .vbp = 29, .vfp = 9, + .interlaced = false, + }, + /* 18 VESA #22 1280_768_60_P */ + { + .xres = 1280, .yres = 768, + .pixclock = 14652, + .hbp = 80, .hfp = 80, + .vbp = 12, .vfp = 10, + .interlaced = false, + }, + /* 19 VESA #23 1280_768_60_P */ + { + .xres = 1280, .yres = 768, + .pixclock = 12579, + .hbp = 192, .hfp = 192, + .vbp = 20, .vfp = 10, + 
.interlaced = false, + }, + /* 20 VESA #27 1280_800_60_P */ + { + .xres = 1280, .yres = 800, + .pixclock = 14085, + .hbp = 80, .hfp = 80, + .vbp = 14, .vfp = 9, + .interlaced = false, + }, + /* 21 VESA #28 1280_800_60_P */ + { + .xres = 1280, .yres = 800, + .pixclock = 11976, + .hbp = 200, .hfp = 200, + .vbp = 22, .vfp = 9, + .interlaced = false, + }, + /* 22 VESA #39 1360_768_60_P */ + { + .xres = 1360, .yres = 768, + .pixclock = 11696, + .hbp = 176, .hfp = 256, + .vbp = 18, .vfp = 9, + .interlaced = false, + }, + /* 23 VESA #81 1366_768_60_P */ + { + .xres = 1366, .yres = 768, + .pixclock = 11662, + .hbp = 213, .hfp = 213, + .vbp = 24, .vfp = 6, + .interlaced = false, + }, +}; + +/* Supported TVout modes */ +static struct mcde_video_mode video_modes_supp_sdtv[] = { + /* 720_480_60_I) */ + { + .xres = 720, .yres = 480, + .pixclock = 74074, + .hbp = 126, .hfp = 12, + .vbp = 44, .vfp = 1, + .interlaced = true, + }, + /* 720_576_50_I) */ + { + .xres = 720, .yres = 576, + .pixclock = 74074, + .hbp = 132, .hfp = 12, + .vbp = 44, .vfp = 5, + .interlaced = true, + }, +}; + +static struct cea_vesa_video_mode cea_vesa_video_mode[] = { + /* 640_480_60_P */ + { + .cea = 1, .vesa_cea_nr = 1, + .video_mode = &video_modes_supp_hdmi[0], + }, + /* 720_480_60_P */ + { + .cea = 1, .vesa_cea_nr = 2, + .video_mode = &video_modes_supp_hdmi[1], + }, + /* 720_480_60_P */ + { + .cea = 1, .vesa_cea_nr = 3, + .video_mode = &video_modes_supp_hdmi[1], + }, + /* 720_576_50_P */ + { + .cea = 1, .vesa_cea_nr = 17, + .video_mode = &video_modes_supp_hdmi[2], + }, + /* 720_576_50_P */ + { + .cea = 1, .vesa_cea_nr = 18, + .video_mode = &video_modes_supp_hdmi[2], + }, + /* 1280_720_60_P */ + { + .cea = 1, .vesa_cea_nr = 4, + .video_mode = &video_modes_supp_hdmi[3], + }, + /* 1280_720_50_P */ + { + .cea = 1, .vesa_cea_nr = 19, + .video_mode = &video_modes_supp_hdmi[4], + }, + /* 1280_720_30_P */ + { + .cea = 1, .vesa_cea_nr = 62, + .video_mode = &video_modes_supp_hdmi[5], + }, + /* 1280_720_24_P */ + 
{ + .cea = 1, .vesa_cea_nr = 60, + .video_mode = &video_modes_supp_hdmi[6], + }, + /* 1280_720_25_P */ + { + .cea = 1, .vesa_cea_nr = 61, + .video_mode = &video_modes_supp_hdmi[7], + }, + /* 1920_1080_30_P */ + { + .cea = 1, .vesa_cea_nr = 34, + .video_mode = &video_modes_supp_hdmi[8], + }, + /* 1920_1080_24_P */ + { + .cea = 1, .vesa_cea_nr = 32, + .video_mode = &video_modes_supp_hdmi[9], + }, + /* 1920_1080_25_P */ + { + .cea = 1, .vesa_cea_nr = 33, + .video_mode = &video_modes_supp_hdmi[10], + }, + /* 720_480_60_I) */ + { + .cea = 1, .vesa_cea_nr = 6, + .video_mode = &video_modes_supp_hdmi[11], + }, + /* 720_480_60_I) */ + { + .cea = 1, .vesa_cea_nr = 7, + .video_mode = &video_modes_supp_hdmi[11], + }, + /* 720_576_50_I) */ + { + .cea = 1, .vesa_cea_nr = 21, + .video_mode = &video_modes_supp_hdmi[12], + }, + /* 720_576_50_I) */ + { + .cea = 1, .vesa_cea_nr = 22, + .video_mode = &video_modes_supp_hdmi[12], + }, + /* 1920_1080_50_I) */ + { + .cea = 1, .vesa_cea_nr = 20, + .video_mode = &video_modes_supp_hdmi[13], + }, + /* 1920_1080_60_I) */ + { + .cea = 1, .vesa_cea_nr = 5, + .video_mode = &video_modes_supp_hdmi[14], + }, + /* VESA #4 640_480_60_P) */ + { + .cea = 0, .vesa_cea_nr = 4, + .video_mode = &video_modes_supp_hdmi[0], + }, + /* VESA #9 800_600_60_P) */ + { + .cea = 0, .vesa_cea_nr = 9, + .video_mode = &video_modes_supp_hdmi[15], + }, + /* VESA #14 848_480_60_P) */ + { + .cea = 0, .vesa_cea_nr = 14, + .video_mode = &video_modes_supp_hdmi[16], + }, + /* VESA #16 1024_768_60_P) */ + { + .cea = 0, .vesa_cea_nr = 16, + .video_mode = &video_modes_supp_hdmi[17], + }, + /* VESA #22 1280_768_60_P) */ + { + .cea = 0, .vesa_cea_nr = 22, + .video_mode = &video_modes_supp_hdmi[18], + }, + /* VESA #23 1280_768_60_P) */ + { + .cea = 0, .vesa_cea_nr = 23, + .video_mode = &video_modes_supp_hdmi[19], + }, + /* VESA #27 1280_800_60_P) */ + { + .cea = 0, .vesa_cea_nr = 27, + .video_mode = &video_modes_supp_hdmi[20], + }, + /* VESA #28 1280_800_60_P) */ + { + .cea = 0, 
.vesa_cea_nr = 28, + .video_mode = &video_modes_supp_hdmi[21], + }, + /* VESA #39 1360_768_60_P) */ + { + .cea = 0, .vesa_cea_nr = 39, + .video_mode = &video_modes_supp_hdmi[22], + }, + /* VESA #81 1366_768_60_P) */ + { + .cea = 0, .vesa_cea_nr = 81, + .video_mode = &video_modes_supp_hdmi[23], + }, +}; + +static ssize_t show_vesacea(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int findex; + + dev_dbg(dev, "%s\n", __func__); + + for (findex = 0; findex < ARRAY_SIZE(cea_vesa_video_mode); findex++) { + *(buf + findex * 2) = cea_vesa_video_mode[findex].cea; + *(buf + findex * 2 + 1) = + cea_vesa_video_mode[findex].vesa_cea_nr; + } + *(buf + findex * 2) = '\0'; + + return findex * 2 + 1; +} + +static struct mcde_video_mode *video_mode_get(struct mcde_display_device *ddev, + u8 cea, u8 vesa_cea_nr) +{ + int findex; + + dev_dbg(&ddev->dev, "%s\n", __func__); + + for (findex = 0; findex < ARRAY_SIZE(cea_vesa_video_mode); findex++) + if ((cea == cea_vesa_video_mode[findex].cea) && + (vesa_cea_nr == + cea_vesa_video_mode[findex].vesa_cea_nr)) { + dev_dbg(&ddev->dev, "cea:%d nr:%d\n", cea, vesa_cea_nr); + return cea_vesa_video_mode[findex].video_mode; + } + + return NULL; +} + +static u8 ceanr_get(struct mcde_display_device *ddev) +{ + int cnt; + int cea; + int vesa_cea_nr; + struct mcde_video_mode *vmode = &ddev->video_mode; + struct mcde_video_mode *vmode_try; + + if (!vmode) + return 0; + + dev_dbg(&ddev->dev, "%s\n", __func__); + + for (cnt = 0; cnt < ARRAY_SIZE(cea_vesa_video_mode); cnt++) { + vmode_try = cea_vesa_video_mode[cnt].video_mode; + cea = cea_vesa_video_mode[cnt].cea; + vesa_cea_nr = cea_vesa_video_mode[cnt].vesa_cea_nr; + + if (cea && vmode_try->xres == vmode->xres && + vmode_try->yres == vmode->yres && + vmode_try->pixclock == vmode->pixclock && + vmode_try->hbp == vmode->hbp && + vmode_try->hfp == vmode->hfp && + vmode_try->vbp == vmode->vbp && + vmode_try->vfp == vmode->vfp && + vmode_try->interlaced == vmode->interlaced) { + 
dev_dbg(&ddev->dev, "ceanr:%d\n", vesa_cea_nr); + return vesa_cea_nr; + } + } + + return 0; +} + +#define AV8100_MAX_LEVEL 255 + +static int hdmi_try_video_mode( + struct mcde_display_device *ddev, struct mcde_video_mode *video_mode) +{ + int index = 0; + int match_level = AV8100_MAX_LEVEL; + int found_index = -1; + struct mcde_video_mode *video_modes_supp; + int array_size; + + if (ddev == NULL || video_mode == NULL) { + pr_warning("%s:ddev = NULL or video_mode = NULL\n", __func__); + return -EINVAL; + } + + dev_vdbg(&ddev->dev, "%s\n", __func__); + + if (ddev->port->hdmi_sdtv_switch == SDTV_SWITCH) { + video_mode->interlaced = true; + video_modes_supp = video_modes_supp_sdtv; + array_size = ARRAY_SIZE(video_modes_supp_sdtv); + } else { + video_modes_supp = video_modes_supp_hdmi; + array_size = ARRAY_SIZE(video_modes_supp_hdmi); + } + + while (index < array_size) { + /* 1. Check if all parameters match */ + if ((video_mode->xres == video_modes_supp[index].xres) && + (video_mode->yres == video_modes_supp[index].yres) && + ((video_mode->xres + video_mode->hbp + + video_mode->hfp) == + (video_modes_supp[index].xres + + video_modes_supp[index].hbp + + video_modes_supp[index].hfp)) && + ((video_mode->yres + video_mode->vbp + video_mode->vfp) + == + (video_modes_supp[index].yres + + video_modes_supp[index].vbp + + video_modes_supp[index].vfp)) && + (video_mode->pixclock == + video_modes_supp[index].pixclock) && + (video_mode->interlaced == + video_modes_supp[index].interlaced)) { + match_level = 1; + found_index = index; + break; + } + + /* 2. 
Check if xres,yres,htot,vtot,interlaced match */ + if ((match_level > 2) && + (video_mode->xres == video_modes_supp[index].xres) && + (video_mode->yres == video_modes_supp[index].yres) && + ((video_mode->xres + video_mode->hbp + + video_mode->hfp) == + (video_modes_supp[index].xres + + video_modes_supp[index].hbp + + video_modes_supp[index].hfp)) && + ((video_mode->yres + video_mode->vbp + video_mode->vfp) + == + (video_modes_supp[index].yres + + video_modes_supp[index].vbp + + video_modes_supp[index].vfp)) && + (video_mode->interlaced == + video_modes_supp[index].interlaced)) { + match_level = 2; + found_index = index; + } + + /* 3. Check if xres,yres,pixelclock,interlaced match */ + if ((match_level > 3) && + (video_mode->xres == video_modes_supp[index].xres) && + (video_mode->yres == video_modes_supp[index].yres) && + (video_mode->interlaced == + video_modes_supp[index].interlaced) && + (video_mode->pixclock == + video_modes_supp[index].pixclock)) { + match_level = 3; + found_index = index; + } + + /* 4. Check if xres,yres,interlaced match */ + if ((match_level > 4) && + (video_mode->xres == video_modes_supp[index].xres) && + (video_mode->yres == video_modes_supp[index].yres) && + (video_mode->interlaced == + video_modes_supp[index].interlaced)) { + match_level = 4; + found_index = index; + } + + index++; + } + + if (found_index == -1) { + dev_dbg(&ddev->dev, "video_mode not accepted\n"); + dev_dbg(&ddev->dev, "xres:%d yres:%d pixclock:%d hbp:%d hfp:%d " + "vfp:%d vbp:%d intlcd:%d\n", + video_mode->xres, video_mode->yres, + video_mode->pixclock, + video_mode->hbp, video_mode->hfp, + video_mode->vfp, video_mode->vbp, + video_mode->interlaced); + return -EINVAL; + } + + memset(video_mode, 0, sizeof(struct mcde_video_mode)); + memcpy(video_mode, &video_modes_supp[found_index], + sizeof(struct mcde_video_mode)); + + dev_dbg(&ddev->dev, "%s:HDMI video_mode %d chosen. 
Level:%d\n", + __func__, found_index, match_level); + + return 0; +} + +static int hdmi_set_video_mode( + struct mcde_display_device *dev, struct mcde_video_mode *video_mode) +{ + int ret; + union av8100_configuration av8100_config; + struct mcde_display_hdmi_platform_data *pdata; + struct display_driver_data *driver_data; + struct av8100_status status; + + /* TODO check video_mode_params */ + if (dev == NULL || video_mode == NULL) { + pr_warning("%s:ddev = NULL or video_mode = NULL\n", __func__); + return -EINVAL; + } + + pdata = dev->dev.platform_data; + driver_data = dev_get_drvdata(&dev->dev); + + dev_dbg(&dev->dev, "%s:\n", __func__); + dev_vdbg(&dev->dev, "%s:xres:%d yres:%d hbp:%d hfp:%d vbp:%d vfp:%d " + "interlaced:%d\n", __func__, + video_mode->xres, + video_mode->yres, + video_mode->hbp, + video_mode->hfp, + video_mode->vbp, + video_mode->vfp, + video_mode->interlaced); + + if (driver_data->update_port_pixel_format) { + hdmi_set_pixel_format(dev, dev->pixel_format); + driver_data->update_port_pixel_format = false; + } + + memset(&(dev->video_mode), 0, sizeof(struct mcde_video_mode)); + memcpy(&(dev->video_mode), video_mode, sizeof(struct mcde_video_mode)); + + if (dev->port->pixel_format == MCDE_PORTPIXFMT_DSI_YCBCR422 && + pdata->rgb_2_yCbCr_transform) + mcde_chnl_set_col_convert(dev->chnl_state, + pdata->rgb_2_yCbCr_transform, + MCDE_CONVERT_RGB_2_YCBCR); + mcde_chnl_stop_flow(dev->chnl_state); + + ret = mcde_chnl_set_video_mode(dev->chnl_state, &dev->video_mode); + if (ret < 0) { + dev_warn(&dev->dev, "Failed to set video mode\n"); + return ret; + } + + status = av8100_status_get(); + if (status.av8100_state == AV8100_OPMODE_UNDEFINED) + return -EINVAL; + + if (av8100_ver_get() == AV8100_CHIPVER_1) { + if (status.av8100_state >= AV8100_OPMODE_STANDBY) { + /* Disable interrupts */ + ret = av8100_disable_interrupt(); + if (ret) { + dev_err(&dev->dev, + "%s:av8100_disable_interrupt failed\n", + __func__); + return ret; + } + + ret = av8100_powerdown(); + 
if (ret) { + dev_err(&dev->dev, + "av8100_powerdown failed\n"); + return ret; + } + + msleep(10); + } + } + + /* Set to powerup with interrupts disabled */ + status = av8100_status_get(); + if (status.av8100_state < AV8100_OPMODE_STANDBY) { + ret = av8100_powerup(); + if (ret) { + dev_err(&dev->dev, "av8100_powerup failed\n"); + return ret; + } + } + + if (status.av8100_state <= AV8100_OPMODE_IDLE) { + ret = av8100_download_firmware(I2C_INTERFACE); + if (ret) { + dev_err(&dev->dev, "av8100_download_firmware failed\n"); + return ret; + } + } + + if (av8100_disable_interrupt()) + return -EFAULT; + + /* + * Don't look at dev->port->hdmi_sdtv_switch; it states only which + * one should be started, not which one is currently working + */ + if (av8100_conf_get(AV8100_COMMAND_HDMI, &av8100_config)) + return -EFAULT; + if (av8100_config.hdmi_format.hdmi_mode == AV8100_HDMI_ON) { + /* Set HDMI mode to OFF */ + av8100_config.hdmi_format.hdmi_mode = AV8100_HDMI_OFF; + av8100_config.hdmi_format.dvi_format = AV8100_DVI_CTRL_CTL0; + av8100_config.hdmi_format.hdmi_format = AV8100_HDMI; + if (av8100_conf_prep(AV8100_COMMAND_HDMI, &av8100_config)) + return -EFAULT; + + if (av8100_conf_w(AV8100_COMMAND_HDMI, NULL, NULL, + I2C_INTERFACE)) + return -EFAULT; + } + if (av8100_conf_get(AV8100_COMMAND_DENC, &av8100_config)) + return -EFAULT; + if (av8100_config.denc_format.enable) { + /* Turn off DENC */ + av8100_config.denc_format.enable = 0; + if (av8100_conf_prep(AV8100_COMMAND_DENC, &av8100_config)) + return -EFAULT; + if (av8100_conf_w(AV8100_COMMAND_DENC, NULL, NULL, + I2C_INTERFACE)) + return -EFAULT; + } + + /* Get current av8100 video output format */ + ret = av8100_conf_get(AV8100_COMMAND_VIDEO_OUTPUT_FORMAT, + &av8100_config); + if (ret) { + dev_err(&dev->dev, "%s:av8100_conf_get " + "AV8100_COMMAND_VIDEO_OUTPUT_FORMAT failed\n", + __func__); + return ret; + } + + if (dev->port->hdmi_sdtv_switch == SDTV_SWITCH) + av8100_config.video_output_format.video_output_cea_vesa = + 
dev->video_mode.yres == NATIVE_YRES_SDTV ? + AV8100_CEA21_22_576I_PAL_50HZ : + AV8100_CEA6_7_NTSC_60HZ; + else + av8100_config.video_output_format.video_output_cea_vesa = + av8100_video_output_format_get( + dev->video_mode.xres, + dev->video_mode.yres, + dev->video_mode.xres + + dev->video_mode.hbp + dev->video_mode.hfp, + dev->video_mode.yres + + dev->video_mode.vbp + dev->video_mode.vfp, + dev->video_mode.pixclock, + dev->video_mode.interlaced); + + if (AV8100_VIDEO_OUTPUT_CEA_VESA_MAX == + av8100_config.video_output_format.video_output_cea_vesa) { + dev_err(&dev->dev, "%s:video output format not found " + "\n", __func__); + return ret; + } + + ret = av8100_conf_prep(AV8100_COMMAND_VIDEO_OUTPUT_FORMAT, + &av8100_config); + if (ret) { + dev_err(&dev->dev, "%s:av8100_conf_prep " + "AV8100_COMMAND_VIDEO_OUTPUT_FORMAT failed\n", + __func__); + return ret; + } + + /* Get current av8100 video input format */ + ret = av8100_conf_get(AV8100_COMMAND_VIDEO_INPUT_FORMAT, + &av8100_config); + if (ret) { + dev_err(&dev->dev, "%s:av8100_conf_get " + "AV8100_COMMAND_VIDEO_INPUT_FORMAT failed\n", + __func__); + return ret; + } + + /* Set correct av8100 video input pixel format */ + switch (dev->port->pixel_format) { + case MCDE_PORTPIXFMT_DSI_16BPP: + default: + av8100_config.video_input_format.input_pixel_format = + AV8100_INPUT_PIX_RGB565; + break; + case MCDE_PORTPIXFMT_DSI_18BPP: + av8100_config.video_input_format.input_pixel_format = + AV8100_INPUT_PIX_RGB666; + break; + case MCDE_PORTPIXFMT_DSI_18BPP_PACKED: + av8100_config.video_input_format.input_pixel_format = + AV8100_INPUT_PIX_RGB666P; + break; + case MCDE_PORTPIXFMT_DSI_24BPP: + av8100_config.video_input_format.input_pixel_format = + AV8100_INPUT_PIX_RGB888; + break; + case MCDE_PORTPIXFMT_DSI_YCBCR422: + av8100_config.video_input_format.input_pixel_format = + AV8100_INPUT_PIX_YCBCR422; + break; + } + + /* Set ui_x4 */ + av8100_config.video_input_format.ui_x4 = dev->port->phy.dsi.ui; + + /* Set TE_config */ + switch 
(dev->port->sync_src) { + case MCDE_SYNCSRC_TE0: + av8100_config.video_input_format.TE_config = AV8100_TE_IT_LINE; + break; + case MCDE_SYNCSRC_TE1: + av8100_config.video_input_format.TE_config = AV8100_TE_GPIO_IT; + break; + case MCDE_SYNCSRC_TE_POLLING: + av8100_config.video_input_format.TE_config = + AV8100_TE_DSI_LANE; /* Only on DSI, no interrupts */ + break; + case MCDE_SYNCSRC_OFF: + default: + av8100_config.video_input_format.TE_config = AV8100_TE_OFF; + break; + } + + ret = av8100_conf_prep(AV8100_COMMAND_VIDEO_INPUT_FORMAT, + &av8100_config); + if (ret) { + dev_err(&dev->dev, "%s:av8100_conf_prep " + "AV8100_COMMAND_VIDEO_INPUT_FORMAT failed\n", + __func__); + return ret; + } + + ret = av8100_conf_w(AV8100_COMMAND_VIDEO_INPUT_FORMAT, + NULL, NULL, I2C_INTERFACE); + if (ret) { + dev_err(&dev->dev, "%s:av8100_conf_w " + "AV8100_COMMAND_VIDEO_INPUT_FORMAT failed\n", + __func__); + return ret; + } + + if (dev->port->hdmi_sdtv_switch == SDTV_SWITCH) { + if (dev->port->pixel_format != MCDE_PORTPIXFMT_DSI_YCBCR422) + av8100_config.color_transform = + AV8100_COLOR_TRANSFORM_RGB_TO_DENC; + else + av8100_config.color_transform = + AV8100_COLOR_TRANSFORM_YUV_TO_DENC; + } else if (dev->port->pixel_format == MCDE_PORTPIXFMT_DSI_YCBCR422) { + av8100_config.color_transform = + AV8100_COLOR_TRANSFORM_YUV_TO_RGB; + } else { + av8100_config.color_transform = + AV8100_COLOR_TRANSFORM_INDENTITY; + } + + ret = av8100_conf_prep( + AV8100_COMMAND_COLORSPACECONVERSION, + &av8100_config); + if (ret) { + dev_err(&dev->dev, "%s:av8100_configuration_prepare " + "AV8100_COMMAND_COLORSPACECONVERSION failed\n", + __func__); + return ret; + } + + ret = av8100_conf_w( + AV8100_COMMAND_COLORSPACECONVERSION, + NULL, NULL, I2C_INTERFACE); + if (ret) { + dev_err(&dev->dev, "%s:av8100_conf_w " + "AV8100_COMMAND_COLORSPACECONVERSION failed\n", + __func__); + return ret; + } + + /* Set video output format */ + ret = av8100_conf_w(AV8100_COMMAND_VIDEO_OUTPUT_FORMAT, + NULL, NULL, I2C_INTERFACE); 
+ if (ret) { + dev_err(&dev->dev, "av8100_conf_w failed\n"); + return ret; + } + + /* Set audio input format */ + ret = av8100_conf_w(AV8100_COMMAND_AUDIO_INPUT_FORMAT, + NULL, NULL, I2C_INTERFACE); + if (ret) { + dev_err(&dev->dev, "%s:av8100_conf_w " + "AV8100_COMMAND_AUDIO_INPUT_FORMAT failed\n", + __func__); + return ret; + } + + dev->update_flags |= UPDATE_FLAG_VIDEO_MODE; + dev->first_update = true; + + return 0; +} + +static u16 rotate_byte_left(u8 c, int nr) +{ + return (0xff & (c << nr)) | (0xff & (c >> (8 - nr))); +} + +static u16 map_yv(u8 in) +{ + return rotate_byte_left(in, 3) << 4; +} + +static u16 map_u(u8 in) +{ + return rotate_byte_left(in, 5) << 4; +} + +static int hdmi_set_pixel_format( + struct mcde_display_device *ddev, enum mcde_ovly_pix_fmt format) +{ + dev_dbg(&ddev->dev, "%s\n", __func__); + ddev->pixel_format = format; + + return 0; +} + +static int hdmi_set_port_pixel_format(struct mcde_display_device *ddev) +{ + int ret; + + dev_dbg(&ddev->dev, "%s\n", __func__); + mcde_chnl_stop_flow(ddev->chnl_state); + ret = mcde_chnl_set_pixel_format(ddev->chnl_state, + ddev->port->pixel_format); + + if (ret < 0) { + dev_warn(&ddev->dev, "%s: Failed to set pixel format = %d\n", + __func__, ddev->port->pixel_format); + return ret; + } + + if (ddev->port->pixel_format == MCDE_PORTPIXFMT_DSI_YCBCR422 && + av8100_ver_get() == 2) { + /* The V2 version has an error for unpacking YUV422 */ + struct mcde_palette_table palette = { + .map_col_ch0 = *map_yv, + .map_col_ch1 = *map_u, + .map_col_ch2 = *map_yv, + }; + ret = mcde_chnl_set_palette(ddev->chnl_state, &palette); + } else { + ret = mcde_chnl_set_palette(ddev->chnl_state, NULL); + } + + return 0; +} + +static int hdmi_apply_config(struct mcde_display_device *ddev) +{ + int ret; + + if (!ddev->update_flags) + return 0; + + ret = mcde_chnl_apply(ddev->chnl_state); + if (ret < 0) { + dev_warn(&ddev->dev, "%s:Failed to apply to channel\n", + __func__); + return ret; + } + ddev->update_flags = 0; + + return 
0; +} + +static int hdmi_on_first_update(struct mcde_display_device *dev) +{ + int ret; + union av8100_configuration av8100_config; + u8 *infofr_data; + int infofr_crc; + int cnt; + + dev->first_update = false; + + /* + * Prepare HDMI configuration + * Avoid simultaneous output of DENC and HDMI/DVI. + * Only one of them should be enabled. + * Note HDMI/DVI and DENC are always turned off in set_video_mode. + */ + switch (dev->port->hdmi_sdtv_switch) { + case SDTV_SWITCH: + if (av8100_conf_get(AV8100_COMMAND_DENC, &av8100_config)) + return -EFAULT; + av8100_config.denc_format.enable = 1; + if (dev->video_mode.yres == NATIVE_YRES_SDTV) { + av8100_config.denc_format.standard_selection = + AV8100_PAL_BDGHI; + av8100_config.denc_format.cvbs_video_format = + AV8100_CVBS_625; + } else { + av8100_config.denc_format.standard_selection = + AV8100_NTSC_M; + av8100_config.denc_format.cvbs_video_format = + AV8100_CVBS_525; + } + ret = av8100_conf_prep(AV8100_COMMAND_DENC, &av8100_config); + break; + case DVI_SWITCH: + av8100_config.hdmi_format.hdmi_mode = AV8100_HDMI_ON; + av8100_config.hdmi_format.hdmi_format = AV8100_DVI; + av8100_config.hdmi_format.dvi_format = AV8100_DVI_CTRL_CTL0; + ret = av8100_conf_prep(AV8100_COMMAND_HDMI, &av8100_config); + break; + case HDMI_SWITCH: + default: + av8100_config.hdmi_format.hdmi_mode = AV8100_HDMI_ON; + av8100_config.hdmi_format.hdmi_format = AV8100_HDMI; + av8100_config.hdmi_format.dvi_format = AV8100_DVI_CTRL_CTL0; + ret = av8100_conf_prep(AV8100_COMMAND_HDMI, &av8100_config); + break; + } + + if (ret) { + dev_err(&dev->dev, "%s:av8100_conf_prep " + "AV8100_COMMAND_HDMI/DENC failed\n", __func__); + return ret; + } + + /* Enable interrupts */ + ret = av8100_enable_interrupt(); + if (ret) { + dev_err(&dev->dev, "%s:av8100_enable_interrupt failed\n", + __func__); + return ret; + } + + if (dev->port->hdmi_sdtv_switch == SDTV_SWITCH) + ret = av8100_conf_w(AV8100_COMMAND_DENC, NULL, NULL, + I2C_INTERFACE); + else + ret = 
av8100_conf_w(AV8100_COMMAND_HDMI, NULL, NULL, + I2C_INTERFACE); + if (ret) { + dev_err(&dev->dev, "%s:av8100_conf_w " + "AV8100_COMMAND_HDMI/DENC failed\n", __func__); + return ret; + } + + /* AVI Infoframe only if HDMI */ + if (dev->port->hdmi_sdtv_switch != HDMI_SWITCH) + goto hdmi_on_first_update_end; + + /* Create AVI Infoframe */ + av8100_config.infoframes_format.type = AVI_INFOFRAME_TYPE; + av8100_config.infoframes_format.version = AVI_INFOFRAME_VERSION; + av8100_config.infoframes_format.length = AVI_INFOFRAME_DATA_SIZE; + + /* AVI Infoframe data */ + infofr_data = &av8100_config.infoframes_format.data[0]; + memset(infofr_data, 0, AVI_INFOFRAME_DATA_SIZE); + infofr_data[0] = AVI_INFOFRAME_DB1; + infofr_data[1] = AVI_INFOFRAME_DB2; + infofr_data[3] = ceanr_get(dev); + + /* Calculate AVI Infoframe checksum */ + infofr_crc = av8100_config.infoframes_format.type + + av8100_config.infoframes_format.version + + av8100_config.infoframes_format.length; + for (cnt = 0; cnt < AVI_INFOFRAME_DATA_SIZE; cnt++) + infofr_crc += infofr_data[cnt]; + infofr_crc &= 0xFF; + av8100_config.infoframes_format.crc = 0x100 - infofr_crc; + + /* Send AVI Infoframe */ + if (av8100_conf_prep(AV8100_COMMAND_INFOFRAMES, + &av8100_config) != 0) { + dev_err(&dev->dev, "av8100_conf_prep FAIL\n"); + return -EINVAL; + } + + if (av8100_conf_w(AV8100_COMMAND_INFOFRAMES, + NULL, NULL, I2C_INTERFACE) != 0) { + dev_err(&dev->dev, "av8100_conf_w FAIL\n"); + return -EINVAL; + } + +hdmi_on_first_update_end: + return ret; +} + +static int hdmi_set_power_mode(struct mcde_display_device *ddev, + enum mcde_display_power_mode power_mode) +{ + struct display_driver_data *driver_data = dev_get_drvdata(&ddev->dev); + int ret = 0; + + /* OFF -> STANDBY */ + if (ddev->power_mode == MCDE_DISPLAY_PM_OFF && + power_mode != MCDE_DISPLAY_PM_OFF) { + if (ddev->platform_enable) { + ret = ddev->platform_enable(ddev); + if (ret) + return ret; + } + + /* + * the regulator for analog TV out is only enabled here, + * this 
means that one needs to switch to the OFF state + * to be able to switch from HDMI to CVBS. + */ + if (ddev->port->hdmi_sdtv_switch == SDTV_SWITCH) { + ret = regulator_enable(driver_data->cvbs_regulator); + if (ret) + return ret; + driver_data->cvbs_regulator_enabled = true; + } + ddev->power_mode = MCDE_DISPLAY_PM_STANDBY; + + hdmi_set_port_pixel_format(ddev); + } + /* STANDBY -> ON */ + if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY && + power_mode == MCDE_DISPLAY_PM_ON) { + + ddev->power_mode = MCDE_DISPLAY_PM_ON; + goto set_power_and_exit; + } + /* ON -> STANDBY */ + else if (ddev->power_mode == MCDE_DISPLAY_PM_ON && + power_mode <= MCDE_DISPLAY_PM_STANDBY) { + ddev->power_mode = MCDE_DISPLAY_PM_STANDBY; + } + + /* STANDBY -> OFF */ + if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY && + power_mode == MCDE_DISPLAY_PM_OFF) { + memset(&(ddev->video_mode), 0, sizeof(struct mcde_video_mode)); + ret = av8100_powerscan(); + if (ret) + dev_err(&ddev->dev, "%s:av8100_powerscan failed\n" + , __func__); + if (ddev->platform_disable) { + ret = ddev->platform_disable(ddev); + if (ret) + return ret; + } + if (driver_data->cvbs_regulator_enabled) { + ret = regulator_disable(driver_data->cvbs_regulator); + if (ret) + return ret; + driver_data->cvbs_regulator_enabled = false; + } + ddev->power_mode = MCDE_DISPLAY_PM_OFF; + } + +set_power_and_exit: + mcde_chnl_set_power_mode(ddev->chnl_state, ddev->power_mode); + + return ret; +} + +static int hdmi_set_rotation(struct mcde_display_device *ddev, + enum mcde_display_rotation rotation) +{ + /* Not possible to rotate HDMI */ + return 0; +} + +static int __devinit hdmi_probe(struct mcde_display_device *dev) +{ + int ret = 0; + struct mcde_port *port; + struct display_driver_data *driver_data; + struct mcde_display_hdmi_platform_data *pdata = + dev->dev.platform_data; + + if (pdata == NULL) { + dev_err(&dev->dev, "%s:Platform data missing\n", __func__); + return -EINVAL; + } + + if (dev->port->type != MCDE_PORTTYPE_DSI) { + 
dev_err(&dev->dev, "%s:Invalid port type %d\n", + __func__, dev->port->type); + return -EINVAL; + } + + driver_data = (struct display_driver_data *) + kzalloc(sizeof(struct display_driver_data), GFP_KERNEL); + if (!driver_data) { + dev_err(&dev->dev, "Failed to allocate driver data\n"); + return -ENOMEM; + } + + /* DSI use clock continous mode if AV8100_CHIPVER_1 > 1 */ + if (av8100_ver_get() > AV8100_CHIPVER_1) + dev->port->phy.dsi.clk_cont = true; + + dev->on_first_update = hdmi_on_first_update; + dev->try_video_mode = hdmi_try_video_mode; + dev->set_video_mode = hdmi_set_video_mode; + dev->apply_config = hdmi_apply_config; + dev->set_pixel_format = hdmi_set_pixel_format; + dev->set_power_mode = hdmi_set_power_mode; + dev->set_rotation = hdmi_set_rotation; + + port = dev->port; + + port->phy.dsi.host_eot_gen = true; + port->phy.dsi.num_data_lanes = 2; + port->phy.dsi.hs_freq = DSI_HS_FREQ_HZ; + port->phy.dsi.lp_freq = DSI_LP_FREQ_HZ; + + /* Create sysfs files */ + if (device_create_file(&dev->dev, &dev_attr_hdmisdtvswitch)) + dev_info(&dev->dev, + "Unable to create hdmisdtvswitch attr\n"); + if (device_create_file(&dev->dev, &dev_attr_input_pixel_format)) + dev_info(&dev->dev, + "Unable to create input_pixel_format attr\n"); + if (device_create_file(&dev->dev, &dev_attr_disponoff)) + dev_info(&dev->dev, + "Unable to create disponoff attr\n"); + if (device_create_file(&dev->dev, &dev_attr_vesacea)) + dev_info(&dev->dev, + "Unable to create ceavesa attr\n"); + if (device_create_file(&dev->dev, &dev_attr_timing)) + dev_info(&dev->dev, + "Unable to create timing attr\n"); + if (device_create_file(&dev->dev, &dev_attr_stayalive)) + dev_info(&dev->dev, + "Unable to create stayalive attr\n"); + + if (pdata->cvbs_regulator_id) { + driver_data->cvbs_regulator = regulator_get(&dev->dev, + pdata->cvbs_regulator_id); + if (IS_ERR(driver_data->cvbs_regulator)) { + ret = PTR_ERR(driver_data->cvbs_regulator); + dev_warn(&dev->dev, "%s:Failed to get regulator %s\n", + __func__, 
pdata->cvbs_regulator_id); + driver_data->cvbs_regulator = NULL; + goto av_regulator_get_failed; + } + } + + dev_set_drvdata(&dev->dev, driver_data); + dev_info(&dev->dev, "HDMI display probed\n"); + + return 0; + +av_regulator_get_failed: + kfree(driver_data); + return ret; +} + +static int __devexit hdmi_remove(struct mcde_display_device *dev) +{ + struct display_driver_data *driver_data = dev_get_drvdata(&dev->dev); + struct mcde_display_hdmi_platform_data *pdata = + dev->dev.platform_data; + + /* Remove sysfs files */ + device_remove_file(&dev->dev, &dev_attr_input_pixel_format); + device_remove_file(&dev->dev, &dev_attr_hdmisdtvswitch); + device_remove_file(&dev->dev, &dev_attr_disponoff); + device_remove_file(&dev->dev, &dev_attr_vesacea); + device_remove_file(&dev->dev, &dev_attr_timing); + device_remove_file(&dev->dev, &dev_attr_stayalive); + + dev->set_power_mode(dev, MCDE_DISPLAY_PM_OFF); + + if (driver_data->cvbs_regulator) + regulator_put(driver_data->cvbs_regulator); + kfree(driver_data); + if (pdata->hdmi_platform_enable) { + if (pdata->regulator) + regulator_put(pdata->regulator); + if (pdata->reset_gpio) { + gpio_direction_input(pdata->reset_gpio); + gpio_free(pdata->reset_gpio); + } + } + + return 0; +} + +#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM) +static int hdmi_resume(struct mcde_display_device *ddev) +{ + int ret; + + if (ddev->chnl_state == NULL) + return 0; + + /* set_power_mode will handle call platform_enable */ + ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY); + if (ret < 0) + dev_warn(&ddev->dev, "%s:Failed to resume display\n" + , __func__); + + return ret; +} + +static int hdmi_suspend(struct mcde_display_device *ddev, pm_message_t state) +{ + int ret; + + if (ddev->chnl_state == NULL) + return 0; + + /* set_power_mode will handle call platform_disable */ + ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF); + if (ret < 0) + dev_warn(&ddev->dev, "%s:Failed to suspend display\n" + , __func__); + + return 
ret; +} +#endif + +static struct mcde_display_driver hdmi_driver = { + .probe = hdmi_probe, + .remove = hdmi_remove, +#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM) + .suspend = hdmi_suspend, + .resume = hdmi_resume, +#else + .suspend = NULL, + .resume = NULL, +#endif + .driver = { + .name = "av8100_hdmi", + }, +}; + +/* Module init */ +static int __init mcde_display_hdmi_init(void) +{ + pr_info("%s\n", __func__); + + return mcde_display_driver_register(&hdmi_driver); + +} +late_initcall(mcde_display_hdmi_init); + +static void __exit mcde_display_hdmi_exit(void) +{ + pr_info("%s\n", __func__); + + mcde_display_driver_unregister(&hdmi_driver); +} +module_exit(mcde_display_hdmi_exit); + +MODULE_AUTHOR("Per Persson <per.xb.persson@stericsson.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ST-Ericsson hdmi display driver"); diff --git a/drivers/video/mcde/display-fictive.c b/drivers/video/mcde/display-fictive.c new file mode 100644 index 00000000000..c7ea1429b9f --- /dev/null +++ b/drivers/video/mcde/display-fictive.c @@ -0,0 +1,63 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * ST-Ericsson MCDE fictive display driver + * + * Author: Per Persson <per.xb.persson@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/err.h> + +#include <video/mcde_display.h> + +static int __devinit fictive_probe(struct mcde_display_device *dev) +{ + dev->platform_enable = NULL, + dev->platform_disable = NULL, + dev->set_power_mode = NULL; + + dev_info(&dev->dev, "Fictive display probed\n"); + + return 0; +} + +static int __devexit fictive_remove(struct mcde_display_device *dev) +{ + return 0; +} + +static struct mcde_display_driver fictive_driver = { + .probe = fictive_probe, + .remove = fictive_remove, + .driver = { + .name = "mcde_disp_fictive", + }, +}; + +/* Module init */ +static int __init mcde_display_fictive_init(void) +{ + pr_info("%s\n", __func__); + + return mcde_display_driver_register(&fictive_driver); +} +module_init(mcde_display_fictive_init); + +static void __exit mcde_display_fictive_exit(void) +{ + pr_info("%s\n", __func__); + + mcde_display_driver_unregister(&fictive_driver); +} +module_exit(mcde_display_fictive_exit); + +MODULE_AUTHOR("Per Persson <per.xb.persson@stericsson.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ST-Ericsson MCDE fictive display driver"); diff --git a/drivers/video/mcde/display-generic_dsi.c b/drivers/video/mcde/display-generic_dsi.c new file mode 100644 index 00000000000..dfbaa4a8765 --- /dev/null +++ b/drivers/video/mcde/display-generic_dsi.c @@ -0,0 +1,307 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson MCDE generic DCS display driver + * + * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/err.h> + +#include <video/mcde_display.h> +#include <video/mcde_display-generic_dsi.h> + +static int generic_platform_enable(struct mcde_display_device *dev) +{ + struct mcde_display_generic_platform_data *pdata = + dev->dev.platform_data; + + dev_dbg(&dev->dev, "%s: Reset & power on generic display\n", __func__); + + if (pdata->regulator) { + if (regulator_enable(pdata->regulator) < 0) { + dev_err(&dev->dev, "%s:Failed to enable regulator\n" + , __func__); + return -EINVAL; + } + } + if (pdata->reset_gpio) + gpio_set_value_cansleep(pdata->reset_gpio, pdata->reset_high); + mdelay(pdata->reset_delay); + if (pdata->reset_gpio) + gpio_set_value_cansleep(pdata->reset_gpio, !pdata->reset_high); + + return 0; +} + +static int generic_platform_disable(struct mcde_display_device *dev) +{ + struct mcde_display_generic_platform_data *pdata = + dev->dev.platform_data; + + dev_dbg(&dev->dev, "%s:Reset & power off generic display\n", __func__); + + if (pdata->regulator) { + if (regulator_disable(pdata->regulator) < 0) { + dev_err(&dev->dev, "%s:Failed to disable regulator\n" + , __func__); + return -EINVAL; + } + } + return 0; +} + +static int generic_set_power_mode(struct mcde_display_device *ddev, + enum mcde_display_power_mode power_mode) +{ + int ret = 0; + struct mcde_display_generic_platform_data *pdata = + ddev->dev.platform_data; + + dev_dbg(&ddev->dev, "%s:Set Power mode\n", __func__); + + /* OFF -> STANDBY */ + if (ddev->power_mode == MCDE_DISPLAY_PM_OFF && + power_mode != MCDE_DISPLAY_PM_OFF) { + + if (ddev->platform_enable) { + ret = ddev->platform_enable(ddev); + if (ret) + return ret; + } + + ddev->power_mode = MCDE_DISPLAY_PM_STANDBY; + } + + /* STANDBY -> ON */ + if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY && + power_mode == MCDE_DISPLAY_PM_ON) { + + ret = mcde_dsi_dcs_write(ddev->chnl_state, + 
DCS_CMD_EXIT_SLEEP_MODE, NULL, 0); + if (ret) + return ret; + + msleep(pdata->sleep_out_delay); + + ret = mcde_dsi_dcs_write(ddev->chnl_state, + DCS_CMD_SET_DISPLAY_ON, NULL, 0); + if (ret) + return ret; + + ddev->power_mode = MCDE_DISPLAY_PM_ON; + goto set_power_and_exit; + } + /* ON -> STANDBY */ + else if (ddev->power_mode == MCDE_DISPLAY_PM_ON && + power_mode <= MCDE_DISPLAY_PM_STANDBY) { + ret = mcde_dsi_dcs_write(ddev->chnl_state, + DCS_CMD_SET_DISPLAY_OFF, NULL, 0); + if (ret) + return ret; + + ret = mcde_dsi_dcs_write(ddev->chnl_state, + DCS_CMD_ENTER_SLEEP_MODE, NULL, 0); + if (ret) + return ret; + + ddev->power_mode = MCDE_DISPLAY_PM_STANDBY; + } + + /* SLEEP -> OFF */ + if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY && + power_mode == MCDE_DISPLAY_PM_OFF) { + if (ddev->platform_disable) { + ret = ddev->platform_disable(ddev); + if (ret) + return ret; + } + ddev->power_mode = MCDE_DISPLAY_PM_OFF; + } + +set_power_and_exit: + mcde_chnl_set_power_mode(ddev->chnl_state, ddev->power_mode); + + return ret; +} + +static int __devinit generic_probe(struct mcde_display_device *dev) +{ + int ret = 0; + struct mcde_display_generic_platform_data *pdata = + dev->dev.platform_data; + + if (pdata == NULL) { + dev_err(&dev->dev, "%s:Platform data missing\n", __func__); + return -EINVAL; + } + + if (dev->port->type != MCDE_PORTTYPE_DSI) { + dev_err(&dev->dev, + "%s:Invalid port type %d\n", + __func__, dev->port->type); + return -EINVAL; + } + + if (!dev->platform_enable && !dev->platform_disable) { + pdata->generic_platform_enable = true; + if (pdata->reset_gpio) { + ret = gpio_request(pdata->reset_gpio, NULL); + if (ret) { + dev_warn(&dev->dev, + "%s:Failed to request gpio %d\n", + __func__, pdata->reset_gpio); + goto gpio_request_failed; + } + gpio_direction_output(pdata->reset_gpio, + !pdata->reset_high); + } + if (pdata->regulator_id) { + pdata->regulator = regulator_get(&dev->dev, + pdata->regulator_id); + if (IS_ERR(pdata->regulator)) { + ret = 
PTR_ERR(pdata->regulator); + dev_warn(&dev->dev, + "%s:Failed to get regulator '%s'\n", + __func__, pdata->regulator_id); + pdata->regulator = NULL; + goto regulator_get_failed; + } + + if (regulator_set_voltage(pdata->regulator, + pdata->min_supply_voltage, + pdata->max_supply_voltage) < 0) { + int volt; + + dev_warn(&dev->dev, + "%s:Failed to set voltage '%s'\n", + __func__, pdata->regulator_id); + volt = regulator_get_voltage(pdata->regulator); + dev_warn(&dev->dev, + "Voltage:%d\n", volt); + } + + /* + * When u-boot has display a startup screen. + * U-boot has turned on display power however the + * regulator framework does not know about that + * This is the case here, the display driver has to + * enable the regulator for the display. + */ + if (dev->power_mode == MCDE_DISPLAY_PM_STANDBY) { + ret = regulator_enable(pdata->regulator); + if (ret < 0) { + dev_err(&dev->dev, + "%s:Failed to enable regulator\n" + , __func__); + goto regulator_enable_failed; + } + } + } + } + + dev->platform_enable = generic_platform_enable, + dev->platform_disable = generic_platform_disable, + dev->set_power_mode = generic_set_power_mode; + + dev_info(&dev->dev, "Generic display probed\n"); + + goto out; +regulator_enable_failed: +regulator_get_failed: + if (pdata->generic_platform_enable && pdata->reset_gpio) + gpio_free(pdata->reset_gpio); +gpio_request_failed: +out: + return ret; +} + +static int __devexit generic_remove(struct mcde_display_device *dev) +{ + struct mcde_display_generic_platform_data *pdata = + dev->dev.platform_data; + + dev->set_power_mode(dev, MCDE_DISPLAY_PM_OFF); + + if (!pdata->generic_platform_enable) + return 0; + + if (pdata->regulator) + regulator_put(pdata->regulator); + if (pdata->reset_gpio) { + gpio_direction_input(pdata->reset_gpio); + gpio_free(pdata->reset_gpio); + } + + return 0; +} + +#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM) +static int generic_resume(struct mcde_display_device *ddev) +{ + int ret; + + /* set_power_mode will 
handle call platform_enable */ + ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY); + if (ret < 0) + dev_warn(&ddev->dev, "%s:Failed to resume display\n" + , __func__); + return ret; +} + +static int generic_suspend(struct mcde_display_device *ddev, pm_message_t state) +{ + int ret; + + /* set_power_mode will handle call platform_disable */ + ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF); + if (ret < 0) + dev_warn(&ddev->dev, "%s:Failed to suspend display\n" + , __func__); + return ret; +} +#endif + +static struct mcde_display_driver generic_driver = { + .probe = generic_probe, + .remove = generic_remove, +#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM) + .suspend = generic_suspend, + .resume = generic_resume, +#else + .suspend = NULL, + .resume = NULL, +#endif + .driver = { + .name = "mcde_disp_generic", + }, +}; + +/* Module init */ +static int __init mcde_display_generic_init(void) +{ + pr_info("%s\n", __func__); + + return mcde_display_driver_register(&generic_driver); +} +module_init(mcde_display_generic_init); + +static void __exit mcde_display_generic_exit(void) +{ + pr_info("%s\n", __func__); + + mcde_display_driver_unregister(&generic_driver); +} +module_exit(mcde_display_generic_exit); + +MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ST-Ericsson MCDE generic DCS display driver"); diff --git a/drivers/video/mcde/display-samsung_s6d16d0.c b/drivers/video/mcde/display-samsung_s6d16d0.c new file mode 100644 index 00000000000..900c3f70aa6 --- /dev/null +++ b/drivers/video/mcde/display-samsung_s6d16d0.c @@ -0,0 +1,232 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson MCDE Samsung S6D16D0 display driver + * + * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/regulator/consumer.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/gpio.h> +#include <linux/err.h> + +#include <video/mcde_display.h> + +#define RESET_DURATION_US 10 +#define RESET_DELAY_MS 120 +#define SLEEP_OUT_DELAY_MS 120 +#define IO_REGU "vdd1" +#define IO_REGU_MIN 1650000 +#define IO_REGU_MAX 3300000 + +#define DSI_HS_FREQ_HZ 420160000 +#define DSI_LP_FREQ_HZ 19200000 + +struct device_info { + int reset_gpio; + struct mcde_port port; + struct regulator *regulator; +}; + +static inline struct device_info *get_drvdata(struct mcde_display_device *ddev) +{ + return (struct device_info *)dev_get_drvdata(&ddev->dev); +} + +static int power_on(struct mcde_display_device *ddev) +{ + struct device_info *di = get_drvdata(ddev); + + dev_dbg(&ddev->dev, "Reset & power on s6d16d0 display\n"); + + regulator_enable(di->regulator); + gpio_set_value_cansleep(di->reset_gpio, 0); + udelay(RESET_DURATION_US); + gpio_set_value_cansleep(di->reset_gpio, 1); + msleep(RESET_DELAY_MS); + + return 0; +} + +static int power_off(struct mcde_display_device *ddev) +{ + struct device_info *di = get_drvdata(ddev); + + dev_dbg(&ddev->dev, "Power off s6d16d0 display\n"); + + regulator_disable(di->regulator); + + return 0; +} + +static int display_on(struct mcde_display_device *ddev) +{ + int ret; + u8 val = 0; + + dev_dbg(&ddev->dev, "Display on s6d16d0\n"); + + ret = mcde_dsi_dcs_write(ddev->chnl_state, + DCS_CMD_SET_TEAR_ON, &val, 1); + if (ret) + dev_warn(&ddev->dev, + "%s:Failed to enable synchronized update\n", __func__); + + ret = mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_EXIT_SLEEP_MODE, + NULL, 0); + if (ret) + return ret; + msleep(SLEEP_OUT_DELAY_MS); + return mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_SET_DISPLAY_ON, + NULL, 0); +} + +static int display_off(struct mcde_display_device *ddev) +{ + int ret; + + dev_dbg(&ddev->dev, "Display off s6d16d0\n"); 
+ + ret = mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_SET_DISPLAY_OFF, + NULL, 0); + if (ret) + return ret; + + return mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_ENTER_SLEEP_MODE, + NULL, 0); +} + +static int set_power_mode(struct mcde_display_device *ddev, + enum mcde_display_power_mode power_mode) +{ + int ret = 0; + + dev_dbg(&ddev->dev, "Set power mode %d\n", power_mode); + + /* OFF -> STANDBY */ + if (ddev->power_mode == MCDE_DISPLAY_PM_OFF && + power_mode != MCDE_DISPLAY_PM_OFF) { + ret = power_on(ddev); + if (ret) + return ret; + ddev->power_mode = MCDE_DISPLAY_PM_STANDBY; + } + + /* STANDBY -> ON */ + if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY && + power_mode == MCDE_DISPLAY_PM_ON) { + + ret = display_on(ddev); + if (ret) + return ret; + ddev->power_mode = MCDE_DISPLAY_PM_ON; + } + /* ON -> STANDBY */ + else if (ddev->power_mode == MCDE_DISPLAY_PM_ON && + power_mode <= MCDE_DISPLAY_PM_STANDBY) { + + ret = display_off(ddev); + if (ret) + return ret; + ddev->power_mode = MCDE_DISPLAY_PM_STANDBY; + } + + /* STANDBY -> OFF */ + if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY && + power_mode == MCDE_DISPLAY_PM_OFF) { + ret = power_off(ddev); + if (ret) + return ret; + ddev->power_mode = MCDE_DISPLAY_PM_OFF; + } + + return mcde_chnl_set_power_mode(ddev->chnl_state, ddev->power_mode); +} + +static int __devinit samsung_s6d16d0_probe(struct mcde_display_device *ddev) +{ + int ret = 0; + struct mcde_display_dsi_platform_data *pdata = ddev->dev.platform_data; + struct device_info *di; + + if (pdata == NULL || !pdata->reset_gpio) { + dev_err(&ddev->dev, "Invalid platform data\n"); + return -EINVAL; + } + + di = kzalloc(sizeof(*di), GFP_KERNEL); + if (!di) + return -ENOMEM; + di->reset_gpio = pdata->reset_gpio; + di->port.link = pdata->link; + di->port.type = MCDE_PORTTYPE_DSI; + di->port.mode = MCDE_PORTMODE_CMD; + di->port.pixel_format = MCDE_PORTPIXFMT_DSI_24BPP; + di->port.sync_src = ddev->port->sync_src; + di->port.frame_trig = ddev->port->frame_trig; + 
di->port.phy.dsi.num_data_lanes = 2; + di->port.phy.dsi.host_eot_gen = true; + /* TODO: Move UI to mcde_hw.c when clk_get_rate(dsi) is done */ + di->port.phy.dsi.ui = 9; + di->port.phy.dsi.hs_freq = DSI_HS_FREQ_HZ; + di->port.phy.dsi.lp_freq = DSI_LP_FREQ_HZ; + + ret = gpio_request(di->reset_gpio, NULL); + if (ret) + goto gpio_request_failed; + gpio_direction_output(di->reset_gpio, 1); + di->regulator = regulator_get(&ddev->dev, IO_REGU); + if (IS_ERR(di->regulator)) { + di->regulator = NULL; + goto regulator_get_failed; + } + ret = regulator_set_voltage(di->regulator, IO_REGU_MIN, IO_REGU_MAX); + if (WARN_ON(ret)) + goto regulator_voltage_failed; + + /* Get in sync with u-boot */ + if (ddev->power_mode != MCDE_DISPLAY_PM_OFF) + (void)regulator_enable(di->regulator); + + ddev->set_power_mode = set_power_mode; + ddev->port = &di->port; + ddev->native_x_res = 864; + ddev->native_y_res = 480; + dev_set_drvdata(&ddev->dev, di); + + dev_info(&ddev->dev, "Samsung s6d16d0 display probed\n"); + + return 0; +regulator_voltage_failed: + regulator_put(di->regulator); +regulator_get_failed: + gpio_free(di->reset_gpio); +gpio_request_failed: + kfree(di); + return ret; +} + +static struct mcde_display_driver samsung_s6d16d0_driver = { + .probe = samsung_s6d16d0_probe, + .driver = { + .name = "samsung_s6d16d0", + }, +}; + +static int __init samsung_s6d16d0_init(void) +{ + return mcde_display_driver_register(&samsung_s6d16d0_driver); +} +module_init(samsung_s6d16d0_init); + +MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ST-Ericsson MCDE Samsung S6D16D0 display driver"); diff --git a/drivers/video/mcde/display-sony_acx424akp_dsi.c b/drivers/video/mcde/display-sony_acx424akp_dsi.c new file mode 100644 index 00000000000..867bbb37375 --- /dev/null +++ b/drivers/video/mcde/display-sony_acx424akp_dsi.c @@ -0,0 +1,422 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson MCDE Sony acx424akp DCS display 
driver + * + * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/err.h> +#include <linux/slab.h> + +#include <linux/regulator/consumer.h> + +#include <video/mcde_display.h> +#include <video/mcde_display-sony_acx424akp_dsi.h> + +#define RESET_DELAY_MS 11 +#define RESET_LOW_DELAY_US 20 +#define SLEEP_OUT_DELAY_MS 140 +#define SLEEP_IN_DELAY_MS 85 /* Assume 60 Hz 5 frames */ +#define IO_REGU "vddi" +#define IO_REGU_MIN 1600000 +#define IO_REGU_MAX 3300000 + +#define DSI_HS_FREQ_HZ 420160000 +#define DSI_LP_FREQ_HZ 19200000 + +struct device_info { + int reset_gpio; + struct mcde_port port; + struct regulator *regulator; +}; + +static inline struct device_info *get_drvdata(struct mcde_display_device *ddev) +{ + return (struct device_info *)dev_get_drvdata(&ddev->dev); +} + +static int display_read_deviceid(struct mcde_display_device *dev, u16 *id) +{ + struct mcde_chnl_state *chnl; + + u8 id1, id2, id3; + int len = 1; + int ret = 0; + int readret = 0; + + dev_dbg(&dev->dev, "%s: Read device id of the display\n", __func__); + + /* Acquire MCDE resources */ + chnl = mcde_chnl_get(dev->chnl_id, dev->fifo, dev->port); + if (IS_ERR(chnl)) { + ret = PTR_ERR(chnl); + dev_warn(&dev->dev, "Failed to acquire MCDE channel\n"); + goto out; + } + + /* plugnplay: use registers DA, DBh and DCh to detect display */ + readret = mcde_dsi_dcs_read(chnl, 0xDA, (u32 *)&id1, &len); + if (!readret) + readret = mcde_dsi_dcs_read(chnl, 0xDB, (u32 *)&id2, &len); + if (!readret) + readret = mcde_dsi_dcs_read(chnl, 0xDC, (u32 *)&id3, &len); + + if (readret) { + dev_info(&dev->dev, + "mcde_dsi_dcs_read failed to read display ID\n"); + goto read_fail; + } + + *id = (id3 << 8) | id2; +read_fail: + /* close MCDE channel */ + mcde_chnl_put(chnl); 
+out: + return 0; +} + +static int power_on(struct mcde_display_device *dev) +{ + struct device_info *di = get_drvdata(dev); + + dev_dbg(&dev->dev, "%s: Reset & power on sony display\n", __func__); + + regulator_enable(di->regulator); + gpio_set_value_cansleep(di->reset_gpio, 1); + msleep(RESET_DELAY_MS); + gpio_set_value_cansleep(di->reset_gpio, 0); + udelay(RESET_LOW_DELAY_US); + gpio_set_value_cansleep(di->reset_gpio, 1); + msleep(RESET_DELAY_MS); + + return 0; +} + +static int power_off(struct mcde_display_device *dev) +{ + struct device_info *di = get_drvdata(dev); + + dev_dbg(&dev->dev, "%s:Reset & power off sony display\n", __func__); + + gpio_set_value_cansleep(di->reset_gpio, 0); + msleep(RESET_DELAY_MS); + regulator_disable(di->regulator); + + return 0; +} + +static int display_on(struct mcde_display_device *ddev) +{ + int ret; + u8 val = 0; + + dev_dbg(&ddev->dev, "Display on sony display\n"); + + ret = mcde_dsi_dcs_write(ddev->chnl_state, + DCS_CMD_SET_TEAR_ON, &val, 1); + if (ret) + dev_warn(&ddev->dev, + "%s:Failed to enable synchronized update\n", __func__); + + ret = mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_EXIT_SLEEP_MODE, + NULL, 0); + if (ret) + return ret; + msleep(SLEEP_OUT_DELAY_MS); + return mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_SET_DISPLAY_ON, + NULL, 0); +} + +static int display_off(struct mcde_display_device *ddev) +{ + int ret; + + dev_dbg(&ddev->dev, "Display off sony display\n"); + + ret = mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_SET_DISPLAY_OFF, + NULL, 0); + if (ret) + return ret; + + ret = mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_ENTER_SLEEP_MODE, + NULL, 0); + /* Wait for 4 frames or more */ + msleep(SLEEP_IN_DELAY_MS); + + return ret; +} + +static int sony_acx424akp_set_scan_mode(struct mcde_display_device *ddev, + enum mcde_display_power_mode power_mode) +{ + int ret = 0; + u8 param[MCDE_MAX_DSI_DIRECT_CMD_WRITE]; + + dev_dbg(&ddev->dev, "%s:Set Power mode\n", __func__); + + /* 180 rotation for SONY ACX424AKP 
display */ + if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY) { + param[0] = 0xAA; + ret = mcde_dsi_dcs_write(ddev->chnl_state, 0xf3, param, 1); + if (ret) + return ret; + + param[0] = 0x00; + param[1] = 0x00; + ret = mcde_dsi_generic_write(ddev->chnl_state, param, 3); + if (ret) + return ret; + + param[0] = 0xC9; + param[1] = 0x01; + ret = mcde_dsi_generic_write(ddev->chnl_state, param, 3); + if (ret) + return ret; + + param[0] = 0xA2; + param[1] = 0x00; + ret = mcde_dsi_generic_write(ddev->chnl_state, param, 3); + if (ret) + return ret; + + param[0] = 0xFF; + param[1] = 0xAA; + ret = mcde_dsi_generic_write(ddev->chnl_state, param, 3); + if (ret) + return ret; + } + return ret; +} + +static int sony_acx424akp_set_power_mode(struct mcde_display_device *ddev, + enum mcde_display_power_mode power_mode) +{ + int ret = 0; + + dev_dbg(&ddev->dev, "%s:Set Power mode\n", __func__); + + /* OFF -> STANDBY */ + if (ddev->power_mode == MCDE_DISPLAY_PM_OFF && + power_mode != MCDE_DISPLAY_PM_OFF) { + ret = power_on(ddev); + if (ret) + return ret; + ddev->power_mode = MCDE_DISPLAY_PM_STANDBY; + } + + /* STANDBY -> ON */ + if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY && + power_mode == MCDE_DISPLAY_PM_ON) { + + ret = display_on(ddev); + if (ret) + return ret; + ddev->power_mode = MCDE_DISPLAY_PM_ON; + } + /* ON -> STANDBY */ + else if (ddev->power_mode == MCDE_DISPLAY_PM_ON && + power_mode <= MCDE_DISPLAY_PM_STANDBY) { + + ret = display_off(ddev); + if (ret) + return ret; + ddev->power_mode = MCDE_DISPLAY_PM_STANDBY; + } + + /* STANDBY -> OFF */ + if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY && + power_mode == MCDE_DISPLAY_PM_OFF) { + ret = power_off(ddev); + if (ret) + return ret; + ddev->power_mode = MCDE_DISPLAY_PM_OFF; + } + + return mcde_chnl_set_power_mode(ddev->chnl_state, ddev->power_mode); +} + +static int __devinit sony_acx424akp_probe(struct mcde_display_device *dev) +{ + int ret = 0; + u16 id = 0; + struct device_info *di; + struct mcde_port *port; + struct 
mcde_display_sony_acx424akp_platform_data *pdata = + dev->dev.platform_data; + + if (pdata == NULL || !pdata->reset_gpio) { + dev_err(&dev->dev, "Invalid platform data\n"); + return -EINVAL; + } + + di = kzalloc(sizeof(*di), GFP_KERNEL); + if (!di) + return -ENOMEM; + + port = dev->port; + di->reset_gpio = pdata->reset_gpio; + di->port.type = MCDE_PORTTYPE_DSI; + di->port.mode = MCDE_PORTMODE_CMD; + di->port.pixel_format = MCDE_PORTPIXFMT_DSI_24BPP; + di->port.sync_src = dev->port->sync_src; + di->port.frame_trig = dev->port->frame_trig; + di->port.phy.dsi.num_data_lanes = 2; + di->port.link = port->link; + di->port.phy.dsi.host_eot_gen = true; + /* TODO: Move UI to mcde_hw.c when clk_get_rate(dsi) is done */ + di->port.phy.dsi.ui = 9; + di->port.phy.dsi.hs_freq = DSI_HS_FREQ_HZ; + di->port.phy.dsi.lp_freq = DSI_LP_FREQ_HZ; + + ret = gpio_request(di->reset_gpio, NULL); + if (WARN_ON(ret)) + goto gpio_request_failed; + + gpio_direction_output(di->reset_gpio, 1); + di->regulator = regulator_get(&dev->dev, IO_REGU); + if (IS_ERR(di->regulator)) { + ret = PTR_ERR(di->regulator); + di->regulator = NULL; + goto regulator_get_failed; + } + ret = regulator_set_voltage(di->regulator, IO_REGU_MIN, IO_REGU_MAX); + if (WARN_ON(ret)) + goto regulator_voltage_failed; + + dev->set_power_mode = sony_acx424akp_set_power_mode; + + dev->port = &di->port; + dev->native_x_res = 480; + dev->native_y_res = 854; + dev_set_drvdata(&dev->dev, di); + + /* + * When u-boot has display a startup screen. + * U-boot has turned on display power however the + * regulator framework does not know about that + * This is the case here, the display driver has to + * enable the regulator for the display. 
+ */ + if (dev->power_mode != MCDE_DISPLAY_PM_OFF) { + (void) regulator_enable(di->regulator); + } else { + power_on(dev); + dev->power_mode = MCDE_DISPLAY_PM_STANDBY; + } + + ret = display_read_deviceid(dev, &id); + if (ret) + goto read_id_failed; + + switch (id) { + case DISPLAY_SONY_ACX424AKP: + case DISPLAY_SONY_ACX424AKP_ID2: + pdata->disp_panel = id; + dev_info(&dev->dev, + "Sony ACX424AKP display (ID 0x%.4X) probed\n", id); + break; + default: + pdata->disp_panel = DISPLAY_NONE; + dev_info(&dev->dev, + "Display not recognized (ID 0x%.4X) probed\n", id); + goto read_id_failed; + } + + return 0; + +read_id_failed: +regulator_voltage_failed: + regulator_put(di->regulator); +regulator_get_failed: + gpio_free(di->reset_gpio); +gpio_request_failed: + kfree(di); + return ret; +} + +static int __devexit sony_acx424akp_remove(struct mcde_display_device *dev) +{ + struct device_info *di = get_drvdata(dev); + + dev->set_power_mode(dev, MCDE_DISPLAY_PM_OFF); + + regulator_put(di->regulator); + gpio_direction_input(di->reset_gpio); + gpio_free(di->reset_gpio); + + kfree(di); + + return 0; +} + +#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM) +static int sony_acx424akp_resume(struct mcde_display_device *ddev) +{ + int ret; + + /* set_power_mode will handle call platform_enable */ + ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY); + if (ret < 0) + dev_warn(&ddev->dev, "%s:Failed to resume display\n" + , __func__); + return ret; +} + +static int sony_acx424akp_suspend(struct mcde_display_device *ddev, \ + pm_message_t state) +{ + int ret; + + /* set_power_mode will handle call platform_disable */ + ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF); + if (ret < 0) + dev_warn(&ddev->dev, "%s:Failed to suspend display\n" + , __func__); + return ret; +} +#endif + +static struct mcde_display_driver sony_acx424akp_driver = { + .probe = sony_acx424akp_probe, + .remove = sony_acx424akp_remove, +#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM) + 
.suspend = sony_acx424akp_suspend, + .resume = sony_acx424akp_resume, +#else + .suspend = NULL, + .resume = NULL, +#endif + .driver = { + .name = "mcde_disp_sony_acx424akp", + }, +}; + +/* Module init */ +static int __init mcde_display_sony_acx424akp_init(void) +{ + pr_info("%s\n", __func__); + + return mcde_display_driver_register(&sony_acx424akp_driver); +} +module_init(mcde_display_sony_acx424akp_init); + +static void __exit mcde_display_sony_acx424akp_exit(void) +{ + pr_info("%s\n", __func__); + + mcde_display_driver_unregister(&sony_acx424akp_driver); +} +module_exit(mcde_display_sony_acx424akp_exit); + +MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ST-Ericsson MCDE Sony ACX424AKP DCS display driver"); diff --git a/drivers/video/mcde/display-vuib500-dpi.c b/drivers/video/mcde/display-vuib500-dpi.c new file mode 100644 index 00000000000..2bd5b990608 --- /dev/null +++ b/drivers/video/mcde/display-vuib500-dpi.c @@ -0,0 +1,215 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson MCDE DPI display driver + * The VUIB500 is an user interface board the can be attached to an HREF. It + * supports the DPI pixel interface and converts this to an analog VGA signal, + * which can be connected to a monitor using a DSUB connector. The VUIB board + * uses an external power supply of 5V. + * + * Author: Marcel Tunnissen <marcel.tuennissen@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/gpio.h> + +#include <video/mcde_display.h> +#include <video/mcde_display-vuib500-dpi.h> + +#define DPI_DISP_TRACE dev_dbg(&ddev->dev, "%s\n", __func__) + +static int try_video_mode(struct mcde_display_device *ddev, + struct mcde_video_mode *video_mode); +static int set_video_mode(struct mcde_display_device *ddev, + struct mcde_video_mode *video_mode); + +static int __devinit dpi_display_probe(struct mcde_display_device *ddev) +{ + int ret = 0; + struct mcde_display_dpi_platform_data *pdata = ddev->dev.platform_data; + DPI_DISP_TRACE; + + if (pdata == NULL) { + dev_err(&ddev->dev, "%s:Platform data missing\n", __func__); + ret = -EINVAL; + goto no_pdata; + } + + if (ddev->port->type != MCDE_PORTTYPE_DPI) { + dev_err(&ddev->dev, + "%s:Invalid port type %d\n", + __func__, ddev->port->type); + ret = -EINVAL; + goto invalid_port_type; + } + + ddev->try_video_mode = try_video_mode; + ddev->set_video_mode = set_video_mode; + dev_info(&ddev->dev, "DPI display probed\n"); + + goto out; +invalid_port_type: +no_pdata: +out: + return ret; +} + +static int __devexit dpi_display_remove(struct mcde_display_device *ddev) +{ + DPI_DISP_TRACE; + + ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF); + + return 0; +} + +static int dpi_display_resume(struct mcde_display_device *ddev) +{ + int ret; + DPI_DISP_TRACE; + + /* set_power_mode will handle call platform_enable */ + ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY); + if (ret < 0) + dev_warn(&ddev->dev, "%s:Failed to resume display\n" + , __func__); + return ret; +} + +static int dpi_display_suspend(struct mcde_display_device *ddev, + pm_message_t state) +{ + int ret; + DPI_DISP_TRACE; + + /* set_power_mode will handle call platform_disable */ + ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF); + if (ret < 0) + dev_warn(&ddev->dev, "%s:Failed to suspend display\n" + , __func__); + return ret; +} + +static void 
print_vmode(struct mcde_video_mode *vmode) +{ + pr_debug("resolution: %dx%d\n", vmode->xres, vmode->yres); + pr_debug(" pixclock: %d\n", vmode->pixclock); + pr_debug(" hbp: %d\n", vmode->hbp); + pr_debug(" hfp: %d\n", vmode->hfp); + pr_debug(" hsw: %d\n", vmode->hsw); + pr_debug(" vbp: %d\n", vmode->vbp); + pr_debug(" vfp: %d\n", vmode->vfp); + pr_debug(" vsw: %d\n", vmode->vsw); + pr_debug("interlaced: %s\n", vmode->interlaced ? "true" : "false"); +} + +/* Taken from the programmed value of the LCD clock in PRCMU */ +#define PIX_CLK_FREQ 25000000 +#define VMODE_XRES 640 +#define VMODE_YRES 480 + +static int try_video_mode( + struct mcde_display_device *ddev, struct mcde_video_mode *video_mode) +{ + int res = -EINVAL; + DPI_DISP_TRACE; + + if (ddev == NULL || video_mode == NULL) { + dev_warn(&ddev->dev, "%s:ddev = NULL or video_mode = NULL\n", + __func__); + return res; + } + + if (video_mode->xres == VMODE_XRES && video_mode->yres == VMODE_YRES) { + video_mode->hbp = 40; + video_mode->hfp = 8; + video_mode->hsw = 96; + video_mode->vbp = 25; + video_mode->vfp = 2; + video_mode->vsw = 2; + /* + * The pixclock setting is not used within MCDE. The clock is + * setup elsewhere. But the pixclock value is visible in user + * space. 
+ */ + video_mode->pixclock = (int) (1e+12 * (1.0 / PIX_CLK_FREQ)); + res = 0; + } /* TODO: add more supported resolutions here */ + video_mode->interlaced = false; + + if (res == 0) + print_vmode(video_mode); + else + dev_warn(&ddev->dev, + "%s:Failed to find video mode x=%d, y=%d\n", + __func__, video_mode->xres, video_mode->yres); + + return res; + +} + +static int set_video_mode( + struct mcde_display_device *ddev, struct mcde_video_mode *video_mode) +{ + int res; + DPI_DISP_TRACE; + + if (ddev == NULL || video_mode == NULL) { + dev_warn(&ddev->dev, "%s:ddev = NULL or video_mode = NULL\n", + __func__); + return -EINVAL; + } + if (video_mode->xres != VMODE_XRES || video_mode->yres != VMODE_YRES) { + dev_warn(&ddev->dev, "%s:Failed to set video mode x=%d, y=%d\n", + __func__, video_mode->xres, video_mode->yres); + return -EINVAL; + } + ddev->video_mode = *video_mode; + print_vmode(video_mode); + + res = mcde_chnl_set_video_mode(ddev->chnl_state, &ddev->video_mode); + if (res < 0) { + dev_warn(&ddev->dev, "%s:Failed to set video mode on channel\n", + __func__); + + } + /* notify mcde display driver about updated video mode */ + ddev->update_flags |= UPDATE_FLAG_VIDEO_MODE; + return res; +} + +static struct mcde_display_driver dpi_display_driver = { + .probe = dpi_display_probe, + .remove = dpi_display_remove, + .suspend = dpi_display_suspend, + .resume = dpi_display_resume, + .driver = { + .name = "mcde_display_dpi", + }, +}; + +/* Module init */ +static int __init mcde_dpi_display_init(void) +{ + pr_info("%s\n", __func__); + + return mcde_display_driver_register(&dpi_display_driver); +} +module_init(mcde_dpi_display_init); + +static void __exit mcde_dpi_display_exit(void) +{ + pr_info("%s\n", __func__); + + mcde_display_driver_unregister(&dpi_display_driver); +} +module_exit(mcde_dpi_display_exit); + +MODULE_AUTHOR("Marcel Tunnissen <marcel.tuennissen@stericsson.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ST-Ericsson MCDE DPI display driver fro VUIB500 
display"); diff --git a/drivers/video/mcde/dsilink_regs.h b/drivers/video/mcde/dsilink_regs.h new file mode 100644 index 00000000000..29fa75a1752 --- /dev/null +++ b/drivers/video/mcde/dsilink_regs.h @@ -0,0 +1,2037 @@ + +#define DSI_VAL2REG(__reg, __fld, __val) \ + (((__val) << __reg##_##__fld##_SHIFT) & __reg##_##__fld##_MASK) +#define DSI_REG2VAL(__reg, __fld, __val) \ + (((__val) & __reg##_##__fld##_MASK) >> __reg##_##__fld##_SHIFT) + +#define DSI_MCTL_INTEGRATION_MODE 0x00000000 +#define DSI_MCTL_INTEGRATION_MODE_INT_MODE_EN_SHIFT 0 +#define DSI_MCTL_INTEGRATION_MODE_INT_MODE_EN_MASK 0x00000001 +#define DSI_MCTL_INTEGRATION_MODE_INT_MODE_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_INTEGRATION_MODE, INT_MODE_EN, __x) +#define DSI_MCTL_MAIN_DATA_CTL 0x00000004 +#define DSI_MCTL_MAIN_DATA_CTL_LINK_EN_SHIFT 0 +#define DSI_MCTL_MAIN_DATA_CTL_LINK_EN_MASK 0x00000001 +#define DSI_MCTL_MAIN_DATA_CTL_LINK_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, LINK_EN, __x) +#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_SHIFT 1 +#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_MASK 0x00000002 +#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_CMD 0 +#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_VID 1 +#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_ENUM(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, IF1_MODE, \ + DSI_MCTL_MAIN_DATA_CTL_IF1_MODE_##__x) +#define DSI_MCTL_MAIN_DATA_CTL_IF1_MODE(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, IF1_MODE, __x) +#define DSI_MCTL_MAIN_DATA_CTL_VID_EN_SHIFT 2 +#define DSI_MCTL_MAIN_DATA_CTL_VID_EN_MASK 0x00000004 +#define DSI_MCTL_MAIN_DATA_CTL_VID_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, VID_EN, __x) +#define DSI_MCTL_MAIN_DATA_CTL_TVG_SEL_SHIFT 3 +#define DSI_MCTL_MAIN_DATA_CTL_TVG_SEL_MASK 0x00000008 +#define DSI_MCTL_MAIN_DATA_CTL_TVG_SEL(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, TVG_SEL, __x) +#define DSI_MCTL_MAIN_DATA_CTL_TBG_SEL_SHIFT 4 +#define DSI_MCTL_MAIN_DATA_CTL_TBG_SEL_MASK 0x00000010 +#define DSI_MCTL_MAIN_DATA_CTL_TBG_SEL(__x) \ + 
DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, TBG_SEL, __x) +#define DSI_MCTL_MAIN_DATA_CTL_IF1_TE_EN_SHIFT 5 +#define DSI_MCTL_MAIN_DATA_CTL_IF1_TE_EN_MASK 0x00000020 +#define DSI_MCTL_MAIN_DATA_CTL_IF1_TE_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, IF1_TE_EN, __x) +#define DSI_MCTL_MAIN_DATA_CTL_IF2_TE_EN_SHIFT 6 +#define DSI_MCTL_MAIN_DATA_CTL_IF2_TE_EN_MASK 0x00000040 +#define DSI_MCTL_MAIN_DATA_CTL_IF2_TE_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, IF2_TE_EN, __x) +#define DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN_SHIFT 7 +#define DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN_MASK 0x00000080 +#define DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, REG_TE_EN, __x) +#define DSI_MCTL_MAIN_DATA_CTL_READ_EN_SHIFT 8 +#define DSI_MCTL_MAIN_DATA_CTL_READ_EN_MASK 0x00000100 +#define DSI_MCTL_MAIN_DATA_CTL_READ_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, READ_EN, __x) +#define DSI_MCTL_MAIN_DATA_CTL_BTA_EN_SHIFT 9 +#define DSI_MCTL_MAIN_DATA_CTL_BTA_EN_MASK 0x00000200 +#define DSI_MCTL_MAIN_DATA_CTL_BTA_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, BTA_EN, __x) +#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_ECC_SHIFT 10 +#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_ECC_MASK 0x00000400 +#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_ECC(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, DISP_GEN_ECC, __x) +#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_CHECKSUM_SHIFT 11 +#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_CHECKSUM_MASK 0x00000800 +#define DSI_MCTL_MAIN_DATA_CTL_DISP_GEN_CHECKSUM(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, DISP_GEN_CHECKSUM, __x) +#define DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN_SHIFT 12 +#define DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN_MASK 0x00001000 +#define DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, HOST_EOT_GEN, __x) +#define DSI_MCTL_MAIN_DATA_CTL_DISP_EOT_GEN_SHIFT 13 +#define DSI_MCTL_MAIN_DATA_CTL_DISP_EOT_GEN_MASK 0x00002000 +#define DSI_MCTL_MAIN_DATA_CTL_DISP_EOT_GEN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, DISP_EOT_GEN, 
__x) +#define DSI_MCTL_MAIN_DATA_CTL_DLX_REMAP_EN_SHIFT 14 +#define DSI_MCTL_MAIN_DATA_CTL_DLX_REMAP_EN_MASK 0x00004000 +#define DSI_MCTL_MAIN_DATA_CTL_DLX_REMAP_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, DLX_REMAP_EN, __x) +#define DSI_MCTL_MAIN_DATA_CTL_TE_POLLING_EN_SHIFT 15 +#define DSI_MCTL_MAIN_DATA_CTL_TE_POLLING_EN_MASK 0x00008000 +#define DSI_MCTL_MAIN_DATA_CTL_TE_POLLING_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_DATA_CTL, TE_POLLING_EN, __x) +#define DSI_MCTL_MAIN_PHY_CTL 0x00000008 +#define DSI_MCTL_MAIN_PHY_CTL_LANE2_EN_SHIFT 0 +#define DSI_MCTL_MAIN_PHY_CTL_LANE2_EN_MASK 0x00000001 +#define DSI_MCTL_MAIN_PHY_CTL_LANE2_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, LANE2_EN, __x) +#define DSI_MCTL_MAIN_PHY_CTL_FORCE_STOP_MODE_SHIFT 1 +#define DSI_MCTL_MAIN_PHY_CTL_FORCE_STOP_MODE_MASK 0x00000002 +#define DSI_MCTL_MAIN_PHY_CTL_FORCE_STOP_MODE(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, FORCE_STOP_MODE, __x) +#define DSI_MCTL_MAIN_PHY_CTL_CLK_CONTINUOUS_SHIFT 2 +#define DSI_MCTL_MAIN_PHY_CTL_CLK_CONTINUOUS_MASK 0x00000004 +#define DSI_MCTL_MAIN_PHY_CTL_CLK_CONTINUOUS(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, CLK_CONTINUOUS, __x) +#define DSI_MCTL_MAIN_PHY_CTL_CLK_ULPM_EN_SHIFT 3 +#define DSI_MCTL_MAIN_PHY_CTL_CLK_ULPM_EN_MASK 0x00000008 +#define DSI_MCTL_MAIN_PHY_CTL_CLK_ULPM_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, CLK_ULPM_EN, __x) +#define DSI_MCTL_MAIN_PHY_CTL_DAT1_ULPM_EN_SHIFT 4 +#define DSI_MCTL_MAIN_PHY_CTL_DAT1_ULPM_EN_MASK 0x00000010 +#define DSI_MCTL_MAIN_PHY_CTL_DAT1_ULPM_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, DAT1_ULPM_EN, __x) +#define DSI_MCTL_MAIN_PHY_CTL_DAT2_ULPM_EN_SHIFT 5 +#define DSI_MCTL_MAIN_PHY_CTL_DAT2_ULPM_EN_MASK 0x00000020 +#define DSI_MCTL_MAIN_PHY_CTL_DAT2_ULPM_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, DAT2_ULPM_EN, __x) +#define DSI_MCTL_MAIN_PHY_CTL_WAIT_BURST_TIME_SHIFT 6 +#define DSI_MCTL_MAIN_PHY_CTL_WAIT_BURST_TIME_MASK 0x000003C0 +#define DSI_MCTL_MAIN_PHY_CTL_WAIT_BURST_TIME(__x) \ + 
DSI_VAL2REG(DSI_MCTL_MAIN_PHY_CTL, WAIT_BURST_TIME, __x) +#define DSI_MCTL_PLL_CTL 0x0000000C +#define DSI_MCTL_PLL_CTL_PLL_MULT_SHIFT 0 +#define DSI_MCTL_PLL_CTL_PLL_MULT_MASK 0x000000FF +#define DSI_MCTL_PLL_CTL_PLL_MULT(__x) \ + DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_MULT, __x) +#define DSI_MCTL_PLL_CTL_PLL_OUT_DIV_SHIFT 8 +#define DSI_MCTL_PLL_CTL_PLL_OUT_DIV_MASK 0x00003F00 +#define DSI_MCTL_PLL_CTL_PLL_OUT_DIV(__x) \ + DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_OUT_DIV, __x) +#define DSI_MCTL_PLL_CTL_PLL_IN_DIV_SHIFT 14 +#define DSI_MCTL_PLL_CTL_PLL_IN_DIV_MASK 0x0001C000 +#define DSI_MCTL_PLL_CTL_PLL_IN_DIV(__x) \ + DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_IN_DIV, __x) +#define DSI_MCTL_PLL_CTL_PLL_SEL_DIV2_SHIFT 17 +#define DSI_MCTL_PLL_CTL_PLL_SEL_DIV2_MASK 0x00020000 +#define DSI_MCTL_PLL_CTL_PLL_SEL_DIV2(__x) \ + DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_SEL_DIV2, __x) +#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL_SHIFT 18 +#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL_MASK 0x00040000 +#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL_INT_PLL 0 +#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL_SYS_PLL 1 +#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL_ENUM(__x) \ + DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_OUT_SEL, \ + DSI_MCTL_PLL_CTL_PLL_OUT_SEL_##__x) +#define DSI_MCTL_PLL_CTL_PLL_OUT_SEL(__x) \ + DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_OUT_SEL, __x) +#define DSI_MCTL_PLL_CTL_PLL_MASTER_SHIFT 31 +#define DSI_MCTL_PLL_CTL_PLL_MASTER_MASK 0x80000000 +#define DSI_MCTL_PLL_CTL_PLL_MASTER(__x) \ + DSI_VAL2REG(DSI_MCTL_PLL_CTL, PLL_MASTER, __x) +#define DSI_MCTL_LANE_STS 0x00000010 +#define DSI_MCTL_LANE_STS_CLKLANE_STATE_SHIFT 0 +#define DSI_MCTL_LANE_STS_CLKLANE_STATE_MASK 0x00000003 +#define DSI_MCTL_LANE_STS_CLKLANE_STATE_START 0 +#define DSI_MCTL_LANE_STS_CLKLANE_STATE_IDLE 1 +#define DSI_MCTL_LANE_STS_CLKLANE_STATE_HS 2 +#define DSI_MCTL_LANE_STS_CLKLANE_STATE_ULPM 3 +#define DSI_MCTL_LANE_STS_CLKLANE_STATE_ENUM(__x) \ + DSI_VAL2REG(DSI_MCTL_LANE_STS, CLKLANE_STATE, \ + DSI_MCTL_LANE_STS_CLKLANE_STATE_##__x) +#define 
DSI_MCTL_LANE_STS_CLKLANE_STATE(__x) \ + DSI_VAL2REG(DSI_MCTL_LANE_STS, CLKLANE_STATE, __x) +#define DSI_MCTL_LANE_STS_DATLANE1_STATE_SHIFT 2 +#define DSI_MCTL_LANE_STS_DATLANE1_STATE_MASK 0x0000001C +#define DSI_MCTL_LANE_STS_DATLANE1_STATE_START 0 +#define DSI_MCTL_LANE_STS_DATLANE1_STATE_IDLE 1 +#define DSI_MCTL_LANE_STS_DATLANE1_STATE_WRITE 2 +#define DSI_MCTL_LANE_STS_DATLANE1_STATE_ULPM 3 +#define DSI_MCTL_LANE_STS_DATLANE1_STATE_READ 4 +#define DSI_MCTL_LANE_STS_DATLANE1_STATE_ENUM(__x) \ + DSI_VAL2REG(DSI_MCTL_LANE_STS, DATLANE1_STATE, \ + DSI_MCTL_LANE_STS_DATLANE1_STATE_##__x) +#define DSI_MCTL_LANE_STS_DATLANE1_STATE(__x) \ + DSI_VAL2REG(DSI_MCTL_LANE_STS, DATLANE1_STATE, __x) +#define DSI_MCTL_LANE_STS_DATLANE2_STATE_SHIFT 5 +#define DSI_MCTL_LANE_STS_DATLANE2_STATE_MASK 0x00000060 +#define DSI_MCTL_LANE_STS_DATLANE2_STATE_START 0 +#define DSI_MCTL_LANE_STS_DATLANE2_STATE_IDLE 1 +#define DSI_MCTL_LANE_STS_DATLANE2_STATE_WRITE 2 +#define DSI_MCTL_LANE_STS_DATLANE2_STATE_ULPM 3 +#define DSI_MCTL_LANE_STS_DATLANE2_STATE_ENUM(__x) \ + DSI_VAL2REG(DSI_MCTL_LANE_STS, DATLANE2_STATE, \ + DSI_MCTL_LANE_STS_DATLANE2_STATE_##__x) +#define DSI_MCTL_LANE_STS_DATLANE2_STATE(__x) \ + DSI_VAL2REG(DSI_MCTL_LANE_STS, DATLANE2_STATE, __x) +#define DSI_MCTL_DPHY_TIMEOUT 0x00000014 +#define DSI_MCTL_DPHY_TIMEOUT_CLK_DIV_SHIFT 0 +#define DSI_MCTL_DPHY_TIMEOUT_CLK_DIV_MASK 0x0000000F +#define DSI_MCTL_DPHY_TIMEOUT_CLK_DIV(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_TIMEOUT, CLK_DIV, __x) +#define DSI_MCTL_DPHY_TIMEOUT_HSTX_TO_VAL_SHIFT 4 +#define DSI_MCTL_DPHY_TIMEOUT_HSTX_TO_VAL_MASK 0x0003FFF0 +#define DSI_MCTL_DPHY_TIMEOUT_HSTX_TO_VAL(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_TIMEOUT, HSTX_TO_VAL, __x) +#define DSI_MCTL_DPHY_TIMEOUT_LPRX_TO_VAL_SHIFT 18 +#define DSI_MCTL_DPHY_TIMEOUT_LPRX_TO_VAL_MASK 0xFFFC0000 +#define DSI_MCTL_DPHY_TIMEOUT_LPRX_TO_VAL(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_TIMEOUT, LPRX_TO_VAL, __x) +#define DSI_MCTL_ULPOUT_TIME 0x00000018 +#define 
DSI_MCTL_ULPOUT_TIME_CKLANE_ULPOUT_TIME_SHIFT 0 +#define DSI_MCTL_ULPOUT_TIME_CKLANE_ULPOUT_TIME_MASK 0x000001FF +#define DSI_MCTL_ULPOUT_TIME_CKLANE_ULPOUT_TIME(__x) \ + DSI_VAL2REG(DSI_MCTL_ULPOUT_TIME, CKLANE_ULPOUT_TIME, __x) +#define DSI_MCTL_ULPOUT_TIME_DATA_ULPOUT_TIME_SHIFT 9 +#define DSI_MCTL_ULPOUT_TIME_DATA_ULPOUT_TIME_MASK 0x0003FE00 +#define DSI_MCTL_ULPOUT_TIME_DATA_ULPOUT_TIME(__x) \ + DSI_VAL2REG(DSI_MCTL_ULPOUT_TIME, DATA_ULPOUT_TIME, __x) +#define DSI_MCTL_DPHY_STATIC 0x0000001C +#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_CLK_SHIFT 0 +#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_CLK_MASK 0x00000001 +#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_CLK(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, SWAP_PINS_CLK, __x) +#define DSI_MCTL_DPHY_STATIC_HS_INVERT_CLK_SHIFT 1 +#define DSI_MCTL_DPHY_STATIC_HS_INVERT_CLK_MASK 0x00000002 +#define DSI_MCTL_DPHY_STATIC_HS_INVERT_CLK(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, HS_INVERT_CLK, __x) +#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT1_SHIFT 2 +#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT1_MASK 0x00000004 +#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT1(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, SWAP_PINS_DAT1, __x) +#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT1_SHIFT 3 +#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT1_MASK 0x00000008 +#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT1(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, HS_INVERT_DAT1, __x) +#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT2_SHIFT 4 +#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT2_MASK 0x00000010 +#define DSI_MCTL_DPHY_STATIC_SWAP_PINS_DAT2(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, SWAP_PINS_DAT2, __x) +#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT2_SHIFT 5 +#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT2_MASK 0x00000020 +#define DSI_MCTL_DPHY_STATIC_HS_INVERT_DAT2(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, HS_INVERT_DAT2, __x) +#define DSI_MCTL_DPHY_STATIC_UI_X4_SHIFT 6 +#define DSI_MCTL_DPHY_STATIC_UI_X4_MASK 0x00000FC0 +#define DSI_MCTL_DPHY_STATIC_UI_X4(__x) \ + 
DSI_VAL2REG(DSI_MCTL_DPHY_STATIC, UI_X4, __x) +#define DSI_MCTL_MAIN_EN 0x00000020 +#define DSI_MCTL_MAIN_EN_PLL_START_SHIFT 0 +#define DSI_MCTL_MAIN_EN_PLL_START_MASK 0x00000001 +#define DSI_MCTL_MAIN_EN_PLL_START(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_EN, PLL_START, __x) +#define DSI_MCTL_MAIN_EN_CKLANE_EN_SHIFT 3 +#define DSI_MCTL_MAIN_EN_CKLANE_EN_MASK 0x00000008 +#define DSI_MCTL_MAIN_EN_CKLANE_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_EN, CKLANE_EN, __x) +#define DSI_MCTL_MAIN_EN_DAT1_EN_SHIFT 4 +#define DSI_MCTL_MAIN_EN_DAT1_EN_MASK 0x00000010 +#define DSI_MCTL_MAIN_EN_DAT1_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_EN, DAT1_EN, __x) +#define DSI_MCTL_MAIN_EN_DAT2_EN_SHIFT 5 +#define DSI_MCTL_MAIN_EN_DAT2_EN_MASK 0x00000020 +#define DSI_MCTL_MAIN_EN_DAT2_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_EN, DAT2_EN, __x) +#define DSI_MCTL_MAIN_EN_CLKLANE_ULPM_REQ_SHIFT 6 +#define DSI_MCTL_MAIN_EN_CLKLANE_ULPM_REQ_MASK 0x00000040 +#define DSI_MCTL_MAIN_EN_CLKLANE_ULPM_REQ(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_EN, CLKLANE_ULPM_REQ, __x) +#define DSI_MCTL_MAIN_EN_DAT1_ULPM_REQ_SHIFT 7 +#define DSI_MCTL_MAIN_EN_DAT1_ULPM_REQ_MASK 0x00000080 +#define DSI_MCTL_MAIN_EN_DAT1_ULPM_REQ(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_EN, DAT1_ULPM_REQ, __x) +#define DSI_MCTL_MAIN_EN_DAT2_ULPM_REQ_SHIFT 8 +#define DSI_MCTL_MAIN_EN_DAT2_ULPM_REQ_MASK 0x00000100 +#define DSI_MCTL_MAIN_EN_DAT2_ULPM_REQ(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_EN, DAT2_ULPM_REQ, __x) +#define DSI_MCTL_MAIN_EN_IF1_EN_SHIFT 9 +#define DSI_MCTL_MAIN_EN_IF1_EN_MASK 0x00000200 +#define DSI_MCTL_MAIN_EN_IF1_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_EN, IF1_EN, __x) +#define DSI_MCTL_MAIN_EN_IF2_EN_SHIFT 10 +#define DSI_MCTL_MAIN_EN_IF2_EN_MASK 0x00000400 +#define DSI_MCTL_MAIN_EN_IF2_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_EN, IF2_EN, __x) +#define DSI_MCTL_MAIN_STS 0x00000024 +#define DSI_MCTL_MAIN_STS_PLL_LOCK_SHIFT 0 +#define DSI_MCTL_MAIN_STS_PLL_LOCK_MASK 0x00000001 +#define DSI_MCTL_MAIN_STS_PLL_LOCK(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS, 
PLL_LOCK, __x) +#define DSI_MCTL_MAIN_STS_CLKLANE_READY_SHIFT 1 +#define DSI_MCTL_MAIN_STS_CLKLANE_READY_MASK 0x00000002 +#define DSI_MCTL_MAIN_STS_CLKLANE_READY(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS, CLKLANE_READY, __x) +#define DSI_MCTL_MAIN_STS_DAT1_READY_SHIFT 2 +#define DSI_MCTL_MAIN_STS_DAT1_READY_MASK 0x00000004 +#define DSI_MCTL_MAIN_STS_DAT1_READY(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS, DAT1_READY, __x) +#define DSI_MCTL_MAIN_STS_DAT2_READY_SHIFT 3 +#define DSI_MCTL_MAIN_STS_DAT2_READY_MASK 0x00000008 +#define DSI_MCTL_MAIN_STS_DAT2_READY(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS, DAT2_READY, __x) +#define DSI_MCTL_MAIN_STS_HSTX_TO_ERR_SHIFT 4 +#define DSI_MCTL_MAIN_STS_HSTX_TO_ERR_MASK 0x00000010 +#define DSI_MCTL_MAIN_STS_HSTX_TO_ERR(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS, HSTX_TO_ERR, __x) +#define DSI_MCTL_MAIN_STS_LPRX_TO_ERR_SHIFT 5 +#define DSI_MCTL_MAIN_STS_LPRX_TO_ERR_MASK 0x00000020 +#define DSI_MCTL_MAIN_STS_LPRX_TO_ERR(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS, LPRX_TO_ERR, __x) +#define DSI_MCTL_MAIN_STS_CRS_UNTERM_PCK_SHIFT 6 +#define DSI_MCTL_MAIN_STS_CRS_UNTERM_PCK_MASK 0x00000040 +#define DSI_MCTL_MAIN_STS_CRS_UNTERM_PCK(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS, CRS_UNTERM_PCK, __x) +#define DSI_MCTL_MAIN_STS_VRS_UNTERM_PCK_SHIFT 7 +#define DSI_MCTL_MAIN_STS_VRS_UNTERM_PCK_MASK 0x00000080 +#define DSI_MCTL_MAIN_STS_VRS_UNTERM_PCK(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS, VRS_UNTERM_PCK, __x) +#define DSI_MCTL_DPHY_ERR 0x00000028 +#define DSI_MCTL_DPHY_ERR_ERR_ESC_1_SHIFT 6 +#define DSI_MCTL_DPHY_ERR_ERR_ESC_1_MASK 0x00000040 +#define DSI_MCTL_DPHY_ERR_ERR_ESC_1(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_ESC_1, __x) +#define DSI_MCTL_DPHY_ERR_ERR_ESC_2_SHIFT 7 +#define DSI_MCTL_DPHY_ERR_ERR_ESC_2_MASK 0x00000080 +#define DSI_MCTL_DPHY_ERR_ERR_ESC_2(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_ESC_2, __x) +#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_1_SHIFT 8 +#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_1_MASK 0x00000100 +#define 
DSI_MCTL_DPHY_ERR_ERR_SYNCESC_1(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_SYNCESC_1, __x) +#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_2_SHIFT 9 +#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_2_MASK 0x00000200 +#define DSI_MCTL_DPHY_ERR_ERR_SYNCESC_2(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_SYNCESC_2, __x) +#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_1_SHIFT 10 +#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_1_MASK 0x00000400 +#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_1(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONTROL_1, __x) +#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_2_SHIFT 11 +#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_2_MASK 0x00000800 +#define DSI_MCTL_DPHY_ERR_ERR_CONTROL_2(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONTROL_2, __x) +#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_1_SHIFT 12 +#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_1_MASK 0x00001000 +#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_1(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONT_LP0_1, __x) +#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_2_SHIFT 13 +#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_2_MASK 0x00002000 +#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP0_2(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONT_LP0_2, __x) +#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_1_SHIFT 14 +#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_1_MASK 0x00004000 +#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_1(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONT_LP1_1, __x) +#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_2_SHIFT 15 +#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_2_MASK 0x00008000 +#define DSI_MCTL_DPHY_ERR_ERR_CONT_LP1_2(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR, ERR_CONT_LP1_2, __x) +#define DSI_INT_VID_RDDATA 0x00000030 +#define DSI_INT_VID_RDDATA_IF_DATA_SHIFT 0 +#define DSI_INT_VID_RDDATA_IF_DATA_MASK 0x0000FFFF +#define DSI_INT_VID_RDDATA_IF_DATA(__x) \ + DSI_VAL2REG(DSI_INT_VID_RDDATA, IF_DATA, __x) +#define DSI_INT_VID_RDDATA_IF_VALID_SHIFT 16 +#define DSI_INT_VID_RDDATA_IF_VALID_MASK 0x00010000 +#define DSI_INT_VID_RDDATA_IF_VALID(__x) \ + DSI_VAL2REG(DSI_INT_VID_RDDATA, IF_VALID, __x) +#define 
DSI_INT_VID_RDDATA_IF_START_SHIFT 17 +#define DSI_INT_VID_RDDATA_IF_START_MASK 0x00020000 +#define DSI_INT_VID_RDDATA_IF_START(__x) \ + DSI_VAL2REG(DSI_INT_VID_RDDATA, IF_START, __x) +#define DSI_INT_VID_RDDATA_IF_FRAME_SYNC_SHIFT 18 +#define DSI_INT_VID_RDDATA_IF_FRAME_SYNC_MASK 0x00040000 +#define DSI_INT_VID_RDDATA_IF_FRAME_SYNC(__x) \ + DSI_VAL2REG(DSI_INT_VID_RDDATA, IF_FRAME_SYNC, __x) +#define DSI_INT_VID_GNT 0x00000034 +#define DSI_INT_VID_GNT_IF_STALL_SHIFT 0 +#define DSI_INT_VID_GNT_IF_STALL_MASK 0x00000001 +#define DSI_INT_VID_GNT_IF_STALL(__x) \ + DSI_VAL2REG(DSI_INT_VID_GNT, IF_STALL, __x) +#define DSI_INT_CMD_RDDATA 0x00000038 +#define DSI_INT_CMD_RDDATA_IF_DATA_SHIFT 0 +#define DSI_INT_CMD_RDDATA_IF_DATA_MASK 0x0000FFFF +#define DSI_INT_CMD_RDDATA_IF_DATA(__x) \ + DSI_VAL2REG(DSI_INT_CMD_RDDATA, IF_DATA, __x) +#define DSI_INT_CMD_RDDATA_IF_VALID_SHIFT 16 +#define DSI_INT_CMD_RDDATA_IF_VALID_MASK 0x00010000 +#define DSI_INT_CMD_RDDATA_IF_VALID(__x) \ + DSI_VAL2REG(DSI_INT_CMD_RDDATA, IF_VALID, __x) +#define DSI_INT_CMD_RDDATA_IF_START_SHIFT 17 +#define DSI_INT_CMD_RDDATA_IF_START_MASK 0x00020000 +#define DSI_INT_CMD_RDDATA_IF_START(__x) \ + DSI_VAL2REG(DSI_INT_CMD_RDDATA, IF_START, __x) +#define DSI_INT_CMD_RDDATA_IF_FRAME_SYNC_SHIFT 18 +#define DSI_INT_CMD_RDDATA_IF_FRAME_SYNC_MASK 0x00040000 +#define DSI_INT_CMD_RDDATA_IF_FRAME_SYNC(__x) \ + DSI_VAL2REG(DSI_INT_CMD_RDDATA, IF_FRAME_SYNC, __x) +#define DSI_INT_CMD_GNT 0x0000003C +#define DSI_INT_CMD_GNT_IF_STALL_SHIFT 0 +#define DSI_INT_CMD_GNT_IF_STALL_MASK 0x00000001 +#define DSI_INT_CMD_GNT_IF_STALL(__x) \ + DSI_VAL2REG(DSI_INT_CMD_GNT, IF_STALL, __x) +#define DSI_INT_INTERRUPT_CTL 0x00000040 +#define DSI_INT_INTERRUPT_CTL_INT_VAL_SHIFT 0 +#define DSI_INT_INTERRUPT_CTL_INT_VAL_MASK 0x00000001 +#define DSI_INT_INTERRUPT_CTL_INT_VAL(__x) \ + DSI_VAL2REG(DSI_INT_INTERRUPT_CTL, INT_VAL, __x) +#define DSI_CMD_MODE_CTL 0x00000050 +#define DSI_CMD_MODE_CTL_IF1_ID_SHIFT 0 +#define 
DSI_CMD_MODE_CTL_IF1_ID_MASK 0x00000003 +#define DSI_CMD_MODE_CTL_IF1_ID(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_CTL, IF1_ID, __x) +#define DSI_CMD_MODE_CTL_IF2_ID_SHIFT 2 +#define DSI_CMD_MODE_CTL_IF2_ID_MASK 0x0000000C +#define DSI_CMD_MODE_CTL_IF2_ID(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_CTL, IF2_ID, __x) +#define DSI_CMD_MODE_CTL_IF1_LP_EN_SHIFT 4 +#define DSI_CMD_MODE_CTL_IF1_LP_EN_MASK 0x00000010 +#define DSI_CMD_MODE_CTL_IF1_LP_EN(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_CTL, IF1_LP_EN, __x) +#define DSI_CMD_MODE_CTL_IF2_LP_EN_SHIFT 5 +#define DSI_CMD_MODE_CTL_IF2_LP_EN_MASK 0x00000020 +#define DSI_CMD_MODE_CTL_IF2_LP_EN(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_CTL, IF2_LP_EN, __x) +#define DSI_CMD_MODE_CTL_ARB_MODE_SHIFT 6 +#define DSI_CMD_MODE_CTL_ARB_MODE_MASK 0x00000040 +#define DSI_CMD_MODE_CTL_ARB_MODE(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_CTL, ARB_MODE, __x) +#define DSI_CMD_MODE_CTL_ARB_PRI_SHIFT 7 +#define DSI_CMD_MODE_CTL_ARB_PRI_MASK 0x00000080 +#define DSI_CMD_MODE_CTL_ARB_PRI(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_CTL, ARB_PRI, __x) +#define DSI_CMD_MODE_CTL_FIL_VALUE_SHIFT 8 +#define DSI_CMD_MODE_CTL_FIL_VALUE_MASK 0x0000FF00 +#define DSI_CMD_MODE_CTL_FIL_VALUE(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_CTL, FIL_VALUE, __x) +#define DSI_CMD_MODE_CTL_TE_TIMEOUT_SHIFT 16 +#define DSI_CMD_MODE_CTL_TE_TIMEOUT_MASK 0x03FF0000 +#define DSI_CMD_MODE_CTL_TE_TIMEOUT(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_CTL, TE_TIMEOUT, __x) +#define DSI_CMD_MODE_STS 0x00000054 +#define DSI_CMD_MODE_STS_ERR_NO_TE_SHIFT 0 +#define DSI_CMD_MODE_STS_ERR_NO_TE_MASK 0x00000001 +#define DSI_CMD_MODE_STS_ERR_NO_TE(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS, ERR_NO_TE, __x) +#define DSI_CMD_MODE_STS_ERR_TE_MISS_SHIFT 1 +#define DSI_CMD_MODE_STS_ERR_TE_MISS_MASK 0x00000002 +#define DSI_CMD_MODE_STS_ERR_TE_MISS(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS, ERR_TE_MISS, __x) +#define DSI_CMD_MODE_STS_ERR_SDI1_UNDERRUN_SHIFT 2 +#define DSI_CMD_MODE_STS_ERR_SDI1_UNDERRUN_MASK 0x00000004 +#define DSI_CMD_MODE_STS_ERR_SDI1_UNDERRUN(__x) \ + 
DSI_VAL2REG(DSI_CMD_MODE_STS, ERR_SDI1_UNDERRUN, __x) +#define DSI_CMD_MODE_STS_ERR_SDI2_UNDERRUN_SHIFT 3 +#define DSI_CMD_MODE_STS_ERR_SDI2_UNDERRUN_MASK 0x00000008 +#define DSI_CMD_MODE_STS_ERR_SDI2_UNDERRUN(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS, ERR_SDI2_UNDERRUN, __x) +#define DSI_CMD_MODE_STS_ERR_UNWANTED_RD_SHIFT 4 +#define DSI_CMD_MODE_STS_ERR_UNWANTED_RD_MASK 0x00000010 +#define DSI_CMD_MODE_STS_ERR_UNWANTED_RD(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS, ERR_UNWANTED_RD, __x) +#define DSI_CMD_MODE_STS_CSM_RUNNING_SHIFT 5 +#define DSI_CMD_MODE_STS_CSM_RUNNING_MASK 0x00000020 +#define DSI_CMD_MODE_STS_CSM_RUNNING(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS, CSM_RUNNING, __x) +#define DSI_DIRECT_CMD_SEND 0x00000060 +#define DSI_DIRECT_CMD_SEND_START_SHIFT 0 +#define DSI_DIRECT_CMD_SEND_START_MASK 0xFFFFFFFF +#define DSI_DIRECT_CMD_SEND_START(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_SEND, START, __x) +#define DSI_DIRECT_CMD_MAIN_SETTINGS 0x00000064 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_SHIFT 0 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_MASK 0x00000007 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_WRITE 0 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_READ 1 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_TE_REQ 4 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_TRIG_REQ 5 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_BTA_REQ 6 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_ENUM(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_NAT, \ + DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_##__x) +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_NAT, __x) +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT_SHIFT 3 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT_MASK 0x00000008 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_LONGNOTSHORT, __x) +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHIFT 8 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_MASK 0x00003F00 
+#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_0 3 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_1 19 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_SHORT_WRITE_2 35 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_GENERIC_LONG_WRITE 41 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_SHORT_WRITE_0 5 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_SHORT_WRITE_1 21 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_LONG_WRITE 57 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_DCS_READ 6 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SET_MAX_PKT_SIZE 55 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_HEAD, \ + DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_##__x) +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_HEAD, __x) +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_SHIFT 14 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_MASK 0x0000C000 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_ID, __x) +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_SHIFT 16 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_MASK 0x001F0000 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_SIZE, __x) +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN_SHIFT 21 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN_MASK 0x00200000 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, CMD_LP_EN, __x) +#define DSI_DIRECT_CMD_MAIN_SETTINGS_TRIGGER_VAL_SHIFT 24 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_TRIGGER_VAL_MASK 0x0F000000 +#define DSI_DIRECT_CMD_MAIN_SETTINGS_TRIGGER_VAL(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_MAIN_SETTINGS, TRIGGER_VAL, __x) +#define DSI_DIRECT_CMD_STS 0x00000068 +#define DSI_DIRECT_CMD_STS_CMD_TRANSMISSION_SHIFT 0 +#define DSI_DIRECT_CMD_STS_CMD_TRANSMISSION_MASK 0x00000001 +#define 
DSI_DIRECT_CMD_STS_CMD_TRANSMISSION(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS, CMD_TRANSMISSION, __x) +#define DSI_DIRECT_CMD_STS_WRITE_COMPLETED_SHIFT 1 +#define DSI_DIRECT_CMD_STS_WRITE_COMPLETED_MASK 0x00000002 +#define DSI_DIRECT_CMD_STS_WRITE_COMPLETED(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS, WRITE_COMPLETED, __x) +#define DSI_DIRECT_CMD_STS_TRIGGER_COMPLETED_SHIFT 2 +#define DSI_DIRECT_CMD_STS_TRIGGER_COMPLETED_MASK 0x00000004 +#define DSI_DIRECT_CMD_STS_TRIGGER_COMPLETED(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS, TRIGGER_COMPLETED, __x) +#define DSI_DIRECT_CMD_STS_READ_COMPLETED_SHIFT 3 +#define DSI_DIRECT_CMD_STS_READ_COMPLETED_MASK 0x00000008 +#define DSI_DIRECT_CMD_STS_READ_COMPLETED(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS, READ_COMPLETED, __x) +#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_RECEIVED_SHIFT 4 +#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_RECEIVED_MASK 0x00000010 +#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_RECEIVED(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS, ACKNOWLEDGE_RECEIVED, __x) +#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_WITH_ERR_RECEIVED_SHIFT 5 +#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_WITH_ERR_RECEIVED_MASK 0x00000020 +#define DSI_DIRECT_CMD_STS_ACKNOWLEDGE_WITH_ERR_RECEIVED(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS, ACKNOWLEDGE_WITH_ERR_RECEIVED, __x) +#define DSI_DIRECT_CMD_STS_TRIGGER_RECEIVED_SHIFT 6 +#define DSI_DIRECT_CMD_STS_TRIGGER_RECEIVED_MASK 0x00000040 +#define DSI_DIRECT_CMD_STS_TRIGGER_RECEIVED(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS, TRIGGER_RECEIVED, __x) +#define DSI_DIRECT_CMD_STS_TE_RECEIVED_SHIFT 7 +#define DSI_DIRECT_CMD_STS_TE_RECEIVED_MASK 0x00000080 +#define DSI_DIRECT_CMD_STS_TE_RECEIVED(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS, TE_RECEIVED, __x) +#define DSI_DIRECT_CMD_STS_BTA_COMPLETED_SHIFT 8 +#define DSI_DIRECT_CMD_STS_BTA_COMPLETED_MASK 0x00000100 +#define DSI_DIRECT_CMD_STS_BTA_COMPLETED(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS, BTA_COMPLETED, __x) +#define DSI_DIRECT_CMD_STS_BTA_FINISHED_SHIFT 9 +#define 
DSI_DIRECT_CMD_STS_BTA_FINISHED_MASK 0x00000200 +#define DSI_DIRECT_CMD_STS_BTA_FINISHED(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS, BTA_FINISHED, __x) +#define DSI_DIRECT_CMD_STS_READ_COMPLETED_WITH_ERR_SHIFT 10 +#define DSI_DIRECT_CMD_STS_READ_COMPLETED_WITH_ERR_MASK 0x00000400 +#define DSI_DIRECT_CMD_STS_READ_COMPLETED_WITH_ERR(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS, READ_COMPLETED_WITH_ERR, __x) +#define DSI_DIRECT_CMD_STS_TRIGGER_VAL_SHIFT 11 +#define DSI_DIRECT_CMD_STS_TRIGGER_VAL_MASK 0x00007800 +#define DSI_DIRECT_CMD_STS_TRIGGER_VAL(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS, TRIGGER_VAL, __x) +#define DSI_DIRECT_CMD_STS_ACK_VAL_SHIFT 16 +#define DSI_DIRECT_CMD_STS_ACK_VAL_MASK 0xFFFF0000 +#define DSI_DIRECT_CMD_STS_ACK_VAL(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS, ACK_VAL, __x) +#define DSI_DIRECT_CMD_RD_INIT 0x0000006C +#define DSI_DIRECT_CMD_RD_INIT_RESET_SHIFT 0 +#define DSI_DIRECT_CMD_RD_INIT_RESET_MASK 0xFFFFFFFF +#define DSI_DIRECT_CMD_RD_INIT_RESET(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_INIT, RESET, __x) +#define DSI_DIRECT_CMD_WRDAT0 0x00000070 +#define DSI_DIRECT_CMD_WRDAT0_WRDAT0_SHIFT 0 +#define DSI_DIRECT_CMD_WRDAT0_WRDAT0_MASK 0x000000FF +#define DSI_DIRECT_CMD_WRDAT0_WRDAT0(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT0, WRDAT0, __x) +#define DSI_DIRECT_CMD_WRDAT0_WRDAT1_SHIFT 8 +#define DSI_DIRECT_CMD_WRDAT0_WRDAT1_MASK 0x0000FF00 +#define DSI_DIRECT_CMD_WRDAT0_WRDAT1(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT0, WRDAT1, __x) +#define DSI_DIRECT_CMD_WRDAT0_WRDAT2_SHIFT 16 +#define DSI_DIRECT_CMD_WRDAT0_WRDAT2_MASK 0x00FF0000 +#define DSI_DIRECT_CMD_WRDAT0_WRDAT2(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT0, WRDAT2, __x) +#define DSI_DIRECT_CMD_WRDAT0_WRDAT3_SHIFT 24 +#define DSI_DIRECT_CMD_WRDAT0_WRDAT3_MASK 0xFF000000 +#define DSI_DIRECT_CMD_WRDAT0_WRDAT3(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT0, WRDAT3, __x) +#define DSI_DIRECT_CMD_WRDAT1 0x00000074 +#define DSI_DIRECT_CMD_WRDAT1_WRDAT4_SHIFT 0 +#define DSI_DIRECT_CMD_WRDAT1_WRDAT4_MASK 0x000000FF 
+#define DSI_DIRECT_CMD_WRDAT1_WRDAT4(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT1, WRDAT4, __x) +#define DSI_DIRECT_CMD_WRDAT1_WRDAT5_SHIFT 8 +#define DSI_DIRECT_CMD_WRDAT1_WRDAT5_MASK 0x0000FF00 +#define DSI_DIRECT_CMD_WRDAT1_WRDAT5(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT1, WRDAT5, __x) +#define DSI_DIRECT_CMD_WRDAT1_WRDAT6_SHIFT 16 +#define DSI_DIRECT_CMD_WRDAT1_WRDAT6_MASK 0x00FF0000 +#define DSI_DIRECT_CMD_WRDAT1_WRDAT6(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT1, WRDAT6, __x) +#define DSI_DIRECT_CMD_WRDAT1_WRDAT7_SHIFT 24 +#define DSI_DIRECT_CMD_WRDAT1_WRDAT7_MASK 0xFF000000 +#define DSI_DIRECT_CMD_WRDAT1_WRDAT7(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT1, WRDAT7, __x) +#define DSI_DIRECT_CMD_WRDAT2 0x00000078 +#define DSI_DIRECT_CMD_WRDAT2_WRDAT8_SHIFT 0 +#define DSI_DIRECT_CMD_WRDAT2_WRDAT8_MASK 0x000000FF +#define DSI_DIRECT_CMD_WRDAT2_WRDAT8(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT2, WRDAT8, __x) +#define DSI_DIRECT_CMD_WRDAT2_WRDAT9_SHIFT 8 +#define DSI_DIRECT_CMD_WRDAT2_WRDAT9_MASK 0x0000FF00 +#define DSI_DIRECT_CMD_WRDAT2_WRDAT9(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT2, WRDAT9, __x) +#define DSI_DIRECT_CMD_WRDAT2_WRDAT10_SHIFT 16 +#define DSI_DIRECT_CMD_WRDAT2_WRDAT10_MASK 0x00FF0000 +#define DSI_DIRECT_CMD_WRDAT2_WRDAT10(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT2, WRDAT10, __x) +#define DSI_DIRECT_CMD_WRDAT2_WRDAT11_SHIFT 24 +#define DSI_DIRECT_CMD_WRDAT2_WRDAT11_MASK 0xFF000000 +#define DSI_DIRECT_CMD_WRDAT2_WRDAT11(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT2, WRDAT11, __x) +#define DSI_DIRECT_CMD_WRDAT3 0x0000007C +#define DSI_DIRECT_CMD_WRDAT3_WRDAT12_SHIFT 0 +#define DSI_DIRECT_CMD_WRDAT3_WRDAT12_MASK 0x000000FF +#define DSI_DIRECT_CMD_WRDAT3_WRDAT12(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT3, WRDAT12, __x) +#define DSI_DIRECT_CMD_WRDAT3_WRDAT13_SHIFT 8 +#define DSI_DIRECT_CMD_WRDAT3_WRDAT13_MASK 0x0000FF00 +#define DSI_DIRECT_CMD_WRDAT3_WRDAT13(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT3, WRDAT13, __x) +#define DSI_DIRECT_CMD_WRDAT3_WRDAT14_SHIFT 
16 +#define DSI_DIRECT_CMD_WRDAT3_WRDAT14_MASK 0x00FF0000 +#define DSI_DIRECT_CMD_WRDAT3_WRDAT14(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT3, WRDAT14, __x) +#define DSI_DIRECT_CMD_WRDAT3_WRDAT15_SHIFT 24 +#define DSI_DIRECT_CMD_WRDAT3_WRDAT15_MASK 0xFF000000 +#define DSI_DIRECT_CMD_WRDAT3_WRDAT15(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_WRDAT3, WRDAT15, __x) +#define DSI_DIRECT_CMD_RDDAT 0x00000080 +#define DSI_DIRECT_CMD_RDDAT_RDDAT0_SHIFT 0 +#define DSI_DIRECT_CMD_RDDAT_RDDAT0_MASK 0x000000FF +#define DSI_DIRECT_CMD_RDDAT_RDDAT0(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RDDAT, RDDAT0, __x) +#define DSI_DIRECT_CMD_RDDAT_RDDAT1_SHIFT 8 +#define DSI_DIRECT_CMD_RDDAT_RDDAT1_MASK 0x0000FF00 +#define DSI_DIRECT_CMD_RDDAT_RDDAT1(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RDDAT, RDDAT1, __x) +#define DSI_DIRECT_CMD_RDDAT_RDDAT2_SHIFT 16 +#define DSI_DIRECT_CMD_RDDAT_RDDAT2_MASK 0x00FF0000 +#define DSI_DIRECT_CMD_RDDAT_RDDAT2(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RDDAT, RDDAT2, __x) +#define DSI_DIRECT_CMD_RDDAT_RDDAT3_SHIFT 24 +#define DSI_DIRECT_CMD_RDDAT_RDDAT3_MASK 0xFF000000 +#define DSI_DIRECT_CMD_RDDAT_RDDAT3(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RDDAT, RDDAT3, __x) +#define DSI_DIRECT_CMD_RD_PROPERTY 0x00000084 +#define DSI_DIRECT_CMD_RD_PROPERTY_RD_SIZE_SHIFT 0 +#define DSI_DIRECT_CMD_RD_PROPERTY_RD_SIZE_MASK 0x0000FFFF +#define DSI_DIRECT_CMD_RD_PROPERTY_RD_SIZE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_PROPERTY, RD_SIZE, __x) +#define DSI_DIRECT_CMD_RD_PROPERTY_RD_ID_SHIFT 16 +#define DSI_DIRECT_CMD_RD_PROPERTY_RD_ID_MASK 0x00030000 +#define DSI_DIRECT_CMD_RD_PROPERTY_RD_ID(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_PROPERTY, RD_ID, __x) +#define DSI_DIRECT_CMD_RD_PROPERTY_RD_DCSNOTGENERIC_SHIFT 18 +#define DSI_DIRECT_CMD_RD_PROPERTY_RD_DCSNOTGENERIC_MASK 0x00040000 +#define DSI_DIRECT_CMD_RD_PROPERTY_RD_DCSNOTGENERIC(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_PROPERTY, RD_DCSNOTGENERIC, __x) +#define DSI_DIRECT_CMD_RD_STS 0x00000088 +#define DSI_DIRECT_CMD_RD_STS_ERR_FIXED_SHIFT 0 +#define 
DSI_DIRECT_CMD_RD_STS_ERR_FIXED_MASK 0x00000001 +#define DSI_DIRECT_CMD_RD_STS_ERR_FIXED(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_FIXED, __x) +#define DSI_DIRECT_CMD_RD_STS_ERR_UNCORRECTABLE_SHIFT 1 +#define DSI_DIRECT_CMD_RD_STS_ERR_UNCORRECTABLE_MASK 0x00000002 +#define DSI_DIRECT_CMD_RD_STS_ERR_UNCORRECTABLE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_UNCORRECTABLE, __x) +#define DSI_DIRECT_CMD_RD_STS_ERR_CHECKSUM_SHIFT 2 +#define DSI_DIRECT_CMD_RD_STS_ERR_CHECKSUM_MASK 0x00000004 +#define DSI_DIRECT_CMD_RD_STS_ERR_CHECKSUM(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_CHECKSUM, __x) +#define DSI_DIRECT_CMD_RD_STS_ERR_UNDECODABLE_SHIFT 3 +#define DSI_DIRECT_CMD_RD_STS_ERR_UNDECODABLE_MASK 0x00000008 +#define DSI_DIRECT_CMD_RD_STS_ERR_UNDECODABLE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_UNDECODABLE, __x) +#define DSI_DIRECT_CMD_RD_STS_ERR_RECEIVE_SHIFT 4 +#define DSI_DIRECT_CMD_RD_STS_ERR_RECEIVE_MASK 0x00000010 +#define DSI_DIRECT_CMD_RD_STS_ERR_RECEIVE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_RECEIVE, __x) +#define DSI_DIRECT_CMD_RD_STS_ERR_OVERSIZE_SHIFT 5 +#define DSI_DIRECT_CMD_RD_STS_ERR_OVERSIZE_MASK 0x00000020 +#define DSI_DIRECT_CMD_RD_STS_ERR_OVERSIZE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_OVERSIZE, __x) +#define DSI_DIRECT_CMD_RD_STS_ERR_WRONG_LENGTH_SHIFT 6 +#define DSI_DIRECT_CMD_RD_STS_ERR_WRONG_LENGTH_MASK 0x00000040 +#define DSI_DIRECT_CMD_RD_STS_ERR_WRONG_LENGTH(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_WRONG_LENGTH, __x) +#define DSI_DIRECT_CMD_RD_STS_ERR_MISSING_EOT_SHIFT 7 +#define DSI_DIRECT_CMD_RD_STS_ERR_MISSING_EOT_MASK 0x00000080 +#define DSI_DIRECT_CMD_RD_STS_ERR_MISSING_EOT(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_MISSING_EOT, __x) +#define DSI_DIRECT_CMD_RD_STS_ERR_EOT_WITH_ERR_SHIFT 8 +#define DSI_DIRECT_CMD_RD_STS_ERR_EOT_WITH_ERR_MASK 0x00000100 +#define DSI_DIRECT_CMD_RD_STS_ERR_EOT_WITH_ERR(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS, ERR_EOT_WITH_ERR, __x) +#define DSI_VID_MAIN_CTL 
0x00000090 +#define DSI_VID_MAIN_CTL_START_MODE_SHIFT 0 +#define DSI_VID_MAIN_CTL_START_MODE_MASK 0x00000003 +#define DSI_VID_MAIN_CTL_START_MODE(__x) \ + DSI_VAL2REG(DSI_VID_MAIN_CTL, START_MODE, __x) +#define DSI_VID_MAIN_CTL_STOP_MODE_SHIFT 2 +#define DSI_VID_MAIN_CTL_STOP_MODE_MASK 0x0000000C +#define DSI_VID_MAIN_CTL_STOP_MODE(__x) \ + DSI_VAL2REG(DSI_VID_MAIN_CTL, STOP_MODE, __x) +#define DSI_VID_MAIN_CTL_VID_ID_SHIFT 4 +#define DSI_VID_MAIN_CTL_VID_ID_MASK 0x00000030 +#define DSI_VID_MAIN_CTL_VID_ID(__x) \ + DSI_VAL2REG(DSI_VID_MAIN_CTL, VID_ID, __x) +#define DSI_VID_MAIN_CTL_HEADER_SHIFT 6 +#define DSI_VID_MAIN_CTL_HEADER_MASK 0x00000FC0 +#define DSI_VID_MAIN_CTL_HEADER(__x) \ + DSI_VAL2REG(DSI_VID_MAIN_CTL, HEADER, __x) +#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_SHIFT 12 +#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_MASK 0x00003000 +#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_16BITS 0 +#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_18BITS 1 +#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_18BITS_LOOSE 2 +#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_24BITS 3 +#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE_ENUM(__x) \ + DSI_VAL2REG(DSI_VID_MAIN_CTL, VID_PIXEL_MODE, \ + DSI_VID_MAIN_CTL_VID_PIXEL_MODE_##__x) +#define DSI_VID_MAIN_CTL_VID_PIXEL_MODE(__x) \ + DSI_VAL2REG(DSI_VID_MAIN_CTL, VID_PIXEL_MODE, __x) +#define DSI_VID_MAIN_CTL_BURST_MODE_SHIFT 14 +#define DSI_VID_MAIN_CTL_BURST_MODE_MASK 0x00004000 +#define DSI_VID_MAIN_CTL_BURST_MODE(__x) \ + DSI_VAL2REG(DSI_VID_MAIN_CTL, BURST_MODE, __x) +#define DSI_VID_MAIN_CTL_SYNC_PULSE_ACTIVE_SHIFT 15 +#define DSI_VID_MAIN_CTL_SYNC_PULSE_ACTIVE_MASK 0x00008000 +#define DSI_VID_MAIN_CTL_SYNC_PULSE_ACTIVE(__x) \ + DSI_VAL2REG(DSI_VID_MAIN_CTL, SYNC_PULSE_ACTIVE, __x) +#define DSI_VID_MAIN_CTL_SYNC_PULSE_HORIZONTAL_SHIFT 16 +#define DSI_VID_MAIN_CTL_SYNC_PULSE_HORIZONTAL_MASK 0x00010000 +#define DSI_VID_MAIN_CTL_SYNC_PULSE_HORIZONTAL(__x) \ + DSI_VAL2REG(DSI_VID_MAIN_CTL, SYNC_PULSE_HORIZONTAL, __x) +#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_SHIFT 17 
+#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_MASK 0x00060000 +#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_NULL 0 +#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_BLANKING 1 +#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_LP_0 2 +#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_LP_1 3 +#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_ENUM(__x) \ + DSI_VAL2REG(DSI_VID_MAIN_CTL, REG_BLKLINE_MODE, \ + DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_##__x) +#define DSI_VID_MAIN_CTL_REG_BLKLINE_MODE(__x) \ + DSI_VAL2REG(DSI_VID_MAIN_CTL, REG_BLKLINE_MODE, __x) +#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_SHIFT 19 +#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_MASK 0x00180000 +#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_NULL 0 +#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_BLANKING 1 +#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_0 2 +#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_1 3 +#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_ENUM(__x) \ + DSI_VAL2REG(DSI_VID_MAIN_CTL, REG_BLKEOL_MODE, \ + DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_##__x) +#define DSI_VID_MAIN_CTL_REG_BLKEOL_MODE(__x) \ + DSI_VAL2REG(DSI_VID_MAIN_CTL, REG_BLKEOL_MODE, __x) +#define DSI_VID_MAIN_CTL_RECOVERY_MODE_SHIFT 21 +#define DSI_VID_MAIN_CTL_RECOVERY_MODE_MASK 0x00600000 +#define DSI_VID_MAIN_CTL_RECOVERY_MODE(__x) \ + DSI_VAL2REG(DSI_VID_MAIN_CTL, RECOVERY_MODE, __x) +#define DSI_VID_VSIZE 0x00000094 +#define DSI_VID_VSIZE_VSA_LENGTH_SHIFT 0 +#define DSI_VID_VSIZE_VSA_LENGTH_MASK 0x0000003F +#define DSI_VID_VSIZE_VSA_LENGTH(__x) \ + DSI_VAL2REG(DSI_VID_VSIZE, VSA_LENGTH, __x) +#define DSI_VID_VSIZE_VBP_LENGTH_SHIFT 6 +#define DSI_VID_VSIZE_VBP_LENGTH_MASK 0x00000FC0 +#define DSI_VID_VSIZE_VBP_LENGTH(__x) \ + DSI_VAL2REG(DSI_VID_VSIZE, VBP_LENGTH, __x) +#define DSI_VID_VSIZE_VFP_LENGTH_SHIFT 12 +#define DSI_VID_VSIZE_VFP_LENGTH_MASK 0x000FF000 +#define DSI_VID_VSIZE_VFP_LENGTH(__x) \ + DSI_VAL2REG(DSI_VID_VSIZE, VFP_LENGTH, __x) +#define DSI_VID_VSIZE_VACT_LENGTH_SHIFT 20 +#define DSI_VID_VSIZE_VACT_LENGTH_MASK 0x7FF00000 +#define DSI_VID_VSIZE_VACT_LENGTH(__x) \ + 
DSI_VAL2REG(DSI_VID_VSIZE, VACT_LENGTH, __x) +#define DSI_VID_HSIZE1 0x00000098 +#define DSI_VID_HSIZE1_HSA_LENGTH_SHIFT 0 +#define DSI_VID_HSIZE1_HSA_LENGTH_MASK 0x000003FF +#define DSI_VID_HSIZE1_HSA_LENGTH(__x) \ + DSI_VAL2REG(DSI_VID_HSIZE1, HSA_LENGTH, __x) +#define DSI_VID_HSIZE1_HBP_LENGTH_SHIFT 10 +#define DSI_VID_HSIZE1_HBP_LENGTH_MASK 0x000FFC00 +#define DSI_VID_HSIZE1_HBP_LENGTH(__x) \ + DSI_VAL2REG(DSI_VID_HSIZE1, HBP_LENGTH, __x) +#define DSI_VID_HSIZE1_HFP_LENGTH_SHIFT 20 +#define DSI_VID_HSIZE1_HFP_LENGTH_MASK 0x7FF00000 +#define DSI_VID_HSIZE1_HFP_LENGTH(__x) \ + DSI_VAL2REG(DSI_VID_HSIZE1, HFP_LENGTH, __x) +#define DSI_VID_HSIZE2 0x0000009C +#define DSI_VID_HSIZE2_RGB_SIZE_SHIFT 0 +#define DSI_VID_HSIZE2_RGB_SIZE_MASK 0x00001FFF +#define DSI_VID_HSIZE2_RGB_SIZE(__x) \ + DSI_VAL2REG(DSI_VID_HSIZE2, RGB_SIZE, __x) +#define DSI_VID_BLKSIZE1 0x000000A0 +#define DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK_SHIFT 0 +#define DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK_MASK 0x00001FFF +#define DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK(__x) \ + DSI_VAL2REG(DSI_VID_BLKSIZE1, BLKLINE_EVENT_PCK, __x) +#define DSI_VID_BLKSIZE1_BLKEOL_PCK_SHIFT 13 +#define DSI_VID_BLKSIZE1_BLKEOL_PCK_MASK 0x03FFE000 +#define DSI_VID_BLKSIZE1_BLKEOL_PCK(__x) \ + DSI_VAL2REG(DSI_VID_BLKSIZE1, BLKEOL_PCK, __x) +#define DSI_VID_BLKSIZE2 0x000000A4 +#define DSI_VID_BLKSIZE2_BLKLINE_PULSE_PCK_SHIFT 0 +#define DSI_VID_BLKSIZE2_BLKLINE_PULSE_PCK_MASK 0x00001FFF +#define DSI_VID_BLKSIZE2_BLKLINE_PULSE_PCK(__x) \ + DSI_VAL2REG(DSI_VID_BLKSIZE2, BLKLINE_PULSE_PCK, __x) +#define DSI_VID_PCK_TIME 0x000000A8 +#define DSI_VID_PCK_TIME_BLKEOL_DURATION_SHIFT 0 +#define DSI_VID_PCK_TIME_BLKEOL_DURATION_MASK 0x00001FFF +#define DSI_VID_PCK_TIME_BLKEOL_DURATION(__x) \ + DSI_VAL2REG(DSI_VID_PCK_TIME, BLKEOL_DURATION, __x) +#define DSI_VID_DPHY_TIME 0x000000AC +#define DSI_VID_DPHY_TIME_REG_LINE_DURATION_SHIFT 0 +#define DSI_VID_DPHY_TIME_REG_LINE_DURATION_MASK 0x00001FFF +#define DSI_VID_DPHY_TIME_REG_LINE_DURATION(__x) \ + 
DSI_VAL2REG(DSI_VID_DPHY_TIME, REG_LINE_DURATION, __x) +#define DSI_VID_DPHY_TIME_REG_WAKEUP_TIME_SHIFT 13 +#define DSI_VID_DPHY_TIME_REG_WAKEUP_TIME_MASK 0x00FFE000 +#define DSI_VID_DPHY_TIME_REG_WAKEUP_TIME(__x) \ + DSI_VAL2REG(DSI_VID_DPHY_TIME, REG_WAKEUP_TIME, __x) +#define DSI_VID_ERR_COLOR 0x000000B0 +#define DSI_VID_ERR_COLOR_COL_RED_SHIFT 0 +#define DSI_VID_ERR_COLOR_COL_RED_MASK 0x000000FF +#define DSI_VID_ERR_COLOR_COL_RED(__x) \ + DSI_VAL2REG(DSI_VID_ERR_COLOR, COL_RED, __x) +#define DSI_VID_ERR_COLOR_COL_GREEN_SHIFT 8 +#define DSI_VID_ERR_COLOR_COL_GREEN_MASK 0x0000FF00 +#define DSI_VID_ERR_COLOR_COL_GREEN(__x) \ + DSI_VAL2REG(DSI_VID_ERR_COLOR, COL_GREEN, __x) +#define DSI_VID_ERR_COLOR_COL_BLUE_SHIFT 16 +#define DSI_VID_ERR_COLOR_COL_BLUE_MASK 0x00FF0000 +#define DSI_VID_ERR_COLOR_COL_BLUE(__x) \ + DSI_VAL2REG(DSI_VID_ERR_COLOR, COL_BLUE, __x) +#define DSI_VID_ERR_COLOR_PAD_VAL_SHIFT 24 +#define DSI_VID_ERR_COLOR_PAD_VAL_MASK 0xFF000000 +#define DSI_VID_ERR_COLOR_PAD_VAL(__x) \ + DSI_VAL2REG(DSI_VID_ERR_COLOR, PAD_VAL, __x) +#define DSI_VID_VPOS 0x000000B4 +#define DSI_VID_VPOS_LINE_POS_SHIFT 0 +#define DSI_VID_VPOS_LINE_POS_MASK 0x00000003 +#define DSI_VID_VPOS_LINE_POS(__x) \ + DSI_VAL2REG(DSI_VID_VPOS, LINE_POS, __x) +#define DSI_VID_VPOS_LINE_VAL_SHIFT 2 +#define DSI_VID_VPOS_LINE_VAL_MASK 0x00001FFC +#define DSI_VID_VPOS_LINE_VAL(__x) \ + DSI_VAL2REG(DSI_VID_VPOS, LINE_VAL, __x) +#define DSI_VID_HPOS 0x000000B8 +#define DSI_VID_HPOS_HORIZONTAL_POS_SHIFT 0 +#define DSI_VID_HPOS_HORIZONTAL_POS_MASK 0x00000007 +#define DSI_VID_HPOS_HORIZONTAL_POS(__x) \ + DSI_VAL2REG(DSI_VID_HPOS, HORIZONTAL_POS, __x) +#define DSI_VID_HPOS_HORIZONTAL_VAL_SHIFT 3 +#define DSI_VID_HPOS_HORIZONTAL_VAL_MASK 0x0000FFF8 +#define DSI_VID_HPOS_HORIZONTAL_VAL(__x) \ + DSI_VAL2REG(DSI_VID_HPOS, HORIZONTAL_VAL, __x) +#define DSI_VID_MODE_STS 0x000000BC +#define DSI_VID_MODE_STS_VSG_RUNNING_SHIFT 0 +#define DSI_VID_MODE_STS_VSG_RUNNING_MASK 0x00000001 +#define 
DSI_VID_MODE_STS_VSG_RUNNING(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS, VSG_RUNNING, __x) +#define DSI_VID_MODE_STS_ERR_MISSING_DATA_SHIFT 1 +#define DSI_VID_MODE_STS_ERR_MISSING_DATA_MASK 0x00000002 +#define DSI_VID_MODE_STS_ERR_MISSING_DATA(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS, ERR_MISSING_DATA, __x) +#define DSI_VID_MODE_STS_ERR_MISSING_HSYNC_SHIFT 2 +#define DSI_VID_MODE_STS_ERR_MISSING_HSYNC_MASK 0x00000004 +#define DSI_VID_MODE_STS_ERR_MISSING_HSYNC(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS, ERR_MISSING_HSYNC, __x) +#define DSI_VID_MODE_STS_ERR_MISSING_VSYNC_SHIFT 3 +#define DSI_VID_MODE_STS_ERR_MISSING_VSYNC_MASK 0x00000008 +#define DSI_VID_MODE_STS_ERR_MISSING_VSYNC(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS, ERR_MISSING_VSYNC, __x) +#define DSI_VID_MODE_STS_REG_ERR_SMALL_LENGTH_SHIFT 4 +#define DSI_VID_MODE_STS_REG_ERR_SMALL_LENGTH_MASK 0x00000010 +#define DSI_VID_MODE_STS_REG_ERR_SMALL_LENGTH(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS, REG_ERR_SMALL_LENGTH, __x) +#define DSI_VID_MODE_STS_REG_ERR_SMALL_HEIGHT_SHIFT 5 +#define DSI_VID_MODE_STS_REG_ERR_SMALL_HEIGHT_MASK 0x00000020 +#define DSI_VID_MODE_STS_REG_ERR_SMALL_HEIGHT(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS, REG_ERR_SMALL_HEIGHT, __x) +#define DSI_VID_MODE_STS_ERR_BURSTWRITE_SHIFT 6 +#define DSI_VID_MODE_STS_ERR_BURSTWRITE_MASK 0x00000040 +#define DSI_VID_MODE_STS_ERR_BURSTWRITE(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS, ERR_BURSTWRITE, __x) +#define DSI_VID_MODE_STS_ERR_LONGWRITE_SHIFT 7 +#define DSI_VID_MODE_STS_ERR_LONGWRITE_MASK 0x00000080 +#define DSI_VID_MODE_STS_ERR_LONGWRITE(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS, ERR_LONGWRITE, __x) +#define DSI_VID_MODE_STS_ERR_LONGREAD_SHIFT 8 +#define DSI_VID_MODE_STS_ERR_LONGREAD_MASK 0x00000100 +#define DSI_VID_MODE_STS_ERR_LONGREAD(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS, ERR_LONGREAD, __x) +#define DSI_VID_MODE_STS_ERR_VRS_WRONG_LENGTH_SHIFT 9 +#define DSI_VID_MODE_STS_ERR_VRS_WRONG_LENGTH_MASK 0x00000200 +#define DSI_VID_MODE_STS_ERR_VRS_WRONG_LENGTH(__x) \ + 
DSI_VAL2REG(DSI_VID_MODE_STS, ERR_VRS_WRONG_LENGTH, __x) +#define DSI_VID_MODE_STS_VSG_RECOVERY_SHIFT 10 +#define DSI_VID_MODE_STS_VSG_RECOVERY_MASK 0x00000400 +#define DSI_VID_MODE_STS_VSG_RECOVERY(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS, VSG_RECOVERY, __x) +#define DSI_VID_VCA_SETTING1 0x000000C0 +#define DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT_SHIFT 0 +#define DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT_MASK 0x0000FFFF +#define DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT(__x) \ + DSI_VAL2REG(DSI_VID_VCA_SETTING1, MAX_BURST_LIMIT, __x) +#define DSI_VID_VCA_SETTING1_BURST_LP_SHIFT 16 +#define DSI_VID_VCA_SETTING1_BURST_LP_MASK 0x00010000 +#define DSI_VID_VCA_SETTING1_BURST_LP(__x) \ + DSI_VAL2REG(DSI_VID_VCA_SETTING1, BURST_LP, __x) +#define DSI_VID_VCA_SETTING2 0x000000C4 +#define DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_SHIFT 0 +#define DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_MASK 0x0000FFFF +#define DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT(__x) \ + DSI_VAL2REG(DSI_VID_VCA_SETTING2, EXACT_BURST_LIMIT, __x) +#define DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT_SHIFT 16 +#define DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT_MASK 0xFFFF0000 +#define DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT(__x) \ + DSI_VAL2REG(DSI_VID_VCA_SETTING2, MAX_LINE_LIMIT, __x) +#define DSI_TVG_CTL 0x000000C8 +#define DSI_TVG_CTL_TVG_RUN_SHIFT 0 +#define DSI_TVG_CTL_TVG_RUN_MASK 0x00000001 +#define DSI_TVG_CTL_TVG_RUN(__x) \ + DSI_VAL2REG(DSI_TVG_CTL, TVG_RUN, __x) +#define DSI_TVG_CTL_TVG_STOPMODE_SHIFT 1 +#define DSI_TVG_CTL_TVG_STOPMODE_MASK 0x00000006 +#define DSI_TVG_CTL_TVG_STOPMODE(__x) \ + DSI_VAL2REG(DSI_TVG_CTL, TVG_STOPMODE, __x) +#define DSI_TVG_CTL_TVG_MODE_SHIFT 3 +#define DSI_TVG_CTL_TVG_MODE_MASK 0x00000018 +#define DSI_TVG_CTL_TVG_MODE(__x) \ + DSI_VAL2REG(DSI_TVG_CTL, TVG_MODE, __x) +#define DSI_TVG_CTL_TVG_STRIPE_SIZE_SHIFT 5 +#define DSI_TVG_CTL_TVG_STRIPE_SIZE_MASK 0x000000E0 +#define DSI_TVG_CTL_TVG_STRIPE_SIZE(__x) \ + DSI_VAL2REG(DSI_TVG_CTL, TVG_STRIPE_SIZE, __x) +#define DSI_TVG_IMG_SIZE 0x000000CC +#define 
DSI_TVG_IMG_SIZE_TVG_LINE_SIZE_SHIFT 0 +#define DSI_TVG_IMG_SIZE_TVG_LINE_SIZE_MASK 0x00001FFF +#define DSI_TVG_IMG_SIZE_TVG_LINE_SIZE(__x) \ + DSI_VAL2REG(DSI_TVG_IMG_SIZE, TVG_LINE_SIZE, __x) +#define DSI_TVG_IMG_SIZE_TVG_NBLINE_SHIFT 16 +#define DSI_TVG_IMG_SIZE_TVG_NBLINE_MASK 0x07FF0000 +#define DSI_TVG_IMG_SIZE_TVG_NBLINE(__x) \ + DSI_VAL2REG(DSI_TVG_IMG_SIZE, TVG_NBLINE, __x) +#define DSI_TVG_COLOR1 0x000000D0 +#define DSI_TVG_COLOR1_COL1_RED_SHIFT 0 +#define DSI_TVG_COLOR1_COL1_RED_MASK 0x000000FF +#define DSI_TVG_COLOR1_COL1_RED(__x) \ + DSI_VAL2REG(DSI_TVG_COLOR1, COL1_RED, __x) +#define DSI_TVG_COLOR1_COL1_GREEN_SHIFT 8 +#define DSI_TVG_COLOR1_COL1_GREEN_MASK 0x0000FF00 +#define DSI_TVG_COLOR1_COL1_GREEN(__x) \ + DSI_VAL2REG(DSI_TVG_COLOR1, COL1_GREEN, __x) +#define DSI_TVG_COLOR1_COL1_BLUE_SHIFT 16 +#define DSI_TVG_COLOR1_COL1_BLUE_MASK 0x00FF0000 +#define DSI_TVG_COLOR1_COL1_BLUE(__x) \ + DSI_VAL2REG(DSI_TVG_COLOR1, COL1_BLUE, __x) +#define DSI_TVG_COLOR2 0x000000D4 +#define DSI_TVG_COLOR2_COL2_RED_SHIFT 0 +#define DSI_TVG_COLOR2_COL2_RED_MASK 0x000000FF +#define DSI_TVG_COLOR2_COL2_RED(__x) \ + DSI_VAL2REG(DSI_TVG_COLOR2, COL2_RED, __x) +#define DSI_TVG_COLOR2_COL2_GREEN_SHIFT 8 +#define DSI_TVG_COLOR2_COL2_GREEN_MASK 0x0000FF00 +#define DSI_TVG_COLOR2_COL2_GREEN(__x) \ + DSI_VAL2REG(DSI_TVG_COLOR2, COL2_GREEN, __x) +#define DSI_TVG_COLOR2_COL2_BLUE_SHIFT 16 +#define DSI_TVG_COLOR2_COL2_BLUE_MASK 0x00FF0000 +#define DSI_TVG_COLOR2_COL2_BLUE(__x) \ + DSI_VAL2REG(DSI_TVG_COLOR2, COL2_BLUE, __x) +#define DSI_TVG_STS 0x000000D8 +#define DSI_TVG_STS_TVG_RUNNING_SHIFT 0 +#define DSI_TVG_STS_TVG_RUNNING_MASK 0x00000001 +#define DSI_TVG_STS_TVG_RUNNING(__x) \ + DSI_VAL2REG(DSI_TVG_STS, TVG_RUNNING, __x) +#define DSI_TBG_CTL 0x000000E0 +#define DSI_TBG_CTL_TBG_START_SHIFT 0 +#define DSI_TBG_CTL_TBG_START_MASK 0x00000001 +#define DSI_TBG_CTL_TBG_START(__x) \ + DSI_VAL2REG(DSI_TBG_CTL, TBG_START, __x) +#define DSI_TBG_CTL_TBG_HS_REQ_SHIFT 1 +#define 
DSI_TBG_CTL_TBG_HS_REQ_MASK 0x00000002 +#define DSI_TBG_CTL_TBG_HS_REQ(__x) \ + DSI_VAL2REG(DSI_TBG_CTL, TBG_HS_REQ, __x) +#define DSI_TBG_CTL_TBG_DATA_SEL_SHIFT 2 +#define DSI_TBG_CTL_TBG_DATA_SEL_MASK 0x00000004 +#define DSI_TBG_CTL_TBG_DATA_SEL(__x) \ + DSI_VAL2REG(DSI_TBG_CTL, TBG_DATA_SEL, __x) +#define DSI_TBG_CTL_TBG_MODE_SHIFT 3 +#define DSI_TBG_CTL_TBG_MODE_MASK 0x00000018 +#define DSI_TBG_CTL_TBG_MODE_1BYTE 0 +#define DSI_TBG_CTL_TBG_MODE_2BYTE 1 +#define DSI_TBG_CTL_TBG_MODE_BURST_COUNTER 2 +#define DSI_TBG_CTL_TBG_MODE_BURST 3 +#define DSI_TBG_CTL_TBG_MODE_ENUM(__x) \ + DSI_VAL2REG(DSI_TBG_CTL, TBG_MODE, DSI_TBG_CTL_TBG_MODE_##__x) +#define DSI_TBG_CTL_TBG_MODE(__x) \ + DSI_VAL2REG(DSI_TBG_CTL, TBG_MODE, __x) +#define DSI_TBG_SETTING 0x000000E4 +#define DSI_TBG_SETTING_TBG_DATA_SHIFT 0 +#define DSI_TBG_SETTING_TBG_DATA_MASK 0x0000FFFF +#define DSI_TBG_SETTING_TBG_DATA(__x) \ + DSI_VAL2REG(DSI_TBG_SETTING, TBG_DATA, __x) +#define DSI_TBG_SETTING_TBG_CPT_SHIFT 16 +#define DSI_TBG_SETTING_TBG_CPT_MASK 0x0FFF0000 +#define DSI_TBG_SETTING_TBG_CPT(__x) \ + DSI_VAL2REG(DSI_TBG_SETTING, TBG_CPT, __x) +#define DSI_TBG_STS 0x000000E8 +#define DSI_TBG_STS_TBG_STATUS_SHIFT 0 +#define DSI_TBG_STS_TBG_STATUS_MASK 0x00000001 +#define DSI_TBG_STS_TBG_STATUS(__x) \ + DSI_VAL2REG(DSI_TBG_STS, TBG_STATUS, __x) +#define DSI_MCTL_MAIN_STS_CTL 0x000000F0 +#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EN_SHIFT 0 +#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EN_MASK 0x00000001 +#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, PLL_LOCK_EN, __x) +#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EN_SHIFT 1 +#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EN_MASK 0x00000002 +#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, CLKLANE_READY_EN, __x) +#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EN_SHIFT 2 +#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EN_MASK 0x00000004 +#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EN(__x) \ + 
DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, DAT1_READY_EN, __x) +#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EN_SHIFT 3 +#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EN_MASK 0x00000008 +#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, DAT2_READY_EN, __x) +#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EN_SHIFT 4 +#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EN_MASK 0x00000010 +#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, HSTX_TO_ERR_EN, __x) +#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EN_SHIFT 5 +#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EN_MASK 0x00000020 +#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, LPRX_TO_ERR_EN, __x) +#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EN_SHIFT 6 +#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EN_MASK 0x00000040 +#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, CRS_UNTERM_PCK_ERR_EN, __x) +#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EN_SHIFT 7 +#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EN_MASK 0x00000080 +#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, VRS_UNTERM_PCK_ERR_EN, __x) +#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EDGE_SHIFT 16 +#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EDGE_MASK 0x00010000 +#define DSI_MCTL_MAIN_STS_CTL_PLL_LOCK_EDGE(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, PLL_LOCK_EDGE, __x) +#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EDGE_SHIFT 17 +#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EDGE_MASK 0x00020000 +#define DSI_MCTL_MAIN_STS_CTL_CLKLANE_READY_EDGE(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, CLKLANE_READY_EDGE, __x) +#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EDGE_SHIFT 18 +#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EDGE_MASK 0x00040000 +#define DSI_MCTL_MAIN_STS_CTL_DAT1_READY_EDGE(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, DAT1_READY_EDGE, __x) +#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EDGE_SHIFT 19 
+#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EDGE_MASK 0x00080000 +#define DSI_MCTL_MAIN_STS_CTL_DAT2_READY_EDGE(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, DAT2_READY_EDGE, __x) +#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EDGE_SHIFT 20 +#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EDGE_MASK 0x00100000 +#define DSI_MCTL_MAIN_STS_CTL_HSTX_TO_ERR_EDGE(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, HSTX_TO_ERR_EDGE, __x) +#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EDGE_SHIFT 21 +#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EDGE_MASK 0x00200000 +#define DSI_MCTL_MAIN_STS_CTL_LPRX_TO_ERR_EDGE(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, LPRX_TO_ERR_EDGE, __x) +#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EDGE_SHIFT 22 +#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EDGE_MASK 0x00400000 +#define DSI_MCTL_MAIN_STS_CTL_CRS_UNTERM_PCK_ERR_EDGE(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, CRS_UNTERM_PCK_ERR_EDGE, __x) +#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EDGE_SHIFT 23 +#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EDGE_MASK 0x00800000 +#define DSI_MCTL_MAIN_STS_CTL_VRS_UNTERM_PCK_ERR_EDGE(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CTL, VRS_UNTERM_PCK_ERR_EDGE, __x) +#define DSI_CMD_MODE_STS_CTL 0x000000F4 +#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EN_SHIFT 0 +#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EN_MASK 0x00000001 +#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EN(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_NO_TE_EN, __x) +#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EN_SHIFT 1 +#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EN_MASK 0x00000002 +#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EN(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_TE_MISS_EN, __x) +#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EN_SHIFT 2 +#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EN_MASK 0x00000004 +#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EN(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_SDI1_UNDERRUN_EN, __x) +#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EN_SHIFT 3 +#define 
DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EN_MASK 0x00000008 +#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EN(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_SDI2_UNDERRUN_EN, __x) +#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EN_SHIFT 4 +#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EN_MASK 0x00000010 +#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EN(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_UNWANTED_RD_EN, __x) +#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EN_SHIFT 5 +#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EN_MASK 0x00000020 +#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EN(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, CSM_RUNNING_EN, __x) +#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EDGE_SHIFT 16 +#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EDGE_MASK 0x00010000 +#define DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EDGE(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_NO_TE_EDGE, __x) +#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EDGE_SHIFT 17 +#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EDGE_MASK 0x00020000 +#define DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EDGE(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_TE_MISS_EDGE, __x) +#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EDGE_SHIFT 18 +#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EDGE_MASK 0x00040000 +#define DSI_CMD_MODE_STS_CTL_ERR_SDI1_UNDERRUN_EDGE(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_SDI1_UNDERRUN_EDGE, __x) +#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EDGE_SHIFT 19 +#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EDGE_MASK 0x00080000 +#define DSI_CMD_MODE_STS_CTL_ERR_SDI2_UNDERRUN_EDGE(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_SDI2_UNDERRUN_EDGE, __x) +#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EDGE_SHIFT 20 +#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EDGE_MASK 0x00100000 +#define DSI_CMD_MODE_STS_CTL_ERR_UNWANTED_RD_EDGE(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, ERR_UNWANTED_RD_EDGE, __x) +#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EDGE_SHIFT 21 +#define DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EDGE_MASK 0x00200000 +#define 
DSI_CMD_MODE_STS_CTL_CSM_RUNNING_EDGE(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_CTL, CSM_RUNNING_EDGE, __x) +#define DSI_DIRECT_CMD_STS_CTL 0x000000F8 +#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EN_SHIFT 0 +#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EN_MASK 0x00000001 +#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EN(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, CMD_TRANSMISSION_EN, __x) +#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EN_SHIFT 1 +#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EN_MASK 0x00000002 +#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EN(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, WRITE_COMPLETED_EN, __x) +#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EN_SHIFT 2 +#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EN_MASK 0x00000004 +#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EN(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TRIGGER_COMPLETED_EN, __x) +#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EN_SHIFT 3 +#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EN_MASK 0x00000008 +#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EN(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, READ_COMPLETED_EN, __x) +#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EN_SHIFT 4 +#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EN_MASK 0x00000010 +#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EN(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, ACKNOWLEDGE_RECEIVED_EN, __x) +#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EN_SHIFT 5 +#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EN_MASK 0x00000020 +#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EN(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, ACKNOWLEDGE_WITH_ERR_EN, __x) +#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EN_SHIFT 6 +#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EN_MASK 0x00000040 +#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EN(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TRIGGER_RECEIVED_EN, __x) +#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EN_SHIFT 7 +#define 
DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EN_MASK 0x00000080 +#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EN(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TE_RECEIVED_EN, __x) +#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EN_SHIFT 8 +#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EN_MASK 0x00000100 +#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EN(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, BTA_COMPLETED_EN, __x) +#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EN_SHIFT 9 +#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EN_MASK 0x00000200 +#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EN(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, BTA_FINISHED_EN, __x) +#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EN_SHIFT 10 +#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EN_MASK 0x00000400 +#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EN(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, READ_COMPLETED_WITH_ERR_EN, __x) +#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EDGE_SHIFT 16 +#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EDGE_MASK 0x00010000 +#define DSI_DIRECT_CMD_STS_CTL_CMD_TRANSMISSION_EDGE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, CMD_TRANSMISSION_EDGE, __x) +#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EDGE_SHIFT 17 +#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EDGE_MASK 0x00020000 +#define DSI_DIRECT_CMD_STS_CTL_WRITE_COMPLETED_EDGE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, WRITE_COMPLETED_EDGE, __x) +#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EDGE_SHIFT 18 +#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EDGE_MASK 0x00040000 +#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_COMPLETED_EDGE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TRIGGER_COMPLETED_EDGE, __x) +#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EDGE_SHIFT 19 +#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EDGE_MASK 0x00080000 +#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_EDGE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, READ_COMPLETED_EDGE, __x) +#define 
DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EDGE_SHIFT 20 +#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EDGE_MASK 0x00100000 +#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_RECEIVED_EDGE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, ACKNOWLEDGE_RECEIVED_EDGE, __x) +#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EDGE_SHIFT 21 +#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EDGE_MASK 0x00200000 +#define DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EDGE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, ACKNOWLEDGE_WITH_ERR_EDGE, __x) +#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EDGE_SHIFT 22 +#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EDGE_MASK 0x00400000 +#define DSI_DIRECT_CMD_STS_CTL_TRIGGER_RECEIVED_EDGE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TRIGGER_RECEIVED_EDGE, __x) +#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EDGE_SHIFT 23 +#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EDGE_MASK 0x00800000 +#define DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EDGE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, TE_RECEIVED_EDGE, __x) +#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EDGE_SHIFT 24 +#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EDGE_MASK 0x01000000 +#define DSI_DIRECT_CMD_STS_CTL_BTA_COMPLETED_EDGE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, BTA_COMPLETED_EDGE, __x) +#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EDGE_SHIFT 25 +#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EDGE_MASK 0x02000000 +#define DSI_DIRECT_CMD_STS_CTL_BTA_FINISHED_EDGE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, BTA_FINISHED_EDGE, __x) +#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EDGE_SHIFT 26 +#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EDGE_MASK 0x04000000 +#define DSI_DIRECT_CMD_STS_CTL_READ_COMPLETED_WITH_ERR_EDGE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CTL, READ_COMPLETED_WITH_ERR_EDGE, __x) +#define DSI_DIRECT_CMD_RD_STS_CTL 0x000000FC +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EN_SHIFT 0 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EN_MASK 0x00000001 +#define 
DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EN(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_FIXED_EN, __x) +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EN_SHIFT 1 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EN_MASK 0x00000002 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EN(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_UNCORRECTABLE_EN, __x) +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EN_SHIFT 2 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EN_MASK 0x00000004 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EN(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_CHECKSUM_EN, __x) +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EN_SHIFT 3 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EN_MASK 0x00000008 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EN(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_UNDECODABLE_EN, __x) +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EN_SHIFT 4 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EN_MASK 0x00000010 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EN(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_RECEIVE_EN, __x) +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EN_SHIFT 5 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EN_MASK 0x00000020 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EN(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_OVERSIZE_EN, __x) +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EN_SHIFT 6 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EN_MASK 0x00000040 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EN(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_WRONG_LENGTH_EN, __x) +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EN_SHIFT 7 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EN_MASK 0x00000080 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EN(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_MISSING_EOT_EN, __x) +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EN_SHIFT 8 +#define 
DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EN_MASK 0x00000100 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EN(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_EOT_WITH_ERR_EN, __x) +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EDGE_SHIFT 16 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EDGE_MASK 0x00010000 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_FIXED_EDGE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_FIXED_EDGE, __x) +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EDGE_SHIFT 17 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EDGE_MASK 0x00020000 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNCORRECTABLE_EDGE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_UNCORRECTABLE_EDGE, __x) +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EDGE_SHIFT 18 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EDGE_MASK 0x00040000 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_CHECKSUM_EDGE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_CHECKSUM_EDGE, __x) +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EDGE_SHIFT 19 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EDGE_MASK 0x00080000 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_UNDECODABLE_EDGE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_UNDECODABLE_EDGE, __x) +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EDGE_SHIFT 20 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EDGE_MASK 0x00100000 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_RECEIVE_EDGE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_RECEIVE_EDGE, __x) +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EDGE_SHIFT 21 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EDGE_MASK 0x00200000 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_OVERSIZE_EDGE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_OVERSIZE_EDGE, __x) +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EDGE_SHIFT 22 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EDGE_MASK 0x00400000 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_WRONG_LENGTH_EDGE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, 
ERR_WRONG_LENGTH_EDGE, __x) +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EDGE_SHIFT 23 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EDGE_MASK 0x00800000 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_MISSING_EOT_EDGE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_MISSING_EOT_EDGE, __x) +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EDGE_SHIFT 24 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EDGE_MASK 0x01000000 +#define DSI_DIRECT_CMD_RD_STS_CTL_ERR_EOT_WITH_ERR_EDGE(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CTL, ERR_EOT_WITH_ERR_EDGE, __x) +#define DSI_VID_MODE_STS_CTL 0x00000100 +#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EN_SHIFT 0 +#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EN_MASK 0x00000001 +#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EN(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CTL, VSG_RUNNING_EN, __x) +#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EN_SHIFT 1 +#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EN_MASK 0x00000002 +#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EN(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_DATA_EN, __x) +#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EN_SHIFT 2 +#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EN_MASK 0x00000004 +#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EN(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_HSYNC_EN, __x) +#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EN_SHIFT 3 +#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EN_MASK 0x00000008 +#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EN(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_VSYNC_EN, __x) +#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EN_SHIFT 4 +#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EN_MASK 0x00000010 +#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EN(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CTL, REG_ERR_SMALL_LENGTH_EN, __x) +#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EN_SHIFT 5 +#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EN_MASK 0x00000020 +#define 
DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EN(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CTL, REG_ERR_SMALL_HEIGHT_EN, __x) +#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EN_SHIFT 6 +#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EN_MASK 0x00000040 +#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EN(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_BURSTWRITE_EN, __x) +#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EN_SHIFT 7 +#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EN_MASK 0x00000080 +#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EN(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_LONGWRITE_EN, __x) +#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EN_SHIFT 8 +#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EN_MASK 0x00000100 +#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EN(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_LONGREAD_EN, __x) +#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EN_SHIFT 9 +#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EN_MASK 0x00000200 +#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EN(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_VRS_WRONG_LENGTH_EN, __x) +#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EDGE_SHIFT 16 +#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EDGE_MASK 0x00010000 +#define DSI_VID_MODE_STS_CTL_VSG_RUNNING_EDGE(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CTL, VSG_RUNNING_EDGE, __x) +#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EDGE_SHIFT 17 +#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EDGE_MASK 0x00020000 +#define DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA_EDGE(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_DATA_EDGE, __x) +#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EDGE_SHIFT 18 +#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EDGE_MASK 0x00040000 +#define DSI_VID_MODE_STS_CTL_ERR_MISSING_HSYNC_EDGE(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_HSYNC_EDGE, __x) +#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EDGE_SHIFT 19 +#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EDGE_MASK 0x00080000 +#define DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC_EDGE(__x) 
\ + DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_MISSING_VSYNC_EDGE, __x) +#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EDGE_SHIFT 20 +#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EDGE_MASK 0x00100000 +#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_LENGTH_EDGE(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CTL, REG_ERR_SMALL_LENGTH_EDGE, __x) +#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EDGE_SHIFT 21 +#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EDGE_MASK 0x00200000 +#define DSI_VID_MODE_STS_CTL_REG_ERR_SMALL_HEIGHT_EDGE(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CTL, REG_ERR_SMALL_HEIGHT_EDGE, __x) +#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EDGE_SHIFT 22 +#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EDGE_MASK 0x00400000 +#define DSI_VID_MODE_STS_CTL_ERR_BURSTWRITE_EDGE(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_BURSTWRITE_EDGE, __x) +#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EDGE_SHIFT 23 +#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EDGE_MASK 0x00800000 +#define DSI_VID_MODE_STS_CTL_ERR_LONGWRITE_EDGE(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_LONGWRITE_EDGE, __x) +#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EDGE_SHIFT 24 +#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EDGE_MASK 0x01000000 +#define DSI_VID_MODE_STS_CTL_ERR_LONGREAD_EDGE(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_LONGREAD_EDGE, __x) +#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EDGE_SHIFT 25 +#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EDGE_MASK 0x02000000 +#define DSI_VID_MODE_STS_CTL_ERR_VRS_WRONG_LENGTH_EDGE(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CTL, ERR_VRS_WRONG_LENGTH_EDGE, __x) +#define DSI_VID_MODE_STS_CTL_VSG_RECOVERY_EDGE_SHIFT 26 +#define DSI_VID_MODE_STS_CTL_VSG_RECOVERY_EDGE_MASK 0x04000000 +#define DSI_VID_MODE_STS_CTL_VSG_RECOVERY_EDGE(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CTL, VSG_RECOVERY_EDGE, __x) +#define DSI_TG_STS_CTL 0x00000104 +#define DSI_TG_STS_CTL_TVG_STS_EN_SHIFT 0 +#define DSI_TG_STS_CTL_TVG_STS_EN_MASK 0x00000001 +#define DSI_TG_STS_CTL_TVG_STS_EN(__x) \ + 
DSI_VAL2REG(DSI_TG_STS_CTL, TVG_STS_EN, __x) +#define DSI_TG_STS_CTL_TBG_STS_EN_SHIFT 1 +#define DSI_TG_STS_CTL_TBG_STS_EN_MASK 0x00000002 +#define DSI_TG_STS_CTL_TBG_STS_EN(__x) \ + DSI_VAL2REG(DSI_TG_STS_CTL, TBG_STS_EN, __x) +#define DSI_TG_STS_CTL_TVG_STS_EDGE_SHIFT 16 +#define DSI_TG_STS_CTL_TVG_STS_EDGE_MASK 0x00010000 +#define DSI_TG_STS_CTL_TVG_STS_EDGE(__x) \ + DSI_VAL2REG(DSI_TG_STS_CTL, TVG_STS_EDGE, __x) +#define DSI_TG_STS_CTL_TBG_STS_EDGE_SHIFT 17 +#define DSI_TG_STS_CTL_TBG_STS_EDGE_MASK 0x00020000 +#define DSI_TG_STS_CTL_TBG_STS_EDGE(__x) \ + DSI_VAL2REG(DSI_TG_STS_CTL, TBG_STS_EDGE, __x) +#define DSI_MCTL_DHPY_ERR_CTL 0x00000108 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EN_SHIFT 6 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EN_MASK 0x00000040 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_ESC_1_EN, __x) +#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EN_SHIFT 7 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EN_MASK 0x00000080 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_ESC_2_EN, __x) +#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EN_SHIFT 8 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EN_MASK 0x00000100 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_SYNCESC_1_EN, __x) +#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EN_SHIFT 9 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EN_MASK 0x00000200 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_SYNCESC_2_EN, __x) +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EN_SHIFT 10 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EN_MASK 0x00000400 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONTROL_1_EN, __x) +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EN_SHIFT 11 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EN_MASK 0x00000800 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EN(__x) \ + 
DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONTROL_2_EN, __x) +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EN_SHIFT 12 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EN_MASK 0x00001000 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP0_1_EN, __x) +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EN_SHIFT 13 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EN_MASK 0x00002000 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP0_2_EN, __x) +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EN_SHIFT 14 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EN_MASK 0x00004000 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP1_1_EN, __x) +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EN_SHIFT 15 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EN_MASK 0x00008000 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EN(__x) \ + DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP1_2_EN, __x) +#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EDGE_SHIFT 22 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EDGE_MASK 0x00400000 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_1_EDGE(__x) \ + DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_ESC_1_EDGE, __x) +#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EDGE_SHIFT 23 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EDGE_MASK 0x00800000 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_ESC_2_EDGE(__x) \ + DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_ESC_2_EDGE, __x) +#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EDGE_SHIFT 24 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EDGE_MASK 0x01000000 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_1_EDGE(__x) \ + DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_SYNCESC_1_EDGE, __x) +#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EDGE_SHIFT 25 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EDGE_MASK 0x02000000 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_SYNCESC_2_EDGE(__x) \ + DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_SYNCESC_2_EDGE, __x) +#define 
DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EDGE_SHIFT 26 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EDGE_MASK 0x04000000 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_1_EDGE(__x) \ + DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONTROL_1_EDGE, __x) +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EDGE_SHIFT 27 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EDGE_MASK 0x08000000 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONTROL_2_EDGE(__x) \ + DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONTROL_2_EDGE, __x) +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EDGE_SHIFT 28 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EDGE_MASK 0x10000000 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_1_EDGE(__x) \ + DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP0_1_EDGE, __x) +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EDGE_SHIFT 29 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EDGE_MASK 0x20000000 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP0_2_EDGE(__x) \ + DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP0_2_EDGE, __x) +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EDGE_SHIFT 30 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EDGE_MASK 0x40000000 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_1_EDGE(__x) \ + DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP1_1_EDGE, __x) +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EDGE_SHIFT 31 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EDGE_MASK 0x80000000 +#define DSI_MCTL_DHPY_ERR_CTL_ERR_CONT_LP1_2_EDGE(__x) \ + DSI_VAL2REG(DSI_MCTL_DHPY_ERR_CTL, ERR_CONT_LP1_2_EDGE, __x) +#define DSI_MCTL_MAIN_STS_CLR 0x00000110 +#define DSI_MCTL_MAIN_STS_CLR_PLL_LOCK_CLR_SHIFT 0 +#define DSI_MCTL_MAIN_STS_CLR_PLL_LOCK_CLR_MASK 0x00000001 +#define DSI_MCTL_MAIN_STS_CLR_PLL_LOCK_CLR(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, PLL_LOCK_CLR, __x) +#define DSI_MCTL_MAIN_STS_CLR_CLKLANE_READY_CLR_SHIFT 1 +#define DSI_MCTL_MAIN_STS_CLR_CLKLANE_READY_CLR_MASK 0x00000002 +#define DSI_MCTL_MAIN_STS_CLR_CLKLANE_READY_CLR(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, CLKLANE_READY_CLR, __x) +#define 
DSI_MCTL_MAIN_STS_CLR_DAT1_READY_CLR_SHIFT 2 +#define DSI_MCTL_MAIN_STS_CLR_DAT1_READY_CLR_MASK 0x00000004 +#define DSI_MCTL_MAIN_STS_CLR_DAT1_READY_CLR(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, DAT1_READY_CLR, __x) +#define DSI_MCTL_MAIN_STS_CLR_DAT2_READY_CLR_SHIFT 3 +#define DSI_MCTL_MAIN_STS_CLR_DAT2_READY_CLR_MASK 0x00000008 +#define DSI_MCTL_MAIN_STS_CLR_DAT2_READY_CLR(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, DAT2_READY_CLR, __x) +#define DSI_MCTL_MAIN_STS_CLR_HSTX_TO_ERR_CLR_SHIFT 4 +#define DSI_MCTL_MAIN_STS_CLR_HSTX_TO_ERR_CLR_MASK 0x00000010 +#define DSI_MCTL_MAIN_STS_CLR_HSTX_TO_ERR_CLR(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, HSTX_TO_ERR_CLR, __x) +#define DSI_MCTL_MAIN_STS_CLR_LPRX_TO_ERR_CLR_SHIFT 5 +#define DSI_MCTL_MAIN_STS_CLR_LPRX_TO_ERR_CLR_MASK 0x00000020 +#define DSI_MCTL_MAIN_STS_CLR_LPRX_TO_ERR_CLR(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, LPRX_TO_ERR_CLR, __x) +#define DSI_MCTL_MAIN_STS_CLR_CRS_UNTERM_PCK_CLR_SHIFT 6 +#define DSI_MCTL_MAIN_STS_CLR_CRS_UNTERM_PCK_CLR_MASK 0x00000040 +#define DSI_MCTL_MAIN_STS_CLR_CRS_UNTERM_PCK_CLR(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, CRS_UNTERM_PCK_CLR, __x) +#define DSI_MCTL_MAIN_STS_CLR_VRS_UNTERM_PCK_CLR_SHIFT 7 +#define DSI_MCTL_MAIN_STS_CLR_VRS_UNTERM_PCK_CLR_MASK 0x00000080 +#define DSI_MCTL_MAIN_STS_CLR_VRS_UNTERM_PCK_CLR(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_CLR, VRS_UNTERM_PCK_CLR, __x) +#define DSI_CMD_MODE_STS_CLR 0x00000114 +#define DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR_SHIFT 0 +#define DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR_MASK 0x00000001 +#define DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, ERR_NO_TE_CLR, __x) +#define DSI_CMD_MODE_STS_CLR_ERR_TE_MISS_CLR_SHIFT 1 +#define DSI_CMD_MODE_STS_CLR_ERR_TE_MISS_CLR_MASK 0x00000002 +#define DSI_CMD_MODE_STS_CLR_ERR_TE_MISS_CLR(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, ERR_TE_MISS_CLR, __x) +#define DSI_CMD_MODE_STS_CLR_ERR_SDI1_UNDERRUN_CLR_SHIFT 2 +#define DSI_CMD_MODE_STS_CLR_ERR_SDI1_UNDERRUN_CLR_MASK 
0x00000004 +#define DSI_CMD_MODE_STS_CLR_ERR_SDI1_UNDERRUN_CLR(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, ERR_SDI1_UNDERRUN_CLR, __x) +#define DSI_CMD_MODE_STS_CLR_ERR_SDI2_UNDERRUN_CLR_SHIFT 3 +#define DSI_CMD_MODE_STS_CLR_ERR_SDI2_UNDERRUN_CLR_MASK 0x00000008 +#define DSI_CMD_MODE_STS_CLR_ERR_SDI2_UNDERRUN_CLR(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, ERR_SDI2_UNDERRUN_CLR, __x) +#define DSI_CMD_MODE_STS_CLR_ERR_UNWANTED_RD_CLR_SHIFT 4 +#define DSI_CMD_MODE_STS_CLR_ERR_UNWANTED_RD_CLR_MASK 0x00000010 +#define DSI_CMD_MODE_STS_CLR_ERR_UNWANTED_RD_CLR(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, ERR_UNWANTED_RD_CLR, __x) +#define DSI_CMD_MODE_STS_CLR_CSM_RUNNING_CLR_SHIFT 5 +#define DSI_CMD_MODE_STS_CLR_CSM_RUNNING_CLR_MASK 0x00000020 +#define DSI_CMD_MODE_STS_CLR_CSM_RUNNING_CLR(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_CLR, CSM_RUNNING_CLR, __x) +#define DSI_DIRECT_CMD_STS_CLR 0x00000118 +#define DSI_DIRECT_CMD_STS_CLR_CMD_TRANSMISSION_CLR_SHIFT 0 +#define DSI_DIRECT_CMD_STS_CLR_CMD_TRANSMISSION_CLR_MASK 0x00000001 +#define DSI_DIRECT_CMD_STS_CLR_CMD_TRANSMISSION_CLR(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, CMD_TRANSMISSION_CLR, __x) +#define DSI_DIRECT_CMD_STS_CLR_WRITE_COMPLETED_CLR_SHIFT 1 +#define DSI_DIRECT_CMD_STS_CLR_WRITE_COMPLETED_CLR_MASK 0x00000002 +#define DSI_DIRECT_CMD_STS_CLR_WRITE_COMPLETED_CLR(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, WRITE_COMPLETED_CLR, __x) +#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_COMPLETED_CLR_SHIFT 2 +#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_COMPLETED_CLR_MASK 0x00000004 +#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_COMPLETED_CLR(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, TRIGGER_COMPLETED_CLR, __x) +#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_CLR_SHIFT 3 +#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_CLR_MASK 0x00000008 +#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_CLR(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, READ_COMPLETED_CLR, __x) +#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_RECEIVED_CLR_SHIFT 4 +#define 
DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_RECEIVED_CLR_MASK 0x00000010 +#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_RECEIVED_CLR(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, ACKNOWLEDGE_RECEIVED_CLR, __x) +#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_WITH_ERR_RECEIVED_CLR_SHIFT 5 +#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_WITH_ERR_RECEIVED_CLR_MASK 0x00000020 +#define DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_WITH_ERR_RECEIVED_CLR(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, ACKNOWLEDGE_WITH_ERR_RECEIVED_CLR, __x) +#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_RECEIVED_CLR_SHIFT 6 +#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_RECEIVED_CLR_MASK 0x00000040 +#define DSI_DIRECT_CMD_STS_CLR_TRIGGER_RECEIVED_CLR(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, TRIGGER_RECEIVED_CLR, __x) +#define DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR_SHIFT 7 +#define DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR_MASK 0x00000080 +#define DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, TE_RECEIVED_CLR, __x) +#define DSI_DIRECT_CMD_STS_CLR_BTA_COMPLETED_CLR_SHIFT 8 +#define DSI_DIRECT_CMD_STS_CLR_BTA_COMPLETED_CLR_MASK 0x00000100 +#define DSI_DIRECT_CMD_STS_CLR_BTA_COMPLETED_CLR(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, BTA_COMPLETED_CLR, __x) +#define DSI_DIRECT_CMD_STS_CLR_BTA_FINISHED_CLR_SHIFT 9 +#define DSI_DIRECT_CMD_STS_CLR_BTA_FINISHED_CLR_MASK 0x00000200 +#define DSI_DIRECT_CMD_STS_CLR_BTA_FINISHED_CLR(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, BTA_FINISHED_CLR, __x) +#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_WITH_ERR_CLR_SHIFT 10 +#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_WITH_ERR_CLR_MASK 0x00000400 +#define DSI_DIRECT_CMD_STS_CLR_READ_COMPLETED_WITH_ERR_CLR(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_CLR, READ_COMPLETED_WITH_ERR_CLR, __x) +#define DSI_DIRECT_CMD_RD_STS_CLR 0x0000011C +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_FIXED_CLR_SHIFT 0 +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_FIXED_CLR_MASK 0x00000001 +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_FIXED_CLR(__x) \ + 
DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_FIXED_CLR, __x) +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNCORRECTABLE_CLR_SHIFT 1 +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNCORRECTABLE_CLR_MASK 0x00000002 +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNCORRECTABLE_CLR(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_UNCORRECTABLE_CLR, __x) +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_CHECKSUM_CLR_SHIFT 2 +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_CHECKSUM_CLR_MASK 0x00000004 +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_CHECKSUM_CLR(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_CHECKSUM_CLR, __x) +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNDECODABLE_CLR_SHIFT 3 +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNDECODABLE_CLR_MASK 0x00000008 +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_UNDECODABLE_CLR(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_UNDECODABLE_CLR, __x) +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_RECEIVE_CLR_SHIFT 4 +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_RECEIVE_CLR_MASK 0x00000010 +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_RECEIVE_CLR(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_RECEIVE_CLR, __x) +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_OVERSIZE_CLR_SHIFT 5 +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_OVERSIZE_CLR_MASK 0x00000020 +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_OVERSIZE_CLR(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_OVERSIZE_CLR, __x) +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_WRONG_LENGTH_CLR_SHIFT 6 +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_WRONG_LENGTH_CLR_MASK 0x00000040 +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_WRONG_LENGTH_CLR(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_WRONG_LENGTH_CLR, __x) +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_MISSING_EOT_CLR_SHIFT 7 +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_MISSING_EOT_CLR_MASK 0x00000080 +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_MISSING_EOT_CLR(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_MISSING_EOT_CLR, __x) +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_EOT_WITH_ERR_CLR_SHIFT 8 +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_EOT_WITH_ERR_CLR_MASK 
0x00000100 +#define DSI_DIRECT_CMD_RD_STS_CLR_ERR_EOT_WITH_ERR_CLR(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_CLR, ERR_EOT_WITH_ERR_CLR, __x) +#define DSI_VID_MODE_STS_CLR 0x00000120 +#define DSI_VID_MODE_STS_CLR_VSG_STS_CLR_SHIFT 0 +#define DSI_VID_MODE_STS_CLR_VSG_STS_CLR_MASK 0x00000001 +#define DSI_VID_MODE_STS_CLR_VSG_STS_CLR(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CLR, VSG_STS_CLR, __x) +#define DSI_VID_MODE_STS_CLR_ERR_MISSING_DATA_CLR_SHIFT 1 +#define DSI_VID_MODE_STS_CLR_ERR_MISSING_DATA_CLR_MASK 0x00000002 +#define DSI_VID_MODE_STS_CLR_ERR_MISSING_DATA_CLR(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_MISSING_DATA_CLR, __x) +#define DSI_VID_MODE_STS_CLR_ERR_MISSING_HSYNC_CLR_SHIFT 2 +#define DSI_VID_MODE_STS_CLR_ERR_MISSING_HSYNC_CLR_MASK 0x00000004 +#define DSI_VID_MODE_STS_CLR_ERR_MISSING_HSYNC_CLR(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_MISSING_HSYNC_CLR, __x) +#define DSI_VID_MODE_STS_CLR_ERR_MISSING_VSYNC_CLR_SHIFT 3 +#define DSI_VID_MODE_STS_CLR_ERR_MISSING_VSYNC_CLR_MASK 0x00000008 +#define DSI_VID_MODE_STS_CLR_ERR_MISSING_VSYNC_CLR(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_MISSING_VSYNC_CLR, __x) +#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_LENGTH_CLR_SHIFT 4 +#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_LENGTH_CLR_MASK 0x00000010 +#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_LENGTH_CLR(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CLR, REG_ERR_SMALL_LENGTH_CLR, __x) +#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_HEIGHT_CLR_SHIFT 5 +#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_HEIGHT_CLR_MASK 0x00000020 +#define DSI_VID_MODE_STS_CLR_REG_ERR_SMALL_HEIGHT_CLR(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CLR, REG_ERR_SMALL_HEIGHT_CLR, __x) +#define DSI_VID_MODE_STS_CLR_ERR_BURSTWRITE_CLR_SHIFT 6 +#define DSI_VID_MODE_STS_CLR_ERR_BURSTWRITE_CLR_MASK 0x00000040 +#define DSI_VID_MODE_STS_CLR_ERR_BURSTWRITE_CLR(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_BURSTWRITE_CLR, __x) +#define DSI_VID_MODE_STS_CLR_ERR_LONGWRITE_CLR_SHIFT 7 +#define 
DSI_VID_MODE_STS_CLR_ERR_LONGWRITE_CLR_MASK 0x00000080 +#define DSI_VID_MODE_STS_CLR_ERR_LONGWRITE_CLR(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_LONGWRITE_CLR, __x) +#define DSI_VID_MODE_STS_CLR_ERR_LONGREAD_CLR_SHIFT 8 +#define DSI_VID_MODE_STS_CLR_ERR_LONGREAD_CLR_MASK 0x00000100 +#define DSI_VID_MODE_STS_CLR_ERR_LONGREAD_CLR(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_LONGREAD_CLR, __x) +#define DSI_VID_MODE_STS_CLR_ERR_VRS_WRONG_LENGTH_CLR_SHIFT 9 +#define DSI_VID_MODE_STS_CLR_ERR_VRS_WRONG_LENGTH_CLR_MASK 0x00000200 +#define DSI_VID_MODE_STS_CLR_ERR_VRS_WRONG_LENGTH_CLR(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CLR, ERR_VRS_WRONG_LENGTH_CLR, __x) +#define DSI_VID_MODE_STS_CLR_VSG_RECOVERY_CLR_SHIFT 10 +#define DSI_VID_MODE_STS_CLR_VSG_RECOVERY_CLR_MASK 0x00000400 +#define DSI_VID_MODE_STS_CLR_VSG_RECOVERY_CLR(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_CLR, VSG_RECOVERY_CLR, __x) +#define DSI_TG_STS_CLR 0x00000124 +#define DSI_TG_STS_CLR_TVG_STS_CLR_SHIFT 0 +#define DSI_TG_STS_CLR_TVG_STS_CLR_MASK 0x00000001 +#define DSI_TG_STS_CLR_TVG_STS_CLR(__x) \ + DSI_VAL2REG(DSI_TG_STS_CLR, TVG_STS_CLR, __x) +#define DSI_TG_STS_CLR_TBG_STS_CLR_SHIFT 1 +#define DSI_TG_STS_CLR_TBG_STS_CLR_MASK 0x00000002 +#define DSI_TG_STS_CLR_TBG_STS_CLR(__x) \ + DSI_VAL2REG(DSI_TG_STS_CLR, TBG_STS_CLR, __x) +#define DSI_MCTL_DPHY_ERR_CLR 0x00000128 +#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_1_CLR_SHIFT 6 +#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_1_CLR_MASK 0x00000040 +#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_1_CLR(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_ESC_1_CLR, __x) +#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_2_CLR_SHIFT 7 +#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_2_CLR_MASK 0x00000080 +#define DSI_MCTL_DPHY_ERR_CLR_ERR_ESC_2_CLR(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_ESC_2_CLR, __x) +#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_1_CLR_SHIFT 8 +#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_1_CLR_MASK 0x00000100 +#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_1_CLR(__x) \ + 
DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_SYNCESC_1_CLR, __x) +#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_2_CLR_SHIFT 9 +#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_2_CLR_MASK 0x00000200 +#define DSI_MCTL_DPHY_ERR_CLR_ERR_SYNCESC_2_CLR(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_SYNCESC_2_CLR, __x) +#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_1_CLR_SHIFT 10 +#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_1_CLR_MASK 0x00000400 +#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_1_CLR(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONTROL_1_CLR, __x) +#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_2_CLR_SHIFT 11 +#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_2_CLR_MASK 0x00000800 +#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONTROL_2_CLR(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONTROL_2_CLR, __x) +#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_1_CLR_SHIFT 12 +#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_1_CLR_MASK 0x00001000 +#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_1_CLR(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONT_LP0_1_CLR, __x) +#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_2_CLR_SHIFT 13 +#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_2_CLR_MASK 0x00002000 +#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP0_2_CLR(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONT_LP0_2_CLR, __x) +#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_1_CLR_SHIFT 14 +#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_1_CLR_MASK 0x00004000 +#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_1_CLR(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONT_LP1_1_CLR, __x) +#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_2_CLR_SHIFT 15 +#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_2_CLR_MASK 0x00008000 +#define DSI_MCTL_DPHY_ERR_CLR_ERR_CONT_LP1_2_CLR(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR_CLR, ERR_CONT_LP1_2_CLR, __x) +#define DSI_MCTL_MAIN_STS_FLAG 0x00000130 +#define DSI_MCTL_MAIN_STS_FLAG_PLL_LOCK_FLAG_SHIFT 0 +#define DSI_MCTL_MAIN_STS_FLAG_PLL_LOCK_FLAG_MASK 0x00000001 +#define DSI_MCTL_MAIN_STS_FLAG_PLL_LOCK_FLAG(__x) \ + 
DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, PLL_LOCK_FLAG, __x) +#define DSI_MCTL_MAIN_STS_FLAG_CLKLANE_READY_FLAG_SHIFT 1 +#define DSI_MCTL_MAIN_STS_FLAG_CLKLANE_READY_FLAG_MASK 0x00000002 +#define DSI_MCTL_MAIN_STS_FLAG_CLKLANE_READY_FLAG(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, CLKLANE_READY_FLAG, __x) +#define DSI_MCTL_MAIN_STS_FLAG_DAT1_READY_FLAG_SHIFT 2 +#define DSI_MCTL_MAIN_STS_FLAG_DAT1_READY_FLAG_MASK 0x00000004 +#define DSI_MCTL_MAIN_STS_FLAG_DAT1_READY_FLAG(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, DAT1_READY_FLAG, __x) +#define DSI_MCTL_MAIN_STS_FLAG_DAT2_READY_FLAG_SHIFT 3 +#define DSI_MCTL_MAIN_STS_FLAG_DAT2_READY_FLAG_MASK 0x00000008 +#define DSI_MCTL_MAIN_STS_FLAG_DAT2_READY_FLAG(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, DAT2_READY_FLAG, __x) +#define DSI_MCTL_MAIN_STS_FLAG_HSTX_TO_ERR_FLAG_SHIFT 4 +#define DSI_MCTL_MAIN_STS_FLAG_HSTX_TO_ERR_FLAG_MASK 0x00000010 +#define DSI_MCTL_MAIN_STS_FLAG_HSTX_TO_ERR_FLAG(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, HSTX_TO_ERR_FLAG, __x) +#define DSI_MCTL_MAIN_STS_FLAG_LPRX_TO_ERR_FLAG_SHIFT 5 +#define DSI_MCTL_MAIN_STS_FLAG_LPRX_TO_ERR_FLAG_MASK 0x00000020 +#define DSI_MCTL_MAIN_STS_FLAG_LPRX_TO_ERR_FLAG(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, LPRX_TO_ERR_FLAG, __x) +#define DSI_MCTL_MAIN_STS_FLAG_CRS_UNTERM_PCK_FLAG_SHIFT 6 +#define DSI_MCTL_MAIN_STS_FLAG_CRS_UNTERM_PCK_FLAG_MASK 0x00000040 +#define DSI_MCTL_MAIN_STS_FLAG_CRS_UNTERM_PCK_FLAG(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, CRS_UNTERM_PCK_FLAG, __x) +#define DSI_MCTL_MAIN_STS_FLAG_VRS_UNTERM_PCK_FLAG_SHIFT 7 +#define DSI_MCTL_MAIN_STS_FLAG_VRS_UNTERM_PCK_FLAG_MASK 0x00000080 +#define DSI_MCTL_MAIN_STS_FLAG_VRS_UNTERM_PCK_FLAG(__x) \ + DSI_VAL2REG(DSI_MCTL_MAIN_STS_FLAG, VRS_UNTERM_PCK_FLAG, __x) +#define DSI_CMD_MODE_STS_FLAG 0x00000134 +#define DSI_CMD_MODE_STS_FLAG_ERR_NO_TE_FLAG_SHIFT 0 +#define DSI_CMD_MODE_STS_FLAG_ERR_NO_TE_FLAG_MASK 0x00000001 +#define DSI_CMD_MODE_STS_FLAG_ERR_NO_TE_FLAG(__x) \ + 
DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, ERR_NO_TE_FLAG, __x) +#define DSI_CMD_MODE_STS_FLAG_ERR_TE_MISS_FLAG_SHIFT 1 +#define DSI_CMD_MODE_STS_FLAG_ERR_TE_MISS_FLAG_MASK 0x00000002 +#define DSI_CMD_MODE_STS_FLAG_ERR_TE_MISS_FLAG(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, ERR_TE_MISS_FLAG, __x) +#define DSI_CMD_MODE_STS_FLAG_ERR_SDI1_UNDERRUN_FLAG_SHIFT 2 +#define DSI_CMD_MODE_STS_FLAG_ERR_SDI1_UNDERRUN_FLAG_MASK 0x00000004 +#define DSI_CMD_MODE_STS_FLAG_ERR_SDI1_UNDERRUN_FLAG(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, ERR_SDI1_UNDERRUN_FLAG, __x) +#define DSI_CMD_MODE_STS_FLAG_ERR_SDI2_UNDERRUN_FLAG_SHIFT 3 +#define DSI_CMD_MODE_STS_FLAG_ERR_SDI2_UNDERRUN_FLAG_MASK 0x00000008 +#define DSI_CMD_MODE_STS_FLAG_ERR_SDI2_UNDERRUN_FLAG(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, ERR_SDI2_UNDERRUN_FLAG, __x) +#define DSI_CMD_MODE_STS_FLAG_ERR_UNWANTED_RD_FLAG_SHIFT 4 +#define DSI_CMD_MODE_STS_FLAG_ERR_UNWANTED_RD_FLAG_MASK 0x00000010 +#define DSI_CMD_MODE_STS_FLAG_ERR_UNWANTED_RD_FLAG(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, ERR_UNWANTED_RD_FLAG, __x) +#define DSI_CMD_MODE_STS_FLAG_CSM_RUNNING_FLAG_SHIFT 5 +#define DSI_CMD_MODE_STS_FLAG_CSM_RUNNING_FLAG_MASK 0x00000020 +#define DSI_CMD_MODE_STS_FLAG_CSM_RUNNING_FLAG(__x) \ + DSI_VAL2REG(DSI_CMD_MODE_STS_FLAG, CSM_RUNNING_FLAG, __x) +#define DSI_DIRECT_CMD_STS_FLAG 0x00000138 +#define DSI_DIRECT_CMD_STS_FLAG_CMD_TRANSMISSION_FLAG_SHIFT 0 +#define DSI_DIRECT_CMD_STS_FLAG_CMD_TRANSMISSION_FLAG_MASK 0x00000001 +#define DSI_DIRECT_CMD_STS_FLAG_CMD_TRANSMISSION_FLAG(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, CMD_TRANSMISSION_FLAG, __x) +#define DSI_DIRECT_CMD_STS_FLAG_WRITE_COMPLETED_FLAG_SHIFT 1 +#define DSI_DIRECT_CMD_STS_FLAG_WRITE_COMPLETED_FLAG_MASK 0x00000002 +#define DSI_DIRECT_CMD_STS_FLAG_WRITE_COMPLETED_FLAG(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, WRITE_COMPLETED_FLAG, __x) +#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_COMPLETED_FLAG_SHIFT 2 +#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_COMPLETED_FLAG_MASK 0x00000004 
+#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_COMPLETED_FLAG(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, TRIGGER_COMPLETED_FLAG, __x) +#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_FLAG_SHIFT 3 +#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_FLAG_MASK 0x00000008 +#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_FLAG(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, READ_COMPLETED_FLAG, __x) +#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_RECEIVED_FLAG_SHIFT 4 +#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_RECEIVED_FLAG_MASK 0x00000010 +#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_RECEIVED_FLAG(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, ACKNOWLEDGE_RECEIVED_FLAG, __x) +#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_WITH_ERR_RECEIVED_FLAG_SHIFT 5 +#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_WITH_ERR_RECEIVED_FLAG_MASK 0x00000020 +#define DSI_DIRECT_CMD_STS_FLAG_ACKNOWLEDGE_WITH_ERR_RECEIVED_FLAG(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, ACKNOWLEDGE_WITH_ERR_RECEIVED_FLAG, __x) +#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_RECEIVED_FLAG_SHIFT 6 +#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_RECEIVED_FLAG_MASK 0x00000040 +#define DSI_DIRECT_CMD_STS_FLAG_TRIGGER_RECEIVED_FLAG(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, TRIGGER_RECEIVED_FLAG, __x) +#define DSI_DIRECT_CMD_STS_FLAG_TE_RECEIVED_FLAG_SHIFT 7 +#define DSI_DIRECT_CMD_STS_FLAG_TE_RECEIVED_FLAG_MASK 0x00000080 +#define DSI_DIRECT_CMD_STS_FLAG_TE_RECEIVED_FLAG(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, TE_RECEIVED_FLAG, __x) +#define DSI_DIRECT_CMD_STS_FLAG_BTA_COMPLETED_FLAG_SHIFT 8 +#define DSI_DIRECT_CMD_STS_FLAG_BTA_COMPLETED_FLAG_MASK 0x00000100 +#define DSI_DIRECT_CMD_STS_FLAG_BTA_COMPLETED_FLAG(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, BTA_COMPLETED_FLAG, __x) +#define DSI_DIRECT_CMD_STS_FLAG_BTA_FINISHED_FLAG_SHIFT 9 +#define DSI_DIRECT_CMD_STS_FLAG_BTA_FINISHED_FLAG_MASK 0x00000200 +#define DSI_DIRECT_CMD_STS_FLAG_BTA_FINISHED_FLAG(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, BTA_FINISHED_FLAG, __x) +#define 
DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_WITH_ERR_FLAG_SHIFT 10 +#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_WITH_ERR_FLAG_MASK 0x00000400 +#define DSI_DIRECT_CMD_STS_FLAG_READ_COMPLETED_WITH_ERR_FLAG(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_STS_FLAG, READ_COMPLETED_WITH_ERR_FLAG, __x) +#define DSI_DIRECT_CMD_RD_STS_FLAG 0x0000013C +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_FIXED_FLAG_SHIFT 0 +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_FIXED_FLAG_MASK 0x00000001 +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_FIXED_FLAG(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_FIXED_FLAG, __x) +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNCORRECTABLE_FLAG_SHIFT 1 +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNCORRECTABLE_FLAG_MASK 0x00000002 +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNCORRECTABLE_FLAG(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_UNCORRECTABLE_FLAG, __x) +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_CHECKSUM_FLAG_SHIFT 2 +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_CHECKSUM_FLAG_MASK 0x00000004 +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_CHECKSUM_FLAG(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_CHECKSUM_FLAG, __x) +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNDECODABLE_FLAG_SHIFT 3 +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNDECODABLE_FLAG_MASK 0x00000008 +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_UNDECODABLE_FLAG(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_UNDECODABLE_FLAG, __x) +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_RECEIVE_FLAG_SHIFT 4 +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_RECEIVE_FLAG_MASK 0x00000010 +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_RECEIVE_FLAG(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_RECEIVE_FLAG, __x) +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_OVERSIZE_FLAG_SHIFT 5 +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_OVERSIZE_FLAG_MASK 0x00000020 +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_OVERSIZE_FLAG(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_OVERSIZE_FLAG, __x) +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_WRONG_LENGTH_FLAG_SHIFT 6 +#define 
DSI_DIRECT_CMD_RD_STS_FLAG_ERR_WRONG_LENGTH_FLAG_MASK 0x00000040 +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_WRONG_LENGTH_FLAG(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_WRONG_LENGTH_FLAG, __x) +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_MISSING_EOT_FLAG_SHIFT 7 +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_MISSING_EOT_FLAG_MASK 0x00000080 +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_MISSING_EOT_FLAG(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_MISSING_EOT_FLAG, __x) +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_EOT_WITH_ERR_FLAG_SHIFT 8 +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_EOT_WITH_ERR_FLAG_MASK 0x00000100 +#define DSI_DIRECT_CMD_RD_STS_FLAG_ERR_EOT_WITH_ERR_FLAG(__x) \ + DSI_VAL2REG(DSI_DIRECT_CMD_RD_STS_FLAG, ERR_EOT_WITH_ERR_FLAG, __x) +#define DSI_VID_MODE_STS_FLAG 0x00000140 +#define DSI_VID_MODE_STS_FLAG_VSG_STS_FLAG_SHIFT 0 +#define DSI_VID_MODE_STS_FLAG_VSG_STS_FLAG_MASK 0x00000001 +#define DSI_VID_MODE_STS_FLAG_VSG_STS_FLAG(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, VSG_STS_FLAG, __x) +#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_DATA_FLAG_SHIFT 1 +#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_DATA_FLAG_MASK 0x00000002 +#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_DATA_FLAG(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_MISSING_DATA_FLAG, __x) +#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_HSYNC_FLAG_SHIFT 2 +#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_HSYNC_FLAG_MASK 0x00000004 +#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_HSYNC_FLAG(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_MISSING_HSYNC_FLAG, __x) +#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_VSYNC_FLAG_SHIFT 3 +#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_VSYNC_FLAG_MASK 0x00000008 +#define DSI_VID_MODE_STS_FLAG_ERR_MISSING_VSYNC_FLAG(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_MISSING_VSYNC_FLAG, __x) +#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_LENGTH_FLAG_SHIFT 4 +#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_LENGTH_FLAG_MASK 0x00000010 +#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_LENGTH_FLAG(__x) \ + 
DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, REG_ERR_SMALL_LENGTH_FLAG, __x) +#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_HEIGHT_FLAG_SHIFT 5 +#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_HEIGHT_FLAG_MASK 0x00000020 +#define DSI_VID_MODE_STS_FLAG_REG_ERR_SMALL_HEIGHT_FLAG(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, REG_ERR_SMALL_HEIGHT_FLAG, __x) +#define DSI_VID_MODE_STS_FLAG_ERR_BURSTWRITE_FLAG_SHIFT 6 +#define DSI_VID_MODE_STS_FLAG_ERR_BURSTWRITE_FLAG_MASK 0x00000040 +#define DSI_VID_MODE_STS_FLAG_ERR_BURSTWRITE_FLAG(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_BURSTWRITE_FLAG, __x) +#define DSI_VID_MODE_STS_FLAG_ERR_LONGWRITE_FLAG_SHIFT 7 +#define DSI_VID_MODE_STS_FLAG_ERR_LONGWRITE_FLAG_MASK 0x00000080 +#define DSI_VID_MODE_STS_FLAG_ERR_LONGWRITE_FLAG(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_LONGWRITE_FLAG, __x) +#define DSI_VID_MODE_STS_FLAG_ERR_LONGREAD_FLAG_SHIFT 8 +#define DSI_VID_MODE_STS_FLAG_ERR_LONGREAD_FLAG_MASK 0x00000100 +#define DSI_VID_MODE_STS_FLAG_ERR_LONGREAD_FLAG(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_LONGREAD_FLAG, __x) +#define DSI_VID_MODE_STS_FLAG_ERR_VRS_WRONG_LENGTH_FLAG_SHIFT 9 +#define DSI_VID_MODE_STS_FLAG_ERR_VRS_WRONG_LENGTH_FLAG_MASK 0x00000200 +#define DSI_VID_MODE_STS_FLAG_ERR_VRS_WRONG_LENGTH_FLAG(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, ERR_VRS_WRONG_LENGTH_FLAG, __x) +#define DSI_VID_MODE_STS_FLAG_VSG_RECOVERY_FLAG_SHIFT 10 +#define DSI_VID_MODE_STS_FLAG_VSG_RECOVERY_FLAG_MASK 0x00000400 +#define DSI_VID_MODE_STS_FLAG_VSG_RECOVERY_FLAG(__x) \ + DSI_VAL2REG(DSI_VID_MODE_STS_FLAG, VSG_RECOVERY_FLAG, __x) +#define DSI_TG_STS_FLAG 0x00000144 +#define DSI_TG_STS_FLAG_TVG_STS_FLAG_SHIFT 0 +#define DSI_TG_STS_FLAG_TVG_STS_FLAG_MASK 0x00000001 +#define DSI_TG_STS_FLAG_TVG_STS_FLAG(__x) \ + DSI_VAL2REG(DSI_TG_STS_FLAG, TVG_STS_FLAG, __x) +#define DSI_TG_STS_FLAG_TBG_STS_FLAG_SHIFT 1 +#define DSI_TG_STS_FLAG_TBG_STS_FLAG_MASK 0x00000002 +#define DSI_TG_STS_FLAG_TBG_STS_FLAG(__x) \ + DSI_VAL2REG(DSI_TG_STS_FLAG, TBG_STS_FLAG, 
__x) +#define DSI_MCTL_DPHY_ERR_FLAG 0x00000148 +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_1_FLAG_SHIFT 6 +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_1_FLAG_MASK 0x00000040 +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_1_FLAG(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_ESC_1_FLAG, __x) +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_2_FLAG_SHIFT 7 +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_2_FLAG_MASK 0x00000080 +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_ESC_2_FLAG(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_ESC_2_FLAG, __x) +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_1_FLAG_SHIFT 8 +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_1_FLAG_MASK 0x00000100 +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_1_FLAG(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_SYNCESC_1_FLAG, __x) +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_2_FLAG_SHIFT 9 +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_2_FLAG_MASK 0x00000200 +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_SYNCESC_2_FLAG(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_SYNCESC_2_FLAG, __x) +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_1_FLAG_SHIFT 10 +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_1_FLAG_MASK 0x00000400 +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_1_FLAG(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONTROL_1_FLAG, __x) +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_2_FLAG_SHIFT 11 +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_2_FLAG_MASK 0x00000800 +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONTROL_2_FLAG(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONTROL_2_FLAG, __x) +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_1_FLAG_SHIFT 12 +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_1_FLAG_MASK 0x00001000 +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_1_FLAG(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONT_LP0_1_FLAG, __x) +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_2_FLAG_SHIFT 13 +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_2_FLAG_MASK 0x00002000 +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP0_2_FLAG(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONT_LP0_2_FLAG, 
__x) +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_1_FLAG_SHIFT 14 +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_1_FLAG_MASK 0x00004000 +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_1_FLAG(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONT_LP1_1_FLAG, __x) +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_2_FLAG_SHIFT 15 +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_2_FLAG_MASK 0x00008000 +#define DSI_MCTL_DPHY_ERR_FLAG_ERR_CONT_LP1_2_FLAG(__x) \ + DSI_VAL2REG(DSI_MCTL_DPHY_ERR_FLAG, ERR_CONT_LP1_2_FLAG, __x) +#define DSI_DPHY_LANES_TRIM 0x00000150 +#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT1_SHIFT 0 +#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT1_MASK 0x00000003 +#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT1(__x) \ + DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_SKEW_DAT1, __x) +#define DSI_DPHY_LANES_TRIM_DPHY_CD_OFF_DAT1_SHIFT 2 +#define DSI_DPHY_LANES_TRIM_DPHY_CD_OFF_DAT1_MASK 0x00000004 +#define DSI_DPHY_LANES_TRIM_DPHY_CD_OFF_DAT1(__x) \ + DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_CD_OFF_DAT1, __x) +#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT1_SHIFT 3 +#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT1_MASK 0x00000008 +#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT1(__x) \ + DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_UP_DAT1, __x) +#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT1_SHIFT 4 +#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT1_MASK 0x00000010 +#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT1(__x) \ + DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_DOWN_DAT1, __x) +#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT1_SHIFT 5 +#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT1_MASK 0x00000020 +#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT1(__x) \ + DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_TEST_RESERVED_1_DAT1, __x) +#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_CLK_SHIFT 6 +#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_CLK_MASK 0x000000C0 +#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_CLK(__x) \ + DSI_VAL2REG(DSI_DPHY_LANES_TRIM, 
DPHY_SKEW_CLK, __x) +#define DSI_DPHY_LANES_TRIM_DPHY_LP_RX_VIL_CLK_SHIFT 8 +#define DSI_DPHY_LANES_TRIM_DPHY_LP_RX_VIL_CLK_MASK 0x00000300 +#define DSI_DPHY_LANES_TRIM_DPHY_LP_RX_VIL_CLK(__x) \ + DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_LP_RX_VIL_CLK, __x) +#define DSI_DPHY_LANES_TRIM_DPHY_LP_TX_SLEWRATE_CLK_SHIFT 10 +#define DSI_DPHY_LANES_TRIM_DPHY_LP_TX_SLEWRATE_CLK_MASK 0x00000C00 +#define DSI_DPHY_LANES_TRIM_DPHY_LP_TX_SLEWRATE_CLK(__x) \ + DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_LP_TX_SLEWRATE_CLK, __x) +#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_SHIFT 12 +#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_MASK 0x00001000 +#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_0_81 0 +#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_0_90 1 +#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_ENUM(__x) \ + DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_SPECS_90_81B, \ + DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_##__x) +#define DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B(__x) \ + DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_SPECS_90_81B, __x) +#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_CLK_SHIFT 13 +#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_CLK_MASK 0x00002000 +#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_CLK(__x) \ + DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_UP_CLK, __x) +#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_CLK_SHIFT 14 +#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_CLK_MASK 0x00004000 +#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_CLK(__x) \ + DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_DOWN_CLK, __x) +#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_CLK_SHIFT 15 +#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_CLK_MASK 0x00008000 +#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_CLK(__x) \ + DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_TEST_RESERVED_1_CLK, __x) +#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT2_SHIFT 16 +#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT2_MASK 0x00030000 +#define DSI_DPHY_LANES_TRIM_DPHY_SKEW_DAT2(__x) \ + 
DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_SKEW_DAT2, __x) +#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT2_SHIFT 18 +#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT2_MASK 0x00040000 +#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_UP_DAT2(__x) \ + DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_UP_DAT2, __x) +#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT2_SHIFT 19 +#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT2_MASK 0x00080000 +#define DSI_DPHY_LANES_TRIM_DPHY_HSTX_SLEWRATE_DOWN_DAT2(__x) \ + DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_HSTX_SLEWRATE_DOWN_DAT2, __x) +#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT2_SHIFT 20 +#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT2_MASK 0x00100000 +#define DSI_DPHY_LANES_TRIM_DPHY_TEST_RESERVED_1_DAT2(__x) \ + DSI_VAL2REG(DSI_DPHY_LANES_TRIM, DPHY_TEST_RESERVED_1_DAT2, __x) +#define DSI_ID_REG 0x00000FF0 +#define DSI_ID_REG_Y_SHIFT 0 +#define DSI_ID_REG_Y_MASK 0x0000000F +#define DSI_ID_REG_Y(__x) \ + DSI_VAL2REG(DSI_ID_REG, Y, __x) +#define DSI_ID_REG_X_SHIFT 4 +#define DSI_ID_REG_X_MASK 0x000000F0 +#define DSI_ID_REG_X(__x) \ + DSI_VAL2REG(DSI_ID_REG, X, __x) +#define DSI_ID_REG_H_SHIFT 8 +#define DSI_ID_REG_H_MASK 0x00000300 +#define DSI_ID_REG_H(__x) \ + DSI_VAL2REG(DSI_ID_REG, H, __x) +#define DSI_ID_REG_PRODUCT_ID_SHIFT 10 +#define DSI_ID_REG_PRODUCT_ID_MASK 0x0003FC00 +#define DSI_ID_REG_PRODUCT_ID(__x) \ + DSI_VAL2REG(DSI_ID_REG, PRODUCT_ID, __x) +#define DSI_ID_REG_VENDOR_ID_SHIFT 18 +#define DSI_ID_REG_VENDOR_ID_MASK 0xFFFC0000 +#define DSI_ID_REG_VENDOR_ID(__x) \ + DSI_VAL2REG(DSI_ID_REG, VENDOR_ID, __x) +#define DSI_IP_CONF 0x00000FF4 +#define DSI_IP_CONF_FIFO_SIZE_SHIFT 0 +#define DSI_IP_CONF_FIFO_SIZE_MASK 0x0000003F +#define DSI_IP_CONF_FIFO_SIZE(__x) \ + DSI_VAL2REG(DSI_IP_CONF, FIFO_SIZE, __x) diff --git a/drivers/video/mcde/mcde_bus.c b/drivers/video/mcde/mcde_bus.c new file mode 100644 index 00000000000..bdcf65b0fb9 --- /dev/null +++ b/drivers/video/mcde/mcde_bus.c @@ 
-0,0 +1,274 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * + * ST-Ericsson MCDE display bus driver + * + * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/notifier.h> + +#include <video/mcde_display.h> +#include <video/mcde_dss.h> + +#define to_mcde_display_driver(__drv) \ + container_of((__drv), struct mcde_display_driver, driver) + +static BLOCKING_NOTIFIER_HEAD(bus_notifier_list); + +static int mcde_drv_suspend(struct device *_dev, pm_message_t state); +static int mcde_drv_resume(struct device *_dev); +struct bus_type mcde_bus_type; + +static int mcde_suspend_device(struct device *dev, void *data) +{ + pm_message_t* state = (pm_message_t *) data; + if (dev->driver && dev->driver->suspend) + return dev->driver->suspend(dev, *state); + return 0; +} + +static int mcde_resume_device(struct device *dev, void *data) +{ + if (dev->driver && dev->driver->resume) + return dev->driver->resume(dev); + return 0; +} + +/* Bus driver */ + +static int mcde_bus_match(struct device *_dev, struct device_driver *driver) +{ + pr_debug("Matching device %s with driver %s\n", + dev_name(_dev), driver->name); + + return strncmp(dev_name(_dev), driver->name, strlen(driver->name)) == 0; +} + +static int mcde_bus_suspend(struct device *_dev, pm_message_t state) +{ + int ret; + ret = bus_for_each_dev(&mcde_bus_type, NULL, &state, + mcde_suspend_device); + if (ret) { + /* TODO Resume all suspended devices */ + /* mcde_bus_resume(dev); */ + return ret; + } + return 0; +} + +static int mcde_bus_resume(struct device *_dev) +{ + return bus_for_each_dev(&mcde_bus_type, NULL, NULL, mcde_resume_device); +} + +struct bus_type mcde_bus_type = { + .name = "mcde_bus", + .match = mcde_bus_match, + .suspend = mcde_bus_suspend, + 
.resume = mcde_bus_resume, +}; + +static int mcde_drv_probe(struct device *_dev) +{ + struct mcde_display_driver *drv = to_mcde_display_driver(_dev->driver); + struct mcde_display_device *dev = to_mcde_display_device(_dev); + + return drv->probe(dev); +} + +static int mcde_drv_remove(struct device *_dev) +{ + struct mcde_display_driver *drv = to_mcde_display_driver(_dev->driver); + struct mcde_display_device *dev = to_mcde_display_device(_dev); + + return drv->remove(dev); +} + +static void mcde_drv_shutdown(struct device *_dev) +{ + struct mcde_display_driver *drv = to_mcde_display_driver(_dev->driver); + struct mcde_display_device *dev = to_mcde_display_device(_dev); + + drv->shutdown(dev); +} + +static int mcde_drv_suspend(struct device *_dev, pm_message_t state) +{ + struct mcde_display_driver *drv = to_mcde_display_driver(_dev->driver); + struct mcde_display_device *dev = to_mcde_display_device(_dev); + + if (drv->suspend) + return drv->suspend(dev, state); + else +#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM) + return dev->set_power_mode(dev, MCDE_DISPLAY_PM_OFF); +#else + return 0; +#endif +} + +static int mcde_drv_resume(struct device *_dev) +{ + struct mcde_display_driver *drv = to_mcde_display_driver(_dev->driver); + struct mcde_display_device *dev = to_mcde_display_device(_dev); + + if (drv->resume) + return drv->resume(dev); + else +#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM) + return dev->set_power_mode(dev, MCDE_DISPLAY_PM_STANDBY); +#else + return 0; +#endif +} + +/* Bus device */ + +static void mcde_bus_release(struct device *dev) +{ +} + +struct device mcde_bus = { + .init_name = "mcde_bus", + .release = mcde_bus_release +}; + +/* Public bus API */ + +int mcde_display_driver_register(struct mcde_display_driver *drv) +{ + drv->driver.bus = &mcde_bus_type; + if (drv->probe) + drv->driver.probe = mcde_drv_probe; + if (drv->remove) + drv->driver.remove = mcde_drv_remove; + if (drv->shutdown) + drv->driver.shutdown = 
mcde_drv_shutdown; + drv->driver.suspend = mcde_drv_suspend; + drv->driver.resume = mcde_drv_resume; + + return driver_register(&drv->driver); +} +EXPORT_SYMBOL(mcde_display_driver_register); + +void mcde_display_driver_unregister(struct mcde_display_driver *drv) +{ + driver_unregister(&drv->driver); +} +EXPORT_SYMBOL(mcde_display_driver_unregister); + +static void mcde_display_dev_release(struct device *dev) +{ + /* Do nothing */ +} + +int mcde_display_device_register(struct mcde_display_device *dev) +{ + /* Setup device */ + if (!dev) + return -EINVAL; + dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + dev->dev.bus = &mcde_bus_type; + if (dev->dev.parent != NULL) + dev->dev.parent = &mcde_bus; + dev->dev.release = mcde_display_dev_release; + if (dev->id != -1) + dev_set_name(&dev->dev, "%s.%d", dev->name, dev->id); + else + dev_set_name(&dev->dev, dev->name); + + mcde_display_init_device(dev); + + return device_register(&dev->dev); +} +EXPORT_SYMBOL(mcde_display_device_register); + +void mcde_display_device_unregister(struct mcde_display_device *dev) +{ + device_unregister(&dev->dev); +} +EXPORT_SYMBOL(mcde_display_device_unregister); + +/* Notifications */ +int mcde_dss_register_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&bus_notifier_list, nb); +} +EXPORT_SYMBOL(mcde_dss_register_notifier); + +int mcde_dss_unregister_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&bus_notifier_list, nb); +} +EXPORT_SYMBOL(mcde_dss_unregister_notifier); + +static int bus_notify_callback(struct notifier_block *nb, + unsigned long event, void *dev) +{ + struct mcde_display_device *ddev = to_mcde_display_device(dev); + + if (event == BUS_NOTIFY_BOUND_DRIVER) { + ddev->initialized = true; + blocking_notifier_call_chain(&bus_notifier_list, + MCDE_DSS_EVENT_DISPLAY_REGISTERED, ddev); + } else if (event == BUS_NOTIFY_UNBIND_DRIVER) { + ddev->initialized = false; + blocking_notifier_call_chain(&bus_notifier_list, 
+ MCDE_DSS_EVENT_DISPLAY_UNREGISTERED, ddev); + } + return 0; +} + +struct notifier_block bus_nb = { + .notifier_call = bus_notify_callback, +}; + +/* Driver init/exit */ + +int __init mcde_display_init(void) +{ + int ret; + + ret = bus_register(&mcde_bus_type); + if (ret) { + pr_warning("Unable to register bus type\n"); + goto no_bus_registration; + } + ret = device_register(&mcde_bus); + if (ret) { + pr_warning("Unable to register bus device\n"); + goto no_device_registration; + } + ret = bus_register_notifier(&mcde_bus_type, &bus_nb); + if (ret) { + pr_warning("Unable to register bus notifier\n"); + goto no_bus_notifier; + } + + goto out; + +no_bus_notifier: + device_unregister(&mcde_bus); +no_device_registration: + bus_unregister(&mcde_bus_type); +no_bus_registration: +out: + return ret; +} + +void mcde_display_exit(void) +{ + bus_unregister_notifier(&mcde_bus_type, &bus_nb); + device_unregister(&mcde_bus); + bus_unregister(&mcde_bus_type); +} diff --git a/drivers/video/mcde/mcde_debugfs.c b/drivers/video/mcde/mcde_debugfs.c new file mode 100644 index 00000000000..586b1787d00 --- /dev/null +++ b/drivers/video/mcde/mcde_debugfs.c @@ -0,0 +1,207 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson MCDE base driver + * + * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/stat.h> +#include <linux/time.h> +#include <linux/debugfs.h> +#include <linux/slab.h> +#include <asm/page.h> + +#include "mcde_debugfs.h" + +#define MAX_NUM_OVERLAYS 2 +#define MAX_NUM_CHANNELS 4 +#define DEFAULT_DMESG_FPS_LOG_INTERVAL 100 + +struct fps_info { + u32 enable_dmesg; + u32 interval_ms; + struct timespec timestamp_last; + u32 frame_counter_last; + u32 frame_counter; + u32 fpks; +}; + +struct overlay_info { + u8 id; + struct dentry *dentry; + struct fps_info fps; +}; + +struct channel_info { + u8 id; + struct dentry *dentry; + struct mcde_chnl_state *chnl; + struct fps_info fps; + struct overlay_info overlays[MAX_NUM_OVERLAYS]; +}; + +static struct mcde_info { + struct device *dev; + struct dentry *dentry; + struct channel_info channels[MAX_NUM_CHANNELS]; +} mcde; + +/* Requires: lhs > rhs */ +static inline u32 timespec_ms_diff(struct timespec lhs, struct timespec rhs) +{ + struct timespec tmp_ts = timespec_sub(lhs, rhs); + u64 tmp_ns = (u64)timespec_to_ns(&tmp_ts); + do_div(tmp_ns, NSEC_PER_MSEC); + return (u32)tmp_ns; +} + +/* Returns "frames per 1000 secs", divide by 1000 to get fps with 3 decimals */ +static u32 update_fps(struct fps_info *fps) +{ + struct timespec now; + u32 fpks = 0, ms_since_last, num_frames; + + getrawmonotonic(&now); + fps->frame_counter++; + + ms_since_last = timespec_ms_diff(now, fps->timestamp_last); + num_frames = fps->frame_counter - fps->frame_counter_last; + if (num_frames > 1 && ms_since_last >= fps->interval_ms) { + fpks = (num_frames * 1000000) / ms_since_last; + fps->timestamp_last = now; + fps->frame_counter_last = fps->frame_counter; + fps->fpks = fpks; + } + + return fpks; +} + +static void update_chnl_fps(struct channel_info *ci) +{ + u32 fpks = update_fps(&ci->fps); + if (fpks && ci->fps.enable_dmesg) + dev_info(mcde.dev, "FPS: chnl=%d fps=%d.%.3d\n", ci->id, + fpks / 1000, fpks % 1000); +} + +static void update_ovly_fps(struct 
channel_info *ci, struct overlay_info *oi) +{ + u32 fpks = update_fps(&oi->fps); + if (fpks && oi->fps.enable_dmesg) + dev_info(mcde.dev, "FPS: ovly=%d.%d fps=%d.%.3d\n", ci->id, + oi->id, fpks / 1000, fpks % 1000); +} + +int mcde_debugfs_create(struct device *dev) +{ + if (mcde.dev) + return -EBUSY; + + mcde.dentry = debugfs_create_dir("mcde", NULL); + if (!mcde.dentry) + return -ENOMEM; + mcde.dev = dev; + + return 0; +} + +static struct channel_info *find_chnl(u8 chnl_id) +{ + if (chnl_id > MAX_NUM_CHANNELS) + return NULL; + return &mcde.channels[chnl_id]; +} + +static struct overlay_info *find_ovly(struct channel_info *ci, u8 ovly_id) +{ + if (!ci || ovly_id >= MAX_NUM_OVERLAYS) + return NULL; + return &ci->overlays[ovly_id]; +} + +static void create_fps_files(struct dentry *dentry, struct fps_info *fps) +{ + debugfs_create_u32("frame_counter", S_IRUGO, dentry, + &fps->frame_counter); + debugfs_create_u32("frames_per_ksecs", S_IRUGO, dentry, &fps->fpks); + debugfs_create_u32("interval_ms", S_IRUGO|S_IWUGO, dentry, + &fps->interval_ms); + debugfs_create_u32("dmesg", S_IRUGO|S_IWUGO, dentry, + &fps->enable_dmesg); +} + +int mcde_debugfs_channel_create(u8 chnl_id, struct mcde_chnl_state *chnl) +{ + struct channel_info *ci = find_chnl(chnl_id); + char name[10]; + + if (!chnl || !ci) + return -EINVAL; + if (ci->chnl) + return -EBUSY; + + snprintf(name, sizeof(name), "chnl%d", chnl_id); + ci->dentry = debugfs_create_dir(name, mcde.dentry); + if (!ci->dentry) + return -ENOMEM; + + create_fps_files(ci->dentry, &ci->fps); + + ci->fps.interval_ms = DEFAULT_DMESG_FPS_LOG_INTERVAL; + ci->id = chnl_id; + ci->chnl = chnl; + + return 0; +} + +int mcde_debugfs_overlay_create(u8 chnl_id, u8 ovly_id) +{ + struct channel_info *ci = find_chnl(chnl_id); + struct overlay_info *oi = find_ovly(ci, ovly_id); + char name[10]; + + if (!oi || !ci || ovly_id >= MAX_NUM_OVERLAYS) + return -EINVAL; + if (oi->dentry) + return -EBUSY; + + snprintf(name, sizeof(name), "ovly%d", ovly_id); + 
oi->dentry = debugfs_create_dir(name, ci->dentry); + if (!oi->dentry) + return -ENOMEM; + + create_fps_files(oi->dentry, &oi->fps); + + oi->fps.interval_ms = DEFAULT_DMESG_FPS_LOG_INTERVAL; + oi->id = ovly_id; + + return 0; +} + +void mcde_debugfs_channel_update(u8 chnl_id) +{ + struct channel_info *ci = find_chnl(chnl_id); + + if (!ci || !ci->chnl) + return; + + update_chnl_fps(ci); +} + +void mcde_debugfs_overlay_update(u8 chnl_id, u8 ovly_id) +{ + struct channel_info *ci = find_chnl(chnl_id); + struct overlay_info *oi = find_ovly(ci, ovly_id); + + if (!oi || !oi->dentry) + return; + + update_ovly_fps(ci, oi); +} + diff --git a/drivers/video/mcde/mcde_debugfs.h b/drivers/video/mcde/mcde_debugfs.h new file mode 100644 index 00000000000..9f1e7f18ea5 --- /dev/null +++ b/drivers/video/mcde/mcde_debugfs.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson MCDE base driver + * + * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. + */ + +#ifndef __MCDE_DEBUGFS__H__ +#define __MCDE_DEBUGFS__H__ + +#include <video/mcde.h> + +int mcde_debugfs_create(struct device *dev); +int mcde_debugfs_channel_create(u8 chnl_id, struct mcde_chnl_state *chnl); +int mcde_debugfs_overlay_create(u8 chnl_id, u8 ovly_id); + +void mcde_debugfs_channel_update(u8 chnl_id); +void mcde_debugfs_overlay_update(u8 chnl_id, u8 ovly_id); + +#endif /* __MCDE_DEBUGFS__H__ */ + diff --git a/drivers/video/mcde/mcde_display.c b/drivers/video/mcde/mcde_display.c new file mode 100644 index 00000000000..9e9eb78516e --- /dev/null +++ b/drivers/video/mcde/mcde_display.c @@ -0,0 +1,366 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson MCDE display driver + * + * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/kernel.h> +#include <linux/device.h> + +#include <video/mcde_display.h> + +/*temp*/ +#include <linux/delay.h> + +static void mcde_display_get_native_resolution_default( + struct mcde_display_device *ddev, u16 *x_res, u16 *y_res) +{ + if (x_res) + *x_res = ddev->native_x_res; + if (y_res) + *y_res = ddev->native_y_res; +} + +static enum mcde_ovly_pix_fmt mcde_display_get_default_pixel_format_default( + struct mcde_display_device *ddev) +{ + return ddev->default_pixel_format; +} + +static void mcde_display_get_physical_size_default( + struct mcde_display_device *ddev, u16 *width, u16 *height) +{ + if (width) + *width = ddev->physical_width; + if (height) + *height = ddev->physical_height; +} + +static int mcde_display_set_power_mode_default(struct mcde_display_device *ddev, + enum mcde_display_power_mode power_mode) +{ + int ret = 0; + + /* OFF -> STANDBY */ + if (ddev->power_mode == MCDE_DISPLAY_PM_OFF && + power_mode != MCDE_DISPLAY_PM_OFF) { + if (ddev->platform_enable) { + ret = ddev->platform_enable(ddev); + if (ret) + return ret; + } + ddev->power_mode = MCDE_DISPLAY_PM_STANDBY; + /* force register settings */ + if (ddev->port->type == MCDE_PORTTYPE_DPI) + ddev->update_flags = UPDATE_FLAG_VIDEO_MODE | UPDATE_FLAG_PIXEL_FORMAT; + } + + if (ddev->port->type == MCDE_PORTTYPE_DSI) { + /* STANDBY -> ON */ + if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY && + power_mode == MCDE_DISPLAY_PM_ON) { + ret = mcde_dsi_dcs_write(ddev->chnl_state, + DCS_CMD_EXIT_SLEEP_MODE, NULL, 0); + if (ret) + return ret; + + ret = mcde_dsi_dcs_write(ddev->chnl_state, + DCS_CMD_SET_DISPLAY_ON, NULL, 0); + if (ret) + return ret; + + ddev->power_mode = MCDE_DISPLAY_PM_ON; + } else if (ddev->power_mode == MCDE_DISPLAY_PM_ON && + power_mode <= MCDE_DISPLAY_PM_STANDBY) { + /* ON -> STANDBY */ + ret = mcde_dsi_dcs_write(ddev->chnl_state, + DCS_CMD_SET_DISPLAY_OFF, NULL, 0); + if (ret) + return ret; + + ret = mcde_dsi_dcs_write(ddev->chnl_state, + 
DCS_CMD_ENTER_SLEEP_MODE, NULL, 0); + if (ret) + return ret; + + ddev->power_mode = MCDE_DISPLAY_PM_STANDBY; + } + } else if (ddev->port->type == MCDE_PORTTYPE_DPI) { + ddev->power_mode = power_mode; + } else if (ddev->power_mode != power_mode) { + return -EINVAL; + } + + /* SLEEP -> OFF */ + if (ddev->power_mode == MCDE_DISPLAY_PM_STANDBY && + power_mode == MCDE_DISPLAY_PM_OFF) { + if (ddev->platform_disable) { + ret = ddev->platform_disable(ddev); + if (ret) + return ret; + } + ddev->power_mode = MCDE_DISPLAY_PM_OFF; + } + + mcde_chnl_set_power_mode(ddev->chnl_state, ddev->power_mode); + + return ret; +} + +static inline enum mcde_display_power_mode mcde_display_get_power_mode_default( + struct mcde_display_device *ddev) +{ + return ddev->power_mode; +} + +static inline int mcde_display_try_video_mode_default( + struct mcde_display_device *ddev, + struct mcde_video_mode *video_mode) +{ + /* + * DSI video mode: + * This function is intended for configuring supported video mode(s). + * Overload it into the panel driver file and set up blanking + * intervals and pixel clock according to below recommendations. 
+ * + * vertical blanking parameters vbp, vfp, vsw are given in lines + * horizontal blanking parameters hbp, hfp, hsw are given in pixels + * + * video_mode->pixclock is the time between two pixels (in picoseconds) + * The source of the pixel clock is DSI PLL and it shall be set to + * meet the requirement + * + * non-burst mode: + * pixel clock (Hz) = (VACT+VBP+VFP+VSA) * (HACT+HBP+HFP+HSA) * + * framerate * bpp / num_data_lanes + * + * burst mode: + * pixel clock (Hz) > (VACT+VBP+VFP+VSA) * (HACT+HBP+HFP+HSA) * + * framerate * bpp / num_data_lanes * 1.1 + * (1.1 is a 10% margin needed for burst mode calculations) + */ + return 0; +} + +static int mcde_display_set_video_mode_default(struct mcde_display_device *ddev, + struct mcde_video_mode *video_mode) +{ + int ret; + struct mcde_video_mode channel_video_mode; + + if (!video_mode) + return -EINVAL; + + ddev->video_mode = *video_mode; + channel_video_mode = ddev->video_mode; + /* Dependent on if display should rotate or MCDE should rotate */ + if (ddev->rotation == MCDE_DISPLAY_ROT_90_CCW || + ddev->rotation == MCDE_DISPLAY_ROT_90_CW) { + channel_video_mode.xres = ddev->native_x_res; + channel_video_mode.yres = ddev->native_y_res; + } + ret = mcde_chnl_set_video_mode(ddev->chnl_state, &channel_video_mode); + if (ret < 0) { + dev_warn(&ddev->dev, "%s:Failed to set video mode\n", __func__); + return ret; + } + + ddev->update_flags |= UPDATE_FLAG_VIDEO_MODE; + + return 0; +} + +static inline void mcde_display_get_video_mode_default( + struct mcde_display_device *ddev, struct mcde_video_mode *video_mode) +{ + if (video_mode) + *video_mode = ddev->video_mode; +} + +static int mcde_display_set_pixel_format_default( + struct mcde_display_device *ddev, enum mcde_ovly_pix_fmt format) +{ + int ret; + + ddev->pixel_format = format; + ret = mcde_chnl_set_pixel_format(ddev->chnl_state, + ddev->port->pixel_format); + if (ret < 0) { + dev_warn(&ddev->dev, "%s:Failed to set pixel format = %d\n", + __func__, format); + return 
ret; + } + + return 0; +} + +static inline enum mcde_ovly_pix_fmt mcde_display_get_pixel_format_default( + struct mcde_display_device *ddev) +{ + return ddev->pixel_format; +} + + +static int mcde_display_set_rotation_default(struct mcde_display_device *ddev, + enum mcde_display_rotation rotation) +{ + int ret; + u8 param = 0; + enum mcde_display_rotation final; + + final = (360 + rotation - ddev->orientation) % 360; + ret = mcde_chnl_set_rotation(ddev->chnl_state, final); + if (WARN_ON(ret)) + return ret; + + if (final == MCDE_DISPLAY_ROT_180_CW) + param = 0x40; /* Horizontal flip */ + (void)mcde_dsi_dcs_write(ddev->chnl_state, DCS_CMD_SET_ADDRESS_MODE, + &param, 1); + + ddev->rotation = rotation; + ddev->update_flags |= UPDATE_FLAG_ROTATION; + + return 0; +} + +static inline enum mcde_display_rotation mcde_display_get_rotation_default( + struct mcde_display_device *ddev) +{ + return ddev->rotation; +} + +static int mcde_display_apply_config_default(struct mcde_display_device *ddev) +{ + int ret; + + if (!ddev->update_flags) + return 0; + + if (ddev->update_flags & UPDATE_FLAG_VIDEO_MODE) + mcde_chnl_stop_flow(ddev->chnl_state); + + ret = mcde_chnl_apply(ddev->chnl_state); + if (ret < 0) { + dev_warn(&ddev->dev, "%s:Failed to apply to channel\n", + __func__); + return ret; + } + ddev->update_flags = 0; + ddev->first_update = true; + + return 0; +} + +static int mcde_display_invalidate_area_default( + struct mcde_display_device *ddev, + struct mcde_rectangle *area) +{ + dev_vdbg(&ddev->dev, "%s\n", __func__); + if (area) { + /* take union of rects */ + u16 t; + t = min(ddev->update_area.x, area->x); + /* note should be > 0 */ + ddev->update_area.w = max(ddev->update_area.x + + ddev->update_area.w, + area->x + area->w) - t; + ddev->update_area.x = t; + t = min(ddev->update_area.y, area->y); + ddev->update_area.h = max(ddev->update_area.y + + ddev->update_area.h, + area->y + area->h) - t; + ddev->update_area.y = t; + /* TODO: Implement real clipping when partial refresh 
is + activated.*/ + ddev->update_area.w = min((u16) ddev->video_mode.xres, + (u16) ddev->update_area.w); + ddev->update_area.h = min((u16) ddev->video_mode.yres, + (u16) ddev->update_area.h); + } else { + ddev->update_area.x = 0; + ddev->update_area.y = 0; + ddev->update_area.w = ddev->video_mode.xres; + ddev->update_area.h = ddev->video_mode.yres; + /* Invalidate_area(ddev, NULL) means reset area to empty + * rectangle really. After that the rectangle should grow by + * taking an union (above). This means that the code should + * really look like below, however the code above is a temp fix + * for rotation. + * TODO: fix + * ddev->update_area.x = ddev->video_mode.xres; + * ddev->update_area.y = ddev->video_mode.yres; + * ddev->update_area.w = 0; + * ddev->update_area.h = 0; + */ + } + + return 0; +} + +static int mcde_display_update_default(struct mcde_display_device *ddev, + bool tripple_buffer) +{ + int ret = 0; + + ret = mcde_chnl_update(ddev->chnl_state, &ddev->update_area, + tripple_buffer); + if (ret < 0) { + dev_warn(&ddev->dev, "%s:Failed to update channel\n", __func__); + return ret; + } + if (ddev->first_update && ddev->on_first_update) + ddev->on_first_update(ddev); + + if (ddev->power_mode != MCDE_DISPLAY_PM_ON && ddev->set_power_mode) { + ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_ON); + if (ret < 0) { + dev_warn(&ddev->dev, + "%s:Failed to set power mode to on\n", + __func__); + return ret; + } + } + + dev_vdbg(&ddev->dev, "Overlay updated, chnl=%d\n", ddev->chnl_id); + + return 0; +} + +static inline int mcde_display_on_first_update_default( + struct mcde_display_device *ddev) +{ + ddev->first_update = false; + return 0; +} + +void mcde_display_init_device(struct mcde_display_device *ddev) +{ + /* Setup default callbacks */ + ddev->get_native_resolution = + mcde_display_get_native_resolution_default; + ddev->get_default_pixel_format = + mcde_display_get_default_pixel_format_default; + ddev->get_physical_size = 
mcde_display_get_physical_size_default; + ddev->set_power_mode = mcde_display_set_power_mode_default; + ddev->get_power_mode = mcde_display_get_power_mode_default; + ddev->try_video_mode = mcde_display_try_video_mode_default; + ddev->set_video_mode = mcde_display_set_video_mode_default; + ddev->get_video_mode = mcde_display_get_video_mode_default; + ddev->set_pixel_format = mcde_display_set_pixel_format_default; + ddev->get_pixel_format = mcde_display_get_pixel_format_default; + ddev->set_rotation = mcde_display_set_rotation_default; + ddev->get_rotation = mcde_display_get_rotation_default; + ddev->apply_config = mcde_display_apply_config_default; + ddev->invalidate_area = mcde_display_invalidate_area_default; + ddev->update = mcde_display_update_default; + ddev->on_first_update = mcde_display_on_first_update_default; + + mutex_init(&ddev->display_lock); +} + diff --git a/drivers/video/mcde/mcde_dss.c b/drivers/video/mcde/mcde_dss.c new file mode 100644 index 00000000000..35a6cbe9540 --- /dev/null +++ b/drivers/video/mcde/mcde_dss.c @@ -0,0 +1,445 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson MCDE display sub system driver + * + * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/slab.h> + +#include <video/mcde_dss.h> + +#define to_overlay(x) container_of(x, struct mcde_overlay, kobj) + +void overlay_release(struct kobject *kobj) +{ + struct mcde_overlay *ovly = to_overlay(kobj); + + kfree(ovly); +} + +struct kobj_type ovly_type = { + .release = overlay_release, +}; + +static int apply_overlay(struct mcde_overlay *ovly, + struct mcde_overlay_info *info, bool force) +{ + int ret = 0; + if (ovly->ddev->invalidate_area) { + /* TODO: transform ovly coord to screen coords (vmode): + * add offset + */ + struct mcde_rectangle dirty = info->dirty; + mutex_lock(&ovly->ddev->display_lock); + ret = ovly->ddev->invalidate_area(ovly->ddev, &dirty); + mutex_unlock(&ovly->ddev->display_lock); + } + + if (ovly->info.paddr != info->paddr || force) + mcde_ovly_set_source_buf(ovly->state, info->paddr); + + if (ovly->info.stride != info->stride || ovly->info.fmt != info->fmt || + force) + mcde_ovly_set_source_info(ovly->state, info->stride, info->fmt); + if (ovly->info.src_x != info->src_x || + ovly->info.src_y != info->src_y || + ovly->info.w != info->w || + ovly->info.h != info->h || force) + mcde_ovly_set_source_area(ovly->state, + info->src_x, info->src_y, info->w, info->h); + if (ovly->info.dst_x != info->dst_x || ovly->info.dst_y != info->dst_y + || ovly->info.dst_z != info->dst_z || + force) + mcde_ovly_set_dest_pos(ovly->state, + info->dst_x, info->dst_y, info->dst_z); + + mcde_ovly_apply(ovly->state); + ovly->info = *info; + + return ret; +} + +/* MCDE DSS operations */ + +int mcde_dss_open_channel(struct mcde_display_device *ddev) +{ + int ret = 0; + struct mcde_chnl_state *chnl; + + mutex_lock(&ddev->display_lock); + /* Acquire MCDE resources */ + chnl = mcde_chnl_get(ddev->chnl_id, ddev->fifo, ddev->port); + if (IS_ERR(chnl)) { + ret = PTR_ERR(chnl); + dev_warn(&ddev->dev, "Failed to acquire MCDE channel\n"); + goto 
chnl_get_failed; + } + ddev->chnl_state = chnl; +chnl_get_failed: + mutex_unlock(&ddev->display_lock); + return ret; +} +EXPORT_SYMBOL(mcde_dss_open_channel); + +void mcde_dss_close_channel(struct mcde_display_device *ddev) +{ + mutex_lock(&ddev->display_lock); + mcde_chnl_put(ddev->chnl_state); + ddev->chnl_state = NULL; + mutex_unlock(&ddev->display_lock); +} +EXPORT_SYMBOL(mcde_dss_close_channel); + +int mcde_dss_enable_display(struct mcde_display_device *ddev) +{ + int ret; + + if (ddev->enabled) + return 0; + + mutex_lock(&ddev->display_lock); + mcde_chnl_enable(ddev->chnl_state); + + /* Initiate display communication */ + ret = ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_STANDBY); + if (ret < 0) { + dev_warn(&ddev->dev, "Failed to initialize display\n"); + goto display_failed; + } + + ret = ddev->set_rotation(ddev, ddev->get_rotation(ddev)); + if (ret < 0) + dev_warn(&ddev->dev, "Failed to set rotation\n"); + + dev_dbg(&ddev->dev, "Display enabled, chnl=%d\n", + ddev->chnl_id); + ddev->enabled = true; + mutex_unlock(&ddev->display_lock); + + return 0; + +display_failed: + mcde_chnl_disable(ddev->chnl_state); + mutex_unlock(&ddev->display_lock); + return ret; +} +EXPORT_SYMBOL(mcde_dss_enable_display); + +void mcde_dss_disable_display(struct mcde_display_device *ddev) +{ + if (!ddev->enabled) + return; + + /* TODO: Disable overlays */ + mutex_lock(&ddev->display_lock); + + mcde_chnl_stop_flow(ddev->chnl_state); + + (void)ddev->set_power_mode(ddev, MCDE_DISPLAY_PM_OFF); + + mcde_chnl_disable(ddev->chnl_state); + + ddev->enabled = false; + mutex_unlock(&ddev->display_lock); + + dev_dbg(&ddev->dev, "Display disabled, chnl=%d\n", ddev->chnl_id); +} +EXPORT_SYMBOL(mcde_dss_disable_display); + +int mcde_dss_apply_channel(struct mcde_display_device *ddev) +{ + int ret; + if (!ddev->apply_config) + return -EINVAL; + mutex_lock(&ddev->display_lock); + ret = ddev->apply_config(ddev); + mutex_unlock(&ddev->display_lock); + + return ret; +} 
+EXPORT_SYMBOL(mcde_dss_apply_channel); + +struct mcde_overlay *mcde_dss_create_overlay(struct mcde_display_device *ddev, + struct mcde_overlay_info *info) +{ + struct mcde_overlay *ovly; + + ovly = kzalloc(sizeof(struct mcde_overlay), GFP_KERNEL); + if (!ovly) + return NULL; + + kobject_init(&ovly->kobj, &ovly_type); /* Local ref */ + kobject_get(&ovly->kobj); /* Creator ref */ + INIT_LIST_HEAD(&ovly->list); + mutex_lock(&ddev->display_lock); + list_add(&ddev->ovlys, &ovly->list); + mutex_unlock(&ddev->display_lock); + ovly->info = *info; + ovly->ddev = ddev; + + return ovly; +} +EXPORT_SYMBOL(mcde_dss_create_overlay); + +void mcde_dss_destroy_overlay(struct mcde_overlay *ovly) +{ + list_del(&ovly->list); + if (ovly->state) + mcde_dss_disable_overlay(ovly); + kobject_put(&ovly->kobj); +} +EXPORT_SYMBOL(mcde_dss_destroy_overlay); + +int mcde_dss_enable_overlay(struct mcde_overlay *ovly) +{ + int ret; + + if (!ovly->ddev->chnl_state) + return -EINVAL; + + if (!ovly->state) { + struct mcde_ovly_state *state; + state = mcde_ovly_get(ovly->ddev->chnl_state); + if (IS_ERR(state)) { + ret = PTR_ERR(state); + dev_warn(&ovly->ddev->dev, + "Failed to acquire overlay\n"); + return ret; + } + ovly->state = state; + } + + apply_overlay(ovly, &ovly->info, true); + + dev_vdbg(&ovly->ddev->dev, "Overlay enabled, chnl=%d\n", + ovly->ddev->chnl_id); + return 0; +} +EXPORT_SYMBOL(mcde_dss_enable_overlay); + +int mcde_dss_apply_overlay(struct mcde_overlay *ovly, + struct mcde_overlay_info *info) +{ + if (info == NULL) + info = &ovly->info; + return apply_overlay(ovly, info, false); +} +EXPORT_SYMBOL(mcde_dss_apply_overlay); + +void mcde_dss_disable_overlay(struct mcde_overlay *ovly) +{ + if (!ovly->state) + return; + + mcde_ovly_put(ovly->state); + + dev_dbg(&ovly->ddev->dev, "Overlay disabled, chnl=%d\n", + ovly->ddev->chnl_id); + + ovly->state = NULL; +} +EXPORT_SYMBOL(mcde_dss_disable_overlay); + +int mcde_dss_update_overlay(struct mcde_overlay *ovly, bool tripple_buffer) +{ + int 
ret; + dev_vdbg(&ovly->ddev->dev, "Overlay update, chnl=%d\n", + ovly->ddev->chnl_id); + + if (!ovly->state || !ovly->ddev->update || !ovly->ddev->invalidate_area) + return -EINVAL; + + mutex_lock(&ovly->ddev->display_lock); + /* Do not perform an update if power mode is off */ + if (ovly->ddev->get_power_mode(ovly->ddev) == MCDE_DISPLAY_PM_OFF) { + ret = 0; + goto power_mode_off; + } + + ret = ovly->ddev->update(ovly->ddev, tripple_buffer); + if (ret) + goto update_failed; + + ret = ovly->ddev->invalidate_area(ovly->ddev, NULL); + +power_mode_off: +update_failed: + mutex_unlock(&ovly->ddev->display_lock); + return ret; +} +EXPORT_SYMBOL(mcde_dss_update_overlay); + +void mcde_dss_get_overlay_info(struct mcde_overlay *ovly, + struct mcde_overlay_info *info) { + if (info) + *info = ovly->info; +} +EXPORT_SYMBOL(mcde_dss_get_overlay_info); + +void mcde_dss_get_native_resolution(struct mcde_display_device *ddev, + u16 *x_res, u16 *y_res) +{ + u16 x_tmp, y_tmp; + mutex_lock(&ddev->display_lock); + ddev->get_native_resolution(ddev, &x_tmp, &y_tmp); + if (ddev->orientation == MCDE_DISPLAY_ROT_90_CW || + ddev->orientation == MCDE_DISPLAY_ROT_90_CCW) { + *x_res = y_tmp; + *y_res = x_tmp; + } else { + *x_res = x_tmp; + *y_res = y_tmp; + } + mutex_unlock(&ddev->display_lock); +} +EXPORT_SYMBOL(mcde_dss_get_native_resolution); + +enum mcde_ovly_pix_fmt mcde_dss_get_default_pixel_format( + struct mcde_display_device *ddev) +{ + int ret; + mutex_lock(&ddev->display_lock); + ret = ddev->get_default_pixel_format(ddev); + mutex_unlock(&ddev->display_lock); + return ret; +} +EXPORT_SYMBOL(mcde_dss_get_default_pixel_format); + +void mcde_dss_get_physical_size(struct mcde_display_device *ddev, + u16 *physical_width, u16 *physical_height) +{ + mutex_lock(&ddev->display_lock); + ddev->get_physical_size(ddev, physical_width, physical_height); + mutex_unlock(&ddev->display_lock); +} +EXPORT_SYMBOL(mcde_dss_get_physical_size); + +int mcde_dss_try_video_mode(struct mcde_display_device 
*ddev, + struct mcde_video_mode *video_mode) +{ + int ret; + mutex_lock(&ddev->display_lock); + ret = ddev->try_video_mode(ddev, video_mode); + mutex_unlock(&ddev->display_lock); + return ret; +} +EXPORT_SYMBOL(mcde_dss_try_video_mode); + +int mcde_dss_set_video_mode(struct mcde_display_device *ddev, + struct mcde_video_mode *vmode) +{ + int ret = 0; + struct mcde_video_mode old_vmode; + + mutex_lock(&ddev->display_lock); + /* Do not perform set_video_mode if power mode is off */ + if (ddev->get_power_mode(ddev) == MCDE_DISPLAY_PM_OFF) + goto power_mode_off; + + ddev->get_video_mode(ddev, &old_vmode); + if (memcmp(vmode, &old_vmode, sizeof(old_vmode)) == 0) + goto same_video_mode; + + ret = ddev->set_video_mode(ddev, vmode); + if (ret) + goto set_video_mode_failed; + + if (ddev->invalidate_area) + ret = ddev->invalidate_area(ddev, NULL); +power_mode_off: +same_video_mode: +set_video_mode_failed: + mutex_unlock(&ddev->display_lock); + return ret; +} +EXPORT_SYMBOL(mcde_dss_set_video_mode); + +void mcde_dss_get_video_mode(struct mcde_display_device *ddev, + struct mcde_video_mode *video_mode) +{ + mutex_lock(&ddev->display_lock); + ddev->get_video_mode(ddev, video_mode); + mutex_unlock(&ddev->display_lock); +} +EXPORT_SYMBOL(mcde_dss_get_video_mode); + +int mcde_dss_set_pixel_format(struct mcde_display_device *ddev, + enum mcde_ovly_pix_fmt pix_fmt) +{ + enum mcde_ovly_pix_fmt old_pix_fmt; + int ret; + + mutex_lock(&ddev->display_lock); + old_pix_fmt = ddev->get_pixel_format(ddev); + if (old_pix_fmt == pix_fmt) { + ret = 0; + goto same_pixel_format; + } + + ret = ddev->set_pixel_format(ddev, pix_fmt); + +same_pixel_format: + mutex_unlock(&ddev->display_lock); + return ret; +} +EXPORT_SYMBOL(mcde_dss_set_pixel_format); + +int mcde_dss_get_pixel_format(struct mcde_display_device *ddev) +{ + int ret; + mutex_lock(&ddev->display_lock); + ret = ddev->get_pixel_format(ddev); + mutex_unlock(&ddev->display_lock); + return ret; +} +EXPORT_SYMBOL(mcde_dss_get_pixel_format); + 
+int mcde_dss_set_rotation(struct mcde_display_device *ddev, + enum mcde_display_rotation rotation) +{ + int ret; + enum mcde_display_rotation old_rotation; + + mutex_lock(&ddev->display_lock); + old_rotation = ddev->get_rotation(ddev); + if (old_rotation == rotation) { + ret = 0; + goto same_rotation; + } + + ret = ddev->set_rotation(ddev, rotation); +same_rotation: + mutex_unlock(&ddev->display_lock); + return ret; +} +EXPORT_SYMBOL(mcde_dss_set_rotation); + +enum mcde_display_rotation mcde_dss_get_rotation( + struct mcde_display_device *ddev) +{ + int ret; + mutex_lock(&ddev->display_lock); + ret = ddev->get_rotation(ddev); + mutex_unlock(&ddev->display_lock); + return ret; +} +EXPORT_SYMBOL(mcde_dss_get_rotation); + +int __init mcde_dss_init(void) +{ + return 0; +} + +void mcde_dss_exit(void) +{ +} + diff --git a/drivers/video/mcde/mcde_fb.c b/drivers/video/mcde/mcde_fb.c new file mode 100644 index 00000000000..7d877ec3e4f --- /dev/null +++ b/drivers/video/mcde/mcde_fb.c @@ -0,0 +1,898 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * + * ST-Ericsson MCDE frame buffer driver + * + * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/fb.h> +#include <linux/mm.h> +#include <linux/dma-mapping.h> + +#include <linux/hwmem.h> +#include <linux/io.h> + +#include <linux/console.h> + +#include <video/mcde_fb.h> + +#define MCDE_FB_BPP_MAX 16 +#define MCDE_FB_VXRES_MAX 1920 +#define MCDE_FB_VYRES_MAX 2160 + +static struct fb_ops fb_ops; + +struct pix_fmt_info { + enum mcde_ovly_pix_fmt pix_fmt; + + u32 bpp; + struct fb_bitfield r; + struct fb_bitfield g; + struct fb_bitfield b; + struct fb_bitfield a; + u32 nonstd; +}; + +struct pix_fmt_info pix_fmt_map[] = { + { + .pix_fmt = MCDE_OVLYPIXFMT_RGB565, + .bpp = 16, + .r = { .offset = 11, .length = 5 }, + .g = { .offset = 5, .length = 6 }, + .b = { .offset = 0, .length = 5 }, + }, { + .pix_fmt = MCDE_OVLYPIXFMT_RGBA5551, + .bpp = 16, + .r = { .offset = 11, .length = 5 }, + .g = { .offset = 6, .length = 5 }, + .b = { .offset = 1, .length = 5 }, + .a = { .offset = 0, .length = 1 }, + }, { + .pix_fmt = MCDE_OVLYPIXFMT_RGBA4444, + .bpp = 16, + .r = { .offset = 12, .length = 4 }, + .g = { .offset = 8, .length = 4 }, + .b = { .offset = 4, .length = 4 }, + .a = { .offset = 0, .length = 4 }, + }, { + .pix_fmt = MCDE_OVLYPIXFMT_YCbCr422, + .bpp = 16, + .nonstd = MCDE_OVLYPIXFMT_YCbCr422, + }, { + .pix_fmt = MCDE_OVLYPIXFMT_RGB888, + .bpp = 24, + .r = { .offset = 16, .length = 8 }, + .g = { .offset = 8, .length = 8 }, + .b = { .offset = 0, .length = 8 }, + }, { + .pix_fmt = MCDE_OVLYPIXFMT_RGBA8888, + .bpp = 32, + .r = { .offset = 16, .length = 8 }, + .g = { .offset = 8, .length = 8 }, + .b = { .offset = 0, .length = 8 }, + .a = { .offset = 24, .length = 8 }, + }, { + .pix_fmt = MCDE_OVLYPIXFMT_RGBX8888, + .bpp = 32, + .r = { .offset = 16, .length = 8 }, + .g = { .offset = 8, .length = 8 }, + .b = { .offset = 0, .length = 8 }, + } + +}; + +static struct platform_device mcde_fb_device = { + .name = "mcde_fb", + .id = -1, +}; + 
+#ifdef CONFIG_HAS_EARLYSUSPEND +static void early_suspend(struct early_suspend *data) +{ + int i; + struct mcde_fb *mfb = + container_of(data, struct mcde_fb, early_suspend); + + console_lock(); + for (i = 0; i < mfb->num_ovlys; i++) { + if (mfb->ovlys[i] && mfb->ovlys[i]->ddev && + (mfb->ovlys[i]->ddev->stay_alive == false)) + mcde_dss_disable_display(mfb->ovlys[i]->ddev); + } + console_unlock(); +} + +static void late_resume(struct early_suspend *data) +{ + int i; + struct mcde_fb *mfb = + container_of(data, struct mcde_fb, early_suspend); + + console_lock(); + for (i = 0; i < mfb->num_ovlys; i++) { + if (mfb->ovlys[i]) { + struct mcde_overlay *ovly = mfb->ovlys[i]; + (void) mcde_dss_enable_display(ovly->ddev); + } + } + console_unlock(); +} +#endif + +/* Helpers */ + +static struct pix_fmt_info *find_pix_fmt_info(enum mcde_ovly_pix_fmt pix_fmt) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pix_fmt_map); i++) { + if (pix_fmt_map[i].pix_fmt == pix_fmt) + return &pix_fmt_map[i]; + } + return NULL; +} + +static bool bitfield_cmp(struct fb_bitfield *bf1, struct fb_bitfield *bf2) +{ + return bf1->offset == bf2->offset && + bf1->length == bf2->length && + bf1->msb_right == bf2->msb_right; +} + +static struct pix_fmt_info *var_to_pix_fmt_info(struct fb_var_screeninfo *var) +{ + int i; + struct pix_fmt_info *info; + + if (var->nonstd) + return find_pix_fmt_info(var->nonstd); + + for (i = 0; i < ARRAY_SIZE(pix_fmt_map); i++) { + info = &pix_fmt_map[i]; + if (info->bpp == var->bits_per_pixel && + bitfield_cmp(&info->r, &var->red) && + bitfield_cmp(&info->g, &var->green) && + bitfield_cmp(&info->b, &var->blue) && + bitfield_cmp(&info->a, &var->transp)) + return info; + } + + for (i = 0; i < ARRAY_SIZE(pix_fmt_map); i++) { + info = &pix_fmt_map[i]; + if (var->bits_per_pixel == info->bpp) + return info; + } + + return NULL; +} + +static void pix_fmt_info_to_var(struct pix_fmt_info *pix_fmt_info, + struct fb_var_screeninfo *var) +{ + var->bits_per_pixel = pix_fmt_info->bpp; + 
var->nonstd = pix_fmt_info->nonstd; + var->red = pix_fmt_info->r; + var->green = pix_fmt_info->g; + var->blue = pix_fmt_info->b; + var->transp = pix_fmt_info->a; +} + +static int init_var_fmt(struct fb_var_screeninfo *var, + u16 w, u16 h, u16 vw, u16 vh, enum mcde_ovly_pix_fmt pix_fmt, + u32 rotate) +{ + struct pix_fmt_info *info; + + info = find_pix_fmt_info(pix_fmt); + if (!info) + return -EINVAL; + + var->bits_per_pixel = info->bpp; + var->nonstd = info->nonstd; + var->red = info->r; + var->green = info->g; + var->blue = info->b; + var->transp = info->a; + var->grayscale = false; + + var->xres = w; + var->yres = h; + var->xres_virtual = vw; + var->yres_virtual = vh; + var->xoffset = 0; + var->yoffset = 0; + var->activate = FB_ACTIVATE_NOW; + var->rotate = rotate; + + return 0; +}; + +static int reallocate_fb_mem(struct fb_info *fbi, u32 size) +{ + struct mcde_fb *mfb = to_mcde_fb(fbi); + void *vaddr; + struct hwmem_alloc *alloc; + struct hwmem_mem_chunk mem_chunk; + size_t num_mem_chunks = 1; + int name; + + size = PAGE_ALIGN(size); + + if (size == fbi->screen_size) + return 0; + +/* TODO: Remove once hwmem has support for defragmentation */ +#ifdef CONFIG_MCDE_FB_AVOID_REALLOC + if (!mfb->alloc) { + u32 old_size = size; + + size = MCDE_FB_BPP_MAX / 8 * MCDE_FB_VXRES_MAX * + MCDE_FB_VYRES_MAX; +#endif + + alloc = hwmem_alloc(size, HWMEM_ALLOC_HINT_WRITE_COMBINE | + HWMEM_ALLOC_HINT_UNCACHED, + (HWMEM_ACCESS_READ | HWMEM_ACCESS_WRITE | + HWMEM_ACCESS_IMPORT), + HWMEM_MEM_CONTIGUOUS_SYS); + if (IS_ERR(alloc)) + return PTR_ERR(alloc); + + name = hwmem_get_name(alloc); + if (name < 0) { + hwmem_release(alloc); + return name; + } + + if (mfb->alloc) { + hwmem_kunmap(mfb->alloc); + hwmem_unpin(mfb->alloc); + hwmem_release(mfb->alloc); + } + + (void)hwmem_pin(alloc, &mem_chunk, &num_mem_chunks); + + vaddr = hwmem_kmap(alloc); + if (vaddr == NULL) { + hwmem_unpin(alloc); + hwmem_release(alloc); + return -ENOMEM; + } + + mfb->alloc = alloc; + mfb->alloc_name = name; + + 
fbi->screen_base = vaddr; + fbi->fix.smem_start = mem_chunk.paddr; + +#ifdef CONFIG_MCDE_FB_AVOID_REALLOC + size = old_size; + } +#endif + + fbi->screen_size = size; + fbi->fix.smem_len = size; + + return 0; +} + +static void free_fb_mem(struct fb_info *fbi) +{ + struct mcde_fb *mfb = to_mcde_fb(fbi); + + if (mfb->alloc) { + hwmem_kunmap(mfb->alloc); + hwmem_unpin(mfb->alloc); + hwmem_release(mfb->alloc); + mfb->alloc = NULL; + mfb->alloc_name = 0; + + fbi->fix.smem_start = 0; + fbi->fix.smem_len = 0; + fbi->screen_base = 0; + fbi->screen_size = 0; + } +} + +static void init_fb(struct fb_info *fbi) +{ + struct mcde_fb *mfb = to_mcde_fb(fbi); + + strlcpy(fbi->fix.id, "mcde_fb", sizeof(fbi->fix.id)); + fbi->fix.type = FB_TYPE_PACKED_PIXELS; + fbi->fix.visual = FB_VISUAL_DIRECTCOLOR; + fbi->fix.xpanstep = 1; + fbi->fix.ypanstep = 1; + fbi->flags = FBINFO_HWACCEL_DISABLED; + fbi->fbops = &fb_ops; + fbi->pseudo_palette = &mfb->pseudo_palette[0]; +} + +static void get_ovly_info(struct fb_info *fbi, struct mcde_overlay *ovly, + struct mcde_overlay_info *info) +{ + struct mcde_fb *mfb = to_mcde_fb(fbi); + + memset(info, 0, sizeof(*info)); + info->paddr = fbi->fix.smem_start + + fbi->fix.line_length * fbi->var.yoffset; + info->vaddr = (u32 *)(fbi->screen_base + + fbi->fix.line_length * fbi->var.yoffset); + /* TODO: move mem check to check_var/pan_display */ + if (info->paddr + fbi->fix.line_length * fbi->var.yres > + fbi->fix.smem_start + fbi->fix.smem_len) { + info->paddr = fbi->fix.smem_start; + info->vaddr = (u32 *)fbi->screen_base; + } + info->fmt = mfb->pix_fmt; + info->stride = fbi->fix.line_length; + if (ovly) { + info->src_x = ovly->info.src_x; + info->src_y = ovly->info.src_y; + info->dst_x = ovly->info.dst_x; + info->dst_y = ovly->info.dst_y; + info->dst_z = 1; + } else { + info->src_x = 0; + info->src_y = 0; + info->dst_x = 0; + info->dst_y = 0; + info->dst_z = 1; + } + info->w = fbi->var.xres; + info->h = fbi->var.yres; + info->dirty.x = 0; + info->dirty.y = 0; 
+ info->dirty.w = fbi->var.xres; + info->dirty.h = fbi->var.yres; +} + +void vmode_to_var(struct mcde_video_mode *video_mode, + struct fb_var_screeninfo *var) +{ + /* TODO: use only 1 vbp and 1 vfp */ + var->xres = video_mode->xres; + var->yres = video_mode->yres; + var->pixclock = video_mode->pixclock; + var->upper_margin = video_mode->vbp; + var->lower_margin = video_mode->vfp; + var->vsync_len = video_mode->vsw; + var->left_margin = video_mode->hbp; + var->right_margin = video_mode->hfp; + var->hsync_len = video_mode->hsw; + var->vmode &= ~FB_VMODE_INTERLACED; + var->vmode |= video_mode->interlaced ? + FB_VMODE_INTERLACED : FB_VMODE_NONINTERLACED; +} + +void var_to_vmode(struct fb_var_screeninfo *var, + struct mcde_video_mode *video_mode) +{ + video_mode->xres = var->xres; + video_mode->yres = var->yres; + video_mode->pixclock = var->pixclock; + video_mode->vbp = var->upper_margin; + video_mode->vfp = var->lower_margin; + video_mode->vsw = var->vsync_len; + video_mode->hbp = var->left_margin; + video_mode->hfp = var->right_margin; + video_mode->hsw = var->hsync_len; + video_mode->interlaced = (var->vmode & FB_VMODE_INTERLACED) == + FB_VMODE_INTERLACED; +} + +enum mcde_display_rotation var_to_rotation(struct fb_var_screeninfo *var) +{ + enum mcde_display_rotation rot; + + switch (var->rotate) { + case FB_ROTATE_UR: + rot = MCDE_DISPLAY_ROT_0; + break; + case FB_ROTATE_CW: + rot = MCDE_DISPLAY_ROT_90_CW; + break; + case FB_ROTATE_UD: + rot = MCDE_DISPLAY_ROT_180_CW; + break; + case FB_ROTATE_CCW: + rot = MCDE_DISPLAY_ROT_90_CCW; + break; + default: + rot = MCDE_DISPLAY_ROT_0; + break; + } + dev_vdbg(&mcde_fb_device.dev, "var_rot: %d -> mcde_rot: %d\n", + var->rotate, rot); + return rot; +} + +static struct mcde_display_device *fb_to_display(struct fb_info *fbi) +{ + int i; + struct mcde_fb *mfb = to_mcde_fb(fbi); + + for (i = 0; i < mfb->num_ovlys; i++) { + if (mfb->ovlys[i]) + return mfb->ovlys[i]->ddev; + } + return NULL; +} + +static int check_var(struct 
fb_var_screeninfo *var, struct fb_info *fbi, + struct mcde_display_device *ddev) +{ + int ret; + u16 w = -1, h = -1; + struct mcde_video_mode vmode; + struct pix_fmt_info *fmtinfo; + + /* TODO: check sizes/offsets/memory validity */ + + /* Device physical size */ + mcde_dss_get_physical_size(ddev, &w, &h); + var->width = w; + var->height = h; + + /* Rotation */ + if (var->rotate > 3) { + dev_info(&(ddev->dev), "check_var failed var->rotate\n"); + return -EINVAL; + } + + /* Video mode */ + var_to_vmode(var, &vmode); + ret = mcde_dss_try_video_mode(ddev, &vmode); + if (ret < 0) { + dev_vdbg(&(ddev->dev), "check_var failed " + "mcde_dss_try_video_mode with size = %x\n", ret); + return ret; + } + vmode_to_var(&vmode, var); + + /* Pixel format */ + fmtinfo = var_to_pix_fmt_info(var); + if (!fmtinfo) { + dev_vdbg(&(ddev->dev), "check_var failed fmtinfo\n"); + return -EINVAL; + } + pix_fmt_info_to_var(fmtinfo, var); + + /* Not used */ + var->grayscale = 0; + var->sync = 0; + + return 0; +} + +static int apply_var(struct fb_info *fbi, struct mcde_display_device *ddev) +{ + int ret, i; + struct mcde_fb *mfb = to_mcde_fb(fbi); + struct fb_var_screeninfo *var; + struct mcde_video_mode vmode; + struct pix_fmt_info *fmt; + u32 line_len, size; + + if (!ddev) + return -ENODEV; + + dev_vdbg(&(ddev->dev), "%s\n", __func__); + + var = &fbi->var; + + ddev->check_transparency = 60; + + /* Reallocate memory */ + line_len = (fbi->var.bits_per_pixel * var->xres_virtual) / 8; + line_len = ALIGN(line_len, MCDE_BUF_LINE_ALIGMENT); + size = line_len * var->yres_virtual; + ret = reallocate_fb_mem(fbi, size); + if (ret) { + dev_vdbg(&(ddev->dev), "apply_var failed with" + "reallocate mem with size = %d\n", size); + return ret; + } + fbi->fix.line_length = line_len; + + if (ddev->fictive) + goto apply_var_end; + + /* Apply pixel format */ + fmt = var_to_pix_fmt_info(var); + mfb->pix_fmt = fmt->pix_fmt; + + /* Apply rotation */ + mcde_dss_set_rotation(ddev, var_to_rotation(var)); + /* Apply 
video mode */ + memset(&vmode, 0, sizeof(struct mcde_video_mode)); + var_to_vmode(var, &vmode); + ret = mcde_dss_set_video_mode(ddev, &vmode); + if (ret) + return ret; + + mcde_dss_apply_channel(ddev); + + /* Apply overlay info */ + for (i = 0; i < mfb->num_ovlys; i++) { + struct mcde_overlay *ovly = mfb->ovlys[i]; + struct mcde_overlay_info info; + int num_buffers; + + get_ovly_info(fbi, ovly, &info); + (void) mcde_dss_apply_overlay(ovly, &info); + + num_buffers = var->yres_virtual / var->yres; + mcde_dss_update_overlay(ovly, num_buffers == 3); + } + +apply_var_end: + return 0; +} + +/* FB ops */ + +static int mcde_fb_open(struct fb_info *fbi, int user) +{ + dev_vdbg(fbi->dev, "%s\n", __func__); + return 0; +} + +static int mcde_fb_release(struct fb_info *fbi, int user) +{ + dev_vdbg(fbi->dev, "%s\n", __func__); + return 0; +} + +static int mcde_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi) +{ + struct mcde_display_device *ddev = fb_to_display(fbi); + + dev_vdbg(fbi->dev, "%s\n", __func__); + + if (!ddev) { + printk(KERN_ERR "mcde_fb_check_var failed !ddev\n"); + return -ENODEV; + } + + return check_var(var, fbi, ddev); +} + +static int mcde_fb_set_par(struct fb_info *fbi) +{ + struct mcde_fb *mfb = to_mcde_fb(fbi); + struct mcde_display_device *ddev = fb_to_display(fbi); + dev_vdbg(fbi->dev, "%s\n", __func__); + + if (mfb->ovlys[0]->state == NULL && + ddev->fictive == false) { + printk(KERN_INFO "%s() - Enable fb %p\n", + __func__, + mfb->ovlys[0]); + mcde_dss_enable_overlay(mfb->ovlys[0]); + } + + return apply_var(fbi, ddev); +} + +static int mcde_fb_blank(int blank, struct fb_info *fbi) +{ + int ret = 0; + struct mcde_display_device *ddev = fb_to_display(fbi); + + dev_vdbg(fbi->dev, "%s\n", __func__); + + if (ddev->fictive) + goto mcde_fb_blank_end; + + switch (blank) { + case FB_BLANK_NORMAL: + break; + case FB_BLANK_VSYNC_SUSPEND: + case FB_BLANK_HSYNC_SUSPEND: + case FB_BLANK_POWERDOWN: + mcde_dss_disable_display(ddev); + break; + case 
FB_BLANK_UNBLANK: + ret = mcde_dss_enable_display(ddev); + break; + default: + ret = -EINVAL; + } + +mcde_fb_blank_end: + return ret; +} + +static int mcde_fb_pan_display(struct fb_var_screeninfo *var, + struct fb_info *fbi) +{ + dev_vdbg(fbi->dev, "%s\n", __func__); + + if (var->xoffset == fbi->var.xoffset && + var->yoffset == fbi->var.yoffset) + return 0; + + fbi->var.xoffset = var->xoffset; + fbi->var.yoffset = var->yoffset; + return apply_var(fbi, fb_to_display(fbi)); +} + +static void mcde_fb_rotate(struct fb_info *fbi, int rotate) +{ + dev_vdbg(fbi->dev, "%s\n", __func__); +} + +static int mcde_fb_ioctl(struct fb_info *fbi, unsigned int cmd, + unsigned long arg) +{ + struct mcde_fb *mfb = to_mcde_fb(fbi); + + if (cmd == MCDE_GET_BUFFER_NAME_IOC) + return mfb->alloc_name; + + return -EINVAL; +} + +static int mcde_fb_setcolreg(unsigned int regno, unsigned int red, + unsigned int green, unsigned int blue, unsigned int transp, + struct fb_info *fbi) +{ + dev_vdbg(fbi->dev, "%s\n", __func__); + + if (regno >= 256) + return 1; + + if (regno < 17) { + u32 pseudo_val; + u32 r, g, b; + + if (fbi->var.bits_per_pixel > 16) { + r = red >> 8; + g = green >> 8; + b = blue >> 8; + } else if (fbi->var.bits_per_pixel == 16) { + r = red >> (3 + 8); + g = green >> (2 + 8); + b = blue >> (3 + 8); + } else if (fbi->var.bits_per_pixel == 15) { + r = red >> (3 + 8); + g = green >> (3 + 8); + b = blue >> (3 + 8); + } else + r = b = g = (regno & 15); + pseudo_val = r << fbi->var.red.offset; + pseudo_val |= g << fbi->var.green.offset; + pseudo_val |= b << fbi->var.blue.offset; + + ((u32 *)fbi->pseudo_palette)[regno] = pseudo_val; + } + + return 0; +} + +static int mcde_fb_setcmap(struct fb_cmap *cmap, struct fb_info *fbi) +{ + dev_vdbg(fbi->dev, "%s\n", __func__); + + /*Nothing to see here, move along*/ + return 0; +} + +static struct fb_ops fb_ops = { + /* creg, cmap */ + .owner = THIS_MODULE, + .fb_open = mcde_fb_open, + .fb_release = mcde_fb_release, + .fb_read = fb_sys_read, + 
.fb_write = fb_sys_write, + .fb_fillrect = sys_fillrect, + .fb_copyarea = sys_copyarea, + .fb_imageblit = sys_imageblit, + .fb_check_var = mcde_fb_check_var, + .fb_set_par = mcde_fb_set_par, + .fb_blank = mcde_fb_blank, + .fb_pan_display = mcde_fb_pan_display, + .fb_rotate = mcde_fb_rotate, + .fb_ioctl = mcde_fb_ioctl, + .fb_setcolreg = mcde_fb_setcolreg, + .fb_setcmap = mcde_fb_setcmap, +}; + +/* FB driver */ + +struct fb_info *mcde_fb_create(struct mcde_display_device *ddev, + u16 w, u16 h, u16 vw, u16 vh, enum mcde_ovly_pix_fmt pix_fmt, + u32 rotate) +{ + int ret = 0; + struct fb_info *fbi; + struct mcde_fb *mfb; + struct mcde_overlay *ovly = NULL; + struct mcde_overlay_info ovly_info; + + dev_vdbg(&ddev->dev, "%s\n", __func__); + if (!ddev->initialized) { + dev_warn(&ddev->dev, "%s: Device not initialized\n", __func__); + return ERR_PTR(-EINVAL); + } + + /* Init fb */ + fbi = framebuffer_alloc(sizeof(struct mcde_fb), &mcde_fb_device.dev); + if (fbi == NULL) { + ret = -ENOMEM; + goto fb_alloc_failed; + } + init_fb(fbi); + mfb = to_mcde_fb(fbi); + + if (ddev->fictive == false) { + ret = mcde_dss_open_channel(ddev); + if (ret) + goto channel_open_failed; + + ret = mcde_dss_enable_display(ddev); + if (ret) + goto display_enable_failed; + } + + /* Prepare var and allocate frame buffer memory */ + init_var_fmt(&fbi->var, w, h, vw, vh, pix_fmt, rotate); + check_var(&fbi->var, fbi, ddev); + ret = apply_var(fbi, ddev); + if (ret) + goto apply_var_failed; + + if (ddev->fictive == false) + mcde_dss_set_pixel_format(ddev, ddev->port->pixel_format); + + /* Setup overlay */ + get_ovly_info(fbi, NULL, &ovly_info); + ovly = mcde_dss_create_overlay(ddev, &ovly_info); + if (!ovly) { + ret = PTR_ERR(ovly); + goto ovly_alloc_failed; + } + mfb->ovlys[0] = ovly; + mfb->num_ovlys = 1; + + if (ddev->fictive == false) { + ret = mcde_dss_enable_overlay(ovly); + if (ret) + goto ovly_enable_failed; + } + + mfb->id = ddev->id; + + /* Register framebuffer */ + ret = 
register_framebuffer(fbi); + if (ret) + goto fb_register_failed; + + ret = fb_alloc_cmap(&fbi->cmap, 256, 0); + if (ret) + dev_warn(&ddev->dev, "%s: Allocate color map memory failed!\n", + __func__); + + ddev->fbi = fbi; + +#ifdef CONFIG_HAS_EARLYSUSPEND + if (ddev->fictive == false) { + mfb->early_suspend.level = + EARLY_SUSPEND_LEVEL_DISABLE_FB; + mfb->early_suspend.suspend = early_suspend; + mfb->early_suspend.resume = late_resume; + register_early_suspend(&mfb->early_suspend); + } +#endif + + goto out; +fb_register_failed: + mcde_dss_disable_overlay(ovly); +ovly_enable_failed: + mcde_dss_destroy_overlay(ovly); +ovly_alloc_failed: + free_fb_mem(fbi); +apply_var_failed: + mcde_dss_disable_display(ddev); +display_enable_failed: + mcde_dss_close_channel(ddev); +channel_open_failed: + framebuffer_release(fbi); + fbi = NULL; +fb_alloc_failed: +out: + return ret ? ERR_PTR(ret) : fbi; +} +EXPORT_SYMBOL(mcde_fb_create); + +int mcde_fb_attach_overlay(struct fb_info *fb_info, struct mcde_overlay *ovl) +{ + /* TODO: Attach extra overlay targets */ + return -EINVAL; +} + +void mcde_fb_destroy(struct mcde_display_device *dev) +{ + struct mcde_fb *mfb; + int i; + + dev_vdbg(&dev->dev, "%s\n", __func__); + + if (dev->fictive == false) { + mcde_dss_disable_display(dev); + mcde_dss_close_channel(dev); + } + + mfb = to_mcde_fb(dev->fbi); + for (i = 0; i < mfb->num_ovlys; i++) { + if (mfb->ovlys[i]) + mcde_dss_destroy_overlay(mfb->ovlys[i]); + } + +#ifdef CONFIG_HAS_EARLYSUSPEND + if (dev->fictive == false) + unregister_early_suspend(&mfb->early_suspend); +#endif + fb_dealloc_cmap(&dev->fbi->cmap); + + unregister_framebuffer(dev->fbi); + free_fb_mem(dev->fbi); + framebuffer_release(dev->fbi); + dev->fbi = NULL; +} + +/* Overlay fbs' platform device */ +static int mcde_fb_probe(struct platform_device *pdev) +{ + return 0; +} + +static int mcde_fb_remove(struct platform_device *pdev) +{ + return 0; +} + +static struct platform_driver mcde_fb_driver = { + .probe = mcde_fb_probe, + 
.remove = mcde_fb_remove, + .driver = { + .name = "mcde_fb", + .owner = THIS_MODULE, + }, +}; + +/* MCDE fb init */ + +int __init mcde_fb_init(void) +{ + int ret; + + ret = platform_driver_register(&mcde_fb_driver); + if (ret) + goto fb_driver_failed; + ret = platform_device_register(&mcde_fb_device); + if (ret) + goto fb_device_failed; + + goto out; +fb_device_failed: + platform_driver_unregister(&mcde_fb_driver); +fb_driver_failed: +out: + return ret; +} + +void mcde_fb_exit(void) +{ + platform_device_unregister(&mcde_fb_device); + platform_driver_unregister(&mcde_fb_driver); +} diff --git a/drivers/video/mcde/mcde_hw.c b/drivers/video/mcde/mcde_hw.c new file mode 100644 index 00000000000..92cdb1ef7a9 --- /dev/null +++ b/drivers/video/mcde/mcde_hw.c @@ -0,0 +1,3834 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * ST-Ericsson MCDE base driver + * + * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ + +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/io.h> +#include <linux/gpio.h> +#include <linux/delay.h> +#include <linux/spinlock.h> +#include <linux/err.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/regulator/consumer.h> +#include <linux/clk.h> +#include <linux/slab.h> +#include <linux/jiffies.h> +#include <linux/workqueue.h> + +#include <linux/mfd/dbx500-prcmu.h> + +#include <video/mcde.h> +#include "dsilink_regs.h" +#include "mcde_regs.h" +#include "mcde_debugfs.h" + + +/* MCDE channel states + * + * Allowed state transitions: + * IDLE <-> SUSPEND + * IDLE <-> DSI_READ + * IDLE <-> DSI_WRITE + * IDLE -> SETUP -> (WAIT_TE ->) RUNNING -> STOPPING1 -> STOPPING2 -> IDLE + * WAIT_TE -> STOPPED (for missing TE to allow re-enable) + */ +enum chnl_state { + CHNLSTATE_SUSPEND, /* HW in suspended mode, initial state */ + CHNLSTATE_IDLE, /* Channel aquired, but not running, FLOEN==0 */ + CHNLSTATE_DSI_READ, /* Executing DSI read */ + CHNLSTATE_DSI_WRITE, /* Executing DSI write */ + CHNLSTATE_SETUP, /* Channel register setup to prepare for running */ + CHNLSTATE_WAIT_TE, /* Waiting for BTA or external TE */ + CHNLSTATE_RUNNING, /* Update started, FLOEN=1, FLOEN==1 */ + CHNLSTATE_STOPPING, /* Stopping, FLOEN=0, FLOEN==1, awaiting VCMP */ + CHNLSTATE_STOPPED, /* Stopped, after VCMP, FLOEN==0|1 */ +}; + +enum dsi_lane_status { + DSI_LANE_STATE_START = 0x00, + DSI_LANE_STATE_IDLE = 0x01, + DSI_LANE_STATE_WRITE = 0x02, + DSI_LANE_STATE_ULPM = 0x03, +}; + +static int set_channel_state_atomic(struct mcde_chnl_state *chnl, + enum chnl_state state); +static int set_channel_state_sync(struct mcde_chnl_state *chnl, + enum chnl_state state); +static void stop_channel(struct mcde_chnl_state *chnl); +static int _mcde_chnl_enable(struct mcde_chnl_state *chnl); +static int _mcde_chnl_apply(struct mcde_chnl_state *chnl); +static void 
disable_flow(struct mcde_chnl_state *chnl); +static void enable_flow(struct mcde_chnl_state *chnl); +static void do_softwaretrig(struct mcde_chnl_state *chnl); +static void dsi_te_poll_req(struct mcde_chnl_state *chnl); +static void dsi_te_poll_set_timer(struct mcde_chnl_state *chnl, + unsigned int timeout); +static void dsi_te_timer_function(unsigned long value); +static int wait_for_vcmp(struct mcde_chnl_state *chnl); +static int probe_hw(struct platform_device *pdev); +static void wait_for_flow_disabled(struct mcde_chnl_state *chnl); + +#define OVLY_TIMEOUT 100 +#define CHNL_TIMEOUT 100 +#define FLOW_STOP_TIMEOUT 20 +#define SCREEN_PPL_HIGH 1280 +#define SCREEN_PPL_CEA2 720 +#define SCREEN_LPF_CEA2 480 +#define DSI_DELAY0_CEA2_ADD 10 + +#define MCDE_SLEEP_WATCHDOG 500 +#define DSI_TE_NO_ANSWER_TIMEOUT_INIT 2500 +#define DSI_TE_NO_ANSWER_TIMEOUT 250 +#define DSI_WAIT_FOR_ULPM_STATE_MS 1 +#define DSI_ULPM_STATE_NBR_OF_RETRIES 10 +#define DSI_READ_TIMEOUT 200 +#define DSI_WRITE_CMD_TIMEOUT 1000 +#define DSI_READ_DELAY 5 +#define DSI_READ_NBR_OF_RETRIES 2 +#define MCDE_FLOWEN_MAX_TRIAL 60 + +#define MCDE_VERSION_4_1_3 0x04010300 +#define MCDE_VERSION_4_0_4 0x04000400 +#define MCDE_VERSION_3_0_8 0x03000800 +#define MCDE_VERSION_3_0_5 0x03000500 +#define MCDE_VERSION_1_0_4 0x01000400 + +#define CLK_MCDE "mcde" +#define CLK_DPI "lcd" + +static u8 *mcdeio; +static u8 **dsiio; +static struct platform_device *mcde_dev; +static u8 num_dsilinks; +static u8 num_channels; +static u8 num_overlays; +static int mcde_irq; +static u32 input_fifo_size; +static u32 output_fifo_ab_size; +static u32 output_fifo_c0c1_size; + +static struct regulator *regulator_vana; +static struct regulator *regulator_mcde_epod; +static struct regulator *regulator_esram_epod; +static struct clk *clock_mcde; + +/* TODO remove when all platforms support dsilp and dsihs clocks */ +static struct clk *clock_dsi; +static struct clk *clock_dsi_lp; + +static u8 mcde_is_enabled; +static struct delayed_work 
hw_timeout_work; +static u8 dsi_pll_is_enabled; +static u8 dsi_ifc_is_supported; +static u8 dsi_use_clk_framework; +static u32 mcde_clk_rate; /* In Hz */ + +static struct mutex mcde_hw_lock; +static inline void mcde_lock(const char *func, int line) +{ + mutex_lock(&mcde_hw_lock); + dev_vdbg(&mcde_dev->dev, "Enter MCDE: %s:%d\n", func, line); +} + +static inline void mcde_unlock(const char *func, int line) +{ + dev_vdbg(&mcde_dev->dev, "Exit MCDE: %s:%d\n", func, line); + mutex_unlock(&mcde_hw_lock); +} + +static inline bool mcde_trylock(const char *func, int line) +{ + bool locked = mutex_trylock(&mcde_hw_lock) == 1; + if (locked) + dev_vdbg(&mcde_dev->dev, "Enter MCDE: %s:%d\n", func, line); + return locked; +} + +static u8 mcde_dynamic_power_management = true; + +static inline u32 dsi_rreg(int i, u32 reg) +{ + return readl(dsiio[i] + reg); +} +static inline void dsi_wreg(int i, u32 reg, u32 val) +{ + writel(val, dsiio[i] + reg); +} + +#define dsi_rfld(__i, __reg, __fld) \ +({ \ + const u32 mask = __reg##_##__fld##_MASK; \ + const u32 shift = __reg##_##__fld##_SHIFT; \ + ((dsi_rreg(__i, __reg) & mask) >> shift); \ +}) + +#define dsi_wfld(__i, __reg, __fld, __val) \ +({ \ + const u32 mask = __reg##_##__fld##_MASK; \ + const u32 shift = __reg##_##__fld##_SHIFT; \ + const u32 oldval = dsi_rreg(__i, __reg); \ + const u32 newval = ((__val) << shift); \ + dsi_wreg(__i, __reg, (oldval & ~mask) | (newval & mask)); \ +}) + +static inline u32 mcde_rreg(u32 reg) +{ + return readl(mcdeio + reg); +} +static inline void mcde_wreg(u32 reg, u32 val) +{ + writel(val, mcdeio + reg); +} + + +#define mcde_rfld(__reg, __fld) \ +({ \ + const u32 mask = __reg##_##__fld##_MASK; \ + const u32 shift = __reg##_##__fld##_SHIFT; \ + ((mcde_rreg(__reg) & mask) >> shift); \ +}) + +#define mcde_wfld(__reg, __fld, __val) \ +({ \ + const u32 mask = __reg##_##__fld##_MASK; \ + const u32 shift = __reg##_##__fld##_SHIFT; \ + const u32 oldval = mcde_rreg(__reg); \ + const u32 newval = ((__val) << 
shift); \ + mcde_wreg(__reg, (oldval & ~mask) | (newval & mask)); \ +}) + +struct ovly_regs { + bool enabled; + bool dirty; + bool dirty_buf; + + u8 ch_id; + u32 baseaddress0; + u32 baseaddress1; + u8 bits_per_pixel; + u8 bpp; + bool bgr; + bool bebo; + bool opq; + u8 col_conv; + u8 alpha_source; + u8 alpha_value; + u8 pixoff; + u16 ppl; + u16 lpf; + u16 cropx; + u16 cropy; + u16 xpos; + u16 ypos; + u8 z; +}; + +struct mcde_ovly_state { + bool inuse; + u8 idx; /* MCDE overlay index */ + struct mcde_chnl_state *chnl; /* Owner channel */ + bool dirty; + bool dirty_buf; + + /* Staged settings */ + u32 paddr; + u16 stride; + enum mcde_ovly_pix_fmt pix_fmt; + + u16 src_x; + u16 src_y; + u16 dst_x; + u16 dst_y; + u16 dst_z; + u16 w; + u16 h; + + u8 alpha_source; + u8 alpha_value; + + /* Applied settings */ + struct ovly_regs regs; +}; + +static struct mcde_ovly_state *overlays; + +struct chnl_regs { + bool dirty; + + bool floen; + u16 x; + u16 y; + u16 ppl; + u16 lpf; + u8 bpp; + bool internal_clk; /* CLKTYPE field */ + u16 pcd; + u8 clksel; + u8 cdwin; + u16 (*map_r)(u8); + u16 (*map_g)(u8); + u16 (*map_b)(u8); + bool palette_enable; + bool bcd; + bool roten; + u8 rotdir; + u32 rotbuf1; + u32 rotbuf2; + u32 rotbufsize; + + /* Blending */ + u8 blend_ctrl; + bool blend_en; + u8 alpha_blend; + + /* DSI */ + u8 dsipacking; +}; + +struct col_regs { + bool dirty; + + u16 y_red; + u16 y_green; + u16 y_blue; + u16 cb_red; + u16 cb_green; + u16 cb_blue; + u16 cr_red; + u16 cr_green; + u16 cr_blue; + u16 off_y; + u16 off_cb; + u16 off_cr; +}; + +struct tv_regs { + bool dirty; + + u16 dho; /* TV mode: left border width; destination horizontal offset */ + /* LCD MODE: horizontal back porch */ + u16 alw; /* TV mode: right border width */ + /* LCD mode: horizontal front porch */ + u16 hsw; /* horizontal synch width */ + u16 dvo; /* TV mode: top border width; destination horizontal offset */ + /* LCD MODE: vertical back porch */ + u16 bsl; /* TV mode: bottom border width; blanking 
start line */ + /* LCD MODE: vertical front porch */ + /* field 1 */ + u16 bel1; /* TV mode: field total vertical blanking lines */ + /* LCD mode: vertical sync width */ + u16 fsl1; /* field vbp */ + /* field 2 */ + u16 bel2; + u16 fsl2; + u8 tv_mode; + bool sel_mode_tv; + bool inv_clk; + bool interlaced_en; + u32 lcdtim1; +}; + +struct mcde_chnl_state { + bool enabled; + bool reserved; + enum mcde_chnl id; + enum mcde_fifo fifo; + struct mcde_port port; + struct mcde_ovly_state *ovly0; + struct mcde_ovly_state *ovly1; + enum chnl_state state; + wait_queue_head_t state_waitq; + wait_queue_head_t vcmp_waitq; + atomic_t vcmp_cnt; + struct timer_list dsi_te_timer; + struct clk *clk_dsi_lp; + struct clk *clk_dsi_hs; + struct clk *clk_dpi; + + enum mcde_display_power_mode power_mode; + + /* Staged settings */ + u16 (*map_r)(u8); + u16 (*map_g)(u8); + u16 (*map_b)(u8); + bool palette_enable; + struct mcde_video_mode vmode; + enum mcde_display_rotation rotation; + u32 rotbuf1; + u32 rotbuf2; + u32 rotbufsize; + + struct mcde_col_transform rgb_2_ycbcr; + struct mcde_col_transform ycbcr_2_rgb; + struct mcde_col_transform *transform; + + /* Blending */ + u8 blend_ctrl; + bool blend_en; + u8 alpha_blend; + + /* Applied settings */ + struct chnl_regs regs; + struct col_regs col_regs; + struct tv_regs tv_regs; + + /* an interlaced digital TV signal generates a VCMP per field */ + bool vcmp_per_field; + bool even_vcmp; + + bool formatter_updated; + bool esram_is_enabled; +}; + +static struct mcde_chnl_state *channels; +/* + * Wait for CSM_RUNNING, all data sent for display + */ +static inline void wait_while_dsi_running(int lnk) +{ + u8 counter = DSI_READ_TIMEOUT; + while (dsi_rfld(lnk, DSI_CMD_MODE_STS, CSM_RUNNING) && --counter) { + dev_vdbg(&mcde_dev->dev, + "%s: DSI link %u read running state retry %u times\n" + , __func__, lnk, (DSI_READ_TIMEOUT - counter)); + udelay(DSI_READ_DELAY); + } + WARN_ON(!counter); + if (!counter) + dev_warn(&mcde_dev->dev, + "%s: DSI link %u read 
timeout!\n", __func__, lnk); +} + +static void enable_clocks_and_power(struct platform_device *pdev) +{ + struct mcde_platform_data *pdata = pdev->dev.platform_data; + + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + /* VANA should be enabled before a DSS hard reset */ + if (regulator_vana) + WARN_ON_ONCE(regulator_enable(regulator_vana)); + + WARN_ON_ONCE(regulator_enable(regulator_mcde_epod)); + + if (!dsi_use_clk_framework) + pdata->platform_set_clocks(); + + WARN_ON_ONCE(clk_enable(clock_mcde)); +} + +static void disable_clocks_and_power(struct platform_device *pdev) +{ + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + clk_disable(clock_mcde); + + WARN_ON_ONCE(regulator_disable(regulator_mcde_epod)); + + if (regulator_vana) + WARN_ON_ONCE(regulator_disable(regulator_vana)); +} + +static void update_mcde_registers(void) +{ + struct mcde_platform_data *pdata = mcde_dev->dev.platform_data; + + /* Setup output muxing */ + mcde_wreg(MCDE_CONF0, + MCDE_CONF0_IFIFOCTRLWTRMRKLVL(7) | + MCDE_CONF0_OUTMUX0(pdata->outmux[0]) | + MCDE_CONF0_OUTMUX1(pdata->outmux[1]) | + MCDE_CONF0_OUTMUX2(pdata->outmux[2]) | + MCDE_CONF0_OUTMUX3(pdata->outmux[3]) | + MCDE_CONF0_OUTMUX4(pdata->outmux[4]) | + pdata->syncmux); + + mcde_wfld(MCDE_RISPP, VCMPARIS, 1); + mcde_wfld(MCDE_RISPP, VCMPBRIS, 1); + mcde_wfld(MCDE_RISPP, VCMPC0RIS, 1); + mcde_wfld(MCDE_RISPP, VCMPC1RIS, 1); + + /* Enable channel VCMP interrupts */ + mcde_wreg(MCDE_IMSCPP, + MCDE_IMSCPP_VCMPAIM(true) | + MCDE_IMSCPP_VCMPBIM(true) | + MCDE_IMSCPP_VCMPC0IM(true) | + MCDE_IMSCPP_VCMPC1IM(true)); + + mcde_wreg(MCDE_IMSCCHNL, MCDE_IMSCCHNL_CHNLAIM(0xf)); + mcde_wreg(MCDE_IMSCERR, 0xFFFF01FF); + + /* Setup sync pulse length + * Setting VSPMAX=0 disables the filter and VSYNC + * is generated after VSPMIN mcde cycles + */ + mcde_wreg(MCDE_VSCRC0, + MCDE_VSCRC0_VSPMIN(0) | + MCDE_VSCRC0_VSPMAX(0)); + mcde_wreg(MCDE_VSCRC1, + MCDE_VSCRC1_VSPMIN(1) | + MCDE_VSCRC1_VSPMAX(0xff)); +} + +static void dsi_link_handle_reset(u8 link, 
bool release) +{ + u32 value; + + value = prcmu_read(DB8500_PRCM_DSI_SW_RESET); + if (release) { + switch (link) { + case 0: + value |= DB8500_PRCM_DSI_SW_RESET_DSI0_SW_RESETN; + break; + case 1: + value |= DB8500_PRCM_DSI_SW_RESET_DSI1_SW_RESETN; + break; + case 2: + value |= DB8500_PRCM_DSI_SW_RESET_DSI2_SW_RESETN; + break; + default: + break; + } + } else { + switch (link) { + case 0: + value &= ~DB8500_PRCM_DSI_SW_RESET_DSI0_SW_RESETN; + break; + case 1: + value &= ~DB8500_PRCM_DSI_SW_RESET_DSI1_SW_RESETN; + break; + case 2: + value &= ~DB8500_PRCM_DSI_SW_RESET_DSI2_SW_RESETN; + break; + default: + break; + } + } + prcmu_write(DB8500_PRCM_DSI_SW_RESET, value); +} + +static void dsi_link_switch_byte_clk(u8 link, bool to_system_clock) +{ + u32 value; + + value = prcmu_read(DB8500_PRCM_DSI_GLITCHFREE_EN); + if (to_system_clock) { + switch (link) { + case 0: + value |= DB8500_PRCM_DSI_GLITCHFREE_EN_DSI0_BYTE_CLK; + break; + case 1: + value |= DB8500_PRCM_DSI_GLITCHFREE_EN_DSI1_BYTE_CLK; + break; + case 2: + value |= DB8500_PRCM_DSI_GLITCHFREE_EN_DSI2_BYTE_CLK; + break; + default: + break; + } + } else { + switch (link) { + case 0: + value &= ~DB8500_PRCM_DSI_GLITCHFREE_EN_DSI0_BYTE_CLK; + break; + case 1: + value &= ~DB8500_PRCM_DSI_GLITCHFREE_EN_DSI1_BYTE_CLK; + break; + case 2: + value &= ~DB8500_PRCM_DSI_GLITCHFREE_EN_DSI2_BYTE_CLK; + break; + default: + break; + } + + } + prcmu_write(DB8500_PRCM_DSI_GLITCHFREE_EN, value); + dsi_wfld(link, DSI_MCTL_PLL_CTL, PLL_OUT_SEL, to_system_clock); +} + +static void dsi_link_handle_ulpm(struct mcde_port *port, bool enter_ulpm) +{ + u8 link = port->link; + u8 num_data_lanes = port->phy.dsi.num_data_lanes; + u8 nbr_of_retries = 0; + u8 lane_state; + + /* + * The D-PHY protocol specifies the time to leave the ULP mode + * in ms. It will at least take 1 ms to exit ULPM. + * The ULPOUT time value is using number of system clock ticks + * divided by 1000. The system clock for the DSI link is the MCDE + * clock. 
+ */ + dsi_wreg(link, DSI_MCTL_ULPOUT_TIME, + DSI_MCTL_ULPOUT_TIME_CKLANE_ULPOUT_TIME(0x1FF) | + DSI_MCTL_ULPOUT_TIME_DATA_ULPOUT_TIME(0x1FF)); + + if (enter_ulpm) { + lane_state = DSI_LANE_STATE_ULPM; + dsi_link_switch_byte_clk(link, true); + } + + dsi_wfld(link, DSI_MCTL_MAIN_EN, DAT1_ULPM_REQ, enter_ulpm); + dsi_wfld(link, DSI_MCTL_MAIN_EN, DAT2_ULPM_REQ, + enter_ulpm && num_data_lanes == 2); + dsi_wfld(link, DSI_MCTL_MAIN_EN, CLKLANE_ULPM_REQ, enter_ulpm); + + if (!enter_ulpm) { + lane_state = DSI_LANE_STATE_IDLE; + dsi_link_switch_byte_clk(link, false); + } + + /* Wait for data lanes to enter ULPM */ + while (dsi_rfld(link, DSI_MCTL_LANE_STS, DATLANE1_STATE) + != lane_state || + (dsi_rfld(link, DSI_MCTL_LANE_STS, DATLANE2_STATE) + != lane_state && + num_data_lanes > 1)) { + mdelay(DSI_WAIT_FOR_ULPM_STATE_MS); + if (nbr_of_retries++ == DSI_ULPM_STATE_NBR_OF_RETRIES) { + dev_dbg(&mcde_dev->dev, + "Could not enter correct state=%d (link=%d)!\n", + lane_state, link); + break; + } + } + + nbr_of_retries = 0; + /* Wait for clock lane to enter ULPM */ + while (dsi_rfld(link, DSI_MCTL_LANE_STS, CLKLANE_STATE) + != lane_state) { + mdelay(DSI_WAIT_FOR_ULPM_STATE_MS); + if (nbr_of_retries++ == DSI_ULPM_STATE_NBR_OF_RETRIES) { + dev_dbg(&mcde_dev->dev, + "Could not enter correct state=%d (link=%d)!\n", + lane_state, link); + break; + } + } +} + +static int dsi_link_enable(struct mcde_chnl_state *chnl) +{ + int ret = 0; + u8 link = chnl->port.link; + + if (dsi_use_clk_framework) { + WARN_ON_ONCE(clk_enable(chnl->clk_dsi_lp)); + WARN_ON_ONCE(clk_enable(chnl->clk_dsi_hs)); + dsi_link_handle_reset(link, true); + } else { + WARN_ON_ONCE(clk_enable(clock_dsi)); + WARN_ON_ONCE(clk_enable(clock_dsi_lp)); + + if (!dsi_pll_is_enabled) { + struct mcde_platform_data *pdata = + mcde_dev->dev.platform_data; + ret = pdata->platform_enable_dsipll(); + if (ret < 0) { + dev_warn(&mcde_dev->dev, "%s: " + "enable_dsipll failed ret = %d\n", + __func__, ret); + goto enable_dsipll_err; + } + 
dev_dbg(&mcde_dev->dev, "%s enable dsipll\n", + __func__); + } + dsi_pll_is_enabled++; + } + + dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, LINK_EN, true); + + dev_dbg(&mcde_dev->dev, "DSI%d LINK_EN\n", link); + + return 0; + +enable_dsipll_err: + clk_disable(clock_dsi_lp); + clk_disable(clock_dsi); + return ret; +} + +static void dsi_link_disable(struct mcde_chnl_state *chnl, bool suspend) +{ + wait_while_dsi_running(chnl->port.link); + dsi_link_handle_ulpm(&chnl->port, true); + if (dsi_use_clk_framework) { + clk_disable(chnl->clk_dsi_lp); + clk_disable(chnl->clk_dsi_hs); + } else { + if (dsi_pll_is_enabled && (--dsi_pll_is_enabled == 0)) { + struct mcde_platform_data *pdata = + mcde_dev->dev.platform_data; + dev_dbg(&mcde_dev->dev, "%s disable dsipll\n", + __func__); + pdata->platform_disable_dsipll(); + } + clk_disable(clock_dsi); + clk_disable(clock_dsi_lp); + } +} + +static void disable_mcde_hw(bool force_disable, bool suspend) +{ + int i; + bool mcde_up = false; + + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + if (!mcde_is_enabled) + return; + + for (i = 0; i < num_channels; i++) { + struct mcde_chnl_state *chnl = &channels[i]; + if (force_disable || (chnl->enabled && + chnl->state != CHNLSTATE_RUNNING)) { + stop_channel(chnl); + set_channel_state_sync(chnl, CHNLSTATE_SUSPEND); + + if (chnl->formatter_updated) { + if (chnl->port.type == MCDE_PORTTYPE_DSI) + dsi_link_disable(chnl, suspend); + else if (chnl->port.type == MCDE_PORTTYPE_DPI) + clk_disable(chnl->clk_dpi); + chnl->formatter_updated = false; + } + if (chnl->esram_is_enabled) { + WARN_ON_ONCE(regulator_disable( + regulator_esram_epod)); + chnl->esram_is_enabled = false; + } + } else if (chnl->enabled && chnl->state == CHNLSTATE_RUNNING) { + mcde_up = true; + } + } + + if (mcde_up) + return; + + free_irq(mcde_irq, &mcde_dev->dev); + + disable_clocks_and_power(mcde_dev); + + mcde_is_enabled = false; +} + +static void dpi_video_mode_apply(struct mcde_chnl_state *chnl) +{ + dev_vdbg(&mcde_dev->dev, "%s\n", 
__func__); + chnl->tv_regs.interlaced_en = chnl->vmode.interlaced; + + chnl->tv_regs.sel_mode_tv = chnl->port.phy.dpi.tv_mode; + if (chnl->tv_regs.sel_mode_tv) { + /* TV mode */ + u32 bel; + /* -4 since hsw is excluding SAV/EAV, 2 bytes each */ + chnl->tv_regs.hsw = chnl->vmode.hbp + chnl->vmode.hfp - 4; + /* vbp_field2 = vbp_field1 + 1 */ + chnl->tv_regs.fsl1 = chnl->vmode.vbp / 2; + chnl->tv_regs.fsl2 = chnl->vmode.vbp - chnl->tv_regs.fsl1; + /* +1 since vbp_field2 = vbp_field1 + 1 */ + bel = chnl->vmode.vbp + chnl->vmode.vfp; + /* in TV mode: bel2 = bel1 + 1 */ + chnl->tv_regs.bel1 = bel / 2; + chnl->tv_regs.bel2 = bel - chnl->tv_regs.bel1; + if (chnl->port.phy.dpi.bus_width == 4) + chnl->tv_regs.tv_mode = MCDE_TVCRA_TVMODE_SDTV_656P_BE; + else + chnl->tv_regs.tv_mode = MCDE_TVCRA_TVMODE_SDTV_656P; + chnl->tv_regs.inv_clk = true; + } else { + /* LCD mode */ + u32 polarity; + chnl->tv_regs.hsw = chnl->vmode.hsw; + chnl->tv_regs.dho = chnl->vmode.hbp; + chnl->tv_regs.alw = chnl->vmode.hfp; + chnl->tv_regs.bel1 = chnl->vmode.vsw; + chnl->tv_regs.bel2 = chnl->tv_regs.bel1; + chnl->tv_regs.dvo = chnl->vmode.vbp; + chnl->tv_regs.bsl = chnl->vmode.vfp; + chnl->tv_regs.fsl1 = 0; + chnl->tv_regs.fsl2 = 0; + polarity = chnl->port.phy.dpi.polarity; + chnl->tv_regs.lcdtim1 = MCDE_LCDTIM1A_IHS( + (polarity & DPI_ACT_LOW_HSYNC) != 0); + chnl->tv_regs.lcdtim1 |= MCDE_LCDTIM1A_IVS( + (polarity & DPI_ACT_LOW_VSYNC) != 0); + chnl->tv_regs.lcdtim1 |= MCDE_LCDTIM1A_IOE( + (polarity & DPI_ACT_LOW_DATA_ENABLE) != 0); + chnl->tv_regs.lcdtim1 |= MCDE_LCDTIM1A_IPC( + (polarity & DPI_ACT_ON_FALLING_EDGE) != 0); + } + chnl->tv_regs.dirty = true; +} + +static void update_dpi_registers(enum mcde_chnl chnl_id, struct tv_regs *regs) +{ + u8 idx = chnl_id; + + dev_dbg(&mcde_dev->dev, "%s\n", __func__); + mcde_wreg(MCDE_TVCRA + idx * MCDE_TVCRA_GROUPOFFSET, + MCDE_TVCRA_SEL_MOD(regs->sel_mode_tv) | + MCDE_TVCRA_INTEREN(regs->interlaced_en) | + MCDE_TVCRA_IFIELD(0) | + 
MCDE_TVCRA_TVMODE(regs->tv_mode) | + MCDE_TVCRA_SDTVMODE(MCDE_TVCRA_SDTVMODE_Y0CBY1CR) | + MCDE_TVCRA_CKINV(regs->inv_clk) | + MCDE_TVCRA_AVRGEN(0)); + mcde_wreg(MCDE_TVBLUA + idx * MCDE_TVBLUA_GROUPOFFSET, + MCDE_TVBLUA_TVBLU(MCDE_CONFIG_TVOUT_BACKGROUND_LUMINANCE) | + MCDE_TVBLUA_TVBCB(MCDE_CONFIG_TVOUT_BACKGROUND_CHROMINANCE_CB)| + MCDE_TVBLUA_TVBCR(MCDE_CONFIG_TVOUT_BACKGROUND_CHROMINANCE_CR)); + + /* Vertical timing registers */ + mcde_wreg(MCDE_TVDVOA + idx * MCDE_TVDVOA_GROUPOFFSET, + MCDE_TVDVOA_DVO1(regs->dvo) | + MCDE_TVDVOA_DVO2(regs->dvo)); + mcde_wreg(MCDE_TVBL1A + idx * MCDE_TVBL1A_GROUPOFFSET, + MCDE_TVBL1A_BEL1(regs->bel1) | + MCDE_TVBL1A_BSL1(regs->bsl)); + mcde_wreg(MCDE_TVBL2A + idx * MCDE_TVBL1A_GROUPOFFSET, + MCDE_TVBL2A_BEL2(regs->bel2) | + MCDE_TVBL2A_BSL2(regs->bsl)); + mcde_wreg(MCDE_TVISLA + idx * MCDE_TVISLA_GROUPOFFSET, + MCDE_TVISLA_FSL1(regs->fsl1) | + MCDE_TVISLA_FSL2(regs->fsl2)); + + /* Horizontal timing registers */ + mcde_wreg(MCDE_TVLBALWA + idx * MCDE_TVLBALWA_GROUPOFFSET, + MCDE_TVLBALWA_LBW(regs->hsw) | + MCDE_TVLBALWA_ALW(regs->alw)); + mcde_wreg(MCDE_TVTIM1A + idx * MCDE_TVTIM1A_GROUPOFFSET, + MCDE_TVTIM1A_DHO(regs->dho)); + if (!regs->sel_mode_tv) + mcde_wreg(MCDE_LCDTIM1A + idx * MCDE_LCDTIM1A_GROUPOFFSET, + regs->lcdtim1); + regs->dirty = false; +} + +static void update_col_registers(enum mcde_chnl chnl_id, struct col_regs *regs) +{ + u8 idx = chnl_id; + + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + mcde_wreg(MCDE_RGBCONV1A + idx * MCDE_RGBCONV1A_GROUPOFFSET, + MCDE_RGBCONV1A_YR_RED(regs->y_red) | + MCDE_RGBCONV1A_YR_GREEN(regs->y_green)); + mcde_wreg(MCDE_RGBCONV2A + idx * MCDE_RGBCONV2A_GROUPOFFSET, + MCDE_RGBCONV2A_YR_BLUE(regs->y_blue) | + MCDE_RGBCONV2A_CR_RED(regs->cr_red)); + mcde_wreg(MCDE_RGBCONV3A + idx * MCDE_RGBCONV3A_GROUPOFFSET, + MCDE_RGBCONV3A_CR_GREEN(regs->cr_green) | + MCDE_RGBCONV3A_CR_BLUE(regs->cr_blue)); + mcde_wreg(MCDE_RGBCONV4A + idx * MCDE_RGBCONV4A_GROUPOFFSET, + 
MCDE_RGBCONV4A_CB_RED(regs->cb_red) | + MCDE_RGBCONV4A_CB_GREEN(regs->cb_green)); + mcde_wreg(MCDE_RGBCONV5A + idx * MCDE_RGBCONV5A_GROUPOFFSET, + MCDE_RGBCONV5A_CB_BLUE(regs->cb_blue) | + MCDE_RGBCONV5A_OFF_RED(regs->off_cr)); + mcde_wreg(MCDE_RGBCONV6A + idx * MCDE_RGBCONV6A_GROUPOFFSET, + MCDE_RGBCONV6A_OFF_GREEN(regs->off_y) | + MCDE_RGBCONV6A_OFF_BLUE(regs->off_cb)); + regs->dirty = false; +} + +/* MCDE internal helpers */ +static u8 portfmt2dsipacking(enum mcde_port_pix_fmt pix_fmt) +{ + switch (pix_fmt) { + case MCDE_PORTPIXFMT_DSI_16BPP: + return MCDE_DSIVID0CONF0_PACKING_RGB565; + case MCDE_PORTPIXFMT_DSI_18BPP_PACKED: + return MCDE_DSIVID0CONF0_PACKING_RGB666; + case MCDE_PORTPIXFMT_DSI_18BPP: + case MCDE_PORTPIXFMT_DSI_24BPP: + default: + return MCDE_DSIVID0CONF0_PACKING_RGB888; + case MCDE_PORTPIXFMT_DSI_YCBCR422: + return MCDE_DSIVID0CONF0_PACKING_HDTV; + } +} + +static u8 portfmt2bpp(enum mcde_port_pix_fmt pix_fmt) +{ + /* TODO: Check DPI spec *//* REVIEW: Remove or check */ + switch (pix_fmt) { + case MCDE_PORTPIXFMT_DPI_16BPP_C1: + case MCDE_PORTPIXFMT_DPI_16BPP_C2: + case MCDE_PORTPIXFMT_DPI_16BPP_C3: + case MCDE_PORTPIXFMT_DSI_16BPP: + case MCDE_PORTPIXFMT_DSI_YCBCR422: + return 16; + case MCDE_PORTPIXFMT_DPI_18BPP_C1: + case MCDE_PORTPIXFMT_DPI_18BPP_C2: + case MCDE_PORTPIXFMT_DSI_18BPP_PACKED: + return 18; + case MCDE_PORTPIXFMT_DSI_18BPP: + case MCDE_PORTPIXFMT_DPI_24BPP: + case MCDE_PORTPIXFMT_DSI_24BPP: + return 24; + default: + return 1; + } +} + +static u8 bpp2outbpp(u8 bpp) +{ + switch (bpp) { + case 16: + return MCDE_CRA1_OUTBPP_16BPP; + case 18: + return MCDE_CRA1_OUTBPP_18BPP; + case 24: + return MCDE_CRA1_OUTBPP_24BPP; + default: + return 0; + } +} + +static u8 portfmt2cdwin(enum mcde_port_pix_fmt pix_fmt) +{ + switch (pix_fmt) { + case MCDE_PORTPIXFMT_DPI_16BPP_C1: + return MCDE_CRA1_CDWIN_16BPP_C1; + case MCDE_PORTPIXFMT_DPI_16BPP_C2: + return MCDE_CRA1_CDWIN_16BPP_C2; + case MCDE_PORTPIXFMT_DPI_16BPP_C3: + return 
MCDE_CRA1_CDWIN_16BPP_C3; + case MCDE_PORTPIXFMT_DPI_18BPP_C1: + return MCDE_CRA1_CDWIN_18BPP_C1; + case MCDE_PORTPIXFMT_DPI_18BPP_C2: + return MCDE_CRA1_CDWIN_18BPP_C2; + case MCDE_PORTPIXFMT_DPI_24BPP: + return MCDE_CRA1_CDWIN_24BPP; + default: + /* only DPI formats are relevant */ + return 0; + } +} + +static u32 get_output_fifo_size(enum mcde_fifo fifo) +{ + u32 ret = 1; /* Avoid div by zero */ + + switch (fifo) { + case MCDE_FIFO_A: + case MCDE_FIFO_B: + ret = output_fifo_ab_size; + break; + case MCDE_FIFO_C0: + case MCDE_FIFO_C1: + ret = output_fifo_c0c1_size; + break; + default: + dev_warn(&mcde_dev->dev, "Unsupported fifo"); + break; + } + return ret; +} + +static inline u8 get_dsi_formatter_id(const struct mcde_port *port) +{ + if (dsi_ifc_is_supported) + return 2 * port->link + port->ifc; + else + return port->link; +} + +static struct mcde_chnl_state *find_channel_by_dsilink(int link) +{ + struct mcde_chnl_state *chnl = &channels[0]; + for (; chnl < &channels[num_channels]; chnl++) + if (chnl->enabled && chnl->port.link == link && + chnl->port.type == MCDE_PORTTYPE_DSI) + return chnl; + return NULL; +} + +static inline void mcde_handle_vcmp(struct mcde_chnl_state *chnl) +{ + if (!chnl->vcmp_per_field || + (chnl->vcmp_per_field && chnl->even_vcmp)) { + atomic_inc(&chnl->vcmp_cnt); + if (chnl->state == CHNLSTATE_STOPPING) + set_channel_state_atomic(chnl, CHNLSTATE_STOPPED); + wake_up_all(&chnl->vcmp_waitq); + } + chnl->even_vcmp = !chnl->even_vcmp; +} + +static void handle_dsi_irq(struct mcde_chnl_state *chnl, int i) +{ + u32 irq_status = dsi_rfld(i, DSI_DIRECT_CMD_STS_FLAG, TE_RECEIVED_FLAG); + if (irq_status) { + dsi_wreg(i, DSI_DIRECT_CMD_STS_CLR, + DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR(true)); + dev_vdbg(&mcde_dev->dev, "BTA TE DSI%d\n", i); + if (chnl->port.frame_trig == MCDE_TRIG_SW) + do_softwaretrig(chnl); + } + + irq_status = dsi_rfld(i, DSI_CMD_MODE_STS_FLAG, ERR_NO_TE_FLAG); + if (irq_status) { + dsi_wreg(i, DSI_CMD_MODE_STS_CLR, + 
DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR(true)); + dev_warn(&mcde_dev->dev, "NO_TE DSI%d\n", i); + set_channel_state_atomic(chnl, CHNLSTATE_STOPPED); + } + + irq_status = dsi_rfld(i, DSI_DIRECT_CMD_STS, TRIGGER_RECEIVED); + if (irq_status) { + /* DSI TE polling answer received */ + dsi_wreg(i, DSI_DIRECT_CMD_STS_CLR, + DSI_DIRECT_CMD_STS_CLR_TRIGGER_RECEIVED_CLR(true)); + + /* Reset TE watchdog timer */ + if (chnl->port.sync_src == MCDE_SYNCSRC_TE_POLLING) + dsi_te_poll_set_timer(chnl, DSI_TE_NO_ANSWER_TIMEOUT); + } +} + +static irqreturn_t mcde_irq_handler(int irq, void *dev) +{ + int i; + u32 irq_status; + + irq_status = mcde_rreg(MCDE_MISCHNL); + if (irq_status) { + dev_err(&mcde_dev->dev, "chnl error=%.8x\n", irq_status); + mcde_wreg(MCDE_RISCHNL, irq_status); + } + irq_status = mcde_rreg(MCDE_MISERR); + if (irq_status) { + dev_err(&mcde_dev->dev, "error=%.8x\n", irq_status); + mcde_wreg(MCDE_RISERR, irq_status); + } + + /* Handle channel irqs */ + irq_status = mcde_rreg(MCDE_RISPP); + if (irq_status & MCDE_RISPP_VCMPARIS_MASK) + mcde_handle_vcmp(&channels[MCDE_CHNL_A]); + if (irq_status & MCDE_RISPP_VCMPBRIS_MASK) + mcde_handle_vcmp(&channels[MCDE_CHNL_B]); + if (irq_status & MCDE_RISPP_VCMPC0RIS_MASK) + mcde_handle_vcmp(&channels[MCDE_CHNL_C0]); + if (irq_status & MCDE_RISPP_VCMPC1RIS_MASK) + mcde_handle_vcmp(&channels[MCDE_CHNL_C1]); + mcde_wreg(MCDE_RISPP, irq_status); + + for (i = 0; i < num_dsilinks; i++) { + struct mcde_chnl_state *chnl_from_dsi; + + chnl_from_dsi = find_channel_by_dsilink(i); + + if (chnl_from_dsi == NULL) + continue; + + handle_dsi_irq(chnl_from_dsi, i); + } + + return IRQ_HANDLED; +} + +/* Transitions allowed: WAIT_TE -> UPDATE -> STOPPING */ +static int set_channel_state_atomic(struct mcde_chnl_state *chnl, + enum chnl_state state) +{ + enum chnl_state chnl_state = chnl->state; + + dev_dbg(&mcde_dev->dev, "Channel state change" + " (chnl=%d, old=%d, new=%d)\n", chnl->id, chnl_state, state); + + if ((chnl_state == CHNLSTATE_SETUP && state 
== CHNLSTATE_WAIT_TE) || + (chnl_state == CHNLSTATE_SETUP && state == CHNLSTATE_RUNNING) || + (chnl_state == CHNLSTATE_WAIT_TE && state == CHNLSTATE_RUNNING) || + (chnl_state == CHNLSTATE_RUNNING && state == CHNLSTATE_STOPPING)) { + /* Set wait TE, running, or stopping state */ + chnl->state = state; + return 0; + } else if ((chnl_state == CHNLSTATE_STOPPING && + state == CHNLSTATE_STOPPED) || + (chnl_state == CHNLSTATE_WAIT_TE && + state == CHNLSTATE_STOPPED)) { + /* Set stopped state */ + chnl->state = state; + wake_up_all(&chnl->state_waitq); + return 0; + } else if (state == CHNLSTATE_IDLE) { + /* Set idle state */ + WARN_ON_ONCE(chnl_state != CHNLSTATE_DSI_READ && + chnl_state != CHNLSTATE_DSI_WRITE && + chnl_state != CHNLSTATE_SUSPEND); + chnl->state = state; + wake_up_all(&chnl->state_waitq); + return 0; + } else { + /* Invalid atomic state transition */ + dev_warn(&mcde_dev->dev, "Channel state change error (chnl=%d," + " old=%d, new=%d)\n", chnl->id, chnl_state, state); + WARN_ON_ONCE(true); + return -EINVAL; + } +} + +/* LOCKING: mcde_hw_lock */ +static int set_channel_state_sync(struct mcde_chnl_state *chnl, + enum chnl_state state) +{ + int ret = 0; + enum chnl_state chnl_state = chnl->state; + + dev_dbg(&mcde_dev->dev, "Channel state change" + " (chnl=%d, old=%d, new=%d)\n", chnl->id, chnl->state, state); + + /* No change */ + if (chnl_state == state) + return 0; + + /* Wait for IDLE before changing state */ + if (chnl_state != CHNLSTATE_IDLE) { + ret = wait_event_timeout(chnl->state_waitq, + /* STOPPED -> IDLE is manual, so wait for both */ + chnl->state == CHNLSTATE_STOPPED || + chnl->state == CHNLSTATE_IDLE, + msecs_to_jiffies(CHNL_TIMEOUT)); + if (WARN_ON_ONCE(!ret)) + dev_warn(&mcde_dev->dev, "Wait for channel timeout " + "(chnl=%d, curr=%d, new=%d)\n", + chnl->id, chnl->state, state); + chnl_state = chnl->state; + } + + /* Do manual transition from STOPPED to IDLE */ + if (chnl_state == CHNLSTATE_STOPPED) + wait_for_flow_disabled(chnl); + + /* 
State is IDLE, do transition to new state */ + chnl->state = state; + + return ret; +} + +static int wait_for_vcmp(struct mcde_chnl_state *chnl) +{ + u64 vcmp = atomic_read(&chnl->vcmp_cnt) + 1; + int ret = wait_event_timeout(chnl->vcmp_waitq, + atomic_read(&chnl->vcmp_cnt) >= vcmp, + msecs_to_jiffies(CHNL_TIMEOUT)); + return ret; +} + +static void get_vid_operating_mode(const struct mcde_port *port, + bool *burst_mode, bool *sync_is_pulse, bool *tvg_enable) +{ + switch (port->phy.dsi.vid_mode) { + case NON_BURST_MODE_WITH_SYNC_EVENT: + *burst_mode = false; + *sync_is_pulse = false; + *tvg_enable = false; + break; + case NON_BURST_MODE_WITH_SYNC_EVENT_TVG_ENABLED: + *burst_mode = false; + *sync_is_pulse = false; + *tvg_enable = true; + break; + case BURST_MODE_WITH_SYNC_EVENT: + *burst_mode = true; + *sync_is_pulse = false; + *tvg_enable = false; + break; + case BURST_MODE_WITH_SYNC_PULSE: + *burst_mode = true; + *sync_is_pulse = true; + *tvg_enable = false; + break; + default: + dev_err(&mcde_dev->dev, "Unsupported video mode"); + break; + } +} + +static void update_vid_static_registers(const struct mcde_port *port) +{ + u8 link = port->link; + bool burst_mode, sync_is_pulse, tvg_enable; + + get_vid_operating_mode(port, &burst_mode, &sync_is_pulse, &tvg_enable); + + /* burst mode or non-burst mode */ + dsi_wfld(link, DSI_VID_MAIN_CTL, BURST_MODE, burst_mode); + + /* sync is pulse or event */ + dsi_wfld(link, DSI_VID_MAIN_CTL, SYNC_PULSE_ACTIVE, sync_is_pulse); + dsi_wfld(link, DSI_VID_MAIN_CTL, SYNC_PULSE_HORIZONTAL, sync_is_pulse); + + /* disable video stream when using TVG */ + if (tvg_enable) { + dsi_wfld(link, DSI_MCTL_MAIN_EN, IF1_EN, false); + dsi_wfld(link, DSI_MCTL_MAIN_EN, IF2_EN, false); + } + + /* + * behavior during blanking time + * 00: NULL packet 1x:LP 01:blanking-packet + */ + dsi_wfld(link, DSI_VID_MAIN_CTL, REG_BLKLINE_MODE, 1); + + /* + * behavior during eol + * 00: NULL packet 1x:LP 01:blanking-packet + */ + dsi_wfld(link, DSI_VID_MAIN_CTL, 
REG_BLKEOL_MODE, 2); + + /* time to perform LP->HS on D-PHY */ + dsi_wfld(link, DSI_VID_DPHY_TIME, REG_WAKEUP_TIME, + port->phy.dsi.vid_wakeup_time); + + /* + * video stream starts on VSYNC packet + * and stops at the end of a frame + */ + dsi_wfld(link, DSI_VID_MAIN_CTL, VID_ID, port->phy.dsi.virt_id); + dsi_wfld(link, DSI_VID_MAIN_CTL, START_MODE, 0); + dsi_wfld(link, DSI_VID_MAIN_CTL, STOP_MODE, 0); + + /* 1: if1 in video mode, 0: if1 in command mode */ + dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, IF1_MODE, 1); + + /* 1: enables the link, 0: disables the link */ + dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, VID_EN, 1); +} + +static int update_channel_static_registers(struct mcde_chnl_state *chnl) +{ + const struct mcde_port *port = &chnl->port; + + switch (chnl->fifo) { + case MCDE_FIFO_A: + mcde_wreg(MCDE_CHNL0MUXING + chnl->id * + MCDE_CHNL0MUXING_GROUPOFFSET, + MCDE_CHNL0MUXING_FIFO_ID_ENUM(FIFO_A)); + if (port->type == MCDE_PORTTYPE_DPI) { + mcde_wfld(MCDE_CTRLA, FORMTYPE, + MCDE_CTRLA_FORMTYPE_DPITV); + mcde_wfld(MCDE_CTRLA, FORMID, port->link); + } else if (port->type == MCDE_PORTTYPE_DSI) { + mcde_wfld(MCDE_CTRLA, FORMTYPE, + MCDE_CTRLA_FORMTYPE_DSI); + mcde_wfld(MCDE_CTRLA, FORMID, + get_dsi_formatter_id(port)); + } + break; + case MCDE_FIFO_B: + mcde_wreg(MCDE_CHNL0MUXING + chnl->id * + MCDE_CHNL0MUXING_GROUPOFFSET, + MCDE_CHNL0MUXING_FIFO_ID_ENUM(FIFO_B)); + if (port->type == MCDE_PORTTYPE_DPI) { + mcde_wfld(MCDE_CTRLB, FORMTYPE, + MCDE_CTRLB_FORMTYPE_DPITV); + mcde_wfld(MCDE_CTRLB, FORMID, port->link); + } else if (port->type == MCDE_PORTTYPE_DSI) { + mcde_wfld(MCDE_CTRLB, FORMTYPE, + MCDE_CTRLB_FORMTYPE_DSI); + mcde_wfld(MCDE_CTRLB, FORMID, + get_dsi_formatter_id(port)); + } + + break; + case MCDE_FIFO_C0: + mcde_wreg(MCDE_CHNL0MUXING + chnl->id * + MCDE_CHNL0MUXING_GROUPOFFSET, + MCDE_CHNL0MUXING_FIFO_ID_ENUM(FIFO_C0)); + if (port->type == MCDE_PORTTYPE_DPI) + return -EINVAL; + mcde_wfld(MCDE_CTRLC0, FORMTYPE, + MCDE_CTRLC0_FORMTYPE_DSI); + 
mcde_wfld(MCDE_CTRLC0, FORMID, get_dsi_formatter_id(port)); + break; + case MCDE_FIFO_C1: + mcde_wreg(MCDE_CHNL0MUXING + chnl->id * + MCDE_CHNL0MUXING_GROUPOFFSET, + MCDE_CHNL0MUXING_FIFO_ID_ENUM(FIFO_C1)); + if (port->type == MCDE_PORTTYPE_DPI) + return -EINVAL; + mcde_wfld(MCDE_CTRLC1, FORMTYPE, + MCDE_CTRLC1_FORMTYPE_DSI); + mcde_wfld(MCDE_CTRLC1, FORMID, get_dsi_formatter_id(port)); + break; + default: + return -EINVAL; + } + + /* Formatter */ + if (port->type == MCDE_PORTTYPE_DSI) { + int i = 0; + u8 idx; + u8 lnk = port->link; + + idx = get_dsi_formatter_id(port); + + if (dsi_link_enable(chnl)) + goto failed_to_enable_link; + + if (port->sync_src == MCDE_SYNCSRC_TE_POLLING) { + /* Enable DSI TE polling */ + dsi_te_poll_req(chnl); + + /* Set timer to detect non TE answer */ + dsi_te_poll_set_timer(chnl, + DSI_TE_NO_ANSWER_TIMEOUT_INIT); + } else { + dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, BTA_EN, true); + dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, READ_EN, true); + dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, REG_TE_EN, true); + } + + dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, HOST_EOT_GEN, + port->phy.dsi.host_eot_gen); + + dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, DLX_REMAP_EN, + port->phy.dsi.data_lanes_swap); + + dsi_wreg(lnk, DSI_MCTL_DPHY_STATIC, + DSI_MCTL_DPHY_STATIC_UI_X4(port->phy.dsi.ui)); + dsi_wreg(lnk, DSI_DPHY_LANES_TRIM, + DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_ENUM(0_90)); + dsi_wreg(lnk, DSI_MCTL_DPHY_TIMEOUT, + DSI_MCTL_DPHY_TIMEOUT_CLK_DIV(0xf) | + DSI_MCTL_DPHY_TIMEOUT_HSTX_TO_VAL(0x3fff) | + DSI_MCTL_DPHY_TIMEOUT_LPRX_TO_VAL(0x3fff)); + dsi_wreg(lnk, DSI_MCTL_MAIN_PHY_CTL, + DSI_MCTL_MAIN_PHY_CTL_WAIT_BURST_TIME(0xf) | + DSI_MCTL_MAIN_PHY_CTL_CLK_ULPM_EN(true) | + DSI_MCTL_MAIN_PHY_CTL_DAT1_ULPM_EN(true) | + DSI_MCTL_MAIN_PHY_CTL_DAT2_ULPM_EN(true) | + DSI_MCTL_MAIN_PHY_CTL_LANE2_EN( + port->phy.dsi.num_data_lanes >= 2) | + DSI_MCTL_MAIN_PHY_CTL_CLK_CONTINUOUS( + port->phy.dsi.clk_cont)); + /* TODO: make enum */ + dsi_wfld(lnk, DSI_CMD_MODE_CTL, ARB_MODE, 
false); + /* TODO: make enum */ + dsi_wfld(lnk, DSI_CMD_MODE_CTL, ARB_PRI, port->ifc == 1); + dsi_wreg(lnk, DSI_MCTL_MAIN_EN, + DSI_MCTL_MAIN_EN_PLL_START(true) | + DSI_MCTL_MAIN_EN_CKLANE_EN(true) | + DSI_MCTL_MAIN_EN_DAT1_EN(true) | + DSI_MCTL_MAIN_EN_DAT2_EN(port->phy.dsi.num_data_lanes + == 2) | + DSI_MCTL_MAIN_EN_IF1_EN(port->ifc == 0) | + DSI_MCTL_MAIN_EN_IF2_EN(port->ifc == 1)); + while (dsi_rfld(lnk, DSI_MCTL_MAIN_STS, CLKLANE_READY) == 0 || + dsi_rfld(lnk, DSI_MCTL_MAIN_STS, DAT1_READY) == 0 || + (dsi_rfld(lnk, DSI_MCTL_MAIN_STS, DAT2_READY) == 0 && + port->phy.dsi.num_data_lanes > 1)) { + mdelay(1); + if (i++ == 10) { + dev_warn(&mcde_dev->dev, + "DSI lane not ready (link=%d)!\n", lnk); + goto dsi_link_error; + } + } + + dsi_link_handle_ulpm(&chnl->port, false); + mcde_wreg(MCDE_DSIVID0CONF0 + + idx * MCDE_DSIVID0CONF0_GROUPOFFSET, + MCDE_DSIVID0CONF0_BLANKING(0) | + MCDE_DSIVID0CONF0_VID_MODE( + port->mode == MCDE_PORTMODE_VID) | + MCDE_DSIVID0CONF0_CMD8(true) | + MCDE_DSIVID0CONF0_BIT_SWAP(false) | + MCDE_DSIVID0CONF0_BYTE_SWAP(false) | + MCDE_DSIVID0CONF0_DCSVID_NOTGEN(true)); + + if (port->mode == MCDE_PORTMODE_VID) { + update_vid_static_registers(port); + } else { + if (port->ifc == 0) + dsi_wfld(port->link, DSI_CMD_MODE_CTL, IF1_ID, + port->phy.dsi.virt_id); + else if (port->ifc == 1) + dsi_wfld(port->link, DSI_CMD_MODE_CTL, IF2_ID, + port->phy.dsi.virt_id); + } + } + + if (port->type == MCDE_PORTTYPE_DPI) { + if (port->phy.dpi.lcd_freq != clk_round_rate(chnl->clk_dpi, + port->phy.dpi.lcd_freq)) + dev_warn(&mcde_dev->dev, "Could not set lcd freq" + " to %d\n", port->phy.dpi.lcd_freq); + WARN_ON_ONCE(clk_set_rate(chnl->clk_dpi, + port->phy.dpi.lcd_freq)); + WARN_ON_ONCE(clk_enable(chnl->clk_dpi)); + } + + mcde_wfld(MCDE_CR, MCDEEN, true); + chnl->formatter_updated = true; + + dev_vdbg(&mcde_dev->dev, "Static registers setup, chnl=%d\n", chnl->id); + + return 0; +dsi_link_error: + dsi_link_disable(chnl, true); +failed_to_enable_link: + return -EINVAL; 
+} + +void mcde_chnl_col_convert_apply(struct mcde_chnl_state *chnl, + struct mcde_col_transform *transform) +{ + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + if (chnl->transform != transform) { + + chnl->col_regs.y_red = transform->matrix[0][0]; + chnl->col_regs.y_green = transform->matrix[0][1]; + chnl->col_regs.y_blue = transform->matrix[0][2]; + chnl->col_regs.cb_red = transform->matrix[1][0]; + chnl->col_regs.cb_green = transform->matrix[1][1]; + chnl->col_regs.cb_blue = transform->matrix[1][2]; + chnl->col_regs.cr_red = transform->matrix[2][0]; + chnl->col_regs.cr_green = transform->matrix[2][1]; + chnl->col_regs.cr_blue = transform->matrix[2][2]; + chnl->col_regs.off_y = transform->offset[0]; + chnl->col_regs.off_cb = transform->offset[1]; + chnl->col_regs.off_cr = transform->offset[2]; + chnl->col_regs.dirty = true; + + chnl->transform = transform; + } + + dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__); +} + +static void chnl_ovly_pixel_format_apply(struct mcde_chnl_state *chnl, + struct mcde_ovly_state *ovly) +{ + struct mcde_port *port = &chnl->port; + struct ovly_regs *regs = &ovly->regs; + + /* Note: YUV -> YUV: blending YUV overlays will not make sense. 
*/ + static struct mcde_col_transform crycb_2_ycbcr = { + /* Note that in MCDE YUV 422 pixels come as VYU pixels */ + .matrix = { + {0x0000, 0x0100, 0x0000}, + {0x0000, 0x0000, 0x0100}, + {0x0100, 0x0000, 0x0000}, + }, + .offset = {0, 0, 0}, + }; + + if (port->type == MCDE_PORTTYPE_DSI) { + if (port->pixel_format != MCDE_PORTPIXFMT_DSI_YCBCR422) { + if (ovly->pix_fmt != MCDE_OVLYPIXFMT_YCbCr422) { + /* standard case: DSI: RGB -> RGB */ + regs->col_conv = MCDE_OVL0CR_COLCCTRL_DISABLED; + } else { + /* DSI: YUV -> RGB */ + /* TODO change matrix */ + regs->col_conv = + MCDE_OVL0CR_COLCCTRL_ENABLED_SAT; + mcde_chnl_col_convert_apply(chnl, + &chnl->ycbcr_2_rgb); + } + } else { + if (ovly->pix_fmt != MCDE_OVLYPIXFMT_YCbCr422) + /* DSI: RGB -> YUV */ + mcde_chnl_col_convert_apply(chnl, + &chnl->rgb_2_ycbcr); + else + /* DSI: YUV -> YUV */ + mcde_chnl_col_convert_apply(chnl, + &crycb_2_ycbcr); + regs->col_conv = MCDE_OVL0CR_COLCCTRL_ENABLED_NO_SAT; + } + } else if (port->type == MCDE_PORTTYPE_DPI && port->phy.dpi.tv_mode) { + regs->col_conv = MCDE_OVL0CR_COLCCTRL_ENABLED_NO_SAT; + if (ovly->pix_fmt != MCDE_OVLYPIXFMT_YCbCr422) + mcde_chnl_col_convert_apply(chnl, &chnl->rgb_2_ycbcr); + else + mcde_chnl_col_convert_apply(chnl, &crycb_2_ycbcr); + } else if (port->type == MCDE_PORTTYPE_DPI) { + /* Note: YUV is not support port pixel format for DPI */ + if (ovly->pix_fmt != MCDE_OVLYPIXFMT_YCbCr422) { + /* standard case: DPI: RGB -> RGB */ + regs->col_conv = MCDE_OVL0CR_COLCCTRL_DISABLED; + } else { + /* DPI: YUV -> RGB */ + regs->col_conv = + MCDE_OVL0CR_COLCCTRL_ENABLED_SAT; + mcde_chnl_col_convert_apply(chnl, + &chnl->ycbcr_2_rgb); + } + } +} + +/* REVIEW: Make update_* an mcde_rectangle? 
*/ +static void update_overlay_registers(u8 idx, struct ovly_regs *regs, + struct mcde_port *port, enum mcde_fifo fifo, + u16 update_x, u16 update_y, u16 update_w, + u16 update_h, s16 stride, bool interlaced, + enum mcde_display_rotation rotation) +{ + /* TODO: fix clipping for small overlay */ + u32 lmrgn = (regs->cropx + update_x) * regs->bits_per_pixel; + u32 tmrgn = (regs->cropy + update_y) * stride; + u32 ppl = regs->ppl - update_x; + u32 lpf = regs->lpf - update_y; + s32 ljinc = stride; + u32 pixelfetchwtrmrklevel; + u8 nr_of_bufs = 1; + u32 sel_mod = MCDE_EXTSRC0CR_SEL_MOD_SOFTWARE_SEL; + + if (rotation == MCDE_DISPLAY_ROT_180_CCW) { + ljinc = -ljinc; + tmrgn += stride * (regs->lpf - 1) / 8; + } + + /* + * Preferably most of this is done in some apply function instead of for + * every update. However lpf has a dependency on update_y. + */ + if (interlaced && port->type == MCDE_PORTTYPE_DSI) { + nr_of_bufs = 2; + lpf = lpf / 2; + ljinc *= 2; + } + + if ((fifo == MCDE_FIFO_A || fifo == MCDE_FIFO_B) && + regs->ppl >= SCREEN_PPL_HIGH) + pixelfetchwtrmrklevel = input_fifo_size * 2; + else + pixelfetchwtrmrklevel = input_fifo_size / 2; + + if (port->update_auto_trig && port->type == MCDE_PORTTYPE_DSI) { + switch (port->sync_src) { + case MCDE_SYNCSRC_OFF: + sel_mod = MCDE_EXTSRC0CR_SEL_MOD_SOFTWARE_SEL; + break; + case MCDE_SYNCSRC_TE0: + case MCDE_SYNCSRC_TE1: + case MCDE_SYNCSRC_TE_POLLING: + default: + sel_mod = MCDE_EXTSRC0CR_SEL_MOD_AUTO_TOGGLE; + break; + } + } else if (port->type == MCDE_PORTTYPE_DPI) + sel_mod = MCDE_EXTSRC0CR_SEL_MOD_SOFTWARE_SEL; + + mcde_wreg(MCDE_EXTSRC0CONF + idx * MCDE_EXTSRC0CONF_GROUPOFFSET, + MCDE_EXTSRC0CONF_BUF_ID(0) | + MCDE_EXTSRC0CONF_BUF_NB(nr_of_bufs) | + MCDE_EXTSRC0CONF_PRI_OVLID(idx) | + MCDE_EXTSRC0CONF_BPP(regs->bpp) | + MCDE_EXTSRC0CONF_BGR(regs->bgr) | + MCDE_EXTSRC0CONF_BEBO(regs->bebo) | + MCDE_EXTSRC0CONF_BEPO(false)); + mcde_wreg(MCDE_EXTSRC0CR + idx * MCDE_EXTSRC0CR_GROUPOFFSET, + MCDE_EXTSRC0CR_SEL_MOD(sel_mod) 
| + MCDE_EXTSRC0CR_MULTIOVL_CTRL_ENUM(PRIMARY) | + MCDE_EXTSRC0CR_FS_DIV_DISABLE(false) | + MCDE_EXTSRC0CR_FORCE_FS_DIV(false)); + mcde_wreg(MCDE_OVL0CR + idx * MCDE_OVL0CR_GROUPOFFSET, + MCDE_OVL0CR_OVLEN(regs->enabled) | + MCDE_OVL0CR_COLCCTRL(regs->col_conv) | + MCDE_OVL0CR_CKEYGEN(false) | + MCDE_OVL0CR_ALPHAPMEN(false) | + MCDE_OVL0CR_OVLF(false) | + MCDE_OVL0CR_OVLR(false) | + MCDE_OVL0CR_OVLB(false) | + MCDE_OVL0CR_FETCH_ROPC(0) | + MCDE_OVL0CR_STBPRIO(0) | + MCDE_OVL0CR_BURSTSIZE_ENUM(HW_8W) | + /* TODO: enum, get from ovly */ + MCDE_OVL0CR_MAXOUTSTANDING_ENUM(8_REQ) | + /* TODO: _HW_8W, calculate? */ + MCDE_OVL0CR_ROTBURSTSIZE_ENUM(HW_8W)); + mcde_wreg(MCDE_OVL0CONF + idx * MCDE_OVL0CONF_GROUPOFFSET, + MCDE_OVL0CONF_PPL(ppl) | + MCDE_OVL0CONF_EXTSRC_ID(idx) | + MCDE_OVL0CONF_LPF(lpf)); + mcde_wreg(MCDE_OVL0CONF2 + idx * MCDE_OVL0CONF2_GROUPOFFSET, + MCDE_OVL0CONF2_BP(regs->alpha_source) | + MCDE_OVL0CONF2_ALPHAVALUE(regs->alpha_value) | + MCDE_OVL0CONF2_OPQ(regs->opq) | + MCDE_OVL0CONF2_PIXOFF(lmrgn & 63) | + MCDE_OVL0CONF2_PIXELFETCHERWATERMARKLEVEL( + pixelfetchwtrmrklevel)); + mcde_wreg(MCDE_OVL0LJINC + idx * MCDE_OVL0LJINC_GROUPOFFSET, + ljinc); + mcde_wreg(MCDE_OVL0CROP + idx * MCDE_OVL0CROP_GROUPOFFSET, + MCDE_OVL0CROP_TMRGN(tmrgn) | + MCDE_OVL0CROP_LMRGN(lmrgn >> 6)); + regs->dirty = false; + + dev_vdbg(&mcde_dev->dev, "Overlay registers setup, idx=%d\n", idx); +} + +static void update_overlay_registers_on_the_fly(u8 idx, struct ovly_regs *regs) +{ + mcde_wreg(MCDE_OVL0COMP + idx * MCDE_OVL0COMP_GROUPOFFSET, + MCDE_OVL0COMP_XPOS(regs->xpos) | + MCDE_OVL0COMP_CH_ID(regs->ch_id) | + MCDE_OVL0COMP_YPOS(regs->ypos) | + MCDE_OVL0COMP_Z(regs->z)); + + mcde_wreg(MCDE_EXTSRC0A0 + idx * MCDE_EXTSRC0A0_GROUPOFFSET, + regs->baseaddress0); + mcde_wreg(MCDE_EXTSRC0A1 + idx * MCDE_EXTSRC0A1_GROUPOFFSET, + regs->baseaddress1); + regs->dirty_buf = false; +} + +static void do_softwaretrig(struct mcde_chnl_state *chnl) +{ + unsigned long flags; + + 
local_irq_save(flags); + + enable_flow(chnl); + mcde_wreg(MCDE_CHNL0SYNCHSW + + chnl->id * MCDE_CHNL0SYNCHSW_GROUPOFFSET, + MCDE_CHNL0SYNCHSW_SW_TRIG(true)); + disable_flow(chnl); + + local_irq_restore(flags); + + dev_vdbg(&mcde_dev->dev, "Software TRIG on channel %d\n", chnl->id); +} + +static void disable_flow(struct mcde_chnl_state *chnl) +{ + unsigned long flags; + + if (WARN_ON_ONCE(chnl->state != CHNLSTATE_RUNNING)) + return; + + local_irq_save(flags); + + switch (chnl->id) { + case MCDE_CHNL_A: + mcde_wfld(MCDE_CRA0, FLOEN, false); + break; + case MCDE_CHNL_B: + mcde_wfld(MCDE_CRB0, FLOEN, false); + break; + case MCDE_CHNL_C0: + mcde_wfld(MCDE_CRC, C1EN, false); + break; + case MCDE_CHNL_C1: + mcde_wfld(MCDE_CRC, C2EN, false); + break; + } + + set_channel_state_atomic(chnl, CHNLSTATE_STOPPING); + + local_irq_restore(flags); +} + +static void stop_channel(struct mcde_chnl_state *chnl) +{ + const struct mcde_port *port = &chnl->port; + bool dpi_lcd_mode; + + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + if (chnl->state != CHNLSTATE_RUNNING) + return; + + if (port->type == MCDE_PORTTYPE_DSI) { + dsi_wfld(port->link, DSI_MCTL_MAIN_PHY_CTL, CLK_CONTINUOUS, + false); + if (port->sync_src == MCDE_SYNCSRC_TE_POLLING) + del_timer(&chnl->dsi_te_timer); + } + + disable_flow(chnl); + /* + * Needs to manually trigger VCOMP after the channel is + * disabled. For all channels using video mode + * except for dpi lcd. 
+ */ + dpi_lcd_mode = (port->type == MCDE_PORTTYPE_DPI && + !chnl->port.phy.dpi.tv_mode); + + if (chnl->port.update_auto_trig && !dpi_lcd_mode) + mcde_wreg(MCDE_SISPP, 1 << chnl->id); +} + +static void wait_for_flow_disabled(struct mcde_chnl_state *chnl) +{ + int i = 0; + + switch (chnl->id) { + case MCDE_CHNL_A: + for (i = 0; i < MCDE_FLOWEN_MAX_TRIAL; i++) { + if (!mcde_rfld(MCDE_CRA0, FLOEN)) { + dev_vdbg(&mcde_dev->dev, + "Flow (A) disable after >= %d ms\n", i); + break; + } + msleep(1); + } + break; + case MCDE_CHNL_B: + for (i = 0; i < MCDE_FLOWEN_MAX_TRIAL; i++) { + if (!mcde_rfld(MCDE_CRB0, FLOEN)) { + dev_vdbg(&mcde_dev->dev, + "Flow (B) disable after >= %d ms\n", i); + break; + } + msleep(1); + } + break; + case MCDE_CHNL_C0: + for (i = 0; i < MCDE_FLOWEN_MAX_TRIAL; i++) { + if (!mcde_rfld(MCDE_CRC, C1EN)) { + dev_vdbg(&mcde_dev->dev, + "Flow (C1) disable after >= %d ms\n", i); + break; + } + msleep(1); + } + break; + case MCDE_CHNL_C1: + for (i = 0; i < MCDE_FLOWEN_MAX_TRIAL; i++) { + if (!mcde_rfld(MCDE_CRC, C2EN)) { + dev_vdbg(&mcde_dev->dev, + "Flow (C2) disable after >= %d ms\n", i); + break; + } + msleep(1); + } + break; + } + if (i == MCDE_FLOWEN_MAX_TRIAL) + dev_err(&mcde_dev->dev, "%s: channel %d timeout\n", + __func__, chnl->id); +} + +static void enable_flow(struct mcde_chnl_state *chnl) +{ + const struct mcde_port *port = &chnl->port; + + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + if (port->type == MCDE_PORTTYPE_DSI) + dsi_wfld(port->link, DSI_MCTL_MAIN_PHY_CTL, CLK_CONTINUOUS, + port->phy.dsi.clk_cont); + + /* + * When ROTEN is set, the FLOEN bit will also be set but + * the flow has to be started anyway. 
+ */ + switch (chnl->id) { + case MCDE_CHNL_A: + WARN_ON_ONCE(mcde_rfld(MCDE_CRA0, FLOEN)); + mcde_wfld(MCDE_CRA0, ROTEN, chnl->regs.roten); + mcde_wfld(MCDE_CRA0, FLOEN, true); + break; + case MCDE_CHNL_B: + WARN_ON_ONCE(mcde_rfld(MCDE_CRB0, FLOEN)); + mcde_wfld(MCDE_CRB0, ROTEN, chnl->regs.roten); + mcde_wfld(MCDE_CRB0, FLOEN, true); + break; + case MCDE_CHNL_C0: + WARN_ON_ONCE(mcde_rfld(MCDE_CRC, C1EN)); + mcde_wfld(MCDE_CRC, C1EN, true); + break; + case MCDE_CHNL_C1: + WARN_ON_ONCE(mcde_rfld(MCDE_CRC, C2EN)); + mcde_wfld(MCDE_CRC, C2EN, true); + break; + } + + set_channel_state_atomic(chnl, CHNLSTATE_RUNNING); +} + +static void work_sleep_function(struct work_struct *ptr) +{ + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + if (mcde_trylock(__func__, __LINE__)) { + if (mcde_dynamic_power_management) + disable_mcde_hw(false, false); + mcde_unlock(__func__, __LINE__); + } +} + +/* TODO get from register */ +#define MCDE_CLK_FREQ_MHZ 160 +static u32 get_pkt_div(u32 disp_ppl, + struct mcde_port *port, + enum mcde_fifo fifo) +{ + /* + * The lines can be split in several packets only on DSI CMD mode. + * In DSI VIDEO mode, 1 line = 1 packet. + * DPI is like DSI VIDEO (watermark = 1 line). + * DPI waits for fifo ready only for the first line of the first frame. + * If line is wider than fifo size, one can set watermark + * at fifo size, or set it to line size as watermark will be + * saturated at fifo size inside MCDE. 
+ */ + switch (port->type) { + case MCDE_PORTTYPE_DSI: + if (port->mode == MCDE_PORTMODE_CMD) + /* Equivalent of ceil(disp_ppl/fifo_size) */ + return (disp_ppl - 1) / get_output_fifo_size(fifo) + 1; + else + return 1; + break; + case MCDE_PORTTYPE_DPI: + return 1; + break; + default: + break; + } + return 1; +} + +static void update_vid_horizontal_blanking(struct mcde_port *port, + struct mcde_video_mode *vmode, bool sync_is_pulse, u8 bpp) +{ + int hfp, hbp, hsa; + u8 link = port->link; + + /* + * vmode->hfp, vmode->hbp and vmode->hsw are given in pixels + * and must be re-calculated into bytes + * + * 6 + 2 is HFP header + checksum + */ + hfp = vmode->hfp * bpp - 6 - 2; + if (sync_is_pulse) { + /* + * 6 is HBP header + checksum + * 4 is RGB header + checksum + */ + hbp = vmode->hbp * bpp - 4 - 6; + /* + * 6 is HBP header + checksum + * 4 is HSW packet bytes + * 4 is RGB header + checksum + */ + hsa = vmode->hsw * bpp - 4 - 4 - 6; + } else { + /* + * 6 is HBP header + checksum + * 4 is HSW packet bytes + * 4 is RGB header + checksum + */ + hbp = (vmode->hbp + vmode->hsw) * bpp - 4 - 4 - 6; + /* HSA is not considered in this mode and set to 0 */ + hsa = 0; + } + if (hfp < 0) { + hfp = 0; + dev_warn(&mcde_dev->dev, + "%s: negative calc for hfp, set to 0\n", __func__); + } + if (hbp < 0) { + hbp = 0; + dev_warn(&mcde_dev->dev, + "%s: negative calc for hbp, set to 0\n", __func__); + } + if (hsa < 0) { + hsa = 0; + dev_warn(&mcde_dev->dev, + "%s: negative calc for hsa, set to 0\n", __func__); + } + + dsi_wfld(link, DSI_VID_HSIZE1, HFP_LENGTH, hfp); + dsi_wfld(link, DSI_VID_HSIZE1, HBP_LENGTH, hbp); + dsi_wfld(link, DSI_VID_HSIZE1, HSA_LENGTH, hsa); +} + +static void update_vid_frame_parameters(struct mcde_port *port, + struct mcde_video_mode *vmode, u8 bpp) +{ + u8 link = port->link; + bool burst_mode, sync_is_pulse, tvg_enable; + u32 hs_byte_clk, pck_len, blkline_pck, line_duration; + u32 blkeol_pck, blkeol_duration; + u8 pixel_mode; + u8 rgb_header; + + 
get_vid_operating_mode(port, &burst_mode, &sync_is_pulse, &tvg_enable); + + dsi_wfld(link, DSI_VID_VSIZE, VFP_LENGTH, vmode->vfp); + dsi_wfld(link, DSI_VID_VSIZE, VBP_LENGTH, vmode->vbp); + dsi_wfld(link, DSI_VID_VSIZE, VSA_LENGTH, vmode->vsw); + update_vid_horizontal_blanking(port, vmode, sync_is_pulse, bpp); + + dsi_wfld(link, DSI_VID_VSIZE, VACT_LENGTH, vmode->yres); + dsi_wfld(link, DSI_VID_HSIZE2, RGB_SIZE, vmode->xres * bpp); + + /* + * The rgb_header identifies the pixel stream format, + * as described in the MIPI DSI Specification: + * + * 0x0E: Packed pixel stream, 16-bit RGB, 565 format + * 0x1E: Packed pixel stream, 18-bit RGB, 666 format + * 0x2E: Loosely Packed pixel stream, 18-bit RGB, 666 format + * 0x3E: Packed pixel stream, 24-bit RGB, 888 format + */ + switch (port->pixel_format) { + case MCDE_PORTPIXFMT_DSI_16BPP: + pixel_mode = 0; + rgb_header = 0x0E; + break; + case MCDE_PORTPIXFMT_DSI_18BPP: + pixel_mode = 2; + rgb_header = 0x2E; + break; + case MCDE_PORTPIXFMT_DSI_18BPP_PACKED: + pixel_mode = 1; + rgb_header = 0x1E; + break; + case MCDE_PORTPIXFMT_DSI_24BPP: + pixel_mode = 3; + rgb_header = 0x3E; + break; + default: + pixel_mode = 3; + rgb_header = 0x3E; + dev_warn(&mcde_dev->dev, + "%s: invalid pixel format %d\n", + __func__, port->pixel_format); + break; + } + + dsi_wfld(link, DSI_VID_MAIN_CTL, VID_PIXEL_MODE, pixel_mode); + dsi_wfld(link, DSI_VID_MAIN_CTL, HEADER, rgb_header); + + if (tvg_enable) { + /* + * with these settings, expect to see 64 pixels wide + * red and green vertical stripes on the screen when + * tvg_enable = 1 + */ + dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, TVG_SEL, 1); + + dsi_wfld(link, DSI_TVG_CTL, TVG_STRIPE_SIZE, 6); + dsi_wfld(link, DSI_TVG_CTL, TVG_MODE, 2); + dsi_wfld(link, DSI_TVG_CTL, TVG_STOPMODE, 2); + dsi_wfld(link, DSI_TVG_CTL, TVG_RUN, 1); + + dsi_wfld(link, DSI_TVG_IMG_SIZE, TVG_NBLINE, vmode->yres); + dsi_wfld(link, DSI_TVG_IMG_SIZE, TVG_LINE_SIZE, + vmode->xres * bpp); + + dsi_wfld(link, DSI_TVG_COLOR1, 
COL1_BLUE, 0); + dsi_wfld(link, DSI_TVG_COLOR1, COL1_GREEN, 0); + dsi_wfld(link, DSI_TVG_COLOR1, COL1_RED, 0xFF); + + dsi_wfld(link, DSI_TVG_COLOR2, COL2_BLUE, 0); + dsi_wfld(link, DSI_TVG_COLOR2, COL2_GREEN, 0xFF); + dsi_wfld(link, DSI_TVG_COLOR2, COL2_RED, 0); + } + + /* + * vid->pixclock is the time between two pixels (in picoseconds) + * + * hs_byte_clk is the amount of transferred bytes per lane and + * second (in MHz) + */ + hs_byte_clk = 1000000 / vmode->pixclock / 8; + pck_len = 1000000 * hs_byte_clk / port->refresh_rate / + (vmode->vsw + vmode->vbp + vmode->yres + vmode->vfp) * + port->phy.dsi.num_data_lanes; + + /* + * 6 is header + checksum, header = 4 bytes, checksum = 2 bytes + * 4 is short packet for vsync/hsync + */ + if (sync_is_pulse) + blkline_pck = pck_len - vmode->hsw - 6; + else + blkline_pck = pck_len - 4 - 6; + + line_duration = (blkline_pck + 6) / port->phy.dsi.num_data_lanes; + blkeol_pck = pck_len - + (vmode->hsw + vmode->hbp + vmode->xres + vmode->hfp) * bpp - 6; + blkeol_duration = (blkeol_pck + 6) / port->phy.dsi.num_data_lanes; + + if (sync_is_pulse) + dsi_wfld(link, DSI_VID_BLKSIZE2, BLKLINE_PULSE_PCK, + blkline_pck); + else + dsi_wfld(link, DSI_VID_BLKSIZE1, BLKLINE_EVENT_PCK, + blkline_pck); + dsi_wfld(link, DSI_VID_DPHY_TIME, REG_LINE_DURATION, line_duration); + if (burst_mode) { + dsi_wfld(link, DSI_VID_BLKSIZE1, BLKEOL_PCK, blkeol_pck); + dsi_wfld(link, DSI_VID_PCK_TIME, BLKEOL_DURATION, + blkeol_duration); + dsi_wfld(link, DSI_VID_VCA_SETTING1, MAX_BURST_LIMIT, + blkeol_pck - 6); + dsi_wfld(link, DSI_VID_VCA_SETTING2, EXACT_BURST_LIMIT, + blkeol_pck); + } + if (sync_is_pulse) + dsi_wfld(link, DSI_VID_VCA_SETTING2, MAX_LINE_LIMIT, + blkline_pck - 6); +} + +static void set_vsync_method(u8 idx, struct mcde_port *port) +{ + u32 out_synch_src = MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_FORMATTER; + u32 src_synch = MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE; + + if (port->type == MCDE_PORTTYPE_DSI) { + switch (port->frame_trig) { + case 
MCDE_TRIG_HW: + src_synch = MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE; + break; + case MCDE_TRIG_SW: + src_synch = MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE; + break; + default: + src_synch = MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE; + break; + } + + switch (port->sync_src) { + case MCDE_SYNCSRC_OFF: + out_synch_src = + MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_FORMATTER; + break; + case MCDE_SYNCSRC_TE0: + out_synch_src = MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_TE0; + if (src_synch == + MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE) { + dev_dbg(&mcde_dev->dev, "%s: badly configured " + "frame sync, TE0 defaulting " + "to hw frame trig\n", __func__); + src_synch = + MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE; + } + break; + case MCDE_SYNCSRC_TE1: + out_synch_src = MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_TE1; + if (src_synch == + MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE) { + dev_dbg(&mcde_dev->dev, "%s: badly configured " + "frame sync, TE1 defaulting " + "to hw frame trig\n", __func__); + src_synch = + MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE; + } + break; + case MCDE_SYNCSRC_BTA: + out_synch_src = + MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_FORMATTER; + break; + case MCDE_SYNCSRC_TE_POLLING: + out_synch_src = + MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_FORMATTER; + if (src_synch == + MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE) { + dev_dbg(&mcde_dev->dev, "%s: badly configured " + "frame sync, TE_POLLING defaulting " + "to hw frame trig\n", __func__); + src_synch = + MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE; + } + break; + default: + out_synch_src = + MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_FORMATTER; + src_synch = MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE; + dev_dbg(&mcde_dev->dev, "%s: no sync src selected, " + "defaulting to DSI BTA with " + "hw frame trig\n", __func__); + break; + } + } else if (port->type == MCDE_PORTTYPE_DPI) { + out_synch_src = MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_FORMATTER; + src_synch = port->update_auto_trig ? 
+ MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE : + MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE; + } + + mcde_wreg(MCDE_CHNL0SYNCHMOD + + idx * MCDE_CHNL0SYNCHMOD_GROUPOFFSET, + MCDE_CHNL0SYNCHMOD_SRC_SYNCH(src_synch) | + MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC(out_synch_src)); +} + +void update_channel_registers(enum mcde_chnl chnl_id, struct chnl_regs *regs, + struct mcde_port *port, enum mcde_fifo fifo, + struct mcde_video_mode *video_mode) +{ + u8 idx = chnl_id; + u32 fifo_wtrmrk = 0; + + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + /* + * Select appropriate fifo watermark. + * Watermark will be saturated at fifo size inside MCDE. + */ + fifo_wtrmrk = video_mode->xres / + get_pkt_div(video_mode->xres, port, fifo); + + dev_vdbg(&mcde_dev->dev, "%s fifo_watermark=%d for chnl_id=%d\n", + __func__, fifo_wtrmrk, chnl_id); + + switch (chnl_id) { + case MCDE_CHNL_A: + mcde_wfld(MCDE_CTRLA, FIFOWTRMRK, fifo_wtrmrk); + break; + case MCDE_CHNL_B: + mcde_wfld(MCDE_CTRLB, FIFOWTRMRK, fifo_wtrmrk); + break; + case MCDE_CHNL_C0: + mcde_wfld(MCDE_CTRLC0, FIFOWTRMRK, fifo_wtrmrk); + break; + case MCDE_CHNL_C1: + mcde_wfld(MCDE_CTRLC1, FIFOWTRMRK, fifo_wtrmrk); + break; + default: + break; + } + + set_vsync_method(idx, port); + + mcde_wreg(MCDE_CHNL0CONF + idx * MCDE_CHNL0CONF_GROUPOFFSET, + MCDE_CHNL0CONF_PPL(regs->ppl-1) | + MCDE_CHNL0CONF_LPF(regs->lpf-1)); + mcde_wreg(MCDE_CHNL0STAT + idx * MCDE_CHNL0STAT_GROUPOFFSET, + MCDE_CHNL0STAT_CHNLBLBCKGND_EN(false) | + MCDE_CHNL0STAT_CHNLRD(true)); + mcde_wreg(MCDE_CHNL0BCKGNDCOL + idx * MCDE_CHNL0BCKGNDCOL_GROUPOFFSET, + MCDE_CHNL0BCKGNDCOL_B(0) | + MCDE_CHNL0BCKGNDCOL_G(0) | + MCDE_CHNL0BCKGNDCOL_R(0)); + + if (chnl_id == MCDE_CHNL_A || chnl_id == MCDE_CHNL_B) { + u32 mcde_crx1; + u32 mcde_pal0x; + u32 mcde_pal1x; + if (chnl_id == MCDE_CHNL_A) { + mcde_crx1 = MCDE_CRA1; + mcde_pal0x = MCDE_PAL0A; + mcde_pal1x = MCDE_PAL1A; + mcde_wfld(MCDE_CRA0, PALEN, regs->palette_enable); + } else { + mcde_crx1 = MCDE_CRB1; + mcde_pal0x = MCDE_PAL0B; + 
mcde_pal1x = MCDE_PAL1B; + mcde_wfld(MCDE_CRB0, PALEN, regs->palette_enable); + } + mcde_wreg(mcde_crx1, + MCDE_CRA1_PCD(regs->pcd) | + MCDE_CRA1_CLKSEL(regs->clksel) | + MCDE_CRA1_CDWIN(regs->cdwin) | + MCDE_CRA1_OUTBPP(bpp2outbpp(regs->bpp)) | + MCDE_CRA1_BCD(regs->bcd) | + MCDE_CRA1_CLKTYPE(regs->internal_clk)); + if (regs->palette_enable) { + int i; + for (i = 0; i < 256; i++) { + mcde_wreg(mcde_pal0x, + MCDE_PAL0A_GREEN(regs->map_g(i)) | + MCDE_PAL0A_BLUE(regs->map_b(i))); + mcde_wreg(mcde_pal1x, + MCDE_PAL1A_RED(regs->map_r(i))); + } + } + } + + /* Formatter */ + if (port->type == MCDE_PORTTYPE_DSI) { + u8 fidx; + u32 temp, packet; + /* pkt_div is used to avoid underflow in output fifo for + * large packets */ + u32 pkt_div = 1; + u32 dsi_delay0 = 0; + u32 screen_ppl, screen_lpf; + + fidx = get_dsi_formatter_id(port); + + screen_ppl = video_mode->xres; + screen_lpf = video_mode->yres; + + pkt_div = get_pkt_div(screen_ppl, port, fifo); + + if (video_mode->interlaced) + screen_lpf /= 2; + + /* pkt_delay_progressive = pixelclock * htot / + * (1E12 / 160E6) / pkt_div */ + dsi_delay0 = (video_mode->pixclock) * + (video_mode->xres + video_mode->hbp + + video_mode->hfp) / + (100000000 / ((mcde_clk_rate / 10000))) / pkt_div; + + if ((screen_ppl == SCREEN_PPL_CEA2) && + (screen_lpf == SCREEN_LPF_CEA2)) + dsi_delay0 += DSI_DELAY0_CEA2_ADD; + + temp = mcde_rreg(MCDE_DSIVID0CONF0 + + fidx * MCDE_DSIVID0CONF0_GROUPOFFSET); + mcde_wreg(MCDE_DSIVID0CONF0 + + fidx * MCDE_DSIVID0CONF0_GROUPOFFSET, + (temp & ~MCDE_DSIVID0CONF0_PACKING_MASK) | + MCDE_DSIVID0CONF0_PACKING(regs->dsipacking)); + /* no extra command byte in video mode */ + if (port->mode == MCDE_PORTMODE_CMD) + packet = ((screen_ppl / pkt_div * regs->bpp) >> 3) + 1; + else + packet = ((screen_ppl / pkt_div * regs->bpp) >> 3); + mcde_wreg(MCDE_DSIVID0FRAME + + fidx * MCDE_DSIVID0FRAME_GROUPOFFSET, + MCDE_DSIVID0FRAME_FRAME(packet * pkt_div * screen_lpf)); + mcde_wreg(MCDE_DSIVID0PKT + fidx * 
MCDE_DSIVID0PKT_GROUPOFFSET, + MCDE_DSIVID0PKT_PACKET(packet)); + mcde_wreg(MCDE_DSIVID0SYNC + + fidx * MCDE_DSIVID0SYNC_GROUPOFFSET, + MCDE_DSIVID0SYNC_SW(0) | + MCDE_DSIVID0SYNC_DMA(0)); + mcde_wreg(MCDE_DSIVID0CMDW + + fidx * MCDE_DSIVID0CMDW_GROUPOFFSET, + MCDE_DSIVID0CMDW_CMDW_START(DCS_CMD_WRITE_START) | + MCDE_DSIVID0CMDW_CMDW_CONTINUE(DCS_CMD_WRITE_CONTINUE)); + mcde_wreg(MCDE_DSIVID0DELAY0 + + fidx * MCDE_DSIVID0DELAY0_GROUPOFFSET, + MCDE_DSIVID0DELAY0_INTPKTDEL(dsi_delay0)); + mcde_wreg(MCDE_DSIVID0DELAY1 + + fidx * MCDE_DSIVID0DELAY1_GROUPOFFSET, + MCDE_DSIVID0DELAY1_TEREQDEL(0) | + MCDE_DSIVID0DELAY1_FRAMESTARTDEL(0)); + + if (port->mode == MCDE_PORTMODE_VID) + update_vid_frame_parameters(port, video_mode, + regs->bpp / 8); + } else if (port->type == MCDE_PORTTYPE_DPI && + !port->phy.dpi.tv_mode) { + /* DPI LCD Mode */ + if (chnl_id == MCDE_CHNL_A) { + mcde_wreg(MCDE_SYNCHCONFA, + MCDE_SYNCHCONFA_HWREQVEVENT_ENUM( + ACTIVE_VIDEO) | + MCDE_SYNCHCONFA_HWREQVCNT( + video_mode->yres - 1) | + MCDE_SYNCHCONFA_SWINTVEVENT_ENUM( + ACTIVE_VIDEO) | + MCDE_SYNCHCONFA_SWINTVCNT( + video_mode->yres - 1)); + } else if (chnl_id == MCDE_CHNL_B) { + mcde_wreg(MCDE_SYNCHCONFB, + MCDE_SYNCHCONFB_HWREQVEVENT_ENUM( + ACTIVE_VIDEO) | + MCDE_SYNCHCONFB_HWREQVCNT( + video_mode->yres - 1) | + MCDE_SYNCHCONFB_SWINTVEVENT_ENUM( + ACTIVE_VIDEO) | + MCDE_SYNCHCONFB_SWINTVCNT( + video_mode->yres - 1)); + } + } + + if (regs->roten) { + u32 stripwidth; + u32 stripwidth_val; + + /* calc strip width, 32 bits used internally */ + stripwidth = regs->rotbufsize / (video_mode->yres * 4); + if (stripwidth >= 32) + stripwidth_val = MCDE_ROTACONF_STRIP_WIDTH_32PIX; + else if (stripwidth >= 16) + stripwidth_val = MCDE_ROTACONF_STRIP_WIDTH_16PIX; + else if (stripwidth >= 8) + stripwidth_val = MCDE_ROTACONF_STRIP_WIDTH_8PIX; + else if (stripwidth >= 4) + stripwidth_val = MCDE_ROTACONF_STRIP_WIDTH_4PIX; + else + stripwidth_val = MCDE_ROTACONF_STRIP_WIDTH_2PIX; + dev_vdbg(&mcde_dev->dev, "%s 
stripwidth=%d\n", __func__, + 1 << (stripwidth_val + 1)); + mcde_wreg(MCDE_ROTADD0A + chnl_id * MCDE_ROTADD0A_GROUPOFFSET, + regs->rotbuf1); + mcde_wreg(MCDE_ROTADD1A + chnl_id * MCDE_ROTADD1A_GROUPOFFSET, + regs->rotbuf2); + mcde_wreg(MCDE_ROTACONF + chnl_id * MCDE_ROTACONF_GROUPOFFSET, + MCDE_ROTACONF_ROTBURSTSIZE_ENUM(HW_8W) | + MCDE_ROTACONF_ROTDIR(regs->rotdir) | + MCDE_ROTACONF_STRIP_WIDTH(stripwidth_val) | + MCDE_ROTACONF_RD_MAXOUT_ENUM(4_REQ) | + MCDE_ROTACONF_WR_MAXOUT_ENUM(8_REQ)); + } + + /* Blending */ + if (chnl_id == MCDE_CHNL_A) { + mcde_wfld(MCDE_CRA0, BLENDEN, regs->blend_en); + mcde_wfld(MCDE_CRA0, BLENDCTRL, regs->blend_ctrl); + mcde_wfld(MCDE_CRA0, ALPHABLEND, regs->alpha_blend); + } else if (chnl_id == MCDE_CHNL_B) { + mcde_wfld(MCDE_CRB0, BLENDEN, regs->blend_en); + mcde_wfld(MCDE_CRB0, BLENDCTRL, regs->blend_ctrl); + mcde_wfld(MCDE_CRB0, ALPHABLEND, regs->alpha_blend); + } + + dev_vdbg(&mcde_dev->dev, "Channel registers setup, chnl=%d\n", chnl_id); + regs->dirty = false; +} + +static int enable_mcde_hw(void) +{ + int ret; + int i; + + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + cancel_delayed_work(&hw_timeout_work); + schedule_delayed_work(&hw_timeout_work, + msecs_to_jiffies(MCDE_SLEEP_WATCHDOG)); + + for (i = 0; i < num_channels; i++) { + struct mcde_chnl_state *chnl = &channels[i]; + if (chnl->state == CHNLSTATE_SUSPEND) { + /* Mark all registers as dirty */ + set_channel_state_atomic(chnl, CHNLSTATE_IDLE); + chnl->ovly0->regs.dirty = true; + chnl->ovly0->regs.dirty_buf = true; + if (chnl->ovly1) { + chnl->ovly1->regs.dirty = true; + chnl->ovly1->regs.dirty_buf = true; + } + chnl->regs.dirty = true; + chnl->col_regs.dirty = true; + chnl->tv_regs.dirty = true; + atomic_set(&chnl->vcmp_cnt, 0); + } + } + + if (mcde_is_enabled) { + dev_vdbg(&mcde_dev->dev, "%s - already enabled\n", __func__); + return 0; + } + + enable_clocks_and_power(mcde_dev); + + ret = request_irq(mcde_irq, mcde_irq_handler, 0, "mcde", + &mcde_dev->dev); + if (ret) { 
+ dev_dbg(&mcde_dev->dev, "Failed to request irq (irq=%d)\n", + mcde_irq); + cancel_delayed_work(&hw_timeout_work); + return -EINVAL; + } + + update_mcde_registers(); + + dev_vdbg(&mcde_dev->dev, "%s - enable done\n", __func__); + + mcde_is_enabled = true; + return 0; +} + +/* DSI */ +static int mcde_dsi_direct_cmd_write(struct mcde_chnl_state *chnl, + bool dcs, u8 cmd, u8 *data, int len) +{ + int i, ret = 0; + u32 wrdat[4] = { 0, 0, 0, 0 }; + u32 settings; + u8 link = chnl->port.link; + u8 virt_id = chnl->port.phy.dsi.virt_id; + u32 counter = DSI_WRITE_CMD_TIMEOUT; + + if (len > MCDE_MAX_DSI_DIRECT_CMD_WRITE || + chnl->port.type != MCDE_PORTTYPE_DSI) + return -EINVAL; + + mcde_lock(__func__, __LINE__); + + _mcde_chnl_enable(chnl); + if (enable_mcde_hw()) { + mcde_unlock(__func__, __LINE__); + return -EINVAL; + } + if (!chnl->formatter_updated) + (void)update_channel_static_registers(chnl); + + set_channel_state_sync(chnl, CHNLSTATE_DSI_WRITE); + + if (dcs) { + wrdat[0] = cmd; + for (i = 1; i <= len; i++) + wrdat[i>>2] |= ((u32)data[i-1] << ((i & 3) * 8)); + } else { + /* no explicit cmd byte for generic_write, only params */ + for (i = 0; i < len; i++) + wrdat[i>>2] |= ((u32)data[i] << ((i & 3) * 8)); + } + + settings = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_ENUM(WRITE) | + DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT(len > 1) | + DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID(virt_id) | + DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE(len+1) | + DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN(true); + if (dcs) { + if (len == 0) + settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM( + DCS_SHORT_WRITE_0); + else if (len == 1) + settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM( + DCS_SHORT_WRITE_1); + else + settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM( + DCS_LONG_WRITE); + } else { + if (len == 0) + settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM( + GENERIC_SHORT_WRITE_0); + else if (len == 1) + settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM( + 
GENERIC_SHORT_WRITE_1); + else if (len == 2) + settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM( + GENERIC_SHORT_WRITE_2); + else + settings |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM( + GENERIC_LONG_WRITE); + } + + dsi_wreg(link, DSI_DIRECT_CMD_MAIN_SETTINGS, settings); + dsi_wreg(link, DSI_DIRECT_CMD_WRDAT0, wrdat[0]); + if (len > 3) + dsi_wreg(link, DSI_DIRECT_CMD_WRDAT1, wrdat[1]); + if (len > 7) + dsi_wreg(link, DSI_DIRECT_CMD_WRDAT2, wrdat[2]); + if (len > 11) + dsi_wreg(link, DSI_DIRECT_CMD_WRDAT3, wrdat[3]); + dsi_wreg(link, DSI_DIRECT_CMD_STS_CLR, ~0); + dsi_wreg(link, DSI_CMD_MODE_STS_CLR, ~0); + dsi_wreg(link, DSI_DIRECT_CMD_SEND, true); + + /* loop will normally run zero or one time until WRITE_COMPLETED */ + while (!dsi_rfld(link, DSI_DIRECT_CMD_STS, WRITE_COMPLETED) + && --counter) + cpu_relax(); + + if (!counter) { + dev_err(&mcde_dev->dev, + "%s: DSI write cmd 0x%x timeout on DSI link %u!\n", + __func__, cmd, link); + ret = -ETIME; + } else { + /* inform if >100 loops before command completion */ + if (counter < (DSI_WRITE_CMD_TIMEOUT-DSI_WRITE_CMD_TIMEOUT/10)) + dev_vdbg(&mcde_dev->dev, + "%s: %u loops for DSI command %x completion\n", + __func__, (DSI_WRITE_CMD_TIMEOUT - counter), + cmd); + + dev_vdbg(&mcde_dev->dev, "DSI Write ok %x error %x\n", + dsi_rreg(link, DSI_DIRECT_CMD_STS_FLAG), + dsi_rreg(link, DSI_CMD_MODE_STS_FLAG)); + } + + set_channel_state_atomic(chnl, CHNLSTATE_IDLE); + + mcde_unlock(__func__, __LINE__); + + return ret; +} + +int mcde_dsi_generic_write(struct mcde_chnl_state *chnl, u8* para, int len) +{ + return mcde_dsi_direct_cmd_write(chnl, false, 0, para, len); +} + +int mcde_dsi_dcs_write(struct mcde_chnl_state *chnl, u8 cmd, u8* data, int len) +{ + return mcde_dsi_direct_cmd_write(chnl, true, cmd, data, len); +} + +int mcde_dsi_dcs_read(struct mcde_chnl_state *chnl, + u8 cmd, u32 *data, int *len) +{ + int ret = 0; + u8 link = chnl->port.link; + u8 virt_id = chnl->port.phy.dsi.virt_id; + u32 settings; + bool ok = 
false; + bool error, ack_with_err; + u8 nbr_of_retries = DSI_READ_NBR_OF_RETRIES; + + if (*len > MCDE_MAX_DCS_READ || chnl->port.type != MCDE_PORTTYPE_DSI) + return -EINVAL; + + mcde_lock(__func__, __LINE__); + + _mcde_chnl_enable(chnl); + if (enable_mcde_hw()) { + mcde_unlock(__func__, __LINE__); + return -EINVAL; + } + if (!chnl->formatter_updated) + (void)update_channel_static_registers(chnl); + + set_channel_state_sync(chnl, CHNLSTATE_DSI_READ); + + dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, BTA_EN, true); + dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, READ_EN, true); + settings = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_ENUM(READ) | + DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT(false) | + DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID(virt_id) | + DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE(1) | + DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN(true) | + DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(DCS_READ); + dsi_wreg(link, DSI_DIRECT_CMD_MAIN_SETTINGS, settings); + dsi_wreg(link, DSI_DIRECT_CMD_WRDAT0, cmd); + + do { + u8 wait = DSI_READ_TIMEOUT; + dsi_wreg(link, DSI_DIRECT_CMD_STS_CLR, ~0); + dsi_wreg(link, DSI_DIRECT_CMD_RD_STS_CLR, ~0); + dsi_wreg(link, DSI_DIRECT_CMD_SEND, true); + + while (wait-- && !(error = dsi_rfld(link, DSI_DIRECT_CMD_STS, + READ_COMPLETED_WITH_ERR)) && + !(ok = dsi_rfld(link, DSI_DIRECT_CMD_STS, + READ_COMPLETED))) + udelay(DSI_READ_DELAY); + + ack_with_err = dsi_rfld(link, DSI_DIRECT_CMD_STS, + ACKNOWLEDGE_WITH_ERR_RECEIVED); + if (ack_with_err) + dev_warn(&mcde_dev->dev, + "DCS Acknowledge Error Report %.4X\n", + dsi_rfld(link, DSI_DIRECT_CMD_STS, ACK_VAL)); + } while (--nbr_of_retries && ack_with_err); + + if (ok) { + int rdsize; + u32 rddat; + + rdsize = dsi_rfld(link, DSI_DIRECT_CMD_RD_PROPERTY, RD_SIZE); + rddat = dsi_rreg(link, DSI_DIRECT_CMD_RDDAT); + if (rdsize < *len) + dev_warn(&mcde_dev->dev, "DCS incomplete read %d<%d" + " (%.8X)\n", rdsize, *len, rddat); + *len = min(*len, rdsize); + memcpy(data, &rddat, *len); + } else { + dev_err(&mcde_dev->dev, "DCS 
read failed, err=%d, sts=%X\n", + error, dsi_rreg(link, DSI_DIRECT_CMD_STS)); + ret = -EIO; + } + + dsi_wreg(link, DSI_CMD_MODE_STS_CLR, ~0); + dsi_wreg(link, DSI_DIRECT_CMD_STS_CLR, ~0); + + set_channel_state_atomic(chnl, CHNLSTATE_IDLE); + + mcde_unlock(__func__, __LINE__); + + return ret; +} + +/* + * Set Maximum Return Packet size is a command that specifies the + * maximum size of the payload transmitted from peripheral back to + * the host processor. + * + * During power-on or reset sequence, the Maximum Return Packet Size + * is set to a default value of one. In order to be able to use + * mcde_dsi_dcs_read for reading more than 1 byte at a time, this + * parameter should be set by the host processor to the desired value + * in the initialization routine before commencing normal operation. + */ +int mcde_dsi_set_max_pkt_size(struct mcde_chnl_state *chnl) +{ + u32 settings; + u8 link = chnl->port.link; + u8 virt_id = chnl->port.phy.dsi.virt_id; + + if (chnl->port.type != MCDE_PORTTYPE_DSI) + return -EINVAL; + + mcde_lock(__func__, __LINE__); + + if (enable_mcde_hw()) { + mcde_unlock(__func__, __LINE__); + return -EIO; + } + + set_channel_state_sync(chnl, CHNLSTATE_DSI_WRITE); + + /* + * Set Maximum Return Packet Size is a two-byte command packet + * that specifies the maximum size of the payload as u16 value. 
+ * The order of bytes is: MaxSize LSB, MaxSize MSB + */ + settings = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_ENUM(WRITE) | + DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT(false) | + DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID(virt_id) | + DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE(2) | + DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN(true) | + DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM( + SET_MAX_PKT_SIZE); + dsi_wreg(link, DSI_DIRECT_CMD_MAIN_SETTINGS, settings); + dsi_wreg(link, DSI_DIRECT_CMD_WRDAT0, MCDE_MAX_DCS_READ); + dsi_wreg(link, DSI_DIRECT_CMD_SEND, true); + + set_channel_state_atomic(chnl, CHNLSTATE_IDLE); + + mcde_unlock(__func__, __LINE__); + + return 0; +} + +static void dsi_te_poll_req(struct mcde_chnl_state *chnl) +{ + u8 lnk = chnl->port.link; + const struct mcde_port *port = &chnl->port; + + dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, REG_TE_EN, false); + if (port->ifc == 0) + dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, IF1_TE_EN, true); + if (port->ifc == 1) + dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, IF2_TE_EN, true); + dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, BTA_EN, true); + dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, READ_EN, true); + dsi_wfld(lnk, DSI_CMD_MODE_CTL, TE_TIMEOUT, 0x3FF); + dsi_wfld(lnk, DSI_MCTL_MAIN_DATA_CTL, TE_POLLING_EN, true); +} + +static void dsi_te_poll_set_timer(struct mcde_chnl_state *chnl, + unsigned int timeout) +{ + mod_timer(&chnl->dsi_te_timer, + jiffies + + msecs_to_jiffies(timeout)); +} + +static void dsi_te_timer_function(unsigned long arg) +{ + struct mcde_chnl_state *chnl; + u8 lnk; + + if (arg >= num_channels) { + dev_err(&mcde_dev->dev, "%s invalid arg:%ld\n", __func__, arg); + return; + } + + chnl = &channels[arg]; + + if (mcde_is_enabled && chnl->enabled && chnl->formatter_updated) { + lnk = chnl->port.link; + /* No TE answer; force stop */ + dsi_wfld(lnk, DSI_MCTL_MAIN_PHY_CTL, FORCE_STOP_MODE, true); + udelay(20); + dsi_wfld(lnk, DSI_MCTL_MAIN_PHY_CTL, FORCE_STOP_MODE, false); + dev_info(&mcde_dev->dev, "DSI%d force stop\n", lnk); + 
dsi_te_poll_set_timer(chnl, DSI_TE_NO_ANSWER_TIMEOUT); + } else { + dev_info(&mcde_dev->dev, "1:DSI force stop\n"); + } +} + +static void dsi_te_request(struct mcde_chnl_state *chnl) +{ + u8 link = chnl->port.link; + u8 virt_id = chnl->port.phy.dsi.virt_id; + u32 settings; + + dev_vdbg(&mcde_dev->dev, "Request BTA TE, chnl=%d\n", + chnl->id); + + set_channel_state_atomic(chnl, CHNLSTATE_WAIT_TE); + + dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, BTA_EN, true); + dsi_wfld(link, DSI_MCTL_MAIN_DATA_CTL, REG_TE_EN, true); + dsi_wfld(link, DSI_CMD_MODE_CTL, TE_TIMEOUT, 0x3FF); + settings = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_ENUM(TE_REQ) | + DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT(false) | + DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID(virt_id) | + DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE(2) | + DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN(true) | + DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_ENUM(DCS_SHORT_WRITE_1); + dsi_wreg(link, DSI_DIRECT_CMD_MAIN_SETTINGS, settings); + dsi_wreg(link, DSI_DIRECT_CMD_WRDAT0, DCS_CMD_SET_TEAR_ON); + dsi_wreg(link, DSI_DIRECT_CMD_STS_CLR, + DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR(true)); + dsi_wfld(link, DSI_DIRECT_CMD_STS_CTL, TE_RECEIVED_EN, true); + dsi_wreg(link, DSI_CMD_MODE_STS_CLR, + DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR(true)); + dsi_wfld(link, DSI_CMD_MODE_STS_CTL, ERR_NO_TE_EN, true); + dsi_wreg(link, DSI_DIRECT_CMD_SEND, true); +} + +/* MCDE channels */ +static struct mcde_chnl_state *_mcde_chnl_get(enum mcde_chnl chnl_id, + enum mcde_fifo fifo, const struct mcde_port *port) +{ + int i; + struct mcde_chnl_state *chnl = NULL; + struct mcde_platform_data *pdata = mcde_dev->dev.platform_data; + + static struct mcde_col_transform ycbcr_2_rgb = { + /* Note that in MCDE YUV 422 pixels come as VYU pixels */ + .matrix = { + {0xff30, 0x012a, 0xff9c}, + {0x0000, 0x012a, 0x0204}, + {0x0199, 0x012a, 0x0000}, + }, + .offset = {0x0088, 0xfeeb, 0xff21}, + }; + + static struct mcde_col_transform rgb_2_ycbcr = { + .matrix = { + {0x0042, 0x0081, 0x0019}, + {0xffda, 
0xffb6, 0x0070}, + {0x0070, 0xffa2, 0xffee}, + }, + .offset = {0x0010, 0x0080, 0x0080}, + }; + + /* Allocate channel */ + for (i = 0; i < num_channels; i++) { + if (chnl_id == channels[i].id) + chnl = &channels[i]; + } + if (!chnl) { + dev_dbg(&mcde_dev->dev, "Invalid channel, chnl=%d\n", chnl_id); + return ERR_PTR(-EINVAL); + } + if (chnl->reserved) { + dev_dbg(&mcde_dev->dev, "Channel in use, chnl=%d\n", chnl_id); + return ERR_PTR(-EBUSY); + } + + chnl->port = *port; + chnl->fifo = fifo; + chnl->formatter_updated = false; + chnl->ycbcr_2_rgb = ycbcr_2_rgb; + chnl->rgb_2_ycbcr = rgb_2_ycbcr; + + chnl->blend_en = true; + chnl->blend_ctrl = MCDE_CRA0_BLENDCTRL_SOURCE; + chnl->alpha_blend = 0xFF; + chnl->rotbuf1 = pdata->rotbuf1; + chnl->rotbuf2 = pdata->rotbuf2; + chnl->rotbufsize = pdata->rotbufsize; + + _mcde_chnl_apply(chnl); + chnl->reserved = true; + + if (chnl->port.type == MCDE_PORTTYPE_DPI) { + chnl->clk_dpi = clk_get(&mcde_dev->dev, CLK_DPI); + if (chnl->port.phy.dpi.tv_mode) + chnl->vcmp_per_field = true; + } else if (chnl->port.type == MCDE_PORTTYPE_DSI && + dsi_use_clk_framework) { + char dsihs_name[10]; + char dsilp_name[10]; + + sprintf(dsihs_name, "dsihs%d", port->link); + sprintf(dsilp_name, "dsilp%d", port->link); + + chnl->clk_dsi_lp = clk_get(&mcde_dev->dev, dsilp_name); + chnl->clk_dsi_hs = clk_get(&mcde_dev->dev, dsihs_name); + if (port->phy.dsi.lp_freq != clk_round_rate(chnl->clk_dsi_lp, + port->phy.dsi.lp_freq)) + dev_warn(&mcde_dev->dev, "Could not set dsi lp freq" + " to %d\n", port->phy.dsi.lp_freq); + WARN_ON_ONCE(clk_set_rate(chnl->clk_dsi_lp, + port->phy.dsi.lp_freq)); + if (port->phy.dsi.hs_freq != clk_round_rate(chnl->clk_dsi_hs, + port->phy.dsi.hs_freq)) + dev_warn(&mcde_dev->dev, "Could not set dsi hs freq" + " to %d\n", port->phy.dsi.hs_freq); + WARN_ON_ONCE(clk_set_rate(chnl->clk_dsi_hs, + port->phy.dsi.hs_freq)); + } + return chnl; +} + +static int _mcde_chnl_apply(struct mcde_chnl_state *chnl) +{ + bool roten = false; + u8 rotdir 
= 0; + + if (chnl->rotation == MCDE_DISPLAY_ROT_90_CCW) { + roten = true; + rotdir = MCDE_ROTACONF_ROTDIR_CCW; + } else if (chnl->rotation == MCDE_DISPLAY_ROT_90_CW) { + roten = true; + rotdir = MCDE_ROTACONF_ROTDIR_CW; + } + /* REVIEW: 180 deg? */ + + chnl->regs.bpp = portfmt2bpp(chnl->port.pixel_format); + chnl->regs.roten = roten; + chnl->regs.rotdir = rotdir; + chnl->regs.rotbuf1 = chnl->rotbuf1; + chnl->regs.rotbuf2 = chnl->rotbuf2; + chnl->regs.rotbufsize = chnl->rotbufsize; + chnl->regs.palette_enable = chnl->palette_enable; + chnl->regs.map_r = chnl->map_r; + chnl->regs.map_g = chnl->map_g; + chnl->regs.map_b = chnl->map_b; + if (chnl->port.type == MCDE_PORTTYPE_DSI) { + chnl->regs.clksel = MCDE_CRA1_CLKSEL_MCDECLK; + chnl->regs.dsipacking = + portfmt2dsipacking(chnl->port.pixel_format); + } else if (chnl->port.type == MCDE_PORTTYPE_DPI) { + if (chnl->port.phy.dpi.tv_mode) { + chnl->regs.internal_clk = false; + chnl->regs.bcd = true; + if (chnl->id == MCDE_CHNL_A) + chnl->regs.clksel = MCDE_CRA1_CLKSEL_TV1CLK; + else + chnl->regs.clksel = MCDE_CRA1_CLKSEL_TV2CLK; + } else { + chnl->regs.internal_clk = true; + chnl->regs.clksel = MCDE_CRA1_CLKSEL_CLKPLL72; + chnl->regs.cdwin = + portfmt2cdwin(chnl->port.pixel_format); + chnl->regs.bcd = (chnl->port.phy.dpi.clock_div < 2); + if (!chnl->regs.bcd) + chnl->regs.pcd = + chnl->port.phy.dpi.clock_div - 2; + } + dpi_video_mode_apply(chnl); + } + + chnl->regs.blend_ctrl = chnl->blend_ctrl; + chnl->regs.blend_en = chnl->blend_en; + chnl->regs.alpha_blend = chnl->alpha_blend; + + chnl->regs.dirty = true; + + dev_vdbg(&mcde_dev->dev, "Channel applied, chnl=%d\n", chnl->id); + return 0; +} + +static void setup_channel(struct mcde_chnl_state *chnl) +{ + set_channel_state_sync(chnl, CHNLSTATE_SETUP); + + if (chnl->port.type == MCDE_PORTTYPE_DPI && chnl->tv_regs.dirty) + update_dpi_registers(chnl->id, &chnl->tv_regs); + if ((chnl->id == MCDE_CHNL_A || chnl->id == MCDE_CHNL_B) && + chnl->col_regs.dirty) + 
update_col_registers(chnl->id, &chnl->col_regs); + if (chnl->regs.dirty) + update_channel_registers(chnl->id, &chnl->regs, &chnl->port, + chnl->fifo, &chnl->vmode); +} + +static void chnl_update_continous(struct mcde_chnl_state *chnl, + bool tripple_buffer) +{ + if (chnl->state == CHNLSTATE_RUNNING) { + if (!tripple_buffer) + wait_for_vcmp(chnl); + return; + } + + setup_channel(chnl); + if (chnl->port.sync_src == MCDE_SYNCSRC_TE0) { + mcde_wfld(MCDE_CRC, SYCEN0, true); + } else if (chnl->port.sync_src == MCDE_SYNCSRC_TE1) { + mcde_wfld(MCDE_VSCRC1, VSSEL, 1); + mcde_wfld(MCDE_CRC, SYCEN1, true); + } + + enable_flow(chnl); +} + +static void chnl_update_non_continous(struct mcde_chnl_state *chnl) +{ + /* Commit settings to registers */ + setup_channel(chnl); + + if (chnl->port.type == MCDE_PORTTYPE_DSI) { + if (chnl->port.sync_src == MCDE_SYNCSRC_OFF) { + if (chnl->port.frame_trig == MCDE_TRIG_SW) { + do_softwaretrig(chnl); + } else { + enable_flow(chnl); + disable_flow(chnl); + } + dev_vdbg(&mcde_dev->dev, "Channel update (no sync), " + "chnl=%d\n", chnl->id); + } else if (chnl->port.sync_src == MCDE_SYNCSRC_BTA) { + if (chnl->power_mode == MCDE_DISPLAY_PM_ON) { + dsi_te_request(chnl); + } else { + if (chnl->port.frame_trig == MCDE_TRIG_SW) + do_softwaretrig(chnl); + } + if (chnl->port.frame_trig == MCDE_TRIG_HW) { + /* + * During BTA TE the MCDE block will be stalled, + * once the TE is received the DMA trig will + * happen + */ + enable_flow(chnl); + disable_flow(chnl); + } + } + } +} + +static void chnl_update_overlay(struct mcde_chnl_state *chnl, + struct mcde_ovly_state *ovly) +{ + if (!ovly) + return; + + if (ovly->regs.dirty_buf) { + if (!chnl->port.update_auto_trig) + set_channel_state_sync(chnl, CHNLSTATE_SETUP); + update_overlay_registers_on_the_fly(ovly->idx, &ovly->regs); + mcde_debugfs_overlay_update(chnl->id, ovly != chnl->ovly0); + } + if (ovly->regs.dirty) { + if (!chnl->port.update_auto_trig) + set_channel_state_sync(chnl, CHNLSTATE_SETUP); + 
chnl_ovly_pixel_format_apply(chnl, ovly); + update_overlay_registers(ovly->idx, &ovly->regs, &chnl->port, + chnl->fifo, chnl->regs.x, chnl->regs.y, + chnl->regs.ppl, chnl->regs.lpf, ovly->stride, + chnl->vmode.interlaced, chnl->rotation); + if (chnl->id == MCDE_CHNL_A || chnl->id == MCDE_CHNL_B) + update_col_registers(chnl->id, &chnl->col_regs); + } +} + +static int _mcde_chnl_update(struct mcde_chnl_state *chnl, + struct mcde_rectangle *update_area, + bool tripple_buffer) +{ + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + /* TODO: lock & make wait->trig async */ + if (!chnl->enabled || !update_area + || (update_area->w == 0 && update_area->h == 0)) { + return -EINVAL; + } + + if (chnl->port.update_auto_trig && tripple_buffer) + wait_for_vcmp(chnl); + + chnl->regs.x = update_area->x; + chnl->regs.y = update_area->y; + /* TODO Crop against video_mode.xres and video_mode.yres */ + chnl->regs.ppl = update_area->w; + chnl->regs.lpf = update_area->h; + if (chnl->port.type == MCDE_PORTTYPE_DPI && + chnl->port.phy.dpi.tv_mode) { + /* subtract border */ + chnl->regs.ppl -= chnl->tv_regs.dho + chnl->tv_regs.alw; + /* subtract double borders, ie. 
for both fields */ + chnl->regs.lpf -= 2 * (chnl->tv_regs.dvo + chnl->tv_regs.bsl); + } else if (chnl->port.type == MCDE_PORTTYPE_DSI && + chnl->vmode.interlaced) + chnl->regs.lpf /= 2; + + chnl_update_overlay(chnl, chnl->ovly0); + chnl_update_overlay(chnl, chnl->ovly1); + + if (chnl->port.update_auto_trig) + chnl_update_continous(chnl, tripple_buffer); + else + chnl_update_non_continous(chnl); + + dev_vdbg(&mcde_dev->dev, "Channel updated, chnl=%d\n", chnl->id); + mcde_debugfs_channel_update(chnl->id); + return 0; +} + +static int _mcde_chnl_enable(struct mcde_chnl_state *chnl) +{ + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + chnl->enabled = true; + return 0; +} + +/* API entry points */ +/* MCDE channels */ +struct mcde_chnl_state *mcde_chnl_get(enum mcde_chnl chnl_id, + enum mcde_fifo fifo, const struct mcde_port *port) +{ + struct mcde_chnl_state *chnl; + + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + chnl = _mcde_chnl_get(chnl_id, fifo, port); + dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__); + + return chnl; +} + +int mcde_chnl_set_pixel_format(struct mcde_chnl_state *chnl, + enum mcde_port_pix_fmt pix_fmt) +{ + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + if (!chnl->reserved) + return -EINVAL; + chnl->port.pixel_format = pix_fmt; + + dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__); + + return 0; +} + +int mcde_chnl_set_palette(struct mcde_chnl_state *chnl, + struct mcde_palette_table *palette) +{ + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + if (!chnl->reserved) + return -EINVAL; + if (palette != NULL) { + chnl->map_r = palette->map_col_ch0; + chnl->map_g = palette->map_col_ch1; + chnl->map_b = palette->map_col_ch2; + chnl->palette_enable = true; + } else { + chnl->map_r = NULL; + chnl->map_g = NULL; + chnl->map_b = NULL; + chnl->palette_enable = false; + } + + dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__); + return 0; +} + +void mcde_chnl_set_col_convert(struct mcde_chnl_state *chnl, + struct mcde_col_transform *transform, + enum 
mcde_col_convert convert) +{ + switch (convert) { + case MCDE_CONVERT_RGB_2_YCBCR: + memcpy(&chnl->rgb_2_ycbcr, transform, + sizeof(struct mcde_col_transform)); + /* force update: */ + if (chnl->transform == &chnl->rgb_2_ycbcr) { + chnl->transform = NULL; + chnl->ovly0->dirty = true; + chnl->ovly1->dirty = true; + } + break; + case MCDE_CONVERT_YCBCR_2_RGB: + memcpy(&chnl->ycbcr_2_rgb, transform, + sizeof(struct mcde_col_transform)); + /* force update: */ + if (chnl->transform == &chnl->ycbcr_2_rgb) { + chnl->transform = NULL; + chnl->ovly0->dirty = true; + chnl->ovly1->dirty = true; + } + break; + default: + /* Trivial transforms are handled internally */ + dev_warn(&mcde_dev->dev, + "%s: unsupported col convert\n", __func__); + break; + } +} + +int mcde_chnl_set_video_mode(struct mcde_chnl_state *chnl, + struct mcde_video_mode *vmode) +{ + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + if (chnl == NULL || vmode == NULL) + return -EINVAL; + + chnl->vmode = *vmode; + + chnl->ovly0->dirty = true; + if (chnl->ovly1) + chnl->ovly1->dirty = true; + + dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__); + + return 0; +} +EXPORT_SYMBOL(mcde_chnl_set_video_mode); + +int mcde_chnl_set_rotation(struct mcde_chnl_state *chnl, + enum mcde_display_rotation rotation) +{ + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + if (!chnl->reserved) + return -EINVAL; + + if ((rotation == MCDE_DISPLAY_ROT_90_CW || + rotation == MCDE_DISPLAY_ROT_90_CCW) && + (chnl->id != MCDE_CHNL_A && chnl->id != MCDE_CHNL_B)) + return -EINVAL; + + chnl->rotation = rotation; + + dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__); + + return 0; +} + +int mcde_chnl_set_power_mode(struct mcde_chnl_state *chnl, + enum mcde_display_power_mode power_mode) +{ + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + if (!chnl->reserved) + return -EINVAL; + + chnl->power_mode = power_mode; + + dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__); + + return 0; +} + +int mcde_chnl_apply(struct mcde_chnl_state *chnl) +{ + int ret ; + 
+ dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + if (!chnl->reserved) + return -EINVAL; + + mcde_lock(__func__, __LINE__); + ret = _mcde_chnl_apply(chnl); + mcde_unlock(__func__, __LINE__); + + dev_vdbg(&mcde_dev->dev, "%s exit with ret %d\n", __func__, ret); + + return ret; +} + +int mcde_chnl_update(struct mcde_chnl_state *chnl, + struct mcde_rectangle *update_area, + bool tripple_buffer) +{ + int ret; + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + if (!chnl->reserved) + return -EINVAL; + + mcde_lock(__func__, __LINE__); + enable_mcde_hw(); + if (!chnl->formatter_updated) + (void)update_channel_static_registers(chnl); + + if (chnl->regs.roten && !chnl->esram_is_enabled) { + WARN_ON_ONCE(regulator_enable(regulator_esram_epod)); + chnl->esram_is_enabled = true; + } else if (!chnl->regs.roten && chnl->esram_is_enabled) { + WARN_ON_ONCE(regulator_disable(regulator_esram_epod)); + chnl->esram_is_enabled = false; + } + + ret = _mcde_chnl_update(chnl, update_area, tripple_buffer); + + mcde_unlock(__func__, __LINE__); + + dev_vdbg(&mcde_dev->dev, "%s exit with ret %d\n", __func__, ret); + + return ret; +} + +void mcde_chnl_put(struct mcde_chnl_state *chnl) +{ + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + if (chnl->enabled) { + stop_channel(chnl); + cancel_delayed_work(&hw_timeout_work); + disable_mcde_hw(false, true); + chnl->enabled = false; + } + + chnl->reserved = false; + if (chnl->port.type == MCDE_PORTTYPE_DPI) { + clk_put(chnl->clk_dpi); + if (chnl->port.phy.dpi.tv_mode) { + chnl->vcmp_per_field = false; + chnl->even_vcmp = false; + } + } else if (chnl->port.type == MCDE_PORTTYPE_DSI) { + if (dsi_use_clk_framework) { + clk_put(chnl->clk_dsi_lp); + clk_put(chnl->clk_dsi_hs); + } + } + + dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__); +} + +void mcde_chnl_stop_flow(struct mcde_chnl_state *chnl) +{ + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + mcde_lock(__func__, __LINE__); + if (mcde_is_enabled && chnl->enabled) + stop_channel(chnl); + 
mcde_unlock(__func__, __LINE__); + + dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__); +} + +void mcde_chnl_enable(struct mcde_chnl_state *chnl) +{ + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + mcde_lock(__func__, __LINE__); + _mcde_chnl_enable(chnl); + mcde_unlock(__func__, __LINE__); + + dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__); +} + +void mcde_chnl_disable(struct mcde_chnl_state *chnl) +{ + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + mcde_lock(__func__, __LINE__); + cancel_delayed_work(&hw_timeout_work); + /* The channel must be stopped before it is disabled */ + WARN_ON_ONCE(chnl->state == CHNLSTATE_RUNNING); + disable_mcde_hw(false, true); + chnl->enabled = false; + mcde_unlock(__func__, __LINE__); + + dev_vdbg(&mcde_dev->dev, "%s exit\n", __func__); +} + +/* MCDE overlays */ +struct mcde_ovly_state *mcde_ovly_get(struct mcde_chnl_state *chnl) +{ + struct mcde_ovly_state *ovly; + + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + if (!chnl->reserved) + return ERR_PTR(-EINVAL); + + if (!chnl->ovly0->inuse) + ovly = chnl->ovly0; + else if (chnl->ovly1 && !chnl->ovly1->inuse) + ovly = chnl->ovly1; + else + ovly = ERR_PTR(-EBUSY); + + if (!IS_ERR(ovly)) { + ovly->inuse = true; + ovly->paddr = 0; + ovly->stride = 0; + ovly->pix_fmt = MCDE_OVLYPIXFMT_RGB565; + ovly->src_x = 0; + ovly->src_y = 0; + ovly->dst_x = 0; + ovly->dst_y = 0; + ovly->dst_z = 0; + ovly->w = 0; + ovly->h = 0; + ovly->alpha_value = 0xFF; + ovly->alpha_source = MCDE_OVL1CONF2_BP_PER_PIXEL_ALPHA; + ovly->dirty = true; + mcde_ovly_apply(ovly); + } + + return ovly; +} + +void mcde_ovly_put(struct mcde_ovly_state *ovly) +{ + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + if (!ovly->inuse) + return; + if (ovly->regs.enabled) { + ovly->paddr = 0; + ovly->dirty = true; + mcde_ovly_apply(ovly);/* REVIEW: API call calling API call! 
*/ + } + ovly->inuse = false; +} + +void mcde_ovly_set_source_buf(struct mcde_ovly_state *ovly, u32 paddr) +{ + if (!ovly->inuse) + return; + + ovly->dirty = paddr == 0 || ovly->paddr == 0; + ovly->dirty_buf = true; + + ovly->paddr = paddr; +} + +void mcde_ovly_set_source_info(struct mcde_ovly_state *ovly, + u32 stride, enum mcde_ovly_pix_fmt pix_fmt) +{ + if (!ovly->inuse) + return; + + ovly->stride = stride; + ovly->pix_fmt = pix_fmt; + ovly->dirty = true; +} + +void mcde_ovly_set_source_area(struct mcde_ovly_state *ovly, + u16 x, u16 y, u16 w, u16 h) +{ + if (!ovly->inuse) + return; + + ovly->src_x = x; + ovly->src_y = y; + ovly->w = w; + ovly->h = h; + ovly->dirty = true; +} + +void mcde_ovly_set_dest_pos(struct mcde_ovly_state *ovly, u16 x, u16 y, u8 z) +{ + if (!ovly->inuse) + return; + + ovly->dst_x = x; + ovly->dst_y = y; + ovly->dst_z = z; + ovly->dirty = true; +} + +void mcde_ovly_apply(struct mcde_ovly_state *ovly) +{ + if (!ovly->inuse) + return; + + mcde_lock(__func__, __LINE__); + + if (ovly->dirty || ovly->dirty_buf) { + ovly->regs.ch_id = ovly->chnl->id; + ovly->regs.enabled = ovly->paddr != 0; + ovly->regs.baseaddress0 = ovly->paddr; + ovly->regs.baseaddress1 = + ovly->regs.baseaddress0 + ovly->stride; + ovly->regs.dirty_buf = true; + ovly->dirty_buf = false; + } + if (!ovly->dirty) { + mcde_unlock(__func__, __LINE__); + return; + } + + switch (ovly->pix_fmt) {/* REVIEW: Extract to table */ + case MCDE_OVLYPIXFMT_RGB565: + ovly->regs.bits_per_pixel = 16; + ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_RGB565; + ovly->regs.bgr = false; + ovly->regs.bebo = false; + ovly->regs.opq = true; + break; + case MCDE_OVLYPIXFMT_RGBA5551: + ovly->regs.bits_per_pixel = 16; + ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_IRGB1555; + ovly->regs.bgr = false; + ovly->regs.bebo = false; + ovly->regs.opq = false; + break; + case MCDE_OVLYPIXFMT_RGBA4444: + ovly->regs.bits_per_pixel = 16; + ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_ARGB4444; + ovly->regs.bgr = false; + ovly->regs.bebo = 
false; + ovly->regs.opq = false; + break; + case MCDE_OVLYPIXFMT_RGB888: + ovly->regs.bits_per_pixel = 24; + ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_RGB888; + ovly->regs.bgr = false; + ovly->regs.bebo = false; + ovly->regs.opq = true; + break; + case MCDE_OVLYPIXFMT_RGBX8888: + ovly->regs.bits_per_pixel = 32; + ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_XRGB8888; + ovly->regs.bgr = false; + ovly->regs.bebo = true; + ovly->regs.opq = true; + break; + case MCDE_OVLYPIXFMT_RGBA8888: + ovly->regs.bits_per_pixel = 32; + ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_ARGB8888; + ovly->regs.bgr = false; + ovly->regs.bebo = false; + ovly->regs.opq = false; + break; + case MCDE_OVLYPIXFMT_YCbCr422: + ovly->regs.bits_per_pixel = 16; + ovly->regs.bpp = MCDE_EXTSRC0CONF_BPP_YCBCR422; + ovly->regs.bgr = false; + ovly->regs.bebo = false; + ovly->regs.opq = true; + break; + default: + break; + } + + ovly->regs.ppl = ovly->w; + ovly->regs.lpf = ovly->h; + ovly->regs.cropx = ovly->src_x; + ovly->regs.cropy = ovly->src_y; + ovly->regs.xpos = ovly->dst_x; + ovly->regs.ypos = ovly->dst_y; + ovly->regs.z = ovly->dst_z > 0; /* 0 or 1 */ + ovly->regs.col_conv = MCDE_OVL0CR_COLCCTRL_DISABLED; + ovly->regs.alpha_source = ovly->alpha_source; + ovly->regs.alpha_value = ovly->alpha_value; + + ovly->regs.dirty = true; + ovly->dirty = false; + + mcde_unlock(__func__, __LINE__); + + dev_vdbg(&mcde_dev->dev, "Overlay applied, idx=%d chnl=%d\n", + ovly->idx, ovly->chnl->id); +} + +static int init_clocks_and_power(struct platform_device *pdev) +{ + int ret = 0; + struct mcde_platform_data *pdata = pdev->dev.platform_data; + + if (pdata->regulator_mcde_epod_id) { + regulator_mcde_epod = regulator_get(&pdev->dev, + pdata->regulator_mcde_epod_id); + if (IS_ERR(regulator_mcde_epod)) { + ret = PTR_ERR(regulator_mcde_epod); + dev_warn(&pdev->dev, + "%s: Failed to get regulator '%s'\n", + __func__, pdata->regulator_mcde_epod_id); + regulator_mcde_epod = NULL; + return ret; + } + } else { + dev_warn(&pdev->dev, "%s: No 
mcde regulator id supplied\n", + __func__); + return -EINVAL; + } + + if (pdata->regulator_esram_epod_id) { + regulator_esram_epod = regulator_get(&pdev->dev, + pdata->regulator_esram_epod_id); + if (IS_ERR(regulator_esram_epod)) { + ret = PTR_ERR(regulator_esram_epod); + dev_warn(&pdev->dev, + "%s: Failed to get regulator '%s'\n", + __func__, pdata->regulator_esram_epod_id); + regulator_esram_epod = NULL; + goto regulator_esram_err; + } + } else { + dev_warn(&pdev->dev, "%s: No esram regulator id supplied\n", + __func__); + } + + if (pdata->regulator_vana_id) { + regulator_vana = regulator_get(&pdev->dev, + pdata->regulator_vana_id); + if (IS_ERR(regulator_vana)) { + ret = PTR_ERR(regulator_vana); + dev_warn(&pdev->dev, + "%s: Failed to get regulator '%s'\n", + __func__, pdata->regulator_vana_id); + regulator_vana = NULL; + goto regulator_vana_err; + } + } else { + dev_dbg(&pdev->dev, "%s: No vana regulator id supplied\n", + __func__); + } + + if (!dsi_use_clk_framework) { + clock_dsi = clk_get(&pdev->dev, pdata->clock_dsi_id); + if (IS_ERR(clock_dsi)) + dev_dbg(&pdev->dev, "%s: Failed to get clock '%s'\n", + __func__, pdata->clock_dsi_id); + + clock_dsi_lp = clk_get(&pdev->dev, pdata->clock_dsi_lp_id); + if (IS_ERR(clock_dsi_lp)) + dev_dbg(&pdev->dev, "%s: Failed to get clock '%s'\n", + __func__, pdata->clock_dsi_lp_id); + } + + clock_mcde = clk_get(&pdev->dev, CLK_MCDE); + if (IS_ERR(clock_mcde)) { + ret = PTR_ERR(clock_mcde); + dev_warn(&pdev->dev, "%s: Failed to get mcde_clk\n", __func__); + goto clk_mcde_err; + } + + return ret; + +clk_mcde_err: + if (!dsi_use_clk_framework) { + clk_put(clock_dsi_lp); + clk_put(clock_dsi); + } + + if (regulator_vana) + regulator_put(regulator_vana); +regulator_vana_err: + if (regulator_esram_epod) + regulator_put(regulator_esram_epod); +regulator_esram_err: + regulator_put(regulator_mcde_epod); + return ret; +} + +static void remove_clocks_and_power(struct platform_device *pdev) +{ + /* REVIEW: Release only if exist */ + /* 
REVIEW: Remove make sure MCDE is done */ + if (!dsi_use_clk_framework) { + clk_put(clock_dsi_lp); + clk_put(clock_dsi); + } + clk_put(clock_mcde); + if (regulator_vana) + regulator_put(regulator_vana); + regulator_put(regulator_mcde_epod); + regulator_put(regulator_esram_epod); +} + +static int probe_hw(struct platform_device *pdev) +{ + int i; + int ret; + u32 pid; + struct resource *res; + + dev_info(&mcde_dev->dev, "Probe HW\n"); + + /* Get MCDE HW version */ + regulator_enable(regulator_mcde_epod); + clk_enable(clock_mcde); + pid = mcde_rreg(MCDE_PID); + + dev_info(&mcde_dev->dev, "MCDE HW revision 0x%.8X\n", pid); + + clk_disable(clock_mcde); + regulator_disable(regulator_mcde_epod); + + switch (pid) { + case MCDE_VERSION_3_0_8: + num_dsilinks = 3; + num_channels = 4; + num_overlays = 6; + dsi_ifc_is_supported = true; + input_fifo_size = 128; + output_fifo_ab_size = 640; + output_fifo_c0c1_size = 160; + dsi_use_clk_framework = false; + dev_info(&mcde_dev->dev, "db8500 V2 HW\n"); + break; + case MCDE_VERSION_4_0_4: + num_dsilinks = 2; + num_channels = 2; + num_overlays = 3; + input_fifo_size = 80; + output_fifo_ab_size = 320; + dsi_ifc_is_supported = false; + dsi_use_clk_framework = false; + dev_info(&mcde_dev->dev, "db5500 V2 HW\n"); + break; + case MCDE_VERSION_4_1_3: + num_dsilinks = 3; + num_channels = 4; + num_overlays = 6; + dsi_ifc_is_supported = true; + input_fifo_size = 192; + output_fifo_ab_size = 640; + output_fifo_c0c1_size = 160; + dsi_use_clk_framework = false; + dev_info(&mcde_dev->dev, "db9540 V1 HW\n"); + break; + case MCDE_VERSION_3_0_5: + /* Intentional */ + case MCDE_VERSION_1_0_4: + /* Intentional */ + default: + dev_err(&mcde_dev->dev, "Unsupported HW version\n"); + ret = -ENOTSUPP; + goto unsupported_hw; + break; + } + + channels = kzalloc(num_channels * sizeof(struct mcde_chnl_state), + GFP_KERNEL); + if (!channels) { + ret = -ENOMEM; + goto failed_channels_alloc; + } + + overlays = kzalloc(num_overlays * sizeof(struct mcde_ovly_state), 
+ GFP_KERNEL); + if (!overlays) { + ret = -ENOMEM; + goto failed_overlays_alloc; + } + + dsiio = kzalloc(num_dsilinks * sizeof(*dsiio), GFP_KERNEL); + if (!dsiio) { + ret = -ENOMEM; + goto failed_dsi_alloc; + } + + for (i = 0; i < num_dsilinks; i++) { + res = platform_get_resource(pdev, IORESOURCE_MEM, 1+i); + if (!res) { + dev_dbg(&pdev->dev, "No DSI%d io defined\n", i); + ret = -EINVAL; + goto failed_get_dsi_io; + } + dsiio[i] = ioremap(res->start, res->end - res->start + 1); + if (!dsiio[i]) { + dev_dbg(&pdev->dev, "MCDE DSI%d iomap failed\n", i); + ret = -EINVAL; + goto failed_map_dsi_io; + } + dev_info(&pdev->dev, "MCDE DSI%d iomap: 0x%.8X->0x%.8X\n", + i, (u32)res->start, (u32)dsiio[i]); + } + + /* Init MCDE */ + for (i = 0; i < num_overlays; i++) + overlays[i].idx = i; + + channels[0].ovly0 = &overlays[0]; + channels[0].ovly1 = &overlays[1]; + channels[1].ovly0 = &overlays[2]; + + if (pid == MCDE_VERSION_3_0_8) { + channels[1].ovly1 = &overlays[3]; + channels[2].ovly0 = &overlays[4]; + channels[3].ovly0 = &overlays[5]; + } + + mcde_debugfs_create(&mcde_dev->dev); + for (i = 0; i < num_channels; i++) { + channels[i].id = i; + + channels[i].ovly0->chnl = &channels[i]; + if (channels[i].ovly1) + channels[i].ovly1->chnl = &channels[i]; + + init_waitqueue_head(&channels[i].state_waitq); + init_waitqueue_head(&channels[i].vcmp_waitq); + init_timer(&channels[i].dsi_te_timer); + channels[i].dsi_te_timer.function = + dsi_te_timer_function; + channels[i].dsi_te_timer.data = i; + + mcde_debugfs_channel_create(i, &channels[i]); + mcde_debugfs_overlay_create(i, 0); + if (channels[i].ovly1) + mcde_debugfs_overlay_create(i, 1); + } + (void) prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, "mcde", 100); + mcde_clk_rate = clk_get_rate(clock_mcde); + dev_info(&mcde_dev->dev, "MCDE_CLK is %d MHz\n", mcde_clk_rate); + prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, "mcde"); + + return 0; + +failed_map_dsi_io: + for (i = 0; i < num_dsilinks; i++) { + if (dsiio[i]) + 
iounmap(dsiio[i]); + } +failed_get_dsi_io: + kfree(dsiio); + dsiio = NULL; +failed_dsi_alloc: + kfree(overlays); + overlays = NULL; +failed_overlays_alloc: + kfree(channels); + channels = NULL; +unsupported_hw: +failed_channels_alloc: + num_dsilinks = 0; + num_channels = 0; + num_overlays = 0; + return ret; +} + +static int __devinit mcde_probe(struct platform_device *pdev) +{ + int ret = 0; + struct resource *res; + struct mcde_platform_data *pdata = pdev->dev.platform_data; + + if (!pdata) { + dev_dbg(&pdev->dev, "No platform data\n"); + return -EINVAL; + } + + mcde_dev = pdev; + + /* Hook up irq */ + mcde_irq = platform_get_irq(pdev, 0); + if (mcde_irq <= 0) { + dev_dbg(&pdev->dev, "No irq defined\n"); + ret = -EINVAL; + goto failed_irq_get; + } + + /* Map I/O */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_dbg(&pdev->dev, "No MCDE io defined\n"); + ret = -EINVAL; + goto failed_get_mcde_io; + } + mcdeio = ioremap(res->start, res->end - res->start + 1); + if (!mcdeio) { + dev_dbg(&pdev->dev, "MCDE iomap failed\n"); + ret = -EINVAL; + goto failed_map_mcde_io; + } + dev_info(&pdev->dev, "MCDE iomap: 0x%.8X->0x%.8X\n", + (u32)res->start, (u32)mcdeio); + + ret = init_clocks_and_power(pdev); + if (ret < 0) { + dev_warn(&pdev->dev, "%s: init_clocks_and_power failed\n" + , __func__); + goto failed_init_clocks; + } + + INIT_DELAYED_WORK_DEFERRABLE(&hw_timeout_work, work_sleep_function); + + ret = probe_hw(pdev); + if (ret) + goto failed_probe_hw; + + ret = enable_mcde_hw(); + if (ret) + goto failed_mcde_enable; + + return 0; + +failed_mcde_enable: +failed_probe_hw: + remove_clocks_and_power(pdev); +failed_init_clocks: + iounmap(mcdeio); +failed_map_mcde_io: +failed_get_mcde_io: +failed_irq_get: + return ret; +} + +static int __devexit mcde_remove(struct platform_device *pdev) +{ + struct mcde_chnl_state *chnl = &channels[0]; + + for (; chnl < &channels[num_channels]; chnl++) { + if (del_timer(&chnl->dsi_te_timer)) + dev_vdbg(&mcde_dev->dev, 
+ "%s dsi timer could not be stopped\n" + , __func__); + } + + remove_clocks_and_power(pdev); + return 0; +} + +#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM) +static int mcde_resume(struct platform_device *pdev) +{ + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + mcde_lock(__func__, __LINE__); + + if (enable_mcde_hw()) { + mcde_unlock(__func__, __LINE__); + return -EINVAL; + } + + mcde_unlock(__func__, __LINE__); + + return 0; +} + +static int mcde_suspend(struct platform_device *pdev, pm_message_t state) +{ + int ret; + + dev_vdbg(&mcde_dev->dev, "%s\n", __func__); + + mcde_lock(__func__, __LINE__); + + cancel_delayed_work(&hw_timeout_work); + + if (!mcde_is_enabled) { + mcde_unlock(__func__, __LINE__); + return 0; + } + disable_mcde_hw(true, true); + + mcde_unlock(__func__, __LINE__); + + return ret; +} +#endif + +static struct platform_driver mcde_driver = { + .probe = mcde_probe, + .remove = mcde_remove, +#if !defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_PM) + .suspend = mcde_suspend, + .resume = mcde_resume, +#else + .suspend = NULL, + .resume = NULL, +#endif + .driver = { + .name = "mcde", + }, +}; + +int __init mcde_init(void) +{ + mutex_init(&mcde_hw_lock); + return platform_driver_register(&mcde_driver); +} + +void mcde_exit(void) +{ + /* REVIEW: shutdown MCDE? */ + platform_driver_unregister(&mcde_driver); +} diff --git a/drivers/video/mcde/mcde_mod.c b/drivers/video/mcde/mcde_mod.c new file mode 100644 index 00000000000..60df0d4965f --- /dev/null +++ b/drivers/video/mcde/mcde_mod.c @@ -0,0 +1,69 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * + * ST-Ericsson MCDE driver + * + * Author: Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com> + * for ST-Ericsson. + * + * License terms: GNU General Public License (GPL), version 2. 
+ */ +#include <linux/init.h> +#include <linux/module.h> + +#include <video/mcde.h> +#include <video/mcde_fb.h> +#include <video/mcde_dss.h> +#include <video/mcde_display.h> + +/* Module init */ + +static int __init mcde_subsystem_init(void) +{ + int ret; + pr_info("MCDE subsystem init begin\n"); + + /* MCDE module init sequence */ + ret = mcde_init(); + if (ret) + goto mcde_failed; + ret = mcde_display_init(); + if (ret) + goto mcde_display_failed; + ret = mcde_dss_init(); + if (ret) + goto mcde_dss_failed; + ret = mcde_fb_init(); + if (ret) + goto mcde_fb_failed; + pr_info("MCDE subsystem init done\n"); + + goto done; +mcde_fb_failed: + mcde_dss_exit(); +mcde_dss_failed: + mcde_display_exit(); +mcde_display_failed: + mcde_exit(); +mcde_failed: +done: + return ret; +} +#ifdef MODULE +module_init(mcde_subsystem_init); +#else +fs_initcall(mcde_subsystem_init); +#endif + +static void __exit mcde_module_exit(void) +{ + mcde_exit(); + mcde_display_exit(); + mcde_dss_exit(); +} +module_exit(mcde_module_exit); + +MODULE_AUTHOR("Marcus Lorentzon <marcus.xm.lorentzon@stericsson.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ST-Ericsson MCDE driver"); + diff --git a/drivers/video/mcde/mcde_regs.h b/drivers/video/mcde/mcde_regs.h new file mode 100644 index 00000000000..0eece3faea2 --- /dev/null +++ b/drivers/video/mcde/mcde_regs.h @@ -0,0 +1,5096 @@ + +#define MCDE_VAL2REG(__reg, __fld, __val) \ + (((__val) << __reg##_##__fld##_SHIFT) & __reg##_##__fld##_MASK) +#define MCDE_REG2VAL(__reg, __fld, __val) \ + (((__val) & __reg##_##__fld##_MASK) >> __reg##_##__fld##_SHIFT) + +#define MCDE_CR 0x00000000 +#define MCDE_CR_IFIFOCTRLEN_SHIFT 15 +#define MCDE_CR_IFIFOCTRLEN_MASK 0x00008000 +#define MCDE_CR_IFIFOCTRLEN(__x) \ + MCDE_VAL2REG(MCDE_CR, IFIFOCTRLEN, __x) +#define MCDE_CR_AUTOCLKG_EN_SHIFT 30 +#define MCDE_CR_AUTOCLKG_EN_MASK 0x40000000 +#define MCDE_CR_AUTOCLKG_EN(__x) \ + MCDE_VAL2REG(MCDE_CR, AUTOCLKG_EN, __x) +#define MCDE_CR_MCDEEN_SHIFT 31 +#define 
MCDE_CR_MCDEEN_MASK 0x80000000 +#define MCDE_CR_MCDEEN(__x) \ + MCDE_VAL2REG(MCDE_CR, MCDEEN, __x) +#define MCDE_CONF0 0x00000004 +#define MCDE_CONF0_SYNCMUX0_SHIFT 0 +#define MCDE_CONF0_SYNCMUX0_MASK 0x00000001 +#define MCDE_CONF0_SYNCMUX0(__x) \ + MCDE_VAL2REG(MCDE_CONF0, SYNCMUX0, __x) +#define MCDE_CONF0_SYNCMUX1_SHIFT 1 +#define MCDE_CONF0_SYNCMUX1_MASK 0x00000002 +#define MCDE_CONF0_SYNCMUX1(__x) \ + MCDE_VAL2REG(MCDE_CONF0, SYNCMUX1, __x) +#define MCDE_CONF0_SYNCMUX2_SHIFT 2 +#define MCDE_CONF0_SYNCMUX2_MASK 0x00000004 +#define MCDE_CONF0_SYNCMUX2(__x) \ + MCDE_VAL2REG(MCDE_CONF0, SYNCMUX2, __x) +#define MCDE_CONF0_SYNCMUX3_SHIFT 3 +#define MCDE_CONF0_SYNCMUX3_MASK 0x00000008 +#define MCDE_CONF0_SYNCMUX3(__x) \ + MCDE_VAL2REG(MCDE_CONF0, SYNCMUX3, __x) +#define MCDE_CONF0_SYNCMUX4_SHIFT 4 +#define MCDE_CONF0_SYNCMUX4_MASK 0x00000010 +#define MCDE_CONF0_SYNCMUX4(__x) \ + MCDE_VAL2REG(MCDE_CONF0, SYNCMUX4, __x) +#define MCDE_CONF0_SYNCMUX5_SHIFT 5 +#define MCDE_CONF0_SYNCMUX5_MASK 0x00000020 +#define MCDE_CONF0_SYNCMUX5(__x) \ + MCDE_VAL2REG(MCDE_CONF0, SYNCMUX5, __x) +#define MCDE_CONF0_SYNCMUX6_SHIFT 6 +#define MCDE_CONF0_SYNCMUX6_MASK 0x00000040 +#define MCDE_CONF0_SYNCMUX6(__x) \ + MCDE_VAL2REG(MCDE_CONF0, SYNCMUX6, __x) +#define MCDE_CONF0_SYNCMUX7_SHIFT 7 +#define MCDE_CONF0_SYNCMUX7_MASK 0x00000080 +#define MCDE_CONF0_SYNCMUX7(__x) \ + MCDE_VAL2REG(MCDE_CONF0, SYNCMUX7, __x) +#define MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT 12 +#define MCDE_CONF0_IFIFOCTRLWTRMRKLVL_MASK 0x00007000 +#define MCDE_CONF0_IFIFOCTRLWTRMRKLVL(__x) \ + MCDE_VAL2REG(MCDE_CONF0, IFIFOCTRLWTRMRKLVL, __x) +#define MCDE_CONF0_OUTMUX0_SHIFT 16 +#define MCDE_CONF0_OUTMUX0_MASK 0x00070000 +#define MCDE_CONF0_OUTMUX0(__x) \ + MCDE_VAL2REG(MCDE_CONF0, OUTMUX0, __x) +#define MCDE_CONF0_OUTMUX1_SHIFT 19 +#define MCDE_CONF0_OUTMUX1_MASK 0x00380000 +#define MCDE_CONF0_OUTMUX1(__x) \ + MCDE_VAL2REG(MCDE_CONF0, OUTMUX1, __x) +#define MCDE_CONF0_OUTMUX2_SHIFT 22 +#define MCDE_CONF0_OUTMUX2_MASK 
0x01C00000 +#define MCDE_CONF0_OUTMUX2(__x) \ + MCDE_VAL2REG(MCDE_CONF0, OUTMUX2, __x) +#define MCDE_CONF0_OUTMUX3_SHIFT 25 +#define MCDE_CONF0_OUTMUX3_MASK 0x0E000000 +#define MCDE_CONF0_OUTMUX3(__x) \ + MCDE_VAL2REG(MCDE_CONF0, OUTMUX3, __x) +#define MCDE_CONF0_OUTMUX4_SHIFT 28 +#define MCDE_CONF0_OUTMUX4_MASK 0x70000000 +#define MCDE_CONF0_OUTMUX4(__x) \ + MCDE_VAL2REG(MCDE_CONF0, OUTMUX4, __x) +#define MCDE_SSP 0x00000008 +#define MCDE_SSP_SSPDATA_SHIFT 0 +#define MCDE_SSP_SSPDATA_MASK 0x000000FF +#define MCDE_SSP_SSPDATA(__x) \ + MCDE_VAL2REG(MCDE_SSP, SSPDATA, __x) +#define MCDE_SSP_SSPCMD_SHIFT 8 +#define MCDE_SSP_SSPCMD_MASK 0x00000100 +#define MCDE_SSP_SSPCMD_DATA 0 +#define MCDE_SSP_SSPCMD_COMMAND 1 +#define MCDE_SSP_SSPCMD_ENUM(__x) \ + MCDE_VAL2REG(MCDE_SSP, SSPCMD, MCDE_SSP_SSPCMD_##__x) +#define MCDE_SSP_SSPCMD(__x) \ + MCDE_VAL2REG(MCDE_SSP, SSPCMD, __x) +#define MCDE_SSP_SSPEN_SHIFT 16 +#define MCDE_SSP_SSPEN_MASK 0x00010000 +#define MCDE_SSP_SSPEN(__x) \ + MCDE_VAL2REG(MCDE_SSP, SSPEN, __x) +#define MCDE_AIS 0x00000100 +#define MCDE_AIS_MCDEPPI_SHIFT 0 +#define MCDE_AIS_MCDEPPI_MASK 0x00000001 +#define MCDE_AIS_MCDEPPI(__x) \ + MCDE_VAL2REG(MCDE_AIS, MCDEPPI, __x) +#define MCDE_AIS_MCDEOVLI_SHIFT 1 +#define MCDE_AIS_MCDEOVLI_MASK 0x00000002 +#define MCDE_AIS_MCDEOVLI(__x) \ + MCDE_VAL2REG(MCDE_AIS, MCDEOVLI, __x) +#define MCDE_AIS_MCDECHNLI_SHIFT 2 +#define MCDE_AIS_MCDECHNLI_MASK 0x00000004 +#define MCDE_AIS_MCDECHNLI(__x) \ + MCDE_VAL2REG(MCDE_AIS, MCDECHNLI, __x) +#define MCDE_AIS_MCDEERRI_SHIFT 3 +#define MCDE_AIS_MCDEERRI_MASK 0x00000008 +#define MCDE_AIS_MCDEERRI(__x) \ + MCDE_VAL2REG(MCDE_AIS, MCDEERRI, __x) +#define MCDE_AIS_DSI0AI_SHIFT 4 +#define MCDE_AIS_DSI0AI_MASK 0x00000010 +#define MCDE_AIS_DSI0AI(__x) \ + MCDE_VAL2REG(MCDE_AIS, DSI0AI, __x) +#define MCDE_AIS_DSI1AI_SHIFT 5 +#define MCDE_AIS_DSI1AI_MASK 0x00000020 +#define MCDE_AIS_DSI1AI(__x) \ + MCDE_VAL2REG(MCDE_AIS, DSI1AI, __x) +#define MCDE_AIS_DSI2AI_SHIFT 6 +#define 
MCDE_AIS_DSI2AI_MASK 0x00000040 +#define MCDE_AIS_DSI2AI(__x) \ + MCDE_VAL2REG(MCDE_AIS, DSI2AI, __x) +#define MCDE_IMSCPP 0x00000104 +#define MCDE_IMSCPP_VCMPAIM_SHIFT 0 +#define MCDE_IMSCPP_VCMPAIM_MASK 0x00000001 +#define MCDE_IMSCPP_VCMPAIM(__x) \ + MCDE_VAL2REG(MCDE_IMSCPP, VCMPAIM, __x) +#define MCDE_IMSCPP_VCMPBIM_SHIFT 1 +#define MCDE_IMSCPP_VCMPBIM_MASK 0x00000002 +#define MCDE_IMSCPP_VCMPBIM(__x) \ + MCDE_VAL2REG(MCDE_IMSCPP, VCMPBIM, __x) +#define MCDE_IMSCPP_VSCC0IM_SHIFT 2 +#define MCDE_IMSCPP_VSCC0IM_MASK 0x00000004 +#define MCDE_IMSCPP_VSCC0IM(__x) \ + MCDE_VAL2REG(MCDE_IMSCPP, VSCC0IM, __x) +#define MCDE_IMSCPP_VSCC1IM_SHIFT 3 +#define MCDE_IMSCPP_VSCC1IM_MASK 0x00000008 +#define MCDE_IMSCPP_VSCC1IM(__x) \ + MCDE_VAL2REG(MCDE_IMSCPP, VSCC1IM, __x) +#define MCDE_IMSCPP_VCMPC0IM_SHIFT 4 +#define MCDE_IMSCPP_VCMPC0IM_MASK 0x00000010 +#define MCDE_IMSCPP_VCMPC0IM(__x) \ + MCDE_VAL2REG(MCDE_IMSCPP, VCMPC0IM, __x) +#define MCDE_IMSCPP_VCMPC1IM_SHIFT 5 +#define MCDE_IMSCPP_VCMPC1IM_MASK 0x00000020 +#define MCDE_IMSCPP_VCMPC1IM(__x) \ + MCDE_VAL2REG(MCDE_IMSCPP, VCMPC1IM, __x) +#define MCDE_IMSCPP_ROTFDIM_B_SHIFT 6 +#define MCDE_IMSCPP_ROTFDIM_B_MASK 0x00000040 +#define MCDE_IMSCPP_ROTFDIM_B(__x) \ + MCDE_VAL2REG(MCDE_IMSCPP, ROTFDIM_B, __x) +#define MCDE_IMSCPP_ROTFDIM_A_SHIFT 7 +#define MCDE_IMSCPP_ROTFDIM_A_MASK 0x00000080 +#define MCDE_IMSCPP_ROTFDIM_A(__x) \ + MCDE_VAL2REG(MCDE_IMSCPP, ROTFDIM_A, __x) +#define MCDE_IMSCOVL 0x00000108 +#define MCDE_IMSCOVL_OVLRDIM_SHIFT 0 +#define MCDE_IMSCOVL_OVLRDIM_MASK 0x0000FFFF +#define MCDE_IMSCOVL_OVLRDIM(__x) \ + MCDE_VAL2REG(MCDE_IMSCOVL, OVLRDIM, __x) +#define MCDE_IMSCOVL_OVLFDIM_SHIFT 16 +#define MCDE_IMSCOVL_OVLFDIM_MASK 0xFFFF0000 +#define MCDE_IMSCOVL_OVLFDIM(__x) \ + MCDE_VAL2REG(MCDE_IMSCOVL, OVLFDIM, __x) +#define MCDE_IMSCCHNL 0x0000010C +#define MCDE_IMSCCHNL_CHNLRDIM_SHIFT 0 +#define MCDE_IMSCCHNL_CHNLRDIM_MASK 0x0000FFFF +#define MCDE_IMSCCHNL_CHNLRDIM(__x) \ + MCDE_VAL2REG(MCDE_IMSCCHNL, 
CHNLRDIM, __x) +#define MCDE_IMSCCHNL_CHNLAIM_SHIFT 16 +#define MCDE_IMSCCHNL_CHNLAIM_MASK 0xFFFF0000 +#define MCDE_IMSCCHNL_CHNLAIM(__x) \ + MCDE_VAL2REG(MCDE_IMSCCHNL, CHNLAIM, __x) +#define MCDE_IMSCERR 0x00000110 +#define MCDE_IMSCERR_FUAIM_SHIFT 0 +#define MCDE_IMSCERR_FUAIM_MASK 0x00000001 +#define MCDE_IMSCERR_FUAIM(__x) \ + MCDE_VAL2REG(MCDE_IMSCERR, FUAIM, __x) +#define MCDE_IMSCERR_FUBIM_SHIFT 1 +#define MCDE_IMSCERR_FUBIM_MASK 0x00000002 +#define MCDE_IMSCERR_FUBIM(__x) \ + MCDE_VAL2REG(MCDE_IMSCERR, FUBIM, __x) +#define MCDE_IMSCERR_SCHBLCKDIM_SHIFT 2 +#define MCDE_IMSCERR_SCHBLCKDIM_MASK 0x00000004 +#define MCDE_IMSCERR_SCHBLCKDIM(__x) \ + MCDE_VAL2REG(MCDE_IMSCERR, SCHBLCKDIM, __x) +#define MCDE_IMSCERR_ROTAFEIM_WRITE_SHIFT 3 +#define MCDE_IMSCERR_ROTAFEIM_WRITE_MASK 0x00000008 +#define MCDE_IMSCERR_ROTAFEIM_WRITE(__x) \ + MCDE_VAL2REG(MCDE_IMSCERR, ROTAFEIM_WRITE, __x) +#define MCDE_IMSCERR_ROTAFEIM_READ_SHIFT 4 +#define MCDE_IMSCERR_ROTAFEIM_READ_MASK 0x00000010 +#define MCDE_IMSCERR_ROTAFEIM_READ(__x) \ + MCDE_VAL2REG(MCDE_IMSCERR, ROTAFEIM_READ, __x) +#define MCDE_IMSCERR_ROTBFEIM_WRITE_SHIFT 5 +#define MCDE_IMSCERR_ROTBFEIM_WRITE_MASK 0x00000020 +#define MCDE_IMSCERR_ROTBFEIM_WRITE(__x) \ + MCDE_VAL2REG(MCDE_IMSCERR, ROTBFEIM_WRITE, __x) +#define MCDE_IMSCERR_ROTBFEIM_READ_SHIFT 6 +#define MCDE_IMSCERR_ROTBFEIM_READ_MASK 0x00000040 +#define MCDE_IMSCERR_ROTBFEIM_READ(__x) \ + MCDE_VAL2REG(MCDE_IMSCERR, ROTBFEIM_READ, __x) +#define MCDE_IMSCERR_FUC0IM_SHIFT 7 +#define MCDE_IMSCERR_FUC0IM_MASK 0x00000080 +#define MCDE_IMSCERR_FUC0IM(__x) \ + MCDE_VAL2REG(MCDE_IMSCERR, FUC0IM, __x) +#define MCDE_IMSCERR_FUC1IM_SHIFT 8 +#define MCDE_IMSCERR_FUC1IM_MASK 0x00000100 +#define MCDE_IMSCERR_FUC1IM(__x) \ + MCDE_VAL2REG(MCDE_IMSCERR, FUC1IM, __x) +#define MCDE_IMSCERR_OVLFERRIM_SHIFT 16 +#define MCDE_IMSCERR_OVLFERRIM_MASK 0xFFFF0000 +#define MCDE_IMSCERR_OVLFERRIM(__x) \ + MCDE_VAL2REG(MCDE_IMSCERR, OVLFERRIM, __x) +#define MCDE_RISPP 0x00000114 +#define 
MCDE_RISPP_VCMPARIS_SHIFT 0 +#define MCDE_RISPP_VCMPARIS_MASK 0x00000001 +#define MCDE_RISPP_VCMPARIS(__x) \ + MCDE_VAL2REG(MCDE_RISPP, VCMPARIS, __x) +#define MCDE_RISPP_VCMPBRIS_SHIFT 1 +#define MCDE_RISPP_VCMPBRIS_MASK 0x00000002 +#define MCDE_RISPP_VCMPBRIS(__x) \ + MCDE_VAL2REG(MCDE_RISPP, VCMPBRIS, __x) +#define MCDE_RISPP_VSCC0RIS_SHIFT 2 +#define MCDE_RISPP_VSCC0RIS_MASK 0x00000004 +#define MCDE_RISPP_VSCC0RIS(__x) \ + MCDE_VAL2REG(MCDE_RISPP, VSCC0RIS, __x) +#define MCDE_RISPP_VSCC1RIS_SHIFT 3 +#define MCDE_RISPP_VSCC1RIS_MASK 0x00000008 +#define MCDE_RISPP_VSCC1RIS(__x) \ + MCDE_VAL2REG(MCDE_RISPP, VSCC1RIS, __x) +#define MCDE_RISPP_VCMPC0RIS_SHIFT 4 +#define MCDE_RISPP_VCMPC0RIS_MASK 0x00000010 +#define MCDE_RISPP_VCMPC0RIS(__x) \ + MCDE_VAL2REG(MCDE_RISPP, VCMPC0RIS, __x) +#define MCDE_RISPP_VCMPC1RIS_SHIFT 5 +#define MCDE_RISPP_VCMPC1RIS_MASK 0x00000020 +#define MCDE_RISPP_VCMPC1RIS(__x) \ + MCDE_VAL2REG(MCDE_RISPP, VCMPC1RIS, __x) +#define MCDE_RISPP_ROTFDRIS_B_SHIFT 6 +#define MCDE_RISPP_ROTFDRIS_B_MASK 0x00000040 +#define MCDE_RISPP_ROTFDRIS_B(__x) \ + MCDE_VAL2REG(MCDE_RISPP, ROTFDRIS_B, __x) +#define MCDE_RISPP_ROTFDRIS_A_SHIFT 7 +#define MCDE_RISPP_ROTFDRIS_A_MASK 0x00000080 +#define MCDE_RISPP_ROTFDRIS_A(__x) \ + MCDE_VAL2REG(MCDE_RISPP, ROTFDRIS_A, __x) +#define MCDE_RISOVL 0x00000118 +#define MCDE_RISOVL_OVLRDRIS_SHIFT 0 +#define MCDE_RISOVL_OVLRDRIS_MASK 0x0000FFFF +#define MCDE_RISOVL_OVLRDRIS(__x) \ + MCDE_VAL2REG(MCDE_RISOVL, OVLRDRIS, __x) +#define MCDE_RISOVL_OVLFDRIS_SHIFT 16 +#define MCDE_RISOVL_OVLFDRIS_MASK 0xFFFF0000 +#define MCDE_RISOVL_OVLFDRIS(__x) \ + MCDE_VAL2REG(MCDE_RISOVL, OVLFDRIS, __x) +#define MCDE_RISCHNL 0x0000011C +#define MCDE_RISCHNL_CHNLRDRIS_SHIFT 0 +#define MCDE_RISCHNL_CHNLRDRIS_MASK 0x0000FFFF +#define MCDE_RISCHNL_CHNLRDRIS(__x) \ + MCDE_VAL2REG(MCDE_RISCHNL, CHNLRDRIS, __x) +#define MCDE_RISCHNL_CHNLARIS_SHIFT 16 +#define MCDE_RISCHNL_CHNLARIS_MASK 0xFFFF0000 +#define MCDE_RISCHNL_CHNLARIS(__x) \ + 
MCDE_VAL2REG(MCDE_RISCHNL, CHNLARIS, __x) +#define MCDE_RISERR 0x00000120 +#define MCDE_RISERR_FUARIS_SHIFT 0 +#define MCDE_RISERR_FUARIS_MASK 0x00000001 +#define MCDE_RISERR_FUARIS(__x) \ + MCDE_VAL2REG(MCDE_RISERR, FUARIS, __x) +#define MCDE_RISERR_FUBRIS_SHIFT 1 +#define MCDE_RISERR_FUBRIS_MASK 0x00000002 +#define MCDE_RISERR_FUBRIS(__x) \ + MCDE_VAL2REG(MCDE_RISERR, FUBRIS, __x) +#define MCDE_RISERR_SCHBLCKDRIS_SHIFT 2 +#define MCDE_RISERR_SCHBLCKDRIS_MASK 0x00000004 +#define MCDE_RISERR_SCHBLCKDRIS(__x) \ + MCDE_VAL2REG(MCDE_RISERR, SCHBLCKDRIS, __x) +#define MCDE_RISERR_ROTAFERIS_WRITE_SHIFT 3 +#define MCDE_RISERR_ROTAFERIS_WRITE_MASK 0x00000008 +#define MCDE_RISERR_ROTAFERIS_WRITE(__x) \ + MCDE_VAL2REG(MCDE_RISERR, ROTAFERIS_WRITE, __x) +#define MCDE_RISERR_ROTAFERIS_READ_SHIFT 4 +#define MCDE_RISERR_ROTAFERIS_READ_MASK 0x00000010 +#define MCDE_RISERR_ROTAFERIS_READ(__x) \ + MCDE_VAL2REG(MCDE_RISERR, ROTAFERIS_READ, __x) +#define MCDE_RISERR_ROTBFERIS_WRITE_SHIFT 5 +#define MCDE_RISERR_ROTBFERIS_WRITE_MASK 0x00000020 +#define MCDE_RISERR_ROTBFERIS_WRITE(__x) \ + MCDE_VAL2REG(MCDE_RISERR, ROTBFERIS_WRITE, __x) +#define MCDE_RISERR_ROTBFERIS_READ_SHIFT 6 +#define MCDE_RISERR_ROTBFERIS_READ_MASK 0x00000040 +#define MCDE_RISERR_ROTBFERIS_READ(__x) \ + MCDE_VAL2REG(MCDE_RISERR, ROTBFERIS_READ, __x) +#define MCDE_RISERR_FUC0RIS_SHIFT 7 +#define MCDE_RISERR_FUC0RIS_MASK 0x00000080 +#define MCDE_RISERR_FUC0RIS(__x) \ + MCDE_VAL2REG(MCDE_RISERR, FUC0RIS, __x) +#define MCDE_RISERR_FUC1RIS_SHIFT 8 +#define MCDE_RISERR_FUC1RIS_MASK 0x00000100 +#define MCDE_RISERR_FUC1RIS(__x) \ + MCDE_VAL2REG(MCDE_RISERR, FUC1RIS, __x) +#define MCDE_RISERR_OVLFERRRIS_SHIFT 16 +#define MCDE_RISERR_OVLFERRRIS_MASK 0xFFFF0000 +#define MCDE_RISERR_OVLFERRRIS(__x) \ + MCDE_VAL2REG(MCDE_RISERR, OVLFERRRIS, __x) +#define MCDE_MISPP 0x00000124 +#define MCDE_MISPP_VCMPAMIS_SHIFT 0 +#define MCDE_MISPP_VCMPAMIS_MASK 0x00000001 +#define MCDE_MISPP_VCMPAMIS(__x) \ + MCDE_VAL2REG(MCDE_MISPP, 
VCMPAMIS, __x) +#define MCDE_MISPP_VCMPBMIS_SHIFT 1 +#define MCDE_MISPP_VCMPBMIS_MASK 0x00000002 +#define MCDE_MISPP_VCMPBMIS(__x) \ + MCDE_VAL2REG(MCDE_MISPP, VCMPBMIS, __x) +#define MCDE_MISPP_VSCC0MIS_SHIFT 2 +#define MCDE_MISPP_VSCC0MIS_MASK 0x00000004 +#define MCDE_MISPP_VSCC0MIS(__x) \ + MCDE_VAL2REG(MCDE_MISPP, VSCC0MIS, __x) +#define MCDE_MISPP_VSCC1MIS_SHIFT 3 +#define MCDE_MISPP_VSCC1MIS_MASK 0x00000008 +#define MCDE_MISPP_VSCC1MIS(__x) \ + MCDE_VAL2REG(MCDE_MISPP, VSCC1MIS, __x) +#define MCDE_MISPP_VCMPC0MIS_SHIFT 4 +#define MCDE_MISPP_VCMPC0MIS_MASK 0x00000010 +#define MCDE_MISPP_VCMPC0MIS(__x) \ + MCDE_VAL2REG(MCDE_MISPP, VCMPC0MIS, __x) +#define MCDE_MISPP_VCMPC1MIS_SHIFT 5 +#define MCDE_MISPP_VCMPC1MIS_MASK 0x00000020 +#define MCDE_MISPP_VCMPC1MIS(__x) \ + MCDE_VAL2REG(MCDE_MISPP, VCMPC1MIS, __x) +#define MCDE_MISPP_ROTFDMIS_A_SHIFT 6 +#define MCDE_MISPP_ROTFDMIS_A_MASK 0x00000040 +#define MCDE_MISPP_ROTFDMIS_A(__x) \ + MCDE_VAL2REG(MCDE_MISPP, ROTFDMIS_A, __x) +#define MCDE_MISPP_ROTFDMIS_B_SHIFT 7 +#define MCDE_MISPP_ROTFDMIS_B_MASK 0x00000080 +#define MCDE_MISPP_ROTFDMIS_B(__x) \ + MCDE_VAL2REG(MCDE_MISPP, ROTFDMIS_B, __x) +#define MCDE_MISOVL 0x00000128 +#define MCDE_MISOVL_OVLRDMIS_SHIFT 0 +#define MCDE_MISOVL_OVLRDMIS_MASK 0x0000FFFF +#define MCDE_MISOVL_OVLRDMIS(__x) \ + MCDE_VAL2REG(MCDE_MISOVL, OVLRDMIS, __x) +#define MCDE_MISOVL_OVLFDMIS_SHIFT 16 +#define MCDE_MISOVL_OVLFDMIS_MASK 0xFFFF0000 +#define MCDE_MISOVL_OVLFDMIS(__x) \ + MCDE_VAL2REG(MCDE_MISOVL, OVLFDMIS, __x) +#define MCDE_MISCHNL 0x0000012C +#define MCDE_MISCHNL_CHNLRDMIS_SHIFT 0 +#define MCDE_MISCHNL_CHNLRDMIS_MASK 0x0000FFFF +#define MCDE_MISCHNL_CHNLRDMIS(__x) \ + MCDE_VAL2REG(MCDE_MISCHNL, CHNLRDMIS, __x) +#define MCDE_MISCHNL_CHNLAMIS_SHIFT 16 +#define MCDE_MISCHNL_CHNLAMIS_MASK 0xFFFF0000 +#define MCDE_MISCHNL_CHNLAMIS(__x) \ + MCDE_VAL2REG(MCDE_MISCHNL, CHNLAMIS, __x) +#define MCDE_MISERR 0x00000130 +#define MCDE_MISERR_FUAMIS_SHIFT 0 +#define MCDE_MISERR_FUAMIS_MASK 
0x00000001 +#define MCDE_MISERR_FUAMIS(__x) \ + MCDE_VAL2REG(MCDE_MISERR, FUAMIS, __x) +#define MCDE_MISERR_FUBMIS_SHIFT 1 +#define MCDE_MISERR_FUBMIS_MASK 0x00000002 +#define MCDE_MISERR_FUBMIS(__x) \ + MCDE_VAL2REG(MCDE_MISERR, FUBMIS, __x) +#define MCDE_MISERR_SCHBLCKDMIS_SHIFT 2 +#define MCDE_MISERR_SCHBLCKDMIS_MASK 0x00000004 +#define MCDE_MISERR_SCHBLCKDMIS(__x) \ + MCDE_VAL2REG(MCDE_MISERR, SCHBLCKDMIS, __x) +#define MCDE_MISERR_ROTAFEMIS_WRITE_SHIFT 3 +#define MCDE_MISERR_ROTAFEMIS_WRITE_MASK 0x00000008 +#define MCDE_MISERR_ROTAFEMIS_WRITE(__x) \ + MCDE_VAL2REG(MCDE_MISERR, ROTAFEMIS_WRITE, __x) +#define MCDE_MISERR_ROTAFEMIS_READ_SHIFT 4 +#define MCDE_MISERR_ROTAFEMIS_READ_MASK 0x00000010 +#define MCDE_MISERR_ROTAFEMIS_READ(__x) \ + MCDE_VAL2REG(MCDE_MISERR, ROTAFEMIS_READ, __x) +#define MCDE_MISERR_ROTBFEMIS_WRITE_SHIFT 5 +#define MCDE_MISERR_ROTBFEMIS_WRITE_MASK 0x00000020 +#define MCDE_MISERR_ROTBFEMIS_WRITE(__x) \ + MCDE_VAL2REG(MCDE_MISERR, ROTBFEMIS_WRITE, __x) +#define MCDE_MISERR_ROTBFEMIS_READ_SHIFT 6 +#define MCDE_MISERR_ROTBFEMIS_READ_MASK 0x00000040 +#define MCDE_MISERR_ROTBFEMIS_READ(__x) \ + MCDE_VAL2REG(MCDE_MISERR, ROTBFEMIS_READ, __x) +#define MCDE_MISERR_FUC0MIS_SHIFT 7 +#define MCDE_MISERR_FUC0MIS_MASK 0x00000080 +#define MCDE_MISERR_FUC0MIS(__x) \ + MCDE_VAL2REG(MCDE_MISERR, FUC0MIS, __x) +#define MCDE_MISERR_FUC1MIS_SHIFT 8 +#define MCDE_MISERR_FUC1MIS_MASK 0x00000100 +#define MCDE_MISERR_FUC1MIS(__x) \ + MCDE_VAL2REG(MCDE_MISERR, FUC1MIS, __x) +#define MCDE_MISERR_OVLFERMIS_SHIFT 16 +#define MCDE_MISERR_OVLFERMIS_MASK 0xFFFF0000 +#define MCDE_MISERR_OVLFERMIS(__x) \ + MCDE_VAL2REG(MCDE_MISERR, OVLFERMIS, __x) +#define MCDE_SISPP 0x00000134 +#define MCDE_SISPP_VCMPASIS_SHIFT 0 +#define MCDE_SISPP_VCMPASIS_MASK 0x00000001 +#define MCDE_SISPP_VCMPASIS(__x) \ + MCDE_VAL2REG(MCDE_SISPP, VCMPASIS, __x) +#define MCDE_SISPP_VCMPBSIS_SHIFT 1 +#define MCDE_SISPP_VCMPBSIS_MASK 0x00000002 +#define MCDE_SISPP_VCMPBSIS(__x) \ + 
MCDE_VAL2REG(MCDE_SISPP, VCMPBSIS, __x) +#define MCDE_SISPP_VSCC0SIS_SHIFT 2 +#define MCDE_SISPP_VSCC0SIS_MASK 0x00000004 +#define MCDE_SISPP_VSCC0SIS(__x) \ + MCDE_VAL2REG(MCDE_SISPP, VSCC0SIS, __x) +#define MCDE_SISPP_VSCC1SIS_SHIFT 3 +#define MCDE_SISPP_VSCC1SIS_MASK 0x00000008 +#define MCDE_SISPP_VSCC1SIS(__x) \ + MCDE_VAL2REG(MCDE_SISPP, VSCC1SIS, __x) +#define MCDE_SISPP_VCMPC0SIS_SHIFT 4 +#define MCDE_SISPP_VCMPC0SIS_MASK 0x00000010 +#define MCDE_SISPP_VCMPC0SIS(__x) \ + MCDE_VAL2REG(MCDE_SISPP, VCMPC0SIS, __x) +#define MCDE_SISPP_VCMPC1SIS_SHIFT 5 +#define MCDE_SISPP_VCMPC1SIS_MASK 0x00000020 +#define MCDE_SISPP_VCMPC1SIS(__x) \ + MCDE_VAL2REG(MCDE_SISPP, VCMPC1SIS, __x) +#define MCDE_SISPP_ROTFDSIS_A_SHIFT 6 +#define MCDE_SISPP_ROTFDSIS_A_MASK 0x00000040 +#define MCDE_SISPP_ROTFDSIS_A(__x) \ + MCDE_VAL2REG(MCDE_SISPP, ROTFDSIS_A, __x) +#define MCDE_SISPP_ROTFDSIS_B_SHIFT 7 +#define MCDE_SISPP_ROTFDSIS_B_MASK 0x00000080 +#define MCDE_SISPP_ROTFDSIS_B(__x) \ + MCDE_VAL2REG(MCDE_SISPP, ROTFDSIS_B, __x) +#define MCDE_SISOVL 0x00000138 +#define MCDE_SISOVL_OVLRDSIS_SHIFT 0 +#define MCDE_SISOVL_OVLRDSIS_MASK 0x0000FFFF +#define MCDE_SISOVL_OVLRDSIS(__x) \ + MCDE_VAL2REG(MCDE_SISOVL, OVLRDSIS, __x) +#define MCDE_SISOVL_OVLFDSIS_SHIFT 16 +#define MCDE_SISOVL_OVLFDSIS_MASK 0xFFFF0000 +#define MCDE_SISOVL_OVLFDSIS(__x) \ + MCDE_VAL2REG(MCDE_SISOVL, OVLFDSIS, __x) +#define MCDE_SISCHNL 0x0000013C +#define MCDE_SISCHNL_CHNLRDSIS_SHIFT 0 +#define MCDE_SISCHNL_CHNLRDSIS_MASK 0x0000FFFF +#define MCDE_SISCHNL_CHNLRDSIS(__x) \ + MCDE_VAL2REG(MCDE_SISCHNL, CHNLRDSIS, __x) +#define MCDE_SISCHNL_CHNLASIS_SHIFT 16 +#define MCDE_SISCHNL_CHNLASIS_MASK 0xFFFF0000 +#define MCDE_SISCHNL_CHNLASIS(__x) \ + MCDE_VAL2REG(MCDE_SISCHNL, CHNLASIS, __x) +#define MCDE_SISERR 0x00000140 +#define MCDE_SISERR_FUASIS_SHIFT 0 +#define MCDE_SISERR_FUASIS_MASK 0x00000001 +#define MCDE_SISERR_FUASIS(__x) \ + MCDE_VAL2REG(MCDE_SISERR, FUASIS, __x) +#define MCDE_SISERR_FUBSIS_SHIFT 1 +#define 
MCDE_SISERR_FUBSIS_MASK 0x00000002 +#define MCDE_SISERR_FUBSIS(__x) \ + MCDE_VAL2REG(MCDE_SISERR, FUBSIS, __x) +#define MCDE_SISERR_SCHBLCKDSIS_SHIFT 2 +#define MCDE_SISERR_SCHBLCKDSIS_MASK 0x00000004 +#define MCDE_SISERR_SCHBLCKDSIS(__x) \ + MCDE_VAL2REG(MCDE_SISERR, SCHBLCKDSIS, __x) +#define MCDE_SISERR_ROTAFESIS_WRITE_SHIFT 3 +#define MCDE_SISERR_ROTAFESIS_WRITE_MASK 0x00000008 +#define MCDE_SISERR_ROTAFESIS_WRITE(__x) \ + MCDE_VAL2REG(MCDE_SISERR, ROTAFESIS_WRITE, __x) +#define MCDE_SISERR_ROTAFESIS_READ_SHIFT 4 +#define MCDE_SISERR_ROTAFESIS_READ_MASK 0x00000010 +#define MCDE_SISERR_ROTAFESIS_READ(__x) \ + MCDE_VAL2REG(MCDE_SISERR, ROTAFESIS_READ, __x) +#define MCDE_SISERR_ROTBFESIS_WRITE_SHIFT 5 +#define MCDE_SISERR_ROTBFESIS_WRITE_MASK 0x00000020 +#define MCDE_SISERR_ROTBFESIS_WRITE(__x) \ + MCDE_VAL2REG(MCDE_SISERR, ROTBFESIS_WRITE, __x) +#define MCDE_SISERR_ROTBFESIS_READ_SHIFT 6 +#define MCDE_SISERR_ROTBFESIS_READ_MASK 0x00000040 +#define MCDE_SISERR_ROTBFESIS_READ(__x) \ + MCDE_VAL2REG(MCDE_SISERR, ROTBFESIS_READ, __x) +#define MCDE_SISERR_FUC0SIS_SHIFT 7 +#define MCDE_SISERR_FUC0SIS_MASK 0x00000080 +#define MCDE_SISERR_FUC0SIS(__x) \ + MCDE_VAL2REG(MCDE_SISERR, FUC0SIS, __x) +#define MCDE_SISERR_FUC1SIS_SHIFT 8 +#define MCDE_SISERR_FUC1SIS_MASK 0x00000100 +#define MCDE_SISERR_FUC1SIS(__x) \ + MCDE_VAL2REG(MCDE_SISERR, FUC1SIS, __x) +#define MCDE_SISERR_OVLFERSIS_SHIFT 16 +#define MCDE_SISERR_OVLFERSIS_MASK 0xFFFF0000 +#define MCDE_SISERR_OVLFERSIS(__x) \ + MCDE_VAL2REG(MCDE_SISERR, OVLFERSIS, __x) +#define MCDE_PID 0x000001FC +#define MCDE_PID_METALFIX_VERSION_SHIFT 0 +#define MCDE_PID_METALFIX_VERSION_MASK 0x000000FF +#define MCDE_PID_METALFIX_VERSION(__x) \ + MCDE_VAL2REG(MCDE_PID, METALFIX_VERSION, __x) +#define MCDE_PID_DEVELOPMENT_VERSION_SHIFT 8 +#define MCDE_PID_DEVELOPMENT_VERSION_MASK 0x0000FF00 +#define MCDE_PID_DEVELOPMENT_VERSION(__x) \ + MCDE_VAL2REG(MCDE_PID, DEVELOPMENT_VERSION, __x) +#define MCDE_PID_MINOR_VERSION_SHIFT 16 +#define 
MCDE_PID_MINOR_VERSION_MASK 0x00FF0000 +#define MCDE_PID_MINOR_VERSION(__x) \ + MCDE_VAL2REG(MCDE_PID, MINOR_VERSION, __x) +#define MCDE_PID_MAJOR_VERSION_SHIFT 24 +#define MCDE_PID_MAJOR_VERSION_MASK 0xFF000000 +#define MCDE_PID_MAJOR_VERSION(__x) \ + MCDE_VAL2REG(MCDE_PID, MAJOR_VERSION, __x) +#define MCDE_EXTSRC0A0 0x00000200 +#define MCDE_EXTSRC0A0_GROUPOFFSET 0x20 +#define MCDE_EXTSRC0A0_BASEADDRESS0_SHIFT 3 +#define MCDE_EXTSRC0A0_BASEADDRESS0_MASK 0xFFFFFFF8 +#define MCDE_EXTSRC0A0_BASEADDRESS0(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC0A0, BASEADDRESS0, __x) +#define MCDE_EXTSRC1A0 0x00000220 +#define MCDE_EXTSRC1A0_BASEADDRESS0_SHIFT 3 +#define MCDE_EXTSRC1A0_BASEADDRESS0_MASK 0xFFFFFFF8 +#define MCDE_EXTSRC1A0_BASEADDRESS0(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC1A0, BASEADDRESS0, __x) +#define MCDE_EXTSRC2A0 0x00000240 +#define MCDE_EXTSRC2A0_BASEADDRESS0_SHIFT 3 +#define MCDE_EXTSRC2A0_BASEADDRESS0_MASK 0xFFFFFFF8 +#define MCDE_EXTSRC2A0_BASEADDRESS0(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC2A0, BASEADDRESS0, __x) +#define MCDE_EXTSRC3A0 0x00000260 +#define MCDE_EXTSRC3A0_BASEADDRESS0_SHIFT 3 +#define MCDE_EXTSRC3A0_BASEADDRESS0_MASK 0xFFFFFFF8 +#define MCDE_EXTSRC3A0_BASEADDRESS0(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC3A0, BASEADDRESS0, __x) +#define MCDE_EXTSRC4A0 0x00000280 +#define MCDE_EXTSRC4A0_BASEADDRESS0_SHIFT 3 +#define MCDE_EXTSRC4A0_BASEADDRESS0_MASK 0xFFFFFFF8 +#define MCDE_EXTSRC4A0_BASEADDRESS0(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC4A0, BASEADDRESS0, __x) +#define MCDE_EXTSRC5A0 0x000002A0 +#define MCDE_EXTSRC5A0_BASEADDRESS0_SHIFT 3 +#define MCDE_EXTSRC5A0_BASEADDRESS0_MASK 0xFFFFFFF8 +#define MCDE_EXTSRC5A0_BASEADDRESS0(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC5A0, BASEADDRESS0, __x) +#define MCDE_EXTSRC6A0 0x000002C0 +#define MCDE_EXTSRC6A0_BASEADDRESS0_SHIFT 3 +#define MCDE_EXTSRC6A0_BASEADDRESS0_MASK 0xFFFFFFF8 +#define MCDE_EXTSRC6A0_BASEADDRESS0(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC6A0, BASEADDRESS0, __x) +#define MCDE_EXTSRC7A0 0x000002E0 +#define 
MCDE_EXTSRC7A0_BASEADDRESS0_SHIFT 3 +#define MCDE_EXTSRC7A0_BASEADDRESS0_MASK 0xFFFFFFF8 +#define MCDE_EXTSRC7A0_BASEADDRESS0(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC7A0, BASEADDRESS0, __x) +#define MCDE_EXTSRC8A0 0x00000300 +#define MCDE_EXTSRC8A0_BASEADDRESS0_SHIFT 3 +#define MCDE_EXTSRC8A0_BASEADDRESS0_MASK 0xFFFFFFF8 +#define MCDE_EXTSRC8A0_BASEADDRESS0(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC8A0, BASEADDRESS0, __x) +#define MCDE_EXTSRC9A0 0x00000320 +#define MCDE_EXTSRC9A0_BASEADDRESS0_SHIFT 3 +#define MCDE_EXTSRC9A0_BASEADDRESS0_MASK 0xFFFFFFF8 +#define MCDE_EXTSRC9A0_BASEADDRESS0(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC9A0, BASEADDRESS0, __x) +#define MCDE_EXTSRC0A1 0x00000204 +#define MCDE_EXTSRC0A1_GROUPOFFSET 0x20 +#define MCDE_EXTSRC0A1_BASEADDRESS1_SHIFT 3 +#define MCDE_EXTSRC0A1_BASEADDRESS1_MASK 0xFFFFFFF8 +#define MCDE_EXTSRC0A1_BASEADDRESS1(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC0A1, BASEADDRESS1, __x) +#define MCDE_EXTSRC1A1 0x00000224 +#define MCDE_EXTSRC1A1_BASEADDRESS1_SHIFT 3 +#define MCDE_EXTSRC1A1_BASEADDRESS1_MASK 0xFFFFFFF8 +#define MCDE_EXTSRC1A1_BASEADDRESS1(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC1A1, BASEADDRESS1, __x) +#define MCDE_EXTSRC2A1 0x00000244 +#define MCDE_EXTSRC2A1_BASEADDRESS1_SHIFT 3 +#define MCDE_EXTSRC2A1_BASEADDRESS1_MASK 0xFFFFFFF8 +#define MCDE_EXTSRC2A1_BASEADDRESS1(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC2A1, BASEADDRESS1, __x) +#define MCDE_EXTSRC3A1 0x00000264 +#define MCDE_EXTSRC3A1_BASEADDRESS1_SHIFT 3 +#define MCDE_EXTSRC3A1_BASEADDRESS1_MASK 0xFFFFFFF8 +#define MCDE_EXTSRC3A1_BASEADDRESS1(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC3A1, BASEADDRESS1, __x) +#define MCDE_EXTSRC4A1 0x00000284 +#define MCDE_EXTSRC4A1_BASEADDRESS1_SHIFT 3 +#define MCDE_EXTSRC4A1_BASEADDRESS1_MASK 0xFFFFFFF8 +#define MCDE_EXTSRC4A1_BASEADDRESS1(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC4A1, BASEADDRESS1, __x) +#define MCDE_EXTSRC5A1 0x000002A4 +#define MCDE_EXTSRC5A1_BASEADDRESS1_SHIFT 3 +#define MCDE_EXTSRC5A1_BASEADDRESS1_MASK 0xFFFFFFF8 +#define MCDE_EXTSRC5A1_BASEADDRESS1(__x) \ + 
MCDE_VAL2REG(MCDE_EXTSRC5A1, BASEADDRESS1, __x) +#define MCDE_EXTSRC6A1 0x000002C4 +#define MCDE_EXTSRC6A1_BASEADDRESS1_SHIFT 3 +#define MCDE_EXTSRC6A1_BASEADDRESS1_MASK 0xFFFFFFF8 +#define MCDE_EXTSRC6A1_BASEADDRESS1(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC6A1, BASEADDRESS1, __x) +#define MCDE_EXTSRC7A1 0x000002E4 +#define MCDE_EXTSRC7A1_BASEADDRESS1_SHIFT 3 +#define MCDE_EXTSRC7A1_BASEADDRESS1_MASK 0xFFFFFFF8 +#define MCDE_EXTSRC7A1_BASEADDRESS1(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC7A1, BASEADDRESS1, __x) +#define MCDE_EXTSRC8A1 0x00000304 +#define MCDE_EXTSRC8A1_BASEADDRESS1_SHIFT 3 +#define MCDE_EXTSRC8A1_BASEADDRESS1_MASK 0xFFFFFFF8 +#define MCDE_EXTSRC8A1_BASEADDRESS1(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC8A1, BASEADDRESS1, __x) +#define MCDE_EXTSRC9A1 0x00000324 +#define MCDE_EXTSRC9A1_BASEADDRESS1_SHIFT 3 +#define MCDE_EXTSRC9A1_BASEADDRESS1_MASK 0xFFFFFFF8 +#define MCDE_EXTSRC9A1_BASEADDRESS1(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC9A1, BASEADDRESS1, __x) +#define MCDE_EXTSRC6A2 0x000002C8 +#define MCDE_EXTSRC6A2_BASEADDRESS2_SHIFT 3 +#define MCDE_EXTSRC6A2_BASEADDRESS2_MASK 0xFFFFFFF8 +#define MCDE_EXTSRC6A2_BASEADDRESS2(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC6A2, BASEADDRESS2, __x) +#define MCDE_EXTSRC0CONF 0x0000020C +#define MCDE_EXTSRC0CONF_GROUPOFFSET 0x20 +#define MCDE_EXTSRC0CONF_BUF_ID_SHIFT 0 +#define MCDE_EXTSRC0CONF_BUF_ID_MASK 0x00000003 +#define MCDE_EXTSRC0CONF_BUF_ID(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC0CONF, BUF_ID, __x) +#define MCDE_EXTSRC0CONF_BUF_NB_SHIFT 2 +#define MCDE_EXTSRC0CONF_BUF_NB_MASK 0x0000000C +#define MCDE_EXTSRC0CONF_BUF_NB(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC0CONF, BUF_NB, __x) +#define MCDE_EXTSRC0CONF_PRI_OVLID_SHIFT 4 +#define MCDE_EXTSRC0CONF_PRI_OVLID_MASK 0x000000F0 +#define MCDE_EXTSRC0CONF_PRI_OVLID(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC0CONF, PRI_OVLID, __x) +#define MCDE_EXTSRC0CONF_BPP_SHIFT 8 +#define MCDE_EXTSRC0CONF_BPP_MASK 0x00000F00 +#define MCDE_EXTSRC0CONF_BPP_1BPP_PAL 0 +#define MCDE_EXTSRC0CONF_BPP_2BPP_PAL 1 +#define 
MCDE_EXTSRC0CONF_BPP_4BPP_PAL 2 +#define MCDE_EXTSRC0CONF_BPP_8BPP_PAL 3 +#define MCDE_EXTSRC0CONF_BPP_RGB444 4 +#define MCDE_EXTSRC0CONF_BPP_ARGB4444 5 +#define MCDE_EXTSRC0CONF_BPP_IRGB1555 6 +#define MCDE_EXTSRC0CONF_BPP_RGB565 7 +#define MCDE_EXTSRC0CONF_BPP_RGB888 8 +#define MCDE_EXTSRC0CONF_BPP_XRGB8888 9 +#define MCDE_EXTSRC0CONF_BPP_ARGB8888 10 +#define MCDE_EXTSRC0CONF_BPP_YCBCR422 11 +#define MCDE_EXTSRC0CONF_BPP_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC0CONF, BPP, MCDE_EXTSRC0CONF_BPP_##__x) +#define MCDE_EXTSRC0CONF_BPP(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC0CONF, BPP, __x) +#define MCDE_EXTSRC0CONF_BGR_SHIFT 12 +#define MCDE_EXTSRC0CONF_BGR_MASK 0x00001000 +#define MCDE_EXTSRC0CONF_BGR_RGB 0 +#define MCDE_EXTSRC0CONF_BGR_BGR 1 +#define MCDE_EXTSRC0CONF_BGR_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC0CONF, BGR, MCDE_EXTSRC0CONF_BGR_##__x) +#define MCDE_EXTSRC0CONF_BGR(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC0CONF, BGR, __x) +#define MCDE_EXTSRC0CONF_BEBO_SHIFT 13 +#define MCDE_EXTSRC0CONF_BEBO_MASK 0x00002000 +#define MCDE_EXTSRC0CONF_BEBO_LITTLE_ENDIAN 0 +#define MCDE_EXTSRC0CONF_BEBO_BIG_ENDIAN 1 +#define MCDE_EXTSRC0CONF_BEBO_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC0CONF, BEBO, MCDE_EXTSRC0CONF_BEBO_##__x) +#define MCDE_EXTSRC0CONF_BEBO(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC0CONF, BEBO, __x) +#define MCDE_EXTSRC0CONF_BEPO_SHIFT 14 +#define MCDE_EXTSRC0CONF_BEPO_MASK 0x00004000 +#define MCDE_EXTSRC0CONF_BEPO_LITTLE_ENDIAN 0 +#define MCDE_EXTSRC0CONF_BEPO_BIG_ENDIAN 1 +#define MCDE_EXTSRC0CONF_BEPO_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC0CONF, BEPO, MCDE_EXTSRC0CONF_BEPO_##__x) +#define MCDE_EXTSRC0CONF_BEPO(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC0CONF, BEPO, __x) +#define MCDE_EXTSRC1CONF 0x0000022C +#define MCDE_EXTSRC1CONF_BUF_ID_SHIFT 0 +#define MCDE_EXTSRC1CONF_BUF_ID_MASK 0x00000003 +#define MCDE_EXTSRC1CONF_BUF_ID(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC1CONF, BUF_ID, __x) +#define MCDE_EXTSRC1CONF_BUF_NB_SHIFT 2 +#define MCDE_EXTSRC1CONF_BUF_NB_MASK 0x0000000C +#define 
MCDE_EXTSRC1CONF_BUF_NB(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC1CONF, BUF_NB, __x) +#define MCDE_EXTSRC1CONF_PRI_OVLID_SHIFT 4 +#define MCDE_EXTSRC1CONF_PRI_OVLID_MASK 0x000000F0 +#define MCDE_EXTSRC1CONF_PRI_OVLID(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC1CONF, PRI_OVLID, __x) +#define MCDE_EXTSRC1CONF_BPP_SHIFT 8 +#define MCDE_EXTSRC1CONF_BPP_MASK 0x00000F00 +#define MCDE_EXTSRC1CONF_BPP_1BPP_PAL 0 +#define MCDE_EXTSRC1CONF_BPP_2BPP_PAL 1 +#define MCDE_EXTSRC1CONF_BPP_4BPP_PAL 2 +#define MCDE_EXTSRC1CONF_BPP_8BPP_PAL 3 +#define MCDE_EXTSRC1CONF_BPP_RGB444 4 +#define MCDE_EXTSRC1CONF_BPP_ARGB4444 5 +#define MCDE_EXTSRC1CONF_BPP_IRGB1555 6 +#define MCDE_EXTSRC1CONF_BPP_RGB565 7 +#define MCDE_EXTSRC1CONF_BPP_RGB888 8 +#define MCDE_EXTSRC1CONF_BPP_XRGB8888 9 +#define MCDE_EXTSRC1CONF_BPP_ARGB8888 10 +#define MCDE_EXTSRC1CONF_BPP_YCBCR422 11 +#define MCDE_EXTSRC1CONF_BPP_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC1CONF, BPP, MCDE_EXTSRC1CONF_BPP_##__x) +#define MCDE_EXTSRC1CONF_BPP(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC1CONF, BPP, __x) +#define MCDE_EXTSRC1CONF_BGR_SHIFT 12 +#define MCDE_EXTSRC1CONF_BGR_MASK 0x00001000 +#define MCDE_EXTSRC1CONF_BGR_RGB 0 +#define MCDE_EXTSRC1CONF_BGR_BGR 1 +#define MCDE_EXTSRC1CONF_BGR_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC1CONF, BGR, MCDE_EXTSRC1CONF_BGR_##__x) +#define MCDE_EXTSRC1CONF_BGR(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC1CONF, BGR, __x) +#define MCDE_EXTSRC1CONF_BEBO_SHIFT 13 +#define MCDE_EXTSRC1CONF_BEBO_MASK 0x00002000 +#define MCDE_EXTSRC1CONF_BEBO_LITTLE_ENDIAN 0 +#define MCDE_EXTSRC1CONF_BEBO_BIG_ENDIAN 1 +#define MCDE_EXTSRC1CONF_BEBO_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC1CONF, BEBO, MCDE_EXTSRC1CONF_BEBO_##__x) +#define MCDE_EXTSRC1CONF_BEBO(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC1CONF, BEBO, __x) +#define MCDE_EXTSRC1CONF_BEPO_SHIFT 14 +#define MCDE_EXTSRC1CONF_BEPO_MASK 0x00004000 +#define MCDE_EXTSRC1CONF_BEPO_LITTLE_ENDIAN 0 +#define MCDE_EXTSRC1CONF_BEPO_BIG_ENDIAN 1 +#define MCDE_EXTSRC1CONF_BEPO_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC1CONF, 
BEPO, MCDE_EXTSRC1CONF_BEPO_##__x) +#define MCDE_EXTSRC1CONF_BEPO(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC1CONF, BEPO, __x) +#define MCDE_EXTSRC2CONF 0x0000024C +#define MCDE_EXTSRC2CONF_BUF_ID_SHIFT 0 +#define MCDE_EXTSRC2CONF_BUF_ID_MASK 0x00000003 +#define MCDE_EXTSRC2CONF_BUF_ID(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC2CONF, BUF_ID, __x) +#define MCDE_EXTSRC2CONF_BUF_NB_SHIFT 2 +#define MCDE_EXTSRC2CONF_BUF_NB_MASK 0x0000000C +#define MCDE_EXTSRC2CONF_BUF_NB(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC2CONF, BUF_NB, __x) +#define MCDE_EXTSRC2CONF_PRI_OVLID_SHIFT 4 +#define MCDE_EXTSRC2CONF_PRI_OVLID_MASK 0x000000F0 +#define MCDE_EXTSRC2CONF_PRI_OVLID(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC2CONF, PRI_OVLID, __x) +#define MCDE_EXTSRC2CONF_BPP_SHIFT 8 +#define MCDE_EXTSRC2CONF_BPP_MASK 0x00000F00 +#define MCDE_EXTSRC2CONF_BPP_1BPP_PAL 0 +#define MCDE_EXTSRC2CONF_BPP_2BPP_PAL 1 +#define MCDE_EXTSRC2CONF_BPP_4BPP_PAL 2 +#define MCDE_EXTSRC2CONF_BPP_8BPP_PAL 3 +#define MCDE_EXTSRC2CONF_BPP_RGB444 4 +#define MCDE_EXTSRC2CONF_BPP_ARGB4444 5 +#define MCDE_EXTSRC2CONF_BPP_IRGB1555 6 +#define MCDE_EXTSRC2CONF_BPP_RGB565 7 +#define MCDE_EXTSRC2CONF_BPP_RGB888 8 +#define MCDE_EXTSRC2CONF_BPP_XRGB8888 9 +#define MCDE_EXTSRC2CONF_BPP_ARGB8888 10 +#define MCDE_EXTSRC2CONF_BPP_YCBCR422 11 +#define MCDE_EXTSRC2CONF_BPP_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC2CONF, BPP, MCDE_EXTSRC2CONF_BPP_##__x) +#define MCDE_EXTSRC2CONF_BPP(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC2CONF, BPP, __x) +#define MCDE_EXTSRC2CONF_BGR_SHIFT 12 +#define MCDE_EXTSRC2CONF_BGR_MASK 0x00001000 +#define MCDE_EXTSRC2CONF_BGR_RGB 0 +#define MCDE_EXTSRC2CONF_BGR_BGR 1 +#define MCDE_EXTSRC2CONF_BGR_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC2CONF, BGR, MCDE_EXTSRC2CONF_BGR_##__x) +#define MCDE_EXTSRC2CONF_BGR(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC2CONF, BGR, __x) +#define MCDE_EXTSRC2CONF_BEBO_SHIFT 13 +#define MCDE_EXTSRC2CONF_BEBO_MASK 0x00002000 +#define MCDE_EXTSRC2CONF_BEBO_LITTLE_ENDIAN 0 +#define MCDE_EXTSRC2CONF_BEBO_BIG_ENDIAN 1 +#define 
MCDE_EXTSRC2CONF_BEBO_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC2CONF, BEBO, MCDE_EXTSRC2CONF_BEBO_##__x) +#define MCDE_EXTSRC2CONF_BEBO(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC2CONF, BEBO, __x) +#define MCDE_EXTSRC2CONF_BEPO_SHIFT 14 +#define MCDE_EXTSRC2CONF_BEPO_MASK 0x00004000 +#define MCDE_EXTSRC2CONF_BEPO_LITTLE_ENDIAN 0 +#define MCDE_EXTSRC2CONF_BEPO_BIG_ENDIAN 1 +#define MCDE_EXTSRC2CONF_BEPO_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC2CONF, BEPO, MCDE_EXTSRC2CONF_BEPO_##__x) +#define MCDE_EXTSRC2CONF_BEPO(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC2CONF, BEPO, __x) +#define MCDE_EXTSRC3CONF 0x0000026C +#define MCDE_EXTSRC3CONF_BUF_ID_SHIFT 0 +#define MCDE_EXTSRC3CONF_BUF_ID_MASK 0x00000003 +#define MCDE_EXTSRC3CONF_BUF_ID(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC3CONF, BUF_ID, __x) +#define MCDE_EXTSRC3CONF_BUF_NB_SHIFT 2 +#define MCDE_EXTSRC3CONF_BUF_NB_MASK 0x0000000C +#define MCDE_EXTSRC3CONF_BUF_NB(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC3CONF, BUF_NB, __x) +#define MCDE_EXTSRC3CONF_PRI_OVLID_SHIFT 4 +#define MCDE_EXTSRC3CONF_PRI_OVLID_MASK 0x000000F0 +#define MCDE_EXTSRC3CONF_PRI_OVLID(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC3CONF, PRI_OVLID, __x) +#define MCDE_EXTSRC3CONF_BPP_SHIFT 8 +#define MCDE_EXTSRC3CONF_BPP_MASK 0x00000F00 +#define MCDE_EXTSRC3CONF_BPP_1BPP_PAL 0 +#define MCDE_EXTSRC3CONF_BPP_2BPP_PAL 1 +#define MCDE_EXTSRC3CONF_BPP_4BPP_PAL 2 +#define MCDE_EXTSRC3CONF_BPP_8BPP_PAL 3 +#define MCDE_EXTSRC3CONF_BPP_RGB444 4 +#define MCDE_EXTSRC3CONF_BPP_ARGB4444 5 +#define MCDE_EXTSRC3CONF_BPP_IRGB1555 6 +#define MCDE_EXTSRC3CONF_BPP_RGB565 7 +#define MCDE_EXTSRC3CONF_BPP_RGB888 8 +#define MCDE_EXTSRC3CONF_BPP_XRGB8888 9 +#define MCDE_EXTSRC3CONF_BPP_ARGB8888 10 +#define MCDE_EXTSRC3CONF_BPP_YCBCR422 11 +#define MCDE_EXTSRC3CONF_BPP_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC3CONF, BPP, MCDE_EXTSRC3CONF_BPP_##__x) +#define MCDE_EXTSRC3CONF_BPP(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC3CONF, BPP, __x) +#define MCDE_EXTSRC3CONF_BGR_SHIFT 12 +#define MCDE_EXTSRC3CONF_BGR_MASK 0x00001000 +#define 
MCDE_EXTSRC3CONF_BGR_RGB 0 +#define MCDE_EXTSRC3CONF_BGR_BGR 1 +#define MCDE_EXTSRC3CONF_BGR_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC3CONF, BGR, MCDE_EXTSRC3CONF_BGR_##__x) +#define MCDE_EXTSRC3CONF_BGR(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC3CONF, BGR, __x) +#define MCDE_EXTSRC3CONF_BEBO_SHIFT 13 +#define MCDE_EXTSRC3CONF_BEBO_MASK 0x00002000 +#define MCDE_EXTSRC3CONF_BEBO_LITTLE_ENDIAN 0 +#define MCDE_EXTSRC3CONF_BEBO_BIG_ENDIAN 1 +#define MCDE_EXTSRC3CONF_BEBO_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC3CONF, BEBO, MCDE_EXTSRC3CONF_BEBO_##__x) +#define MCDE_EXTSRC3CONF_BEBO(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC3CONF, BEBO, __x) +#define MCDE_EXTSRC3CONF_BEPO_SHIFT 14 +#define MCDE_EXTSRC3CONF_BEPO_MASK 0x00004000 +#define MCDE_EXTSRC3CONF_BEPO_LITTLE_ENDIAN 0 +#define MCDE_EXTSRC3CONF_BEPO_BIG_ENDIAN 1 +#define MCDE_EXTSRC3CONF_BEPO_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC3CONF, BEPO, MCDE_EXTSRC3CONF_BEPO_##__x) +#define MCDE_EXTSRC3CONF_BEPO(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC3CONF, BEPO, __x) +#define MCDE_EXTSRC4CONF 0x0000028C +#define MCDE_EXTSRC4CONF_BUF_ID_SHIFT 0 +#define MCDE_EXTSRC4CONF_BUF_ID_MASK 0x00000003 +#define MCDE_EXTSRC4CONF_BUF_ID(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC4CONF, BUF_ID, __x) +#define MCDE_EXTSRC4CONF_BUF_NB_SHIFT 2 +#define MCDE_EXTSRC4CONF_BUF_NB_MASK 0x0000000C +#define MCDE_EXTSRC4CONF_BUF_NB(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC4CONF, BUF_NB, __x) +#define MCDE_EXTSRC4CONF_PRI_OVLID_SHIFT 4 +#define MCDE_EXTSRC4CONF_PRI_OVLID_MASK 0x000000F0 +#define MCDE_EXTSRC4CONF_PRI_OVLID(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC4CONF, PRI_OVLID, __x) +#define MCDE_EXTSRC4CONF_BPP_SHIFT 8 +#define MCDE_EXTSRC4CONF_BPP_MASK 0x00000F00 +#define MCDE_EXTSRC4CONF_BPP_1BPP_PAL 0 +#define MCDE_EXTSRC4CONF_BPP_2BPP_PAL 1 +#define MCDE_EXTSRC4CONF_BPP_4BPP_PAL 2 +#define MCDE_EXTSRC4CONF_BPP_8BPP_PAL 3 +#define MCDE_EXTSRC4CONF_BPP_RGB444 4 +#define MCDE_EXTSRC4CONF_BPP_ARGB4444 5 +#define MCDE_EXTSRC4CONF_BPP_IRGB1555 6 +#define MCDE_EXTSRC4CONF_BPP_RGB565 7 +#define 
MCDE_EXTSRC4CONF_BPP_RGB888 8 +#define MCDE_EXTSRC4CONF_BPP_XRGB8888 9 +#define MCDE_EXTSRC4CONF_BPP_ARGB8888 10 +#define MCDE_EXTSRC4CONF_BPP_YCBCR422 11 +#define MCDE_EXTSRC4CONF_BPP_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC4CONF, BPP, MCDE_EXTSRC4CONF_BPP_##__x) +#define MCDE_EXTSRC4CONF_BPP(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC4CONF, BPP, __x) +#define MCDE_EXTSRC4CONF_BGR_SHIFT 12 +#define MCDE_EXTSRC4CONF_BGR_MASK 0x00001000 +#define MCDE_EXTSRC4CONF_BGR_RGB 0 +#define MCDE_EXTSRC4CONF_BGR_BGR 1 +#define MCDE_EXTSRC4CONF_BGR_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC4CONF, BGR, MCDE_EXTSRC4CONF_BGR_##__x) +#define MCDE_EXTSRC4CONF_BGR(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC4CONF, BGR, __x) +#define MCDE_EXTSRC4CONF_BEBO_SHIFT 13 +#define MCDE_EXTSRC4CONF_BEBO_MASK 0x00002000 +#define MCDE_EXTSRC4CONF_BEBO_LITTLE_ENDIAN 0 +#define MCDE_EXTSRC4CONF_BEBO_BIG_ENDIAN 1 +#define MCDE_EXTSRC4CONF_BEBO_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC4CONF, BEBO, MCDE_EXTSRC4CONF_BEBO_##__x) +#define MCDE_EXTSRC4CONF_BEBO(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC4CONF, BEBO, __x) +#define MCDE_EXTSRC4CONF_BEPO_SHIFT 14 +#define MCDE_EXTSRC4CONF_BEPO_MASK 0x00004000 +#define MCDE_EXTSRC4CONF_BEPO_LITTLE_ENDIAN 0 +#define MCDE_EXTSRC4CONF_BEPO_BIG_ENDIAN 1 +#define MCDE_EXTSRC4CONF_BEPO_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC4CONF, BEPO, MCDE_EXTSRC4CONF_BEPO_##__x) +#define MCDE_EXTSRC4CONF_BEPO(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC4CONF, BEPO, __x) +#define MCDE_EXTSRC5CONF 0x000002AC +#define MCDE_EXTSRC5CONF_BUF_ID_SHIFT 0 +#define MCDE_EXTSRC5CONF_BUF_ID_MASK 0x00000003 +#define MCDE_EXTSRC5CONF_BUF_ID(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC5CONF, BUF_ID, __x) +#define MCDE_EXTSRC5CONF_BUF_NB_SHIFT 2 +#define MCDE_EXTSRC5CONF_BUF_NB_MASK 0x0000000C +#define MCDE_EXTSRC5CONF_BUF_NB(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC5CONF, BUF_NB, __x) +#define MCDE_EXTSRC5CONF_PRI_OVLID_SHIFT 4 +#define MCDE_EXTSRC5CONF_PRI_OVLID_MASK 0x000000F0 +#define MCDE_EXTSRC5CONF_PRI_OVLID(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC5CONF, 
PRI_OVLID, __x) +#define MCDE_EXTSRC5CONF_BPP_SHIFT 8 +#define MCDE_EXTSRC5CONF_BPP_MASK 0x00000F00 +#define MCDE_EXTSRC5CONF_BPP_1BPP_PAL 0 +#define MCDE_EXTSRC5CONF_BPP_2BPP_PAL 1 +#define MCDE_EXTSRC5CONF_BPP_4BPP_PAL 2 +#define MCDE_EXTSRC5CONF_BPP_8BPP_PAL 3 +#define MCDE_EXTSRC5CONF_BPP_RGB444 4 +#define MCDE_EXTSRC5CONF_BPP_ARGB4444 5 +#define MCDE_EXTSRC5CONF_BPP_IRGB1555 6 +#define MCDE_EXTSRC5CONF_BPP_RGB565 7 +#define MCDE_EXTSRC5CONF_BPP_RGB888 8 +#define MCDE_EXTSRC5CONF_BPP_XRGB8888 9 +#define MCDE_EXTSRC5CONF_BPP_ARGB8888 10 +#define MCDE_EXTSRC5CONF_BPP_YCBCR422 11 +#define MCDE_EXTSRC5CONF_BPP_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC5CONF, BPP, MCDE_EXTSRC5CONF_BPP_##__x) +#define MCDE_EXTSRC5CONF_BPP(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC5CONF, BPP, __x) +#define MCDE_EXTSRC5CONF_BGR_SHIFT 12 +#define MCDE_EXTSRC5CONF_BGR_MASK 0x00001000 +#define MCDE_EXTSRC5CONF_BGR_RGB 0 +#define MCDE_EXTSRC5CONF_BGR_BGR 1 +#define MCDE_EXTSRC5CONF_BGR_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC5CONF, BGR, MCDE_EXTSRC5CONF_BGR_##__x) +#define MCDE_EXTSRC5CONF_BGR(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC5CONF, BGR, __x) +#define MCDE_EXTSRC5CONF_BEBO_SHIFT 13 +#define MCDE_EXTSRC5CONF_BEBO_MASK 0x00002000 +#define MCDE_EXTSRC5CONF_BEBO_LITTLE_ENDIAN 0 +#define MCDE_EXTSRC5CONF_BEBO_BIG_ENDIAN 1 +#define MCDE_EXTSRC5CONF_BEBO_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC5CONF, BEBO, MCDE_EXTSRC5CONF_BEBO_##__x) +#define MCDE_EXTSRC5CONF_BEBO(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC5CONF, BEBO, __x) +#define MCDE_EXTSRC5CONF_BEPO_SHIFT 14 +#define MCDE_EXTSRC5CONF_BEPO_MASK 0x00004000 +#define MCDE_EXTSRC5CONF_BEPO_LITTLE_ENDIAN 0 +#define MCDE_EXTSRC5CONF_BEPO_BIG_ENDIAN 1 +#define MCDE_EXTSRC5CONF_BEPO_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC5CONF, BEPO, MCDE_EXTSRC5CONF_BEPO_##__x) +#define MCDE_EXTSRC5CONF_BEPO(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC5CONF, BEPO, __x) +#define MCDE_EXTSRC6CONF 0x000002CC +#define MCDE_EXTSRC6CONF_BUF_ID_SHIFT 0 +#define MCDE_EXTSRC6CONF_BUF_ID_MASK 0x00000003 +#define 
MCDE_EXTSRC6CONF_BUF_ID(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC6CONF, BUF_ID, __x) +#define MCDE_EXTSRC6CONF_BUF_NB_SHIFT 2 +#define MCDE_EXTSRC6CONF_BUF_NB_MASK 0x0000000C +#define MCDE_EXTSRC6CONF_BUF_NB(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC6CONF, BUF_NB, __x) +#define MCDE_EXTSRC6CONF_PRI_OVLID_SHIFT 4 +#define MCDE_EXTSRC6CONF_PRI_OVLID_MASK 0x000000F0 +#define MCDE_EXTSRC6CONF_PRI_OVLID(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC6CONF, PRI_OVLID, __x) +#define MCDE_EXTSRC6CONF_BPP_SHIFT 8 +#define MCDE_EXTSRC6CONF_BPP_MASK 0x00000F00 +#define MCDE_EXTSRC6CONF_BPP_1BPP_PAL 0 +#define MCDE_EXTSRC6CONF_BPP_2BPP_PAL 1 +#define MCDE_EXTSRC6CONF_BPP_4BPP_PAL 2 +#define MCDE_EXTSRC6CONF_BPP_8BPP_PAL 3 +#define MCDE_EXTSRC6CONF_BPP_RGB444 4 +#define MCDE_EXTSRC6CONF_BPP_ARGB4444 5 +#define MCDE_EXTSRC6CONF_BPP_IRGB1555 6 +#define MCDE_EXTSRC6CONF_BPP_RGB565 7 +#define MCDE_EXTSRC6CONF_BPP_RGB888 8 +#define MCDE_EXTSRC6CONF_BPP_XRGB8888 9 +#define MCDE_EXTSRC6CONF_BPP_ARGB8888 10 +#define MCDE_EXTSRC6CONF_BPP_YCBCR422 11 +#define MCDE_EXTSRC6CONF_BPP_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC6CONF, BPP, MCDE_EXTSRC6CONF_BPP_##__x) +#define MCDE_EXTSRC6CONF_BPP(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC6CONF, BPP, __x) +#define MCDE_EXTSRC6CONF_BGR_SHIFT 12 +#define MCDE_EXTSRC6CONF_BGR_MASK 0x00001000 +#define MCDE_EXTSRC6CONF_BGR_RGB 0 +#define MCDE_EXTSRC6CONF_BGR_BGR 1 +#define MCDE_EXTSRC6CONF_BGR_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC6CONF, BGR, MCDE_EXTSRC6CONF_BGR_##__x) +#define MCDE_EXTSRC6CONF_BGR(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC6CONF, BGR, __x) +#define MCDE_EXTSRC6CONF_BEBO_SHIFT 13 +#define MCDE_EXTSRC6CONF_BEBO_MASK 0x00002000 +#define MCDE_EXTSRC6CONF_BEBO_LITTLE_ENDIAN 0 +#define MCDE_EXTSRC6CONF_BEBO_BIG_ENDIAN 1 +#define MCDE_EXTSRC6CONF_BEBO_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC6CONF, BEBO, MCDE_EXTSRC6CONF_BEBO_##__x) +#define MCDE_EXTSRC6CONF_BEBO(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC6CONF, BEBO, __x) +#define MCDE_EXTSRC6CONF_BEPO_SHIFT 14 +#define MCDE_EXTSRC6CONF_BEPO_MASK 
0x00004000 +#define MCDE_EXTSRC6CONF_BEPO_LITTLE_ENDIAN 0 +#define MCDE_EXTSRC6CONF_BEPO_BIG_ENDIAN 1 +#define MCDE_EXTSRC6CONF_BEPO_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC6CONF, BEPO, MCDE_EXTSRC6CONF_BEPO_##__x) +#define MCDE_EXTSRC6CONF_BEPO(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC6CONF, BEPO, __x) +#define MCDE_EXTSRC7CONF 0x000002EC +#define MCDE_EXTSRC7CONF_BUF_ID_SHIFT 0 +#define MCDE_EXTSRC7CONF_BUF_ID_MASK 0x00000003 +#define MCDE_EXTSRC7CONF_BUF_ID(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC7CONF, BUF_ID, __x) +#define MCDE_EXTSRC7CONF_BUF_NB_SHIFT 2 +#define MCDE_EXTSRC7CONF_BUF_NB_MASK 0x0000000C +#define MCDE_EXTSRC7CONF_BUF_NB(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC7CONF, BUF_NB, __x) +#define MCDE_EXTSRC7CONF_PRI_OVLID_SHIFT 4 +#define MCDE_EXTSRC7CONF_PRI_OVLID_MASK 0x000000F0 +#define MCDE_EXTSRC7CONF_PRI_OVLID(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC7CONF, PRI_OVLID, __x) +#define MCDE_EXTSRC7CONF_BPP_SHIFT 8 +#define MCDE_EXTSRC7CONF_BPP_MASK 0x00000F00 +#define MCDE_EXTSRC7CONF_BPP_1BPP_PAL 0 +#define MCDE_EXTSRC7CONF_BPP_2BPP_PAL 1 +#define MCDE_EXTSRC7CONF_BPP_4BPP_PAL 2 +#define MCDE_EXTSRC7CONF_BPP_8BPP_PAL 3 +#define MCDE_EXTSRC7CONF_BPP_RGB444 4 +#define MCDE_EXTSRC7CONF_BPP_ARGB4444 5 +#define MCDE_EXTSRC7CONF_BPP_IRGB1555 6 +#define MCDE_EXTSRC7CONF_BPP_RGB565 7 +#define MCDE_EXTSRC7CONF_BPP_RGB888 8 +#define MCDE_EXTSRC7CONF_BPP_XRGB8888 9 +#define MCDE_EXTSRC7CONF_BPP_ARGB8888 10 +#define MCDE_EXTSRC7CONF_BPP_YCBCR422 11 +#define MCDE_EXTSRC7CONF_BPP_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC7CONF, BPP, MCDE_EXTSRC7CONF_BPP_##__x) +#define MCDE_EXTSRC7CONF_BPP(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC7CONF, BPP, __x) +#define MCDE_EXTSRC7CONF_BGR_SHIFT 12 +#define MCDE_EXTSRC7CONF_BGR_MASK 0x00001000 +#define MCDE_EXTSRC7CONF_BGR_RGB 0 +#define MCDE_EXTSRC7CONF_BGR_BGR 1 +#define MCDE_EXTSRC7CONF_BGR_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC7CONF, BGR, MCDE_EXTSRC7CONF_BGR_##__x) +#define MCDE_EXTSRC7CONF_BGR(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC7CONF, BGR, __x) +#define 
MCDE_EXTSRC7CONF_BEBO_SHIFT 13 +#define MCDE_EXTSRC7CONF_BEBO_MASK 0x00002000 +#define MCDE_EXTSRC7CONF_BEBO_LITTLE_ENDIAN 0 +#define MCDE_EXTSRC7CONF_BEBO_BIG_ENDIAN 1 +#define MCDE_EXTSRC7CONF_BEBO_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC7CONF, BEBO, MCDE_EXTSRC7CONF_BEBO_##__x) +#define MCDE_EXTSRC7CONF_BEBO(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC7CONF, BEBO, __x) +#define MCDE_EXTSRC7CONF_BEPO_SHIFT 14 +#define MCDE_EXTSRC7CONF_BEPO_MASK 0x00004000 +#define MCDE_EXTSRC7CONF_BEPO_LITTLE_ENDIAN 0 +#define MCDE_EXTSRC7CONF_BEPO_BIG_ENDIAN 1 +#define MCDE_EXTSRC7CONF_BEPO_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC7CONF, BEPO, MCDE_EXTSRC7CONF_BEPO_##__x) +#define MCDE_EXTSRC7CONF_BEPO(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC7CONF, BEPO, __x) +#define MCDE_EXTSRC8CONF 0x0000030C +#define MCDE_EXTSRC8CONF_BUF_ID_SHIFT 0 +#define MCDE_EXTSRC8CONF_BUF_ID_MASK 0x00000003 +#define MCDE_EXTSRC8CONF_BUF_ID(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC8CONF, BUF_ID, __x) +#define MCDE_EXTSRC8CONF_BUF_NB_SHIFT 2 +#define MCDE_EXTSRC8CONF_BUF_NB_MASK 0x0000000C +#define MCDE_EXTSRC8CONF_BUF_NB(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC8CONF, BUF_NB, __x) +#define MCDE_EXTSRC8CONF_PRI_OVLID_SHIFT 4 +#define MCDE_EXTSRC8CONF_PRI_OVLID_MASK 0x000000F0 +#define MCDE_EXTSRC8CONF_PRI_OVLID(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC8CONF, PRI_OVLID, __x) +#define MCDE_EXTSRC8CONF_BPP_SHIFT 8 +#define MCDE_EXTSRC8CONF_BPP_MASK 0x00000F00 +#define MCDE_EXTSRC8CONF_BPP_1BPP_PAL 0 +#define MCDE_EXTSRC8CONF_BPP_2BPP_PAL 1 +#define MCDE_EXTSRC8CONF_BPP_4BPP_PAL 2 +#define MCDE_EXTSRC8CONF_BPP_8BPP_PAL 3 +#define MCDE_EXTSRC8CONF_BPP_RGB444 4 +#define MCDE_EXTSRC8CONF_BPP_ARGB4444 5 +#define MCDE_EXTSRC8CONF_BPP_IRGB1555 6 +#define MCDE_EXTSRC8CONF_BPP_RGB565 7 +#define MCDE_EXTSRC8CONF_BPP_RGB888 8 +#define MCDE_EXTSRC8CONF_BPP_XRGB8888 9 +#define MCDE_EXTSRC8CONF_BPP_ARGB8888 10 +#define MCDE_EXTSRC8CONF_BPP_YCBCR422 11 +#define MCDE_EXTSRC8CONF_BPP_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC8CONF, BPP, MCDE_EXTSRC8CONF_BPP_##__x) +#define 
MCDE_EXTSRC8CONF_BPP(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC8CONF, BPP, __x) +#define MCDE_EXTSRC8CONF_BGR_SHIFT 12 +#define MCDE_EXTSRC8CONF_BGR_MASK 0x00001000 +#define MCDE_EXTSRC8CONF_BGR_RGB 0 +#define MCDE_EXTSRC8CONF_BGR_BGR 1 +#define MCDE_EXTSRC8CONF_BGR_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC8CONF, BGR, MCDE_EXTSRC8CONF_BGR_##__x) +#define MCDE_EXTSRC8CONF_BGR(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC8CONF, BGR, __x) +#define MCDE_EXTSRC8CONF_BEBO_SHIFT 13 +#define MCDE_EXTSRC8CONF_BEBO_MASK 0x00002000 +#define MCDE_EXTSRC8CONF_BEBO_LITTLE_ENDIAN 0 +#define MCDE_EXTSRC8CONF_BEBO_BIG_ENDIAN 1 +#define MCDE_EXTSRC8CONF_BEBO_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC8CONF, BEBO, MCDE_EXTSRC8CONF_BEBO_##__x) +#define MCDE_EXTSRC8CONF_BEBO(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC8CONF, BEBO, __x) +#define MCDE_EXTSRC8CONF_BEPO_SHIFT 14 +#define MCDE_EXTSRC8CONF_BEPO_MASK 0x00004000 +#define MCDE_EXTSRC8CONF_BEPO_LITTLE_ENDIAN 0 +#define MCDE_EXTSRC8CONF_BEPO_BIG_ENDIAN 1 +#define MCDE_EXTSRC8CONF_BEPO_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC8CONF, BEPO, MCDE_EXTSRC8CONF_BEPO_##__x) +#define MCDE_EXTSRC8CONF_BEPO(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC8CONF, BEPO, __x) +#define MCDE_EXTSRC9CONF 0x0000032C +#define MCDE_EXTSRC9CONF_BUF_ID_SHIFT 0 +#define MCDE_EXTSRC9CONF_BUF_ID_MASK 0x00000003 +#define MCDE_EXTSRC9CONF_BUF_ID(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC9CONF, BUF_ID, __x) +#define MCDE_EXTSRC9CONF_BUF_NB_SHIFT 2 +#define MCDE_EXTSRC9CONF_BUF_NB_MASK 0x0000000C +#define MCDE_EXTSRC9CONF_BUF_NB(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC9CONF, BUF_NB, __x) +#define MCDE_EXTSRC9CONF_PRI_OVLID_SHIFT 4 +#define MCDE_EXTSRC9CONF_PRI_OVLID_MASK 0x000000F0 +#define MCDE_EXTSRC9CONF_PRI_OVLID(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC9CONF, PRI_OVLID, __x) +#define MCDE_EXTSRC9CONF_BPP_SHIFT 8 +#define MCDE_EXTSRC9CONF_BPP_MASK 0x00000F00 +#define MCDE_EXTSRC9CONF_BPP_1BPP_PAL 0 +#define MCDE_EXTSRC9CONF_BPP_2BPP_PAL 1 +#define MCDE_EXTSRC9CONF_BPP_4BPP_PAL 2 +#define MCDE_EXTSRC9CONF_BPP_8BPP_PAL 3 +#define 
MCDE_EXTSRC9CONF_BPP_RGB444 4 +#define MCDE_EXTSRC9CONF_BPP_ARGB4444 5 +#define MCDE_EXTSRC9CONF_BPP_IRGB1555 6 +#define MCDE_EXTSRC9CONF_BPP_RGB565 7 +#define MCDE_EXTSRC9CONF_BPP_RGB888 8 +#define MCDE_EXTSRC9CONF_BPP_XRGB8888 9 +#define MCDE_EXTSRC9CONF_BPP_ARGB8888 10 +#define MCDE_EXTSRC9CONF_BPP_YCBCR422 11 +#define MCDE_EXTSRC9CONF_BPP_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC9CONF, BPP, MCDE_EXTSRC9CONF_BPP_##__x) +#define MCDE_EXTSRC9CONF_BPP(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC9CONF, BPP, __x) +#define MCDE_EXTSRC9CONF_BGR_SHIFT 12 +#define MCDE_EXTSRC9CONF_BGR_MASK 0x00001000 +#define MCDE_EXTSRC9CONF_BGR_RGB 0 +#define MCDE_EXTSRC9CONF_BGR_BGR 1 +#define MCDE_EXTSRC9CONF_BGR_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC9CONF, BGR, MCDE_EXTSRC9CONF_BGR_##__x) +#define MCDE_EXTSRC9CONF_BGR(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC9CONF, BGR, __x) +#define MCDE_EXTSRC9CONF_BEBO_SHIFT 13 +#define MCDE_EXTSRC9CONF_BEBO_MASK 0x00002000 +#define MCDE_EXTSRC9CONF_BEBO_LITTLE_ENDIAN 0 +#define MCDE_EXTSRC9CONF_BEBO_BIG_ENDIAN 1 +#define MCDE_EXTSRC9CONF_BEBO_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC9CONF, BEBO, MCDE_EXTSRC9CONF_BEBO_##__x) +#define MCDE_EXTSRC9CONF_BEBO(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC9CONF, BEBO, __x) +#define MCDE_EXTSRC9CONF_BEPO_SHIFT 14 +#define MCDE_EXTSRC9CONF_BEPO_MASK 0x00004000 +#define MCDE_EXTSRC9CONF_BEPO_LITTLE_ENDIAN 0 +#define MCDE_EXTSRC9CONF_BEPO_BIG_ENDIAN 1 +#define MCDE_EXTSRC9CONF_BEPO_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC9CONF, BEPO, MCDE_EXTSRC9CONF_BEPO_##__x) +#define MCDE_EXTSRC9CONF_BEPO(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC9CONF, BEPO, __x) +#define MCDE_EXTSRC0CR 0x00000210 +#define MCDE_EXTSRC0CR_GROUPOFFSET 0x20 +#define MCDE_EXTSRC0CR_SEL_MOD_SHIFT 0 +#define MCDE_EXTSRC0CR_SEL_MOD_MASK 0x00000003 +#define MCDE_EXTSRC0CR_SEL_MOD_EXTERNAL_SEL 0 +#define MCDE_EXTSRC0CR_SEL_MOD_AUTO_TOGGLE 1 +#define MCDE_EXTSRC0CR_SEL_MOD_SOFTWARE_SEL 2 +#define MCDE_EXTSRC0CR_SEL_MOD_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC0CR, SEL_MOD, 
MCDE_EXTSRC0CR_SEL_MOD_##__x) +#define MCDE_EXTSRC0CR_SEL_MOD(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC0CR, SEL_MOD, __x) +#define MCDE_EXTSRC0CR_MULTIOVL_CTRL_SHIFT 2 +#define MCDE_EXTSRC0CR_MULTIOVL_CTRL_MASK 0x00000004 +#define MCDE_EXTSRC0CR_MULTIOVL_CTRL_ALL 0 +#define MCDE_EXTSRC0CR_MULTIOVL_CTRL_PRIMARY 1 +#define MCDE_EXTSRC0CR_MULTIOVL_CTRL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC0CR, MULTIOVL_CTRL, \ + MCDE_EXTSRC0CR_MULTIOVL_CTRL_##__x) +#define MCDE_EXTSRC0CR_MULTIOVL_CTRL(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC0CR, MULTIOVL_CTRL, __x) +#define MCDE_EXTSRC0CR_FS_DIV_DISABLE_SHIFT 3 +#define MCDE_EXTSRC0CR_FS_DIV_DISABLE_MASK 0x00000008 +#define MCDE_EXTSRC0CR_FS_DIV_DISABLE(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC0CR, FS_DIV_DISABLE, __x) +#define MCDE_EXTSRC0CR_FORCE_FS_DIV_SHIFT 4 +#define MCDE_EXTSRC0CR_FORCE_FS_DIV_MASK 0x00000010 +#define MCDE_EXTSRC0CR_FORCE_FS_DIV(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC0CR, FORCE_FS_DIV, __x) +#define MCDE_EXTSRC1CR 0x00000230 +#define MCDE_EXTSRC1CR_SEL_MOD_SHIFT 0 +#define MCDE_EXTSRC1CR_SEL_MOD_MASK 0x00000003 +#define MCDE_EXTSRC1CR_SEL_MOD_EXTERNAL_SEL 0 +#define MCDE_EXTSRC1CR_SEL_MOD_AUTO_TOGGLE 1 +#define MCDE_EXTSRC1CR_SEL_MOD_SOFTWARE_SEL 2 +#define MCDE_EXTSRC1CR_SEL_MOD_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC1CR, SEL_MOD, MCDE_EXTSRC1CR_SEL_MOD_##__x) +#define MCDE_EXTSRC1CR_SEL_MOD(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC1CR, SEL_MOD, __x) +#define MCDE_EXTSRC1CR_MULTIOVL_CTRL_SHIFT 2 +#define MCDE_EXTSRC1CR_MULTIOVL_CTRL_MASK 0x00000004 +#define MCDE_EXTSRC1CR_MULTIOVL_CTRL_ALL 0 +#define MCDE_EXTSRC1CR_MULTIOVL_CTRL_PRIMARY 1 +#define MCDE_EXTSRC1CR_MULTIOVL_CTRL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC1CR, MULTIOVL_CTRL, \ + MCDE_EXTSRC1CR_MULTIOVL_CTRL_##__x) +#define MCDE_EXTSRC1CR_MULTIOVL_CTRL(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC1CR, MULTIOVL_CTRL, __x) +#define MCDE_EXTSRC1CR_FS_DIV_DISABLE_SHIFT 3 +#define MCDE_EXTSRC1CR_FS_DIV_DISABLE_MASK 0x00000008 +#define MCDE_EXTSRC1CR_FS_DIV_DISABLE(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC1CR, 
FS_DIV_DISABLE, __x) +#define MCDE_EXTSRC1CR_FORCE_FS_DIV_SHIFT 4 +#define MCDE_EXTSRC1CR_FORCE_FS_DIV_MASK 0x00000010 +#define MCDE_EXTSRC1CR_FORCE_FS_DIV(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC1CR, FORCE_FS_DIV, __x) +#define MCDE_EXTSRC2CR 0x00000250 +#define MCDE_EXTSRC2CR_SEL_MOD_SHIFT 0 +#define MCDE_EXTSRC2CR_SEL_MOD_MASK 0x00000003 +#define MCDE_EXTSRC2CR_SEL_MOD_EXTERNAL_SEL 0 +#define MCDE_EXTSRC2CR_SEL_MOD_AUTO_TOGGLE 1 +#define MCDE_EXTSRC2CR_SEL_MOD_SOFTWARE_SEL 2 +#define MCDE_EXTSRC2CR_SEL_MOD_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC2CR, SEL_MOD, MCDE_EXTSRC2CR_SEL_MOD_##__x) +#define MCDE_EXTSRC2CR_SEL_MOD(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC2CR, SEL_MOD, __x) +#define MCDE_EXTSRC2CR_MULTIOVL_CTRL_SHIFT 2 +#define MCDE_EXTSRC2CR_MULTIOVL_CTRL_MASK 0x00000004 +#define MCDE_EXTSRC2CR_MULTIOVL_CTRL_ALL 0 +#define MCDE_EXTSRC2CR_MULTIOVL_CTRL_PRIMARY 1 +#define MCDE_EXTSRC2CR_MULTIOVL_CTRL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC2CR, MULTIOVL_CTRL, \ + MCDE_EXTSRC2CR_MULTIOVL_CTRL_##__x) +#define MCDE_EXTSRC2CR_MULTIOVL_CTRL(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC2CR, MULTIOVL_CTRL, __x) +#define MCDE_EXTSRC2CR_FS_DIV_DISABLE_SHIFT 3 +#define MCDE_EXTSRC2CR_FS_DIV_DISABLE_MASK 0x00000008 +#define MCDE_EXTSRC2CR_FS_DIV_DISABLE(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC2CR, FS_DIV_DISABLE, __x) +#define MCDE_EXTSRC2CR_FORCE_FS_DIV_SHIFT 4 +#define MCDE_EXTSRC2CR_FORCE_FS_DIV_MASK 0x00000010 +#define MCDE_EXTSRC2CR_FORCE_FS_DIV(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC2CR, FORCE_FS_DIV, __x) +#define MCDE_EXTSRC3CR 0x00000270 +#define MCDE_EXTSRC3CR_SEL_MOD_SHIFT 0 +#define MCDE_EXTSRC3CR_SEL_MOD_MASK 0x00000003 +#define MCDE_EXTSRC3CR_SEL_MOD_EXTERNAL_SEL 0 +#define MCDE_EXTSRC3CR_SEL_MOD_AUTO_TOGGLE 1 +#define MCDE_EXTSRC3CR_SEL_MOD_SOFTWARE_SEL 2 +#define MCDE_EXTSRC3CR_SEL_MOD_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC3CR, SEL_MOD, MCDE_EXTSRC3CR_SEL_MOD_##__x) +#define MCDE_EXTSRC3CR_SEL_MOD(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC3CR, SEL_MOD, __x) +#define MCDE_EXTSRC3CR_MULTIOVL_CTRL_SHIFT 2 
+#define MCDE_EXTSRC3CR_MULTIOVL_CTRL_MASK 0x00000004 +#define MCDE_EXTSRC3CR_MULTIOVL_CTRL_ALL 0 +#define MCDE_EXTSRC3CR_MULTIOVL_CTRL_PRIMARY 1 +#define MCDE_EXTSRC3CR_MULTIOVL_CTRL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC3CR, MULTIOVL_CTRL, \ + MCDE_EXTSRC3CR_MULTIOVL_CTRL_##__x) +#define MCDE_EXTSRC3CR_MULTIOVL_CTRL(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC3CR, MULTIOVL_CTRL, __x) +#define MCDE_EXTSRC3CR_FS_DIV_DISABLE_SHIFT 3 +#define MCDE_EXTSRC3CR_FS_DIV_DISABLE_MASK 0x00000008 +#define MCDE_EXTSRC3CR_FS_DIV_DISABLE(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC3CR, FS_DIV_DISABLE, __x) +#define MCDE_EXTSRC3CR_FORCE_FS_DIV_SHIFT 4 +#define MCDE_EXTSRC3CR_FORCE_FS_DIV_MASK 0x00000010 +#define MCDE_EXTSRC3CR_FORCE_FS_DIV(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC3CR, FORCE_FS_DIV, __x) +#define MCDE_EXTSRC4CR 0x00000290 +#define MCDE_EXTSRC4CR_SEL_MOD_SHIFT 0 +#define MCDE_EXTSRC4CR_SEL_MOD_MASK 0x00000003 +#define MCDE_EXTSRC4CR_SEL_MOD_EXTERNAL_SEL 0 +#define MCDE_EXTSRC4CR_SEL_MOD_AUTO_TOGGLE 1 +#define MCDE_EXTSRC4CR_SEL_MOD_SOFTWARE_SEL 2 +#define MCDE_EXTSRC4CR_SEL_MOD_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC4CR, SEL_MOD, MCDE_EXTSRC4CR_SEL_MOD_##__x) +#define MCDE_EXTSRC4CR_SEL_MOD(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC4CR, SEL_MOD, __x) +#define MCDE_EXTSRC4CR_MULTIOVL_CTRL_SHIFT 2 +#define MCDE_EXTSRC4CR_MULTIOVL_CTRL_MASK 0x00000004 +#define MCDE_EXTSRC4CR_MULTIOVL_CTRL_ALL 0 +#define MCDE_EXTSRC4CR_MULTIOVL_CTRL_PRIMARY 1 +#define MCDE_EXTSRC4CR_MULTIOVL_CTRL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC4CR, MULTIOVL_CTRL, \ + MCDE_EXTSRC4CR_MULTIOVL_CTRL_##__x) +#define MCDE_EXTSRC4CR_MULTIOVL_CTRL(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC4CR, MULTIOVL_CTRL, __x) +#define MCDE_EXTSRC4CR_FS_DIV_DISABLE_SHIFT 3 +#define MCDE_EXTSRC4CR_FS_DIV_DISABLE_MASK 0x00000008 +#define MCDE_EXTSRC4CR_FS_DIV_DISABLE(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC4CR, FS_DIV_DISABLE, __x) +#define MCDE_EXTSRC4CR_FORCE_FS_DIV_SHIFT 4 +#define MCDE_EXTSRC4CR_FORCE_FS_DIV_MASK 0x00000010 +#define MCDE_EXTSRC4CR_FORCE_FS_DIV(__x) \ + 
MCDE_VAL2REG(MCDE_EXTSRC4CR, FORCE_FS_DIV, __x) +#define MCDE_EXTSRC5CR 0x000002B0 +#define MCDE_EXTSRC5CR_SEL_MOD_SHIFT 0 +#define MCDE_EXTSRC5CR_SEL_MOD_MASK 0x00000003 +#define MCDE_EXTSRC5CR_SEL_MOD_EXTERNAL_SEL 0 +#define MCDE_EXTSRC5CR_SEL_MOD_AUTO_TOGGLE 1 +#define MCDE_EXTSRC5CR_SEL_MOD_SOFTWARE_SEL 2 +#define MCDE_EXTSRC5CR_SEL_MOD_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC5CR, SEL_MOD, MCDE_EXTSRC5CR_SEL_MOD_##__x) +#define MCDE_EXTSRC5CR_SEL_MOD(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC5CR, SEL_MOD, __x) +#define MCDE_EXTSRC5CR_MULTIOVL_CTRL_SHIFT 2 +#define MCDE_EXTSRC5CR_MULTIOVL_CTRL_MASK 0x00000004 +#define MCDE_EXTSRC5CR_MULTIOVL_CTRL_ALL 0 +#define MCDE_EXTSRC5CR_MULTIOVL_CTRL_PRIMARY 1 +#define MCDE_EXTSRC5CR_MULTIOVL_CTRL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC5CR, MULTIOVL_CTRL, \ + MCDE_EXTSRC5CR_MULTIOVL_CTRL_##__x) +#define MCDE_EXTSRC5CR_MULTIOVL_CTRL(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC5CR, MULTIOVL_CTRL, __x) +#define MCDE_EXTSRC5CR_FS_DIV_DISABLE_SHIFT 3 +#define MCDE_EXTSRC5CR_FS_DIV_DISABLE_MASK 0x00000008 +#define MCDE_EXTSRC5CR_FS_DIV_DISABLE(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC5CR, FS_DIV_DISABLE, __x) +#define MCDE_EXTSRC5CR_FORCE_FS_DIV_SHIFT 4 +#define MCDE_EXTSRC5CR_FORCE_FS_DIV_MASK 0x00000010 +#define MCDE_EXTSRC5CR_FORCE_FS_DIV(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC5CR, FORCE_FS_DIV, __x) +#define MCDE_EXTSRC6CR 0x000002D0 +#define MCDE_EXTSRC6CR_SEL_MOD_SHIFT 0 +#define MCDE_EXTSRC6CR_SEL_MOD_MASK 0x00000003 +#define MCDE_EXTSRC6CR_SEL_MOD_EXTERNAL_SEL 0 +#define MCDE_EXTSRC6CR_SEL_MOD_AUTO_TOGGLE 1 +#define MCDE_EXTSRC6CR_SEL_MOD_SOFTWARE_SEL 2 +#define MCDE_EXTSRC6CR_SEL_MOD_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC6CR, SEL_MOD, MCDE_EXTSRC6CR_SEL_MOD_##__x) +#define MCDE_EXTSRC6CR_SEL_MOD(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC6CR, SEL_MOD, __x) +#define MCDE_EXTSRC6CR_MULTIOVL_CTRL_SHIFT 2 +#define MCDE_EXTSRC6CR_MULTIOVL_CTRL_MASK 0x00000004 +#define MCDE_EXTSRC6CR_MULTIOVL_CTRL_ALL 0 +#define MCDE_EXTSRC6CR_MULTIOVL_CTRL_PRIMARY 1 +#define 
MCDE_EXTSRC6CR_MULTIOVL_CTRL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC6CR, MULTIOVL_CTRL, \ + MCDE_EXTSRC6CR_MULTIOVL_CTRL_##__x) +#define MCDE_EXTSRC6CR_MULTIOVL_CTRL(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC6CR, MULTIOVL_CTRL, __x) +#define MCDE_EXTSRC6CR_FS_DIV_DISABLE_SHIFT 3 +#define MCDE_EXTSRC6CR_FS_DIV_DISABLE_MASK 0x00000008 +#define MCDE_EXTSRC6CR_FS_DIV_DISABLE(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC6CR, FS_DIV_DISABLE, __x) +#define MCDE_EXTSRC6CR_FORCE_FS_DIV_SHIFT 4 +#define MCDE_EXTSRC6CR_FORCE_FS_DIV_MASK 0x00000010 +#define MCDE_EXTSRC6CR_FORCE_FS_DIV(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC6CR, FORCE_FS_DIV, __x) +#define MCDE_EXTSRC7CR 0x000002F0 +#define MCDE_EXTSRC7CR_SEL_MOD_SHIFT 0 +#define MCDE_EXTSRC7CR_SEL_MOD_MASK 0x00000003 +#define MCDE_EXTSRC7CR_SEL_MOD_EXTERNAL_SEL 0 +#define MCDE_EXTSRC7CR_SEL_MOD_AUTO_TOGGLE 1 +#define MCDE_EXTSRC7CR_SEL_MOD_SOFTWARE_SEL 2 +#define MCDE_EXTSRC7CR_SEL_MOD_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC7CR, SEL_MOD, MCDE_EXTSRC7CR_SEL_MOD_##__x) +#define MCDE_EXTSRC7CR_SEL_MOD(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC7CR, SEL_MOD, __x) +#define MCDE_EXTSRC7CR_MULTIOVL_CTRL_SHIFT 2 +#define MCDE_EXTSRC7CR_MULTIOVL_CTRL_MASK 0x00000004 +#define MCDE_EXTSRC7CR_MULTIOVL_CTRL_ALL 0 +#define MCDE_EXTSRC7CR_MULTIOVL_CTRL_PRIMARY 1 +#define MCDE_EXTSRC7CR_MULTIOVL_CTRL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC7CR, MULTIOVL_CTRL, \ + MCDE_EXTSRC7CR_MULTIOVL_CTRL_##__x) +#define MCDE_EXTSRC7CR_MULTIOVL_CTRL(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC7CR, MULTIOVL_CTRL, __x) +#define MCDE_EXTSRC7CR_FS_DIV_DISABLE_SHIFT 3 +#define MCDE_EXTSRC7CR_FS_DIV_DISABLE_MASK 0x00000008 +#define MCDE_EXTSRC7CR_FS_DIV_DISABLE(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC7CR, FS_DIV_DISABLE, __x) +#define MCDE_EXTSRC7CR_FORCE_FS_DIV_SHIFT 4 +#define MCDE_EXTSRC7CR_FORCE_FS_DIV_MASK 0x00000010 +#define MCDE_EXTSRC7CR_FORCE_FS_DIV(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC7CR, FORCE_FS_DIV, __x) +#define MCDE_EXTSRC8CR 0x00000310 +#define MCDE_EXTSRC8CR_SEL_MOD_SHIFT 0 +#define 
MCDE_EXTSRC8CR_SEL_MOD_MASK 0x00000003 +#define MCDE_EXTSRC8CR_SEL_MOD_EXTERNAL_SEL 0 +#define MCDE_EXTSRC8CR_SEL_MOD_AUTO_TOGGLE 1 +#define MCDE_EXTSRC8CR_SEL_MOD_SOFTWARE_SEL 2 +#define MCDE_EXTSRC8CR_SEL_MOD_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC8CR, SEL_MOD, MCDE_EXTSRC8CR_SEL_MOD_##__x) +#define MCDE_EXTSRC8CR_SEL_MOD(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC8CR, SEL_MOD, __x) +#define MCDE_EXTSRC8CR_MULTIOVL_CTRL_SHIFT 2 +#define MCDE_EXTSRC8CR_MULTIOVL_CTRL_MASK 0x00000004 +#define MCDE_EXTSRC8CR_MULTIOVL_CTRL_ALL 0 +#define MCDE_EXTSRC8CR_MULTIOVL_CTRL_PRIMARY 1 +#define MCDE_EXTSRC8CR_MULTIOVL_CTRL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC8CR, MULTIOVL_CTRL, \ + MCDE_EXTSRC8CR_MULTIOVL_CTRL_##__x) +#define MCDE_EXTSRC8CR_MULTIOVL_CTRL(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC8CR, MULTIOVL_CTRL, __x) +#define MCDE_EXTSRC8CR_FS_DIV_DISABLE_SHIFT 3 +#define MCDE_EXTSRC8CR_FS_DIV_DISABLE_MASK 0x00000008 +#define MCDE_EXTSRC8CR_FS_DIV_DISABLE(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC8CR, FS_DIV_DISABLE, __x) +#define MCDE_EXTSRC8CR_FORCE_FS_DIV_SHIFT 4 +#define MCDE_EXTSRC8CR_FORCE_FS_DIV_MASK 0x00000010 +#define MCDE_EXTSRC8CR_FORCE_FS_DIV(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC8CR, FORCE_FS_DIV, __x) +#define MCDE_EXTSRC9CR 0x00000330 +#define MCDE_EXTSRC9CR_SEL_MOD_SHIFT 0 +#define MCDE_EXTSRC9CR_SEL_MOD_MASK 0x00000003 +#define MCDE_EXTSRC9CR_SEL_MOD_EXTERNAL_SEL 0 +#define MCDE_EXTSRC9CR_SEL_MOD_AUTO_TOGGLE 1 +#define MCDE_EXTSRC9CR_SEL_MOD_SOFTWARE_SEL 2 +#define MCDE_EXTSRC9CR_SEL_MOD_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC9CR, SEL_MOD, MCDE_EXTSRC9CR_SEL_MOD_##__x) +#define MCDE_EXTSRC9CR_SEL_MOD(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC9CR, SEL_MOD, __x) +#define MCDE_EXTSRC9CR_MULTIOVL_CTRL_SHIFT 2 +#define MCDE_EXTSRC9CR_MULTIOVL_CTRL_MASK 0x00000004 +#define MCDE_EXTSRC9CR_MULTIOVL_CTRL_ALL 0 +#define MCDE_EXTSRC9CR_MULTIOVL_CTRL_PRIMARY 1 +#define MCDE_EXTSRC9CR_MULTIOVL_CTRL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC9CR, MULTIOVL_CTRL, \ + MCDE_EXTSRC9CR_MULTIOVL_CTRL_##__x) +#define 
MCDE_EXTSRC9CR_MULTIOVL_CTRL(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC9CR, MULTIOVL_CTRL, __x) +#define MCDE_EXTSRC9CR_FS_DIV_DISABLE_SHIFT 3 +#define MCDE_EXTSRC9CR_FS_DIV_DISABLE_MASK 0x00000008 +#define MCDE_EXTSRC9CR_FS_DIV_DISABLE(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC9CR, FS_DIV_DISABLE, __x) +#define MCDE_EXTSRC9CR_FORCE_FS_DIV_SHIFT 4 +#define MCDE_EXTSRC9CR_FORCE_FS_DIV_MASK 0x00000010 +#define MCDE_EXTSRC9CR_FORCE_FS_DIV(__x) \ + MCDE_VAL2REG(MCDE_EXTSRC9CR, FORCE_FS_DIV, __x) +#define MCDE_OVL0CR 0x00000400 +#define MCDE_OVL0CR_GROUPOFFSET 0x20 +#define MCDE_OVL0CR_OVLEN_SHIFT 0 +#define MCDE_OVL0CR_OVLEN_MASK 0x00000001 +#define MCDE_OVL0CR_OVLEN(__x) \ + MCDE_VAL2REG(MCDE_OVL0CR, OVLEN, __x) +#define MCDE_OVL0CR_COLCCTRL_SHIFT 1 +#define MCDE_OVL0CR_COLCCTRL_MASK 0x00000006 +#define MCDE_OVL0CR_COLCCTRL_DISABLED 0 +#define MCDE_OVL0CR_COLCCTRL_ENABLED_NO_SAT 1 +#define MCDE_OVL0CR_COLCCTRL_ENABLED_SAT 2 +#define MCDE_OVL0CR_COLCCTRL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL0CR, COLCCTRL, MCDE_OVL0CR_COLCCTRL_##__x) +#define MCDE_OVL0CR_COLCCTRL(__x) \ + MCDE_VAL2REG(MCDE_OVL0CR, COLCCTRL, __x) +#define MCDE_OVL0CR_CKEYGEN_SHIFT 3 +#define MCDE_OVL0CR_CKEYGEN_MASK 0x00000008 +#define MCDE_OVL0CR_CKEYGEN(__x) \ + MCDE_VAL2REG(MCDE_OVL0CR, CKEYGEN, __x) +#define MCDE_OVL0CR_ALPHAPMEN_SHIFT 4 +#define MCDE_OVL0CR_ALPHAPMEN_MASK 0x00000010 +#define MCDE_OVL0CR_ALPHAPMEN(__x) \ + MCDE_VAL2REG(MCDE_OVL0CR, ALPHAPMEN, __x) +#define MCDE_OVL0CR_OVLF_SHIFT 5 +#define MCDE_OVL0CR_OVLF_MASK 0x00000020 +#define MCDE_OVL0CR_OVLF(__x) \ + MCDE_VAL2REG(MCDE_OVL0CR, OVLF, __x) +#define MCDE_OVL0CR_OVLR_SHIFT 6 +#define MCDE_OVL0CR_OVLR_MASK 0x00000040 +#define MCDE_OVL0CR_OVLR(__x) \ + MCDE_VAL2REG(MCDE_OVL0CR, OVLR, __x) +#define MCDE_OVL0CR_OVLB_SHIFT 7 +#define MCDE_OVL0CR_OVLB_MASK 0x00000080 +#define MCDE_OVL0CR_OVLB(__x) \ + MCDE_VAL2REG(MCDE_OVL0CR, OVLB, __x) +#define MCDE_OVL0CR_FETCH_ROPC_SHIFT 8 +#define MCDE_OVL0CR_FETCH_ROPC_MASK 0x0000FF00 +#define 
MCDE_OVL0CR_FETCH_ROPC(__x) \ + MCDE_VAL2REG(MCDE_OVL0CR, FETCH_ROPC, __x) +#define MCDE_OVL0CR_STBPRIO_SHIFT 16 +#define MCDE_OVL0CR_STBPRIO_MASK 0x000F0000 +#define MCDE_OVL0CR_STBPRIO(__x) \ + MCDE_VAL2REG(MCDE_OVL0CR, STBPRIO, __x) +#define MCDE_OVL0CR_BURSTSIZE_SHIFT 20 +#define MCDE_OVL0CR_BURSTSIZE_MASK 0x00F00000 +#define MCDE_OVL0CR_BURSTSIZE_1W 0 +#define MCDE_OVL0CR_BURSTSIZE_2W 1 +#define MCDE_OVL0CR_BURSTSIZE_4W 2 +#define MCDE_OVL0CR_BURSTSIZE_8W 3 +#define MCDE_OVL0CR_BURSTSIZE_16W 4 +#define MCDE_OVL0CR_BURSTSIZE_HW_1W 8 +#define MCDE_OVL0CR_BURSTSIZE_HW_2W 9 +#define MCDE_OVL0CR_BURSTSIZE_HW_4W 10 +#define MCDE_OVL0CR_BURSTSIZE_HW_8W 11 +#define MCDE_OVL0CR_BURSTSIZE_HW_16W 12 +#define MCDE_OVL0CR_BURSTSIZE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL0CR, BURSTSIZE, MCDE_OVL0CR_BURSTSIZE_##__x) +#define MCDE_OVL0CR_BURSTSIZE(__x) \ + MCDE_VAL2REG(MCDE_OVL0CR, BURSTSIZE, __x) +#define MCDE_OVL0CR_MAXOUTSTANDING_SHIFT 24 +#define MCDE_OVL0CR_MAXOUTSTANDING_MASK 0x0F000000 +#define MCDE_OVL0CR_MAXOUTSTANDING_1_REQ 0 +#define MCDE_OVL0CR_MAXOUTSTANDING_2_REQ 1 +#define MCDE_OVL0CR_MAXOUTSTANDING_4_REQ 2 +#define MCDE_OVL0CR_MAXOUTSTANDING_8_REQ 3 +#define MCDE_OVL0CR_MAXOUTSTANDING_16_REQ 4 +#define MCDE_OVL0CR_MAXOUTSTANDING_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL0CR, MAXOUTSTANDING, \ + MCDE_OVL0CR_MAXOUTSTANDING_##__x) +#define MCDE_OVL0CR_MAXOUTSTANDING(__x) \ + MCDE_VAL2REG(MCDE_OVL0CR, MAXOUTSTANDING, __x) +#define MCDE_OVL0CR_ROTBURSTSIZE_SHIFT 28 +#define MCDE_OVL0CR_ROTBURSTSIZE_MASK 0xF0000000 +#define MCDE_OVL0CR_ROTBURSTSIZE_1W 0 +#define MCDE_OVL0CR_ROTBURSTSIZE_2W 1 +#define MCDE_OVL0CR_ROTBURSTSIZE_4W 2 +#define MCDE_OVL0CR_ROTBURSTSIZE_8W 3 +#define MCDE_OVL0CR_ROTBURSTSIZE_16W 4 +#define MCDE_OVL0CR_ROTBURSTSIZE_HW_1W 8 +#define MCDE_OVL0CR_ROTBURSTSIZE_HW_2W 9 +#define MCDE_OVL0CR_ROTBURSTSIZE_HW_4W 10 +#define MCDE_OVL0CR_ROTBURSTSIZE_HW_8W 11 +#define MCDE_OVL0CR_ROTBURSTSIZE_HW_16W 12 +#define MCDE_OVL0CR_ROTBURSTSIZE_ENUM(__x) \ + 
MCDE_VAL2REG(MCDE_OVL0CR, ROTBURSTSIZE, MCDE_OVL0CR_ROTBURSTSIZE_##__x) +#define MCDE_OVL0CR_ROTBURSTSIZE(__x) \ + MCDE_VAL2REG(MCDE_OVL0CR, ROTBURSTSIZE, __x) +#define MCDE_OVL1CR 0x00000420 +#define MCDE_OVL1CR_OVLEN_SHIFT 0 +#define MCDE_OVL1CR_OVLEN_MASK 0x00000001 +#define MCDE_OVL1CR_OVLEN(__x) \ + MCDE_VAL2REG(MCDE_OVL1CR, OVLEN, __x) +#define MCDE_OVL1CR_COLCCTRL_SHIFT 1 +#define MCDE_OVL1CR_COLCCTRL_MASK 0x00000006 +#define MCDE_OVL1CR_COLCCTRL_DISABLED 0 +#define MCDE_OVL1CR_COLCCTRL_ENABLED_NO_SAT 1 +#define MCDE_OVL1CR_COLCCTRL_ENABLED_SAT 2 +#define MCDE_OVL1CR_COLCCTRL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL1CR, COLCCTRL, MCDE_OVL1CR_COLCCTRL_##__x) +#define MCDE_OVL1CR_COLCCTRL(__x) \ + MCDE_VAL2REG(MCDE_OVL1CR, COLCCTRL, __x) +#define MCDE_OVL1CR_CKEYGEN_SHIFT 3 +#define MCDE_OVL1CR_CKEYGEN_MASK 0x00000008 +#define MCDE_OVL1CR_CKEYGEN(__x) \ + MCDE_VAL2REG(MCDE_OVL1CR, CKEYGEN, __x) +#define MCDE_OVL1CR_ALPHAPMEN_SHIFT 4 +#define MCDE_OVL1CR_ALPHAPMEN_MASK 0x00000010 +#define MCDE_OVL1CR_ALPHAPMEN(__x) \ + MCDE_VAL2REG(MCDE_OVL1CR, ALPHAPMEN, __x) +#define MCDE_OVL1CR_OVLF_SHIFT 5 +#define MCDE_OVL1CR_OVLF_MASK 0x00000020 +#define MCDE_OVL1CR_OVLF(__x) \ + MCDE_VAL2REG(MCDE_OVL1CR, OVLF, __x) +#define MCDE_OVL1CR_OVLR_SHIFT 6 +#define MCDE_OVL1CR_OVLR_MASK 0x00000040 +#define MCDE_OVL1CR_OVLR(__x) \ + MCDE_VAL2REG(MCDE_OVL1CR, OVLR, __x) +#define MCDE_OVL1CR_OVLB_SHIFT 7 +#define MCDE_OVL1CR_OVLB_MASK 0x00000080 +#define MCDE_OVL1CR_OVLB(__x) \ + MCDE_VAL2REG(MCDE_OVL1CR, OVLB, __x) +#define MCDE_OVL1CR_FETCH_ROPC_SHIFT 8 +#define MCDE_OVL1CR_FETCH_ROPC_MASK 0x0000FF00 +#define MCDE_OVL1CR_FETCH_ROPC(__x) \ + MCDE_VAL2REG(MCDE_OVL1CR, FETCH_ROPC, __x) +#define MCDE_OVL1CR_STBPRIO_SHIFT 16 +#define MCDE_OVL1CR_STBPRIO_MASK 0x000F0000 +#define MCDE_OVL1CR_STBPRIO(__x) \ + MCDE_VAL2REG(MCDE_OVL1CR, STBPRIO, __x) +#define MCDE_OVL1CR_BURSTSIZE_SHIFT 20 +#define MCDE_OVL1CR_BURSTSIZE_MASK 0x00F00000 +#define MCDE_OVL1CR_BURSTSIZE_1W 0 +#define 
MCDE_OVL1CR_BURSTSIZE_2W 1 +#define MCDE_OVL1CR_BURSTSIZE_4W 2 +#define MCDE_OVL1CR_BURSTSIZE_8W 3 +#define MCDE_OVL1CR_BURSTSIZE_16W 4 +#define MCDE_OVL1CR_BURSTSIZE_HW_1W 8 +#define MCDE_OVL1CR_BURSTSIZE_HW_2W 9 +#define MCDE_OVL1CR_BURSTSIZE_HW_4W 10 +#define MCDE_OVL1CR_BURSTSIZE_HW_8W 11 +#define MCDE_OVL1CR_BURSTSIZE_HW_16W 12 +#define MCDE_OVL1CR_BURSTSIZE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL1CR, BURSTSIZE, MCDE_OVL1CR_BURSTSIZE_##__x) +#define MCDE_OVL1CR_BURSTSIZE(__x) \ + MCDE_VAL2REG(MCDE_OVL1CR, BURSTSIZE, __x) +#define MCDE_OVL1CR_MAXOUTSTANDING_SHIFT 24 +#define MCDE_OVL1CR_MAXOUTSTANDING_MASK 0x0F000000 +#define MCDE_OVL1CR_MAXOUTSTANDING_1_REQ 0 +#define MCDE_OVL1CR_MAXOUTSTANDING_2_REQ 1 +#define MCDE_OVL1CR_MAXOUTSTANDING_4_REQ 2 +#define MCDE_OVL1CR_MAXOUTSTANDING_8_REQ 3 +#define MCDE_OVL1CR_MAXOUTSTANDING_16_REQ 4 +#define MCDE_OVL1CR_MAXOUTSTANDING_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL1CR, MAXOUTSTANDING, \ + MCDE_OVL1CR_MAXOUTSTANDING_##__x) +#define MCDE_OVL1CR_MAXOUTSTANDING(__x) \ + MCDE_VAL2REG(MCDE_OVL1CR, MAXOUTSTANDING, __x) +#define MCDE_OVL1CR_ROTBURSTSIZE_SHIFT 28 +#define MCDE_OVL1CR_ROTBURSTSIZE_MASK 0xF0000000 +#define MCDE_OVL1CR_ROTBURSTSIZE_1W 0 +#define MCDE_OVL1CR_ROTBURSTSIZE_2W 1 +#define MCDE_OVL1CR_ROTBURSTSIZE_4W 2 +#define MCDE_OVL1CR_ROTBURSTSIZE_8W 3 +#define MCDE_OVL1CR_ROTBURSTSIZE_16W 4 +#define MCDE_OVL1CR_ROTBURSTSIZE_HW_1W 8 +#define MCDE_OVL1CR_ROTBURSTSIZE_HW_2W 9 +#define MCDE_OVL1CR_ROTBURSTSIZE_HW_4W 10 +#define MCDE_OVL1CR_ROTBURSTSIZE_HW_8W 11 +#define MCDE_OVL1CR_ROTBURSTSIZE_HW_16W 12 +#define MCDE_OVL1CR_ROTBURSTSIZE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL1CR, ROTBURSTSIZE, MCDE_OVL1CR_ROTBURSTSIZE_##__x) +#define MCDE_OVL1CR_ROTBURSTSIZE(__x) \ + MCDE_VAL2REG(MCDE_OVL1CR, ROTBURSTSIZE, __x) +#define MCDE_OVL2CR 0x00000440 +#define MCDE_OVL2CR_OVLEN_SHIFT 0 +#define MCDE_OVL2CR_OVLEN_MASK 0x00000001 +#define MCDE_OVL2CR_OVLEN(__x) \ + MCDE_VAL2REG(MCDE_OVL2CR, OVLEN, __x) +#define MCDE_OVL2CR_COLCCTRL_SHIFT 
1 +#define MCDE_OVL2CR_COLCCTRL_MASK 0x00000006 +#define MCDE_OVL2CR_COLCCTRL_DISABLED 0 +#define MCDE_OVL2CR_COLCCTRL_ENABLED_NO_SAT 1 +#define MCDE_OVL2CR_COLCCTRL_ENABLED_SAT 2 +#define MCDE_OVL2CR_COLCCTRL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL2CR, COLCCTRL, MCDE_OVL2CR_COLCCTRL_##__x) +#define MCDE_OVL2CR_COLCCTRL(__x) \ + MCDE_VAL2REG(MCDE_OVL2CR, COLCCTRL, __x) +#define MCDE_OVL2CR_CKEYGEN_SHIFT 3 +#define MCDE_OVL2CR_CKEYGEN_MASK 0x00000008 +#define MCDE_OVL2CR_CKEYGEN(__x) \ + MCDE_VAL2REG(MCDE_OVL2CR, CKEYGEN, __x) +#define MCDE_OVL2CR_ALPHAPMEN_SHIFT 4 +#define MCDE_OVL2CR_ALPHAPMEN_MASK 0x00000010 +#define MCDE_OVL2CR_ALPHAPMEN(__x) \ + MCDE_VAL2REG(MCDE_OVL2CR, ALPHAPMEN, __x) +#define MCDE_OVL2CR_OVLF_SHIFT 5 +#define MCDE_OVL2CR_OVLF_MASK 0x00000020 +#define MCDE_OVL2CR_OVLF(__x) \ + MCDE_VAL2REG(MCDE_OVL2CR, OVLF, __x) +#define MCDE_OVL2CR_OVLR_SHIFT 6 +#define MCDE_OVL2CR_OVLR_MASK 0x00000040 +#define MCDE_OVL2CR_OVLR(__x) \ + MCDE_VAL2REG(MCDE_OVL2CR, OVLR, __x) +#define MCDE_OVL2CR_OVLB_SHIFT 7 +#define MCDE_OVL2CR_OVLB_MASK 0x00000080 +#define MCDE_OVL2CR_OVLB(__x) \ + MCDE_VAL2REG(MCDE_OVL2CR, OVLB, __x) +#define MCDE_OVL2CR_FETCH_ROPC_SHIFT 8 +#define MCDE_OVL2CR_FETCH_ROPC_MASK 0x0000FF00 +#define MCDE_OVL2CR_FETCH_ROPC(__x) \ + MCDE_VAL2REG(MCDE_OVL2CR, FETCH_ROPC, __x) +#define MCDE_OVL2CR_STBPRIO_SHIFT 16 +#define MCDE_OVL2CR_STBPRIO_MASK 0x000F0000 +#define MCDE_OVL2CR_STBPRIO(__x) \ + MCDE_VAL2REG(MCDE_OVL2CR, STBPRIO, __x) +#define MCDE_OVL2CR_BURSTSIZE_SHIFT 20 +#define MCDE_OVL2CR_BURSTSIZE_MASK 0x00F00000 +#define MCDE_OVL2CR_BURSTSIZE_1W 0 +#define MCDE_OVL2CR_BURSTSIZE_2W 1 +#define MCDE_OVL2CR_BURSTSIZE_4W 2 +#define MCDE_OVL2CR_BURSTSIZE_8W 3 +#define MCDE_OVL2CR_BURSTSIZE_16W 4 +#define MCDE_OVL2CR_BURSTSIZE_HW_1W 8 +#define MCDE_OVL2CR_BURSTSIZE_HW_2W 9 +#define MCDE_OVL2CR_BURSTSIZE_HW_4W 10 +#define MCDE_OVL2CR_BURSTSIZE_HW_8W 11 +#define MCDE_OVL2CR_BURSTSIZE_HW_16W 12 +#define MCDE_OVL2CR_BURSTSIZE_ENUM(__x) \ + 
MCDE_VAL2REG(MCDE_OVL2CR, BURSTSIZE, MCDE_OVL2CR_BURSTSIZE_##__x) +#define MCDE_OVL2CR_BURSTSIZE(__x) \ + MCDE_VAL2REG(MCDE_OVL2CR, BURSTSIZE, __x) +#define MCDE_OVL2CR_MAXOUTSTANDING_SHIFT 24 +#define MCDE_OVL2CR_MAXOUTSTANDING_MASK 0x0F000000 +#define MCDE_OVL2CR_MAXOUTSTANDING_1_REQ 0 +#define MCDE_OVL2CR_MAXOUTSTANDING_2_REQ 1 +#define MCDE_OVL2CR_MAXOUTSTANDING_4_REQ 2 +#define MCDE_OVL2CR_MAXOUTSTANDING_8_REQ 3 +#define MCDE_OVL2CR_MAXOUTSTANDING_16_REQ 4 +#define MCDE_OVL2CR_MAXOUTSTANDING_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL2CR, MAXOUTSTANDING, \ + MCDE_OVL2CR_MAXOUTSTANDING_##__x) +#define MCDE_OVL2CR_MAXOUTSTANDING(__x) \ + MCDE_VAL2REG(MCDE_OVL2CR, MAXOUTSTANDING, __x) +#define MCDE_OVL2CR_ROTBURSTSIZE_SHIFT 28 +#define MCDE_OVL2CR_ROTBURSTSIZE_MASK 0xF0000000 +#define MCDE_OVL2CR_ROTBURSTSIZE_1W 0 +#define MCDE_OVL2CR_ROTBURSTSIZE_2W 1 +#define MCDE_OVL2CR_ROTBURSTSIZE_4W 2 +#define MCDE_OVL2CR_ROTBURSTSIZE_8W 3 +#define MCDE_OVL2CR_ROTBURSTSIZE_16W 4 +#define MCDE_OVL2CR_ROTBURSTSIZE_HW_1W 8 +#define MCDE_OVL2CR_ROTBURSTSIZE_HW_2W 9 +#define MCDE_OVL2CR_ROTBURSTSIZE_HW_4W 10 +#define MCDE_OVL2CR_ROTBURSTSIZE_HW_8W 11 +#define MCDE_OVL2CR_ROTBURSTSIZE_HW_16W 12 +#define MCDE_OVL2CR_ROTBURSTSIZE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL2CR, ROTBURSTSIZE, MCDE_OVL2CR_ROTBURSTSIZE_##__x) +#define MCDE_OVL2CR_ROTBURSTSIZE(__x) \ + MCDE_VAL2REG(MCDE_OVL2CR, ROTBURSTSIZE, __x) +#define MCDE_OVL3CR 0x00000460 +#define MCDE_OVL3CR_OVLEN_SHIFT 0 +#define MCDE_OVL3CR_OVLEN_MASK 0x00000001 +#define MCDE_OVL3CR_OVLEN(__x) \ + MCDE_VAL2REG(MCDE_OVL3CR, OVLEN, __x) +#define MCDE_OVL3CR_COLCCTRL_SHIFT 1 +#define MCDE_OVL3CR_COLCCTRL_MASK 0x00000006 +#define MCDE_OVL3CR_COLCCTRL_DISABLED 0 +#define MCDE_OVL3CR_COLCCTRL_ENABLED_NO_SAT 1 +#define MCDE_OVL3CR_COLCCTRL_ENABLED_SAT 2 +#define MCDE_OVL3CR_COLCCTRL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL3CR, COLCCTRL, MCDE_OVL3CR_COLCCTRL_##__x) +#define MCDE_OVL3CR_COLCCTRL(__x) \ + MCDE_VAL2REG(MCDE_OVL3CR, COLCCTRL, __x) +#define 
MCDE_OVL3CR_CKEYGEN_SHIFT 3 +#define MCDE_OVL3CR_CKEYGEN_MASK 0x00000008 +#define MCDE_OVL3CR_CKEYGEN(__x) \ + MCDE_VAL2REG(MCDE_OVL3CR, CKEYGEN, __x) +#define MCDE_OVL3CR_ALPHAPMEN_SHIFT 4 +#define MCDE_OVL3CR_ALPHAPMEN_MASK 0x00000010 +#define MCDE_OVL3CR_ALPHAPMEN(__x) \ + MCDE_VAL2REG(MCDE_OVL3CR, ALPHAPMEN, __x) +#define MCDE_OVL3CR_OVLF_SHIFT 5 +#define MCDE_OVL3CR_OVLF_MASK 0x00000020 +#define MCDE_OVL3CR_OVLF(__x) \ + MCDE_VAL2REG(MCDE_OVL3CR, OVLF, __x) +#define MCDE_OVL3CR_OVLR_SHIFT 6 +#define MCDE_OVL3CR_OVLR_MASK 0x00000040 +#define MCDE_OVL3CR_OVLR(__x) \ + MCDE_VAL2REG(MCDE_OVL3CR, OVLR, __x) +#define MCDE_OVL3CR_OVLB_SHIFT 7 +#define MCDE_OVL3CR_OVLB_MASK 0x00000080 +#define MCDE_OVL3CR_OVLB(__x) \ + MCDE_VAL2REG(MCDE_OVL3CR, OVLB, __x) +#define MCDE_OVL3CR_FETCH_ROPC_SHIFT 8 +#define MCDE_OVL3CR_FETCH_ROPC_MASK 0x0000FF00 +#define MCDE_OVL3CR_FETCH_ROPC(__x) \ + MCDE_VAL2REG(MCDE_OVL3CR, FETCH_ROPC, __x) +#define MCDE_OVL3CR_STBPRIO_SHIFT 16 +#define MCDE_OVL3CR_STBPRIO_MASK 0x000F0000 +#define MCDE_OVL3CR_STBPRIO(__x) \ + MCDE_VAL2REG(MCDE_OVL3CR, STBPRIO, __x) +#define MCDE_OVL3CR_BURSTSIZE_SHIFT 20 +#define MCDE_OVL3CR_BURSTSIZE_MASK 0x00F00000 +#define MCDE_OVL3CR_BURSTSIZE_1W 0 +#define MCDE_OVL3CR_BURSTSIZE_2W 1 +#define MCDE_OVL3CR_BURSTSIZE_4W 2 +#define MCDE_OVL3CR_BURSTSIZE_8W 3 +#define MCDE_OVL3CR_BURSTSIZE_16W 4 +#define MCDE_OVL3CR_BURSTSIZE_HW_1W 8 +#define MCDE_OVL3CR_BURSTSIZE_HW_2W 9 +#define MCDE_OVL3CR_BURSTSIZE_HW_4W 10 +#define MCDE_OVL3CR_BURSTSIZE_HW_8W 11 +#define MCDE_OVL3CR_BURSTSIZE_HW_16W 12 +#define MCDE_OVL3CR_BURSTSIZE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL3CR, BURSTSIZE, MCDE_OVL3CR_BURSTSIZE_##__x) +#define MCDE_OVL3CR_BURSTSIZE(__x) \ + MCDE_VAL2REG(MCDE_OVL3CR, BURSTSIZE, __x) +#define MCDE_OVL3CR_MAXOUTSTANDING_SHIFT 24 +#define MCDE_OVL3CR_MAXOUTSTANDING_MASK 0x0F000000 +#define MCDE_OVL3CR_MAXOUTSTANDING_1_REQ 0 +#define MCDE_OVL3CR_MAXOUTSTANDING_2_REQ 1 +#define MCDE_OVL3CR_MAXOUTSTANDING_4_REQ 2 +#define 
MCDE_OVL3CR_MAXOUTSTANDING_8_REQ 3 +#define MCDE_OVL3CR_MAXOUTSTANDING_16_REQ 4 +#define MCDE_OVL3CR_MAXOUTSTANDING_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL3CR, MAXOUTSTANDING, \ + MCDE_OVL3CR_MAXOUTSTANDING_##__x) +#define MCDE_OVL3CR_MAXOUTSTANDING(__x) \ + MCDE_VAL2REG(MCDE_OVL3CR, MAXOUTSTANDING, __x) +#define MCDE_OVL3CR_ROTBURSTSIZE_SHIFT 28 +#define MCDE_OVL3CR_ROTBURSTSIZE_MASK 0xF0000000 +#define MCDE_OVL3CR_ROTBURSTSIZE_1W 0 +#define MCDE_OVL3CR_ROTBURSTSIZE_2W 1 +#define MCDE_OVL3CR_ROTBURSTSIZE_4W 2 +#define MCDE_OVL3CR_ROTBURSTSIZE_8W 3 +#define MCDE_OVL3CR_ROTBURSTSIZE_16W 4 +#define MCDE_OVL3CR_ROTBURSTSIZE_HW_1W 8 +#define MCDE_OVL3CR_ROTBURSTSIZE_HW_2W 9 +#define MCDE_OVL3CR_ROTBURSTSIZE_HW_4W 10 +#define MCDE_OVL3CR_ROTBURSTSIZE_HW_8W 11 +#define MCDE_OVL3CR_ROTBURSTSIZE_HW_16W 12 +#define MCDE_OVL3CR_ROTBURSTSIZE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL3CR, ROTBURSTSIZE, MCDE_OVL3CR_ROTBURSTSIZE_##__x) +#define MCDE_OVL3CR_ROTBURSTSIZE(__x) \ + MCDE_VAL2REG(MCDE_OVL3CR, ROTBURSTSIZE, __x) +#define MCDE_OVL4CR 0x00000480 +#define MCDE_OVL4CR_OVLEN_SHIFT 0 +#define MCDE_OVL4CR_OVLEN_MASK 0x00000001 +#define MCDE_OVL4CR_OVLEN(__x) \ + MCDE_VAL2REG(MCDE_OVL4CR, OVLEN, __x) +#define MCDE_OVL4CR_COLCCTRL_SHIFT 1 +#define MCDE_OVL4CR_COLCCTRL_MASK 0x00000006 +#define MCDE_OVL4CR_COLCCTRL_DISABLED 0 +#define MCDE_OVL4CR_COLCCTRL_ENABLED_NO_SAT 1 +#define MCDE_OVL4CR_COLCCTRL_ENABLED_SAT 2 +#define MCDE_OVL4CR_COLCCTRL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL4CR, COLCCTRL, MCDE_OVL4CR_COLCCTRL_##__x) +#define MCDE_OVL4CR_COLCCTRL(__x) \ + MCDE_VAL2REG(MCDE_OVL4CR, COLCCTRL, __x) +#define MCDE_OVL4CR_CKEYGEN_SHIFT 3 +#define MCDE_OVL4CR_CKEYGEN_MASK 0x00000008 +#define MCDE_OVL4CR_CKEYGEN(__x) \ + MCDE_VAL2REG(MCDE_OVL4CR, CKEYGEN, __x) +#define MCDE_OVL4CR_ALPHAPMEN_SHIFT 4 +#define MCDE_OVL4CR_ALPHAPMEN_MASK 0x00000010 +#define MCDE_OVL4CR_ALPHAPMEN(__x) \ + MCDE_VAL2REG(MCDE_OVL4CR, ALPHAPMEN, __x) +#define MCDE_OVL4CR_OVLF_SHIFT 5 +#define MCDE_OVL4CR_OVLF_MASK 
0x00000020 +#define MCDE_OVL4CR_OVLF(__x) \ + MCDE_VAL2REG(MCDE_OVL4CR, OVLF, __x) +#define MCDE_OVL4CR_OVLR_SHIFT 6 +#define MCDE_OVL4CR_OVLR_MASK 0x00000040 +#define MCDE_OVL4CR_OVLR(__x) \ + MCDE_VAL2REG(MCDE_OVL4CR, OVLR, __x) +#define MCDE_OVL4CR_OVLB_SHIFT 7 +#define MCDE_OVL4CR_OVLB_MASK 0x00000080 +#define MCDE_OVL4CR_OVLB(__x) \ + MCDE_VAL2REG(MCDE_OVL4CR, OVLB, __x) +#define MCDE_OVL4CR_FETCH_ROPC_SHIFT 8 +#define MCDE_OVL4CR_FETCH_ROPC_MASK 0x0000FF00 +#define MCDE_OVL4CR_FETCH_ROPC(__x) \ + MCDE_VAL2REG(MCDE_OVL4CR, FETCH_ROPC, __x) +#define MCDE_OVL4CR_STBPRIO_SHIFT 16 +#define MCDE_OVL4CR_STBPRIO_MASK 0x000F0000 +#define MCDE_OVL4CR_STBPRIO(__x) \ + MCDE_VAL2REG(MCDE_OVL4CR, STBPRIO, __x) +#define MCDE_OVL4CR_BURSTSIZE_SHIFT 20 +#define MCDE_OVL4CR_BURSTSIZE_MASK 0x00F00000 +#define MCDE_OVL4CR_BURSTSIZE_1W 0 +#define MCDE_OVL4CR_BURSTSIZE_2W 1 +#define MCDE_OVL4CR_BURSTSIZE_4W 2 +#define MCDE_OVL4CR_BURSTSIZE_8W 3 +#define MCDE_OVL4CR_BURSTSIZE_16W 4 +#define MCDE_OVL4CR_BURSTSIZE_HW_1W 8 +#define MCDE_OVL4CR_BURSTSIZE_HW_2W 9 +#define MCDE_OVL4CR_BURSTSIZE_HW_4W 10 +#define MCDE_OVL4CR_BURSTSIZE_HW_8W 11 +#define MCDE_OVL4CR_BURSTSIZE_HW_16W 12 +#define MCDE_OVL4CR_BURSTSIZE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL4CR, BURSTSIZE, MCDE_OVL4CR_BURSTSIZE_##__x) +#define MCDE_OVL4CR_BURSTSIZE(__x) \ + MCDE_VAL2REG(MCDE_OVL4CR, BURSTSIZE, __x) +#define MCDE_OVL4CR_MAXOUTSTANDING_SHIFT 24 +#define MCDE_OVL4CR_MAXOUTSTANDING_MASK 0x0F000000 +#define MCDE_OVL4CR_MAXOUTSTANDING_1_REQ 0 +#define MCDE_OVL4CR_MAXOUTSTANDING_2_REQ 1 +#define MCDE_OVL4CR_MAXOUTSTANDING_4_REQ 2 +#define MCDE_OVL4CR_MAXOUTSTANDING_8_REQ 3 +#define MCDE_OVL4CR_MAXOUTSTANDING_16_REQ 4 +#define MCDE_OVL4CR_MAXOUTSTANDING_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL4CR, MAXOUTSTANDING, \ + MCDE_OVL4CR_MAXOUTSTANDING_##__x) +#define MCDE_OVL4CR_MAXOUTSTANDING(__x) \ + MCDE_VAL2REG(MCDE_OVL4CR, MAXOUTSTANDING, __x) +#define MCDE_OVL4CR_ROTBURSTSIZE_SHIFT 28 +#define MCDE_OVL4CR_ROTBURSTSIZE_MASK 
0xF0000000 +#define MCDE_OVL4CR_ROTBURSTSIZE_1W 0 +#define MCDE_OVL4CR_ROTBURSTSIZE_2W 1 +#define MCDE_OVL4CR_ROTBURSTSIZE_4W 2 +#define MCDE_OVL4CR_ROTBURSTSIZE_8W 3 +#define MCDE_OVL4CR_ROTBURSTSIZE_16W 4 +#define MCDE_OVL4CR_ROTBURSTSIZE_HW_1W 8 +#define MCDE_OVL4CR_ROTBURSTSIZE_HW_2W 9 +#define MCDE_OVL4CR_ROTBURSTSIZE_HW_4W 10 +#define MCDE_OVL4CR_ROTBURSTSIZE_HW_8W 11 +#define MCDE_OVL4CR_ROTBURSTSIZE_HW_16W 12 +#define MCDE_OVL4CR_ROTBURSTSIZE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL4CR, ROTBURSTSIZE, MCDE_OVL4CR_ROTBURSTSIZE_##__x) +#define MCDE_OVL4CR_ROTBURSTSIZE(__x) \ + MCDE_VAL2REG(MCDE_OVL4CR, ROTBURSTSIZE, __x) +#define MCDE_OVL5CR 0x000004A0 +#define MCDE_OVL5CR_OVLEN_SHIFT 0 +#define MCDE_OVL5CR_OVLEN_MASK 0x00000001 +#define MCDE_OVL5CR_OVLEN(__x) \ + MCDE_VAL2REG(MCDE_OVL5CR, OVLEN, __x) +#define MCDE_OVL5CR_COLCCTRL_SHIFT 1 +#define MCDE_OVL5CR_COLCCTRL_MASK 0x00000006 +#define MCDE_OVL5CR_COLCCTRL_DISABLED 0 +#define MCDE_OVL5CR_COLCCTRL_ENABLED_NO_SAT 1 +#define MCDE_OVL5CR_COLCCTRL_ENABLED_SAT 2 +#define MCDE_OVL5CR_COLCCTRL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL5CR, COLCCTRL, MCDE_OVL5CR_COLCCTRL_##__x) +#define MCDE_OVL5CR_COLCCTRL(__x) \ + MCDE_VAL2REG(MCDE_OVL5CR, COLCCTRL, __x) +#define MCDE_OVL5CR_CKEYGEN_SHIFT 3 +#define MCDE_OVL5CR_CKEYGEN_MASK 0x00000008 +#define MCDE_OVL5CR_CKEYGEN(__x) \ + MCDE_VAL2REG(MCDE_OVL5CR, CKEYGEN, __x) +#define MCDE_OVL5CR_ALPHAPMEN_SHIFT 4 +#define MCDE_OVL5CR_ALPHAPMEN_MASK 0x00000010 +#define MCDE_OVL5CR_ALPHAPMEN(__x) \ + MCDE_VAL2REG(MCDE_OVL5CR, ALPHAPMEN, __x) +#define MCDE_OVL5CR_OVLF_SHIFT 5 +#define MCDE_OVL5CR_OVLF_MASK 0x00000020 +#define MCDE_OVL5CR_OVLF(__x) \ + MCDE_VAL2REG(MCDE_OVL5CR, OVLF, __x) +#define MCDE_OVL5CR_OVLR_SHIFT 6 +#define MCDE_OVL5CR_OVLR_MASK 0x00000040 +#define MCDE_OVL5CR_OVLR(__x) \ + MCDE_VAL2REG(MCDE_OVL5CR, OVLR, __x) +#define MCDE_OVL5CR_OVLB_SHIFT 7 +#define MCDE_OVL5CR_OVLB_MASK 0x00000080 +#define MCDE_OVL5CR_OVLB(__x) \ + MCDE_VAL2REG(MCDE_OVL5CR, OVLB, __x) +#define 
MCDE_OVL5CR_FETCH_ROPC_SHIFT 8 +#define MCDE_OVL5CR_FETCH_ROPC_MASK 0x0000FF00 +#define MCDE_OVL5CR_FETCH_ROPC(__x) \ + MCDE_VAL2REG(MCDE_OVL5CR, FETCH_ROPC, __x) +#define MCDE_OVL5CR_STBPRIO_SHIFT 16 +#define MCDE_OVL5CR_STBPRIO_MASK 0x000F0000 +#define MCDE_OVL5CR_STBPRIO(__x) \ + MCDE_VAL2REG(MCDE_OVL5CR, STBPRIO, __x) +#define MCDE_OVL5CR_BURSTSIZE_SHIFT 20 +#define MCDE_OVL5CR_BURSTSIZE_MASK 0x00F00000 +#define MCDE_OVL5CR_BURSTSIZE_1W 0 +#define MCDE_OVL5CR_BURSTSIZE_2W 1 +#define MCDE_OVL5CR_BURSTSIZE_4W 2 +#define MCDE_OVL5CR_BURSTSIZE_8W 3 +#define MCDE_OVL5CR_BURSTSIZE_16W 4 +#define MCDE_OVL5CR_BURSTSIZE_HW_1W 8 +#define MCDE_OVL5CR_BURSTSIZE_HW_2W 9 +#define MCDE_OVL5CR_BURSTSIZE_HW_4W 10 +#define MCDE_OVL5CR_BURSTSIZE_HW_8W 11 +#define MCDE_OVL5CR_BURSTSIZE_HW_16W 12 +#define MCDE_OVL5CR_BURSTSIZE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL5CR, BURSTSIZE, MCDE_OVL5CR_BURSTSIZE_##__x) +#define MCDE_OVL5CR_BURSTSIZE(__x) \ + MCDE_VAL2REG(MCDE_OVL5CR, BURSTSIZE, __x) +#define MCDE_OVL5CR_MAXOUTSTANDING_SHIFT 24 +#define MCDE_OVL5CR_MAXOUTSTANDING_MASK 0x0F000000 +#define MCDE_OVL5CR_MAXOUTSTANDING_1_REQ 0 +#define MCDE_OVL5CR_MAXOUTSTANDING_2_REQ 1 +#define MCDE_OVL5CR_MAXOUTSTANDING_4_REQ 2 +#define MCDE_OVL5CR_MAXOUTSTANDING_8_REQ 3 +#define MCDE_OVL5CR_MAXOUTSTANDING_16_REQ 4 +#define MCDE_OVL5CR_MAXOUTSTANDING_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL5CR, MAXOUTSTANDING, \ + MCDE_OVL5CR_MAXOUTSTANDING_##__x) +#define MCDE_OVL5CR_MAXOUTSTANDING(__x) \ + MCDE_VAL2REG(MCDE_OVL5CR, MAXOUTSTANDING, __x) +#define MCDE_OVL5CR_ROTBURSTSIZE_SHIFT 28 +#define MCDE_OVL5CR_ROTBURSTSIZE_MASK 0xF0000000 +#define MCDE_OVL5CR_ROTBURSTSIZE_1W 0 +#define MCDE_OVL5CR_ROTBURSTSIZE_2W 1 +#define MCDE_OVL5CR_ROTBURSTSIZE_4W 2 +#define MCDE_OVL5CR_ROTBURSTSIZE_8W 3 +#define MCDE_OVL5CR_ROTBURSTSIZE_16W 4 +#define MCDE_OVL5CR_ROTBURSTSIZE_HW_1W 8 +#define MCDE_OVL5CR_ROTBURSTSIZE_HW_2W 9 +#define MCDE_OVL5CR_ROTBURSTSIZE_HW_4W 10 +#define MCDE_OVL5CR_ROTBURSTSIZE_HW_8W 11 +#define 
MCDE_OVL5CR_ROTBURSTSIZE_HW_16W 12 +#define MCDE_OVL5CR_ROTBURSTSIZE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL5CR, ROTBURSTSIZE, MCDE_OVL5CR_ROTBURSTSIZE_##__x) +#define MCDE_OVL5CR_ROTBURSTSIZE(__x) \ + MCDE_VAL2REG(MCDE_OVL5CR, ROTBURSTSIZE, __x) +#define MCDE_OVL0CONF 0x00000404 +#define MCDE_OVL0CONF_GROUPOFFSET 0x20 +#define MCDE_OVL0CONF_PPL_SHIFT 0 +#define MCDE_OVL0CONF_PPL_MASK 0x000007FF +#define MCDE_OVL0CONF_PPL(__x) \ + MCDE_VAL2REG(MCDE_OVL0CONF, PPL, __x) +#define MCDE_OVL0CONF_EXTSRC_ID_SHIFT 11 +#define MCDE_OVL0CONF_EXTSRC_ID_MASK 0x00007800 +#define MCDE_OVL0CONF_EXTSRC_ID(__x) \ + MCDE_VAL2REG(MCDE_OVL0CONF, EXTSRC_ID, __x) +#define MCDE_OVL0CONF_LPF_SHIFT 16 +#define MCDE_OVL0CONF_LPF_MASK 0x07FF0000 +#define MCDE_OVL0CONF_LPF(__x) \ + MCDE_VAL2REG(MCDE_OVL0CONF, LPF, __x) +#define MCDE_OVL1CONF 0x00000424 +#define MCDE_OVL1CONF_PPL_SHIFT 0 +#define MCDE_OVL1CONF_PPL_MASK 0x000007FF +#define MCDE_OVL1CONF_PPL(__x) \ + MCDE_VAL2REG(MCDE_OVL1CONF, PPL, __x) +#define MCDE_OVL1CONF_EXTSRC_ID_SHIFT 11 +#define MCDE_OVL1CONF_EXTSRC_ID_MASK 0x00007800 +#define MCDE_OVL1CONF_EXTSRC_ID(__x) \ + MCDE_VAL2REG(MCDE_OVL1CONF, EXTSRC_ID, __x) +#define MCDE_OVL1CONF_LPF_SHIFT 16 +#define MCDE_OVL1CONF_LPF_MASK 0x07FF0000 +#define MCDE_OVL1CONF_LPF(__x) \ + MCDE_VAL2REG(MCDE_OVL1CONF, LPF, __x) +#define MCDE_OVL2CONF 0x00000444 +#define MCDE_OVL2CONF_PPL_SHIFT 0 +#define MCDE_OVL2CONF_PPL_MASK 0x000007FF +#define MCDE_OVL2CONF_PPL(__x) \ + MCDE_VAL2REG(MCDE_OVL2CONF, PPL, __x) +#define MCDE_OVL2CONF_EXTSRC_ID_SHIFT 11 +#define MCDE_OVL2CONF_EXTSRC_ID_MASK 0x00007800 +#define MCDE_OVL2CONF_EXTSRC_ID(__x) \ + MCDE_VAL2REG(MCDE_OVL2CONF, EXTSRC_ID, __x) +#define MCDE_OVL2CONF_LPF_SHIFT 16 +#define MCDE_OVL2CONF_LPF_MASK 0x07FF0000 +#define MCDE_OVL2CONF_LPF(__x) \ + MCDE_VAL2REG(MCDE_OVL2CONF, LPF, __x) +#define MCDE_OVL3CONF 0x00000464 +#define MCDE_OVL3CONF_PPL_SHIFT 0 +#define MCDE_OVL3CONF_PPL_MASK 0x000007FF +#define MCDE_OVL3CONF_PPL(__x) \ + 
MCDE_VAL2REG(MCDE_OVL3CONF, PPL, __x) +#define MCDE_OVL3CONF_EXTSRC_ID_SHIFT 11 +#define MCDE_OVL3CONF_EXTSRC_ID_MASK 0x00007800 +#define MCDE_OVL3CONF_EXTSRC_ID(__x) \ + MCDE_VAL2REG(MCDE_OVL3CONF, EXTSRC_ID, __x) +#define MCDE_OVL3CONF_LPF_SHIFT 16 +#define MCDE_OVL3CONF_LPF_MASK 0x07FF0000 +#define MCDE_OVL3CONF_LPF(__x) \ + MCDE_VAL2REG(MCDE_OVL3CONF, LPF, __x) +#define MCDE_OVL4CONF 0x00000484 +#define MCDE_OVL4CONF_PPL_SHIFT 0 +#define MCDE_OVL4CONF_PPL_MASK 0x000007FF +#define MCDE_OVL4CONF_PPL(__x) \ + MCDE_VAL2REG(MCDE_OVL4CONF, PPL, __x) +#define MCDE_OVL4CONF_EXTSRC_ID_SHIFT 11 +#define MCDE_OVL4CONF_EXTSRC_ID_MASK 0x00007800 +#define MCDE_OVL4CONF_EXTSRC_ID(__x) \ + MCDE_VAL2REG(MCDE_OVL4CONF, EXTSRC_ID, __x) +#define MCDE_OVL4CONF_LPF_SHIFT 16 +#define MCDE_OVL4CONF_LPF_MASK 0x07FF0000 +#define MCDE_OVL4CONF_LPF(__x) \ + MCDE_VAL2REG(MCDE_OVL4CONF, LPF, __x) +#define MCDE_OVL5CONF 0x000004A4 +#define MCDE_OVL5CONF_PPL_SHIFT 0 +#define MCDE_OVL5CONF_PPL_MASK 0x000007FF +#define MCDE_OVL5CONF_PPL(__x) \ + MCDE_VAL2REG(MCDE_OVL5CONF, PPL, __x) +#define MCDE_OVL5CONF_EXTSRC_ID_SHIFT 11 +#define MCDE_OVL5CONF_EXTSRC_ID_MASK 0x00007800 +#define MCDE_OVL5CONF_EXTSRC_ID(__x) \ + MCDE_VAL2REG(MCDE_OVL5CONF, EXTSRC_ID, __x) +#define MCDE_OVL5CONF_LPF_SHIFT 16 +#define MCDE_OVL5CONF_LPF_MASK 0x07FF0000 +#define MCDE_OVL5CONF_LPF(__x) \ + MCDE_VAL2REG(MCDE_OVL5CONF, LPF, __x) +#define MCDE_OVL0CONF2 0x00000408 +#define MCDE_OVL0CONF2_GROUPOFFSET 0x20 +#define MCDE_OVL0CONF2_BP_SHIFT 0 +#define MCDE_OVL0CONF2_BP_MASK 0x00000001 +#define MCDE_OVL0CONF2_BP_PER_PIXEL_ALPHA 0 +#define MCDE_OVL0CONF2_BP_CONSTANT_ALPHA 1 +#define MCDE_OVL0CONF2_BP_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL0CONF2, BP, MCDE_OVL0CONF2_BP_##__x) +#define MCDE_OVL0CONF2_BP(__x) \ + MCDE_VAL2REG(MCDE_OVL0CONF2, BP, __x) +#define MCDE_OVL0CONF2_ALPHAVALUE_SHIFT 1 +#define MCDE_OVL0CONF2_ALPHAVALUE_MASK 0x000001FE +#define MCDE_OVL0CONF2_ALPHAVALUE(__x) \ + MCDE_VAL2REG(MCDE_OVL0CONF2, ALPHAVALUE, __x) 
+#define MCDE_OVL0CONF2_OPQ_SHIFT 9 +#define MCDE_OVL0CONF2_OPQ_MASK 0x00000200 +#define MCDE_OVL0CONF2_OPQ(__x) \ + MCDE_VAL2REG(MCDE_OVL0CONF2, OPQ, __x) +#define MCDE_OVL0CONF2_PIXOFF_SHIFT 10 +#define MCDE_OVL0CONF2_PIXOFF_MASK 0x0000FC00 +#define MCDE_OVL0CONF2_PIXOFF(__x) \ + MCDE_VAL2REG(MCDE_OVL0CONF2, PIXOFF, __x) +#define MCDE_OVL0CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16 +#define MCDE_OVL0CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000 +#define MCDE_OVL0CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \ + MCDE_VAL2REG(MCDE_OVL0CONF2, PIXELFETCHERWATERMARKLEVEL, __x) +#define MCDE_OVL1CONF2 0x00000428 +#define MCDE_OVL1CONF2_BP_SHIFT 0 +#define MCDE_OVL1CONF2_BP_MASK 0x00000001 +#define MCDE_OVL1CONF2_BP_PER_PIXEL_ALPHA 0 +#define MCDE_OVL1CONF2_BP_CONSTANT_ALPHA 1 +#define MCDE_OVL1CONF2_BP_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL1CONF2, BP, MCDE_OVL1CONF2_BP_##__x) +#define MCDE_OVL1CONF2_BP(__x) \ + MCDE_VAL2REG(MCDE_OVL1CONF2, BP, __x) +#define MCDE_OVL1CONF2_ALPHAVALUE_SHIFT 1 +#define MCDE_OVL1CONF2_ALPHAVALUE_MASK 0x000001FE +#define MCDE_OVL1CONF2_ALPHAVALUE(__x) \ + MCDE_VAL2REG(MCDE_OVL1CONF2, ALPHAVALUE, __x) +#define MCDE_OVL1CONF2_OPQ_SHIFT 9 +#define MCDE_OVL1CONF2_OPQ_MASK 0x00000200 +#define MCDE_OVL1CONF2_OPQ(__x) \ + MCDE_VAL2REG(MCDE_OVL1CONF2, OPQ, __x) +#define MCDE_OVL1CONF2_PIXOFF_SHIFT 10 +#define MCDE_OVL1CONF2_PIXOFF_MASK 0x0000FC00 +#define MCDE_OVL1CONF2_PIXOFF(__x) \ + MCDE_VAL2REG(MCDE_OVL1CONF2, PIXOFF, __x) +#define MCDE_OVL1CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16 +#define MCDE_OVL1CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000 +#define MCDE_OVL1CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \ + MCDE_VAL2REG(MCDE_OVL1CONF2, PIXELFETCHERWATERMARKLEVEL, __x) +#define MCDE_OVL2CONF2 0x00000448 +#define MCDE_OVL2CONF2_BP_SHIFT 0 +#define MCDE_OVL2CONF2_BP_MASK 0x00000001 +#define MCDE_OVL2CONF2_BP_PER_PIXEL_ALPHA 0 +#define MCDE_OVL2CONF2_BP_CONSTANT_ALPHA 1 +#define MCDE_OVL2CONF2_BP_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL2CONF2, BP, 
MCDE_OVL2CONF2_BP_##__x) +#define MCDE_OVL2CONF2_BP(__x) \ + MCDE_VAL2REG(MCDE_OVL2CONF2, BP, __x) +#define MCDE_OVL2CONF2_ALPHAVALUE_SHIFT 1 +#define MCDE_OVL2CONF2_ALPHAVALUE_MASK 0x000001FE +#define MCDE_OVL2CONF2_ALPHAVALUE(__x) \ + MCDE_VAL2REG(MCDE_OVL2CONF2, ALPHAVALUE, __x) +#define MCDE_OVL2CONF2_OPQ_SHIFT 9 +#define MCDE_OVL2CONF2_OPQ_MASK 0x00000200 +#define MCDE_OVL2CONF2_OPQ(__x) \ + MCDE_VAL2REG(MCDE_OVL2CONF2, OPQ, __x) +#define MCDE_OVL2CONF2_PIXOFF_SHIFT 10 +#define MCDE_OVL2CONF2_PIXOFF_MASK 0x0000FC00 +#define MCDE_OVL2CONF2_PIXOFF(__x) \ + MCDE_VAL2REG(MCDE_OVL2CONF2, PIXOFF, __x) +#define MCDE_OVL2CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16 +#define MCDE_OVL2CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000 +#define MCDE_OVL2CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \ + MCDE_VAL2REG(MCDE_OVL2CONF2, PIXELFETCHERWATERMARKLEVEL, __x) +#define MCDE_OVL3CONF2 0x00000468 +#define MCDE_OVL3CONF2_BP_SHIFT 0 +#define MCDE_OVL3CONF2_BP_MASK 0x00000001 +#define MCDE_OVL3CONF2_BP_PER_PIXEL_ALPHA 0 +#define MCDE_OVL3CONF2_BP_CONSTANT_ALPHA 1 +#define MCDE_OVL3CONF2_BP_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL3CONF2, BP, MCDE_OVL3CONF2_BP_##__x) +#define MCDE_OVL3CONF2_BP(__x) \ + MCDE_VAL2REG(MCDE_OVL3CONF2, BP, __x) +#define MCDE_OVL3CONF2_ALPHAVALUE_SHIFT 1 +#define MCDE_OVL3CONF2_ALPHAVALUE_MASK 0x000001FE +#define MCDE_OVL3CONF2_ALPHAVALUE(__x) \ + MCDE_VAL2REG(MCDE_OVL3CONF2, ALPHAVALUE, __x) +#define MCDE_OVL3CONF2_OPQ_SHIFT 9 +#define MCDE_OVL3CONF2_OPQ_MASK 0x00000200 +#define MCDE_OVL3CONF2_OPQ(__x) \ + MCDE_VAL2REG(MCDE_OVL3CONF2, OPQ, __x) +#define MCDE_OVL3CONF2_PIXOFF_SHIFT 10 +#define MCDE_OVL3CONF2_PIXOFF_MASK 0x0000FC00 +#define MCDE_OVL3CONF2_PIXOFF(__x) \ + MCDE_VAL2REG(MCDE_OVL3CONF2, PIXOFF, __x) +#define MCDE_OVL3CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16 +#define MCDE_OVL3CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000 +#define MCDE_OVL3CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \ + MCDE_VAL2REG(MCDE_OVL3CONF2, PIXELFETCHERWATERMARKLEVEL, __x) +#define 
MCDE_OVL4CONF2 0x00000488 +#define MCDE_OVL4CONF2_BP_SHIFT 0 +#define MCDE_OVL4CONF2_BP_MASK 0x00000001 +#define MCDE_OVL4CONF2_BP_PER_PIXEL_ALPHA 0 +#define MCDE_OVL4CONF2_BP_CONSTANT_ALPHA 1 +#define MCDE_OVL4CONF2_BP_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL4CONF2, BP, MCDE_OVL4CONF2_BP_##__x) +#define MCDE_OVL4CONF2_BP(__x) \ + MCDE_VAL2REG(MCDE_OVL4CONF2, BP, __x) +#define MCDE_OVL4CONF2_ALPHAVALUE_SHIFT 1 +#define MCDE_OVL4CONF2_ALPHAVALUE_MASK 0x000001FE +#define MCDE_OVL4CONF2_ALPHAVALUE(__x) \ + MCDE_VAL2REG(MCDE_OVL4CONF2, ALPHAVALUE, __x) +#define MCDE_OVL4CONF2_OPQ_SHIFT 9 +#define MCDE_OVL4CONF2_OPQ_MASK 0x00000200 +#define MCDE_OVL4CONF2_OPQ(__x) \ + MCDE_VAL2REG(MCDE_OVL4CONF2, OPQ, __x) +#define MCDE_OVL4CONF2_PIXOFF_SHIFT 10 +#define MCDE_OVL4CONF2_PIXOFF_MASK 0x0000FC00 +#define MCDE_OVL4CONF2_PIXOFF(__x) \ + MCDE_VAL2REG(MCDE_OVL4CONF2, PIXOFF, __x) +#define MCDE_OVL4CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16 +#define MCDE_OVL4CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000 +#define MCDE_OVL4CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \ + MCDE_VAL2REG(MCDE_OVL4CONF2, PIXELFETCHERWATERMARKLEVEL, __x) +#define MCDE_OVL5CONF2 0x000004A8 +#define MCDE_OVL5CONF2_BP_SHIFT 0 +#define MCDE_OVL5CONF2_BP_MASK 0x00000001 +#define MCDE_OVL5CONF2_BP_PER_PIXEL_ALPHA 0 +#define MCDE_OVL5CONF2_BP_CONSTANT_ALPHA 1 +#define MCDE_OVL5CONF2_BP_ENUM(__x) \ + MCDE_VAL2REG(MCDE_OVL5CONF2, BP, MCDE_OVL5CONF2_BP_##__x) +#define MCDE_OVL5CONF2_BP(__x) \ + MCDE_VAL2REG(MCDE_OVL5CONF2, BP, __x) +#define MCDE_OVL5CONF2_ALPHAVALUE_SHIFT 1 +#define MCDE_OVL5CONF2_ALPHAVALUE_MASK 0x000001FE +#define MCDE_OVL5CONF2_ALPHAVALUE(__x) \ + MCDE_VAL2REG(MCDE_OVL5CONF2, ALPHAVALUE, __x) +#define MCDE_OVL5CONF2_OPQ_SHIFT 9 +#define MCDE_OVL5CONF2_OPQ_MASK 0x00000200 +#define MCDE_OVL5CONF2_OPQ(__x) \ + MCDE_VAL2REG(MCDE_OVL5CONF2, OPQ, __x) +#define MCDE_OVL5CONF2_PIXOFF_SHIFT 10 +#define MCDE_OVL5CONF2_PIXOFF_MASK 0x0000FC00 +#define MCDE_OVL5CONF2_PIXOFF(__x) \ + MCDE_VAL2REG(MCDE_OVL5CONF2, 
PIXOFF, __x) +#define MCDE_OVL5CONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT 16 +#define MCDE_OVL5CONF2_PIXELFETCHERWATERMARKLEVEL_MASK 0x1FFF0000 +#define MCDE_OVL5CONF2_PIXELFETCHERWATERMARKLEVEL(__x) \ + MCDE_VAL2REG(MCDE_OVL5CONF2, PIXELFETCHERWATERMARKLEVEL, __x) +#define MCDE_OVL0LJINC 0x0000040C +#define MCDE_OVL0LJINC_GROUPOFFSET 0x20 +#define MCDE_OVL0LJINC_LJINC_SHIFT 0 +#define MCDE_OVL0LJINC_LJINC_MASK 0xFFFFFFFF +#define MCDE_OVL0LJINC_LJINC(__x) \ + MCDE_VAL2REG(MCDE_OVL0LJINC, LJINC, __x) +#define MCDE_OVL1LJINC 0x0000042C +#define MCDE_OVL1LJINC_LJINC_SHIFT 0 +#define MCDE_OVL1LJINC_LJINC_MASK 0xFFFFFFFF +#define MCDE_OVL1LJINC_LJINC(__x) \ + MCDE_VAL2REG(MCDE_OVL1LJINC, LJINC, __x) +#define MCDE_OVL2LJINC 0x0000044C +#define MCDE_OVL2LJINC_LJINC_SHIFT 0 +#define MCDE_OVL2LJINC_LJINC_MASK 0xFFFFFFFF +#define MCDE_OVL2LJINC_LJINC(__x) \ + MCDE_VAL2REG(MCDE_OVL2LJINC, LJINC, __x) +#define MCDE_OVL3LJINC 0x0000046C +#define MCDE_OVL3LJINC_LJINC_SHIFT 0 +#define MCDE_OVL3LJINC_LJINC_MASK 0xFFFFFFFF +#define MCDE_OVL3LJINC_LJINC(__x) \ + MCDE_VAL2REG(MCDE_OVL3LJINC, LJINC, __x) +#define MCDE_OVL4LJINC 0x0000048C +#define MCDE_OVL4LJINC_LJINC_SHIFT 0 +#define MCDE_OVL4LJINC_LJINC_MASK 0xFFFFFFFF +#define MCDE_OVL4LJINC_LJINC(__x) \ + MCDE_VAL2REG(MCDE_OVL4LJINC, LJINC, __x) +#define MCDE_OVL5LJINC 0x000004AC +#define MCDE_OVL5LJINC_LJINC_SHIFT 0 +#define MCDE_OVL5LJINC_LJINC_MASK 0xFFFFFFFF +#define MCDE_OVL5LJINC_LJINC(__x) \ + MCDE_VAL2REG(MCDE_OVL5LJINC, LJINC, __x) +#define MCDE_OVL0CROP 0x00000410 +#define MCDE_OVL0CROP_GROUPOFFSET 0x20 +#define MCDE_OVL0CROP_TMRGN_SHIFT 0 +#define MCDE_OVL0CROP_TMRGN_MASK 0x003FFFFF +#define MCDE_OVL0CROP_TMRGN(__x) \ + MCDE_VAL2REG(MCDE_OVL0CROP, TMRGN, __x) +#define MCDE_OVL0CROP_LMRGN_SHIFT 22 +#define MCDE_OVL0CROP_LMRGN_MASK 0xFFC00000 +#define MCDE_OVL0CROP_LMRGN(__x) \ + MCDE_VAL2REG(MCDE_OVL0CROP, LMRGN, __x) +#define MCDE_OVL1CROP 0x00000430 +#define MCDE_OVL1CROP_TMRGN_SHIFT 0 +#define MCDE_OVL1CROP_TMRGN_MASK 
0x003FFFFF +#define MCDE_OVL1CROP_TMRGN(__x) \ + MCDE_VAL2REG(MCDE_OVL1CROP, TMRGN, __x) +#define MCDE_OVL1CROP_LMRGN_SHIFT 22 +#define MCDE_OVL1CROP_LMRGN_MASK 0xFFC00000 +#define MCDE_OVL1CROP_LMRGN(__x) \ + MCDE_VAL2REG(MCDE_OVL1CROP, LMRGN, __x) +#define MCDE_OVL2CROP 0x00000450 +#define MCDE_OVL2CROP_TMRGN_SHIFT 0 +#define MCDE_OVL2CROP_TMRGN_MASK 0x003FFFFF +#define MCDE_OVL2CROP_TMRGN(__x) \ + MCDE_VAL2REG(MCDE_OVL2CROP, TMRGN, __x) +#define MCDE_OVL2CROP_LMRGN_SHIFT 22 +#define MCDE_OVL2CROP_LMRGN_MASK 0xFFC00000 +#define MCDE_OVL2CROP_LMRGN(__x) \ + MCDE_VAL2REG(MCDE_OVL2CROP, LMRGN, __x) +#define MCDE_OVL3CROP 0x00000470 +#define MCDE_OVL3CROP_TMRGN_SHIFT 0 +#define MCDE_OVL3CROP_TMRGN_MASK 0x003FFFFF +#define MCDE_OVL3CROP_TMRGN(__x) \ + MCDE_VAL2REG(MCDE_OVL3CROP, TMRGN, __x) +#define MCDE_OVL3CROP_LMRGN_SHIFT 22 +#define MCDE_OVL3CROP_LMRGN_MASK 0xFFC00000 +#define MCDE_OVL3CROP_LMRGN(__x) \ + MCDE_VAL2REG(MCDE_OVL3CROP, LMRGN, __x) +#define MCDE_OVL4CROP 0x00000490 +#define MCDE_OVL4CROP_TMRGN_SHIFT 0 +#define MCDE_OVL4CROP_TMRGN_MASK 0x003FFFFF +#define MCDE_OVL4CROP_TMRGN(__x) \ + MCDE_VAL2REG(MCDE_OVL4CROP, TMRGN, __x) +#define MCDE_OVL4CROP_LMRGN_SHIFT 22 +#define MCDE_OVL4CROP_LMRGN_MASK 0xFFC00000 +#define MCDE_OVL4CROP_LMRGN(__x) \ + MCDE_VAL2REG(MCDE_OVL4CROP, LMRGN, __x) +#define MCDE_OVL5CROP 0x000004B0 +#define MCDE_OVL5CROP_TMRGN_SHIFT 0 +#define MCDE_OVL5CROP_TMRGN_MASK 0x003FFFFF +#define MCDE_OVL5CROP_TMRGN(__x) \ + MCDE_VAL2REG(MCDE_OVL5CROP, TMRGN, __x) +#define MCDE_OVL5CROP_LMRGN_SHIFT 22 +#define MCDE_OVL5CROP_LMRGN_MASK 0xFFC00000 +#define MCDE_OVL5CROP_LMRGN(__x) \ + MCDE_VAL2REG(MCDE_OVL5CROP, LMRGN, __x) +#define MCDE_OVL0COMP 0x00000414 +#define MCDE_OVL0COMP_GROUPOFFSET 0x20 +#define MCDE_OVL0COMP_XPOS_SHIFT 0 +#define MCDE_OVL0COMP_XPOS_MASK 0x000007FF +#define MCDE_OVL0COMP_XPOS(__x) \ + MCDE_VAL2REG(MCDE_OVL0COMP, XPOS, __x) +#define MCDE_OVL0COMP_CH_ID_SHIFT 11 +#define MCDE_OVL0COMP_CH_ID_MASK 0x00007800 +#define 
MCDE_OVL0COMP_CH_ID(__x) \ + MCDE_VAL2REG(MCDE_OVL0COMP, CH_ID, __x) +#define MCDE_OVL0COMP_YPOS_SHIFT 16 +#define MCDE_OVL0COMP_YPOS_MASK 0x07FF0000 +#define MCDE_OVL0COMP_YPOS(__x) \ + MCDE_VAL2REG(MCDE_OVL0COMP, YPOS, __x) +#define MCDE_OVL0COMP_Z_SHIFT 27 +#define MCDE_OVL0COMP_Z_MASK 0x78000000 +#define MCDE_OVL0COMP_Z(__x) \ + MCDE_VAL2REG(MCDE_OVL0COMP, Z, __x) +#define MCDE_OVL1COMP 0x00000434 +#define MCDE_OVL1COMP_XPOS_SHIFT 0 +#define MCDE_OVL1COMP_XPOS_MASK 0x000007FF +#define MCDE_OVL1COMP_XPOS(__x) \ + MCDE_VAL2REG(MCDE_OVL1COMP, XPOS, __x) +#define MCDE_OVL1COMP_CH_ID_SHIFT 11 +#define MCDE_OVL1COMP_CH_ID_MASK 0x00007800 +#define MCDE_OVL1COMP_CH_ID(__x) \ + MCDE_VAL2REG(MCDE_OVL1COMP, CH_ID, __x) +#define MCDE_OVL1COMP_YPOS_SHIFT 16 +#define MCDE_OVL1COMP_YPOS_MASK 0x07FF0000 +#define MCDE_OVL1COMP_YPOS(__x) \ + MCDE_VAL2REG(MCDE_OVL1COMP, YPOS, __x) +#define MCDE_OVL1COMP_Z_SHIFT 27 +#define MCDE_OVL1COMP_Z_MASK 0x78000000 +#define MCDE_OVL1COMP_Z(__x) \ + MCDE_VAL2REG(MCDE_OVL1COMP, Z, __x) +#define MCDE_OVL2COMP 0x00000454 +#define MCDE_OVL2COMP_XPOS_SHIFT 0 +#define MCDE_OVL2COMP_XPOS_MASK 0x000007FF +#define MCDE_OVL2COMP_XPOS(__x) \ + MCDE_VAL2REG(MCDE_OVL2COMP, XPOS, __x) +#define MCDE_OVL2COMP_CH_ID_SHIFT 11 +#define MCDE_OVL2COMP_CH_ID_MASK 0x00007800 +#define MCDE_OVL2COMP_CH_ID(__x) \ + MCDE_VAL2REG(MCDE_OVL2COMP, CH_ID, __x) +#define MCDE_OVL2COMP_YPOS_SHIFT 16 +#define MCDE_OVL2COMP_YPOS_MASK 0x07FF0000 +#define MCDE_OVL2COMP_YPOS(__x) \ + MCDE_VAL2REG(MCDE_OVL2COMP, YPOS, __x) +#define MCDE_OVL2COMP_Z_SHIFT 27 +#define MCDE_OVL2COMP_Z_MASK 0x78000000 +#define MCDE_OVL2COMP_Z(__x) \ + MCDE_VAL2REG(MCDE_OVL2COMP, Z, __x) +#define MCDE_OVL3COMP 0x00000474 +#define MCDE_OVL3COMP_XPOS_SHIFT 0 +#define MCDE_OVL3COMP_XPOS_MASK 0x000007FF +#define MCDE_OVL3COMP_XPOS(__x) \ + MCDE_VAL2REG(MCDE_OVL3COMP, XPOS, __x) +#define MCDE_OVL3COMP_CH_ID_SHIFT 11 +#define MCDE_OVL3COMP_CH_ID_MASK 0x00007800 +#define MCDE_OVL3COMP_CH_ID(__x) \ + 
MCDE_VAL2REG(MCDE_OVL3COMP, CH_ID, __x) +#define MCDE_OVL3COMP_YPOS_SHIFT 16 +#define MCDE_OVL3COMP_YPOS_MASK 0x07FF0000 +#define MCDE_OVL3COMP_YPOS(__x) \ + MCDE_VAL2REG(MCDE_OVL3COMP, YPOS, __x) +#define MCDE_OVL3COMP_Z_SHIFT 27 +#define MCDE_OVL3COMP_Z_MASK 0x78000000 +#define MCDE_OVL3COMP_Z(__x) \ + MCDE_VAL2REG(MCDE_OVL3COMP, Z, __x) +#define MCDE_OVL4COMP 0x00000494 +#define MCDE_OVL4COMP_XPOS_SHIFT 0 +#define MCDE_OVL4COMP_XPOS_MASK 0x000007FF +#define MCDE_OVL4COMP_XPOS(__x) \ + MCDE_VAL2REG(MCDE_OVL4COMP, XPOS, __x) +#define MCDE_OVL4COMP_CH_ID_SHIFT 11 +#define MCDE_OVL4COMP_CH_ID_MASK 0x00007800 +#define MCDE_OVL4COMP_CH_ID(__x) \ + MCDE_VAL2REG(MCDE_OVL4COMP, CH_ID, __x) +#define MCDE_OVL4COMP_YPOS_SHIFT 16 +#define MCDE_OVL4COMP_YPOS_MASK 0x07FF0000 +#define MCDE_OVL4COMP_YPOS(__x) \ + MCDE_VAL2REG(MCDE_OVL4COMP, YPOS, __x) +#define MCDE_OVL4COMP_Z_SHIFT 27 +#define MCDE_OVL4COMP_Z_MASK 0x78000000 +#define MCDE_OVL4COMP_Z(__x) \ + MCDE_VAL2REG(MCDE_OVL4COMP, Z, __x) +#define MCDE_OVL5COMP 0x000004B4 +#define MCDE_OVL5COMP_XPOS_SHIFT 0 +#define MCDE_OVL5COMP_XPOS_MASK 0x000007FF +#define MCDE_OVL5COMP_XPOS(__x) \ + MCDE_VAL2REG(MCDE_OVL5COMP, XPOS, __x) +#define MCDE_OVL5COMP_CH_ID_SHIFT 11 +#define MCDE_OVL5COMP_CH_ID_MASK 0x00007800 +#define MCDE_OVL5COMP_CH_ID(__x) \ + MCDE_VAL2REG(MCDE_OVL5COMP, CH_ID, __x) +#define MCDE_OVL5COMP_YPOS_SHIFT 16 +#define MCDE_OVL5COMP_YPOS_MASK 0x07FF0000 +#define MCDE_OVL5COMP_YPOS(__x) \ + MCDE_VAL2REG(MCDE_OVL5COMP, YPOS, __x) +#define MCDE_OVL5COMP_Z_SHIFT 27 +#define MCDE_OVL5COMP_Z_MASK 0x78000000 +#define MCDE_OVL5COMP_Z(__x) \ + MCDE_VAL2REG(MCDE_OVL5COMP, Z, __x) +#define MCDE_CHNL0CONF 0x00000600 +#define MCDE_CHNL0CONF_GROUPOFFSET 0x20 +#define MCDE_CHNL0CONF_PPL_SHIFT 0 +#define MCDE_CHNL0CONF_PPL_MASK 0x000007FF +#define MCDE_CHNL0CONF_PPL(__x) \ + MCDE_VAL2REG(MCDE_CHNL0CONF, PPL, __x) +#define MCDE_CHNL0CONF_LPF_SHIFT 16 +#define MCDE_CHNL0CONF_LPF_MASK 0x07FF0000 +#define MCDE_CHNL0CONF_LPF(__x) \ + 
MCDE_VAL2REG(MCDE_CHNL0CONF, LPF, __x) +#define MCDE_CHNL1CONF 0x00000620 +#define MCDE_CHNL1CONF_PPL_SHIFT 0 +#define MCDE_CHNL1CONF_PPL_MASK 0x000007FF +#define MCDE_CHNL1CONF_PPL(__x) \ + MCDE_VAL2REG(MCDE_CHNL1CONF, PPL, __x) +#define MCDE_CHNL1CONF_LPF_SHIFT 16 +#define MCDE_CHNL1CONF_LPF_MASK 0x07FF0000 +#define MCDE_CHNL1CONF_LPF(__x) \ + MCDE_VAL2REG(MCDE_CHNL1CONF, LPF, __x) +#define MCDE_CHNL2CONF 0x00000640 +#define MCDE_CHNL2CONF_PPL_SHIFT 0 +#define MCDE_CHNL2CONF_PPL_MASK 0x000007FF +#define MCDE_CHNL2CONF_PPL(__x) \ + MCDE_VAL2REG(MCDE_CHNL2CONF, PPL, __x) +#define MCDE_CHNL2CONF_LPF_SHIFT 16 +#define MCDE_CHNL2CONF_LPF_MASK 0x07FF0000 +#define MCDE_CHNL2CONF_LPF(__x) \ + MCDE_VAL2REG(MCDE_CHNL2CONF, LPF, __x) +#define MCDE_CHNL3CONF 0x00000660 +#define MCDE_CHNL3CONF_PPL_SHIFT 0 +#define MCDE_CHNL3CONF_PPL_MASK 0x000007FF +#define MCDE_CHNL3CONF_PPL(__x) \ + MCDE_VAL2REG(MCDE_CHNL3CONF, PPL, __x) +#define MCDE_CHNL3CONF_LPF_SHIFT 16 +#define MCDE_CHNL3CONF_LPF_MASK 0x07FF0000 +#define MCDE_CHNL3CONF_LPF(__x) \ + MCDE_VAL2REG(MCDE_CHNL3CONF, LPF, __x) +#define MCDE_CHNL0STAT 0x00000604 +#define MCDE_CHNL0STAT_GROUPOFFSET 0x20 +#define MCDE_CHNL0STAT_CHNLRD_SHIFT 0 +#define MCDE_CHNL0STAT_CHNLRD_MASK 0x00000001 +#define MCDE_CHNL0STAT_CHNLRD(__x) \ + MCDE_VAL2REG(MCDE_CHNL0STAT, CHNLRD, __x) +#define MCDE_CHNL0STAT_CHNLA_SHIFT 1 +#define MCDE_CHNL0STAT_CHNLA_MASK 0x00000002 +#define MCDE_CHNL0STAT_CHNLA(__x) \ + MCDE_VAL2REG(MCDE_CHNL0STAT, CHNLA, __x) +#define MCDE_CHNL0STAT_CHNLBLBCKGND_EN_SHIFT 16 +#define MCDE_CHNL0STAT_CHNLBLBCKGND_EN_MASK 0x00010000 +#define MCDE_CHNL0STAT_CHNLBLBCKGND_EN(__x) \ + MCDE_VAL2REG(MCDE_CHNL0STAT, CHNLBLBCKGND_EN, __x) +#define MCDE_CHNL1STAT 0x00000624 +#define MCDE_CHNL1STAT_CHNLRD_SHIFT 0 +#define MCDE_CHNL1STAT_CHNLRD_MASK 0x00000001 +#define MCDE_CHNL1STAT_CHNLRD(__x) \ + MCDE_VAL2REG(MCDE_CHNL1STAT, CHNLRD, __x) +#define MCDE_CHNL1STAT_CHNLA_SHIFT 1 +#define MCDE_CHNL1STAT_CHNLA_MASK 0x00000002 +#define 
MCDE_CHNL1STAT_CHNLA(__x) \ + MCDE_VAL2REG(MCDE_CHNL1STAT, CHNLA, __x) +#define MCDE_CHNL1STAT_CHNLBLBCKGND_EN_SHIFT 16 +#define MCDE_CHNL1STAT_CHNLBLBCKGND_EN_MASK 0x00010000 +#define MCDE_CHNL1STAT_CHNLBLBCKGND_EN(__x) \ + MCDE_VAL2REG(MCDE_CHNL1STAT, CHNLBLBCKGND_EN, __x) +#define MCDE_CHNL2STAT 0x00000644 +#define MCDE_CHNL2STAT_CHNLRD_SHIFT 0 +#define MCDE_CHNL2STAT_CHNLRD_MASK 0x00000001 +#define MCDE_CHNL2STAT_CHNLRD(__x) \ + MCDE_VAL2REG(MCDE_CHNL2STAT, CHNLRD, __x) +#define MCDE_CHNL2STAT_CHNLA_SHIFT 1 +#define MCDE_CHNL2STAT_CHNLA_MASK 0x00000002 +#define MCDE_CHNL2STAT_CHNLA(__x) \ + MCDE_VAL2REG(MCDE_CHNL2STAT, CHNLA, __x) +#define MCDE_CHNL2STAT_CHNLBLBCKGND_EN_SHIFT 16 +#define MCDE_CHNL2STAT_CHNLBLBCKGND_EN_MASK 0x00010000 +#define MCDE_CHNL2STAT_CHNLBLBCKGND_EN(__x) \ + MCDE_VAL2REG(MCDE_CHNL2STAT, CHNLBLBCKGND_EN, __x) +#define MCDE_CHNL3STAT 0x00000664 +#define MCDE_CHNL3STAT_CHNLRD_SHIFT 0 +#define MCDE_CHNL3STAT_CHNLRD_MASK 0x00000001 +#define MCDE_CHNL3STAT_CHNLRD(__x) \ + MCDE_VAL2REG(MCDE_CHNL3STAT, CHNLRD, __x) +#define MCDE_CHNL3STAT_CHNLA_SHIFT 1 +#define MCDE_CHNL3STAT_CHNLA_MASK 0x00000002 +#define MCDE_CHNL3STAT_CHNLA(__x) \ + MCDE_VAL2REG(MCDE_CHNL3STAT, CHNLA, __x) +#define MCDE_CHNL3STAT_CHNLBLBCKGND_EN_SHIFT 16 +#define MCDE_CHNL3STAT_CHNLBLBCKGND_EN_MASK 0x00010000 +#define MCDE_CHNL3STAT_CHNLBLBCKGND_EN(__x) \ + MCDE_VAL2REG(MCDE_CHNL3STAT, CHNLBLBCKGND_EN, __x) +#define MCDE_CHNL0SYNCHMOD 0x00000608 +#define MCDE_CHNL0SYNCHMOD_GROUPOFFSET 0x20 +#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SHIFT 0 +#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_MASK 0x00000003 +#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_HARDWARE 0 +#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_NO_SYNCH 1 +#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_SOFTWARE 2 +#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CHNL0SYNCHMOD, SRC_SYNCH, \ + MCDE_CHNL0SYNCHMOD_SRC_SYNCH_##__x) +#define MCDE_CHNL0SYNCHMOD_SRC_SYNCH(__x) \ + MCDE_VAL2REG(MCDE_CHNL0SYNCHMOD, SRC_SYNCH, __x) +#define 
MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_SHIFT 2 +#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_MASK 0x0000001C +#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_FORMATTER 0 +#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_TE0 1 +#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_TE1 2 +#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CHNL0SYNCHMOD, OUT_SYNCH_SRC, \ + MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC_##__x) +#define MCDE_CHNL0SYNCHMOD_OUT_SYNCH_SRC(__x) \ + MCDE_VAL2REG(MCDE_CHNL0SYNCHMOD, OUT_SYNCH_SRC, __x) +#define MCDE_CHNL1SYNCHMOD 0x00000628 +#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_SHIFT 0 +#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_MASK 0x00000003 +#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_HARDWARE 0 +#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_NO_SYNCH 1 +#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_SOFTWARE 2 +#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CHNL1SYNCHMOD, SRC_SYNCH, \ + MCDE_CHNL1SYNCHMOD_SRC_SYNCH_##__x) +#define MCDE_CHNL1SYNCHMOD_SRC_SYNCH(__x) \ + MCDE_VAL2REG(MCDE_CHNL1SYNCHMOD, SRC_SYNCH, __x) +#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_SHIFT 2 +#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_MASK 0x0000001C +#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_FORMATTER 0 +#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_TE0 1 +#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_TE1 2 +#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CHNL1SYNCHMOD, OUT_SYNCH_SRC, \ + MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC_##__x) +#define MCDE_CHNL1SYNCHMOD_OUT_SYNCH_SRC(__x) \ + MCDE_VAL2REG(MCDE_CHNL1SYNCHMOD, OUT_SYNCH_SRC, __x) +#define MCDE_CHNL2SYNCHMOD 0x00000648 +#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_SHIFT 0 +#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_MASK 0x00000003 +#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_HARDWARE 0 +#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_NO_SYNCH 1 +#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_SOFTWARE 2 +#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CHNL2SYNCHMOD, SRC_SYNCH, \ + MCDE_CHNL2SYNCHMOD_SRC_SYNCH_##__x) +#define MCDE_CHNL2SYNCHMOD_SRC_SYNCH(__x) \ + 
MCDE_VAL2REG(MCDE_CHNL2SYNCHMOD, SRC_SYNCH, __x) +#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_SHIFT 2 +#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_MASK 0x0000001C +#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_FORMATTER 0 +#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_TE0 1 +#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_TE1 2 +#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CHNL2SYNCHMOD, OUT_SYNCH_SRC, \ + MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC_##__x) +#define MCDE_CHNL2SYNCHMOD_OUT_SYNCH_SRC(__x) \ + MCDE_VAL2REG(MCDE_CHNL2SYNCHMOD, OUT_SYNCH_SRC, __x) +#define MCDE_CHNL3SYNCHMOD 0x00000668 +#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_SHIFT 0 +#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_MASK 0x00000003 +#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_HARDWARE 0 +#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_NO_SYNCH 1 +#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_SOFTWARE 2 +#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CHNL3SYNCHMOD, SRC_SYNCH, \ + MCDE_CHNL3SYNCHMOD_SRC_SYNCH_##__x) +#define MCDE_CHNL3SYNCHMOD_SRC_SYNCH(__x) \ + MCDE_VAL2REG(MCDE_CHNL3SYNCHMOD, SRC_SYNCH, __x) +#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_SHIFT 2 +#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_MASK 0x0000001C +#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_FORMATTER 0 +#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_TE0 1 +#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_TE1 2 +#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CHNL3SYNCHMOD, OUT_SYNCH_SRC, \ + MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC_##__x) +#define MCDE_CHNL3SYNCHMOD_OUT_SYNCH_SRC(__x) \ + MCDE_VAL2REG(MCDE_CHNL3SYNCHMOD, OUT_SYNCH_SRC, __x) +#define MCDE_CHNL0SYNCHSW 0x0000060C +#define MCDE_CHNL0SYNCHSW_GROUPOFFSET 0x20 +#define MCDE_CHNL0SYNCHSW_SW_TRIG_SHIFT 0 +#define MCDE_CHNL0SYNCHSW_SW_TRIG_MASK 0x00000001 +#define MCDE_CHNL0SYNCHSW_SW_TRIG(__x) \ + MCDE_VAL2REG(MCDE_CHNL0SYNCHSW, SW_TRIG, __x) +#define MCDE_CHNL1SYNCHSW 0x0000062C +#define MCDE_CHNL1SYNCHSW_SW_TRIG_SHIFT 0 +#define MCDE_CHNL1SYNCHSW_SW_TRIG_MASK 0x00000001 +#define 
MCDE_CHNL1SYNCHSW_SW_TRIG(__x) \ + MCDE_VAL2REG(MCDE_CHNL1SYNCHSW, SW_TRIG, __x) +#define MCDE_CHNL2SYNCHSW 0x0000064C +#define MCDE_CHNL2SYNCHSW_SW_TRIG_SHIFT 0 +#define MCDE_CHNL2SYNCHSW_SW_TRIG_MASK 0x00000001 +#define MCDE_CHNL2SYNCHSW_SW_TRIG(__x) \ + MCDE_VAL2REG(MCDE_CHNL2SYNCHSW, SW_TRIG, __x) +#define MCDE_CHNL3SYNCHSW 0x0000066C +#define MCDE_CHNL3SYNCHSW_SW_TRIG_SHIFT 0 +#define MCDE_CHNL3SYNCHSW_SW_TRIG_MASK 0x00000001 +#define MCDE_CHNL3SYNCHSW_SW_TRIG(__x) \ + MCDE_VAL2REG(MCDE_CHNL3SYNCHSW, SW_TRIG, __x) +#define MCDE_CHNL0BCKGNDCOL 0x00000610 +#define MCDE_CHNL0BCKGNDCOL_GROUPOFFSET 0x20 +#define MCDE_CHNL0BCKGNDCOL_B_SHIFT 0 +#define MCDE_CHNL0BCKGNDCOL_B_MASK 0x000000FF +#define MCDE_CHNL0BCKGNDCOL_B(__x) \ + MCDE_VAL2REG(MCDE_CHNL0BCKGNDCOL, B, __x) +#define MCDE_CHNL0BCKGNDCOL_G_SHIFT 8 +#define MCDE_CHNL0BCKGNDCOL_G_MASK 0x0000FF00 +#define MCDE_CHNL0BCKGNDCOL_G(__x) \ + MCDE_VAL2REG(MCDE_CHNL0BCKGNDCOL, G, __x) +#define MCDE_CHNL0BCKGNDCOL_R_SHIFT 16 +#define MCDE_CHNL0BCKGNDCOL_R_MASK 0x00FF0000 +#define MCDE_CHNL0BCKGNDCOL_R(__x) \ + MCDE_VAL2REG(MCDE_CHNL0BCKGNDCOL, R, __x) +#define MCDE_CHNL1BCKGNDCOL 0x00000630 +#define MCDE_CHNL1BCKGNDCOL_B_SHIFT 0 +#define MCDE_CHNL1BCKGNDCOL_B_MASK 0x000000FF +#define MCDE_CHNL1BCKGNDCOL_B(__x) \ + MCDE_VAL2REG(MCDE_CHNL1BCKGNDCOL, B, __x) +#define MCDE_CHNL1BCKGNDCOL_G_SHIFT 8 +#define MCDE_CHNL1BCKGNDCOL_G_MASK 0x0000FF00 +#define MCDE_CHNL1BCKGNDCOL_G(__x) \ + MCDE_VAL2REG(MCDE_CHNL1BCKGNDCOL, G, __x) +#define MCDE_CHNL1BCKGNDCOL_R_SHIFT 16 +#define MCDE_CHNL1BCKGNDCOL_R_MASK 0x00FF0000 +#define MCDE_CHNL1BCKGNDCOL_R(__x) \ + MCDE_VAL2REG(MCDE_CHNL1BCKGNDCOL, R, __x) +#define MCDE_CHNL2BCKGNDCOL 0x00000650 +#define MCDE_CHNL2BCKGNDCOL_B_SHIFT 0 +#define MCDE_CHNL2BCKGNDCOL_B_MASK 0x000000FF +#define MCDE_CHNL2BCKGNDCOL_B(__x) \ + MCDE_VAL2REG(MCDE_CHNL2BCKGNDCOL, B, __x) +#define MCDE_CHNL2BCKGNDCOL_G_SHIFT 8 +#define MCDE_CHNL2BCKGNDCOL_G_MASK 0x0000FF00 +#define MCDE_CHNL2BCKGNDCOL_G(__x) \ + 
MCDE_VAL2REG(MCDE_CHNL2BCKGNDCOL, G, __x) +#define MCDE_CHNL2BCKGNDCOL_R_SHIFT 16 +#define MCDE_CHNL2BCKGNDCOL_R_MASK 0x00FF0000 +#define MCDE_CHNL2BCKGNDCOL_R(__x) \ + MCDE_VAL2REG(MCDE_CHNL2BCKGNDCOL, R, __x) +#define MCDE_CHNL3BCKGNDCOL 0x00000670 +#define MCDE_CHNL3BCKGNDCOL_B_SHIFT 0 +#define MCDE_CHNL3BCKGNDCOL_B_MASK 0x000000FF +#define MCDE_CHNL3BCKGNDCOL_B(__x) \ + MCDE_VAL2REG(MCDE_CHNL3BCKGNDCOL, B, __x) +#define MCDE_CHNL3BCKGNDCOL_G_SHIFT 8 +#define MCDE_CHNL3BCKGNDCOL_G_MASK 0x0000FF00 +#define MCDE_CHNL3BCKGNDCOL_G(__x) \ + MCDE_VAL2REG(MCDE_CHNL3BCKGNDCOL, G, __x) +#define MCDE_CHNL3BCKGNDCOL_R_SHIFT 16 +#define MCDE_CHNL3BCKGNDCOL_R_MASK 0x00FF0000 +#define MCDE_CHNL3BCKGNDCOL_R(__x) \ + MCDE_VAL2REG(MCDE_CHNL3BCKGNDCOL, R, __x) +#define MCDE_CHNL0MUXING 0x00000614 +#define MCDE_CHNL0MUXING_GROUPOFFSET 0x20 +#define MCDE_CHNL0MUXING_FIFO_ID_SHIFT 0 +#define MCDE_CHNL0MUXING_FIFO_ID_MASK 0x00000007 +#define MCDE_CHNL0MUXING_FIFO_ID_FIFO_A 0 +#define MCDE_CHNL0MUXING_FIFO_ID_FIFO_B 1 +#define MCDE_CHNL0MUXING_FIFO_ID_FIFO_C0 2 +#define MCDE_CHNL0MUXING_FIFO_ID_FIFO_C1 3 +#define MCDE_CHNL0MUXING_FIFO_ID_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CHNL0MUXING, FIFO_ID, MCDE_CHNL0MUXING_FIFO_ID_##__x) +#define MCDE_CHNL0MUXING_FIFO_ID(__x) \ + MCDE_VAL2REG(MCDE_CHNL0MUXING, FIFO_ID, __x) +#define MCDE_CHNL1MUXING 0x00000634 +#define MCDE_CHNL1MUXING_FIFO_ID_SHIFT 0 +#define MCDE_CHNL1MUXING_FIFO_ID_MASK 0x00000007 +#define MCDE_CHNL1MUXING_FIFO_ID_FIFO_A 0 +#define MCDE_CHNL1MUXING_FIFO_ID_FIFO_B 1 +#define MCDE_CHNL1MUXING_FIFO_ID_FIFO_C0 2 +#define MCDE_CHNL1MUXING_FIFO_ID_FIFO_C1 3 +#define MCDE_CHNL1MUXING_FIFO_ID_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CHNL1MUXING, FIFO_ID, MCDE_CHNL1MUXING_FIFO_ID_##__x) +#define MCDE_CHNL1MUXING_FIFO_ID(__x) \ + MCDE_VAL2REG(MCDE_CHNL1MUXING, FIFO_ID, __x) +#define MCDE_CHNL2MUXING 0x00000654 +#define MCDE_CHNL2MUXING_FIFO_ID_SHIFT 0 +#define MCDE_CHNL2MUXING_FIFO_ID_MASK 0x00000007 +#define MCDE_CHNL2MUXING_FIFO_ID_FIFO_A 0 
+#define MCDE_CHNL2MUXING_FIFO_ID_FIFO_B 1 +#define MCDE_CHNL2MUXING_FIFO_ID_FIFO_C0 2 +#define MCDE_CHNL2MUXING_FIFO_ID_FIFO_C1 3 +#define MCDE_CHNL2MUXING_FIFO_ID_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CHNL2MUXING, FIFO_ID, MCDE_CHNL2MUXING_FIFO_ID_##__x) +#define MCDE_CHNL2MUXING_FIFO_ID(__x) \ + MCDE_VAL2REG(MCDE_CHNL2MUXING, FIFO_ID, __x) +#define MCDE_CHNL3MUXING 0x00000674 +#define MCDE_CHNL3MUXING_FIFO_ID_SHIFT 0 +#define MCDE_CHNL3MUXING_FIFO_ID_MASK 0x00000007 +#define MCDE_CHNL3MUXING_FIFO_ID_FIFO_A 0 +#define MCDE_CHNL3MUXING_FIFO_ID_FIFO_B 1 +#define MCDE_CHNL3MUXING_FIFO_ID_FIFO_C0 2 +#define MCDE_CHNL3MUXING_FIFO_ID_FIFO_C1 3 +#define MCDE_CHNL3MUXING_FIFO_ID_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CHNL3MUXING, FIFO_ID, MCDE_CHNL3MUXING_FIFO_ID_##__x) +#define MCDE_CHNL3MUXING_FIFO_ID(__x) \ + MCDE_VAL2REG(MCDE_CHNL3MUXING, FIFO_ID, __x) +#define MCDE_CRA0 0x00000800 +#define MCDE_CRA0_GROUPOFFSET 0x200 +#define MCDE_CRA0_FLOEN_SHIFT 0 +#define MCDE_CRA0_FLOEN_MASK 0x00000001 +#define MCDE_CRA0_FLOEN(__x) \ + MCDE_VAL2REG(MCDE_CRA0, FLOEN, __x) +#define MCDE_CRA0_BLENDEN_SHIFT 2 +#define MCDE_CRA0_BLENDEN_MASK 0x00000004 +#define MCDE_CRA0_BLENDEN(__x) \ + MCDE_VAL2REG(MCDE_CRA0, BLENDEN, __x) +#define MCDE_CRA0_AFLICKEN_SHIFT 3 +#define MCDE_CRA0_AFLICKEN_MASK 0x00000008 +#define MCDE_CRA0_AFLICKEN(__x) \ + MCDE_VAL2REG(MCDE_CRA0, AFLICKEN, __x) +#define MCDE_CRA0_PALEN_SHIFT 4 +#define MCDE_CRA0_PALEN_MASK 0x00000010 +#define MCDE_CRA0_PALEN(__x) \ + MCDE_VAL2REG(MCDE_CRA0, PALEN, __x) +#define MCDE_CRA0_DITHEN_SHIFT 5 +#define MCDE_CRA0_DITHEN_MASK 0x00000020 +#define MCDE_CRA0_DITHEN(__x) \ + MCDE_VAL2REG(MCDE_CRA0, DITHEN, __x) +#define MCDE_CRA0_GAMEN_SHIFT 6 +#define MCDE_CRA0_GAMEN_MASK 0x00000040 +#define MCDE_CRA0_GAMEN(__x) \ + MCDE_VAL2REG(MCDE_CRA0, GAMEN, __x) +#define MCDE_CRA0_KEYCTRL_SHIFT 7 +#define MCDE_CRA0_KEYCTRL_MASK 0x00000380 +#define MCDE_CRA0_KEYCTRL_OFF 0 +#define MCDE_CRA0_KEYCTRL_ALPHA_RGB 1 +#define MCDE_CRA0_KEYCTRL_RGB 2 +#define 
MCDE_CRA0_KEYCTRL_FALPHA_FRGB 4 +#define MCDE_CRA0_KEYCTRL_FRGB 5 +#define MCDE_CRA0_KEYCTRL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CRA0, KEYCTRL, MCDE_CRA0_KEYCTRL_##__x) +#define MCDE_CRA0_KEYCTRL(__x) \ + MCDE_VAL2REG(MCDE_CRA0, KEYCTRL, __x) +#define MCDE_CRA0_BLENDCTRL_SHIFT 10 +#define MCDE_CRA0_BLENDCTRL_MASK 0x00000400 +#define MCDE_CRA0_BLENDCTRL_SOURCE 0 +#define MCDE_CRA0_BLENDCTRL_CONSTANT 1 +#define MCDE_CRA0_BLENDCTRL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CRA0, BLENDCTRL, MCDE_CRA0_BLENDCTRL_##__x) +#define MCDE_CRA0_BLENDCTRL(__x) \ + MCDE_VAL2REG(MCDE_CRA0, BLENDCTRL, __x) +#define MCDE_CRA0_FLICKMODE_SHIFT 11 +#define MCDE_CRA0_FLICKMODE_MASK 0x00001800 +#define MCDE_CRA0_FLICKMODE_FORCE_FILTER_0 0 +#define MCDE_CRA0_FLICKMODE_ADAPTIVE 1 +#define MCDE_CRA0_FLICKMODE_TEST_MODE 2 +#define MCDE_CRA0_FLICKMODE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CRA0, FLICKMODE, MCDE_CRA0_FLICKMODE_##__x) +#define MCDE_CRA0_FLICKMODE(__x) \ + MCDE_VAL2REG(MCDE_CRA0, FLICKMODE, __x) +#define MCDE_CRA0_FLOCKFORMAT_SHIFT 13 +#define MCDE_CRA0_FLOCKFORMAT_MASK 0x00002000 +#define MCDE_CRA0_FLOCKFORMAT_YCBCR 0 +#define MCDE_CRA0_FLOCKFORMAT_RGB 1 +#define MCDE_CRA0_FLOCKFORMAT_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CRA0, FLOCKFORMAT, MCDE_CRA0_FLOCKFORMAT_##__x) +#define MCDE_CRA0_FLOCKFORMAT(__x) \ + MCDE_VAL2REG(MCDE_CRA0, FLOCKFORMAT, __x) +#define MCDE_CRA0_PALMODE_SHIFT 14 +#define MCDE_CRA0_PALMODE_MASK 0x00004000 +#define MCDE_CRA0_PALMODE_PALETTE 0 +#define MCDE_CRA0_PALMODE_GAMMA 1 +#define MCDE_CRA0_PALMODE(__x) \ + MCDE_VAL2REG(MCDE_CRA0, PALMODE, __x) +#define MCDE_CRA0_OLEDEN_SHIFT 15 +#define MCDE_CRA0_OLEDEN_MASK 0x00008000 +#define MCDE_CRA0_OLEDEN(__x) \ + MCDE_VAL2REG(MCDE_CRA0, OLEDEN, __x) +#define MCDE_CRA0_ALPHABLEND_SHIFT 16 +#define MCDE_CRA0_ALPHABLEND_MASK 0x00FF0000 +#define MCDE_CRA0_ALPHABLEND(__x) \ + MCDE_VAL2REG(MCDE_CRA0, ALPHABLEND, __x) +#define MCDE_CRA0_ROTEN_SHIFT 24 +#define MCDE_CRA0_ROTEN_MASK 0x01000000 +#define MCDE_CRA0_ROTEN(__x) \ + 
MCDE_VAL2REG(MCDE_CRA0, ROTEN, __x) +#define MCDE_CRB0 0x00000A00 +#define MCDE_CRB0_FLOEN_SHIFT 0 +#define MCDE_CRB0_FLOEN_MASK 0x00000001 +#define MCDE_CRB0_FLOEN(__x) \ + MCDE_VAL2REG(MCDE_CRB0, FLOEN, __x) +#define MCDE_CRB0_BLENDEN_SHIFT 2 +#define MCDE_CRB0_BLENDEN_MASK 0x00000004 +#define MCDE_CRB0_BLENDEN(__x) \ + MCDE_VAL2REG(MCDE_CRB0, BLENDEN, __x) +#define MCDE_CRB0_AFLICKEN_SHIFT 3 +#define MCDE_CRB0_AFLICKEN_MASK 0x00000008 +#define MCDE_CRB0_AFLICKEN(__x) \ + MCDE_VAL2REG(MCDE_CRB0, AFLICKEN, __x) +#define MCDE_CRB0_PALEN_SHIFT 4 +#define MCDE_CRB0_PALEN_MASK 0x00000010 +#define MCDE_CRB0_PALEN(__x) \ + MCDE_VAL2REG(MCDE_CRB0, PALEN, __x) +#define MCDE_CRB0_DITHEN_SHIFT 5 +#define MCDE_CRB0_DITHEN_MASK 0x00000020 +#define MCDE_CRB0_DITHEN(__x) \ + MCDE_VAL2REG(MCDE_CRB0, DITHEN, __x) +#define MCDE_CRB0_GAMEN_SHIFT 6 +#define MCDE_CRB0_GAMEN_MASK 0x00000040 +#define MCDE_CRB0_GAMEN(__x) \ + MCDE_VAL2REG(MCDE_CRB0, GAMEN, __x) +#define MCDE_CRB0_KEYCTRL_SHIFT 7 +#define MCDE_CRB0_KEYCTRL_MASK 0x00000380 +#define MCDE_CRB0_KEYCTRL_OFF 0 +#define MCDE_CRB0_KEYCTRL_ALPHA_RGB 1 +#define MCDE_CRB0_KEYCTRL_RGB 2 +#define MCDE_CRB0_KEYCTRL_FALPHA_FRGB 4 +#define MCDE_CRB0_KEYCTRL_FRGB 5 +#define MCDE_CRB0_KEYCTRL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CRB0, KEYCTRL, MCDE_CRB0_KEYCTRL_##__x) +#define MCDE_CRB0_KEYCTRL(__x) \ + MCDE_VAL2REG(MCDE_CRB0, KEYCTRL, __x) +#define MCDE_CRB0_BLENDCTRL_SHIFT 10 +#define MCDE_CRB0_BLENDCTRL_MASK 0x00000400 +#define MCDE_CRB0_BLENDCTRL_SOURCE 0 +#define MCDE_CRB0_BLENDCTRL_CONSTANT 1 +#define MCDE_CRB0_BLENDCTRL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CRB0, BLENDCTRL, MCDE_CRB0_BLENDCTRL_##__x) +#define MCDE_CRB0_BLENDCTRL(__x) \ + MCDE_VAL2REG(MCDE_CRB0, BLENDCTRL, __x) +#define MCDE_CRB0_FLICKMODE_SHIFT 11 +#define MCDE_CRB0_FLICKMODE_MASK 0x00001800 +#define MCDE_CRB0_FLICKMODE_FORCE_FILTER_0 0 +#define MCDE_CRB0_FLICKMODE_ADAPTIVE 1 +#define MCDE_CRB0_FLICKMODE_TEST_MODE 2 +#define MCDE_CRB0_FLICKMODE_ENUM(__x) \ + 
MCDE_VAL2REG(MCDE_CRB0, FLICKMODE, MCDE_CRB0_FLICKMODE_##__x) +#define MCDE_CRB0_FLICKMODE(__x) \ + MCDE_VAL2REG(MCDE_CRB0, FLICKMODE, __x) +#define MCDE_CRB0_FLOCKFORMAT_SHIFT 13 +#define MCDE_CRB0_FLOCKFORMAT_MASK 0x00002000 +#define MCDE_CRB0_FLOCKFORMAT_YCBCR 0 +#define MCDE_CRB0_FLOCKFORMAT_RGB 1 +#define MCDE_CRB0_FLOCKFORMAT_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CRB0, FLOCKFORMAT, MCDE_CRB0_FLOCKFORMAT_##__x) +#define MCDE_CRB0_FLOCKFORMAT(__x) \ + MCDE_VAL2REG(MCDE_CRB0, FLOCKFORMAT, __x) +#define MCDE_CRB0_PALMODE_SHIFT 14 +#define MCDE_CRB0_PALMODE_MASK 0x00004000 +#define MCDE_CRB0_PALMODE_PALETTE 0 +#define MCDE_CRB0_PALMODE_GAMMA 1 +#define MCDE_CRB0_PALMODE(__x) \ + MCDE_VAL2REG(MCDE_CRB0, PALMODE, __x) +#define MCDE_CRB0_OLEDEN_SHIFT 15 +#define MCDE_CRB0_OLEDEN_MASK 0x00008000 +#define MCDE_CRB0_OLEDEN(__x) \ + MCDE_VAL2REG(MCDE_CRB0, OLEDEN, __x) +#define MCDE_CRB0_ALPHABLEND_SHIFT 16 +#define MCDE_CRB0_ALPHABLEND_MASK 0x00FF0000 +#define MCDE_CRB0_ALPHABLEND(__x) \ + MCDE_VAL2REG(MCDE_CRB0, ALPHABLEND, __x) +#define MCDE_CRB0_ROTEN_SHIFT 24 +#define MCDE_CRB0_ROTEN_MASK 0x01000000 +#define MCDE_CRB0_ROTEN(__x) \ + MCDE_VAL2REG(MCDE_CRB0, ROTEN, __x) +#define MCDE_CRA1 0x00000804 +#define MCDE_CRA1_GROUPOFFSET 0x200 +#define MCDE_CRA1_PCD_SHIFT 0 +#define MCDE_CRA1_PCD_MASK 0x000003FF +#define MCDE_CRA1_PCD(__x) \ + MCDE_VAL2REG(MCDE_CRA1, PCD, __x) +#define MCDE_CRA1_CLKSEL_SHIFT 10 +#define MCDE_CRA1_CLKSEL_MASK 0x00001C00 +#define MCDE_CRA1_CLKSEL_CLKPLL72 0 +#define MCDE_CRA1_CLKSEL_CLKPLL27 2 +#define MCDE_CRA1_CLKSEL_TV1CLK 3 +#define MCDE_CRA1_CLKSEL_TV2CLK 4 +#define MCDE_CRA1_CLKSEL_MCDECLK 5 +#define MCDE_CRA1_CLKSEL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CRA1, CLKSEL, MCDE_CRA1_CLKSEL_##__x) +#define MCDE_CRA1_CLKSEL(__x) \ + MCDE_VAL2REG(MCDE_CRA1, CLKSEL, __x) +#define MCDE_CRA1_CDWIN_SHIFT 13 +#define MCDE_CRA1_CDWIN_MASK 0x0001E000 +#define MCDE_CRA1_CDWIN_8BPP_C1 0 +#define MCDE_CRA1_CDWIN_12BPP_C1 1 +#define MCDE_CRA1_CDWIN_12BPP_C2 2 +#define 
MCDE_CRA1_CDWIN_16BPP_C1 3 +#define MCDE_CRA1_CDWIN_16BPP_C2 4 +#define MCDE_CRA1_CDWIN_16BPP_C3 5 +#define MCDE_CRA1_CDWIN_18BPP_C1 6 +#define MCDE_CRA1_CDWIN_18BPP_C2 7 +#define MCDE_CRA1_CDWIN_24BPP 8 +#define MCDE_CRA1_CDWIN_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CRA1, CDWIN, MCDE_CRA1_CDWIN_##__x) +#define MCDE_CRA1_CDWIN(__x) \ + MCDE_VAL2REG(MCDE_CRA1, CDWIN, __x) +#define MCDE_CRA1_OUTBPP_SHIFT 25 +#define MCDE_CRA1_OUTBPP_MASK 0x1E000000 +#define MCDE_CRA1_OUTBPP_MONO1 0 +#define MCDE_CRA1_OUTBPP_MONO2 1 +#define MCDE_CRA1_OUTBPP_MONO4 2 +#define MCDE_CRA1_OUTBPP_MONO8 3 +#define MCDE_CRA1_OUTBPP_8BPP 4 +#define MCDE_CRA1_OUTBPP_12BPP 5 +#define MCDE_CRA1_OUTBPP_15BPP 6 +#define MCDE_CRA1_OUTBPP_16BPP 7 +#define MCDE_CRA1_OUTBPP_18BPP 8 +#define MCDE_CRA1_OUTBPP_24BPP 9 +#define MCDE_CRA1_OUTBPP_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CRA1, OUTBPP, MCDE_CRA1_OUTBPP_##__x) +#define MCDE_CRA1_OUTBPP(__x) \ + MCDE_VAL2REG(MCDE_CRA1, OUTBPP, __x) +#define MCDE_CRA1_BCD_SHIFT 29 +#define MCDE_CRA1_BCD_MASK 0x20000000 +#define MCDE_CRA1_BCD(__x) \ + MCDE_VAL2REG(MCDE_CRA1, BCD, __x) +#define MCDE_CRA1_CLKTYPE_SHIFT 30 +#define MCDE_CRA1_CLKTYPE_MASK 0x40000000 +#define MCDE_CRA1_CLKTYPE_TVXCLKSEL0 0 +#define MCDE_CRA1_CLKTYPE_TVXCLKSEL1 1 +#define MCDE_CRA1_CLKTYPE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CRA1, CLKTYPE, MCDE_CRA1_CLKTYPE_##__x) +#define MCDE_CRA1_CLKTYPE(__x) \ + MCDE_VAL2REG(MCDE_CRA1, CLKTYPE, __x) +#define MCDE_CRB1 0x00000A04 +#define MCDE_CRB1_PCD_SHIFT 0 +#define MCDE_CRB1_PCD_MASK 0x000003FF +#define MCDE_CRB1_PCD(__x) \ + MCDE_VAL2REG(MCDE_CRB1, PCD, __x) +#define MCDE_CRB1_CLKSEL_SHIFT 10 +#define MCDE_CRB1_CLKSEL_MASK 0x00001C00 +#define MCDE_CRB1_CLKSEL_CLKPLL72 0 +#define MCDE_CRB1_CLKSEL_CLKPLL27 2 +#define MCDE_CRB1_CLKSEL_TV1CLK 3 +#define MCDE_CRB1_CLKSEL_TV2CLK 4 +#define MCDE_CRB1_CLKSEL_MCDECLK 5 +#define MCDE_CRB1_CLKSEL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CRB1, CLKSEL, MCDE_CRB1_CLKSEL_##__x) +#define MCDE_CRB1_CLKSEL(__x) \ + MCDE_VAL2REG(MCDE_CRB1, 
CLKSEL, __x) +#define MCDE_CRB1_CDWIN_SHIFT 13 +#define MCDE_CRB1_CDWIN_MASK 0x0001E000 +#define MCDE_CRB1_CDWIN_8BPP_C1 0 +#define MCDE_CRB1_CDWIN_12BPP_C1 1 +#define MCDE_CRB1_CDWIN_12BPP_C2 2 +#define MCDE_CRB1_CDWIN_16BPP_C1 3 +#define MCDE_CRB1_CDWIN_16BPP_C2 4 +#define MCDE_CRB1_CDWIN_16BPP_C3 5 +#define MCDE_CRB1_CDWIN_18BPP_C1 6 +#define MCDE_CRB1_CDWIN_18BPP_C2 7 +#define MCDE_CRB1_CDWIN_24BPP 8 +#define MCDE_CRB1_CDWIN_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CRB1, CDWIN, MCDE_CRB1_CDWIN_##__x) +#define MCDE_CRB1_CDWIN(__x) \ + MCDE_VAL2REG(MCDE_CRB1, CDWIN, __x) +#define MCDE_CRB1_OUTBPP_SHIFT 25 +#define MCDE_CRB1_OUTBPP_MASK 0x1E000000 +#define MCDE_CRB1_OUTBPP_MONO1 0 +#define MCDE_CRB1_OUTBPP_MONO2 1 +#define MCDE_CRB1_OUTBPP_MONO4 2 +#define MCDE_CRB1_OUTBPP_MONO8 3 +#define MCDE_CRB1_OUTBPP_8BPP 4 +#define MCDE_CRB1_OUTBPP_12BPP 5 +#define MCDE_CRB1_OUTBPP_15BPP 6 +#define MCDE_CRB1_OUTBPP_16BPP 7 +#define MCDE_CRB1_OUTBPP_18BPP 8 +#define MCDE_CRB1_OUTBPP_24BPP 9 +#define MCDE_CRB1_OUTBPP_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CRB1, OUTBPP, MCDE_CRB1_OUTBPP_##__x) +#define MCDE_CRB1_OUTBPP(__x) \ + MCDE_VAL2REG(MCDE_CRB1, OUTBPP, __x) +#define MCDE_CRB1_BCD_SHIFT 29 +#define MCDE_CRB1_BCD_MASK 0x20000000 +#define MCDE_CRB1_BCD(__x) \ + MCDE_VAL2REG(MCDE_CRB1, BCD, __x) +#define MCDE_CRB1_CLKTYPE_SHIFT 30 +#define MCDE_CRB1_CLKTYPE_MASK 0x40000000 +#define MCDE_CRB1_CLKTYPE_TVXCLKSEL0 0 +#define MCDE_CRB1_CLKTYPE_TVXCLKSEL1 1 +#define MCDE_CRB1_CLKTYPE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CRB1, CLKTYPE, MCDE_CRB1_CLKTYPE_##__x) +#define MCDE_CRB1_CLKTYPE(__x) \ + MCDE_VAL2REG(MCDE_CRB1, CLKTYPE, __x) +#define MCDE_COLKEYA 0x00000808 +#define MCDE_COLKEYA_GROUPOFFSET 0x200 +#define MCDE_COLKEYA_KEYB_SHIFT 0 +#define MCDE_COLKEYA_KEYB_MASK 0x000000FF +#define MCDE_COLKEYA_KEYB(__x) \ + MCDE_VAL2REG(MCDE_COLKEYA, KEYB, __x) +#define MCDE_COLKEYA_KEYG_SHIFT 8 +#define MCDE_COLKEYA_KEYG_MASK 0x0000FF00 +#define MCDE_COLKEYA_KEYG(__x) \ + MCDE_VAL2REG(MCDE_COLKEYA, KEYG, 
__x) +#define MCDE_COLKEYA_KEYR_SHIFT 16 +#define MCDE_COLKEYA_KEYR_MASK 0x00FF0000 +#define MCDE_COLKEYA_KEYR(__x) \ + MCDE_VAL2REG(MCDE_COLKEYA, KEYR, __x) +#define MCDE_COLKEYA_KEYA_SHIFT 24 +#define MCDE_COLKEYA_KEYA_MASK 0xFF000000 +#define MCDE_COLKEYA_KEYA(__x) \ + MCDE_VAL2REG(MCDE_COLKEYA, KEYA, __x) +#define MCDE_COLKEYB 0x00000A08 +#define MCDE_COLKEYB_KEYB_SHIFT 0 +#define MCDE_COLKEYB_KEYB_MASK 0x000000FF +#define MCDE_COLKEYB_KEYB(__x) \ + MCDE_VAL2REG(MCDE_COLKEYB, KEYB, __x) +#define MCDE_COLKEYB_KEYG_SHIFT 8 +#define MCDE_COLKEYB_KEYG_MASK 0x0000FF00 +#define MCDE_COLKEYB_KEYG(__x) \ + MCDE_VAL2REG(MCDE_COLKEYB, KEYG, __x) +#define MCDE_COLKEYB_KEYR_SHIFT 16 +#define MCDE_COLKEYB_KEYR_MASK 0x00FF0000 +#define MCDE_COLKEYB_KEYR(__x) \ + MCDE_VAL2REG(MCDE_COLKEYB, KEYR, __x) +#define MCDE_COLKEYB_KEYA_SHIFT 24 +#define MCDE_COLKEYB_KEYA_MASK 0xFF000000 +#define MCDE_COLKEYB_KEYA(__x) \ + MCDE_VAL2REG(MCDE_COLKEYB, KEYA, __x) +#define MCDE_FCOLKEYA 0x0000080C +#define MCDE_FCOLKEYA_GROUPOFFSET 0x200 +#define MCDE_FCOLKEYA_FKEYB_SHIFT 0 +#define MCDE_FCOLKEYA_FKEYB_MASK 0x000000FF +#define MCDE_FCOLKEYA_FKEYB(__x) \ + MCDE_VAL2REG(MCDE_FCOLKEYA, FKEYB, __x) +#define MCDE_FCOLKEYA_FKEYG_SHIFT 8 +#define MCDE_FCOLKEYA_FKEYG_MASK 0x0000FF00 +#define MCDE_FCOLKEYA_FKEYG(__x) \ + MCDE_VAL2REG(MCDE_FCOLKEYA, FKEYG, __x) +#define MCDE_FCOLKEYA_FKEYR_SHIFT 16 +#define MCDE_FCOLKEYA_FKEYR_MASK 0x00FF0000 +#define MCDE_FCOLKEYA_FKEYR(__x) \ + MCDE_VAL2REG(MCDE_FCOLKEYA, FKEYR, __x) +#define MCDE_FCOLKEYA_FKEYA_SHIFT 24 +#define MCDE_FCOLKEYA_FKEYA_MASK 0xFF000000 +#define MCDE_FCOLKEYA_FKEYA(__x) \ + MCDE_VAL2REG(MCDE_FCOLKEYA, FKEYA, __x) +#define MCDE_FCOLKEYB 0x00000A0C +#define MCDE_FCOLKEYB_FKEYB_SHIFT 0 +#define MCDE_FCOLKEYB_FKEYB_MASK 0x000000FF +#define MCDE_FCOLKEYB_FKEYB(__x) \ + MCDE_VAL2REG(MCDE_FCOLKEYB, FKEYB, __x) +#define MCDE_FCOLKEYB_FKEYG_SHIFT 8 +#define MCDE_FCOLKEYB_FKEYG_MASK 0x0000FF00 +#define MCDE_FCOLKEYB_FKEYG(__x) \ + 
MCDE_VAL2REG(MCDE_FCOLKEYB, FKEYG, __x) +#define MCDE_FCOLKEYB_FKEYR_SHIFT 16 +#define MCDE_FCOLKEYB_FKEYR_MASK 0x00FF0000 +#define MCDE_FCOLKEYB_FKEYR(__x) \ + MCDE_VAL2REG(MCDE_FCOLKEYB, FKEYR, __x) +#define MCDE_FCOLKEYB_FKEYA_SHIFT 24 +#define MCDE_FCOLKEYB_FKEYA_MASK 0xFF000000 +#define MCDE_FCOLKEYB_FKEYA(__x) \ + MCDE_VAL2REG(MCDE_FCOLKEYB, FKEYA, __x) +#define MCDE_RGBCONV1A 0x00000810 +#define MCDE_RGBCONV1A_GROUPOFFSET 0x200 +#define MCDE_RGBCONV1A_YR_GREEN_SHIFT 0 +#define MCDE_RGBCONV1A_YR_GREEN_MASK 0x000007FF +#define MCDE_RGBCONV1A_YR_GREEN(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV1A, YR_GREEN, __x) +#define MCDE_RGBCONV1A_YR_RED_SHIFT 16 +#define MCDE_RGBCONV1A_YR_RED_MASK 0x07FF0000 +#define MCDE_RGBCONV1A_YR_RED(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV1A, YR_RED, __x) +#define MCDE_RGBCONV1B 0x00000A10 +#define MCDE_RGBCONV1B_YR_GREEN_SHIFT 0 +#define MCDE_RGBCONV1B_YR_GREEN_MASK 0x000007FF +#define MCDE_RGBCONV1B_YR_GREEN(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV1B, YR_GREEN, __x) +#define MCDE_RGBCONV1B_YR_RED_SHIFT 16 +#define MCDE_RGBCONV1B_YR_RED_MASK 0x07FF0000 +#define MCDE_RGBCONV1B_YR_RED(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV1B, YR_RED, __x) +#define MCDE_RGBCONV2A 0x00000814 +#define MCDE_RGBCONV2A_GROUPOFFSET 0x200 +#define MCDE_RGBCONV2A_CR_RED_SHIFT 0 +#define MCDE_RGBCONV2A_CR_RED_MASK 0x000007FF +#define MCDE_RGBCONV2A_CR_RED(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV2A, CR_RED, __x) +#define MCDE_RGBCONV2A_YR_BLUE_SHIFT 16 +#define MCDE_RGBCONV2A_YR_BLUE_MASK 0x07FF0000 +#define MCDE_RGBCONV2A_YR_BLUE(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV2A, YR_BLUE, __x) +#define MCDE_RGBCONV2B 0x00000A14 +#define MCDE_RGBCONV2B_CR_RED_SHIFT 0 +#define MCDE_RGBCONV2B_CR_RED_MASK 0x000007FF +#define MCDE_RGBCONV2B_CR_RED(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV2B, CR_RED, __x) +#define MCDE_RGBCONV2B_YR_BLUE_SHIFT 16 +#define MCDE_RGBCONV2B_YR_BLUE_MASK 0x07FF0000 +#define MCDE_RGBCONV2B_YR_BLUE(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV2B, YR_BLUE, __x) +#define MCDE_RGBCONV3A 0x00000818 
+#define MCDE_RGBCONV3A_GROUPOFFSET 0x200 +#define MCDE_RGBCONV3A_CR_BLUE_SHIFT 0 +#define MCDE_RGBCONV3A_CR_BLUE_MASK 0x000007FF +#define MCDE_RGBCONV3A_CR_BLUE(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV3A, CR_BLUE, __x) +#define MCDE_RGBCONV3A_CR_GREEN_SHIFT 16 +#define MCDE_RGBCONV3A_CR_GREEN_MASK 0x07FF0000 +#define MCDE_RGBCONV3A_CR_GREEN(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV3A, CR_GREEN, __x) +#define MCDE_RGBCONV3B 0x00000A18 +#define MCDE_RGBCONV3B_CR_BLUE_SHIFT 0 +#define MCDE_RGBCONV3B_CR_BLUE_MASK 0x000007FF +#define MCDE_RGBCONV3B_CR_BLUE(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV3B, CR_BLUE, __x) +#define MCDE_RGBCONV3B_CR_GREEN_SHIFT 16 +#define MCDE_RGBCONV3B_CR_GREEN_MASK 0x07FF0000 +#define MCDE_RGBCONV3B_CR_GREEN(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV3B, CR_GREEN, __x) +#define MCDE_RGBCONV4A 0x0000081C +#define MCDE_RGBCONV4A_GROUPOFFSET 0x200 +#define MCDE_RGBCONV4A_CB_GREEN_SHIFT 0 +#define MCDE_RGBCONV4A_CB_GREEN_MASK 0x000007FF +#define MCDE_RGBCONV4A_CB_GREEN(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV4A, CB_GREEN, __x) +#define MCDE_RGBCONV4A_CB_RED_SHIFT 16 +#define MCDE_RGBCONV4A_CB_RED_MASK 0x07FF0000 +#define MCDE_RGBCONV4A_CB_RED(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV4A, CB_RED, __x) +#define MCDE_RGBCONV4B 0x00000A1C +#define MCDE_RGBCONV4B_CB_GREEN_SHIFT 0 +#define MCDE_RGBCONV4B_CB_GREEN_MASK 0x000007FF +#define MCDE_RGBCONV4B_CB_GREEN(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV4B, CB_GREEN, __x) +#define MCDE_RGBCONV4B_CB_RED_SHIFT 16 +#define MCDE_RGBCONV4B_CB_RED_MASK 0x07FF0000 +#define MCDE_RGBCONV4B_CB_RED(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV4B, CB_RED, __x) +#define MCDE_RGBCONV5A 0x00000820 +#define MCDE_RGBCONV5A_GROUPOFFSET 0x200 +#define MCDE_RGBCONV5A_OFF_RED_SHIFT 0 +#define MCDE_RGBCONV5A_OFF_RED_MASK 0x000007FF +#define MCDE_RGBCONV5A_OFF_RED(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV5A, OFF_RED, __x) +#define MCDE_RGBCONV5A_CB_BLUE_SHIFT 16 +#define MCDE_RGBCONV5A_CB_BLUE_MASK 0x07FF0000 +#define MCDE_RGBCONV5A_CB_BLUE(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV5A, CB_BLUE, __x) 
+#define MCDE_RGBCONV5B 0x00000A20 +#define MCDE_RGBCONV5B_OFF_RED_SHIFT 0 +#define MCDE_RGBCONV5B_OFF_RED_MASK 0x000007FF +#define MCDE_RGBCONV5B_OFF_RED(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV5B, OFF_RED, __x) +#define MCDE_RGBCONV5B_CB_BLUE_SHIFT 16 +#define MCDE_RGBCONV5B_CB_BLUE_MASK 0x07FF0000 +#define MCDE_RGBCONV5B_CB_BLUE(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV5B, CB_BLUE, __x) +#define MCDE_RGBCONV6A 0x00000824 +#define MCDE_RGBCONV6A_GROUPOFFSET 0x200 +#define MCDE_RGBCONV6A_OFF_BLUE_SHIFT 0 +#define MCDE_RGBCONV6A_OFF_BLUE_MASK 0x000007FF +#define MCDE_RGBCONV6A_OFF_BLUE(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV6A, OFF_BLUE, __x) +#define MCDE_RGBCONV6A_OFF_GREEN_SHIFT 16 +#define MCDE_RGBCONV6A_OFF_GREEN_MASK 0x07FF0000 +#define MCDE_RGBCONV6A_OFF_GREEN(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV6A, OFF_GREEN, __x) +#define MCDE_RGBCONV6B 0x00000A24 +#define MCDE_RGBCONV6B_OFF_BLUE_SHIFT 0 +#define MCDE_RGBCONV6B_OFF_BLUE_MASK 0x000007FF +#define MCDE_RGBCONV6B_OFF_BLUE(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV6B, OFF_BLUE, __x) +#define MCDE_RGBCONV6B_OFF_GREEN_SHIFT 16 +#define MCDE_RGBCONV6B_OFF_GREEN_MASK 0x07FF0000 +#define MCDE_RGBCONV6B_OFF_GREEN(__x) \ + MCDE_VAL2REG(MCDE_RGBCONV6B, OFF_GREEN, __x) +#define MCDE_FFCOEF0 0x00000828 +#define MCDE_FFCOEF0_COEFF0_N1_SHIFT 0 +#define MCDE_FFCOEF0_COEFF0_N1_MASK 0x000000FF +#define MCDE_FFCOEF0_COEFF0_N1(__x) \ + MCDE_VAL2REG(MCDE_FFCOEF0, COEFF0_N1, __x) +#define MCDE_FFCOEF0_COEFF0_N2_SHIFT 8 +#define MCDE_FFCOEF0_COEFF0_N2_MASK 0x0000FF00 +#define MCDE_FFCOEF0_COEFF0_N2(__x) \ + MCDE_VAL2REG(MCDE_FFCOEF0, COEFF0_N2, __x) +#define MCDE_FFCOEF0_COEFF0_N3_SHIFT 16 +#define MCDE_FFCOEF0_COEFF0_N3_MASK 0x00FF0000 +#define MCDE_FFCOEF0_COEFF0_N3(__x) \ + MCDE_VAL2REG(MCDE_FFCOEF0, COEFF0_N3, __x) +#define MCDE_FFCOEF0_T0_SHIFT 24 +#define MCDE_FFCOEF0_T0_MASK 0x0F000000 +#define MCDE_FFCOEF0_T0(__x) \ + MCDE_VAL2REG(MCDE_FFCOEF0, T0, __x) +#define MCDE_FFCOEF1 0x0000082C +#define MCDE_FFCOEF1_COEFF1_N1_SHIFT 0 +#define 
MCDE_FFCOEF1_COEFF1_N1_MASK 0x000000FF +#define MCDE_FFCOEF1_COEFF1_N1(__x) \ + MCDE_VAL2REG(MCDE_FFCOEF1, COEFF1_N1, __x) +#define MCDE_FFCOEF1_COEFF1_N2_SHIFT 8 +#define MCDE_FFCOEF1_COEFF1_N2_MASK 0x0000FF00 +#define MCDE_FFCOEF1_COEFF1_N2(__x) \ + MCDE_VAL2REG(MCDE_FFCOEF1, COEFF1_N2, __x) +#define MCDE_FFCOEF1_COEFF1_N3_SHIFT 16 +#define MCDE_FFCOEF1_COEFF1_N3_MASK 0x00FF0000 +#define MCDE_FFCOEF1_COEFF1_N3(__x) \ + MCDE_VAL2REG(MCDE_FFCOEF1, COEFF1_N3, __x) +#define MCDE_FFCOEF1_T1_SHIFT 24 +#define MCDE_FFCOEF1_T1_MASK 0x0F000000 +#define MCDE_FFCOEF1_T1(__x) \ + MCDE_VAL2REG(MCDE_FFCOEF1, T1, __x) +#define MCDE_FFCOEF2 0x00000830 +#define MCDE_FFCOEF2_COEFF2_N1_SHIFT 0 +#define MCDE_FFCOEF2_COEFF2_N1_MASK 0x000000FF +#define MCDE_FFCOEF2_COEFF2_N1(__x) \ + MCDE_VAL2REG(MCDE_FFCOEF2, COEFF2_N1, __x) +#define MCDE_FFCOEF2_COEFF2_N2_SHIFT 8 +#define MCDE_FFCOEF2_COEFF2_N2_MASK 0x0000FF00 +#define MCDE_FFCOEF2_COEFF2_N2(__x) \ + MCDE_VAL2REG(MCDE_FFCOEF2, COEFF2_N2, __x) +#define MCDE_FFCOEF2_COEFF2_N3_SHIFT 16 +#define MCDE_FFCOEF2_COEFF2_N3_MASK 0x00FF0000 +#define MCDE_FFCOEF2_COEFF2_N3(__x) \ + MCDE_VAL2REG(MCDE_FFCOEF2, COEFF2_N3, __x) +#define MCDE_FFCOEF2_T2_SHIFT 24 +#define MCDE_FFCOEF2_T2_MASK 0x0F000000 +#define MCDE_FFCOEF2_T2(__x) \ + MCDE_VAL2REG(MCDE_FFCOEF2, T2, __x) +#define MCDE_MCDE_WDATAA 0x00000834 +#define MCDE_MCDE_WDATAA_GROUPOFFSET 0x200 +#define MCDE_MCDE_WDATAA_DC_SHIFT 24 +#define MCDE_MCDE_WDATAA_DC_MASK 0x01000000 +#define MCDE_MCDE_WDATAA_DC(__x) \ + MCDE_VAL2REG(MCDE_MCDE_WDATAA, DC, __x) +#define MCDE_MCDE_WDATAA_DATAVALUE_SHIFT 0 +#define MCDE_MCDE_WDATAA_DATAVALUE_MASK 0x00FFFFFF +#define MCDE_MCDE_WDATAA_DATAVALUE(__x) \ + MCDE_VAL2REG(MCDE_MCDE_WDATAA, DATAVALUE, __x) +#define MCDE_MCDE_WDATAB 0x00000A34 +#define MCDE_MCDE_WDATAB_DC_SHIFT 24 +#define MCDE_MCDE_WDATAB_DC_MASK 0x01000000 +#define MCDE_MCDE_WDATAB_DC(__x) \ + MCDE_VAL2REG(MCDE_MCDE_WDATAB, DC, __x) +#define MCDE_MCDE_WDATAB_DATAVALUE_SHIFT 0 +#define 
MCDE_MCDE_WDATAB_DATAVALUE_MASK 0x00FFFFFF +#define MCDE_MCDE_WDATAB_DATAVALUE(__x) \ + MCDE_VAL2REG(MCDE_MCDE_WDATAB, DATAVALUE, __x) +#define MCDE_TVCRA 0x00000838 +#define MCDE_TVCRA_GROUPOFFSET 0x200 +#define MCDE_TVCRA_SEL_MOD_SHIFT 0 +#define MCDE_TVCRA_SEL_MOD_MASK 0x00000001 +#define MCDE_TVCRA_SEL_MOD_LCD 0 +#define MCDE_TVCRA_SEL_MOD_TV 1 +#define MCDE_TVCRA_SEL_MOD_ENUM(__x) \ + MCDE_VAL2REG(MCDE_TVCRA, SEL_MOD, MCDE_TVCRA_SEL_MOD_##__x) +#define MCDE_TVCRA_SEL_MOD(__x) \ + MCDE_VAL2REG(MCDE_TVCRA, SEL_MOD, __x) +#define MCDE_TVCRA_INTEREN_SHIFT 1 +#define MCDE_TVCRA_INTEREN_MASK 0x00000002 +#define MCDE_TVCRA_INTEREN(__x) \ + MCDE_VAL2REG(MCDE_TVCRA, INTEREN, __x) +#define MCDE_TVCRA_IFIELD_SHIFT 2 +#define MCDE_TVCRA_IFIELD_MASK 0x00000004 +#define MCDE_TVCRA_IFIELD(__x) \ + MCDE_VAL2REG(MCDE_TVCRA, IFIELD, __x) +#define MCDE_TVCRA_TVMODE_SHIFT 3 +#define MCDE_TVCRA_TVMODE_MASK 0x00000038 +#define MCDE_TVCRA_TVMODE_SDTV_656P 0 +#define MCDE_TVCRA_TVMODE_SDTV_656P_LE 3 +#define MCDE_TVCRA_TVMODE_SDTV_656P_BE 4 +#define MCDE_TVCRA_TVMODE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_TVCRA, TVMODE, MCDE_TVCRA_TVMODE_##__x) +#define MCDE_TVCRA_TVMODE(__x) \ + MCDE_VAL2REG(MCDE_TVCRA, TVMODE, __x) +#define MCDE_TVCRA_SDTVMODE_SHIFT 6 +#define MCDE_TVCRA_SDTVMODE_MASK 0x000000C0 +#define MCDE_TVCRA_SDTVMODE_Y0CBY1CR 0 +#define MCDE_TVCRA_SDTVMODE_CBY0CRY1 1 +#define MCDE_TVCRA_SDTVMODE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_TVCRA, SDTVMODE, MCDE_TVCRA_SDTVMODE_##__x) +#define MCDE_TVCRA_SDTVMODE(__x) \ + MCDE_VAL2REG(MCDE_TVCRA, SDTVMODE, __x) +#define MCDE_TVCRA_AVRGEN_SHIFT 8 +#define MCDE_TVCRA_AVRGEN_MASK 0x00000100 +#define MCDE_TVCRA_AVRGEN(__x) \ + MCDE_VAL2REG(MCDE_TVCRA, AVRGEN, __x) +#define MCDE_TVCRA_CKINV_SHIFT 9 +#define MCDE_TVCRA_CKINV_MASK 0x00000200 +#define MCDE_TVCRA_CKINV(__x) \ + MCDE_VAL2REG(MCDE_TVCRA, CKINV, __x) +#define MCDE_TVCRB 0x00000A38 +#define MCDE_TVCRB_SEL_MOD_SHIFT 0 +#define MCDE_TVCRB_SEL_MOD_MASK 0x00000001 +#define MCDE_TVCRB_SEL_MOD_LCD 
0 +#define MCDE_TVCRB_SEL_MOD_TV 1 +#define MCDE_TVCRB_SEL_MOD_ENUM(__x) \ + MCDE_VAL2REG(MCDE_TVCRB, SEL_MOD, MCDE_TVCRB_SEL_MOD_##__x) +#define MCDE_TVCRB_SEL_MOD(__x) \ + MCDE_VAL2REG(MCDE_TVCRB, SEL_MOD, __x) +#define MCDE_TVCRB_INTEREN_SHIFT 1 +#define MCDE_TVCRB_INTEREN_MASK 0x00000002 +#define MCDE_TVCRB_INTEREN(__x) \ + MCDE_VAL2REG(MCDE_TVCRB, INTEREN, __x) +#define MCDE_TVCRB_IFIELD_SHIFT 2 +#define MCDE_TVCRB_IFIELD_MASK 0x00000004 +#define MCDE_TVCRB_IFIELD(__x) \ + MCDE_VAL2REG(MCDE_TVCRB, IFIELD, __x) +#define MCDE_TVCRB_TVMODE_SHIFT 3 +#define MCDE_TVCRB_TVMODE_MASK 0x00000038 +#define MCDE_TVCRB_TVMODE_SDTV_656P 0 +#define MCDE_TVCRB_TVMODE_SDTV_656P_LE 3 +#define MCDE_TVCRB_TVMODE_SDTV_656P_BE 4 +#define MCDE_TVCRB_TVMODE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_TVCRB, TVMODE, MCDE_TVCRB_TVMODE_##__x) +#define MCDE_TVCRB_TVMODE(__x) \ + MCDE_VAL2REG(MCDE_TVCRB, TVMODE, __x) +#define MCDE_TVCRB_SDTVMODE_SHIFT 6 +#define MCDE_TVCRB_SDTVMODE_MASK 0x000000C0 +#define MCDE_TVCRB_SDTVMODE_Y0CBY1CR 0 +#define MCDE_TVCRB_SDTVMODE_CBY0CRY1 1 +#define MCDE_TVCRB_SDTVMODE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_TVCRB, SDTVMODE, MCDE_TVCRB_SDTVMODE_##__x) +#define MCDE_TVCRB_SDTVMODE(__x) \ + MCDE_VAL2REG(MCDE_TVCRB, SDTVMODE, __x) +#define MCDE_TVCRB_AVRGEN_SHIFT 8 +#define MCDE_TVCRB_AVRGEN_MASK 0x00000100 +#define MCDE_TVCRB_AVRGEN(__x) \ + MCDE_VAL2REG(MCDE_TVCRB, AVRGEN, __x) +#define MCDE_TVCRB_CKINV_SHIFT 9 +#define MCDE_TVCRB_CKINV_MASK 0x00000200 +#define MCDE_TVCRB_CKINV(__x) \ + MCDE_VAL2REG(MCDE_TVCRB, CKINV, __x) +#define MCDE_TVBL1A 0x0000083C +#define MCDE_TVBL1A_GROUPOFFSET 0x200 +#define MCDE_TVBL1A_BEL1_SHIFT 0 +#define MCDE_TVBL1A_BEL1_MASK 0x000007FF +#define MCDE_TVBL1A_BEL1(__x) \ + MCDE_VAL2REG(MCDE_TVBL1A, BEL1, __x) +#define MCDE_TVBL1A_BSL1_SHIFT 16 +#define MCDE_TVBL1A_BSL1_MASK 0x07FF0000 +#define MCDE_TVBL1A_BSL1(__x) \ + MCDE_VAL2REG(MCDE_TVBL1A, BSL1, __x) +#define MCDE_TVBL1B 0x00000A3C +#define MCDE_TVBL1B_BEL1_SHIFT 0 +#define 
MCDE_TVBL1B_BEL1_MASK 0x000007FF +#define MCDE_TVBL1B_BEL1(__x) \ + MCDE_VAL2REG(MCDE_TVBL1B, BEL1, __x) +#define MCDE_TVBL1B_BSL1_SHIFT 16 +#define MCDE_TVBL1B_BSL1_MASK 0x07FF0000 +#define MCDE_TVBL1B_BSL1(__x) \ + MCDE_VAL2REG(MCDE_TVBL1B, BSL1, __x) +#define MCDE_TVISLA 0x00000840 +#define MCDE_TVISLA_GROUPOFFSET 0x200 +#define MCDE_TVISLA_FSL1_SHIFT 0 +#define MCDE_TVISLA_FSL1_MASK 0x000007FF +#define MCDE_TVISLA_FSL1(__x) \ + MCDE_VAL2REG(MCDE_TVISLA, FSL1, __x) +#define MCDE_TVISLA_FSL2_SHIFT 16 +#define MCDE_TVISLA_FSL2_MASK 0x07FF0000 +#define MCDE_TVISLA_FSL2(__x) \ + MCDE_VAL2REG(MCDE_TVISLA, FSL2, __x) +#define MCDE_TVISLB 0x00000A40 +#define MCDE_TVISLB_FSL1_SHIFT 0 +#define MCDE_TVISLB_FSL1_MASK 0x000007FF +#define MCDE_TVISLB_FSL1(__x) \ + MCDE_VAL2REG(MCDE_TVISLB, FSL1, __x) +#define MCDE_TVISLB_FSL2_SHIFT 16 +#define MCDE_TVISLB_FSL2_MASK 0x07FF0000 +#define MCDE_TVISLB_FSL2(__x) \ + MCDE_VAL2REG(MCDE_TVISLB, FSL2, __x) +#define MCDE_TVDVOA 0x00000844 +#define MCDE_TVDVOA_GROUPOFFSET 0x200 +#define MCDE_TVDVOA_DVO1_SHIFT 0 +#define MCDE_TVDVOA_DVO1_MASK 0x000007FF +#define MCDE_TVDVOA_DVO1(__x) \ + MCDE_VAL2REG(MCDE_TVDVOA, DVO1, __x) +#define MCDE_TVDVOA_DVO2_SHIFT 16 +#define MCDE_TVDVOA_DVO2_MASK 0x07FF0000 +#define MCDE_TVDVOA_DVO2(__x) \ + MCDE_VAL2REG(MCDE_TVDVOA, DVO2, __x) +#define MCDE_TVDVOB 0x00000A44 +#define MCDE_TVDVOB_DVO1_SHIFT 0 +#define MCDE_TVDVOB_DVO1_MASK 0x000007FF +#define MCDE_TVDVOB_DVO1(__x) \ + MCDE_VAL2REG(MCDE_TVDVOB, DVO1, __x) +#define MCDE_TVDVOB_DVO2_SHIFT 16 +#define MCDE_TVDVOB_DVO2_MASK 0x07FF0000 +#define MCDE_TVDVOB_DVO2(__x) \ + MCDE_VAL2REG(MCDE_TVDVOB, DVO2, __x) +#define MCDE_TVTIM1A 0x0000084C +#define MCDE_TVTIM1A_GROUPOFFSET 0x200 +#define MCDE_TVTIM1A_DHO_SHIFT 0 +#define MCDE_TVTIM1A_DHO_MASK 0x000007FF +#define MCDE_TVTIM1A_DHO(__x) \ + MCDE_VAL2REG(MCDE_TVTIM1A, DHO, __x) +#define MCDE_TVTIM1B 0x00000A4C +#define MCDE_TVTIM1B_DHO_SHIFT 0 +#define MCDE_TVTIM1B_DHO_MASK 0x000007FF +#define 
MCDE_TVTIM1B_DHO(__x) \ + MCDE_VAL2REG(MCDE_TVTIM1B, DHO, __x) +#define MCDE_TVLBALWA 0x00000850 +#define MCDE_TVLBALWA_GROUPOFFSET 0x200 +#define MCDE_TVLBALWA_LBW_SHIFT 0 +#define MCDE_TVLBALWA_LBW_MASK 0x000007FF +#define MCDE_TVLBALWA_LBW(__x) \ + MCDE_VAL2REG(MCDE_TVLBALWA, LBW, __x) +#define MCDE_TVLBALWA_ALW_SHIFT 16 +#define MCDE_TVLBALWA_ALW_MASK 0x07FF0000 +#define MCDE_TVLBALWA_ALW(__x) \ + MCDE_VAL2REG(MCDE_TVLBALWA, ALW, __x) +#define MCDE_TVLBALWB 0x00000A50 +#define MCDE_TVLBALWB_LBW_SHIFT 0 +#define MCDE_TVLBALWB_LBW_MASK 0x000007FF +#define MCDE_TVLBALWB_LBW(__x) \ + MCDE_VAL2REG(MCDE_TVLBALWB, LBW, __x) +#define MCDE_TVLBALWB_ALW_SHIFT 16 +#define MCDE_TVLBALWB_ALW_MASK 0x07FF0000 +#define MCDE_TVLBALWB_ALW(__x) \ + MCDE_VAL2REG(MCDE_TVLBALWB, ALW, __x) +#define MCDE_TVBL2A 0x00000854 +#define MCDE_TVBL2A_GROUPOFFSET 0x200 +#define MCDE_TVBL2A_BEL2_SHIFT 0 +#define MCDE_TVBL2A_BEL2_MASK 0x000007FF +#define MCDE_TVBL2A_BEL2(__x) \ + MCDE_VAL2REG(MCDE_TVBL2A, BEL2, __x) +#define MCDE_TVBL2A_BSL2_SHIFT 16 +#define MCDE_TVBL2A_BSL2_MASK 0x07FF0000 +#define MCDE_TVBL2A_BSL2(__x) \ + MCDE_VAL2REG(MCDE_TVBL2A, BSL2, __x) +#define MCDE_TVBL2B 0x00000A54 +#define MCDE_TVBL2B_BEL2_SHIFT 0 +#define MCDE_TVBL2B_BEL2_MASK 0x000007FF +#define MCDE_TVBL2B_BEL2(__x) \ + MCDE_VAL2REG(MCDE_TVBL2B, BEL2, __x) +#define MCDE_TVBL2B_BSL2_SHIFT 16 +#define MCDE_TVBL2B_BSL2_MASK 0x07FF0000 +#define MCDE_TVBL2B_BSL2(__x) \ + MCDE_VAL2REG(MCDE_TVBL2B, BSL2, __x) +#define MCDE_TVBLUA 0x00000858 +#define MCDE_TVBLUA_GROUPOFFSET 0x200 +#define MCDE_TVBLUA_TVBLU_SHIFT 0 +#define MCDE_TVBLUA_TVBLU_MASK 0x000000FF +#define MCDE_TVBLUA_TVBLU(__x) \ + MCDE_VAL2REG(MCDE_TVBLUA, TVBLU, __x) +#define MCDE_TVBLUA_TVBCB_SHIFT 8 +#define MCDE_TVBLUA_TVBCB_MASK 0x0000FF00 +#define MCDE_TVBLUA_TVBCB(__x) \ + MCDE_VAL2REG(MCDE_TVBLUA, TVBCB, __x) +#define MCDE_TVBLUA_TVBCR_SHIFT 16 +#define MCDE_TVBLUA_TVBCR_MASK 0x00FF0000 +#define MCDE_TVBLUA_TVBCR(__x) \ + MCDE_VAL2REG(MCDE_TVBLUA, 
TVBCR, __x) +#define MCDE_TVBLUB 0x00000A58 +#define MCDE_TVBLUB_TVBLU_SHIFT 0 +#define MCDE_TVBLUB_TVBLU_MASK 0x000000FF +#define MCDE_TVBLUB_TVBLU(__x) \ + MCDE_VAL2REG(MCDE_TVBLUB, TVBLU, __x) +#define MCDE_TVBLUB_TVBCB_SHIFT 8 +#define MCDE_TVBLUB_TVBCB_MASK 0x0000FF00 +#define MCDE_TVBLUB_TVBCB(__x) \ + MCDE_VAL2REG(MCDE_TVBLUB, TVBCB, __x) +#define MCDE_TVBLUB_TVBCR_SHIFT 16 +#define MCDE_TVBLUB_TVBCR_MASK 0x00FF0000 +#define MCDE_TVBLUB_TVBCR(__x) \ + MCDE_VAL2REG(MCDE_TVBLUB, TVBCR, __x) +#define MCDE_LCDTIM1A 0x00000860 +#define MCDE_LCDTIM1A_GROUPOFFSET 0x200 +#define MCDE_LCDTIM1A_IVP_SHIFT 19 +#define MCDE_LCDTIM1A_IVP_MASK 0x00080000 +#define MCDE_LCDTIM1A_IVP(__x) \ + MCDE_VAL2REG(MCDE_LCDTIM1A, IVP, __x) +#define MCDE_LCDTIM1A_IVS_SHIFT 20 +#define MCDE_LCDTIM1A_IVS_MASK 0x00100000 +#define MCDE_LCDTIM1A_IVS(__x) \ + MCDE_VAL2REG(MCDE_LCDTIM1A, IVS, __x) +#define MCDE_LCDTIM1A_IHS_SHIFT 21 +#define MCDE_LCDTIM1A_IHS_MASK 0x00200000 +#define MCDE_LCDTIM1A_IHS(__x) \ + MCDE_VAL2REG(MCDE_LCDTIM1A, IHS, __x) +#define MCDE_LCDTIM1A_IPC_SHIFT 22 +#define MCDE_LCDTIM1A_IPC_MASK 0x00400000 +#define MCDE_LCDTIM1A_IPC(__x) \ + MCDE_VAL2REG(MCDE_LCDTIM1A, IPC, __x) +#define MCDE_LCDTIM1A_IOE_SHIFT 23 +#define MCDE_LCDTIM1A_IOE_MASK 0x00800000 +#define MCDE_LCDTIM1A_IOE(__x) \ + MCDE_VAL2REG(MCDE_LCDTIM1A, IOE, __x) +#define MCDE_LCDTIM1B 0x00000A60 +#define MCDE_LCDTIM1B_IVP_SHIFT 19 +#define MCDE_LCDTIM1B_IVP_MASK 0x00080000 +#define MCDE_LCDTIM1B_IVP(__x) \ + MCDE_VAL2REG(MCDE_LCDTIM1B, IVP, __x) +#define MCDE_LCDTIM1B_IVS_SHIFT 20 +#define MCDE_LCDTIM1B_IVS_MASK 0x00100000 +#define MCDE_LCDTIM1B_IVS(__x) \ + MCDE_VAL2REG(MCDE_LCDTIM1B, IVS, __x) +#define MCDE_LCDTIM1B_IHS_SHIFT 21 +#define MCDE_LCDTIM1B_IHS_MASK 0x00200000 +#define MCDE_LCDTIM1B_IHS(__x) \ + MCDE_VAL2REG(MCDE_LCDTIM1B, IHS, __x) +#define MCDE_LCDTIM1B_IPC_SHIFT 22 +#define MCDE_LCDTIM1B_IPC_MASK 0x00400000 +#define MCDE_LCDTIM1B_IPC(__x) \ + MCDE_VAL2REG(MCDE_LCDTIM1B, IPC, __x) +#define 
MCDE_LCDTIM1B_IOE_SHIFT 23 +#define MCDE_LCDTIM1B_IOE_MASK 0x00800000 +#define MCDE_LCDTIM1B_IOE(__x) \ + MCDE_VAL2REG(MCDE_LCDTIM1B, IOE, __x) +#define MCDE_DITCTRLA 0x00000864 +#define MCDE_DITCTRLA_GROUPOFFSET 0x200 +#define MCDE_DITCTRLA_TEMP_SHIFT 0 +#define MCDE_DITCTRLA_TEMP_MASK 0x00000001 +#define MCDE_DITCTRLA_TEMP(__x) \ + MCDE_VAL2REG(MCDE_DITCTRLA, TEMP, __x) +#define MCDE_DITCTRLA_COMP_SHIFT 1 +#define MCDE_DITCTRLA_COMP_MASK 0x00000002 +#define MCDE_DITCTRLA_COMP(__x) \ + MCDE_VAL2REG(MCDE_DITCTRLA, COMP, __x) +#define MCDE_DITCTRLA_MODE_SHIFT 2 +#define MCDE_DITCTRLA_MODE_MASK 0x0000000C +#define MCDE_DITCTRLA_MODE(__x) \ + MCDE_VAL2REG(MCDE_DITCTRLA, MODE, __x) +#define MCDE_DITCTRLA_MASK_SHIFT 4 +#define MCDE_DITCTRLA_MASK_MASK 0x00000010 +#define MCDE_DITCTRLA_MASK(__x) \ + MCDE_VAL2REG(MCDE_DITCTRLA, MASK, __x) +#define MCDE_DITCTRLA_FOFFX_SHIFT 5 +#define MCDE_DITCTRLA_FOFFX_MASK 0x000003E0 +#define MCDE_DITCTRLA_FOFFX(__x) \ + MCDE_VAL2REG(MCDE_DITCTRLA, FOFFX, __x) +#define MCDE_DITCTRLA_FOFFY_SHIFT 10 +#define MCDE_DITCTRLA_FOFFY_MASK 0x00007C00 +#define MCDE_DITCTRLA_FOFFY(__x) \ + MCDE_VAL2REG(MCDE_DITCTRLA, FOFFY, __x) +#define MCDE_DITCTRLB 0x00000A64 +#define MCDE_DITCTRLB_TEMP_SHIFT 0 +#define MCDE_DITCTRLB_TEMP_MASK 0x00000001 +#define MCDE_DITCTRLB_TEMP(__x) \ + MCDE_VAL2REG(MCDE_DITCTRLB, TEMP, __x) +#define MCDE_DITCTRLB_COMP_SHIFT 1 +#define MCDE_DITCTRLB_COMP_MASK 0x00000002 +#define MCDE_DITCTRLB_COMP(__x) \ + MCDE_VAL2REG(MCDE_DITCTRLB, COMP, __x) +#define MCDE_DITCTRLB_MODE_SHIFT 2 +#define MCDE_DITCTRLB_MODE_MASK 0x0000000C +#define MCDE_DITCTRLB_MODE(__x) \ + MCDE_VAL2REG(MCDE_DITCTRLB, MODE, __x) +#define MCDE_DITCTRLB_MASK_SHIFT 4 +#define MCDE_DITCTRLB_MASK_MASK 0x00000010 +#define MCDE_DITCTRLB_MASK(__x) \ + MCDE_VAL2REG(MCDE_DITCTRLB, MASK, __x) +#define MCDE_DITCTRLB_FOFFX_SHIFT 5 +#define MCDE_DITCTRLB_FOFFX_MASK 0x000003E0 +#define MCDE_DITCTRLB_FOFFX(__x) \ + MCDE_VAL2REG(MCDE_DITCTRLB, FOFFX, __x) +#define 
MCDE_DITCTRLB_FOFFY_SHIFT 10 +#define MCDE_DITCTRLB_FOFFY_MASK 0x00007C00 +#define MCDE_DITCTRLB_FOFFY(__x) \ + MCDE_VAL2REG(MCDE_DITCTRLB, FOFFY, __x) +#define MCDE_DITOFFA 0x00000868 +#define MCDE_DITOFFA_GROUPOFFSET 0x200 +#define MCDE_DITOFFA_XG_SHIFT 0 +#define MCDE_DITOFFA_XG_MASK 0x0000001F +#define MCDE_DITOFFA_XG(__x) \ + MCDE_VAL2REG(MCDE_DITOFFA, XG, __x) +#define MCDE_DITOFFA_YG_SHIFT 8 +#define MCDE_DITOFFA_YG_MASK 0x00001F00 +#define MCDE_DITOFFA_YG(__x) \ + MCDE_VAL2REG(MCDE_DITOFFA, YG, __x) +#define MCDE_DITOFFA_XB_SHIFT 16 +#define MCDE_DITOFFA_XB_MASK 0x001F0000 +#define MCDE_DITOFFA_XB(__x) \ + MCDE_VAL2REG(MCDE_DITOFFA, XB, __x) +#define MCDE_DITOFFA_YB_SHIFT 24 +#define MCDE_DITOFFA_YB_MASK 0x1F000000 +#define MCDE_DITOFFA_YB(__x) \ + MCDE_VAL2REG(MCDE_DITOFFA, YB, __x) +#define MCDE_DITOFFB 0x00000A68 +#define MCDE_DITOFFB_XG_SHIFT 0 +#define MCDE_DITOFFB_XG_MASK 0x0000001F +#define MCDE_DITOFFB_XG(__x) \ + MCDE_VAL2REG(MCDE_DITOFFB, XG, __x) +#define MCDE_DITOFFB_YG_SHIFT 8 +#define MCDE_DITOFFB_YG_MASK 0x00001F00 +#define MCDE_DITOFFB_YG(__x) \ + MCDE_VAL2REG(MCDE_DITOFFB, YG, __x) +#define MCDE_DITOFFB_XB_SHIFT 16 +#define MCDE_DITOFFB_XB_MASK 0x001F0000 +#define MCDE_DITOFFB_XB(__x) \ + MCDE_VAL2REG(MCDE_DITOFFB, XB, __x) +#define MCDE_DITOFFB_YB_SHIFT 24 +#define MCDE_DITOFFB_YB_MASK 0x1F000000 +#define MCDE_DITOFFB_YB(__x) \ + MCDE_VAL2REG(MCDE_DITOFFB, YB, __x) +#define MCDE_PAL0A 0x0000086C +#define MCDE_PAL0A_GROUPOFFSET 0x200 +#define MCDE_PAL0A_BLUE_SHIFT 0 +#define MCDE_PAL0A_BLUE_MASK 0x00000FFF +#define MCDE_PAL0A_BLUE(__x) \ + MCDE_VAL2REG(MCDE_PAL0A, BLUE, __x) +#define MCDE_PAL0A_GREEN_SHIFT 16 +#define MCDE_PAL0A_GREEN_MASK 0x0FFF0000 +#define MCDE_PAL0A_GREEN(__x) \ + MCDE_VAL2REG(MCDE_PAL0A, GREEN, __x) +#define MCDE_PAL0B 0x00000A6C +#define MCDE_PAL0B_BLUE_SHIFT 0 +#define MCDE_PAL0B_BLUE_MASK 0x00000FFF +#define MCDE_PAL0B_BLUE(__x) \ + MCDE_VAL2REG(MCDE_PAL0B, BLUE, __x) +#define MCDE_PAL0B_GREEN_SHIFT 16 +#define 
MCDE_PAL0B_GREEN_MASK 0x0FFF0000 +#define MCDE_PAL0B_GREEN(__x) \ + MCDE_VAL2REG(MCDE_PAL0B, GREEN, __x) +#define MCDE_PAL1A 0x00000870 +#define MCDE_PAL1A_GROUPOFFSET 0x200 +#define MCDE_PAL1A_RED_SHIFT 0 +#define MCDE_PAL1A_RED_MASK 0x00000FFF +#define MCDE_PAL1A_RED(__x) \ + MCDE_VAL2REG(MCDE_PAL1A, RED, __x) +#define MCDE_PAL1B 0x00000A70 +#define MCDE_PAL1B_RED_SHIFT 0 +#define MCDE_PAL1B_RED_MASK 0x00000FFF +#define MCDE_PAL1B_RED(__x) \ + MCDE_VAL2REG(MCDE_PAL1B, RED, __x) +#define MCDE_ROTADD0A 0x00000874 +#define MCDE_ROTADD0A_GROUPOFFSET 0x200 +#define MCDE_ROTADD0A_ROTADD0_SHIFT 3 +#define MCDE_ROTADD0A_ROTADD0_MASK 0xFFFFFFF8 +#define MCDE_ROTADD0A_ROTADD0(__x) \ + MCDE_VAL2REG(MCDE_ROTADD0A, ROTADD0, __x) +#define MCDE_ROTADD0B 0x00000A74 +#define MCDE_ROTADD0B_ROTADD0_SHIFT 3 +#define MCDE_ROTADD0B_ROTADD0_MASK 0xFFFFFFF8 +#define MCDE_ROTADD0B_ROTADD0(__x) \ + MCDE_VAL2REG(MCDE_ROTADD0B, ROTADD0, __x) +#define MCDE_ROTADD1A 0x00000878 +#define MCDE_ROTADD1A_GROUPOFFSET 0x200 +#define MCDE_ROTADD1A_ROTADD1_SHIFT 3 +#define MCDE_ROTADD1A_ROTADD1_MASK 0xFFFFFFF8 +#define MCDE_ROTADD1A_ROTADD1(__x) \ + MCDE_VAL2REG(MCDE_ROTADD1A, ROTADD1, __x) +#define MCDE_ROTADD1B 0x00000A78 +#define MCDE_ROTADD1B_ROTADD1_SHIFT 3 +#define MCDE_ROTADD1B_ROTADD1_MASK 0xFFFFFFF8 +#define MCDE_ROTADD1B_ROTADD1(__x) \ + MCDE_VAL2REG(MCDE_ROTADD1B, ROTADD1, __x) +#define MCDE_ROTACONF 0x0000087C +#define MCDE_ROTACONF_GROUPOFFSET 0x200 +#define MCDE_ROTACONF_ROTBURSTSIZE_SHIFT 0 +#define MCDE_ROTACONF_ROTBURSTSIZE_MASK 0x00000007 +#define MCDE_ROTACONF_ROTBURSTSIZE_1W 0 +#define MCDE_ROTACONF_ROTBURSTSIZE_2W 1 +#define MCDE_ROTACONF_ROTBURSTSIZE_4W 2 +#define MCDE_ROTACONF_ROTBURSTSIZE_8W 3 +#define MCDE_ROTACONF_ROTBURSTSIZE_HW_1W 4 +#define MCDE_ROTACONF_ROTBURSTSIZE_HW_2W 5 +#define MCDE_ROTACONF_ROTBURSTSIZE_HW_4W 6 +#define MCDE_ROTACONF_ROTBURSTSIZE_HW_8W 7 +#define MCDE_ROTACONF_ROTBURSTSIZE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_ROTACONF, ROTBURSTSIZE, \ + 
MCDE_ROTACONF_ROTBURSTSIZE_##__x) +#define MCDE_ROTACONF_ROTBURSTSIZE(__x) \ + MCDE_VAL2REG(MCDE_ROTACONF, ROTBURSTSIZE, __x) +#define MCDE_ROTACONF_ROTDIR_SHIFT 3 +#define MCDE_ROTACONF_ROTDIR_MASK 0x00000008 +#define MCDE_ROTACONF_ROTDIR_CCW 0 +#define MCDE_ROTACONF_ROTDIR_CW 1 +#define MCDE_ROTACONF_ROTDIR_ENUM(__x) \ + MCDE_VAL2REG(MCDE_ROTACONF, ROTDIR, MCDE_ROTACONF_ROTDIR_##__x) +#define MCDE_ROTACONF_ROTDIR(__x) \ + MCDE_VAL2REG(MCDE_ROTACONF, ROTDIR, __x) +#define MCDE_ROTACONF_WR_MAXOUT_SHIFT 4 +#define MCDE_ROTACONF_WR_MAXOUT_MASK 0x00000030 +#define MCDE_ROTACONF_WR_MAXOUT_1_REQ 0 +#define MCDE_ROTACONF_WR_MAXOUT_2_REQ 1 +#define MCDE_ROTACONF_WR_MAXOUT_4_REQ 2 +#define MCDE_ROTACONF_WR_MAXOUT_8_REQ 3 +#define MCDE_ROTACONF_WR_MAXOUT_ENUM(__x) \ + MCDE_VAL2REG(MCDE_ROTACONF, WR_MAXOUT, MCDE_ROTACONF_WR_MAXOUT_##__x) +#define MCDE_ROTACONF_WR_MAXOUT(__x) \ + MCDE_VAL2REG(MCDE_ROTACONF, WR_MAXOUT, __x) +#define MCDE_ROTACONF_RD_MAXOUT_SHIFT 6 +#define MCDE_ROTACONF_RD_MAXOUT_MASK 0x000000C0 +#define MCDE_ROTACONF_RD_MAXOUT_1_REQ 0 +#define MCDE_ROTACONF_RD_MAXOUT_2_REQ 1 +#define MCDE_ROTACONF_RD_MAXOUT_4_REQ 2 +#define MCDE_ROTACONF_RD_MAXOUT_8_REQ 3 +#define MCDE_ROTACONF_RD_MAXOUT_ENUM(__x) \ + MCDE_VAL2REG(MCDE_ROTACONF, RD_MAXOUT, MCDE_ROTACONF_RD_MAXOUT_##__x) +#define MCDE_ROTACONF_RD_MAXOUT(__x) \ + MCDE_VAL2REG(MCDE_ROTACONF, RD_MAXOUT, __x) +#define MCDE_ROTACONF_STRIP_WIDTH_SHIFT 8 +#define MCDE_ROTACONF_STRIP_WIDTH_MASK 0x00007F00 +#define MCDE_ROTACONF_STRIP_WIDTH_2PIX 0 +#define MCDE_ROTACONF_STRIP_WIDTH_4PIX 1 +#define MCDE_ROTACONF_STRIP_WIDTH_8PIX 2 +#define MCDE_ROTACONF_STRIP_WIDTH_16PIX 3 +#define MCDE_ROTACONF_STRIP_WIDTH_32PIX 4 +#define MCDE_ROTACONF_STRIP_WIDTH_ENUM(__x) \ + MCDE_VAL2REG(MCDE_ROTACONF, STRIP_WIDTH, \ + MCDE_ROTACONF_STRIP_WIDTH_##__x) +#define MCDE_ROTACONF_STRIP_WIDTH(__x) \ + MCDE_VAL2REG(MCDE_ROTACONF, STRIP_WIDTH, __x) +#define MCDE_ROTACONF_SINGLE_BUF_SHIFT 15 +#define MCDE_ROTACONF_SINGLE_BUF_MASK 0x00008000 
+#define MCDE_ROTACONF_SINGLE_BUF(__x) \ + MCDE_VAL2REG(MCDE_ROTACONF, SINGLE_BUF, __x) +#define MCDE_ROTACONF_WR_ROPC_SHIFT 16 +#define MCDE_ROTACONF_WR_ROPC_MASK 0x00FF0000 +#define MCDE_ROTACONF_WR_ROPC(__x) \ + MCDE_VAL2REG(MCDE_ROTACONF, WR_ROPC, __x) +#define MCDE_ROTACONF_RD_ROPC_SHIFT 24 +#define MCDE_ROTACONF_RD_ROPC_MASK 0xFF000000 +#define MCDE_ROTACONF_RD_ROPC(__x) \ + MCDE_VAL2REG(MCDE_ROTACONF, RD_ROPC, __x) +#define MCDE_ROTBCONF 0x00000A7C +#define MCDE_ROTBCONF_ROTBURSTSIZE_SHIFT 0 +#define MCDE_ROTBCONF_ROTBURSTSIZE_MASK 0x00000007 +#define MCDE_ROTBCONF_ROTBURSTSIZE_1W 0 +#define MCDE_ROTBCONF_ROTBURSTSIZE_2W 1 +#define MCDE_ROTBCONF_ROTBURSTSIZE_4W 2 +#define MCDE_ROTBCONF_ROTBURSTSIZE_8W 3 +#define MCDE_ROTBCONF_ROTBURSTSIZE_HW_1W 4 +#define MCDE_ROTBCONF_ROTBURSTSIZE_HW_2W 5 +#define MCDE_ROTBCONF_ROTBURSTSIZE_HW_4W 6 +#define MCDE_ROTBCONF_ROTBURSTSIZE_HW_8W 7 +#define MCDE_ROTBCONF_ROTBURSTSIZE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_ROTBCONF, ROTBURSTSIZE, \ + MCDE_ROTBCONF_ROTBURSTSIZE_##__x) +#define MCDE_ROTBCONF_ROTBURSTSIZE(__x) \ + MCDE_VAL2REG(MCDE_ROTBCONF, ROTBURSTSIZE, __x) +#define MCDE_ROTBCONF_ROTDIR_SHIFT 3 +#define MCDE_ROTBCONF_ROTDIR_MASK 0x00000008 +#define MCDE_ROTBCONF_ROTDIR_CCW 0 +#define MCDE_ROTBCONF_ROTDIR_CW 1 +#define MCDE_ROTBCONF_ROTDIR_ENUM(__x) \ + MCDE_VAL2REG(MCDE_ROTBCONF, ROTDIR, MCDE_ROTBCONF_ROTDIR_##__x) +#define MCDE_ROTBCONF_ROTDIR(__x) \ + MCDE_VAL2REG(MCDE_ROTBCONF, ROTDIR, __x) +#define MCDE_ROTBCONF_WR_MAXOUT_SHIFT 4 +#define MCDE_ROTBCONF_WR_MAXOUT_MASK 0x00000030 +#define MCDE_ROTBCONF_WR_MAXOUT_1_REQ 0 +#define MCDE_ROTBCONF_WR_MAXOUT_2_REQ 1 +#define MCDE_ROTBCONF_WR_MAXOUT_4_REQ 2 +#define MCDE_ROTBCONF_WR_MAXOUT_8_REQ 3 +#define MCDE_ROTBCONF_WR_MAXOUT_ENUM(__x) \ + MCDE_VAL2REG(MCDE_ROTBCONF, WR_MAXOUT, MCDE_ROTBCONF_WR_MAXOUT_##__x) +#define MCDE_ROTBCONF_WR_MAXOUT(__x) \ + MCDE_VAL2REG(MCDE_ROTBCONF, WR_MAXOUT, __x) +#define MCDE_ROTBCONF_RD_MAXOUT_SHIFT 6 +#define MCDE_ROTBCONF_RD_MAXOUT_MASK 
0x000000C0 +#define MCDE_ROTBCONF_RD_MAXOUT_1_REQ 0 +#define MCDE_ROTBCONF_RD_MAXOUT_2_REQ 1 +#define MCDE_ROTBCONF_RD_MAXOUT_4_REQ 2 +#define MCDE_ROTBCONF_RD_MAXOUT_8_REQ 3 +#define MCDE_ROTBCONF_RD_MAXOUT_ENUM(__x) \ + MCDE_VAL2REG(MCDE_ROTBCONF, RD_MAXOUT, MCDE_ROTBCONF_RD_MAXOUT_##__x) +#define MCDE_ROTBCONF_RD_MAXOUT(__x) \ + MCDE_VAL2REG(MCDE_ROTBCONF, RD_MAXOUT, __x) +#define MCDE_ROTBCONF_STRIP_WIDTH_SHIFT 8 +#define MCDE_ROTBCONF_STRIP_WIDTH_MASK 0x00007F00 +#define MCDE_ROTBCONF_STRIP_WIDTH_2PIX 0 +#define MCDE_ROTBCONF_STRIP_WIDTH_4PIX 1 +#define MCDE_ROTBCONF_STRIP_WIDTH_8PIX 2 +#define MCDE_ROTBCONF_STRIP_WIDTH_16PIX 3 +#define MCDE_ROTBCONF_STRIP_WIDTH_32PIX 4 +#define MCDE_ROTBCONF_STRIP_WIDTH_ENUM(__x) \ + MCDE_VAL2REG(MCDE_ROTBCONF, STRIP_WIDTH, \ + MCDE_ROTBCONF_STRIP_WIDTH_##__x) +#define MCDE_ROTBCONF_STRIP_WIDTH(__x) \ + MCDE_VAL2REG(MCDE_ROTBCONF, STRIP_WIDTH, __x) +#define MCDE_ROTBCONF_SINGLE_BUF_SHIFT 15 +#define MCDE_ROTBCONF_SINGLE_BUF_MASK 0x00008000 +#define MCDE_ROTBCONF_SINGLE_BUF(__x) \ + MCDE_VAL2REG(MCDE_ROTBCONF, SINGLE_BUF, __x) +#define MCDE_ROTBCONF_WR_ROPC_SHIFT 16 +#define MCDE_ROTBCONF_WR_ROPC_MASK 0x00FF0000 +#define MCDE_ROTBCONF_WR_ROPC(__x) \ + MCDE_VAL2REG(MCDE_ROTBCONF, WR_ROPC, __x) +#define MCDE_ROTBCONF_RD_ROPC_SHIFT 24 +#define MCDE_ROTBCONF_RD_ROPC_MASK 0xFF000000 +#define MCDE_ROTBCONF_RD_ROPC(__x) \ + MCDE_VAL2REG(MCDE_ROTBCONF, RD_ROPC, __x) +#define MCDE_SYNCHCONFA 0x00000880 +#define MCDE_SYNCHCONFA_GROUPOFFSET 0x200 +#define MCDE_SYNCHCONFA_HWREQVEVENT_SHIFT 0 +#define MCDE_SYNCHCONFA_HWREQVEVENT_MASK 0x00000003 +#define MCDE_SYNCHCONFA_HWREQVEVENT_VSYNC 0 +#define MCDE_SYNCHCONFA_HWREQVEVENT_BACK_PORCH 1 +#define MCDE_SYNCHCONFA_HWREQVEVENT_ACTIVE_VIDEO 2 +#define MCDE_SYNCHCONFA_HWREQVEVENT_FRONT_PORCH 3 +#define MCDE_SYNCHCONFA_HWREQVEVENT_ENUM(__x) \ + MCDE_VAL2REG(MCDE_SYNCHCONFA, HWREQVEVENT, \ + MCDE_SYNCHCONFA_HWREQVEVENT_##__x) +#define MCDE_SYNCHCONFA_HWREQVEVENT(__x) \ + 
MCDE_VAL2REG(MCDE_SYNCHCONFA, HWREQVEVENT, __x) +#define MCDE_SYNCHCONFA_HWREQVCNT_SHIFT 2 +#define MCDE_SYNCHCONFA_HWREQVCNT_MASK 0x0000FFFC +#define MCDE_SYNCHCONFA_HWREQVCNT(__x) \ + MCDE_VAL2REG(MCDE_SYNCHCONFA, HWREQVCNT, __x) +#define MCDE_SYNCHCONFA_SWINTVEVENT_SHIFT 16 +#define MCDE_SYNCHCONFA_SWINTVEVENT_MASK 0x00030000 +#define MCDE_SYNCHCONFA_SWINTVEVENT_VSYNC 0 +#define MCDE_SYNCHCONFA_SWINTVEVENT_BACK_PORCH 1 +#define MCDE_SYNCHCONFA_SWINTVEVENT_ACTIVE_VIDEO 2 +#define MCDE_SYNCHCONFA_SWINTVEVENT_FRONT_PORCH 3 +#define MCDE_SYNCHCONFA_SWINTVEVENT_ENUM(__x) \ + MCDE_VAL2REG(MCDE_SYNCHCONFA, SWINTVEVENT, \ + MCDE_SYNCHCONFA_SWINTVEVENT_##__x) +#define MCDE_SYNCHCONFA_SWINTVEVENT(__x) \ + MCDE_VAL2REG(MCDE_SYNCHCONFA, SWINTVEVENT, __x) +#define MCDE_SYNCHCONFA_SWINTVCNT_SHIFT 18 +#define MCDE_SYNCHCONFA_SWINTVCNT_MASK 0xFFFC0000 +#define MCDE_SYNCHCONFA_SWINTVCNT(__x) \ + MCDE_VAL2REG(MCDE_SYNCHCONFA, SWINTVCNT, __x) +#define MCDE_SYNCHCONFB 0x00000A80 +#define MCDE_SYNCHCONFB_HWREQVEVENT_SHIFT 0 +#define MCDE_SYNCHCONFB_HWREQVEVENT_MASK 0x00000003 +#define MCDE_SYNCHCONFB_HWREQVEVENT_VSYNC 0 +#define MCDE_SYNCHCONFB_HWREQVEVENT_BACK_PORCH 1 +#define MCDE_SYNCHCONFB_HWREQVEVENT_ACTIVE_VIDEO 2 +#define MCDE_SYNCHCONFB_HWREQVEVENT_FRONT_PORCH 3 +#define MCDE_SYNCHCONFB_HWREQVEVENT_ENUM(__x) \ + MCDE_VAL2REG(MCDE_SYNCHCONFB, HWREQVEVENT, \ + MCDE_SYNCHCONFB_HWREQVEVENT_##__x) +#define MCDE_SYNCHCONFB_HWREQVEVENT(__x) \ + MCDE_VAL2REG(MCDE_SYNCHCONFB, HWREQVEVENT, __x) +#define MCDE_SYNCHCONFB_HWREQVCNT_SHIFT 2 +#define MCDE_SYNCHCONFB_HWREQVCNT_MASK 0x0000FFFC +#define MCDE_SYNCHCONFB_HWREQVCNT(__x) \ + MCDE_VAL2REG(MCDE_SYNCHCONFB, HWREQVCNT, __x) +#define MCDE_SYNCHCONFB_SWINTVEVENT_SHIFT 16 +#define MCDE_SYNCHCONFB_SWINTVEVENT_MASK 0x00030000 +#define MCDE_SYNCHCONFB_SWINTVEVENT_VSYNC 0 +#define MCDE_SYNCHCONFB_SWINTVEVENT_BACK_PORCH 1 +#define MCDE_SYNCHCONFB_SWINTVEVENT_ACTIVE_VIDEO 2 +#define MCDE_SYNCHCONFB_SWINTVEVENT_FRONT_PORCH 3 +#define 
MCDE_SYNCHCONFB_SWINTVEVENT_ENUM(__x) \ + MCDE_VAL2REG(MCDE_SYNCHCONFB, SWINTVEVENT, \ + MCDE_SYNCHCONFB_SWINTVEVENT_##__x) +#define MCDE_SYNCHCONFB_SWINTVEVENT(__x) \ + MCDE_VAL2REG(MCDE_SYNCHCONFB, SWINTVEVENT, __x) +#define MCDE_SYNCHCONFB_SWINTVCNT_SHIFT 18 +#define MCDE_SYNCHCONFB_SWINTVCNT_MASK 0xFFFC0000 +#define MCDE_SYNCHCONFB_SWINTVCNT(__x) \ + MCDE_VAL2REG(MCDE_SYNCHCONFB, SWINTVCNT, __x) +#define MCDE_CTRLA 0x00000884 +#define MCDE_CTRLA_GROUPOFFSET 0x200 +#define MCDE_CTRLA_FIFOWTRMRK_SHIFT 0 +#define MCDE_CTRLA_FIFOWTRMRK_MASK 0x000003FF +#define MCDE_CTRLA_FIFOWTRMRK(__x) \ + MCDE_VAL2REG(MCDE_CTRLA, FIFOWTRMRK, __x) +#define MCDE_CTRLA_FIFOEMPTY_SHIFT 12 +#define MCDE_CTRLA_FIFOEMPTY_MASK 0x00001000 +#define MCDE_CTRLA_FIFOEMPTY(__x) \ + MCDE_VAL2REG(MCDE_CTRLA, FIFOEMPTY, __x) +#define MCDE_CTRLA_FIFOFULL_SHIFT 13 +#define MCDE_CTRLA_FIFOFULL_MASK 0x00002000 +#define MCDE_CTRLA_FIFOFULL(__x) \ + MCDE_VAL2REG(MCDE_CTRLA, FIFOFULL, __x) +#define MCDE_CTRLA_FORMID_SHIFT 16 +#define MCDE_CTRLA_FORMID_MASK 0x00070000 +#define MCDE_CTRLA_FORMID_DSI0VID 0 +#define MCDE_CTRLA_FORMID_DSI0CMD 1 +#define MCDE_CTRLA_FORMID_DSI1VID 2 +#define MCDE_CTRLA_FORMID_DSI1CMD 3 +#define MCDE_CTRLA_FORMID_DSI2VID 4 +#define MCDE_CTRLA_FORMID_DSI2CMD 5 +#define MCDE_CTRLA_FORMID_DPIA 0 +#define MCDE_CTRLA_FORMID_DPIB 1 +#define MCDE_CTRLA_FORMID_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CTRLA, FORMID, MCDE_CTRLA_FORMID_##__x) +#define MCDE_CTRLA_FORMID(__x) \ + MCDE_VAL2REG(MCDE_CTRLA, FORMID, __x) +#define MCDE_CTRLA_FORMTYPE_SHIFT 20 +#define MCDE_CTRLA_FORMTYPE_MASK 0x00700000 +#define MCDE_CTRLA_FORMTYPE_DPITV 0 +#define MCDE_CTRLA_FORMTYPE_DBI 1 +#define MCDE_CTRLA_FORMTYPE_DSI 2 +#define MCDE_CTRLA_FORMTYPE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CTRLA, FORMTYPE, MCDE_CTRLA_FORMTYPE_##__x) +#define MCDE_CTRLA_FORMTYPE(__x) \ + MCDE_VAL2REG(MCDE_CTRLA, FORMTYPE, __x) +#define MCDE_CTRLB 0x00000A84 +#define MCDE_CTRLB_FIFOWTRMRK_SHIFT 0 +#define MCDE_CTRLB_FIFOWTRMRK_MASK 0x000003FF 
+#define MCDE_CTRLB_FIFOWTRMRK(__x) \ + MCDE_VAL2REG(MCDE_CTRLB, FIFOWTRMRK, __x) +#define MCDE_CTRLB_FIFOEMPTY_SHIFT 12 +#define MCDE_CTRLB_FIFOEMPTY_MASK 0x00001000 +#define MCDE_CTRLB_FIFOEMPTY(__x) \ + MCDE_VAL2REG(MCDE_CTRLB, FIFOEMPTY, __x) +#define MCDE_CTRLB_FIFOFULL_SHIFT 13 +#define MCDE_CTRLB_FIFOFULL_MASK 0x00002000 +#define MCDE_CTRLB_FIFOFULL(__x) \ + MCDE_VAL2REG(MCDE_CTRLB, FIFOFULL, __x) +#define MCDE_CTRLB_FORMID_SHIFT 16 +#define MCDE_CTRLB_FORMID_MASK 0x00070000 +#define MCDE_CTRLB_FORMID_DSI0VID 0 +#define MCDE_CTRLB_FORMID_DSI0CMD 1 +#define MCDE_CTRLB_FORMID_DSI1VID 2 +#define MCDE_CTRLB_FORMID_DSI1CMD 3 +#define MCDE_CTRLB_FORMID_DSI2VID 4 +#define MCDE_CTRLB_FORMID_DSI2CMD 5 +#define MCDE_CTRLB_FORMID_DPIA 0 +#define MCDE_CTRLB_FORMID_DPIB 1 +#define MCDE_CTRLB_FORMID_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CTRLB, FORMID, MCDE_CTRLB_FORMID_##__x) +#define MCDE_CTRLB_FORMID(__x) \ + MCDE_VAL2REG(MCDE_CTRLB, FORMID, __x) +#define MCDE_CTRLB_FORMTYPE_SHIFT 20 +#define MCDE_CTRLB_FORMTYPE_MASK 0x00700000 +#define MCDE_CTRLB_FORMTYPE_DPITV 0 +#define MCDE_CTRLB_FORMTYPE_DBI 1 +#define MCDE_CTRLB_FORMTYPE_DSI 2 +#define MCDE_CTRLB_FORMTYPE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CTRLB, FORMTYPE, MCDE_CTRLB_FORMTYPE_##__x) +#define MCDE_CTRLB_FORMTYPE(__x) \ + MCDE_VAL2REG(MCDE_CTRLB, FORMTYPE, __x) +#define MCDE_GAM0A 0x00000888 +#define MCDE_GAM0A_GROUPOFFSET 0x200 +#define MCDE_GAM0A_BLUE_SHIFT 0 +#define MCDE_GAM0A_BLUE_MASK 0x00FFFFFF +#define MCDE_GAM0A_BLUE(__x) \ + MCDE_VAL2REG(MCDE_GAM0A, BLUE, __x) +#define MCDE_GAM0B 0x00000A88 +#define MCDE_GAM0B_BLUE_SHIFT 0 +#define MCDE_GAM0B_BLUE_MASK 0x00FFFFFF +#define MCDE_GAM0B_BLUE(__x) \ + MCDE_VAL2REG(MCDE_GAM0B, BLUE, __x) +#define MCDE_GAM1A 0x0000088C +#define MCDE_GAM1A_GROUPOFFSET 0x200 +#define MCDE_GAM1A_GREEN_SHIFT 0 +#define MCDE_GAM1A_GREEN_MASK 0x00FFFFFF +#define MCDE_GAM1A_GREEN(__x) \ + MCDE_VAL2REG(MCDE_GAM1A, GREEN, __x) +#define MCDE_GAM1B 0x00000A8C +#define MCDE_GAM1B_GREEN_SHIFT 0 
+#define MCDE_GAM1B_GREEN_MASK 0x00FFFFFF +#define MCDE_GAM1B_GREEN(__x) \ + MCDE_VAL2REG(MCDE_GAM1B, GREEN, __x) +#define MCDE_GAM2A 0x00000890 +#define MCDE_GAM2A_GROUPOFFSET 0x200 +#define MCDE_GAM2A_RED_SHIFT 0 +#define MCDE_GAM2A_RED_MASK 0x00FFFFFF +#define MCDE_GAM2A_RED(__x) \ + MCDE_VAL2REG(MCDE_GAM2A, RED, __x) +#define MCDE_GAM2B 0x00000A90 +#define MCDE_GAM2B_RED_SHIFT 0 +#define MCDE_GAM2B_RED_MASK 0x00FFFFFF +#define MCDE_GAM2B_RED(__x) \ + MCDE_VAL2REG(MCDE_GAM2B, RED, __x) +#define MCDE_OLEDCONV1A 0x00000894 +#define MCDE_OLEDCONV1A_GROUPOFFSET 0x200 +#define MCDE_OLEDCONV1A_ALPHA_RED_SHIFT 0 +#define MCDE_OLEDCONV1A_ALPHA_RED_MASK 0x00003FFF +#define MCDE_OLEDCONV1A_ALPHA_RED(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV1A, ALPHA_RED, __x) +#define MCDE_OLEDCONV1A_ALPHA_GREEN_SHIFT 16 +#define MCDE_OLEDCONV1A_ALPHA_GREEN_MASK 0x3FFF0000 +#define MCDE_OLEDCONV1A_ALPHA_GREEN(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV1A, ALPHA_GREEN, __x) +#define MCDE_OLEDCONV1B 0x00000A94 +#define MCDE_OLEDCONV1B_ALPHA_RED_SHIFT 0 +#define MCDE_OLEDCONV1B_ALPHA_RED_MASK 0x00003FFF +#define MCDE_OLEDCONV1B_ALPHA_RED(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV1B, ALPHA_RED, __x) +#define MCDE_OLEDCONV1B_ALPHA_GREEN_SHIFT 16 +#define MCDE_OLEDCONV1B_ALPHA_GREEN_MASK 0x3FFF0000 +#define MCDE_OLEDCONV1B_ALPHA_GREEN(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV1B, ALPHA_GREEN, __x) +#define MCDE_OLEDCONV2A 0x00000898 +#define MCDE_OLEDCONV2A_GROUPOFFSET 0x200 +#define MCDE_OLEDCONV2A_ALPHA_BLUE_SHIFT 0 +#define MCDE_OLEDCONV2A_ALPHA_BLUE_MASK 0x00003FFF +#define MCDE_OLEDCONV2A_ALPHA_BLUE(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV2A, ALPHA_BLUE, __x) +#define MCDE_OLEDCONV2A_BETA_RED_SHIFT 16 +#define MCDE_OLEDCONV2A_BETA_RED_MASK 0x3FFF0000 +#define MCDE_OLEDCONV2A_BETA_RED(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV2A, BETA_RED, __x) +#define MCDE_OLEDCONV2B 0x00000A98 +#define MCDE_OLEDCONV2B_ALPHA_BLUE_SHIFT 0 +#define MCDE_OLEDCONV2B_ALPHA_BLUE_MASK 0x00003FFF +#define MCDE_OLEDCONV2B_ALPHA_BLUE(__x) \ + 
MCDE_VAL2REG(MCDE_OLEDCONV2B, ALPHA_BLUE, __x) +#define MCDE_OLEDCONV2B_BETA_RED_SHIFT 16 +#define MCDE_OLEDCONV2B_BETA_RED_MASK 0x3FFF0000 +#define MCDE_OLEDCONV2B_BETA_RED(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV2B, BETA_RED, __x) +#define MCDE_OLEDCONV3A 0x0000089C +#define MCDE_OLEDCONV3A_GROUPOFFSET 0x200 +#define MCDE_OLEDCONV3A_BETA_GREEN_SHIFT 0 +#define MCDE_OLEDCONV3A_BETA_GREEN_MASK 0x00003FFF +#define MCDE_OLEDCONV3A_BETA_GREEN(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV3A, BETA_GREEN, __x) +#define MCDE_OLEDCONV3A_BETA_BLUE_SHIFT 16 +#define MCDE_OLEDCONV3A_BETA_BLUE_MASK 0x3FFF0000 +#define MCDE_OLEDCONV3A_BETA_BLUE(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV3A, BETA_BLUE, __x) +#define MCDE_OLEDCONV3B 0x00000A9C +#define MCDE_OLEDCONV3B_BETA_GREEN_SHIFT 0 +#define MCDE_OLEDCONV3B_BETA_GREEN_MASK 0x00003FFF +#define MCDE_OLEDCONV3B_BETA_GREEN(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV3B, BETA_GREEN, __x) +#define MCDE_OLEDCONV3B_BETA_BLUE_SHIFT 16 +#define MCDE_OLEDCONV3B_BETA_BLUE_MASK 0x3FFF0000 +#define MCDE_OLEDCONV3B_BETA_BLUE(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV3B, BETA_BLUE, __x) +#define MCDE_OLEDCONV4A 0x000008A0 +#define MCDE_OLEDCONV4A_GROUPOFFSET 0x200 +#define MCDE_OLEDCONV4A_GAMMA_RED_SHIFT 0 +#define MCDE_OLEDCONV4A_GAMMA_RED_MASK 0x00003FFF +#define MCDE_OLEDCONV4A_GAMMA_RED(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV4A, GAMMA_RED, __x) +#define MCDE_OLEDCONV4A_GAMMA_GREEN_SHIFT 16 +#define MCDE_OLEDCONV4A_GAMMA_GREEN_MASK 0x3FFF0000 +#define MCDE_OLEDCONV4A_GAMMA_GREEN(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV4A, GAMMA_GREEN, __x) +#define MCDE_OLEDCONV4B 0x00000AA0 +#define MCDE_OLEDCONV4B_GAMMA_RED_SHIFT 0 +#define MCDE_OLEDCONV4B_GAMMA_RED_MASK 0x00003FFF +#define MCDE_OLEDCONV4B_GAMMA_RED(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV4B, GAMMA_RED, __x) +#define MCDE_OLEDCONV4B_GAMMA_GREEN_SHIFT 16 +#define MCDE_OLEDCONV4B_GAMMA_GREEN_MASK 0x3FFF0000 +#define MCDE_OLEDCONV4B_GAMMA_GREEN(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV4B, GAMMA_GREEN, __x) +#define MCDE_OLEDCONV5A 0x000008A4 
+#define MCDE_OLEDCONV5A_GROUPOFFSET 0x200 +#define MCDE_OLEDCONV5A_GAMMA_BLUE_SHIFT 0 +#define MCDE_OLEDCONV5A_GAMMA_BLUE_MASK 0x00003FFF +#define MCDE_OLEDCONV5A_GAMMA_BLUE(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV5A, GAMMA_BLUE, __x) +#define MCDE_OLEDCONV5A_OFF_RED_SHIFT 16 +#define MCDE_OLEDCONV5A_OFF_RED_MASK 0x3FFF0000 +#define MCDE_OLEDCONV5A_OFF_RED(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV5A, OFF_RED, __x) +#define MCDE_OLEDCONV5B 0x00000AA4 +#define MCDE_OLEDCONV5B_GAMMA_BLUE_SHIFT 0 +#define MCDE_OLEDCONV5B_GAMMA_BLUE_MASK 0x00003FFF +#define MCDE_OLEDCONV5B_GAMMA_BLUE(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV5B, GAMMA_BLUE, __x) +#define MCDE_OLEDCONV5B_OFF_RED_SHIFT 16 +#define MCDE_OLEDCONV5B_OFF_RED_MASK 0x3FFF0000 +#define MCDE_OLEDCONV5B_OFF_RED(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV5B, OFF_RED, __x) +#define MCDE_OLEDCONV6A 0x000008A8 +#define MCDE_OLEDCONV6A_GROUPOFFSET 0x200 +#define MCDE_OLEDCONV6A_OFF_GREEN_SHIFT 0 +#define MCDE_OLEDCONV6A_OFF_GREEN_MASK 0x00003FFF +#define MCDE_OLEDCONV6A_OFF_GREEN(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV6A, OFF_GREEN, __x) +#define MCDE_OLEDCONV6A_OFF_BLUE_SHIFT 16 +#define MCDE_OLEDCONV6A_OFF_BLUE_MASK 0x3FFF0000 +#define MCDE_OLEDCONV6A_OFF_BLUE(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV6A, OFF_BLUE, __x) +#define MCDE_OLEDCONV6B 0x00000AA8 +#define MCDE_OLEDCONV6B_OFF_GREEN_SHIFT 0 +#define MCDE_OLEDCONV6B_OFF_GREEN_MASK 0x00003FFF +#define MCDE_OLEDCONV6B_OFF_GREEN(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV6B, OFF_GREEN, __x) +#define MCDE_OLEDCONV6B_OFF_BLUE_SHIFT 16 +#define MCDE_OLEDCONV6B_OFF_BLUE_MASK 0x3FFF0000 +#define MCDE_OLEDCONV6B_OFF_BLUE(__x) \ + MCDE_VAL2REG(MCDE_OLEDCONV6B, OFF_BLUE, __x) +#define MCDE_CRC 0x00000C00 +#define MCDE_CRC_C1EN_SHIFT 2 +#define MCDE_CRC_C1EN_MASK 0x00000004 +#define MCDE_CRC_C1EN(__x) \ + MCDE_VAL2REG(MCDE_CRC, C1EN, __x) +#define MCDE_CRC_C2EN_SHIFT 3 +#define MCDE_CRC_C2EN_MASK 0x00000008 +#define MCDE_CRC_C2EN(__x) \ + MCDE_VAL2REG(MCDE_CRC, C2EN, __x) +#define MCDE_CRC_SYCEN0_SHIFT 7 +#define 
MCDE_CRC_SYCEN0_MASK 0x00000080 +#define MCDE_CRC_SYCEN0(__x) \ + MCDE_VAL2REG(MCDE_CRC, SYCEN0, __x) +#define MCDE_CRC_SYCEN1_SHIFT 8 +#define MCDE_CRC_SYCEN1_MASK 0x00000100 +#define MCDE_CRC_SYCEN1(__x) \ + MCDE_VAL2REG(MCDE_CRC, SYCEN1, __x) +#define MCDE_CRC_SIZE1_SHIFT 9 +#define MCDE_CRC_SIZE1_MASK 0x00000200 +#define MCDE_CRC_SIZE1(__x) \ + MCDE_VAL2REG(MCDE_CRC, SIZE1, __x) +#define MCDE_CRC_SIZE2_SHIFT 10 +#define MCDE_CRC_SIZE2_MASK 0x00000400 +#define MCDE_CRC_SIZE2(__x) \ + MCDE_VAL2REG(MCDE_CRC, SIZE2, __x) +#define MCDE_CRC_YUVCONVC1EN_SHIFT 15 +#define MCDE_CRC_YUVCONVC1EN_MASK 0x00008000 +#define MCDE_CRC_YUVCONVC1EN(__x) \ + MCDE_VAL2REG(MCDE_CRC, YUVCONVC1EN, __x) +#define MCDE_CRC_CS1EN_SHIFT 16 +#define MCDE_CRC_CS1EN_MASK 0x00010000 +#define MCDE_CRC_CS1EN(__x) \ + MCDE_VAL2REG(MCDE_CRC, CS1EN, __x) +#define MCDE_CRC_CS2EN_SHIFT 17 +#define MCDE_CRC_CS2EN_MASK 0x00020000 +#define MCDE_CRC_CS2EN(__x) \ + MCDE_VAL2REG(MCDE_CRC, CS2EN, __x) +#define MCDE_CRC_CS1POL_SHIFT 19 +#define MCDE_CRC_CS1POL_MASK 0x00080000 +#define MCDE_CRC_CS1POL(__x) \ + MCDE_VAL2REG(MCDE_CRC, CS1POL, __x) +#define MCDE_CRC_CS2POL_SHIFT 20 +#define MCDE_CRC_CS2POL_MASK 0x00100000 +#define MCDE_CRC_CS2POL(__x) \ + MCDE_VAL2REG(MCDE_CRC, CS2POL, __x) +#define MCDE_CRC_CD1POL_SHIFT 21 +#define MCDE_CRC_CD1POL_MASK 0x00200000 +#define MCDE_CRC_CD1POL(__x) \ + MCDE_VAL2REG(MCDE_CRC, CD1POL, __x) +#define MCDE_CRC_CD2POL_SHIFT 22 +#define MCDE_CRC_CD2POL_MASK 0x00400000 +#define MCDE_CRC_CD2POL(__x) \ + MCDE_VAL2REG(MCDE_CRC, CD2POL, __x) +#define MCDE_CRC_WR1POL_SHIFT 23 +#define MCDE_CRC_WR1POL_MASK 0x00800000 +#define MCDE_CRC_WR1POL(__x) \ + MCDE_VAL2REG(MCDE_CRC, WR1POL, __x) +#define MCDE_CRC_WR2POL_SHIFT 24 +#define MCDE_CRC_WR2POL_MASK 0x01000000 +#define MCDE_CRC_WR2POL(__x) \ + MCDE_VAL2REG(MCDE_CRC, WR2POL, __x) +#define MCDE_CRC_RD1POL_SHIFT 25 +#define MCDE_CRC_RD1POL_MASK 0x02000000 +#define MCDE_CRC_RD1POL(__x) \ + MCDE_VAL2REG(MCDE_CRC, RD1POL, __x) +#define 
MCDE_CRC_RD2POL_SHIFT 26 +#define MCDE_CRC_RD2POL_MASK 0x04000000 +#define MCDE_CRC_RD2POL(__x) \ + MCDE_VAL2REG(MCDE_CRC, RD2POL, __x) +#define MCDE_CRC_SYNCCTRL_SHIFT 29 +#define MCDE_CRC_SYNCCTRL_MASK 0x60000000 +#define MCDE_CRC_SYNCCTRL_NO_SYNC 0 +#define MCDE_CRC_SYNCCTRL_DBI0 1 +#define MCDE_CRC_SYNCCTRL_DBI1 2 +#define MCDE_CRC_SYNCCTRL_PING_PONG 3 +#define MCDE_CRC_SYNCCTRL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CRC, SYNCCTRL, MCDE_CRC_SYNCCTRL_##__x) +#define MCDE_CRC_SYNCCTRL(__x) \ + MCDE_VAL2REG(MCDE_CRC, SYNCCTRL, __x) +#define MCDE_CRC_CLAMPC1EN_SHIFT 31 +#define MCDE_CRC_CLAMPC1EN_MASK 0x80000000 +#define MCDE_CRC_CLAMPC1EN(__x) \ + MCDE_VAL2REG(MCDE_CRC, CLAMPC1EN, __x) +#define MCDE_PBCCRC0 0x00000C04 +#define MCDE_PBCCRC0_GROUPOFFSET 0x4 +#define MCDE_PBCCRC0_BSCM_SHIFT 0 +#define MCDE_PBCCRC0_BSCM_MASK 0x00000007 +#define MCDE_PBCCRC0_BSCM_1_8BIT 0 +#define MCDE_PBCCRC0_BSCM_2_8BIT 1 +#define MCDE_PBCCRC0_BSCM_3_8BIT 2 +#define MCDE_PBCCRC0_BSCM_1_16BIT 3 +#define MCDE_PBCCRC0_BSCM_2_16BIT 4 +#define MCDE_PBCCRC0_BSCM_ENUM(__x) \ + MCDE_VAL2REG(MCDE_PBCCRC0, BSCM, MCDE_PBCCRC0_BSCM_##__x) +#define MCDE_PBCCRC0_BSCM(__x) \ + MCDE_VAL2REG(MCDE_PBCCRC0, BSCM, __x) +#define MCDE_PBCCRC0_BSDM_SHIFT 3 +#define MCDE_PBCCRC0_BSDM_MASK 0x00000038 +#define MCDE_PBCCRC0_BSDM_1_8BIT 0 +#define MCDE_PBCCRC0_BSDM_2_8BIT 1 +#define MCDE_PBCCRC0_BSDM_3_8BIT 2 +#define MCDE_PBCCRC0_BSDM_1_16BIT 3 +#define MCDE_PBCCRC0_BSDM_2_16BIT 4 +#define MCDE_PBCCRC0_BSDM_ENUM(__x) \ + MCDE_VAL2REG(MCDE_PBCCRC0, BSDM, MCDE_PBCCRC0_BSDM_##__x) +#define MCDE_PBCCRC0_BSDM(__x) \ + MCDE_VAL2REG(MCDE_PBCCRC0, BSDM, __x) +#define MCDE_PBCCRC0_PDM_SHIFT 6 +#define MCDE_PBCCRC0_PDM_MASK 0x000000C0 +#define MCDE_PBCCRC0_PDM_NORMAL 0 +#define MCDE_PBCCRC0_PDM_16_TO_32 1 +#define MCDE_PBCCRC0_PDM_24_TO_32_RIGHT 2 +#define MCDE_PBCCRC0_PDM_24_TO_32_LEFT 3 +#define MCDE_PBCCRC0_PDM_ENUM(__x) \ + MCDE_VAL2REG(MCDE_PBCCRC0, PDM, MCDE_PBCCRC0_PDM_##__x) +#define MCDE_PBCCRC0_PDM(__x) \ + 
MCDE_VAL2REG(MCDE_PBCCRC0, PDM, __x) +#define MCDE_PBCCRC0_PDCTRL_SHIFT 12 +#define MCDE_PBCCRC0_PDCTRL_MASK 0x00001000 +#define MCDE_PBCCRC0_PDCTRL(__x) \ + MCDE_VAL2REG(MCDE_PBCCRC0, PDCTRL, __x) +#define MCDE_PBCCRC0_BPP_SHIFT 13 +#define MCDE_PBCCRC0_BPP_MASK 0x0000E000 +#define MCDE_PBCCRC0_BPP_8BPP 0 +#define MCDE_PBCCRC0_BPP_12BPP 1 +#define MCDE_PBCCRC0_BPP_15BPP 2 +#define MCDE_PBCCRC0_BPP_16BPP 3 +#define MCDE_PBCCRC0_BPP_18BPP 4 +#define MCDE_PBCCRC0_BPP_24BPP 5 +#define MCDE_PBCCRC0_BPP(__x) \ + MCDE_VAL2REG(MCDE_PBCCRC0, BPP, __x) +#define MCDE_PBCCRC1 0x00000C08 +#define MCDE_PBCCRC1_BSCM_SHIFT 0 +#define MCDE_PBCCRC1_BSCM_MASK 0x00000007 +#define MCDE_PBCCRC1_BSCM_1_8BIT 0 +#define MCDE_PBCCRC1_BSCM_2_8BIT 1 +#define MCDE_PBCCRC1_BSCM_3_8BIT 2 +#define MCDE_PBCCRC1_BSCM_1_16BIT 3 +#define MCDE_PBCCRC1_BSCM_2_16BIT 4 +#define MCDE_PBCCRC1_BSCM_ENUM(__x) \ + MCDE_VAL2REG(MCDE_PBCCRC1, BSCM, MCDE_PBCCRC1_BSCM_##__x) +#define MCDE_PBCCRC1_BSCM(__x) \ + MCDE_VAL2REG(MCDE_PBCCRC1, BSCM, __x) +#define MCDE_PBCCRC1_BSDM_SHIFT 3 +#define MCDE_PBCCRC1_BSDM_MASK 0x00000038 +#define MCDE_PBCCRC1_BSDM_1_8BIT 0 +#define MCDE_PBCCRC1_BSDM_2_8BIT 1 +#define MCDE_PBCCRC1_BSDM_3_8BIT 2 +#define MCDE_PBCCRC1_BSDM_1_16BIT 3 +#define MCDE_PBCCRC1_BSDM_2_16BIT 4 +#define MCDE_PBCCRC1_BSDM_ENUM(__x) \ + MCDE_VAL2REG(MCDE_PBCCRC1, BSDM, MCDE_PBCCRC1_BSDM_##__x) +#define MCDE_PBCCRC1_BSDM(__x) \ + MCDE_VAL2REG(MCDE_PBCCRC1, BSDM, __x) +#define MCDE_PBCCRC1_PDM_SHIFT 6 +#define MCDE_PBCCRC1_PDM_MASK 0x000000C0 +#define MCDE_PBCCRC1_PDM_NORMAL 0 +#define MCDE_PBCCRC1_PDM_16_TO_32 1 +#define MCDE_PBCCRC1_PDM_24_TO_32_RIGHT 2 +#define MCDE_PBCCRC1_PDM_24_TO_32_LEFT 3 +#define MCDE_PBCCRC1_PDM_ENUM(__x) \ + MCDE_VAL2REG(MCDE_PBCCRC1, PDM, MCDE_PBCCRC1_PDM_##__x) +#define MCDE_PBCCRC1_PDM(__x) \ + MCDE_VAL2REG(MCDE_PBCCRC1, PDM, __x) +#define MCDE_PBCCRC1_PDCTRL_SHIFT 12 +#define MCDE_PBCCRC1_PDCTRL_MASK 0x00001000 +#define MCDE_PBCCRC1_PDCTRL(__x) \ + MCDE_VAL2REG(MCDE_PBCCRC1, 
PDCTRL, __x) +#define MCDE_PBCCRC1_BPP_SHIFT 13 +#define MCDE_PBCCRC1_BPP_MASK 0x0000E000 +#define MCDE_PBCCRC1_BPP_8BPP 0 +#define MCDE_PBCCRC1_BPP_12BPP 1 +#define MCDE_PBCCRC1_BPP_15BPP 2 +#define MCDE_PBCCRC1_BPP_16BPP 3 +#define MCDE_PBCCRC1_BPP_18BPP 4 +#define MCDE_PBCCRC1_BPP_24BPP 5 +#define MCDE_PBCCRC1_BPP(__x) \ + MCDE_VAL2REG(MCDE_PBCCRC1, BPP, __x) +#define MCDE_PBCBMRC00 0x00000C0C +#define MCDE_PBCBMRC00_GROUPOFFSET 0x4 +#define MCDE_PBCBMRC00_MUXI_SHIFT 0 +#define MCDE_PBCBMRC00_MUXI_MASK 0xFFFFFFFF +#define MCDE_PBCBMRC00_MUXI(__x) \ + MCDE_VAL2REG(MCDE_PBCBMRC00, MUXI, __x) +#define MCDE_PBCBMRC01 0x00000C10 +#define MCDE_PBCBMRC01_MUXI_SHIFT 0 +#define MCDE_PBCBMRC01_MUXI_MASK 0xFFFFFFFF +#define MCDE_PBCBMRC01_MUXI(__x) \ + MCDE_VAL2REG(MCDE_PBCBMRC01, MUXI, __x) +#define MCDE_PBCBMRC02 0x00000C14 +#define MCDE_PBCBMRC02_MUXI_SHIFT 0 +#define MCDE_PBCBMRC02_MUXI_MASK 0xFFFFFFFF +#define MCDE_PBCBMRC02_MUXI(__x) \ + MCDE_VAL2REG(MCDE_PBCBMRC02, MUXI, __x) +#define MCDE_PBCBMRC03 0x00000C18 +#define MCDE_PBCBMRC03_MUXI_SHIFT 0 +#define MCDE_PBCBMRC03_MUXI_MASK 0xFFFFFFFF +#define MCDE_PBCBMRC03_MUXI(__x) \ + MCDE_VAL2REG(MCDE_PBCBMRC03, MUXI, __x) +#define MCDE_PBCBMRC04 0x00000C1C +#define MCDE_PBCBMRC04_MUXI_SHIFT 0 +#define MCDE_PBCBMRC04_MUXI_MASK 0xFFFFFFFF +#define MCDE_PBCBMRC04_MUXI(__x) \ + MCDE_VAL2REG(MCDE_PBCBMRC04, MUXI, __x) +#define MCDE_PBCBMRC10 0x00000C20 +#define MCDE_PBCBMRC10_MUXI_SHIFT 0 +#define MCDE_PBCBMRC10_MUXI_MASK 0xFFFFFFFF +#define MCDE_PBCBMRC10_MUXI(__x) \ + MCDE_VAL2REG(MCDE_PBCBMRC10, MUXI, __x) +#define MCDE_PBCBMRC11 0x00000C24 +#define MCDE_PBCBMRC11_MUXI_SHIFT 0 +#define MCDE_PBCBMRC11_MUXI_MASK 0xFFFFFFFF +#define MCDE_PBCBMRC11_MUXI(__x) \ + MCDE_VAL2REG(MCDE_PBCBMRC11, MUXI, __x) +#define MCDE_PBCBMRC12 0x00000C28 +#define MCDE_PBCBMRC12_MUXI_SHIFT 0 +#define MCDE_PBCBMRC12_MUXI_MASK 0xFFFFFFFF +#define MCDE_PBCBMRC12_MUXI(__x) \ + MCDE_VAL2REG(MCDE_PBCBMRC12, MUXI, __x) +#define MCDE_PBCBMRC13 0x00000C2C 
+#define MCDE_PBCBMRC13_MUXI_SHIFT 0 +#define MCDE_PBCBMRC13_MUXI_MASK 0xFFFFFFFF +#define MCDE_PBCBMRC13_MUXI(__x) \ + MCDE_VAL2REG(MCDE_PBCBMRC13, MUXI, __x) +#define MCDE_PBCBMRC14 0x00000C30 +#define MCDE_PBCBMRC14_MUXI_SHIFT 0 +#define MCDE_PBCBMRC14_MUXI_MASK 0xFFFFFFFF +#define MCDE_PBCBMRC14_MUXI(__x) \ + MCDE_VAL2REG(MCDE_PBCBMRC14, MUXI, __x) +#define MCDE_PBCBCRC00 0x00000C34 +#define MCDE_PBCBCRC00_GROUPOFFSET 0x4 +#define MCDE_PBCBCRC00_CTLI_SHIFT 0 +#define MCDE_PBCBCRC00_CTLI_MASK 0xFFFFFFFF +#define MCDE_PBCBCRC00_CTLI(__x) \ + MCDE_VAL2REG(MCDE_PBCBCRC00, CTLI, __x) +#define MCDE_PBCBCRC10 0x00000C38 +#define MCDE_PBCBCRC10_CTLI_SHIFT 0 +#define MCDE_PBCBCRC10_CTLI_MASK 0xFFFFFFFF +#define MCDE_PBCBCRC10_CTLI(__x) \ + MCDE_VAL2REG(MCDE_PBCBCRC10, CTLI, __x) +#define MCDE_PBCBCRC01 0x00000C48 +#define MCDE_PBCBCRC01_GROUPOFFSET 0x4 +#define MCDE_PBCBCRC01_CTLI_SHIFT 0 +#define MCDE_PBCBCRC01_CTLI_MASK 0xFFFFFFFF +#define MCDE_PBCBCRC01_CTLI(__x) \ + MCDE_VAL2REG(MCDE_PBCBCRC01, CTLI, __x) +#define MCDE_PBCBCRC11 0x00000C4C +#define MCDE_PBCBCRC11_CTLI_SHIFT 0 +#define MCDE_PBCBCRC11_CTLI_MASK 0xFFFFFFFF +#define MCDE_PBCBCRC11_CTLI(__x) \ + MCDE_VAL2REG(MCDE_PBCBCRC11, CTLI, __x) +#define MCDE_VSCRC0 0x00000C5C +#define MCDE_VSCRC0_GROUPOFFSET 0x4 +#define MCDE_VSCRC0_VSPMIN_SHIFT 0 +#define MCDE_VSCRC0_VSPMIN_MASK 0x00000FFF +#define MCDE_VSCRC0_VSPMIN(__x) \ + MCDE_VAL2REG(MCDE_VSCRC0, VSPMIN, __x) +#define MCDE_VSCRC0_VSPMAX_SHIFT 12 +#define MCDE_VSCRC0_VSPMAX_MASK 0x00FFF000 +#define MCDE_VSCRC0_VSPMAX(__x) \ + MCDE_VAL2REG(MCDE_VSCRC0, VSPMAX, __x) +#define MCDE_VSCRC0_VSPDIV_SHIFT 24 +#define MCDE_VSCRC0_VSPDIV_MASK 0x07000000 +#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_1 0 +#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_2 1 +#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_4 2 +#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_8 3 +#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_16 4 +#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_32 5 +#define MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_64 6 +#define 
MCDE_VSCRC0_VSPDIV_MCDECLK_DIV_128 7 +#define MCDE_VSCRC0_VSPDIV_ENUM(__x) \ + MCDE_VAL2REG(MCDE_VSCRC0, VSPDIV, MCDE_VSCRC0_VSPDIV_##__x) +#define MCDE_VSCRC0_VSPDIV(__x) \ + MCDE_VAL2REG(MCDE_VSCRC0, VSPDIV, __x) +#define MCDE_VSCRC0_VSPOL_SHIFT 27 +#define MCDE_VSCRC0_VSPOL_MASK 0x08000000 +#define MCDE_VSCRC0_VSPOL_ACTIVE_HIGH 0 +#define MCDE_VSCRC0_VSPOL_ACTIVE_LOW 1 +#define MCDE_VSCRC0_VSPOL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_VSCRC0, VSPOL, MCDE_VSCRC0_VSPOL_##__x) +#define MCDE_VSCRC0_VSPOL(__x) \ + MCDE_VAL2REG(MCDE_VSCRC0, VSPOL, __x) +#define MCDE_VSCRC0_VSSEL_SHIFT 28 +#define MCDE_VSCRC0_VSSEL_MASK 0x10000000 +#define MCDE_VSCRC0_VSSEL_VSYNC0 0 +#define MCDE_VSCRC0_VSSEL_VSYNC1 1 +#define MCDE_VSCRC0_VSSEL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_VSCRC0, VSSEL, MCDE_VSCRC0_VSSEL_##__x) +#define MCDE_VSCRC0_VSSEL(__x) \ + MCDE_VAL2REG(MCDE_VSCRC0, VSSEL, __x) +#define MCDE_VSCRC0_VSDBL_SHIFT 29 +#define MCDE_VSCRC0_VSDBL_MASK 0xE0000000 +#define MCDE_VSCRC0_VSDBL(__x) \ + MCDE_VAL2REG(MCDE_VSCRC0, VSDBL, __x) +#define MCDE_VSCRC1 0x00000C60 +#define MCDE_VSCRC1_VSPMIN_SHIFT 0 +#define MCDE_VSCRC1_VSPMIN_MASK 0x00000FFF +#define MCDE_VSCRC1_VSPMIN(__x) \ + MCDE_VAL2REG(MCDE_VSCRC1, VSPMIN, __x) +#define MCDE_VSCRC1_VSPMAX_SHIFT 12 +#define MCDE_VSCRC1_VSPMAX_MASK 0x00FFF000 +#define MCDE_VSCRC1_VSPMAX(__x) \ + MCDE_VAL2REG(MCDE_VSCRC1, VSPMAX, __x) +#define MCDE_VSCRC1_VSPDIV_SHIFT 24 +#define MCDE_VSCRC1_VSPDIV_MASK 0x07000000 +#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_1 0 +#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_2 1 +#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_4 2 +#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_8 3 +#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_16 4 +#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_32 5 +#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_64 6 +#define MCDE_VSCRC1_VSPDIV_MCDECLK_DIV_128 7 +#define MCDE_VSCRC1_VSPDIV_ENUM(__x) \ + MCDE_VAL2REG(MCDE_VSCRC1, VSPDIV, MCDE_VSCRC1_VSPDIV_##__x) +#define MCDE_VSCRC1_VSPDIV(__x) \ + MCDE_VAL2REG(MCDE_VSCRC1, VSPDIV, __x) +#define 
MCDE_VSCRC1_VSPOL_SHIFT 27 +#define MCDE_VSCRC1_VSPOL_MASK 0x08000000 +#define MCDE_VSCRC1_VSPOL_ACTIVE_HIGH 0 +#define MCDE_VSCRC1_VSPOL_ACTIVE_LOW 1 +#define MCDE_VSCRC1_VSPOL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_VSCRC1, VSPOL, MCDE_VSCRC1_VSPOL_##__x) +#define MCDE_VSCRC1_VSPOL(__x) \ + MCDE_VAL2REG(MCDE_VSCRC1, VSPOL, __x) +#define MCDE_VSCRC1_VSSEL_SHIFT 28 +#define MCDE_VSCRC1_VSSEL_MASK 0x10000000 +#define MCDE_VSCRC1_VSSEL_VSYNC0 0 +#define MCDE_VSCRC1_VSSEL_VSYNC1 1 +#define MCDE_VSCRC1_VSSEL_ENUM(__x) \ + MCDE_VAL2REG(MCDE_VSCRC1, VSSEL, MCDE_VSCRC1_VSSEL_##__x) +#define MCDE_VSCRC1_VSSEL(__x) \ + MCDE_VAL2REG(MCDE_VSCRC1, VSSEL, __x) +#define MCDE_VSCRC1_VSDBL_SHIFT 29 +#define MCDE_VSCRC1_VSDBL_MASK 0xE0000000 +#define MCDE_VSCRC1_VSDBL(__x) \ + MCDE_VAL2REG(MCDE_VSCRC1, VSDBL, __x) +#define MCDE_SCTRC 0x00000C64 +#define MCDE_SCTRC_SYNCDELC0_SHIFT 0 +#define MCDE_SCTRC_SYNCDELC0_MASK 0x000000FF +#define MCDE_SCTRC_SYNCDELC0(__x) \ + MCDE_VAL2REG(MCDE_SCTRC, SYNCDELC0, __x) +#define MCDE_SCTRC_SYNCDELC1_SHIFT 8 +#define MCDE_SCTRC_SYNCDELC1_MASK 0x0000FF00 +#define MCDE_SCTRC_SYNCDELC1(__x) \ + MCDE_VAL2REG(MCDE_SCTRC, SYNCDELC1, __x) +#define MCDE_SCTRC_TRDELC_SHIFT 16 +#define MCDE_SCTRC_TRDELC_MASK 0x0FFF0000 +#define MCDE_SCTRC_TRDELC(__x) \ + MCDE_VAL2REG(MCDE_SCTRC, TRDELC, __x) +#define MCDE_SCSRC 0x00000C68 +#define MCDE_SCSRC_VSTAC0_SHIFT 0 +#define MCDE_SCSRC_VSTAC0_MASK 0x00000001 +#define MCDE_SCSRC_VSTAC0(__x) \ + MCDE_VAL2REG(MCDE_SCSRC, VSTAC0, __x) +#define MCDE_SCSRC_VSTAC1_SHIFT 1 +#define MCDE_SCSRC_VSTAC1_MASK 0x00000002 +#define MCDE_SCSRC_VSTAC1(__x) \ + MCDE_VAL2REG(MCDE_SCSRC, VSTAC1, __x) +#define MCDE_BCNR0 0x00000C6C +#define MCDE_BCNR0_GROUPOFFSET 0x4 +#define MCDE_BCNR0_BCN_SHIFT 0 +#define MCDE_BCNR0_BCN_MASK 0x000000FF +#define MCDE_BCNR0_BCN(__x) \ + MCDE_VAL2REG(MCDE_BCNR0, BCN, __x) +#define MCDE_BCNR1 0x00000C70 +#define MCDE_BCNR1_BCN_SHIFT 0 +#define MCDE_BCNR1_BCN_MASK 0x000000FF +#define MCDE_BCNR1_BCN(__x) \ + 
MCDE_VAL2REG(MCDE_BCNR1, BCN, __x) +#define MCDE_CSCDTR0 0x00000C74 +#define MCDE_CSCDTR0_GROUPOFFSET 0x4 +#define MCDE_CSCDTR0_CSACT_SHIFT 0 +#define MCDE_CSCDTR0_CSACT_MASK 0x000000FF +#define MCDE_CSCDTR0_CSACT(__x) \ + MCDE_VAL2REG(MCDE_CSCDTR0, CSACT, __x) +#define MCDE_CSCDTR0_CSDEACT_SHIFT 8 +#define MCDE_CSCDTR0_CSDEACT_MASK 0x0000FF00 +#define MCDE_CSCDTR0_CSDEACT(__x) \ + MCDE_VAL2REG(MCDE_CSCDTR0, CSDEACT, __x) +#define MCDE_CSCDTR0_CDACT_SHIFT 16 +#define MCDE_CSCDTR0_CDACT_MASK 0x00FF0000 +#define MCDE_CSCDTR0_CDACT(__x) \ + MCDE_VAL2REG(MCDE_CSCDTR0, CDACT, __x) +#define MCDE_CSCDTR0_CDDEACT_SHIFT 24 +#define MCDE_CSCDTR0_CDDEACT_MASK 0xFF000000 +#define MCDE_CSCDTR0_CDDEACT(__x) \ + MCDE_VAL2REG(MCDE_CSCDTR0, CDDEACT, __x) +#define MCDE_CSCDTR1 0x00000C78 +#define MCDE_CSCDTR1_CSACT_SHIFT 0 +#define MCDE_CSCDTR1_CSACT_MASK 0x000000FF +#define MCDE_CSCDTR1_CSACT(__x) \ + MCDE_VAL2REG(MCDE_CSCDTR1, CSACT, __x) +#define MCDE_CSCDTR1_CSDEACT_SHIFT 8 +#define MCDE_CSCDTR1_CSDEACT_MASK 0x0000FF00 +#define MCDE_CSCDTR1_CSDEACT(__x) \ + MCDE_VAL2REG(MCDE_CSCDTR1, CSDEACT, __x) +#define MCDE_CSCDTR1_CDACT_SHIFT 16 +#define MCDE_CSCDTR1_CDACT_MASK 0x00FF0000 +#define MCDE_CSCDTR1_CDACT(__x) \ + MCDE_VAL2REG(MCDE_CSCDTR1, CDACT, __x) +#define MCDE_CSCDTR1_CDDEACT_SHIFT 24 +#define MCDE_CSCDTR1_CDDEACT_MASK 0xFF000000 +#define MCDE_CSCDTR1_CDDEACT(__x) \ + MCDE_VAL2REG(MCDE_CSCDTR1, CDDEACT, __x) +#define MCDE_RDWRTR0 0x00000C7C +#define MCDE_RDWRTR0_GROUPOFFSET 0x4 +#define MCDE_RDWRTR0_RWACT_SHIFT 0 +#define MCDE_RDWRTR0_RWACT_MASK 0x000000FF +#define MCDE_RDWRTR0_RWACT(__x) \ + MCDE_VAL2REG(MCDE_RDWRTR0, RWACT, __x) +#define MCDE_RDWRTR0_RWDEACT_SHIFT 8 +#define MCDE_RDWRTR0_RWDEACT_MASK 0x0000FF00 +#define MCDE_RDWRTR0_RWDEACT(__x) \ + MCDE_VAL2REG(MCDE_RDWRTR0, RWDEACT, __x) +#define MCDE_RDWRTR0_MOTINT_SHIFT 16 +#define MCDE_RDWRTR0_MOTINT_MASK 0x00010000 +#define MCDE_RDWRTR0_MOTINT(__x) \ + MCDE_VAL2REG(MCDE_RDWRTR0, MOTINT, __x) +#define MCDE_RDWRTR1 
0x00000C80 +#define MCDE_RDWRTR1_RWACT_SHIFT 0 +#define MCDE_RDWRTR1_RWACT_MASK 0x000000FF +#define MCDE_RDWRTR1_RWACT(__x) \ + MCDE_VAL2REG(MCDE_RDWRTR1, RWACT, __x) +#define MCDE_RDWRTR1_RWDEACT_SHIFT 8 +#define MCDE_RDWRTR1_RWDEACT_MASK 0x0000FF00 +#define MCDE_RDWRTR1_RWDEACT(__x) \ + MCDE_VAL2REG(MCDE_RDWRTR1, RWDEACT, __x) +#define MCDE_RDWRTR1_MOTINT_SHIFT 16 +#define MCDE_RDWRTR1_MOTINT_MASK 0x00010000 +#define MCDE_RDWRTR1_MOTINT(__x) \ + MCDE_VAL2REG(MCDE_RDWRTR1, MOTINT, __x) +#define MCDE_DOTR0 0x00000C84 +#define MCDE_DOTR0_GROUPOFFSET 0x4 +#define MCDE_DOTR0_DOACT_SHIFT 0 +#define MCDE_DOTR0_DOACT_MASK 0x000000FF +#define MCDE_DOTR0_DOACT(__x) \ + MCDE_VAL2REG(MCDE_DOTR0, DOACT, __x) +#define MCDE_DOTR0_DODEACT_SHIFT 8 +#define MCDE_DOTR0_DODEACT_MASK 0x0000FF00 +#define MCDE_DOTR0_DODEACT(__x) \ + MCDE_VAL2REG(MCDE_DOTR0, DODEACT, __x) +#define MCDE_DOTR1 0x00000C88 +#define MCDE_DOTR1_DOACT_SHIFT 0 +#define MCDE_DOTR1_DOACT_MASK 0x000000FF +#define MCDE_DOTR1_DOACT(__x) \ + MCDE_VAL2REG(MCDE_DOTR1, DOACT, __x) +#define MCDE_DOTR1_DODEACT_SHIFT 8 +#define MCDE_DOTR1_DODEACT_MASK 0x0000FF00 +#define MCDE_DOTR1_DODEACT(__x) \ + MCDE_VAL2REG(MCDE_DOTR1, DODEACT, __x) +#define MCDE_WDATADC0 0x00000C94 +#define MCDE_WDATADC0_GROUPOFFSET 0x4 +#define MCDE_WDATADC0_DATAVALUE_SHIFT 0 +#define MCDE_WDATADC0_DATAVALUE_MASK 0x00FFFFFF +#define MCDE_WDATADC0_DATAVALUE(__x) \ + MCDE_VAL2REG(MCDE_WDATADC0, DATAVALUE, __x) +#define MCDE_WDATADC0_DC_SHIFT 24 +#define MCDE_WDATADC0_DC_MASK 0x01000000 +#define MCDE_WDATADC0_DC(__x) \ + MCDE_VAL2REG(MCDE_WDATADC0, DC, __x) +#define MCDE_WDATADC1 0x00000C98 +#define MCDE_WDATADC1_DATAVALUE_SHIFT 0 +#define MCDE_WDATADC1_DATAVALUE_MASK 0x00FFFFFF +#define MCDE_WDATADC1_DATAVALUE(__x) \ + MCDE_VAL2REG(MCDE_WDATADC1, DATAVALUE, __x) +#define MCDE_WDATADC1_DC_SHIFT 24 +#define MCDE_WDATADC1_DC_MASK 0x01000000 +#define MCDE_WDATADC1_DC(__x) \ + MCDE_VAL2REG(MCDE_WDATADC1, DC, __x) +#define MCDE_RDATADC0 0x00000C9C +#define 
MCDE_RDATADC0_GROUPOFFSET 0x4 +#define MCDE_RDATADC0_DATAREADFROMDISPLAYMODULE_SHIFT 0 +#define MCDE_RDATADC0_DATAREADFROMDISPLAYMODULE_MASK 0x0000FFFF +#define MCDE_RDATADC0_DATAREADFROMDISPLAYMODULE(__x) \ + MCDE_VAL2REG(MCDE_RDATADC0, DATAREADFROMDISPLAYMODULE, __x) +#define MCDE_RDATADC0_STARTREAD_SHIFT 16 +#define MCDE_RDATADC0_STARTREAD_MASK 0x00010000 +#define MCDE_RDATADC0_STARTREAD(__x) \ + MCDE_VAL2REG(MCDE_RDATADC0, STARTREAD, __x) +#define MCDE_RDATADC1 0x00000CA0 +#define MCDE_RDATADC1_DATAREADFROMDISPLAYMODULE_SHIFT 0 +#define MCDE_RDATADC1_DATAREADFROMDISPLAYMODULE_MASK 0x0000FFFF +#define MCDE_RDATADC1_DATAREADFROMDISPLAYMODULE(__x) \ + MCDE_VAL2REG(MCDE_RDATADC1, DATAREADFROMDISPLAYMODULE, __x) +#define MCDE_RDATADC1_STARTREAD_SHIFT 16 +#define MCDE_RDATADC1_STARTREAD_MASK 0x00010000 +#define MCDE_RDATADC1_STARTREAD(__x) \ + MCDE_VAL2REG(MCDE_RDATADC1, STARTREAD, __x) +#define MCDE_STATC 0x00000CA4 +#define MCDE_STATC_STATBUSY0_SHIFT 0 +#define MCDE_STATC_STATBUSY0_MASK 0x00000001 +#define MCDE_STATC_STATBUSY0(__x) \ + MCDE_VAL2REG(MCDE_STATC, STATBUSY0, __x) +#define MCDE_STATC_STATBUSY1_SHIFT 5 +#define MCDE_STATC_STATBUSY1_MASK 0x00000020 +#define MCDE_STATC_STATBUSY1(__x) \ + MCDE_VAL2REG(MCDE_STATC, STATBUSY1, __x) +#define MCDE_CTRLC0 0x00000CA8 +#define MCDE_CTRLC0_GROUPOFFSET 0x4 +#define MCDE_CTRLC0_FIFOWTRMRK_SHIFT 0 +#define MCDE_CTRLC0_FIFOWTRMRK_MASK 0x000000FF +#define MCDE_CTRLC0_FIFOWTRMRK(__x) \ + MCDE_VAL2REG(MCDE_CTRLC0, FIFOWTRMRK, __x) +#define MCDE_CTRLC0_FIFOEMPTY_SHIFT 12 +#define MCDE_CTRLC0_FIFOEMPTY_MASK 0x00001000 +#define MCDE_CTRLC0_FIFOEMPTY(__x) \ + MCDE_VAL2REG(MCDE_CTRLC0, FIFOEMPTY, __x) +#define MCDE_CTRLC0_FIFOFULL_SHIFT 13 +#define MCDE_CTRLC0_FIFOFULL_MASK 0x00002000 +#define MCDE_CTRLC0_FIFOFULL(__x) \ + MCDE_VAL2REG(MCDE_CTRLC0, FIFOFULL, __x) +#define MCDE_CTRLC0_FORMID_SHIFT 16 +#define MCDE_CTRLC0_FORMID_MASK 0x00070000 +#define MCDE_CTRLC0_FORMID_DSI0VID 0 +#define MCDE_CTRLC0_FORMID_DSI0CMD 1 +#define 
MCDE_CTRLC0_FORMID_DSI1VID 2 +#define MCDE_CTRLC0_FORMID_DSI1CMD 3 +#define MCDE_CTRLC0_FORMID_DSI2VID 4 +#define MCDE_CTRLC0_FORMID_DSI2CMD 5 +#define MCDE_CTRLC0_FORMID_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CTRLC0, FORMID, MCDE_CTRLC0_FORMID_##__x) +#define MCDE_CTRLC0_FORMID(__x) \ + MCDE_VAL2REG(MCDE_CTRLC0, FORMID, __x) +#define MCDE_CTRLC0_FORMTYPE_SHIFT 20 +#define MCDE_CTRLC0_FORMTYPE_MASK 0x00700000 +#define MCDE_CTRLC0_FORMTYPE_DPITV 0 +#define MCDE_CTRLC0_FORMTYPE_DBI 1 +#define MCDE_CTRLC0_FORMTYPE_DSI 2 +#define MCDE_CTRLC0_FORMTYPE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CTRLC0, FORMTYPE, MCDE_CTRLC0_FORMTYPE_##__x) +#define MCDE_CTRLC0_FORMTYPE(__x) \ + MCDE_VAL2REG(MCDE_CTRLC0, FORMTYPE, __x) +#define MCDE_CTRLC1 0x00000CAC +#define MCDE_CTRLC1_FIFOWTRMRK_SHIFT 0 +#define MCDE_CTRLC1_FIFOWTRMRK_MASK 0x000000FF +#define MCDE_CTRLC1_FIFOWTRMRK(__x) \ + MCDE_VAL2REG(MCDE_CTRLC1, FIFOWTRMRK, __x) +#define MCDE_CTRLC1_FIFOEMPTY_SHIFT 12 +#define MCDE_CTRLC1_FIFOEMPTY_MASK 0x00001000 +#define MCDE_CTRLC1_FIFOEMPTY(__x) \ + MCDE_VAL2REG(MCDE_CTRLC1, FIFOEMPTY, __x) +#define MCDE_CTRLC1_FIFOFULL_SHIFT 13 +#define MCDE_CTRLC1_FIFOFULL_MASK 0x00002000 +#define MCDE_CTRLC1_FIFOFULL(__x) \ + MCDE_VAL2REG(MCDE_CTRLC1, FIFOFULL, __x) +#define MCDE_CTRLC1_FORMID_SHIFT 16 +#define MCDE_CTRLC1_FORMID_MASK 0x00070000 +#define MCDE_CTRLC1_FORMID_DSI0VID 0 +#define MCDE_CTRLC1_FORMID_DSI0CMD 1 +#define MCDE_CTRLC1_FORMID_DSI1VID 2 +#define MCDE_CTRLC1_FORMID_DSI1CMD 3 +#define MCDE_CTRLC1_FORMID_DSI2VID 4 +#define MCDE_CTRLC1_FORMID_DSI2CMD 5 +#define MCDE_CTRLC1_FORMID_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CTRLC1, FORMID, MCDE_CTRLC1_FORMID_##__x) +#define MCDE_CTRLC1_FORMID(__x) \ + MCDE_VAL2REG(MCDE_CTRLC1, FORMID, __x) +#define MCDE_CTRLC1_FORMTYPE_SHIFT 20 +#define MCDE_CTRLC1_FORMTYPE_MASK 0x00700000 +#define MCDE_CTRLC1_FORMTYPE_DPITV 0 +#define MCDE_CTRLC1_FORMTYPE_DBI 1 +#define MCDE_CTRLC1_FORMTYPE_DSI 2 +#define MCDE_CTRLC1_FORMTYPE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_CTRLC1, 
FORMTYPE, MCDE_CTRLC1_FORMTYPE_##__x) +#define MCDE_CTRLC1_FORMTYPE(__x) \ + MCDE_VAL2REG(MCDE_CTRLC1, FORMTYPE, __x) +#define MCDE_DSIVID0CONF0 0x00000E00 +#define MCDE_DSIVID0CONF0_GROUPOFFSET 0x20 +#define MCDE_DSIVID0CONF0_BLANKING_SHIFT 0 +#define MCDE_DSIVID0CONF0_BLANKING_MASK 0x000000FF +#define MCDE_DSIVID0CONF0_BLANKING(__x) \ + MCDE_VAL2REG(MCDE_DSIVID0CONF0, BLANKING, __x) +#define MCDE_DSIVID0CONF0_VID_MODE_SHIFT 12 +#define MCDE_DSIVID0CONF0_VID_MODE_MASK 0x00001000 +#define MCDE_DSIVID0CONF0_VID_MODE_CMD 0 +#define MCDE_DSIVID0CONF0_VID_MODE_VID 1 +#define MCDE_DSIVID0CONF0_VID_MODE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_DSIVID0CONF0, VID_MODE, \ + MCDE_DSIVID0CONF0_VID_MODE_##__x) +#define MCDE_DSIVID0CONF0_VID_MODE(__x) \ + MCDE_VAL2REG(MCDE_DSIVID0CONF0, VID_MODE, __x) +#define MCDE_DSIVID0CONF0_CMD8_SHIFT 13 +#define MCDE_DSIVID0CONF0_CMD8_MASK 0x00002000 +#define MCDE_DSIVID0CONF0_CMD8(__x) \ + MCDE_VAL2REG(MCDE_DSIVID0CONF0, CMD8, __x) +#define MCDE_DSIVID0CONF0_BIT_SWAP_SHIFT 16 +#define MCDE_DSIVID0CONF0_BIT_SWAP_MASK 0x00010000 +#define MCDE_DSIVID0CONF0_BIT_SWAP(__x) \ + MCDE_VAL2REG(MCDE_DSIVID0CONF0, BIT_SWAP, __x) +#define MCDE_DSIVID0CONF0_BYTE_SWAP_SHIFT 17 +#define MCDE_DSIVID0CONF0_BYTE_SWAP_MASK 0x00020000 +#define MCDE_DSIVID0CONF0_BYTE_SWAP(__x) \ + MCDE_VAL2REG(MCDE_DSIVID0CONF0, BYTE_SWAP, __x) +#define MCDE_DSIVID0CONF0_DCSVID_NOTGEN_SHIFT 18 +#define MCDE_DSIVID0CONF0_DCSVID_NOTGEN_MASK 0x00040000 +#define MCDE_DSIVID0CONF0_DCSVID_NOTGEN(__x) \ + MCDE_VAL2REG(MCDE_DSIVID0CONF0, DCSVID_NOTGEN, __x) +#define MCDE_DSIVID0CONF0_PACKING_SHIFT 20 +#define MCDE_DSIVID0CONF0_PACKING_MASK 0x00700000 +#define MCDE_DSIVID0CONF0_PACKING_RGB565 0 +#define MCDE_DSIVID0CONF0_PACKING_RGB666 1 +#define MCDE_DSIVID0CONF0_PACKING_RGB888 2 +#define MCDE_DSIVID0CONF0_PACKING_BGR888 3 +#define MCDE_DSIVID0CONF0_PACKING_HDTV 4 +#define MCDE_DSIVID0CONF0_PACKING_ENUM(__x) \ + MCDE_VAL2REG(MCDE_DSIVID0CONF0, PACKING, \ + MCDE_DSIVID0CONF0_PACKING_##__x) 
+#define MCDE_DSIVID0CONF0_PACKING(__x) \ + MCDE_VAL2REG(MCDE_DSIVID0CONF0, PACKING, __x) +#define MCDE_DSICMD0CONF0 0x00000E20 +#define MCDE_DSICMD0CONF0_BLANKING_SHIFT 0 +#define MCDE_DSICMD0CONF0_BLANKING_MASK 0x000000FF +#define MCDE_DSICMD0CONF0_BLANKING(__x) \ + MCDE_VAL2REG(MCDE_DSICMD0CONF0, BLANKING, __x) +#define MCDE_DSICMD0CONF0_VID_MODE_SHIFT 12 +#define MCDE_DSICMD0CONF0_VID_MODE_MASK 0x00001000 +#define MCDE_DSICMD0CONF0_VID_MODE_CMD 0 +#define MCDE_DSICMD0CONF0_VID_MODE_VID 1 +#define MCDE_DSICMD0CONF0_VID_MODE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_DSICMD0CONF0, VID_MODE, \ + MCDE_DSICMD0CONF0_VID_MODE_##__x) +#define MCDE_DSICMD0CONF0_VID_MODE(__x) \ + MCDE_VAL2REG(MCDE_DSICMD0CONF0, VID_MODE, __x) +#define MCDE_DSICMD0CONF0_CMD8_SHIFT 13 +#define MCDE_DSICMD0CONF0_CMD8_MASK 0x00002000 +#define MCDE_DSICMD0CONF0_CMD8(__x) \ + MCDE_VAL2REG(MCDE_DSICMD0CONF0, CMD8, __x) +#define MCDE_DSICMD0CONF0_BIT_SWAP_SHIFT 16 +#define MCDE_DSICMD0CONF0_BIT_SWAP_MASK 0x00010000 +#define MCDE_DSICMD0CONF0_BIT_SWAP(__x) \ + MCDE_VAL2REG(MCDE_DSICMD0CONF0, BIT_SWAP, __x) +#define MCDE_DSICMD0CONF0_BYTE_SWAP_SHIFT 17 +#define MCDE_DSICMD0CONF0_BYTE_SWAP_MASK 0x00020000 +#define MCDE_DSICMD0CONF0_BYTE_SWAP(__x) \ + MCDE_VAL2REG(MCDE_DSICMD0CONF0, BYTE_SWAP, __x) +#define MCDE_DSICMD0CONF0_DCSVID_NOTGEN_SHIFT 18 +#define MCDE_DSICMD0CONF0_DCSVID_NOTGEN_MASK 0x00040000 +#define MCDE_DSICMD0CONF0_DCSVID_NOTGEN(__x) \ + MCDE_VAL2REG(MCDE_DSICMD0CONF0, DCSVID_NOTGEN, __x) +#define MCDE_DSICMD0CONF0_PACKING_SHIFT 20 +#define MCDE_DSICMD0CONF0_PACKING_MASK 0x00700000 +#define MCDE_DSICMD0CONF0_PACKING_RGB565 0 +#define MCDE_DSICMD0CONF0_PACKING_RGB666 1 +#define MCDE_DSICMD0CONF0_PACKING_RGB888 2 +#define MCDE_DSICMD0CONF0_PACKING_BGR888 3 +#define MCDE_DSICMD0CONF0_PACKING_HDTV 4 +#define MCDE_DSICMD0CONF0_PACKING_ENUM(__x) \ + MCDE_VAL2REG(MCDE_DSICMD0CONF0, PACKING, \ + MCDE_DSICMD0CONF0_PACKING_##__x) +#define MCDE_DSICMD0CONF0_PACKING(__x) \ + MCDE_VAL2REG(MCDE_DSICMD0CONF0, 
PACKING, __x) +#define MCDE_DSIVID1CONF0 0x00000E40 +#define MCDE_DSIVID1CONF0_BLANKING_SHIFT 0 +#define MCDE_DSIVID1CONF0_BLANKING_MASK 0x000000FF +#define MCDE_DSIVID1CONF0_BLANKING(__x) \ + MCDE_VAL2REG(MCDE_DSIVID1CONF0, BLANKING, __x) +#define MCDE_DSIVID1CONF0_VID_MODE_SHIFT 12 +#define MCDE_DSIVID1CONF0_VID_MODE_MASK 0x00001000 +#define MCDE_DSIVID1CONF0_VID_MODE_CMD 0 +#define MCDE_DSIVID1CONF0_VID_MODE_VID 1 +#define MCDE_DSIVID1CONF0_VID_MODE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_DSIVID1CONF0, VID_MODE, \ + MCDE_DSIVID1CONF0_VID_MODE_##__x) +#define MCDE_DSIVID1CONF0_VID_MODE(__x) \ + MCDE_VAL2REG(MCDE_DSIVID1CONF0, VID_MODE, __x) +#define MCDE_DSIVID1CONF0_CMD8_SHIFT 13 +#define MCDE_DSIVID1CONF0_CMD8_MASK 0x00002000 +#define MCDE_DSIVID1CONF0_CMD8(__x) \ + MCDE_VAL2REG(MCDE_DSIVID1CONF0, CMD8, __x) +#define MCDE_DSIVID1CONF0_BIT_SWAP_SHIFT 16 +#define MCDE_DSIVID1CONF0_BIT_SWAP_MASK 0x00010000 +#define MCDE_DSIVID1CONF0_BIT_SWAP(__x) \ + MCDE_VAL2REG(MCDE_DSIVID1CONF0, BIT_SWAP, __x) +#define MCDE_DSIVID1CONF0_BYTE_SWAP_SHIFT 17 +#define MCDE_DSIVID1CONF0_BYTE_SWAP_MASK 0x00020000 +#define MCDE_DSIVID1CONF0_BYTE_SWAP(__x) \ + MCDE_VAL2REG(MCDE_DSIVID1CONF0, BYTE_SWAP, __x) +#define MCDE_DSIVID1CONF0_DCSVID_NOTGEN_SHIFT 18 +#define MCDE_DSIVID1CONF0_DCSVID_NOTGEN_MASK 0x00040000 +#define MCDE_DSIVID1CONF0_DCSVID_NOTGEN(__x) \ + MCDE_VAL2REG(MCDE_DSIVID1CONF0, DCSVID_NOTGEN, __x) +#define MCDE_DSIVID1CONF0_PACKING_SHIFT 20 +#define MCDE_DSIVID1CONF0_PACKING_MASK 0x00700000 +#define MCDE_DSIVID1CONF0_PACKING_RGB565 0 +#define MCDE_DSIVID1CONF0_PACKING_RGB666 1 +#define MCDE_DSIVID1CONF0_PACKING_RGB888 2 +#define MCDE_DSIVID1CONF0_PACKING_BGR888 3 +#define MCDE_DSIVID1CONF0_PACKING_HDTV 4 +#define MCDE_DSIVID1CONF0_PACKING_ENUM(__x) \ + MCDE_VAL2REG(MCDE_DSIVID1CONF0, PACKING, \ + MCDE_DSIVID1CONF0_PACKING_##__x) +#define MCDE_DSIVID1CONF0_PACKING(__x) \ + MCDE_VAL2REG(MCDE_DSIVID1CONF0, PACKING, __x) +#define MCDE_DSICMD1CONF0 0x00000E60 +#define 
MCDE_DSICMD1CONF0_BLANKING_SHIFT 0 +#define MCDE_DSICMD1CONF0_BLANKING_MASK 0x000000FF +#define MCDE_DSICMD1CONF0_BLANKING(__x) \ + MCDE_VAL2REG(MCDE_DSICMD1CONF0, BLANKING, __x) +#define MCDE_DSICMD1CONF0_VID_MODE_SHIFT 12 +#define MCDE_DSICMD1CONF0_VID_MODE_MASK 0x00001000 +#define MCDE_DSICMD1CONF0_VID_MODE_CMD 0 +#define MCDE_DSICMD1CONF0_VID_MODE_VID 1 +#define MCDE_DSICMD1CONF0_VID_MODE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_DSICMD1CONF0, VID_MODE, \ + MCDE_DSICMD1CONF0_VID_MODE_##__x) +#define MCDE_DSICMD1CONF0_VID_MODE(__x) \ + MCDE_VAL2REG(MCDE_DSICMD1CONF0, VID_MODE, __x) +#define MCDE_DSICMD1CONF0_CMD8_SHIFT 13 +#define MCDE_DSICMD1CONF0_CMD8_MASK 0x00002000 +#define MCDE_DSICMD1CONF0_CMD8(__x) \ + MCDE_VAL2REG(MCDE_DSICMD1CONF0, CMD8, __x) +#define MCDE_DSICMD1CONF0_BIT_SWAP_SHIFT 16 +#define MCDE_DSICMD1CONF0_BIT_SWAP_MASK 0x00010000 +#define MCDE_DSICMD1CONF0_BIT_SWAP(__x) \ + MCDE_VAL2REG(MCDE_DSICMD1CONF0, BIT_SWAP, __x) +#define MCDE_DSICMD1CONF0_BYTE_SWAP_SHIFT 17 +#define MCDE_DSICMD1CONF0_BYTE_SWAP_MASK 0x00020000 +#define MCDE_DSICMD1CONF0_BYTE_SWAP(__x) \ + MCDE_VAL2REG(MCDE_DSICMD1CONF0, BYTE_SWAP, __x) +#define MCDE_DSICMD1CONF0_DCSVID_NOTGEN_SHIFT 18 +#define MCDE_DSICMD1CONF0_DCSVID_NOTGEN_MASK 0x00040000 +#define MCDE_DSICMD1CONF0_DCSVID_NOTGEN(__x) \ + MCDE_VAL2REG(MCDE_DSICMD1CONF0, DCSVID_NOTGEN, __x) +#define MCDE_DSICMD1CONF0_PACKING_SHIFT 20 +#define MCDE_DSICMD1CONF0_PACKING_MASK 0x00700000 +#define MCDE_DSICMD1CONF0_PACKING_RGB565 0 +#define MCDE_DSICMD1CONF0_PACKING_RGB666 1 +#define MCDE_DSICMD1CONF0_PACKING_RGB888 2 +#define MCDE_DSICMD1CONF0_PACKING_BGR888 3 +#define MCDE_DSICMD1CONF0_PACKING_HDTV 4 +#define MCDE_DSICMD1CONF0_PACKING_ENUM(__x) \ + MCDE_VAL2REG(MCDE_DSICMD1CONF0, PACKING, \ + MCDE_DSICMD1CONF0_PACKING_##__x) +#define MCDE_DSICMD1CONF0_PACKING(__x) \ + MCDE_VAL2REG(MCDE_DSICMD1CONF0, PACKING, __x) +#define MCDE_DSIVID2CONF0 0x00000E80 +#define MCDE_DSIVID2CONF0_BLANKING_SHIFT 0 +#define MCDE_DSIVID2CONF0_BLANKING_MASK 
0x000000FF +#define MCDE_DSIVID2CONF0_BLANKING(__x) \ + MCDE_VAL2REG(MCDE_DSIVID2CONF0, BLANKING, __x) +#define MCDE_DSIVID2CONF0_VID_MODE_SHIFT 12 +#define MCDE_DSIVID2CONF0_VID_MODE_MASK 0x00001000 +#define MCDE_DSIVID2CONF0_VID_MODE_CMD 0 +#define MCDE_DSIVID2CONF0_VID_MODE_VID 1 +#define MCDE_DSIVID2CONF0_VID_MODE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_DSIVID2CONF0, VID_MODE, \ + MCDE_DSIVID2CONF0_VID_MODE_##__x) +#define MCDE_DSIVID2CONF0_VID_MODE(__x) \ + MCDE_VAL2REG(MCDE_DSIVID2CONF0, VID_MODE, __x) +#define MCDE_DSIVID2CONF0_CMD8_SHIFT 13 +#define MCDE_DSIVID2CONF0_CMD8_MASK 0x00002000 +#define MCDE_DSIVID2CONF0_CMD8(__x) \ + MCDE_VAL2REG(MCDE_DSIVID2CONF0, CMD8, __x) +#define MCDE_DSIVID2CONF0_BIT_SWAP_SHIFT 16 +#define MCDE_DSIVID2CONF0_BIT_SWAP_MASK 0x00010000 +#define MCDE_DSIVID2CONF0_BIT_SWAP(__x) \ + MCDE_VAL2REG(MCDE_DSIVID2CONF0, BIT_SWAP, __x) +#define MCDE_DSIVID2CONF0_BYTE_SWAP_SHIFT 17 +#define MCDE_DSIVID2CONF0_BYTE_SWAP_MASK 0x00020000 +#define MCDE_DSIVID2CONF0_BYTE_SWAP(__x) \ + MCDE_VAL2REG(MCDE_DSIVID2CONF0, BYTE_SWAP, __x) +#define MCDE_DSIVID2CONF0_DCSVID_NOTGEN_SHIFT 18 +#define MCDE_DSIVID2CONF0_DCSVID_NOTGEN_MASK 0x00040000 +#define MCDE_DSIVID2CONF0_DCSVID_NOTGEN(__x) \ + MCDE_VAL2REG(MCDE_DSIVID2CONF0, DCSVID_NOTGEN, __x) +#define MCDE_DSIVID2CONF0_PACKING_SHIFT 20 +#define MCDE_DSIVID2CONF0_PACKING_MASK 0x00700000 +#define MCDE_DSIVID2CONF0_PACKING_RGB565 0 +#define MCDE_DSIVID2CONF0_PACKING_RGB666 1 +#define MCDE_DSIVID2CONF0_PACKING_RGB888 2 +#define MCDE_DSIVID2CONF0_PACKING_BGR888 3 +#define MCDE_DSIVID2CONF0_PACKING_HDTV 4 +#define MCDE_DSIVID2CONF0_PACKING_ENUM(__x) \ + MCDE_VAL2REG(MCDE_DSIVID2CONF0, PACKING, \ + MCDE_DSIVID2CONF0_PACKING_##__x) +#define MCDE_DSIVID2CONF0_PACKING(__x) \ + MCDE_VAL2REG(MCDE_DSIVID2CONF0, PACKING, __x) +#define MCDE_DSICMD2CONF0 0x00000EA0 +#define MCDE_DSICMD2CONF0_BLANKING_SHIFT 0 +#define MCDE_DSICMD2CONF0_BLANKING_MASK 0x000000FF +#define MCDE_DSICMD2CONF0_BLANKING(__x) \ + 
MCDE_VAL2REG(MCDE_DSICMD2CONF0, BLANKING, __x) +#define MCDE_DSICMD2CONF0_VID_MODE_SHIFT 12 +#define MCDE_DSICMD2CONF0_VID_MODE_MASK 0x00001000 +#define MCDE_DSICMD2CONF0_VID_MODE_CMD 0 +#define MCDE_DSICMD2CONF0_VID_MODE_VID 1 +#define MCDE_DSICMD2CONF0_VID_MODE_ENUM(__x) \ + MCDE_VAL2REG(MCDE_DSICMD2CONF0, VID_MODE, \ + MCDE_DSICMD2CONF0_VID_MODE_##__x) +#define MCDE_DSICMD2CONF0_VID_MODE(__x) \ + MCDE_VAL2REG(MCDE_DSICMD2CONF0, VID_MODE, __x) +#define MCDE_DSICMD2CONF0_CMD8_SHIFT 13 +#define MCDE_DSICMD2CONF0_CMD8_MASK 0x00002000 +#define MCDE_DSICMD2CONF0_CMD8(__x) \ + MCDE_VAL2REG(MCDE_DSICMD2CONF0, CMD8, __x) +#define MCDE_DSICMD2CONF0_BIT_SWAP_SHIFT 16 +#define MCDE_DSICMD2CONF0_BIT_SWAP_MASK 0x00010000 +#define MCDE_DSICMD2CONF0_BIT_SWAP(__x) \ + MCDE_VAL2REG(MCDE_DSICMD2CONF0, BIT_SWAP, __x) +#define MCDE_DSICMD2CONF0_BYTE_SWAP_SHIFT 17 +#define MCDE_DSICMD2CONF0_BYTE_SWAP_MASK 0x00020000 +#define MCDE_DSICMD2CONF0_BYTE_SWAP(__x) \ + MCDE_VAL2REG(MCDE_DSICMD2CONF0, BYTE_SWAP, __x) +#define MCDE_DSICMD2CONF0_DCSVID_NOTGEN_SHIFT 18 +#define MCDE_DSICMD2CONF0_DCSVID_NOTGEN_MASK 0x00040000 +#define MCDE_DSICMD2CONF0_DCSVID_NOTGEN(__x) \ + MCDE_VAL2REG(MCDE_DSICMD2CONF0, DCSVID_NOTGEN, __x) +#define MCDE_DSICMD2CONF0_PACKING_SHIFT 20 +#define MCDE_DSICMD2CONF0_PACKING_MASK 0x00700000 +#define MCDE_DSICMD2CONF0_PACKING_RGB565 0 +#define MCDE_DSICMD2CONF0_PACKING_RGB666 1 +#define MCDE_DSICMD2CONF0_PACKING_RGB888 2 +#define MCDE_DSICMD2CONF0_PACKING_BGR888 3 +#define MCDE_DSICMD2CONF0_PACKING_HDTV 4 +#define MCDE_DSICMD2CONF0_PACKING_ENUM(__x) \ + MCDE_VAL2REG(MCDE_DSICMD2CONF0, PACKING, \ + MCDE_DSICMD2CONF0_PACKING_##__x) +#define MCDE_DSICMD2CONF0_PACKING(__x) \ + MCDE_VAL2REG(MCDE_DSICMD2CONF0, PACKING, __x) +#define MCDE_DSIVID0FRAME 0x00000E04 +#define MCDE_DSIVID0FRAME_GROUPOFFSET 0x20 +#define MCDE_DSIVID0FRAME_FRAME_SHIFT 0 +#define MCDE_DSIVID0FRAME_FRAME_MASK 0x00FFFFFF +#define MCDE_DSIVID0FRAME_FRAME(__x) \ + MCDE_VAL2REG(MCDE_DSIVID0FRAME, FRAME, 
__x) +#define MCDE_DSICMD0FRAME 0x00000E24 +#define MCDE_DSICMD0FRAME_FRAME_SHIFT 0 +#define MCDE_DSICMD0FRAME_FRAME_MASK 0x00FFFFFF +#define MCDE_DSICMD0FRAME_FRAME(__x) \ + MCDE_VAL2REG(MCDE_DSICMD0FRAME, FRAME, __x) +#define MCDE_DSIVID1FRAME 0x00000E44 +#define MCDE_DSIVID1FRAME_FRAME_SHIFT 0 +#define MCDE_DSIVID1FRAME_FRAME_MASK 0x00FFFFFF +#define MCDE_DSIVID1FRAME_FRAME(__x) \ + MCDE_VAL2REG(MCDE_DSIVID1FRAME, FRAME, __x) +#define MCDE_DSICMD1FRAME 0x00000E64 +#define MCDE_DSICMD1FRAME_FRAME_SHIFT 0 +#define MCDE_DSICMD1FRAME_FRAME_MASK 0x00FFFFFF +#define MCDE_DSICMD1FRAME_FRAME(__x) \ + MCDE_VAL2REG(MCDE_DSICMD1FRAME, FRAME, __x) +#define MCDE_DSIVID2FRAME 0x00000E84 +#define MCDE_DSIVID2FRAME_FRAME_SHIFT 0 +#define MCDE_DSIVID2FRAME_FRAME_MASK 0x00FFFFFF +#define MCDE_DSIVID2FRAME_FRAME(__x) \ + MCDE_VAL2REG(MCDE_DSIVID2FRAME, FRAME, __x) +#define MCDE_DSICMD2FRAME 0x00000EA4 +#define MCDE_DSICMD2FRAME_FRAME_SHIFT 0 +#define MCDE_DSICMD2FRAME_FRAME_MASK 0x00FFFFFF +#define MCDE_DSICMD2FRAME_FRAME(__x) \ + MCDE_VAL2REG(MCDE_DSICMD2FRAME, FRAME, __x) +#define MCDE_DSIVID0PKT 0x00000E08 +#define MCDE_DSIVID0PKT_GROUPOFFSET 0x20 +#define MCDE_DSIVID0PKT_PACKET_SHIFT 0 +#define MCDE_DSIVID0PKT_PACKET_MASK 0x0000FFFF +#define MCDE_DSIVID0PKT_PACKET(__x) \ + MCDE_VAL2REG(MCDE_DSIVID0PKT, PACKET, __x) +#define MCDE_DSICMD0PKT 0x00000E28 +#define MCDE_DSICMD0PKT_PACKET_SHIFT 0 +#define MCDE_DSICMD0PKT_PACKET_MASK 0x0000FFFF +#define MCDE_DSICMD0PKT_PACKET(__x) \ + MCDE_VAL2REG(MCDE_DSICMD0PKT, PACKET, __x) +#define MCDE_DSIVID1PKT 0x00000E48 +#define MCDE_DSIVID1PKT_PACKET_SHIFT 0 +#define MCDE_DSIVID1PKT_PACKET_MASK 0x0000FFFF +#define MCDE_DSIVID1PKT_PACKET(__x) \ + MCDE_VAL2REG(MCDE_DSIVID1PKT, PACKET, __x) +#define MCDE_DSICMD1PKT 0x00000E68 +#define MCDE_DSICMD1PKT_PACKET_SHIFT 0 +#define MCDE_DSICMD1PKT_PACKET_MASK 0x0000FFFF +#define MCDE_DSICMD1PKT_PACKET(__x) \ + MCDE_VAL2REG(MCDE_DSICMD1PKT, PACKET, __x) +#define MCDE_DSIVID2PKT 0x00000E88 +#define 
MCDE_DSIVID2PKT_PACKET_SHIFT 0 +#define MCDE_DSIVID2PKT_PACKET_MASK 0x0000FFFF +#define MCDE_DSIVID2PKT_PACKET(__x) \ + MCDE_VAL2REG(MCDE_DSIVID2PKT, PACKET, __x) +#define MCDE_DSICMD2PKT 0x00000EA8 +#define MCDE_DSICMD2PKT_PACKET_SHIFT 0 +#define MCDE_DSICMD2PKT_PACKET_MASK 0x0000FFFF +#define MCDE_DSICMD2PKT_PACKET(__x) \ + MCDE_VAL2REG(MCDE_DSICMD2PKT, PACKET, __x) +#define MCDE_DSIVID0SYNC 0x00000E0C +#define MCDE_DSIVID0SYNC_GROUPOFFSET 0x20 +#define MCDE_DSIVID0SYNC_DMA_SHIFT 0 +#define MCDE_DSIVID0SYNC_DMA_MASK 0x00000FFF +#define MCDE_DSIVID0SYNC_DMA(__x) \ + MCDE_VAL2REG(MCDE_DSIVID0SYNC, DMA, __x) +#define MCDE_DSIVID0SYNC_SW_SHIFT 16 +#define MCDE_DSIVID0SYNC_SW_MASK 0x0FFF0000 +#define MCDE_DSIVID0SYNC_SW(__x) \ + MCDE_VAL2REG(MCDE_DSIVID0SYNC, SW, __x) +#define MCDE_DSICMD0SYNC 0x00000E2C +#define MCDE_DSICMD0SYNC_DMA_SHIFT 0 +#define MCDE_DSICMD0SYNC_DMA_MASK 0x00000FFF +#define MCDE_DSICMD0SYNC_DMA(__x) \ + MCDE_VAL2REG(MCDE_DSICMD0SYNC, DMA, __x) +#define MCDE_DSICMD0SYNC_SW_SHIFT 16 +#define MCDE_DSICMD0SYNC_SW_MASK 0x0FFF0000 +#define MCDE_DSICMD0SYNC_SW(__x) \ + MCDE_VAL2REG(MCDE_DSICMD0SYNC, SW, __x) +#define MCDE_DSIVID1SYNC 0x00000E4C +#define MCDE_DSIVID1SYNC_DMA_SHIFT 0 +#define MCDE_DSIVID1SYNC_DMA_MASK 0x00000FFF +#define MCDE_DSIVID1SYNC_DMA(__x) \ + MCDE_VAL2REG(MCDE_DSIVID1SYNC, DMA, __x) +#define MCDE_DSIVID1SYNC_SW_SHIFT 16 +#define MCDE_DSIVID1SYNC_SW_MASK 0x0FFF0000 +#define MCDE_DSIVID1SYNC_SW(__x) \ + MCDE_VAL2REG(MCDE_DSIVID1SYNC, SW, __x) +#define MCDE_DSICMD1SYNC 0x00000E6C +#define MCDE_DSICMD1SYNC_DMA_SHIFT 0 +#define MCDE_DSICMD1SYNC_DMA_MASK 0x00000FFF +#define MCDE_DSICMD1SYNC_DMA(__x) \ + MCDE_VAL2REG(MCDE_DSICMD1SYNC, DMA, __x) +#define MCDE_DSICMD1SYNC_SW_SHIFT 16 +#define MCDE_DSICMD1SYNC_SW_MASK 0x0FFF0000 +#define MCDE_DSICMD1SYNC_SW(__x) \ + MCDE_VAL2REG(MCDE_DSICMD1SYNC, SW, __x) +#define MCDE_DSIVID2SYNC 0x00000E8C +#define MCDE_DSIVID2SYNC_DMA_SHIFT 0 +#define MCDE_DSIVID2SYNC_DMA_MASK 0x00000FFF +#define 
MCDE_DSIVID2SYNC_DMA(__x) \ + MCDE_VAL2REG(MCDE_DSIVID2SYNC, DMA, __x) +#define MCDE_DSIVID2SYNC_SW_SHIFT 16 +#define MCDE_DSIVID2SYNC_SW_MASK 0x0FFF0000 +#define MCDE_DSIVID2SYNC_SW(__x) \ + MCDE_VAL2REG(MCDE_DSIVID2SYNC, SW, __x) +#define MCDE_DSICMD2SYNC 0x00000EAC +#define MCDE_DSICMD2SYNC_DMA_SHIFT 0 +#define MCDE_DSICMD2SYNC_DMA_MASK 0x00000FFF +#define MCDE_DSICMD2SYNC_DMA(__x) \ + MCDE_VAL2REG(MCDE_DSICMD2SYNC, DMA, __x) +#define MCDE_DSICMD2SYNC_SW_SHIFT 16 +#define MCDE_DSICMD2SYNC_SW_MASK 0x0FFF0000 +#define MCDE_DSICMD2SYNC_SW(__x) \ + MCDE_VAL2REG(MCDE_DSICMD2SYNC, SW, __x) +#define MCDE_DSIVID0CMDW 0x00000E10 +#define MCDE_DSIVID0CMDW_GROUPOFFSET 0x20 +#define MCDE_DSIVID0CMDW_CMDW_CONTINUE_SHIFT 0 +#define MCDE_DSIVID0CMDW_CMDW_CONTINUE_MASK 0x0000FFFF +#define MCDE_DSIVID0CMDW_CMDW_CONTINUE(__x) \ + MCDE_VAL2REG(MCDE_DSIVID0CMDW, CMDW_CONTINUE, __x) +#define MCDE_DSIVID0CMDW_CMDW_START_SHIFT 16 +#define MCDE_DSIVID0CMDW_CMDW_START_MASK 0xFFFF0000 +#define MCDE_DSIVID0CMDW_CMDW_START(__x) \ + MCDE_VAL2REG(MCDE_DSIVID0CMDW, CMDW_START, __x) +#define MCDE_DSICMD0CMDW 0x00000E30 +#define MCDE_DSICMD0CMDW_CMDW_CONTINUE_SHIFT 0 +#define MCDE_DSICMD0CMDW_CMDW_CONTINUE_MASK 0x0000FFFF +#define MCDE_DSICMD0CMDW_CMDW_CONTINUE(__x) \ + MCDE_VAL2REG(MCDE_DSICMD0CMDW, CMDW_CONTINUE, __x) +#define MCDE_DSICMD0CMDW_CMDW_START_SHIFT 16 +#define MCDE_DSICMD0CMDW_CMDW_START_MASK 0xFFFF0000 +#define MCDE_DSICMD0CMDW_CMDW_START(__x) \ + MCDE_VAL2REG(MCDE_DSICMD0CMDW, CMDW_START, __x) +#define MCDE_DSIVID1CMDW 0x00000E50 +#define MCDE_DSIVID1CMDW_CMDW_CONTINUE_SHIFT 0 +#define MCDE_DSIVID1CMDW_CMDW_CONTINUE_MASK 0x0000FFFF +#define MCDE_DSIVID1CMDW_CMDW_CONTINUE(__x) \ + MCDE_VAL2REG(MCDE_DSIVID1CMDW, CMDW_CONTINUE, __x) +#define MCDE_DSIVID1CMDW_CMDW_START_SHIFT 16 +#define MCDE_DSIVID1CMDW_CMDW_START_MASK 0xFFFF0000 +#define MCDE_DSIVID1CMDW_CMDW_START(__x) \ + MCDE_VAL2REG(MCDE_DSIVID1CMDW, CMDW_START, __x) +#define MCDE_DSICMD1CMDW 0x00000E70 +#define 
MCDE_DSICMD1CMDW_CMDW_CONTINUE_SHIFT 0 +#define MCDE_DSICMD1CMDW_CMDW_CONTINUE_MASK 0x0000FFFF +#define MCDE_DSICMD1CMDW_CMDW_CONTINUE(__x) \ + MCDE_VAL2REG(MCDE_DSICMD1CMDW, CMDW_CONTINUE, __x) +#define MCDE_DSICMD1CMDW_CMDW_START_SHIFT 16 +#define MCDE_DSICMD1CMDW_CMDW_START_MASK 0xFFFF0000 +#define MCDE_DSICMD1CMDW_CMDW_START(__x) \ + MCDE_VAL2REG(MCDE_DSICMD1CMDW, CMDW_START, __x) +#define MCDE_DSIVID2CMDW 0x00000E90 +#define MCDE_DSIVID2CMDW_CMDW_CONTINUE_SHIFT 0 +#define MCDE_DSIVID2CMDW_CMDW_CONTINUE_MASK 0x0000FFFF +#define MCDE_DSIVID2CMDW_CMDW_CONTINUE(__x) \ + MCDE_VAL2REG(MCDE_DSIVID2CMDW, CMDW_CONTINUE, __x) +#define MCDE_DSIVID2CMDW_CMDW_START_SHIFT 16 +#define MCDE_DSIVID2CMDW_CMDW_START_MASK 0xFFFF0000 +#define MCDE_DSIVID2CMDW_CMDW_START(__x) \ + MCDE_VAL2REG(MCDE_DSIVID2CMDW, CMDW_START, __x) +#define MCDE_DSICMD2CMDW 0x00000EB0 +#define MCDE_DSICMD2CMDW_CMDW_CONTINUE_SHIFT 0 +#define MCDE_DSICMD2CMDW_CMDW_CONTINUE_MASK 0x0000FFFF +#define MCDE_DSICMD2CMDW_CMDW_CONTINUE(__x) \ + MCDE_VAL2REG(MCDE_DSICMD2CMDW, CMDW_CONTINUE, __x) +#define MCDE_DSICMD2CMDW_CMDW_START_SHIFT 16 +#define MCDE_DSICMD2CMDW_CMDW_START_MASK 0xFFFF0000 +#define MCDE_DSICMD2CMDW_CMDW_START(__x) \ + MCDE_VAL2REG(MCDE_DSICMD2CMDW, CMDW_START, __x) +#define MCDE_DSIVID0DELAY0 0x00000E14 +#define MCDE_DSIVID0DELAY0_GROUPOFFSET 0x20 +#define MCDE_DSIVID0DELAY0_INTPKTDEL_SHIFT 0 +#define MCDE_DSIVID0DELAY0_INTPKTDEL_MASK 0x0000FFFF +#define MCDE_DSIVID0DELAY0_INTPKTDEL(__x) \ + MCDE_VAL2REG(MCDE_DSIVID0DELAY0, INTPKTDEL, __x) +#define MCDE_DSICMD0DELAY0 0x00000E34 +#define MCDE_DSICMD0DELAY0_INTPKTDEL_SHIFT 0 +#define MCDE_DSICMD0DELAY0_INTPKTDEL_MASK 0x0000FFFF +#define MCDE_DSICMD0DELAY0_INTPKTDEL(__x) \ + MCDE_VAL2REG(MCDE_DSICMD0DELAY0, INTPKTDEL, __x) +#define MCDE_DSIVID1DELAY0 0x00000E54 +#define MCDE_DSIVID1DELAY0_INTPKTDEL_SHIFT 0 +#define MCDE_DSIVID1DELAY0_INTPKTDEL_MASK 0x0000FFFF +#define MCDE_DSIVID1DELAY0_INTPKTDEL(__x) \ + MCDE_VAL2REG(MCDE_DSIVID1DELAY0, 
INTPKTDEL, __x) +#define MCDE_DSICMD1DELAY0 0x00000E74 +#define MCDE_DSICMD1DELAY0_INTPKTDEL_SHIFT 0 +#define MCDE_DSICMD1DELAY0_INTPKTDEL_MASK 0x0000FFFF +#define MCDE_DSICMD1DELAY0_INTPKTDEL(__x) \ + MCDE_VAL2REG(MCDE_DSICMD1DELAY0, INTPKTDEL, __x) +#define MCDE_DSIVID2DELAY0 0x00000E94 +#define MCDE_DSIVID2DELAY0_INTPKTDEL_SHIFT 0 +#define MCDE_DSIVID2DELAY0_INTPKTDEL_MASK 0x0000FFFF +#define MCDE_DSIVID2DELAY0_INTPKTDEL(__x) \ + MCDE_VAL2REG(MCDE_DSIVID2DELAY0, INTPKTDEL, __x) +#define MCDE_DSICMD2DELAY0 0x00000EB4 +#define MCDE_DSICMD2DELAY0_INTPKTDEL_SHIFT 0 +#define MCDE_DSICMD2DELAY0_INTPKTDEL_MASK 0x0000FFFF +#define MCDE_DSICMD2DELAY0_INTPKTDEL(__x) \ + MCDE_VAL2REG(MCDE_DSICMD2DELAY0, INTPKTDEL, __x) +#define MCDE_DSIVID0DELAY1 0x00000E18 +#define MCDE_DSIVID0DELAY1_GROUPOFFSET 0x20 +#define MCDE_DSIVID0DELAY1_TEREQDEL_SHIFT 0 +#define MCDE_DSIVID0DELAY1_TEREQDEL_MASK 0x00000FFF +#define MCDE_DSIVID0DELAY1_TEREQDEL(__x) \ + MCDE_VAL2REG(MCDE_DSIVID0DELAY1, TEREQDEL, __x) +#define MCDE_DSIVID0DELAY1_FRAMESTARTDEL_SHIFT 16 +#define MCDE_DSIVID0DELAY1_FRAMESTARTDEL_MASK 0x00FF0000 +#define MCDE_DSIVID0DELAY1_FRAMESTARTDEL(__x) \ + MCDE_VAL2REG(MCDE_DSIVID0DELAY1, FRAMESTARTDEL, __x) +#define MCDE_DSICMD0DELAY1 0x00000E38 +#define MCDE_DSICMD0DELAY1_TEREQDEL_SHIFT 0 +#define MCDE_DSICMD0DELAY1_TEREQDEL_MASK 0x00000FFF +#define MCDE_DSICMD0DELAY1_TEREQDEL(__x) \ + MCDE_VAL2REG(MCDE_DSICMD0DELAY1, TEREQDEL, __x) +#define MCDE_DSICMD0DELAY1_FRAMESTARTDEL_SHIFT 16 +#define MCDE_DSICMD0DELAY1_FRAMESTARTDEL_MASK 0x00FF0000 +#define MCDE_DSICMD0DELAY1_FRAMESTARTDEL(__x) \ + MCDE_VAL2REG(MCDE_DSICMD0DELAY1, FRAMESTARTDEL, __x) +#define MCDE_DSIVID1DELAY1 0x00000E58 +#define MCDE_DSIVID1DELAY1_TEREQDEL_SHIFT 0 +#define MCDE_DSIVID1DELAY1_TEREQDEL_MASK 0x00000FFF +#define MCDE_DSIVID1DELAY1_TEREQDEL(__x) \ + MCDE_VAL2REG(MCDE_DSIVID1DELAY1, TEREQDEL, __x) +#define MCDE_DSIVID1DELAY1_FRAMESTARTDEL_SHIFT 16 +#define MCDE_DSIVID1DELAY1_FRAMESTARTDEL_MASK 0x00FF0000 
+#define MCDE_DSIVID1DELAY1_FRAMESTARTDEL(__x) \ + MCDE_VAL2REG(MCDE_DSIVID1DELAY1, FRAMESTARTDEL, __x) +#define MCDE_DSICMD1DELAY1 0x00000E78 +#define MCDE_DSICMD1DELAY1_TEREQDEL_SHIFT 0 +#define MCDE_DSICMD1DELAY1_TEREQDEL_MASK 0x00000FFF +#define MCDE_DSICMD1DELAY1_TEREQDEL(__x) \ + MCDE_VAL2REG(MCDE_DSICMD1DELAY1, TEREQDEL, __x) +#define MCDE_DSICMD1DELAY1_FRAMESTARTDEL_SHIFT 16 +#define MCDE_DSICMD1DELAY1_FRAMESTARTDEL_MASK 0x00FF0000 +#define MCDE_DSICMD1DELAY1_FRAMESTARTDEL(__x) \ + MCDE_VAL2REG(MCDE_DSICMD1DELAY1, FRAMESTARTDEL, __x) +#define MCDE_DSIVID2DELAY1 0x00000E98 +#define MCDE_DSIVID2DELAY1_TEREQDEL_SHIFT 0 +#define MCDE_DSIVID2DELAY1_TEREQDEL_MASK 0x00000FFF +#define MCDE_DSIVID2DELAY1_TEREQDEL(__x) \ + MCDE_VAL2REG(MCDE_DSIVID2DELAY1, TEREQDEL, __x) +#define MCDE_DSIVID2DELAY1_FRAMESTARTDEL_SHIFT 16 +#define MCDE_DSIVID2DELAY1_FRAMESTARTDEL_MASK 0x00FF0000 +#define MCDE_DSIVID2DELAY1_FRAMESTARTDEL(__x) \ + MCDE_VAL2REG(MCDE_DSIVID2DELAY1, FRAMESTARTDEL, __x) +#define MCDE_DSICMD2DELAY1 0x00000EB8 +#define MCDE_DSICMD2DELAY1_TEREQDEL_SHIFT 0 +#define MCDE_DSICMD2DELAY1_TEREQDEL_MASK 0x00000FFF +#define MCDE_DSICMD2DELAY1_TEREQDEL(__x) \ + MCDE_VAL2REG(MCDE_DSICMD2DELAY1, TEREQDEL, __x) +#define MCDE_DSICMD2DELAY1_FRAMESTARTDEL_SHIFT 16 +#define MCDE_DSICMD2DELAY1_FRAMESTARTDEL_MASK 0x00FF0000 +#define MCDE_DSICMD2DELAY1_FRAMESTARTDEL(__x) \ + MCDE_VAL2REG(MCDE_DSICMD2DELAY1, FRAMESTARTDEL, __x) diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 37096246c93..14e568be223 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -349,6 +349,22 @@ config IMX2_WDT To compile this driver as a module, choose M here: the module will be called imx2_wdt. +config UX500_WATCHDOG + bool "ST-Ericsson Ux500 watchdog" + depends on UX500_SOC_DB8500 || UX500_SOC_DB5500 + default y + help + Say Y here to include Watchdog timer support for the + watchdog existing in the prcmu of ST-Ericsson Ux500 series platforms. 
+ This watchdog is used to reset the system and thus cannot be + compiled as a module. + +config UX500_WATCHDOG_DEBUG + bool "ST-Ericsson Ux500 watchdog DEBUG" + depends on (UX500_SOC_DB8500 || UX500_SOC_DB5500) && DEBUG_FS + help + Say Y here to add various debugfs entries in wdog/ + # AVR32 Architecture config AT32AP700X_WDT diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index e8f479a1640..738a0f3ad21 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -53,6 +53,7 @@ obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o +obj-$(CONFIG_UX500_WATCHDOG) += ux500_wdt.o # AVR32 Architecture obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c index 7c741dc987b..96ae2465c25 100644 --- a/drivers/watchdog/mpcore_wdt.c +++ b/drivers/watchdog/mpcore_wdt.c @@ -35,11 +35,13 @@ #include <linux/uaccess.h> #include <linux/slab.h> #include <linux/io.h> +#include <linux/cpufreq.h> +#include <linux/kexec.h> #include <asm/smp_twd.h> struct mpcore_wdt { - unsigned long timer_alive; + cpumask_t timer_alive; struct device *dev; void __iomem *base; int irq; @@ -50,6 +52,8 @@ struct mpcore_wdt { static struct platform_device *mpcore_wdt_pdev; static DEFINE_SPINLOCK(wdt_lock); +static DEFINE_PER_CPU(unsigned long, mpcore_wdt_rate); + #define TIMER_MARGIN 60 static int mpcore_margin = TIMER_MARGIN; module_param(mpcore_margin, int, 0); @@ -70,6 +74,8 @@ MODULE_PARM_DESC(mpcore_noboot, "MPcore watchdog action, " "set to 1 to ignore reboots, 0 to reboot (default=" __MODULE_STRING(ONLY_TESTING) ")"); +#define MPCORE_WDT_PERIPHCLK_PRESCALER 2 + /* * This is the interrupt handler. Note that we only use this * in testing mode, so don't actually do a reboot here. 
@@ -102,9 +108,8 @@ static void mpcore_wdt_keepalive(struct mpcore_wdt *wdt) spin_lock(&wdt_lock); /* Assume prescale is set to 256 */ - count = __raw_readl(wdt->base + TWD_WDOG_COUNTER); - count = (0xFFFFFFFFU - count) * (HZ / 5); - count = (count / 256) * mpcore_margin; + count = per_cpu(mpcore_wdt_rate, smp_processor_id()) / 256; + count = count*mpcore_margin; /* Reload the counter */ writel(count + wdt->perturb, wdt->base + TWD_WDOG_LOAD); @@ -112,6 +117,56 @@ static void mpcore_wdt_keepalive(struct mpcore_wdt *wdt) spin_unlock(&wdt_lock); } +static void mpcore_wdt_set_rate(unsigned long new_rate) +{ + unsigned long count; + unsigned long long rate_tmp; + unsigned long old_rate; + + spin_lock(&wdt_lock); + old_rate = per_cpu(mpcore_wdt_rate, smp_processor_id()); + per_cpu(mpcore_wdt_rate, smp_processor_id()) = new_rate; + + if (mpcore_wdt_dev) { + struct mpcore_wdt *wdt = platform_get_drvdata(mpcore_wdt_dev); + count = readl(wdt->base + TWD_WDOG_COUNTER); + /* The goal: count = count * (new_rate/old_rate); */ + rate_tmp = (unsigned long long)count * new_rate; + do_div(rate_tmp, old_rate); + count = rate_tmp; + writel(count + wdt->perturb, wdt->base + TWD_WDOG_LOAD); + wdt->perturb = wdt->perturb ? 
0 : 1; + } + spin_unlock(&wdt_lock); +} + +static void mpcore_wdt_update_cpu_frequency_on_cpu(void *data) +{ + struct cpufreq_freqs *freq = data; + mpcore_wdt_set_rate((freq->new * 1000) / + MPCORE_WDT_PERIPHCLK_PRESCALER); +} + +static int mpcore_wdt_cpufreq_notifier(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct cpufreq_freqs *freq = data; + + if (event == CPUFREQ_RESUMECHANGE || + (event == CPUFREQ_PRECHANGE && freq->new > freq->old) || + (event == CPUFREQ_POSTCHANGE && freq->new < freq->old)) + smp_call_function_single(freq->cpu, + mpcore_wdt_update_cpu_frequency_on_cpu, + freq, 1); + + return 0; +} + +static struct notifier_block mpcore_wdt_cpufreq_notifier_block = { + .notifier_call = mpcore_wdt_cpufreq_notifier, +}; + + static void mpcore_wdt_stop(struct mpcore_wdt *wdt) { spin_lock(&wdt_lock); @@ -146,6 +201,20 @@ static int mpcore_wdt_set_heartbeat(int t) return 0; } +static int mpcore_wdt_stop_notifier(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct mpcore_wdt *wdt = platform_get_drvdata(mpcore_wdt_dev); + printk(KERN_INFO "Stopping watchdog on non-crashing core %u\n", + smp_processor_id()); + mpcore_wdt_stop(wdt); + return NOTIFY_STOP; +} + +static struct notifier_block mpcore_wdt_stop_block = { + .notifier_call = mpcore_wdt_stop_notifier, +}; + /* * /dev/watchdog handling */ @@ -153,7 +222,7 @@ static int mpcore_wdt_open(struct inode *inode, struct file *file) { struct mpcore_wdt *wdt = platform_get_drvdata(mpcore_wdt_pdev); - if (test_and_set_bit(0, &wdt->timer_alive)) + if (cpumask_test_and_set_cpu(smp_processor_id(), &wdt->timer_alive)) return -EBUSY; if (nowayout) @@ -161,6 +230,9 @@ static int mpcore_wdt_open(struct inode *inode, struct file *file) file->private_data = wdt; + atomic_notifier_chain_register(&crash_percpu_notifier_list, + &mpcore_wdt_stop_block); + /* * Activate timer */ @@ -184,7 +256,7 @@ static int mpcore_wdt_release(struct inode *inode, struct file *file) "unexpected close, 
not stopping watchdog!\n"); mpcore_wdt_keepalive(wdt); } - clear_bit(0, &wdt->timer_alive); + cpumask_clear_cpu(smp_processor_id(), &wdt->timer_alive); wdt->expect_close = 0; return 0; } @@ -427,6 +499,8 @@ static struct platform_driver mpcore_wdt_driver = { static int __init mpcore_wdt_init(void) { + int i; + /* * Check that the margin value is within it's range; * if not reset to the default @@ -437,6 +511,18 @@ static int __init mpcore_wdt_init(void) TIMER_MARGIN); } + cpufreq_register_notifier(&mpcore_wdt_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + for_each_online_cpu(i) + per_cpu(mpcore_wdt_rate, i) = + (cpufreq_get(i) * 1000) / MPCORE_WDT_PERIPHCLK_PRESCALER; + + for_each_online_cpu(i) + pr_info("mpcore_wdt: rate for core %d is %lu.%02luMHz.\n", i, + per_cpu(mpcore_wdt_rate, i) / 1000000, + (per_cpu(mpcore_wdt_rate, i) / 10000) % 100); + pr_info("MPcore Watchdog Timer: 0.1. mpcore_noboot=%d mpcore_margin=%d sec (nowayout= %d)\n", mpcore_noboot, mpcore_margin, nowayout); diff --git a/drivers/watchdog/ux500_wdt.c b/drivers/watchdog/ux500_wdt.c new file mode 100644 index 00000000000..a1e8c2dbf10 --- /dev/null +++ b/drivers/watchdog/ux500_wdt.c @@ -0,0 +1,454 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * License Terms: GNU General Public License v2 + * + * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson + * + * Heavily based upon geodewdt.c + */ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/moduleparam.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/err.h> +#include <linux/uaccess.h> +#include <linux/miscdevice.h> +#include <linux/watchdog.h> +#include <linux/platform_device.h> +#include <linux/mfd/dbx500-prcmu.h> + +#define WATCHDOG_TIMEOUT 600 /* 10 minutes */ + +#define WDT_FLAGS_OPEN 1 +#define WDT_FLAGS_ORPHAN 2 + +static unsigned long wdt_flags; + +static int timeout = WATCHDOG_TIMEOUT; +module_param(timeout, int, 0); +MODULE_PARM_DESC(timeout, + "Watchdog timeout 
in seconds. default=" + __MODULE_STRING(WATCHDOG_TIMEOUT) "."); + +static int nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, int, 0); +MODULE_PARM_DESC(nowayout, + "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); +static u8 wdog_id; +static bool wdt_en; +static bool wdt_auto_off = false; +static bool safe_close; + +static int ux500_wdt_open(struct inode *inode, struct file *file) +{ + if (!timeout) + return -ENODEV; + + if (test_and_set_bit(WDT_FLAGS_OPEN, &wdt_flags)) + return -EBUSY; + + if (!test_and_clear_bit(WDT_FLAGS_ORPHAN, &wdt_flags)) + __module_get(THIS_MODULE); + + prcmu_enable_a9wdog(wdog_id); + wdt_en = true; + + return nonseekable_open(inode, file); +} + +static int ux500_wdt_release(struct inode *inode, struct file *file) +{ + if (safe_close) { + prcmu_disable_a9wdog(wdog_id); + module_put(THIS_MODULE); + } else { + pr_crit("Unexpected close - watchdog is not stopping.\n"); + prcmu_kick_a9wdog(wdog_id); + + set_bit(WDT_FLAGS_ORPHAN, &wdt_flags); + } + + clear_bit(WDT_FLAGS_OPEN, &wdt_flags); + safe_close = false; + return 0; +} + +static ssize_t ux500_wdt_write(struct file *file, const char __user *data, + size_t len, loff_t *ppos) +{ + if (!len) + return len; + + if (!nowayout) { + size_t i; + safe_close = false; + + for (i = 0; i != len; i++) { + char c; + + if (get_user(c, data + i)) + return -EFAULT; + + if (c == 'V') + safe_close = true; + } + } + + prcmu_kick_a9wdog(wdog_id); + + return len; +} + +static long ux500_wdt_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + void __user *argp = (void __user *)arg; + int __user *p = argp; + int interval; + + static const struct watchdog_info ident = { + .options = WDIOF_SETTIMEOUT | + WDIOF_KEEPALIVEPING | + WDIOF_MAGICCLOSE, + .firmware_version = 1, + .identity = "Ux500 WDT", + }; + + switch (cmd) { + case WDIOC_GETSUPPORT: + return copy_to_user(argp, &ident, + sizeof(ident)) ? 
-EFAULT : 0; + + case WDIOC_GETSTATUS: + case WDIOC_GETBOOTSTATUS: + return put_user(0, p); + + case WDIOC_SETOPTIONS: + { + int options; + int ret = -EINVAL; + + if (get_user(options, p)) + return -EFAULT; + + if (options & WDIOS_DISABLECARD) { + prcmu_disable_a9wdog(wdog_id); + wdt_en = false; + ret = 0; + } + + if (options & WDIOS_ENABLECARD) { + prcmu_enable_a9wdog(wdog_id); + wdt_en = true; + ret = 0; + } + + return ret; + } + case WDIOC_KEEPALIVE: + return prcmu_kick_a9wdog(wdog_id); + + case WDIOC_SETTIMEOUT: + if (get_user(interval, p)) + return -EFAULT; + + if (cpu_is_u8500()) { + /* 28 bit resolution in ms, becomes 268435.455 s */ + if (interval > 268435 || interval < 0) + return -EINVAL; + } else if (cpu_is_u5500()) { + /* 32 bit resolution in ms, becomes 4294967.295 s */ + if (interval > 4294967 || interval < 0) + return -EINVAL; + } else + return -EINVAL; + + timeout = interval; + prcmu_disable_a9wdog(wdog_id); + prcmu_load_a9wdog(wdog_id, timeout * 1000); + prcmu_enable_a9wdog(wdog_id); + + /* Fall through */ + case WDIOC_GETTIMEOUT: + return put_user(timeout, p); + + default: + return -ENOTTY; + } + + return 0; +} + +static const struct file_operations ux500_wdt_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .write = ux500_wdt_write, + .unlocked_ioctl = ux500_wdt_ioctl, + .open = ux500_wdt_open, + .release = ux500_wdt_release, +}; + +static struct miscdevice ux500_wdt_miscdev = { + .minor = WATCHDOG_MINOR, + .name = "watchdog", + .fops = &ux500_wdt_fops, +}; + +#ifdef CONFIG_UX500_WATCHDOG_DEBUG +enum wdog_dbg { + WDOG_DBG_CONFIG, + WDOG_DBG_LOAD, + WDOG_DBG_KICK, + WDOG_DBG_EN, + WDOG_DBG_DIS, +}; + +static ssize_t wdog_dbg_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + unsigned long val; + int err; + enum wdog_dbg v = (enum wdog_dbg)((struct seq_file *) + (file->private_data))->private; + + switch(v) { + case WDOG_DBG_CONFIG: + err = kstrtoul_from_user(user_buf, count, 0, &val); + + if (!err) { 
+ wdt_auto_off = val != 0; + (void) prcmu_config_a9wdog(1, + wdt_auto_off); + } + else { + pr_err("ux500_wdt:dbg: unknown value\n"); + } + break; + case WDOG_DBG_LOAD: + err = kstrtoul_from_user(user_buf, count, 0, &val); + + if (!err) { + timeout = val; + /* Convert seconds to ms */ + prcmu_disable_a9wdog(wdog_id); + prcmu_load_a9wdog(wdog_id, timeout * 1000); + prcmu_enable_a9wdog(wdog_id); + } + else { + pr_err("ux500_wdt:dbg: unknown value\n"); + } + break; + case WDOG_DBG_KICK: + (void) prcmu_kick_a9wdog(wdog_id); + break; + case WDOG_DBG_EN: + wdt_en = true; + (void) prcmu_enable_a9wdog(wdog_id); + break; + case WDOG_DBG_DIS: + wdt_en = false; + (void) prcmu_disable_a9wdog(wdog_id); + break; + } + + return count; +} + +static int wdog_dbg_read(struct seq_file *s, void *p) +{ + enum wdog_dbg v = (enum wdog_dbg)s->private; + + switch(v) { + case WDOG_DBG_CONFIG: + seq_printf(s,"wdog is on id %d, auto off on sleep: %s\n", + (int)wdog_id, + wdt_auto_off ? "enabled": "disabled"); + break; + case WDOG_DBG_LOAD: + /* In 1s */ + seq_printf(s, "wdog load is: %d s\n", + timeout); + break; + case WDOG_DBG_KICK: + break; + case WDOG_DBG_EN: + case WDOG_DBG_DIS: + seq_printf(s, "wdog is %sabled\n", + wdt_en ? 
"en" : "dis"); + break; + } + return 0; +} + +static int wdog_dbg_open(struct inode *inode, + struct file *file) +{ + return single_open(file, wdog_dbg_read, inode->i_private); +} + +static const struct file_operations wdog_dbg_fops = { + .open = wdog_dbg_open, + .write = wdog_dbg_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int __init wdog_dbg_init(void) +{ + struct dentry *wdog_dir; + + wdog_dir = debugfs_create_dir("wdog", NULL); + if (IS_ERR_OR_NULL(wdog_dir)) + goto fail; + + if (IS_ERR_OR_NULL(debugfs_create_u8("id", + S_IWUGO | S_IRUGO, wdog_dir, + &wdog_id))) + goto fail; + + if (IS_ERR_OR_NULL(debugfs_create_file("config", + S_IWUGO | S_IRUGO, wdog_dir, + (void *)WDOG_DBG_CONFIG, + &wdog_dbg_fops))) + goto fail; + + if (IS_ERR_OR_NULL(debugfs_create_file("load", + S_IWUGO | S_IRUGO, wdog_dir, + (void *)WDOG_DBG_LOAD, + &wdog_dbg_fops))) + goto fail; + + if (IS_ERR_OR_NULL(debugfs_create_file("kick", + S_IWUGO, wdog_dir, + (void *)WDOG_DBG_KICK, + &wdog_dbg_fops))) + goto fail; + + if (IS_ERR_OR_NULL(debugfs_create_file("enable", + S_IWUGO | S_IRUGO, wdog_dir, + (void *)WDOG_DBG_EN, + &wdog_dbg_fops))) + goto fail; + + if (IS_ERR_OR_NULL(debugfs_create_file("disable", + S_IWUGO | S_IRUGO, wdog_dir, + (void *)WDOG_DBG_DIS, + &wdog_dbg_fops))) + goto fail; + + return 0; +fail: + pr_err("ux500:wdog: Failed to initialize wdog dbg\n"); + debugfs_remove_recursive(wdog_dir); + + return -EFAULT; +} + +#else +static inline int __init wdog_dbg_init(void) +{ + return 0; +} +#endif + +static int __init ux500_wdt_probe(struct platform_device *pdev) +{ + int ret; + + /* Number of watch dogs */ + prcmu_config_a9wdog(1, wdt_auto_off); + /* convert to ms */ + prcmu_load_a9wdog(wdog_id, timeout * 1000); + + ret = misc_register(&ux500_wdt_miscdev); + if (ret < 0) { + dev_err(&pdev->dev, "failed to register misc\n"); + return ret; + } + + ret = wdog_dbg_init(); + if (ret < 0) + goto fail; + + 
dev_info(&pdev->dev, "initialized\n"); + + return 0; +fail: + misc_deregister(&ux500_wdt_miscdev); + return ret; +} + +static int __exit ux500_wdt_remove(struct platform_device *dev) +{ + prcmu_disable_a9wdog(wdog_id); + wdt_en = false; + misc_deregister(&ux500_wdt_miscdev); + return 0; +} +#ifdef CONFIG_PM +static int ux500_wdt_suspend(struct platform_device *pdev, + pm_message_t state) +{ + if (wdt_en && cpu_is_u5500()) { + prcmu_disable_a9wdog(wdog_id); + return 0; + } + + if (wdt_en && !wdt_auto_off) { + prcmu_disable_a9wdog(wdog_id); + prcmu_config_a9wdog(1, true); + + prcmu_load_a9wdog(wdog_id, timeout * 1000); + prcmu_enable_a9wdog(wdog_id); + } + return 0; +} + +static int ux500_wdt_resume(struct platform_device *pdev) +{ + if (wdt_en && cpu_is_u5500()) { + prcmu_load_a9wdog(wdog_id, timeout * 1000); + prcmu_enable_a9wdog(wdog_id); + return 0; + } + + if (wdt_en && !wdt_auto_off) { + prcmu_disable_a9wdog(wdog_id); + prcmu_config_a9wdog(1, wdt_auto_off); + + prcmu_load_a9wdog(wdog_id, timeout * 1000); + prcmu_enable_a9wdog(wdog_id); + } + return 0; +} + +#else +#define ux500_wdt_suspend NULL +#define ux500_wdt_resume NULL +#endif +static struct platform_driver ux500_wdt_driver = { + .remove = __exit_p(ux500_wdt_remove), + .driver = { + .owner = THIS_MODULE, + .name = "ux500_wdt", + }, + .suspend = ux500_wdt_suspend, + .resume = ux500_wdt_resume, +}; + +static int __init ux500_wdt_init(void) +{ + return platform_driver_probe(&ux500_wdt_driver, ux500_wdt_probe); +} +module_init(ux500_wdt_init); + +MODULE_AUTHOR("Jonas Aaberg <jonas.aberg@stericsson.com>"); +MODULE_DESCRIPTION("Ux500 Watchdog Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); |