author    Vincent Guittot <vincent.guittot@stericsson.com>    2011-04-11 14:37:04 +0200
committer Ulf Hansson <ulf.hansson@stericsson.com>            2011-09-19 15:56:12 +0200
commit    594d18437bd58f550f82b12d779653b1f18dad23 (patch)
tree      7ca43192c1539fbe1d97c2808c28b2b49722b2da
parent    7759be585e6774826da8fb0d12daf7164292fdcc (diff)
ux500: pm: add use-case governor
Signed-off-by: Vincent Guittot <vincent.guittot@stericsson.com>
Signed-off-by: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>

[ANDROID]: ux500: pm: update sysfs interface for usecase governor

A separate sysfs node is added for each use-case. It is now possible to
enable multiple use-cases simultaneously. If multiple use-cases are
enabled, the configurations of all enabled use-cases are compared and
the configuration requesting the highest performance is selected.

Signed-off-by: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>

[ANDROID]: ux500: pm: usecase-gov: Act on interrupts per second

If the number of interrupts per second is above a certain level, exit
from the requested mode.

[ANDROID]: ux500: pm: update low-power-audio config in usecase governor

Update the low-power-audio configuration to plug out the second CPU.
This makes the auto, voice-call and low-power-audio configurations the
same. Auto mode is activated by default (without any user-space
interaction), which makes the voice-call and low-power-audio
configurations redundant. However, analysis work is still ongoing for
other settings that may become part of the usecase governor, so all
configurations are kept in there for now.

Signed-off-by: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>

[ANDROID]: ux500: pm: update usecase governor dependencies in Kconfig

[ANDROID]: ARM: ux500: pm: usecase gov depends on PM

Signed-off-by: Jonas Aaberg <jonas.aberg@stericsson.com>

[ANDROID]: ux500: pm: minor code cleanup in UC governor

Signed-off-by: Martin Persson <martin.persson@stericsson.com>

[ANDROID] ux500: pm: disable auto mode in usecase governor

In the absence of auto mode there is no need to schedule work on every
early suspend, so schedule work only when needed.

Signed-off-by: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>

[ANDROID]: pm: force ondemand governor to choose higher frequency

Instead of overriding the frequency selected by the ondemand governor
in u8500_cpufreq_target(), force the ondemand governor to select a
higher frequency when the WLAN/USB workaround is enabled. The ANDROID
tag is only used to keep the patch internal; the change itself has no
Android dependency.

Signed-off-by: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>

[ANDROID]: ARM: u8500: usecaseg: Tiny code clean-up

Signed-off-by: Jonas Aaberg <jonas.aberg@stericsson.com>

arm: ux500: halve the frequencies of some clocks in vc use case

This patch adds a new APE OPP (50%, with the ACLK and DMACLK
frequencies at 25%) which is used during the voice-call use case.

Signed-off-by: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
Signed-off-by: Mattias Nilsson <mattias.i.nilsson@stericsson.com>

ux500: pm: Add min cpufreq to UC governor

Add a minimum CPU frequency limit to each use case in the use-case
governor.

Signed-off-by: Martin Persson <martin.persson@stericsson.com>

ux500: pm: allow usecase governor to force cpu idle states

Also update the low-power-audio configuration to force the ApSleep
state.

ux500: pm: move use case definitions to a header file

ux500: pm: add pl310 prefetch control in use-case governor

ux500: pm: cosmetic changes in use-case governor

Signed-off-by: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
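For illustration, below is a minimal user-space sketch (not part of the
patch) of driving the new sysfs interface. It assumes the "usecase"
attribute group, which is created on the cpu sysdev class, shows up as
/sys/devices/system/cpu/usecase/ with one writable node per use-case
and a read-only "current" node; the helper name and error handling are
illustrative only.

	#include <stdio.h>

	/* Hypothetical helper: enable (1) or disable (0) one use-case. */
	static int set_usecase(const char *name, int enable)
	{
		char path[128];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/usecase/%s", name);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "%d", enable);
		fclose(f);
		return 0;
	}

	int main(void)
	{
		/* Enable voice-call; the governor merges all enabled use-cases. */
		return set_usecase("voice-call", 1) ? 1 : 0;
	}

Note that writes to the "normal" node are rejected (store_dc_attr()
returns -EINVAL for index 0), and enabling a use-case only re-arms the
measurement work while the display is off (early suspend).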
-rw-r--r--arch/arm/mach-ux500/include/mach/prcmu-regs.h1
-rw-r--r--arch/arm/mach-ux500/include/mach/prcmu.h7
-rw-r--r--arch/arm/mach-ux500/pm/Kconfig9
-rw-r--r--arch/arm/mach-ux500/pm/Makefile1
-rw-r--r--arch/arm/mach-ux500/pm/usecase_gov.c885
-rw-r--r--arch/arm/mach-ux500/prcmu-qos-power.c48
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c8
-rw-r--r--drivers/cpufreq/db8500-cpufreq.c45
-rw-r--r--drivers/cpuidle/governors/menu.c58
-rw-r--r--drivers/mfd/db8500-prcmu.c69
10 files changed, 1101 insertions(+), 30 deletions(-)
diff --git a/arch/arm/mach-ux500/include/mach/prcmu-regs.h b/arch/arm/mach-ux500/include/mach/prcmu-regs.h
index 50c51b5605e..4f6f2f0394c 100644
--- a/arch/arm/mach-ux500/include/mach/prcmu-regs.h
+++ b/arch/arm/mach-ux500/include/mach/prcmu-regs.h
@@ -17,6 +17,7 @@
#define BITS(_start, _end) ((BIT(_end) - BIT(_start)) + BIT(_end))
+#define PRCM_ACLK_MGT_OFF 0x004
#define PRCM_SVACLK_MGT_OFF 0x008
#define PRCM_SIACLK_MGT_OFF 0x00C
#define PRCM_SGACLK_MGT_OFF 0x014
diff --git a/arch/arm/mach-ux500/include/mach/prcmu.h b/arch/arm/mach-ux500/include/mach/prcmu.h
index 6c7584d69d8..92fd7f79a4d 100644
--- a/arch/arm/mach-ux500/include/mach/prcmu.h
+++ b/arch/arm/mach-ux500/include/mach/prcmu.h
@@ -153,12 +153,14 @@ enum prcmu_clock {
* @APE_NO_CHANGE: The APE operating point is unchanged
* @APE_100_OPP: The new APE operating point is ape100opp
* @APE_50_OPP: 50%
+ * @APE_50_PARTLY_25_OPP: 50%, except some clocks at 25%.
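+ *	This is a software-only value: prcmu_set_ape_opp() translates it
+ *	to APE_50_OPP before the request is sent to the PRCMU firmware.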
*/
enum ape_opp {
APE_OPP_INIT = 0x00,
APE_NO_CHANGE = 0x01,
APE_100_OPP = 0x02,
- APE_50_OPP = 0x03
+ APE_50_OPP = 0x03,
+ APE_50_PARTLY_25_OPP = 0xFF,
};
/**
@@ -492,6 +494,7 @@ static inline void prcmu_get_abb_event_buffer(void __iomem **buf)
unsigned long prcmu_qos_get_cpufreq_opp_delay(void);
void prcmu_qos_set_cpufreq_opp_delay(unsigned long);
void prcmu_qos_force_opp(int, s32);
+void prcmu_qos_voice_call_override(bool enable);
int prcmu_qos_requirement(int pm_qos_class);
int prcmu_qos_add_requirement(int pm_qos_class, char *name, s32 value);
int prcmu_qos_update_requirement(int pm_qos_class, char *name, s32 new_value);
@@ -512,6 +515,8 @@ static inline void prcmu_qos_set_cpufreq_opp_delay(unsigned long n) {}
static inline void prcmu_qos_force_opp(int prcmu_qos_class, s32 i) {}
+static inline void prcmu_qos_voice_call_override(bool enable) {}
+
static inline int prcmu_qos_requirement(int prcmu_qos_class)
{
return 0;
diff --git a/arch/arm/mach-ux500/pm/Kconfig b/arch/arm/mach-ux500/pm/Kconfig
index d070e0a669f..47727b60775 100644
--- a/arch/arm/mach-ux500/pm/Kconfig
+++ b/arch/arm/mach-ux500/pm/Kconfig
@@ -73,3 +73,12 @@ config UX500_CONTEXT
depends on (UX500_SOC_DB8500 || UX500_SOC_DB5500) && PM
help
This is needed for ApSleep and deeper sleep states.
+
+config UX500_USECASE_GOVERNOR
+ bool "UX500 use-case governor"
+ depends on (UX500_SOC_DB8500 || UX500_SOC_DB5500) && \
+ (CPU_FREQ && CPU_IDLE && HOTPLUG_CPU && \
+ EARLYSUSPEND && UX500_L2X0_PREFETCH_CTRL && PM)
+ default y
+ help
+ Adjusts CPU_IDLE, CPU_FREQ, HOTPLUG_CPU and L2 cache parameters
diff --git a/arch/arm/mach-ux500/pm/Makefile b/arch/arm/mach-ux500/pm/Makefile
index d297ea7370b..b237ec6fa16 100644
--- a/arch/arm/mach-ux500/pm/Makefile
+++ b/arch/arm/mach-ux500/pm/Makefile
@@ -8,4 +8,5 @@ obj-$(CONFIG_U8500_CPUIDLE_DEBUG) += cpuidle_dbg.o
obj-$(CONFIG_UX500_CONTEXT) += context.o context_arm.o context-db8500.o context-db5500.o
obj-$(CONFIG_UX500_SUSPEND) += suspend.o
obj-$(CONFIG_UX500_SUSPEND_DBG) += suspend_dbg.o
+obj-$(CONFIG_UX500_USECASE_GOVERNOR) += usecase_gov.o
diff --git a/arch/arm/mach-ux500/pm/usecase_gov.c b/arch/arm/mach-ux500/pm/usecase_gov.c
new file mode 100644
index 00000000000..78f157d7f60
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/usecase_gov.c
@@ -0,0 +1,885 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@stericsson.com> for ST-Ericsson
+ * Author: Vincent Guittot <vincent.guittot@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/io.h>
+#include <linux/earlysuspend.h>
+#include <linux/cpu.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/kernel_stat.h>
+#include <linux/ktime.h>
+#include <linux/cpufreq.h>
+#include <mach/prcmu.h>
+
+#define CPULOAD_MEAS_DELAY 3000 /* 3 seconds of delta */
+
+/* debug */
+static unsigned long debug;
+
+#define hp_printk \
+ if (debug) \
+ printk \
+
+enum ux500_uc {
+ UX500_UC_NORMAL = 0,
+ UX500_UC_AUTO, /* Add use case below this. */
+ UX500_UC_VC,
+ UX500_UC_LPA,
+ UX500_UC_USER, /* Add use case above this. */
+ UX500_UC_MAX,
+};
+
+/* cpu load monitor struct */
+#define LOAD_MONITOR 4
+struct hotplug_cpu_info {
+ cputime64_t prev_cpu_wall;
+ cputime64_t prev_cpu_idle;
+ cputime64_t prev_cpu_io;
+ unsigned int load[LOAD_MONITOR];
+ unsigned int io[LOAD_MONITOR];
+ unsigned int idx;
+};
+
+static DEFINE_PER_CPU(struct hotplug_cpu_info, hotplug_info);
+
+/* Auto trigger criteria */
+/* loadavg threshold */
+static unsigned long lower_threshold = 175;
+static unsigned long upper_threshold = 450;
+/* load balancing */
+static unsigned long max_unbalance = 210;
+/* trend load */
+static unsigned long trend_unbalance = 40;
+static unsigned long min_trend = 5;
+/* instant load */
+static unsigned long max_instant = 85;
+
+/* Number of interrupts per second before exiting auto mode */
+static u32 exit_irq_per_s = 1000;
+static u64 old_num_irqs;
+
+static DEFINE_MUTEX(user_config_mutex);
+static bool user_config_updated;
+static enum ux500_uc current_uc = UX500_UC_MAX;
+static bool is_work_scheduled;
+static bool is_early_suspend;
+
+static unsigned int cpuidle_deepest_state;
+
+struct usecase_config {
+ char *name;
+ unsigned long max_freq;
+	unsigned long min_freq; /* 0 means no requirement */
+ unsigned long cpuidle_multiplier;
+ bool second_cpu_online;
+ bool l2_prefetch_en;
+ bool enable;
+ unsigned int forced_state; /* Forced cpu idle state. */
+ bool vc_override; /* QOS override for voice-call. */
+};
+
+static struct usecase_config usecase_conf[UX500_UC_MAX] = {
+ [UX500_UC_NORMAL] = {
+ .name = "normal",
+ .max_freq = 1000000,
+ .min_freq = 200000,
+ .cpuidle_multiplier = 1024,
+ .second_cpu_online = true,
+ .l2_prefetch_en = true,
+ .enable = true,
+ .forced_state = 0,
+ .vc_override = false,
+ },
+ [UX500_UC_AUTO] = {
+ .name = "auto",
+ .max_freq = 400000,
+ .min_freq = 200000,
+ .cpuidle_multiplier = 0,
+ .second_cpu_online = false,
+ .l2_prefetch_en = true,
+ .enable = false,
+ .forced_state = 0,
+ .vc_override = false,
+ },
+ [UX500_UC_VC] = {
+ .name = "voice-call",
+ .max_freq = 200000,
+ .min_freq = 200000,
+ .cpuidle_multiplier = 0,
+ .second_cpu_online = false,
+ .l2_prefetch_en = false,
+ .enable = false,
+ .forced_state = 0,
+ .vc_override = true,
+ },
+ [UX500_UC_LPA] = {
+ .name = "low-power-audio",
+ .max_freq = 400000,
+ .min_freq = 400000,
+ .cpuidle_multiplier = 0,
+ .second_cpu_online = false,
+ .l2_prefetch_en = false,
+ .enable = false,
+ .forced_state = 0, /* Updated dynamically */
+ .vc_override = false,
+ },
+};
+
+/* daemon */
+static struct delayed_work work_usecase;
+static struct early_suspend usecase_early_suspend;
+
+/* calculate loadavg */
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
+
+extern int cpufreq_update_freq(int cpu, unsigned int min, unsigned int max);
+extern int cpuidle_set_multiplier(unsigned int value);
+extern int cpuidle_force_state(unsigned int state);
+
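+/*
+ * avenrun values are fixed-point with FSHIFT fractional bits: LOAD_INT
+ * extracts the integer part and LOAD_FRAC the first two decimal digits.
+ * determine_loadavg() combines them into a value scaled by 100 (a load
+ * average of 1.75 becomes 175), which is what lower_threshold and
+ * upper_threshold are compared against.
+ */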
+static unsigned long determine_loadavg(void)
+{
+ unsigned long avg = 0;
+ unsigned long avnrun[3];
+
+ get_avenrun(avnrun, FIXED_1 / 200, 0);
+ avg += (LOAD_INT(avnrun[0]) * 100) + (LOAD_FRAC(avnrun[0]) % 100);
+
+ return avg;
+}
+
+static unsigned long determine_cpu_load(void)
+{
+ int i;
+ unsigned long total_load = 0;
+
+ /* get cpu load of each cpu */
+ for_each_online_cpu(i) {
+ unsigned int load, iowait;
+ unsigned int idle_time, iowait_time, wall_time;
+ cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
+ struct hotplug_cpu_info *info;
+
+ info = &per_cpu(hotplug_info, i);
+
+ /* update both cur_idle_time and cur_wall_time */
+ cur_idle_time = get_cpu_idle_time_us(i, &cur_wall_time);
+ cur_iowait_time = get_cpu_iowait_time_us(i, &cur_wall_time);
+
+ /* how much wall time has passed since last iteration? */
+ wall_time = (unsigned int) cputime64_sub(cur_wall_time,
+ info->prev_cpu_wall);
+ info->prev_cpu_wall = cur_wall_time;
+
+ /* how much idle time has passed since last iteration? */
+ idle_time = (unsigned int) cputime64_sub(cur_idle_time,
+ info->prev_cpu_idle);
+ info->prev_cpu_idle = cur_idle_time;
+
+ /* how much io wait time has passed since last iteration? */
+ iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
+ info->prev_cpu_io);
+ info->prev_cpu_io = cur_iowait_time;
+
+ if (unlikely(!wall_time || wall_time < idle_time))
+ continue;
+
+ /* load is the percentage of time not spent in idle */
+ load = 100 * (wall_time - idle_time) / wall_time;
+ info->load[info->idx] = load;
+ hp_printk("cpu %d load %u ", i, load);
+
+		/* iowait is the percentage of time spent waiting on io */
+		iowait = 100 * (iowait_time) / wall_time;
+		info->io[info->idx++] = iowait;
+ hp_printk("iowait %u\n", iowait);
+
+ if (info->idx >= LOAD_MONITOR)
+ info->idx = 0;
+
+ total_load += load;
+ }
+
+ return total_load;
+}
+
+static unsigned long determine_cpu_load_trend(void)
+{
+ int i, j, k;
+ unsigned long total_load = 0;
+
+ /* Get cpu load of each cpu */
+ for_each_online_cpu(i) {
+ unsigned int load = 0;
+ struct hotplug_cpu_info *info;
+
+ info = &per_cpu(hotplug_info, i);
+
+ for (k = 0, j = info->idx; k < LOAD_MONITOR; k++, j++)
+ load += info->load[j];
+
+ load /= LOAD_MONITOR;
+
+ hp_printk("cpu %d load trend %u\n", i, load);
+
+ total_load += load;
+ }
+
+ return total_load;
+}
+
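+/*
+ * Return a measure of how unbalanced the load is between the online
+ * CPUs: 100 * total_load / min_load (200 means two equally loaded
+ * CPUs). If the least loaded CPU is nearly idle (min_load not above
+ * min_trend), a fixed fallback value is returned instead of dividing
+ * by a near-zero number.
+ */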
+static unsigned long determine_cpu_balance_trend(void)
+{
+ int i, j, k;
+ unsigned long total_load = 0;
+ unsigned long min_load = (unsigned long) (-1);
+
+ /* Get cpu load of each cpu */
+ for_each_online_cpu(i) {
+ unsigned int load = 0;
+ struct hotplug_cpu_info *info;
+
+ info = &per_cpu(hotplug_info, i);
+
+ for (k = 0, j = info->idx; k < LOAD_MONITOR; k++, j++)
+ load += info->load[j];
+
+ load /= LOAD_MONITOR;
+
+ if (min_load > load)
+ min_load = load;
+ total_load += load;
+ }
+
+ if (min_load > min_trend)
+ total_load = (100 * total_load) / min_load;
+ else
+ total_load = 50 << num_online_cpus();
+
+ return total_load;
+}
+
+static void init_cpu_load_trend(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct hotplug_cpu_info *info;
+ int j;
+
+ info = &per_cpu(hotplug_info, i);
+
+ info->prev_cpu_idle = get_cpu_idle_time_us(i,
+ &(info->prev_cpu_wall));
+ info->prev_cpu_io = get_cpu_iowait_time_us(i,
+ &(info->prev_cpu_wall));
+
+ for (j = 0; j < LOAD_MONITOR; j++) {
+ info->load[j] = 100;
+ info->io[j] = 100;
+ }
+ info->idx = 0;
+ }
+}
+
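+/*
+ * Sum the interrupt counters of every IRQ on every CPU and convert the
+ * difference since the previous call into interrupts per second.
+ */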
+static u32 get_num_interrupts_per_s(void)
+{
+ int cpu;
+ int i;
+ u64 num_irqs = 0;
+ ktime_t now;
+ static ktime_t last;
+ unsigned int delta;
+ u32 irqs = 0;
+
+ now = ktime_get();
+
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < NR_IRQS; i++)
+ num_irqs += kstat_irqs_cpu(i, cpu);
+ }
+	pr_debug("%s: total num irqs: %llu, previous %llu\n",
+		__func__, num_irqs, old_num_irqs);
+
+ if (old_num_irqs > 0) {
+ delta = (u32)ktime_to_ms(ktime_sub(now, last)) / 1000;
+ irqs = ((u32)(num_irqs - old_num_irqs)) / delta;
+ }
+
+ old_num_irqs = num_irqs;
+ last = now;
+
+ pr_debug("delta irqs per sec:%d\n", irqs);
+
+ return irqs;
+}
+
+static void set_cpu_config(enum ux500_uc new_uc)
+{
+ struct cpufreq_policy policy;
+ int err;
+ bool update = false;
+ u32 min_freq, max_freq;
+
+ if (new_uc != current_uc)
+ update = true;
+ else if ((user_config_updated) && (new_uc == UX500_UC_USER))
+ update = true;
+
+ pr_debug("%s: new_usecase=%d, current_usecase=%d, update=%d\n",
+ __func__, new_uc, current_uc, update);
+
+ if (!update)
+ goto exit;
+
+ /* Cpu hotplug */
+ if (!(usecase_conf[new_uc].second_cpu_online) &&
+ (num_online_cpus() > 1))
+ cpu_down(1);
+ else if ((usecase_conf[new_uc].second_cpu_online) &&
+ (num_online_cpus() < 2))
+ cpu_up(1);
+
+ /* Cpu freq */
+ err = cpufreq_get_policy(&policy, 0);
+ if (err)
+ pr_err("usecase-gov: get cpufreq policy failed\n");
+
+ /* If requirement is 0, use current policy value */
+ min_freq = usecase_conf[new_uc].min_freq ?
+ usecase_conf[new_uc].min_freq : policy.min;
+
+ max_freq = usecase_conf[new_uc].max_freq ?
+ usecase_conf[new_uc].max_freq : policy.max;
+
+	/*
+	 * The cpufreq framework does not allow a frequency change if
+	 * "current min freq" > "new max freq" or
+	 * "current max freq" < "new min freq".
+	 * Hence the intermediate steps below.
+	 */
+ if (policy.min > max_freq) {
+ err = cpufreq_update_freq(0, min_freq, policy.max);
+ if (err)
+ pr_err("usecase-gov: update min cpufreq failed\n");
+ }
+ if (policy.max < min_freq) {
+ err = cpufreq_update_freq(0, policy.min, max_freq);
+ if (err)
+ pr_err("usecase-gov: update max cpufreq failed\n");
+ }
+
+ err = cpufreq_update_freq(0, min_freq, max_freq);
+ if (err)
+ pr_err("usecase-gov: update min/max cpufreq failed\n");
+
+ /* Cpu idle */
+ cpuidle_set_multiplier(usecase_conf[new_uc].cpuidle_multiplier);
+
+ /* L2 prefetch */
+ if (usecase_conf[new_uc].l2_prefetch_en)
+ outer_prefetch_enable();
+ else
+ outer_prefetch_disable();
+
+ /* Force cpuidle state */
+ cpuidle_force_state(usecase_conf[new_uc].forced_state);
+
+ /* QOS override */
+ prcmu_qos_voice_call_override(usecase_conf[new_uc].vc_override);
+
+ current_uc = new_uc;
+
+exit:
+	/* It's OK to clear this even if new_uc != UX500_UC_USER. */
+ user_config_updated = false;
+}
+
+/*
+ * Start the load measurement (every CPULOAD_MEAS_DELAY ms) in order to
+ * determine whether one CPU can be unplugged. In order not to corrupt
+ * the measurement, no load average is taken here in the early suspend
+ * callback; the history is only reset and the delayed work scheduled.
+ */
+static void usecase_earlysuspend_callback(struct early_suspend *h)
+{
+ init_cpu_load_trend();
+
+ mutex_lock(&user_config_mutex);
+
+ is_early_suspend = true;
+
+ if (usecase_conf[UX500_UC_AUTO].enable ||
+ usecase_conf[UX500_UC_USER].enable) {
+
+ is_work_scheduled = true;
+
+ schedule_delayed_work_on(0, &work_usecase,
+ msecs_to_jiffies(CPULOAD_MEAS_DELAY));
+ }
+
+ mutex_unlock(&user_config_mutex);
+}
+
+/* Stop the measurement; called on late resume (i.e. LCD on). */
+static void usecase_lateresume_callback(struct early_suspend *h)
+{
+ mutex_lock(&user_config_mutex);
+
+ if (is_work_scheduled) {
+ cancel_delayed_work_sync(&work_usecase);
+ is_work_scheduled = false;
+ }
+
+ is_early_suspend = false;
+
+ set_cpu_config(UX500_UC_NORMAL);
+
+ mutex_unlock(&user_config_mutex);
+}
+
+static void delayed_usecase_work(struct work_struct *work)
+{
+ unsigned long avg, load, trend, balance;
+ bool inc_perf = false;
+ bool dec_perf = false;
+ u32 irqs_per_s;
+
+ /* determine loadavg */
+ avg = determine_loadavg();
+ hp_printk("loadavg = %lu lower th %lu upper th %lu\n",
+ avg, lower_threshold, upper_threshold);
+
+ /* determine instant load */
+ load = determine_cpu_load();
+ hp_printk("cpu instant load = %lu max %lu\n", load, max_instant);
+
+ /* determine load trend */
+ trend = determine_cpu_load_trend();
+ hp_printk("cpu load trend = %lu min %lu unbal %lu\n",
+ trend, min_trend, trend_unbalance);
+
+ /* determine load balancing */
+ balance = determine_cpu_balance_trend();
+ hp_printk("load balancing trend = %lu min %lu\n",
+ balance, max_unbalance);
+
+ irqs_per_s = get_num_interrupts_per_s();
+
+	/* Don't let the configuration change in the middle of our calculations. */
+ mutex_lock(&user_config_mutex);
+
+ /* detect "instant" load increase */
+ if (load > max_instant || irqs_per_s > exit_irq_per_s) {
+ inc_perf = true;
+ } else if (!usecase_conf[UX500_UC_USER].enable &&
+ usecase_conf[UX500_UC_AUTO].enable) {
+ /* detect high loadavg use case */
+ if (avg > upper_threshold)
+ inc_perf = true;
+ /* detect idle use case */
+ else if (trend < min_trend)
+ dec_perf = true;
+ /* detect unbalanced low cpu load use case */
+ else if ((balance > max_unbalance) && (trend < trend_unbalance))
+ dec_perf = true;
+ /* detect low loadavg use case */
+ else if (avg < lower_threshold)
+ dec_perf = true;
+		/* All user use-cases have been disabled and the current
+		 * load does not trigger any change; re-apply the config.
+		 */
+ else if (user_config_updated)
+ dec_perf = true;
+ } else {
+ dec_perf = true;
+ }
+
+ /*
+ * set_cpu_config() will not update the config unless it has been
+ * changed.
+ */
+ if (dec_perf) {
+ if (usecase_conf[UX500_UC_USER].enable)
+ set_cpu_config(UX500_UC_USER);
+ else if (usecase_conf[UX500_UC_AUTO].enable)
+ set_cpu_config(UX500_UC_AUTO);
+ } else if (inc_perf) {
+ set_cpu_config(UX500_UC_NORMAL);
+ }
+
+ mutex_unlock(&user_config_mutex);
+
+	/* re-arm the delayed work */
+ schedule_delayed_work_on(0, &work_usecase,
+ msecs_to_jiffies(CPULOAD_MEAS_DELAY));
+
+}
+
+static struct dentry *usecase_dir;
+
+#ifdef CONFIG_DEBUG_FS
+#define define_set(_name) \
+static ssize_t set_##_name(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ int err; \
+	unsigned long i; \
+ \
+ err = kstrtoul_from_user(user_buf, count, 0, &i); \
+ \
+ if (err) \
+ return err; \
+ \
+ _name = i; \
+ hp_printk("New value : %lu\n", _name); \
+ \
+ return count; \
+}
+
+define_set(upper_threshold);
+define_set(lower_threshold);
+define_set(max_unbalance);
+define_set(trend_unbalance);
+define_set(min_trend);
+define_set(max_instant);
+define_set(debug);
+
+#define define_print(_name) \
+static ssize_t print_##_name(struct seq_file *s, void *p) \
+{ \
+ return seq_printf(s, "%lu\n", _name); \
+}
+
+define_print(upper_threshold);
+define_print(lower_threshold);
+define_print(max_unbalance);
+define_print(trend_unbalance);
+define_print(min_trend);
+define_print(max_instant);
+define_print(debug);
+
+#define define_open(_name) \
+static ssize_t open_##_name(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, print_##_name, inode->i_private); \
+}
+
+define_open(upper_threshold);
+define_open(lower_threshold);
+define_open(max_unbalance);
+define_open(trend_unbalance);
+define_open(min_trend);
+define_open(max_instant);
+define_open(debug);
+
+#define define_dbg_file(_name) \
+static const struct file_operations fops_##_name = { \
+ .open = open_##_name, \
+ .write = set_##_name, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ .owner = THIS_MODULE, \
+}; \
+static struct dentry *file_##_name;
+
+define_dbg_file(upper_threshold);
+define_dbg_file(lower_threshold);
+define_dbg_file(max_unbalance);
+define_dbg_file(trend_unbalance);
+define_dbg_file(min_trend);
+define_dbg_file(max_instant);
+define_dbg_file(debug);
+
+struct dbg_file {
+ struct dentry **file;
+ const struct file_operations *fops;
+ const char *name;
+};
+
+#define define_dbg_entry(_name) \
+{ \
+ .file = &file_##_name, \
+ .fops = &fops_##_name, \
+ .name = #_name \
+}
+
+static struct dbg_file debug_entry[] = {
+ define_dbg_entry(upper_threshold),
+ define_dbg_entry(lower_threshold),
+ define_dbg_entry(max_unbalance),
+ define_dbg_entry(trend_unbalance),
+ define_dbg_entry(min_trend),
+ define_dbg_entry(max_instant),
+ define_dbg_entry(debug),
+};
+
+static int setup_debugfs(void)
+{
+ int i;
+ usecase_dir = debugfs_create_dir("usecase", NULL);
+
+ if (IS_ERR_OR_NULL(usecase_dir))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(debug_entry); i++) {
+ if (IS_ERR_OR_NULL(debugfs_create_file(debug_entry[i].name,
+ S_IWUGO | S_IRUGO,
+ usecase_dir,
+ NULL,
+ debug_entry[i].fops)))
+ goto fail;
+ }
+
+ if (IS_ERR_OR_NULL(debugfs_create_u32("exit_irq_per_s",
+ S_IWUGO | S_IRUGO, usecase_dir,
+ &exit_irq_per_s)))
+ goto fail;
+ return 0;
+fail:
+ debugfs_remove_recursive(usecase_dir);
+ return -EINVAL;
+}
+#else
+static int setup_debugfs(void)
+{
+ return 0;
+}
+#endif
+
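+/*
+ * Merge all enabled user use-cases into the UX500_UC_USER slot, picking
+ * the highest-performance value of each parameter: the highest max/min
+ * frequencies and cpuidle multiplier, the logical OR of the CPU online
+ * and L2 prefetch flags, the shallowest forced idle state, and the QOS
+ * override only if every enabled use-case requests it.
+ */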
+static void usecase_update_user_config(void)
+{
+ int i;
+ bool config_enable = false;
+ struct usecase_config *user_conf = &usecase_conf[UX500_UC_USER];
+
+ mutex_lock(&user_config_mutex);
+
+ user_conf->max_freq = 0;
+ user_conf->min_freq = 0;
+ user_conf->cpuidle_multiplier = 0;
+ user_conf->second_cpu_online = false;
+ user_conf->l2_prefetch_en = false;
+ user_conf->forced_state = cpuidle_deepest_state;
+ user_conf->vc_override = true; /* A single false will clear it. */
+
+	/* Don't include the auto and normal modes in this. */
+ for (i = (UX500_UC_AUTO + 1); i < UX500_UC_USER; i++) {
+ if (!usecase_conf[i].enable)
+ continue;
+
+ config_enable = true;
+
+ if (usecase_conf[i].max_freq > user_conf->max_freq)
+ user_conf->max_freq = usecase_conf[i].max_freq;
+ /* It's the highest min freq requirement that should be used */
+ if (usecase_conf[i].min_freq > user_conf->min_freq)
+ user_conf->min_freq = usecase_conf[i].min_freq;
+
+ if (usecase_conf[i].cpuidle_multiplier >
+ user_conf->cpuidle_multiplier)
+ user_conf->cpuidle_multiplier =
+ usecase_conf[i].cpuidle_multiplier;
+
+ user_conf->second_cpu_online |=
+ usecase_conf[i].second_cpu_online;
+
+ user_conf->l2_prefetch_en |=
+ usecase_conf[i].l2_prefetch_en;
+
+ /* Take the shallowest state. */
+ if (usecase_conf[i].forced_state < user_conf->forced_state)
+ user_conf->forced_state = usecase_conf[i].forced_state;
+
+ /* Only override QOS if all enabled configurations are
+ * requesting it.
+ */
+ if (!usecase_conf[i].vc_override)
+ user_conf->vc_override = false;
+ }
+
+ user_conf->enable = config_enable;
+ user_config_updated = true;
+
+ mutex_unlock(&user_config_mutex);
+}
+
+struct usecase_devclass_attr {
+ struct sysdev_class_attribute class_attr;
+ u32 index;
+};
+
+static struct usecase_devclass_attr usecase_dc_attr[UX500_UC_MAX];
+
+static struct attribute *dbs_attributes[UX500_UC_MAX + 1] = {NULL};
+
+static struct attribute_group dbs_attr_group = {
+ .attrs = dbs_attributes,
+ .name = "usecase",
+};
+
+static ssize_t show_current(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ enum ux500_uc display_uc = (current_uc == UX500_UC_MAX) ?
+ UX500_UC_NORMAL : current_uc;
+
+	return sprintf(buf, "max_freq: %lu\n"
+		"cpuidle_multiplier: %lu\n"
+ "second_cpu_online: %s\n"
+ "l2_prefetch_en: %s\n"
+ "forced_state: %d\n"
+ "vc_override: %s\n",
+ usecase_conf[display_uc].max_freq,
+ usecase_conf[display_uc].cpuidle_multiplier,
+ usecase_conf[display_uc].second_cpu_online ? "true" : "false",
+ usecase_conf[display_uc].l2_prefetch_en ? "true" : "false",
+ usecase_conf[display_uc].forced_state,
+ usecase_conf[display_uc].vc_override ? "true" : "false");
+}
+
+static ssize_t show_dc_attr(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ struct usecase_devclass_attr *uattr =
+ container_of(attr, struct usecase_devclass_attr, class_attr);
+
+ return sprintf(buf, "%u\n",
+ usecase_conf[uattr->index].enable);
+}
+
+static ssize_t store_dc_attr(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ struct usecase_devclass_attr *uattr =
+ container_of(attr, struct usecase_devclass_attr, class_attr);
+
+ ret = sscanf(buf, "%u", &input);
+
+	/* Normal mode can't be changed. */
+ if ((ret != 1) || (uattr->index == 0))
+ return -EINVAL;
+
+ usecase_conf[uattr->index].enable = (bool)input;
+
+ usecase_update_user_config();
+
+ mutex_lock(&user_config_mutex);
+
+ if (usecase_conf[UX500_UC_AUTO].enable ||
+ usecase_conf[UX500_UC_USER].enable) {
+ if (is_early_suspend && !is_work_scheduled) {
+ schedule_delayed_work_on(0, &work_usecase,
+ msecs_to_jiffies(CPULOAD_MEAS_DELAY));
+ is_work_scheduled = true;
+ }
+ } else if (is_work_scheduled) {
+ cancel_delayed_work_sync(&work_usecase);
+ is_work_scheduled = false;
+ set_cpu_config(UX500_UC_NORMAL);
+ }
+
+ mutex_unlock(&user_config_mutex);
+
+ return count;
+}
+
+static int usecase_sysfs_init(void)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < (ARRAY_SIZE(usecase_conf) - 1); i++) {
+ usecase_dc_attr[i].class_attr.attr.name = usecase_conf[i].name;
+ usecase_dc_attr[i].class_attr.attr.mode = 0644;
+ usecase_dc_attr[i].class_attr.show = show_dc_attr;
+ usecase_dc_attr[i].class_attr.store = store_dc_attr;
+ usecase_dc_attr[i].index = i;
+
+ dbs_attributes[i] = &(usecase_dc_attr[i].class_attr.attr);
+ }
+ usecase_dc_attr[UX500_UC_USER].class_attr.attr.name = "current";
+ usecase_dc_attr[UX500_UC_USER].class_attr.attr.mode = 0644;
+ usecase_dc_attr[UX500_UC_USER].class_attr.show = show_current;
+ usecase_dc_attr[UX500_UC_USER].class_attr.store = NULL;
+ usecase_dc_attr[UX500_UC_USER].index = UX500_UC_USER;
+ dbs_attributes[UX500_UC_USER] =
+ &(usecase_dc_attr[UX500_UC_USER].class_attr.attr);
+
+ err = sysfs_create_group(&(cpu_sysdev_class.kset.kobj),
+ &dbs_attr_group);
+ if (err)
+ pr_err("usecase-gov: sysfs_create_group"
+ " failed with error = %d\n", err);
+
+ return err;
+}
+
+#include "cpuidle.h"
+
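+/*
+ * Pick the shallowest cpuidle state with the APE powered off and the
+ * ARM in retention; low-power-audio forces that state. Also record the
+ * deepest available state for use in the user-config merge.
+ */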
+static void usecase_cpuidle_init(void)
+{
+ int max_states;
+ int i;
+ struct cstate *state = ux500_ci_get_cstates(&max_states);
+
+ for (i = 0; i < max_states; i++)
+ if ((state[i].APE == APE_OFF) && (state[i].ARM == ARM_RET))
+ break;
+
+ usecase_conf[UX500_UC_LPA].forced_state = i;
+
+ cpuidle_deepest_state = max_states - 1;
+}
+
+/* initialize devices */
+static int __init init_usecase_devices(void)
+{
+ int err;
+
+ pr_info("Use-case governor initialized\n");
+
+ /* add early_suspend callback */
+ usecase_early_suspend.level = 200;
+ usecase_early_suspend.suspend = usecase_earlysuspend_callback;
+ usecase_early_suspend.resume = usecase_lateresume_callback;
+ register_early_suspend(&usecase_early_suspend);
+
+ /* register delayed queuework */
+ INIT_DELAYED_WORK_DEFERRABLE(&work_usecase,
+ delayed_usecase_work);
+
+ init_cpu_load_trend();
+
+ err = setup_debugfs();
+ if (err)
+ goto error;
+ err = usecase_sysfs_init();
+ if (err)
+ goto error2;
+
+ usecase_cpuidle_init();
+
+ return 0;
+error2:
+ debugfs_remove_recursive(usecase_dir);
+error:
+ unregister_early_suspend(&usecase_early_suspend);
+ return err;
+}
+
+device_initcall(init_usecase_devices);
diff --git a/arch/arm/mach-ux500/prcmu-qos-power.c b/arch/arm/mach-ux500/prcmu-qos-power.c
index 050bcf2f7ad..082393bd7eb 100644
--- a/arch/arm/mach-ux500/prcmu-qos-power.c
+++ b/arch/arm/mach-ux500/prcmu-qos-power.c
@@ -96,8 +96,11 @@ static struct prcmu_qos_object *prcmu_qos_array[] = {
&ddr_opp_qos
};
+static DEFINE_MUTEX(prcmu_qos_mutex);
static DEFINE_SPINLOCK(prcmu_qos_lock);
+static bool ape_opp_forced_to_50_partly_25;
+
static unsigned long cpufreq_opp_delay = HZ / 5;
unsigned long prcmu_qos_get_cpufreq_opp_delay(void)
@@ -141,6 +144,8 @@ static void update_target(int target)
int update = 0;
u8 op;
+ mutex_lock(&prcmu_qos_mutex);
+
spin_lock_irqsave(&prcmu_qos_lock, flags);
extreme_value = prcmu_qos_array[target]->default_value;
@@ -188,7 +193,7 @@ static void update_target(int target)
default:
pr_err("prcmu qos: Incorrect ddr target value (%d)",
extreme_value);
- return;
+ goto unlock_and_return;
}
prcmu_debug_ddr_opp_log(op);
prcmu_set_ddr_opp(op);
@@ -205,12 +210,17 @@ static void update_target(int target)
default:
pr_err("prcmu qos: Incorrect ape target value (%d)",
extreme_value);
- return;
+ goto unlock_and_return;
}
- prcmu_set_ape_opp(op);
+
+ if (!ape_opp_forced_to_50_partly_25)
+ (void)prcmu_set_ape_opp(op);
+
prcmu_debug_ape_opp_log(op);
}
}
+unlock_and_return:
+ mutex_unlock(&prcmu_qos_mutex);
}
void prcmu_qos_force_opp(int prcmu_qos_class, s32 i)
@@ -219,6 +229,38 @@ void prcmu_qos_force_opp(int prcmu_qos_class, s32 i)
update_target(prcmu_qos_class);
}
+void prcmu_qos_voice_call_override(bool enable)
+{
+ u8 op;
+
+ mutex_lock(&prcmu_qos_mutex);
+
+ ape_opp_forced_to_50_partly_25 = enable;
+
+ if (enable) {
+ (void)prcmu_set_ape_opp(APE_50_PARTLY_25_OPP);
+ goto unlock_and_return;
+ }
+
+ /* Disable: set the OPP according to the current target value. */
+ switch (atomic_read(
+ &prcmu_qos_array[PRCMU_QOS_APE_OPP]->target_value)) {
+ case 50:
+ op = APE_50_OPP;
+ break;
+ case 100:
+ op = APE_100_OPP;
+ break;
+ default:
+ goto unlock_and_return;
+ }
+
+ (void)prcmu_set_ape_opp(op);
+
+unlock_and_return:
+ mutex_unlock(&prcmu_qos_mutex);
+}
+
/**
* prcmu_qos_requirement - returns current prcmu qos expectation
* @prcmu_qos_class: identification of which qos value is requested
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index a5bbbf60ae8..bcef88a89b0 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -390,6 +390,10 @@ static struct attribute_group dbs_attr_group = {
.name = "ondemand",
};
+/*** remove these externs after the deprecation period ***/
+extern bool wlan_mode_on;
+extern bool usb_mode_on;
+
/************************** sysfs end ************************/
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
@@ -494,7 +498,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
}
/* Check for frequency increase */
- if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
+ if ((max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) ||
+	    /* A workaround for WLAN and USB performance issues */
+ (usb_mode_on || wlan_mode_on)) {
/* If switching to max speed, apply sampling_down_factor */
if (policy->cur < policy->max)
this_dbs_info->rate_mult =
diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/db8500-cpufreq.c
index 4b863110bd8..629b0b88e6c 100644
--- a/drivers/cpufreq/db8500-cpufreq.c
+++ b/drivers/cpufreq/db8500-cpufreq.c
@@ -58,6 +58,10 @@ static enum arm_opp idx2opp[] = {
#define WLAN_PROBE_DELAY 3000 /* 3 seconds */
#define WLAN_LIMIT (3000/3) /* If we have more than 1000 irqs per second */
+#define USB_PROBE_DELAY 1000 /* 1 second */
+#define USB_LIMIT (200) /* If we have more than 200 irqs per second */
+static struct delayed_work work_usb_workaround;
+bool usb_mode_on;
static struct delayed_work work_wlan_workaround;
bool wlan_mode_on;
@@ -84,6 +88,40 @@ static void wlan_load(struct work_struct *work)
msecs_to_jiffies(WLAN_PROBE_DELAY));
}
+static void usb_load(struct work_struct *work)
+{
+ int cpu;
+ unsigned int num_irqs = 0;
+ static unsigned int old_num_irqs = UINT_MAX;
+
+ for_each_online_cpu(cpu)
+ num_irqs += kstat_irqs_cpu(IRQ_DB8500_USBOTG, cpu);
+
+ if ((num_irqs > old_num_irqs) &&
+ (num_irqs - old_num_irqs) > USB_LIMIT)
+ usb_mode_on = true;
+ else
+ usb_mode_on = false;
+
+ old_num_irqs = num_irqs;
+
+ schedule_delayed_work_on(0,
+ &work_usb_workaround,
+ msecs_to_jiffies(USB_PROBE_DELAY));
+}
+
+void cpufreq_usb_connect_notify(bool connect)
+{
+ if (connect) {
+ schedule_delayed_work_on(0,
+ &work_usb_workaround,
+ msecs_to_jiffies(USB_PROBE_DELAY));
+ } else {
+ cancel_delayed_work_sync(&work_usb_workaround);
+ usb_mode_on = false;
+ }
+}
+
static struct freq_attr *db8500_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
@@ -107,10 +145,6 @@ static int db8500_cpufreq_target(struct cpufreq_policy *policy,
if (target_freq > policy->cpuinfo.max_freq)
target_freq = policy->cpuinfo.max_freq;
- /* A work around for wlan performance issues */
- if (wlan_mode_on)
- target_freq = policy->cpuinfo.max_freq;
-
/* Lookup the next frequency */
if (cpufreq_frequency_table_target
(policy, freq_table, target_freq, relation, &idx)) {
@@ -169,6 +203,9 @@ static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
&work_wlan_workaround,
msecs_to_jiffies(WLAN_PROBE_DELAY));
+ INIT_DELAYED_WORK_DEFERRABLE(&work_usb_workaround,
+ usb_load);
+
pr_info("db8500-cpufreq : Available frequencies:\n");
while (freq_table[i].frequency != CPUFREQ_TABLE_END)
pr_info(" %d Mhz\n", freq_table[i++].frequency/1000);
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 95f4c391e4f..ed28f774ca4 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -124,6 +124,7 @@ struct menu_device {
};
static int tune_multiplier = 1024;
+static int forced_state;
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
@@ -290,26 +291,34 @@ static int menu_select(struct cpuidle_device *dev)
if (data->expected_us > 5)
data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
- /*
- * Find the idle state with the lowest power while satisfying
- * our constraints.
- */
- for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) {
- struct cpuidle_state *s = &dev->states[i];
-
- if (s->flags & CPUIDLE_FLAG_IGNORE)
- continue;
- if (s->target_residency > data->predicted_us)
- continue;
- if (s->exit_latency > latency_req)
- continue;
- if (s->exit_latency * multiplier > data->predicted_us)
- continue;
-
- if (s->power_usage < power_usage) {
- power_usage = s->power_usage;
- data->last_state_idx = i;
- data->exit_us = s->exit_latency;
+	WARN((forced_state >= dev->state_count),
+	     "Forced state value out of range.\n");
+
+ if ((forced_state != 0) && (forced_state < dev->state_count)) {
+ data->exit_us = dev->states[forced_state].exit_latency;
+ data->last_state_idx = forced_state;
+ } else {
+ /*
+ * Find the idle state with the lowest power while satisfying
+ * our constraints.
+ */
+ for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) {
+ struct cpuidle_state *s = &dev->states[i];
+
+ if (s->flags & CPUIDLE_FLAG_IGNORE)
+ continue;
+ if (s->target_residency > data->predicted_us)
+ continue;
+ if (s->exit_latency > latency_req)
+ continue;
+ if (s->exit_latency * multiplier > data->predicted_us)
+ continue;
+
+ if (s->power_usage < power_usage) {
+ power_usage = s->power_usage;
+ data->last_state_idx = i;
+ data->exit_us = s->exit_latency;
+ }
}
}
@@ -402,6 +411,15 @@ int cpuidle_set_multiplier(unsigned int value)
}
EXPORT_SYMBOL(cpuidle_set_multiplier);
+/* Passing 0 removes the forced state and restores normal state selection. */
+int cpuidle_force_state(unsigned int state)
+{
+ forced_state = state;
+
+ return 0;
+}
+EXPORT_SYMBOL(cpuidle_force_state);
+
static ssize_t show_multiplier(struct sysdev_class *class,
struct sysdev_class_attribute *attr,
char *buf)
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index f9e3c5e55d7..b226b155e29 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -340,11 +340,13 @@ static struct {
* mb1_transfer - state needed for mailbox 1 communication.
* @lock: The transaction lock.
* @work: The transaction completion structure.
+ * @ape_opp: The current APE OPP.
* @ack: Reply ("acknowledge") data.
*/
static struct {
struct mutex lock;
struct completion work;
+ u8 ape_opp;
struct {
u8 header;
u8 arm_opp;
@@ -938,6 +940,53 @@ int prcmu_set_ddr_opp(u8 opp)
return 0;
}
+
+/* Divide the frequency of certain clocks by 2 for APE_50_PARTLY_25_OPP. */
+static void request_even_slower_clocks(bool enable)
+{
+ const u8 clock_reg_offset[] = {
+ PRCM_ACLK_MGT_OFF,
+ PRCM_DMACLK_MGT_OFF
+ };
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&clk_mgt_lock, flags);
+
+ /* Grab the HW semaphore. */
+ while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
+ cpu_relax();
+
+ for (i = 0; i < ARRAY_SIZE(clock_reg_offset); i++) {
+ u32 val;
+ u32 div;
+
+ val = readl(_PRCMU_BASE + clock_reg_offset[i]);
+ div = (val & PRCM_CLK_MGT_CLKPLLDIV_MASK);
+ if (enable) {
+ if ((div <= 1) || (div > 15)) {
+ pr_err("prcmu: Bad clock divider %d in %s\n",
+ div, __func__);
+ goto unlock_and_return;
+ }
+ div <<= 1;
+ } else {
+ if (div <= 2)
+ goto unlock_and_return;
+ div >>= 1;
+ }
+ val = ((val & ~PRCM_CLK_MGT_CLKPLLDIV_MASK) |
+ (div & PRCM_CLK_MGT_CLKPLLDIV_MASK));
+ writel(val, (_PRCMU_BASE + clock_reg_offset[i]));
+ }
+
+unlock_and_return:
+ /* Release the HW semaphore. */
+ writel(0, PRCM_SEM);
+
+ spin_unlock_irqrestore(&clk_mgt_lock, flags);
+}
+
/**
* set_ape_opp - set the appropriate APE OPP
* @opp: The new APE operating point to which transition is to be made
@@ -949,14 +998,24 @@ int prcmu_set_ape_opp(u8 opp)
{
int r = 0;
+ if (opp == mb1_transfer.ape_opp)
+ return 0;
+
mutex_lock(&mb1_transfer.lock);
+ if (mb1_transfer.ape_opp == APE_50_PARTLY_25_OPP)
+ request_even_slower_clocks(false);
+
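+	/*
+	 * Neither the old nor the new OPP is 100%: the firmware-visible
+	 * OPP stays at 50% and only the extra clock dividers may differ,
+	 * so no mailbox request is needed.
+	 */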
+ if ((opp != APE_100_OPP) && (mb1_transfer.ape_opp != APE_100_OPP))
+ goto skip_message;
+
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
cpu_relax();
writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
writeb(ARM_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
- writeb(opp, (tcdm_base + PRCM_REQ_MB1_APE_OPP));
+ writeb(((opp == APE_50_PARTLY_25_OPP) ? APE_50_OPP : opp),
+ (tcdm_base + PRCM_REQ_MB1_APE_OPP));
writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb1_transfer.work);
@@ -965,6 +1024,13 @@ int prcmu_set_ape_opp(u8 opp)
(mb1_transfer.ack.ape_opp != opp))
r = -EIO;
+skip_message:
+ if ((!r && (opp == APE_50_PARTLY_25_OPP)) ||
+ (r && (mb1_transfer.ape_opp == APE_50_PARTLY_25_OPP)))
+ request_even_slower_clocks(true);
+ if (!r)
+ mb1_transfer.ape_opp = opp;
+
mutex_unlock(&mb1_transfer.lock);
return r;
@@ -2126,6 +2192,7 @@ void __init db8500_prcmu_early_init(void)
init_completion(&mb0_transfer.ac_wake_work);
mutex_init(&mb1_transfer.lock);
init_completion(&mb1_transfer.work);
+ mb1_transfer.ape_opp = APE_NO_CHANGE;
mutex_init(&mb2_transfer.lock);
init_completion(&mb2_transfer.work);
spin_lock_init(&mb2_transfer.auto_pm_lock);