Diffstat (limited to 'arch/arm/mach-ux500/pm')
-rw-r--r--  arch/arm/mach-ux500/pm/Kconfig            |  70
-rw-r--r--  arch/arm/mach-ux500/pm/Makefile           |  12
-rw-r--r--  arch/arm/mach-ux500/pm/context-db5500.c   | 407
-rw-r--r--  arch/arm/mach-ux500/pm/context-db8500.c   | 456
-rw-r--r--  arch/arm/mach-ux500/pm/context.c          | 962
-rw-r--r--  arch/arm/mach-ux500/pm/context_arm.S      | 409
-rw-r--r--  arch/arm/mach-ux500/pm/performance.c      | 224
-rw-r--r--  arch/arm/mach-ux500/pm/pm.c               | 221
-rw-r--r--  arch/arm/mach-ux500/pm/prcmu-qos-power.c  | 722
-rw-r--r--  arch/arm/mach-ux500/pm/runtime.c          | 509
-rw-r--r--  arch/arm/mach-ux500/pm/scu.h              |  25
-rw-r--r--  arch/arm/mach-ux500/pm/suspend.c          | 273
-rw-r--r--  arch/arm/mach-ux500/pm/suspend_dbg.c      | 165
-rw-r--r--  arch/arm/mach-ux500/pm/suspend_dbg.h      |  63
-rw-r--r--  arch/arm/mach-ux500/pm/timer.c            | 193
-rw-r--r--  arch/arm/mach-ux500/pm/usecase_gov.c      | 973
16 files changed, 5684 insertions, 0 deletions
diff --git a/arch/arm/mach-ux500/pm/Kconfig b/arch/arm/mach-ux500/pm/Kconfig
new file mode 100644
index 00000000000..12004ba9858
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/Kconfig
@@ -0,0 +1,70 @@
+config DBX500_PRCMU_QOS_POWER
+ bool "DBX500 PRCMU power QoS support"
+ depends on (MFD_DB5500_PRCMU || MFD_DB8500_PRCMU)
+ default y
+ help
+ Add support for PRCMU power Quality of Service.
+
+config UX500_CONTEXT
+ bool "Context save/restore support for UX500"
+ depends on (UX500_SOC_DB8500 || UX500_SOC_DB5500) && PM
+ help
+ This is needed for ApSleep and deeper sleep states.
+
+config UX500_PM_PERFORMANCE
+ bool "Performance supervision"
+ depends on DBX500_PRCMU_QOS_POWER
+ default y
+ help
+ Enable supervision of events which may require a boost
+ of platform performance.
+
+config UX500_CONSOLE_UART_GPIO_PIN
+ int "The pin number of the console UART GPIO pin"
+ default 29
+ depends on UX500_SUSPEND_DBG_WAKE_ON_UART || UX500_CPUIDLE_DEBUG
+ help
+ Number of the GPIO pin connected to the console UART RX line.
+
+ Board-specific code can change this.
+
+config UX500_SUSPEND
+ bool "Suspend to mem and standby support"
+ depends on (UX500_SOC_DB8500 || UX500_SOC_DB5500) && PM && SUSPEND
+ select UX500_CONTEXT
+ help
+ Add support for suspend.
+
+config UX500_SUSPEND_STANDBY
+ bool "Suspend Standby goes to ApSleep"
+ depends on UX500_SUSPEND
+ help
+ If yes, echo standby > /sys/power/state puts the system into ApSleep.
+
+config UX500_SUSPEND_MEM
+ bool "Suspend Mem goes to ApDeepSleep"
+ depends on UX500_SUSPEND
+ help
+ If yes, echo mem > /sys/power/state puts the system into ApDeepSleep;
+ otherwise it does the same as echo standby > /sys/power/state.
+
+config UX500_SUSPEND_DBG
+ bool "Suspend debug"
+ depends on UX500_SUSPEND && DEBUG_FS
+ help
+ Add debug support for suspend.
+
+config UX500_SUSPEND_DBG_WAKE_ON_UART
+ bool "Suspend wakes on console UART"
+ depends on UX500_SUSPEND_DBG
+ help
+ Wake up on UART interrupts. Makes it possible for the console to wake up the system.
+
+config UX500_USECASE_GOVERNOR
+ bool "UX500 use-case governor"
+ depends on (UX500_SOC_DB8500 || UX500_SOC_DB5500) && \
+ (CPU_FREQ && CPU_IDLE && HOTPLUG_CPU && \
+ EARLYSUSPEND && UX500_L2X0_PREFETCH_CTRL && PM)
+ default y
+ help
+ Adjusts CPU_IDLE, CPU_FREQ, HOTPLUG_CPU and L2 cache parameters.
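
[Editorial sketch, not part of this patch: the QoS interface added by prcmu-qos-power.c is not shown in this hunk. As a rough illustration of how such a power-QoS option is typically consumed by a driver, under the assumption that the file exports a prcmu_qos_* API with an APE OPP class (the header path, function names and PRCMU_QOS_APE_OPP are assumptions, not confirmed by this diff):]

#include <mach/prcmu.h>		/* assumed header for the PRCMU QoS API */

static int example_latency_sensitive_work(void)
{
	int ret;

	/* Hold the APE at 100% OPP while the work runs (assumed API). */
	ret = prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, "example", 100);
	if (ret)
		return ret;

	/* ... do the latency sensitive work ... */

	prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, "example");
	return 0;
}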
diff --git a/arch/arm/mach-ux500/pm/Makefile b/arch/arm/mach-ux500/pm/Makefile
new file mode 100644
index 00000000000..c0af28e5d3e
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/Makefile
@@ -0,0 +1,12 @@
+#
+# Power save related files
+#
+obj-y := pm.o runtime.o
+
+obj-$(CONFIG_DBX500_PRCMU_QOS_POWER) += prcmu-qos-power.o
+obj-$(CONFIG_UX500_CONTEXT) += context.o context_arm.o context-db8500.o context-db5500.o
+obj-$(CONFIG_UX500_CPUIDLE) += timer.o
+obj-$(CONFIG_UX500_SUSPEND) += suspend.o
+obj-$(CONFIG_UX500_SUSPEND_DBG) += suspend_dbg.o
+obj-$(CONFIG_UX500_PM_PERFORMANCE) += performance.o
+obj-$(CONFIG_UX500_USECASE_GOVERNOR) += usecase_gov.o
diff --git a/arch/arm/mach-ux500/pm/context-db5500.c b/arch/arm/mach-ux500/pm/context-db5500.c
new file mode 100644
index 00000000000..9842785c05a
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/context-db5500.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>,
+ * Rickard Andersson <rickard.andersson@stericsson.com>,
+ * Sundar Iyer <sundar.iyer@stericsson.com>,
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <mach/context.h>
+
+/* These registers are DB5500 specific */
+#define NODE_HIBW1_ESRAM_IN_0_PRIORITY 0x0
+#define NODE_HIBW1_ESRAM_IN_1_PRIORITY 0x4
+
+#define NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT 0x18
+#define NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT 0x1C
+#define NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT 0x20
+
+#define NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT 0x24
+#define NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT 0x28
+#define NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT 0x2C
+
+#define NODE_HIBW1_DDR_IN_0_PRIORITY 0x400
+#define NODE_HIBW1_DDR_IN_1_PRIORITY 0x404
+#define NODE_HIBW1_DDR_IN_2_PRIORITY 0x408
+
+#define NODE_HIBW1_DDR_IN_0_LIMIT 0x424
+#define NODE_HIBW1_DDR_IN_1_LIMIT 0x428
+#define NODE_HIBW1_DDR_IN_2_LIMIT 0x42C
+
+#define NODE_HIBW1_DDR_OUT_0_PRIORITY 0x430
+
+#define NODE_HIBW2_ESRAM_IN_0_PRIORITY 0x800
+#define NODE_HIBW2_ESRAM_IN_1_PRIORITY 0x804
+
+#define NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT 0x818
+#define NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT 0x81C
+#define NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT 0x820
+
+#define NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT 0x824
+#define NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT 0x828
+#define NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT 0x82C
+
+#define NODE_HIBW2_DDR_IN_0_PRIORITY 0xC00
+#define NODE_HIBW2_DDR_IN_1_PRIORITY 0xC04
+#define NODE_HIBW2_DDR_IN_2_PRIORITY 0xC08
+#define NODE_HIBW2_DDR_IN_3_PRIORITY 0xC0C
+
+#define NODE_HIBW2_DDR_IN_0_LIMIT 0xC30
+#define NODE_HIBW2_DDR_IN_1_LIMIT 0xC34
+#define NODE_HIBW2_DDR_IN_2_LIMIT 0xC38
+#define NODE_HIBW2_DDR_IN_3_LIMIT 0xC3C
+
+#define NODE_HIBW2_DDR_OUT_0_PRIORITY 0xC40
+
+#define NODE_ESRAM0_IN_0_PRIORITY 0x1000
+#define NODE_ESRAM0_IN_1_PRIORITY 0x1004
+#define NODE_ESRAM0_IN_2_PRIORITY 0x1008
+
+#define NODE_ESRAM0_IN_0_LIMIT 0x1024
+#define NODE_ESRAM0_IN_1_LIMIT 0x1028
+#define NODE_ESRAM0_IN_2_LIMIT 0x102C
+#define NODE_ESRAM0_OUT_0_PRIORITY 0x1030
+
+#define NODE_ESRAM1_2_IN_0_PRIORITY 0x1400
+#define NODE_ESRAM1_2_IN_1_PRIORITY 0x1404
+#define NODE_ESRAM1_2_IN_2_PRIORITY 0x1408
+
+#define NODE_ESRAM1_2_IN_0_ARB_1_LIMIT 0x1424
+#define NODE_ESRAM1_2_IN_1_ARB_1_LIMIT 0x1428
+#define NODE_ESRAM1_2_IN_2_ARB_1_LIMIT 0x142C
+#define NODE_ESRAM1_2_OUT_0_PRIORITY 0x1430
+
+#define NODE_ESRAM3_4_IN_0_PRIORITY 0x1800
+#define NODE_ESRAM3_4_IN_1_PRIORITY 0x1804
+#define NODE_ESRAM3_4_IN_2_PRIORITY 0x1808
+
+#define NODE_ESRAM3_4_IN_0_ARB_1_LIMIT 0x1824
+#define NODE_ESRAM3_4_IN_1_ARB_1_LIMIT 0x1828
+#define NODE_ESRAM3_4_IN_2_ARB_1_LIMIT 0x182C
+#define NODE_ESRAM3_4_OUT_0_PRIORITY 0x1830
+
+/*
+ * Save ICN (interconnect node) configuration registers.
+ * TODO: This can be optimized, for example if we have
+ * a static ICN configuration.
+ */
+
+static struct {
+ void __iomem *base;
+ u32 hibw1_esram_in_pri[2];
+ u32 hibw1_esram_in0_arb[3];
+ u32 hibw1_esram_in1_arb[3];
+ u32 hibw1_ddr_in_prio[3];
+ u32 hibw1_ddr_in_limit[3];
+ u32 hibw1_ddr_out_prio_reg;
+
+ /* HiBw2 node registers */
+ u32 hibw2_esram_in_pri[2];
+ u32 hibw2_esram_in0_arblimit[3];
+ u32 hibw2_esram_in1_arblimit[3];
+ u32 hibw2_ddr_in_prio[4];
+ u32 hibw2_ddr_in_limit[4];
+ u32 hibw2_ddr_out_prio_reg;
+
+ /* ESRAM node registers */
+ u32 esram_in_prio[3];
+ u32 esram_in_lim[3];
+ u32 esram_out_prio_reg;
+
+ u32 esram12_in_prio[3];
+ u32 esram12_in_arb_lim[3];
+ u32 esram12_out_prio_reg;
+
+ u32 esram34_in_prio[3];
+ u32 esram34_in_arb_lim[3];
+ u32 esram34_out_prio;
+} context_icn;
+
+
+void u5500_context_save_icn(void)
+{
+ void __iomem *base = context_icn.base;
+
+ /* hibw1 */
+ context_icn.hibw1_esram_in_pri[0] =
+ readl_relaxed(base + NODE_HIBW1_ESRAM_IN_0_PRIORITY);
+ context_icn.hibw1_esram_in_pri[1] =
+ readl_relaxed(base + NODE_HIBW1_ESRAM_IN_1_PRIORITY);
+
+ context_icn.hibw1_esram_in0_arb[0] =
+ readl_relaxed(base + NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT);
+ context_icn.hibw1_esram_in0_arb[1] =
+ readl_relaxed(base + NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT);
+ context_icn.hibw1_esram_in0_arb[2] =
+ readl_relaxed(base + NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT);
+
+ context_icn.hibw1_esram_in1_arb[0] =
+ readl_relaxed(base + NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT);
+ context_icn.hibw1_esram_in1_arb[1] =
+ readl_relaxed(base + NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT);
+ context_icn.hibw1_esram_in1_arb[2] =
+ readl_relaxed(base + NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT);
+
+ context_icn.hibw1_ddr_in_prio[0] =
+ readl_relaxed(base + NODE_HIBW1_DDR_IN_0_PRIORITY);
+ context_icn.hibw1_ddr_in_prio[1] =
+ readl_relaxed(base + NODE_HIBW1_DDR_IN_1_PRIORITY);
+ context_icn.hibw1_ddr_in_prio[2] =
+ readl_relaxed(base + NODE_HIBW1_DDR_IN_2_PRIORITY);
+
+ context_icn.hibw1_ddr_in_limit[0] =
+ readl_relaxed(base + NODE_HIBW1_DDR_IN_0_LIMIT);
+ context_icn.hibw1_ddr_in_limit[1] =
+ readl_relaxed(base + NODE_HIBW1_DDR_IN_1_LIMIT);
+ context_icn.hibw1_ddr_in_limit[2] =
+ readl_relaxed(base + NODE_HIBW1_DDR_IN_2_LIMIT);
+
+ context_icn.hibw1_ddr_out_prio_reg =
+ readl_relaxed(base + NODE_HIBW1_DDR_OUT_0_PRIORITY);
+
+ /* hibw2 */
+ context_icn.hibw2_esram_in_pri[0] =
+ readl_relaxed(base + NODE_HIBW2_ESRAM_IN_0_PRIORITY);
+ context_icn.hibw2_esram_in_pri[1] =
+ readl_relaxed(base + NODE_HIBW2_ESRAM_IN_1_PRIORITY);
+
+ context_icn.hibw2_esram_in0_arblimit[0] =
+ readl_relaxed(base + NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT);
+ context_icn.hibw2_esram_in0_arblimit[1] =
+ readl_relaxed(base + NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT);
+ context_icn.hibw2_esram_in0_arblimit[2] =
+ readl_relaxed(base + NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT);
+
+ context_icn.hibw2_esram_in1_arblimit[0] =
+ readl_relaxed(base + NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT);
+ context_icn.hibw2_esram_in1_arblimit[1] =
+ readl_relaxed(base + NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT);
+ context_icn.hibw2_esram_in1_arblimit[2] =
+ readl_relaxed(base + NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT);
+
+ context_icn.hibw2_ddr_in_prio[0] =
+ readl_relaxed(base + NODE_HIBW2_DDR_IN_0_PRIORITY);
+ context_icn.hibw2_ddr_in_prio[1] =
+ readl_relaxed(base + NODE_HIBW2_DDR_IN_1_PRIORITY);
+ context_icn.hibw2_ddr_in_prio[2] =
+ readl_relaxed(base + NODE_HIBW2_DDR_IN_2_PRIORITY);
+ context_icn.hibw2_ddr_in_prio[3] =
+ readl_relaxed(base + NODE_HIBW2_DDR_IN_3_PRIORITY);
+
+ context_icn.hibw2_ddr_in_limit[0] =
+ readl_relaxed(base + NODE_HIBW2_DDR_IN_0_LIMIT);
+ context_icn.hibw2_ddr_in_limit[1] =
+ readl_relaxed(base + NODE_HIBW2_DDR_IN_1_LIMIT);
+ context_icn.hibw2_ddr_in_limit[2] =
+ readl_relaxed(base + NODE_HIBW2_DDR_IN_2_LIMIT);
+ context_icn.hibw2_ddr_in_limit[3] =
+ readl_relaxed(base + NODE_HIBW2_DDR_IN_3_LIMIT);
+
+ context_icn.hibw2_ddr_out_prio_reg =
+ readl_relaxed(base + NODE_HIBW2_DDR_OUT_0_PRIORITY);
+
+ /* ESRAM0 */
+ context_icn.esram_in_prio[0] =
+ readl_relaxed(base + NODE_ESRAM0_IN_0_PRIORITY);
+ context_icn.esram_in_prio[1] =
+ readl_relaxed(base + NODE_ESRAM0_IN_1_PRIORITY);
+ context_icn.esram_in_prio[2] =
+ readl_relaxed(base + NODE_ESRAM0_IN_2_PRIORITY);
+
+ context_icn.esram_in_lim[0] =
+ readl_relaxed(base + NODE_ESRAM0_IN_0_LIMIT);
+ context_icn.esram_in_lim[1] =
+ readl_relaxed(base + NODE_ESRAM0_IN_1_LIMIT);
+ context_icn.esram_in_lim[2] =
+ readl_relaxed(base + NODE_ESRAM0_IN_2_LIMIT);
+
+ context_icn.esram_out_prio_reg =
+ readl_relaxed(base + NODE_ESRAM0_OUT_0_PRIORITY);
+
+ /* ESRAM1-2 */
+ context_icn.esram12_in_prio[0] =
+ readl_relaxed(base + NODE_ESRAM1_2_IN_0_PRIORITY);
+ context_icn.esram12_in_prio[1] =
+ readl_relaxed(base + NODE_ESRAM1_2_IN_1_PRIORITY);
+ context_icn.esram12_in_prio[2] =
+ readl_relaxed(base + NODE_ESRAM1_2_IN_2_PRIORITY);
+
+ context_icn.esram12_in_arb_lim[0] =
+ readl_relaxed(base + NODE_ESRAM1_2_IN_0_ARB_1_LIMIT);
+ context_icn.esram12_in_arb_lim[1] =
+ readl_relaxed(base + NODE_ESRAM1_2_IN_1_ARB_1_LIMIT);
+ context_icn.esram12_in_arb_lim[2] =
+ readl_relaxed(base + NODE_ESRAM1_2_IN_2_ARB_1_LIMIT);
+
+ context_icn.esram12_out_prio_reg =
+ readl_relaxed(base + NODE_ESRAM1_2_OUT_0_PRIORITY);
+
+ /* ESRAM3-4 */
+ context_icn.esram34_in_prio[0] =
+ readl_relaxed(base + NODE_ESRAM3_4_IN_0_PRIORITY);
+ context_icn.esram34_in_prio[1] =
+ readl_relaxed(base + NODE_ESRAM3_4_IN_1_PRIORITY);
+ context_icn.esram34_in_prio[2] =
+ readl_relaxed(base + NODE_ESRAM3_4_IN_2_PRIORITY);
+
+ context_icn.esram34_in_arb_lim[0] =
+ readl_relaxed(base + NODE_ESRAM3_4_IN_0_ARB_1_LIMIT);
+ context_icn.esram34_in_arb_lim[1] =
+ readl_relaxed(base + NODE_ESRAM3_4_IN_1_ARB_1_LIMIT);
+ context_icn.esram34_in_arb_lim[2] =
+ readl_relaxed(base + NODE_ESRAM3_4_IN_2_ARB_1_LIMIT);
+
+ context_icn.esram34_out_prio =
+ readl_relaxed(base + NODE_ESRAM3_4_OUT_0_PRIORITY);
+}
+
+/*
+ * Restore ICN configuration registers
+ */
+void u5500_context_restore_icn(void)
+{
+ void __iomem *base = context_icn.base;
+
+ /* hibw1 */
+ writel_relaxed(context_icn.hibw1_esram_in_pri[0],
+ base + NODE_HIBW1_ESRAM_IN_0_PRIORITY);
+ writel_relaxed(context_icn.hibw1_esram_in_pri[1],
+ base + NODE_HIBW1_ESRAM_IN_1_PRIORITY);
+
+ writel_relaxed(context_icn.hibw1_esram_in0_arb[0],
+ base + NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in0_arb[1],
+ base + NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in0_arb[2],
+ base + NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw1_esram_in1_arb[0],
+ base + NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in1_arb[1],
+ base + NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in1_arb[2],
+ base + NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw1_ddr_in_prio[0],
+ base + NODE_HIBW1_DDR_IN_0_PRIORITY);
+ writel_relaxed(context_icn.hibw1_ddr_in_prio[1],
+ base + NODE_HIBW1_DDR_IN_1_PRIORITY);
+ writel_relaxed(context_icn.hibw1_ddr_in_prio[2],
+ base + NODE_HIBW1_DDR_IN_2_PRIORITY);
+
+ writel_relaxed(context_icn.hibw1_ddr_in_limit[0],
+ base + NODE_HIBW1_DDR_IN_0_LIMIT);
+ writel_relaxed(context_icn.hibw1_ddr_in_limit[1],
+ base + NODE_HIBW1_DDR_IN_1_LIMIT);
+ writel_relaxed(context_icn.hibw1_ddr_in_limit[2],
+ base + NODE_HIBW1_DDR_IN_2_LIMIT);
+
+ writel_relaxed(context_icn.hibw1_ddr_out_prio_reg,
+ base + NODE_HIBW1_DDR_OUT_0_PRIORITY);
+
+ /* hibw2 */
+ writel_relaxed(context_icn.hibw2_esram_in_pri[0],
+ base + NODE_HIBW2_ESRAM_IN_0_PRIORITY);
+ writel_relaxed(context_icn.hibw2_esram_in_pri[1],
+ base + NODE_HIBW2_ESRAM_IN_1_PRIORITY);
+
+ writel_relaxed(context_icn.hibw2_esram_in0_arblimit[0],
+ base + NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw2_esram_in0_arblimit[1],
+ base + NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw2_esram_in0_arblimit[2],
+ base + NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw2_esram_in1_arblimit[0],
+ base + NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw2_esram_in1_arblimit[1],
+ base + NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw2_esram_in1_arblimit[2],
+ base + NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw2_ddr_in_prio[0],
+ base + NODE_HIBW2_DDR_IN_0_PRIORITY);
+ writel_relaxed(context_icn.hibw2_ddr_in_prio[1],
+ base + NODE_HIBW2_DDR_IN_1_PRIORITY);
+ writel_relaxed(context_icn.hibw2_ddr_in_prio[2],
+ base + NODE_HIBW2_DDR_IN_2_PRIORITY);
+ writel_relaxed(context_icn.hibw2_ddr_in_prio[3],
+ base + NODE_HIBW2_DDR_IN_3_PRIORITY);
+
+ writel_relaxed(context_icn.hibw2_ddr_in_limit[0],
+ base + NODE_HIBW2_DDR_IN_0_LIMIT);
+ writel_relaxed(context_icn.hibw2_ddr_in_limit[1],
+ base + NODE_HIBW2_DDR_IN_1_LIMIT);
+ writel_relaxed(context_icn.hibw2_ddr_in_limit[2],
+ base + NODE_HIBW2_DDR_IN_2_LIMIT);
+ writel_relaxed(context_icn.hibw2_ddr_in_limit[3],
+ base + NODE_HIBW2_DDR_IN_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw2_ddr_out_prio_reg,
+ base + NODE_HIBW2_DDR_OUT_0_PRIORITY);
+
+ /* ESRAM0 */
+ writel_relaxed(context_icn.esram_in_prio[0],
+ base + NODE_ESRAM0_IN_0_PRIORITY);
+ writel_relaxed(context_icn.esram_in_prio[1],
+ base + NODE_ESRAM0_IN_1_PRIORITY);
+ writel_relaxed(context_icn.esram_in_prio[2],
+ base + NODE_ESRAM0_IN_2_PRIORITY);
+
+ writel_relaxed(context_icn.esram_in_lim[0],
+ base + NODE_ESRAM0_IN_0_LIMIT);
+ writel_relaxed(context_icn.esram_in_lim[1],
+ base + NODE_ESRAM0_IN_1_LIMIT);
+ writel_relaxed(context_icn.esram_in_lim[2],
+ base + NODE_ESRAM0_IN_2_LIMIT);
+
+ writel_relaxed(context_icn.esram_out_prio_reg,
+ base + NODE_ESRAM0_OUT_0_PRIORITY);
+
+ /* ESRAM1-2 */
+ writel_relaxed(context_icn.esram12_in_prio[0],
+ base + NODE_ESRAM1_2_IN_0_PRIORITY);
+ writel_relaxed(context_icn.esram12_in_prio[1],
+ base + NODE_ESRAM1_2_IN_1_PRIORITY);
+ writel_relaxed(context_icn.esram12_in_prio[2],
+ base + NODE_ESRAM1_2_IN_2_PRIORITY);
+
+ writel_relaxed(context_icn.esram12_in_arb_lim[0],
+ base + NODE_ESRAM1_2_IN_0_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[1],
+ base + NODE_ESRAM1_2_IN_1_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[2],
+ base + NODE_ESRAM1_2_IN_2_ARB_1_LIMIT);
+
+ writel_relaxed(context_icn.esram12_out_prio_reg,
+ base + NODE_ESRAM1_2_OUT_0_PRIORITY);
+
+ /* ESRAM3-4 */
+ writel_relaxed(context_icn.esram34_in_prio[0],
+ base + NODE_ESRAM3_4_IN_0_PRIORITY);
+ writel_relaxed(context_icn.esram34_in_prio[1],
+ base + NODE_ESRAM3_4_IN_1_PRIORITY);
+ writel_relaxed(context_icn.esram34_in_prio[2],
+ base + NODE_ESRAM3_4_IN_2_PRIORITY);
+
+ writel_relaxed(context_icn.esram34_in_arb_lim[0],
+ base + NODE_ESRAM3_4_IN_0_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[1],
+ base + NODE_ESRAM3_4_IN_1_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[2],
+ base + NODE_ESRAM3_4_IN_2_ARB_1_LIMIT);
+
+ writel_relaxed(context_icn.esram34_out_prio,
+ base + NODE_ESRAM3_4_OUT_0_PRIORITY);
+
+}
+
+void u5500_context_init(void)
+{
+ context_icn.base = ioremap(U5500_ICN_BASE, SZ_8K);
+}
diff --git a/arch/arm/mach-ux500/pm/context-db8500.c b/arch/arm/mach-ux500/pm/context-db8500.c
new file mode 100644
index 00000000000..3ba73e51a6d
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/context-db8500.c
@@ -0,0 +1,456 @@
+/*
+ * Copyright (C) STMicroelectronics 2009
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Sundar Iyer for ST-Ericsson
+ *
+ */
+
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <mach/context.h>
+
+/*
+ * ST-Interconnect context
+ */
+
+/* priority, bw limiter register offsets */
+#define NODE_HIBW1_ESRAM_IN_0_PRIORITY 0x00
+#define NODE_HIBW1_ESRAM_IN_1_PRIORITY 0x04
+#define NODE_HIBW1_ESRAM_IN_2_PRIORITY 0x08
+#define NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT 0x24
+#define NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT 0x28
+#define NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT 0x2C
+#define NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT 0x30
+#define NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT 0x34
+#define NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT 0x38
+#define NODE_HIBW1_ESRAM_IN_2_ARB_1_LIMIT 0x3C
+#define NODE_HIBW1_ESRAM_IN_2_ARB_2_LIMIT 0x40
+#define NODE_HIBW1_ESRAM_IN_2_ARB_3_LIMIT 0x44
+#define NODE_HIBW1_DDR_IN_0_PRIORITY 0x400
+#define NODE_HIBW1_DDR_IN_1_PRIORITY 0x404
+#define NODE_HIBW1_DDR_IN_2_PRIORITY 0x408
+#define NODE_HIBW1_DDR_IN_0_LIMIT 0x424
+#define NODE_HIBW1_DDR_IN_1_LIMIT 0x428
+#define NODE_HIBW1_DDR_IN_2_LIMIT 0x42C
+#define NODE_HIBW1_DDR_OUT_0_PRIORITY 0x430
+#define NODE_HIBW2_ESRAM_IN_0_PRIORITY 0x800
+#define NODE_HIBW2_ESRAM_IN_1_PRIORITY 0x804
+#define NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT 0x818
+#define NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT 0x81C
+#define NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT 0x820
+#define NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT 0x824
+#define NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT 0x828
+#define NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT 0x82C
+#define NODE_HIBW2_DDR_IN_0_PRIORITY 0xC00
+#define NODE_HIBW2_DDR_IN_1_PRIORITY 0xC04
+#define NODE_HIBW2_DDR_IN_2_PRIORITY 0xC08
+
+#define NODE_HIBW2_DDR_IN_0_LIMIT 0xC24
+#define NODE_HIBW2_DDR_IN_1_LIMIT 0xC28
+#define NODE_HIBW2_DDR_IN_2_LIMIT 0xC2C
+#define NODE_HIBW2_DDR_OUT_0_PRIORITY 0xC30
+
+/*
+ * Note: the following addresses are listed in the db8500 design
+ * spec v3.1 and v3.3, table 10, but they do not match the addresses
+ * given in the per-register descriptions. The addresses in the
+ * description of each register are the correct ones.
+ * NODE_HIBW2_DDR_IN_3_LIMIT is only present in v1.
+ *
+ * Faulty register addresses in table 10:
+ * NODE_HIBW2_DDR_IN_2_LIMIT 0xC38
+ * NODE_HIBW2_DDR_IN_3_LIMIT 0xC3C
+ * NODE_HIBW2_DDR_OUT_0_PRIORITY 0xC40
+ */
+
+#define NODE_ESRAM0_IN_0_PRIORITY 0x1000
+#define NODE_ESRAM0_IN_1_PRIORITY 0x1004
+#define NODE_ESRAM0_IN_2_PRIORITY 0x1008
+#define NODE_ESRAM0_IN_3_PRIORITY 0x100C
+#define NODE_ESRAM0_IN_0_LIMIT 0x1030
+#define NODE_ESRAM0_IN_1_LIMIT 0x1034
+#define NODE_ESRAM0_IN_2_LIMIT 0x1038
+#define NODE_ESRAM0_IN_3_LIMIT 0x103C
+/* common */
+#define NODE_ESRAM1_2_IN_0_PRIORITY 0x1400
+#define NODE_ESRAM1_2_IN_1_PRIORITY 0x1404
+#define NODE_ESRAM1_2_IN_2_PRIORITY 0x1408
+#define NODE_ESRAM1_2_IN_3_PRIORITY 0x140C
+#define NODE_ESRAM1_2_IN_0_ARB_1_LIMIT 0x1430
+#define NODE_ESRAM1_2_IN_0_ARB_2_LIMIT 0x1434
+#define NODE_ESRAM1_2_IN_1_ARB_1_LIMIT 0x1438
+#define NODE_ESRAM1_2_IN_1_ARB_2_LIMIT 0x143C
+#define NODE_ESRAM1_2_IN_2_ARB_1_LIMIT 0x1440
+#define NODE_ESRAM1_2_IN_2_ARB_2_LIMIT 0x1444
+#define NODE_ESRAM1_2_IN_3_ARB_1_LIMIT 0x1448
+#define NODE_ESRAM1_2_IN_3_ARB_2_LIMIT 0x144C
+
+#define NODE_ESRAM3_4_IN_0_PRIORITY 0x1800
+#define NODE_ESRAM3_4_IN_1_PRIORITY 0x1804
+#define NODE_ESRAM3_4_IN_2_PRIORITY 0x1808
+#define NODE_ESRAM3_4_IN_3_PRIORITY 0x180C
+#define NODE_ESRAM3_4_IN_0_ARB_1_LIMIT 0x1830
+#define NODE_ESRAM3_4_IN_0_ARB_2_LIMIT 0x1834
+#define NODE_ESRAM3_4_IN_1_ARB_1_LIMIT 0x1838
+#define NODE_ESRAM3_4_IN_1_ARB_2_LIMIT 0x183C
+#define NODE_ESRAM3_4_IN_2_ARB_1_LIMIT 0x1840
+#define NODE_ESRAM3_4_IN_2_ARB_2_LIMIT 0x1844
+#define NODE_ESRAM3_4_IN_3_ARB_1_LIMIT 0x1848
+#define NODE_ESRAM3_4_IN_3_ARB_2_LIMIT 0x184C
+
+static struct {
+ void __iomem *base;
+ u32 hibw1_esram_in_pri[3];
+ u32 hibw1_esram_in0_arb[3];
+ u32 hibw1_esram_in1_arb[3];
+ u32 hibw1_esram_in2_arb[3];
+ u32 hibw1_ddr_in_prio[3];
+ u32 hibw1_ddr_in_limit[3];
+ u32 hibw1_ddr_out_prio;
+
+ /* HiBw2 node registers */
+ u32 hibw2_esram_in_pri[2];
+ u32 hibw2_esram_in0_arblimit[3];
+ u32 hibw2_esram_in1_arblimit[3];
+ u32 hibw2_ddr_in_prio[4];
+ u32 hibw2_ddr_in_limit[4];
+ u32 hibw2_ddr_out_prio;
+
+ /* ESRAM node registers */
+ u32 esram_in_prio[4];
+ u32 esram_in_lim[4];
+ u32 esram0_in_prio[4];
+ u32 esram0_in_lim[4];
+ u32 esram12_in_prio[4];
+ u32 esram12_in_arb_lim[8];
+ u32 esram34_in_prio[4];
+ u32 esram34_in_arb_lim[8];
+} context_icn;
+
+/**
+ * u8500_context_save_icn() - save ICN context
+ *
+ */
+void u8500_context_save_icn(void)
+{
+ void __iomem *b = context_icn.base;
+
+ context_icn.hibw1_esram_in_pri[0] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_0_PRIORITY);
+ context_icn.hibw1_esram_in_pri[1] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_1_PRIORITY);
+ context_icn.hibw1_esram_in_pri[2] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_2_PRIORITY);
+
+ context_icn.hibw1_esram_in0_arb[0] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT);
+ context_icn.hibw1_esram_in0_arb[1] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT);
+ context_icn.hibw1_esram_in0_arb[2] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT);
+
+ context_icn.hibw1_esram_in1_arb[0] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT);
+ context_icn.hibw1_esram_in1_arb[1] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT);
+ context_icn.hibw1_esram_in1_arb[2] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT);
+
+ context_icn.hibw1_esram_in2_arb[0] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_2_ARB_1_LIMIT);
+ context_icn.hibw1_esram_in2_arb[1] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_2_ARB_2_LIMIT);
+ context_icn.hibw1_esram_in2_arb[2] =
+ readl_relaxed(b + NODE_HIBW1_ESRAM_IN_2_ARB_3_LIMIT);
+
+ context_icn.hibw1_ddr_in_prio[0] =
+ readl_relaxed(b + NODE_HIBW1_DDR_IN_0_PRIORITY);
+ context_icn.hibw1_ddr_in_prio[1] =
+ readl_relaxed(b + NODE_HIBW1_DDR_IN_1_PRIORITY);
+ context_icn.hibw1_ddr_in_prio[2] =
+ readl_relaxed(b + NODE_HIBW1_DDR_IN_2_PRIORITY);
+
+ context_icn.hibw1_ddr_in_limit[0] =
+ readl_relaxed(b + NODE_HIBW1_DDR_IN_0_LIMIT);
+ context_icn.hibw1_ddr_in_limit[1] =
+ readl_relaxed(b + NODE_HIBW1_DDR_IN_1_LIMIT);
+ context_icn.hibw1_ddr_in_limit[2] =
+ readl_relaxed(b + NODE_HIBW1_DDR_IN_2_LIMIT);
+
+ context_icn.hibw1_ddr_out_prio =
+ readl_relaxed(b + NODE_HIBW1_DDR_OUT_0_PRIORITY);
+
+ context_icn.hibw2_esram_in_pri[0] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_0_PRIORITY);
+ context_icn.hibw2_esram_in_pri[1] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_1_PRIORITY);
+
+ context_icn.hibw2_esram_in0_arblimit[0] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT);
+ context_icn.hibw2_esram_in0_arblimit[1] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT);
+ context_icn.hibw2_esram_in0_arblimit[2] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT);
+
+ context_icn.hibw2_esram_in1_arblimit[0] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT);
+ context_icn.hibw2_esram_in1_arblimit[1] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT);
+ context_icn.hibw2_esram_in1_arblimit[2] =
+ readl_relaxed(b + NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT);
+
+ context_icn.hibw2_ddr_in_prio[0] =
+ readl_relaxed(b + NODE_HIBW2_DDR_IN_0_PRIORITY);
+ context_icn.hibw2_ddr_in_prio[1] =
+ readl_relaxed(b + NODE_HIBW2_DDR_IN_1_PRIORITY);
+ context_icn.hibw2_ddr_in_prio[2] =
+ readl_relaxed(b + NODE_HIBW2_DDR_IN_2_PRIORITY);
+
+ context_icn.hibw2_ddr_in_limit[0] =
+ readl_relaxed(b + NODE_HIBW2_DDR_IN_0_LIMIT);
+ context_icn.hibw2_ddr_in_limit[1] =
+ readl_relaxed(b + NODE_HIBW2_DDR_IN_1_LIMIT);
+
+ context_icn.hibw2_ddr_in_limit[2] =
+ readl_relaxed(b + NODE_HIBW2_DDR_IN_2_LIMIT);
+
+ context_icn.hibw2_ddr_out_prio =
+ readl_relaxed(b + NODE_HIBW2_DDR_OUT_0_PRIORITY);
+
+ context_icn.esram0_in_prio[0] =
+ readl_relaxed(b + NODE_ESRAM0_IN_0_PRIORITY);
+ context_icn.esram0_in_prio[1] =
+ readl_relaxed(b + NODE_ESRAM0_IN_1_PRIORITY);
+ context_icn.esram0_in_prio[2] =
+ readl_relaxed(b + NODE_ESRAM0_IN_2_PRIORITY);
+ context_icn.esram0_in_prio[3] =
+ readl_relaxed(b + NODE_ESRAM0_IN_3_PRIORITY);
+
+ context_icn.esram0_in_lim[0] =
+ readl_relaxed(b + NODE_ESRAM0_IN_0_LIMIT);
+ context_icn.esram0_in_lim[1] =
+ readl_relaxed(b + NODE_ESRAM0_IN_1_LIMIT);
+ context_icn.esram0_in_lim[2] =
+ readl_relaxed(b + NODE_ESRAM0_IN_2_LIMIT);
+ context_icn.esram0_in_lim[3] =
+ readl_relaxed(b + NODE_ESRAM0_IN_3_LIMIT);
+
+ context_icn.esram12_in_prio[0] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_0_PRIORITY);
+ context_icn.esram12_in_prio[1] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_1_PRIORITY);
+ context_icn.esram12_in_prio[2] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_2_PRIORITY);
+ context_icn.esram12_in_prio[3] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_3_PRIORITY);
+
+ context_icn.esram12_in_arb_lim[0] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_0_ARB_1_LIMIT);
+ context_icn.esram12_in_arb_lim[1] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_0_ARB_2_LIMIT);
+ context_icn.esram12_in_arb_lim[2] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_1_ARB_1_LIMIT);
+ context_icn.esram12_in_arb_lim[3] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_1_ARB_2_LIMIT);
+ context_icn.esram12_in_arb_lim[4] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_2_ARB_1_LIMIT);
+ context_icn.esram12_in_arb_lim[5] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_2_ARB_2_LIMIT);
+ context_icn.esram12_in_arb_lim[6] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_3_ARB_1_LIMIT);
+ context_icn.esram12_in_arb_lim[7] =
+ readl_relaxed(b + NODE_ESRAM1_2_IN_3_ARB_2_LIMIT);
+
+ context_icn.esram34_in_prio[0] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_0_PRIORITY);
+ context_icn.esram34_in_prio[1] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_1_PRIORITY);
+ context_icn.esram34_in_prio[2] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_2_PRIORITY);
+ context_icn.esram34_in_prio[3] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_3_PRIORITY);
+
+ context_icn.esram34_in_arb_lim[0] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_0_ARB_1_LIMIT);
+ context_icn.esram34_in_arb_lim[1] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_0_ARB_2_LIMIT);
+ context_icn.esram34_in_arb_lim[2] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_1_ARB_1_LIMIT);
+ context_icn.esram34_in_arb_lim[3] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_1_ARB_2_LIMIT);
+ context_icn.esram34_in_arb_lim[4] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_2_ARB_1_LIMIT);
+ context_icn.esram34_in_arb_lim[5] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_2_ARB_2_LIMIT);
+ context_icn.esram34_in_arb_lim[6] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_3_ARB_1_LIMIT);
+ context_icn.esram34_in_arb_lim[7] =
+ readl_relaxed(b + NODE_ESRAM3_4_IN_3_ARB_2_LIMIT);
+}
+
+/**
+ * u8500_context_restore_icn() - restore ICN context
+ *
+ */
+void u8500_context_restore_icn(void)
+{
+ void __iomem *b = context_icn.base;
+
+ writel_relaxed(context_icn.hibw1_esram_in_pri[0],
+ b + NODE_HIBW1_ESRAM_IN_0_PRIORITY);
+ writel_relaxed(context_icn.hibw1_esram_in_pri[1],
+ b + NODE_HIBW1_ESRAM_IN_1_PRIORITY);
+ writel_relaxed(context_icn.hibw1_esram_in_pri[2],
+ b + NODE_HIBW1_ESRAM_IN_2_PRIORITY);
+
+ writel_relaxed(context_icn.hibw1_esram_in0_arb[0],
+ b + NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in0_arb[1],
+ b + NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in0_arb[2],
+ b + NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw1_esram_in1_arb[0],
+ b + NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in1_arb[1],
+ b + NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in1_arb[2],
+ b + NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw1_esram_in2_arb[0],
+ b + NODE_HIBW1_ESRAM_IN_2_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in2_arb[1],
+ b + NODE_HIBW1_ESRAM_IN_2_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw1_esram_in2_arb[2],
+ b + NODE_HIBW1_ESRAM_IN_2_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw1_ddr_in_prio[0],
+ b + NODE_HIBW1_DDR_IN_0_PRIORITY);
+ writel_relaxed(context_icn.hibw1_ddr_in_prio[1],
+ b + NODE_HIBW1_DDR_IN_1_PRIORITY);
+ writel_relaxed(context_icn.hibw1_ddr_in_prio[2],
+ b + NODE_HIBW1_DDR_IN_2_PRIORITY);
+
+ writel_relaxed(context_icn.hibw1_ddr_in_limit[0],
+ b + NODE_HIBW1_DDR_IN_0_LIMIT);
+ writel_relaxed(context_icn.hibw1_ddr_in_limit[1],
+ b + NODE_HIBW1_DDR_IN_1_LIMIT);
+ writel_relaxed(context_icn.hibw1_ddr_in_limit[2],
+ b + NODE_HIBW1_DDR_IN_2_LIMIT);
+
+ writel_relaxed(context_icn.hibw1_ddr_out_prio,
+ b + NODE_HIBW1_DDR_OUT_0_PRIORITY);
+
+ writel_relaxed(context_icn.hibw2_esram_in_pri[0],
+ b + NODE_HIBW2_ESRAM_IN_0_PRIORITY);
+ writel_relaxed(context_icn.hibw2_esram_in_pri[1],
+ b + NODE_HIBW2_ESRAM_IN_1_PRIORITY);
+
+ writel_relaxed(context_icn.hibw2_esram_in0_arblimit[0],
+ b + NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw2_esram_in0_arblimit[1],
+ b + NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw2_esram_in0_arblimit[2],
+ b + NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw2_esram_in1_arblimit[0],
+ b + NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT);
+ writel_relaxed(context_icn.hibw2_esram_in1_arblimit[1],
+ b + NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT);
+ writel_relaxed(context_icn.hibw2_esram_in1_arblimit[2],
+ b + NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT);
+
+ writel_relaxed(context_icn.hibw2_ddr_in_prio[0],
+ b + NODE_HIBW2_DDR_IN_0_PRIORITY);
+ writel_relaxed(context_icn.hibw2_ddr_in_prio[1],
+ b + NODE_HIBW2_DDR_IN_1_PRIORITY);
+ writel_relaxed(context_icn.hibw2_ddr_in_prio[2],
+ b + NODE_HIBW2_DDR_IN_2_PRIORITY);
+ writel_relaxed(context_icn.hibw2_ddr_in_limit[0],
+ b + NODE_HIBW2_DDR_IN_0_LIMIT);
+ writel_relaxed(context_icn.hibw2_ddr_in_limit[1],
+ b + NODE_HIBW2_DDR_IN_1_LIMIT);
+ writel_relaxed(context_icn.hibw2_ddr_in_limit[2],
+ b + NODE_HIBW2_DDR_IN_2_LIMIT);
+ writel_relaxed(context_icn.hibw2_ddr_out_prio,
+ b + NODE_HIBW2_DDR_OUT_0_PRIORITY);
+
+ writel_relaxed(context_icn.esram0_in_prio[0],
+ b + NODE_ESRAM0_IN_0_PRIORITY);
+ writel_relaxed(context_icn.esram0_in_prio[1],
+ b + NODE_ESRAM0_IN_1_PRIORITY);
+ writel_relaxed(context_icn.esram0_in_prio[2],
+ b + NODE_ESRAM0_IN_2_PRIORITY);
+ writel_relaxed(context_icn.esram0_in_prio[3],
+ b + NODE_ESRAM0_IN_3_PRIORITY);
+
+ writel_relaxed(context_icn.esram0_in_lim[0],
+ b + NODE_ESRAM0_IN_0_LIMIT);
+ writel_relaxed(context_icn.esram0_in_lim[1],
+ b + NODE_ESRAM0_IN_1_LIMIT);
+ writel_relaxed(context_icn.esram0_in_lim[2],
+ b + NODE_ESRAM0_IN_2_LIMIT);
+ writel_relaxed(context_icn.esram0_in_lim[3],
+ b + NODE_ESRAM0_IN_3_LIMIT);
+
+ writel_relaxed(context_icn.esram12_in_prio[0],
+ b + NODE_ESRAM1_2_IN_0_PRIORITY);
+ writel_relaxed(context_icn.esram12_in_prio[1],
+ b + NODE_ESRAM1_2_IN_1_PRIORITY);
+ writel_relaxed(context_icn.esram12_in_prio[2],
+ b + NODE_ESRAM1_2_IN_2_PRIORITY);
+ writel_relaxed(context_icn.esram12_in_prio[3],
+ b + NODE_ESRAM1_2_IN_3_PRIORITY);
+
+ writel_relaxed(context_icn.esram12_in_arb_lim[0],
+ b + NODE_ESRAM1_2_IN_0_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[1],
+ b + NODE_ESRAM1_2_IN_0_ARB_2_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[2],
+ b + NODE_ESRAM1_2_IN_1_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[3],
+ b + NODE_ESRAM1_2_IN_1_ARB_2_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[4],
+ b + NODE_ESRAM1_2_IN_2_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[5],
+ b + NODE_ESRAM1_2_IN_2_ARB_2_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[6],
+ b + NODE_ESRAM1_2_IN_3_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram12_in_arb_lim[7],
+ b + NODE_ESRAM1_2_IN_3_ARB_2_LIMIT);
+
+ writel_relaxed(context_icn.esram34_in_prio[0],
+ b + NODE_ESRAM3_4_IN_0_PRIORITY);
+ writel_relaxed(context_icn.esram34_in_prio[1],
+ b + NODE_ESRAM3_4_IN_1_PRIORITY);
+ writel_relaxed(context_icn.esram34_in_prio[2],
+ b + NODE_ESRAM3_4_IN_2_PRIORITY);
+ writel_relaxed(context_icn.esram34_in_prio[3],
+ b + NODE_ESRAM3_4_IN_3_PRIORITY);
+
+ writel_relaxed(context_icn.esram34_in_arb_lim[0],
+ b + NODE_ESRAM3_4_IN_0_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[1],
+ b + NODE_ESRAM3_4_IN_0_ARB_2_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[2],
+ b + NODE_ESRAM3_4_IN_1_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[3],
+ b + NODE_ESRAM3_4_IN_1_ARB_2_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[4],
+ b + NODE_ESRAM3_4_IN_2_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[5],
+ b + NODE_ESRAM3_4_IN_2_ARB_2_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[6],
+ b + NODE_ESRAM3_4_IN_3_ARB_1_LIMIT);
+ writel_relaxed(context_icn.esram34_in_arb_lim[7],
+ b + NODE_ESRAM3_4_IN_3_ARB_2_LIMIT);
+}
+
+void u8500_context_init(void)
+{
+ context_icn.base = ioremap(U8500_ICN_BASE, SZ_8K);
+}
diff --git a/arch/arm/mach-ux500/pm/context.c b/arch/arm/mach-ux500/pm/context.c
new file mode 100644
index 00000000000..ffd73f3ed52
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/context.c
@@ -0,0 +1,962 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>,
+ * Rickard Andersson <rickard.andersson@stericsson.com>,
+ * Jonas Aaberg <jonas.aberg@stericsson.com>,
+ * Sundar Iyer for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/gpio/nomadik.h>
+
+#include <mach/hardware.h>
+#include <mach/irqs.h>
+#include <mach/pm.h>
+#include <mach/context.h>
+
+#include <asm/hardware/gic.h>
+#include <asm/smp_twd.h>
+
+#include "scu.h"
+#include "../product.h"
+#include "../prcc.h"
+
+#define GPIO_NUM_BANKS 9
+#define GPIO_NUM_SAVE_REGISTERS 7
+
+/*
+ * TODO:
+ * - Use the "UX500*"-macros instead where possible
+ */
+
+#define U8500_BACKUPRAM_SIZE SZ_64K
+
+#define U8500_PUBLIC_BOOT_ROM_BASE (U8500_BOOT_ROM_BASE + 0x17000)
+#define U5500_PUBLIC_BOOT_ROM_BASE (U5500_BOOT_ROM_BASE + 0x18000)
+
+/*
+ * Special dedicated addresses in backup RAM. The 5500 addresses are identical
+ * to the 8500 ones.
+ */
+#define U8500_EXT_RAM_LOC_BACKUPRAM_ADDR 0x80151FDC
+#define U8500_CPU0_CP15_CR_BACKUPRAM_ADDR 0x80151F80
+#define U8500_CPU1_CP15_CR_BACKUPRAM_ADDR 0x80151FA0
+
+#define U8500_CPU0_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR 0x80151FD8
+#define U8500_CPU1_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR 0x80151FE0
+
+#define GIC_DIST_ENABLE_NS 0x0
+
+/* 32 interrupts fit in 4 bytes */
+#define GIC_DIST_ENABLE_SET_COMMON_NUM ((DBX500_NR_INTERNAL_IRQS - \
+ IRQ_SHPI_START) / 32)
+#define GIC_DIST_ENABLE_SET_CPU_NUM (IRQ_SHPI_START / 32)
+#define GIC_DIST_ENABLE_SET_SPI0 GIC_DIST_ENABLE_SET
+#define GIC_DIST_ENABLE_SET_SPI32 (GIC_DIST_ENABLE_SET + IRQ_SHPI_START / 8)
+
+#define GIC_DIST_ENABLE_CLEAR_0 GIC_DIST_ENABLE_CLEAR
+#define GIC_DIST_ENABLE_CLEAR_32 (GIC_DIST_ENABLE_CLEAR + 4)
+#define GIC_DIST_ENABLE_CLEAR_64 (GIC_DIST_ENABLE_CLEAR + 8)
+#define GIC_DIST_ENABLE_CLEAR_96 (GIC_DIST_ENABLE_CLEAR + 12)
+#define GIC_DIST_ENABLE_CLEAR_128 (GIC_DIST_ENABLE_CLEAR + 16)
+
+#define GIC_DIST_PRI_COMMON_NUM ((DBX500_NR_INTERNAL_IRQS - IRQ_SHPI_START) / 4)
+#define GIC_DIST_PRI_CPU_NUM (IRQ_SHPI_START / 4)
+#define GIC_DIST_PRI_SPI0 GIC_DIST_PRI
+#define GIC_DIST_PRI_SPI32 (GIC_DIST_PRI + IRQ_SHPI_START)
+
+#define GIC_DIST_SPI_TARGET_COMMON_NUM ((DBX500_NR_INTERNAL_IRQS - \
+ IRQ_SHPI_START) / 4)
+#define GIC_DIST_SPI_TARGET_CPU_NUM (IRQ_SHPI_START / 4)
+#define GIC_DIST_SPI_TARGET_SPI0 GIC_DIST_TARGET
+#define GIC_DIST_SPI_TARGET_SPI32 (GIC_DIST_TARGET + IRQ_SHPI_START)
+
+/* 16 interrupts per 4 bytes */
+#define GIC_DIST_CONFIG_COMMON_NUM ((DBX500_NR_INTERNAL_IRQS - IRQ_SHPI_START) \
+ / 16)
+#define GIC_DIST_CONFIG_CPU_NUM (IRQ_SHPI_START / 16)
+#define GIC_DIST_CONFIG_SPI0 GIC_DIST_CONFIG
+#define GIC_DIST_CONFIG_SPI32 (GIC_DIST_CONFIG + IRQ_SHPI_START / 4)
+
+/* TODO! Move STM reg offsets to a suitable place */
+#define STM_CR_OFFSET 0x00
+#define STM_MMC_OFFSET 0x08
+#define STM_TER_OFFSET 0x10
+
+#define TPIU_PORT_SIZE 0x4
+#define TPIU_TRIGGER_COUNTER 0x104
+#define TPIU_TRIGGER_MULTIPLIER 0x108
+#define TPIU_CURRENT_TEST_PATTERN 0x204
+#define TPIU_TEST_PATTERN_REPEAT 0x208
+#define TPIU_FORMATTER 0x304
+#define TPIU_FORMATTER_SYNC 0x308
+#define TPIU_LOCK_ACCESS_REGISTER 0xFB0
+
+#define TPIU_UNLOCK_CODE 0xc5acce55
+
+#define SCU_FILTER_STARTADDR 0x40
+#define SCU_FILTER_ENDADDR 0x44
+#define SCU_ACCESS_CTRL_SAC 0x50
+
+/* The context of the Trace Port Interface Unit (TPIU) */
+static struct {
+ void __iomem *base;
+ u32 port_size;
+ u32 trigger_counter;
+ u32 trigger_multiplier;
+ u32 current_test_pattern;
+ u32 test_pattern_repeat;
+ u32 formatter;
+ u32 formatter_sync;
+} context_tpiu;
+
+static struct {
+ void __iomem *base;
+ u32 cr;
+ u32 mmc;
+ u32 ter;
+} context_stm_ape;
+
+struct context_gic_cpu {
+ void __iomem *base;
+ u32 ctrl;
+ u32 primask;
+ u32 binpoint;
+};
+static DEFINE_PER_CPU(struct context_gic_cpu, context_gic_cpu);
+
+static struct {
+ void __iomem *base;
+ u32 ns;
+ u32 enable_set[GIC_DIST_ENABLE_SET_COMMON_NUM]; /* IRQ 32 to 159 */
+ u32 priority_level[GIC_DIST_PRI_COMMON_NUM];
+ u32 spi_target[GIC_DIST_SPI_TARGET_COMMON_NUM];
+ u32 config[GIC_DIST_CONFIG_COMMON_NUM];
+} context_gic_dist_common;
+
+struct context_gic_dist_cpu {
+ void __iomem *base;
+ u32 enable_set[GIC_DIST_ENABLE_SET_CPU_NUM]; /* IRQ 0 to 31 */
+ u32 priority_level[GIC_DIST_PRI_CPU_NUM];
+ u32 spi_target[GIC_DIST_SPI_TARGET_CPU_NUM];
+ u32 config[GIC_DIST_CONFIG_CPU_NUM];
+};
+static DEFINE_PER_CPU(struct context_gic_dist_cpu, context_gic_dist_cpu);
+
+static struct {
+ void __iomem *base;
+ u32 ctrl;
+ u32 cpu_pwrstatus;
+ u32 inv_all_nonsecure;
+ u32 filter_start_addr;
+ u32 filter_end_addr;
+ u32 access_ctrl_sac;
+} context_scu;
+
+#define UX500_NR_PRCC_BANKS 5
+static struct {
+ void __iomem *base;
+ struct clk *clk;
+ u32 bus_clk;
+ u32 kern_clk;
+} context_prcc[UX500_NR_PRCC_BANKS];
+
+static u32 backup_sram_storage[NR_CPUS] = {
+ IO_ADDRESS(U8500_CPU0_CP15_CR_BACKUPRAM_ADDR),
+ IO_ADDRESS(U8500_CPU1_CP15_CR_BACKUPRAM_ADDR),
+};
+
+static u32 gpio_bankaddr[GPIO_NUM_BANKS] = {IO_ADDRESS(U8500_GPIOBANK0_BASE),
+ IO_ADDRESS(U8500_GPIOBANK1_BASE),
+ IO_ADDRESS(U8500_GPIOBANK2_BASE),
+ IO_ADDRESS(U8500_GPIOBANK3_BASE),
+ IO_ADDRESS(U8500_GPIOBANK4_BASE),
+ IO_ADDRESS(U8500_GPIOBANK5_BASE),
+ IO_ADDRESS(U8500_GPIOBANK6_BASE),
+ IO_ADDRESS(U8500_GPIOBANK7_BASE),
+ IO_ADDRESS(U8500_GPIOBANK8_BASE)
+};
+
+static u32 gpio_save[GPIO_NUM_BANKS][GPIO_NUM_SAVE_REGISTERS];
+
+/*
+ * Stacks and stack pointers
+ */
+static DEFINE_PER_CPU(u32[128], varm_registers_backup_stack);
+static DEFINE_PER_CPU(u32 *, varm_registers_pointer);
+
+static DEFINE_PER_CPU(u32[128], varm_cp15_backup_stack);
+static DEFINE_PER_CPU(u32 *, varm_cp15_pointer);
+
+static ATOMIC_NOTIFIER_HEAD(context_ape_notifier_list);
+static ATOMIC_NOTIFIER_HEAD(context_arm_notifier_list);
+
+/*
+ * Register a simple callback for handling vape context save/restore
+ */
+int context_ape_notifier_register(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&context_ape_notifier_list, nb);
+}
+EXPORT_SYMBOL(context_ape_notifier_register);
+
+/*
+ * Remove a previously registered callback
+ */
+int context_ape_notifier_unregister(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&context_ape_notifier_list,
+ nb);
+}
+EXPORT_SYMBOL(context_ape_notifier_unregister);
+
+/*
+ * Register a simple callback for handling varm context save/restore
+ */
+int context_arm_notifier_register(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&context_arm_notifier_list, nb);
+}
+EXPORT_SYMBOL(context_arm_notifier_register);
+
+/*
+ * Remove a previously registered callback
+ */
+int context_arm_notifier_unregister(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&context_arm_notifier_list,
+ nb);
+}
+EXPORT_SYMBOL(context_arm_notifier_unregister);
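
[Editorial sketch, not part of this patch: how a driver might hook into the APE chain above. Only context_ape_notifier_register() and the CONTEXT_APE_SAVE/CONTEXT_APE_RESTORE events come from this file (see context_vape_save()/context_vape_restore() below); my_base and MY_REG are hypothetical stand-ins.]

#include <linux/io.h>
#include <linux/notifier.h>
#include <mach/context.h>

#define MY_REG 0x10			/* hypothetical register offset */
static void __iomem *my_base;		/* hypothetical, mapped at probe */
static u32 my_saved_reg;

static int my_context_notify(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	switch (event) {
	case CONTEXT_APE_SAVE:
		my_saved_reg = readl(my_base + MY_REG);
		break;
	case CONTEXT_APE_RESTORE:
		writel(my_saved_reg, my_base + MY_REG);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_context_nb = {
	.notifier_call = my_context_notify,
};

/* At probe time: context_ape_notifier_register(&my_context_nb); */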
+
+static void save_prcc(void)
+{
+ int i;
+
+ for (i = 0; i < UX500_NR_PRCC_BANKS; i++) {
+ clk_enable(context_prcc[i].clk);
+
+ context_prcc[i].bus_clk =
+ readl(context_prcc[i].base + PRCC_PCKSR);
+ context_prcc[i].kern_clk =
+ readl(context_prcc[i].base + PRCC_KCKSR);
+
+ clk_disable(context_prcc[i].clk);
+ }
+}
+
+static void restore_prcc(void)
+{
+ int i;
+
+ for (i = 0; i < UX500_NR_PRCC_BANKS; i++) {
+ clk_enable(context_prcc[i].clk);
+
+ writel(~context_prcc[i].bus_clk,
+ context_prcc[i].base + PRCC_PCKDIS);
+ writel(~context_prcc[i].kern_clk,
+ context_prcc[i].base + PRCC_KCKDIS);
+
+ writel(context_prcc[i].bus_clk,
+ context_prcc[i].base + PRCC_PCKEN);
+ writel(context_prcc[i].kern_clk,
+ context_prcc[i].base + PRCC_KCKEN);
+ /*
+ * Consider having a while over KCK/BCK_STATUS
+ * to check that all clocks get disabled/enabled
+ */
+
+ clk_disable(context_prcc[i].clk);
+ }
+}
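
[Editorial sketch, not part of this patch: what the status polling suggested in the comment inside restore_prcc() could look like. PRCC_PCKSR is the register save_prcc() already reads; the assumption that it mirrors the PCKEN bits once the hardware settles, and the retry bound, are mine.]

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

static int prcc_wait_bus_clocks(void __iomem *base, u32 enabled)
{
	int retries = 1000;	/* arbitrary bound */

	/* Poll until all requested bus clocks read back as enabled. */
	while ((readl(base + PRCC_PCKSR) & enabled) != enabled) {
		if (--retries == 0)
			return -ETIMEDOUT;
		udelay(1);
	}
	return 0;
}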
+
+static void save_stm_ape(void)
+{
+ /*
+ * TODO: Check with PRCMU developers how STM is handled by PRCMU
+ * firmware. According to DB5500 design spec there is a "flush"
+ * mechanism supposed to be used by the PRCMU before power down,
+ * PRCMU fw might save/restore the following three registers
+ * at the same time.
+ */
+ context_stm_ape.cr = readl(context_stm_ape.base +
+ STM_CR_OFFSET);
+ context_stm_ape.mmc = readl(context_stm_ape.base +
+ STM_MMC_OFFSET);
+ context_stm_ape.ter = readl(context_stm_ape.base +
+ STM_TER_OFFSET);
+}
+
+static void restore_stm_ape(void)
+{
+ writel(context_stm_ape.ter,
+ context_stm_ape.base + STM_TER_OFFSET);
+ writel(context_stm_ape.mmc,
+ context_stm_ape.base + STM_MMC_OFFSET);
+ writel(context_stm_ape.cr,
+ context_stm_ape.base + STM_CR_OFFSET);
+}
+
+static inline bool tpiu_clocked(void)
+{
+ return ux500_jtag_enabled();
+}
+
+/*
+ * Save the context of the Trace Port Interface Unit (TPIU).
+ * Saving/restoring is needed for the PTM tracing to work together
+ * with the sleep states ApSleep and ApDeepSleep.
+ */
+static void save_tpiu(void)
+{
+ if (!tpiu_clocked())
+ return;
+
+ context_tpiu.port_size = readl(context_tpiu.base +
+ TPIU_PORT_SIZE);
+ context_tpiu.trigger_counter = readl(context_tpiu.base +
+ TPIU_TRIGGER_COUNTER);
+ context_tpiu.trigger_multiplier = readl(context_tpiu.base +
+ TPIU_TRIGGER_MULTIPLIER);
+ context_tpiu.current_test_pattern = readl(context_tpiu.base +
+ TPIU_CURRENT_TEST_PATTERN);
+ context_tpiu.test_pattern_repeat = readl(context_tpiu.base +
+ TPIU_TEST_PATTERN_REPEAT);
+ context_tpiu.formatter = readl(context_tpiu.base +
+ TPIU_FORMATTER);
+ context_tpiu.formatter_sync = readl(context_tpiu.base +
+ TPIU_FORMATTER_SYNC);
+}
+
+/*
+ * Restore the context of the Trace Port Interface Unit (TPIU).
+ * Saving/restoring is needed for the PTM tracing to work together
+ * with the sleep states ApSleep and ApDeepSleep.
+ */
+static void restore_tpiu(void)
+{
+ if (!tpiu_clocked())
+ return;
+
+ writel(TPIU_UNLOCK_CODE,
+ context_tpiu.base + TPIU_LOCK_ACCESS_REGISTER);
+
+ writel(context_tpiu.port_size,
+ context_tpiu.base + TPIU_PORT_SIZE);
+ writel(context_tpiu.trigger_counter,
+ context_tpiu.base + TPIU_TRIGGER_COUNTER);
+ writel(context_tpiu.trigger_multiplier,
+ context_tpiu.base + TPIU_TRIGGER_MULTIPLIER);
+ writel(context_tpiu.current_test_pattern,
+ context_tpiu.base + TPIU_CURRENT_TEST_PATTERN);
+ writel(context_tpiu.test_pattern_repeat,
+ context_tpiu.base + TPIU_TEST_PATTERN_REPEAT);
+ writel(context_tpiu.formatter,
+ context_tpiu.base + TPIU_FORMATTER);
+ writel(context_tpiu.formatter_sync,
+ context_tpiu.base + TPIU_FORMATTER_SYNC);
+}
+
+/*
+ * Save GIC CPU IF registers
+ *
+ * This is per cpu so it needs to be called for each one.
+ */
+static void save_gic_if_cpu(struct context_gic_cpu *c_gic_cpu)
+{
+ c_gic_cpu->ctrl = readl_relaxed(c_gic_cpu->base + GIC_CPU_CTRL);
+ c_gic_cpu->primask = readl_relaxed(c_gic_cpu->base + GIC_CPU_PRIMASK);
+ c_gic_cpu->binpoint = readl_relaxed(c_gic_cpu->base + GIC_CPU_BINPOINT);
+}
+
+/*
+ * Restore GIC CPU IF registers
+ *
+ * This is per cpu so it needs to be called for each one.
+ */
+static void restore_gic_if_cpu(struct context_gic_cpu *c_gic_cpu)
+{
+ writel_relaxed(c_gic_cpu->ctrl, c_gic_cpu->base + GIC_CPU_CTRL);
+ writel_relaxed(c_gic_cpu->primask, c_gic_cpu->base + GIC_CPU_PRIMASK);
+ writel_relaxed(c_gic_cpu->binpoint, c_gic_cpu->base + GIC_CPU_BINPOINT);
+}
+
+/*
+ * Save GIC Distributor Common registers
+ *
+ * This context is common. Only one CPU needs to call it.
+ *
+ * Save SPI (Shared Peripheral Interrupt) settings, IRQ 32-159.
+ */
+static void save_gic_dist_common(void)
+{
+ int i;
+
+ context_gic_dist_common.ns = readl_relaxed(context_gic_dist_common.base
+ + GIC_DIST_ENABLE_NS);
+
+ for (i = 0; i < GIC_DIST_ENABLE_SET_COMMON_NUM; i++)
+ context_gic_dist_common.enable_set[i] =
+ readl_relaxed(context_gic_dist_common.base +
+ GIC_DIST_ENABLE_SET_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_PRI_COMMON_NUM; i++)
+ context_gic_dist_common.priority_level[i] =
+ readl_relaxed(context_gic_dist_common.base +
+ GIC_DIST_PRI_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_SPI_TARGET_COMMON_NUM; i++)
+ context_gic_dist_common.spi_target[i] =
+ readl_relaxed(context_gic_dist_common.base +
+ GIC_DIST_SPI_TARGET_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_CONFIG_COMMON_NUM; i++)
+ context_gic_dist_common.config[i] =
+ readl_relaxed(context_gic_dist_common.base +
+ GIC_DIST_CONFIG_SPI32 + i * 4);
+}
+
+/*
+ * Restore GIC Distributor Common registers
+ *
+ * This context is common. Only one CPU needs to call it.
+ *
+ * Restore SPI (Shared Peripheral Interrupt) settings, IRQ 32-159.
+ */
+static void restore_gic_dist_common(void)
+{
+ int i;
+
+ for (i = 0; i < GIC_DIST_CONFIG_COMMON_NUM; i++)
+ writel_relaxed(context_gic_dist_common.config[i],
+ context_gic_dist_common.base +
+ GIC_DIST_CONFIG_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_SPI_TARGET_COMMON_NUM; i++)
+ writel_relaxed(context_gic_dist_common.spi_target[i],
+ context_gic_dist_common.base +
+ GIC_DIST_SPI_TARGET_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_PRI_COMMON_NUM; i++)
+ writel_relaxed(context_gic_dist_common.priority_level[i],
+ context_gic_dist_common.base +
+ GIC_DIST_PRI_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_ENABLE_SET_COMMON_NUM; i++)
+ writel_relaxed(context_gic_dist_common.enable_set[i],
+ context_gic_dist_common.base +
+ GIC_DIST_ENABLE_SET_SPI32 + i * 4);
+
+ writel_relaxed(context_gic_dist_common.ns,
+ context_gic_dist_common.base + GIC_DIST_ENABLE_NS);
+}
+
+/*
+ * Save GIC Dist CPU registers
+ *
+ * This needs to be called by each CPU, including those that do not
+ * call save_gic_dist_common(). Only the banked registers of the
+ * GIC are saved.
+ */
+static void save_gic_dist_cpu(struct context_gic_dist_cpu *c_gic)
+{
+ int i;
+
+ for (i = 0; i < GIC_DIST_ENABLE_SET_CPU_NUM; i++)
+ c_gic->enable_set[i] =
+ readl_relaxed(c_gic->base +
+ GIC_DIST_ENABLE_SET_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_PRI_CPU_NUM; i++)
+ c_gic->priority_level[i] =
+ readl_relaxed(c_gic->base +
+ GIC_DIST_PRI_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_SPI_TARGET_CPU_NUM; i++)
+ c_gic->spi_target[i] =
+ readl_relaxed(c_gic->base +
+ GIC_DIST_SPI_TARGET_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_CONFIG_CPU_NUM; i++)
+ c_gic->config[i] =
+ readl_relaxed(c_gic->base +
+ GIC_DIST_CONFIG_SPI0 + i * 4);
+}
+
+/*
+ * Restore GIC Dist CPU registers
+ *
+ * This needs to be called by each CPU, including those that do not
+ * call restore_gic_dist_common(). Only the banked registers of the
+ * GIC are restored.
+ */
+static void restore_gic_dist_cpu(struct context_gic_dist_cpu *c_gic)
+{
+ int i;
+
+ for (i = 0; i < GIC_DIST_CONFIG_CPU_NUM; i++)
+ writel_relaxed(c_gic->config[i],
+ c_gic->base +
+ GIC_DIST_CONFIG_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_SPI_TARGET_CPU_NUM; i++)
+ writel_relaxed(c_gic->spi_target[i],
+ c_gic->base +
+ GIC_DIST_SPI_TARGET_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_PRI_CPU_NUM; i++)
+ writel_relaxed(c_gic->priority_level[i],
+ c_gic->base +
+ GIC_DIST_PRI_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_ENABLE_SET_CPU_NUM; i++)
+ writel_relaxed(c_gic->enable_set[i],
+ c_gic->base +
+ GIC_DIST_ENABLE_SET_SPI0 + i * 4);
+}
+
+/*
+ * Disable interrupts that are not necessary
+ * to have turned on during ApDeepSleep.
+ */
+void context_gic_dist_disable_unneeded_irqs(void)
+{
+ writel(0xffffffff,
+ context_gic_dist_common.base +
+ GIC_DIST_ENABLE_CLEAR_0);
+
+ writel(0xffffffff,
+ context_gic_dist_common.base +
+ GIC_DIST_ENABLE_CLEAR_32);
+
+ /* Leave PRCMU IRQ 0 and 1 (bits 14 and 15 of this word) enabled */
+ writel(0xffff3fff,
+ context_gic_dist_common.base +
+ GIC_DIST_ENABLE_CLEAR_64);
+
+ writel(0xffffffff,
+ context_gic_dist_common.base +
+ GIC_DIST_ENABLE_CLEAR_96);
+
+ writel(0xffffffff,
+ context_gic_dist_common.base +
+ GIC_DIST_ENABLE_CLEAR_128);
+}
+
+static void save_scu(void)
+{
+ context_scu.ctrl =
+ readl_relaxed(context_scu.base + SCU_CTRL);
+ context_scu.cpu_pwrstatus =
+ readl_relaxed(context_scu.base + SCU_CPU_STATUS);
+ context_scu.inv_all_nonsecure =
+ readl_relaxed(context_scu.base + SCU_INVALIDATE);
+ context_scu.filter_start_addr =
+ readl_relaxed(context_scu.base + SCU_FILTER_STARTADDR);
+ context_scu.filter_end_addr =
+ readl_relaxed(context_scu.base + SCU_FILTER_ENDADDR);
+ context_scu.access_ctrl_sac =
+ readl_relaxed(context_scu.base + SCU_ACCESS_CTRL_SAC);
+}
+
+static void restore_scu(void)
+{
+ writel_relaxed(context_scu.ctrl,
+ context_scu.base + SCU_CTRL);
+ writel_relaxed(context_scu.cpu_pwrstatus,
+ context_scu.base + SCU_CPU_STATUS);
+ writel_relaxed(context_scu.inv_all_nonsecure,
+ context_scu.base + SCU_INVALIDATE);
+ writel_relaxed(context_scu.filter_start_addr,
+ context_scu.base + SCU_FILTER_STARTADDR);
+ writel_relaxed(context_scu.filter_end_addr,
+ context_scu.base + SCU_FILTER_ENDADDR);
+ writel_relaxed(context_scu.access_ctrl_sac,
+ context_scu.base + SCU_ACCESS_CTRL_SAC);
+}
+
+/*
+ * Save VAPE context
+ */
+void context_vape_save(void)
+{
+ atomic_notifier_call_chain(&context_ape_notifier_list,
+ CONTEXT_APE_SAVE, NULL);
+
+ if (cpu_is_u5500())
+ u5500_context_save_icn();
+ if (cpu_is_u8500())
+ u8500_context_save_icn();
+
+ save_stm_ape();
+
+ save_tpiu();
+
+ save_prcc();
+}
+
+/*
+ * Restore VAPE context
+ */
+void context_vape_restore(void)
+{
+ restore_prcc();
+
+ restore_tpiu();
+
+ restore_stm_ape();
+
+ if (cpu_is_u5500())
+ u5500_context_restore_icn();
+ if (cpu_is_u8500())
+ u8500_context_restore_icn();
+
+ atomic_notifier_call_chain(&context_ape_notifier_list,
+ CONTEXT_APE_RESTORE, NULL);
+}
+
+/*
+ * Save GPIO registers that might be modified
+ * for power save reasons.
+ */
+void context_gpio_save(void)
+{
+ int i;
+
+ for (i = 0; i < GPIO_NUM_BANKS; i++) {
+ gpio_save[i][0] = readl(gpio_bankaddr[i] + NMK_GPIO_AFSLA);
+ gpio_save[i][1] = readl(gpio_bankaddr[i] + NMK_GPIO_AFSLB);
+ gpio_save[i][2] = readl(gpio_bankaddr[i] + NMK_GPIO_PDIS);
+ gpio_save[i][3] = readl(gpio_bankaddr[i] + NMK_GPIO_DIR);
+ gpio_save[i][4] = readl(gpio_bankaddr[i] + NMK_GPIO_DAT);
+ gpio_save[i][6] = readl(gpio_bankaddr[i] + NMK_GPIO_SLPC);
+ }
+}
+
+/*
+ * Restore GPIO registers that might be modified
+ * for power save reasons.
+ */
+void context_gpio_restore(void)
+{
+ int i;
+ u32 output_state;
+ u32 pull_up;
+ u32 pull_down;
+ u32 pull;
+
+ for (i = 0; i < GPIO_NUM_BANKS; i++) {
+ writel(gpio_save[i][2], gpio_bankaddr[i] + NMK_GPIO_PDIS);
+
+ writel(gpio_save[i][3], gpio_bankaddr[i] + NMK_GPIO_DIR);
+
+ /* Set the high outputs. output_state = GPIO_DIR & GPIO_DAT */
+ output_state = gpio_save[i][3] & gpio_save[i][4];
+ writel(output_state, gpio_bankaddr[i] + NMK_GPIO_DATS);
+
+ /*
+ * Set the low outputs.
+ * output_state = ~(GPIO_DIR & GPIO_DAT) & GPIO_DIR
+ */
+ output_state = ~(gpio_save[i][3] & gpio_save[i][4]) &
+ gpio_save[i][3];
+ writel(output_state, gpio_bankaddr[i] + NMK_GPIO_DATC);
+
+ /*
+ * Restore pull up/down.
+ * Only write pull up/down settings on inputs where
+ * PDIS is not set.
+ * pull = (~GPIO_DIR & ~GPIO_PDIS)
+ */
+ pull = (~gpio_save[i][3] & ~gpio_save[i][2]);
+ nmk_gpio_read_pull(i, &pull_up);
+
+ pull_down = pull & ~pull_up;
+ pull_up = pull & pull_up;
+ /* Set pull ups */
+ writel(pull_up, gpio_bankaddr[i] + NMK_GPIO_DATS);
+ /* Set pull downs */
+ writel(pull_down, gpio_bankaddr[i] + NMK_GPIO_DATC);
+
+ writel(gpio_save[i][6], gpio_bankaddr[i] + NMK_GPIO_SLPC);
+
+ }
+}
+
+/*
+ * Restore GPIO mux registers that might be modified
+ * for power save reasons.
+ */
+void context_gpio_restore_mux(void)
+{
+ int i;
+
+ /* Change mux settings */
+ for (i = 0; i < GPIO_NUM_BANKS; i++) {
+ writel(gpio_save[i][0], gpio_bankaddr[i] + NMK_GPIO_AFSLA);
+ writel(gpio_save[i][1], gpio_bankaddr[i] + NMK_GPIO_AFSLB);
+ }
+}
+
+/*
+ * Safe sequence used to switch IOs between GPIO and Alternate-C mode:
+ * - Save SLPM registers (Not done.)
+ * - Set SLPM=0 for the IOs you want to switch. (We assume that all
+ * SLPM registers already are 0 except for the ones that want to
+ * have the mux connected in sleep (e.g. modem STM)).
+ * - Configure the GPIO registers for the IOs that are being switched
+ * - Set IOFORCE=1
+ * - Modify the AFLSA/B registers for the IOs that are being switched
+ * - Set IOFORCE=0
+ * - Restore SLPM registers (Not done.)
+ * - Any spurious wake up event during switch sequence to be ignored
+ * and cleared
+ */
+void context_gpio_mux_safe_switch(bool begin)
+{
+ int i;
+
+ static u32 rwimsc[GPIO_NUM_BANKS];
+ static u32 fwimsc[GPIO_NUM_BANKS];
+
+ if (begin) {
+ for (i = 0; i < GPIO_NUM_BANKS; i++) {
+ /* Save registers */
+ rwimsc[i] = readl(gpio_bankaddr[i] + NMK_GPIO_RWIMSC);
+ fwimsc[i] = readl(gpio_bankaddr[i] + NMK_GPIO_FWIMSC);
+
+ /* Prevent spurious wakeups */
+ writel(0, gpio_bankaddr[i] + NMK_GPIO_RWIMSC);
+ writel(0, gpio_bankaddr[i] + NMK_GPIO_FWIMSC);
+ }
+
+ ux500_pm_prcmu_set_ioforce(true);
+ } else {
+ ux500_pm_prcmu_set_ioforce(false);
+
+ /* Restore wake up settings */
+ for (i = 0; i < GPIO_NUM_BANKS; i++) {
+ writel(rwimsc[i], gpio_bankaddr[i] + NMK_GPIO_RWIMSC);
+ writel(fwimsc[i], gpio_bankaddr[i] + NMK_GPIO_FWIMSC);
+ }
+ }
+}
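
[Editorial sketch, not part of this patch: the bracketing pattern context_gpio_mux_safe_switch() implements, following the safe-sequence comment above. The AFSLA/B writes in the middle are placeholders for whatever board code actually reprograms.]

static void example_switch_to_altc(void __iomem *bank)
{
	context_gpio_mux_safe_switch(true);	/* wakeups masked, IOFORCE=1 */

	/* Placeholder: reprogram the alternate-function selects. */
	writel(0xffffffff, bank + NMK_GPIO_AFSLA);
	writel(0xffffffff, bank + NMK_GPIO_AFSLB);

	context_gpio_mux_safe_switch(false);	/* IOFORCE=0, wakeups restored */
}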
+
+/*
+ * Save common
+ *
+ * This function must be called once for all cores before going to deep sleep.
+ */
+void context_varm_save_common(void)
+{
+ atomic_notifier_call_chain(&context_arm_notifier_list,
+ CONTEXT_ARM_COMMON_SAVE, NULL);
+
+ /* Save common parts */
+ save_gic_dist_common();
+ save_scu();
+}
+
+/*
+ * Restore common
+ *
+ * This function must be called once for all cores when waking up from deep
+ * sleep.
+ */
+void context_varm_restore_common(void)
+{
+ /* Restore common parts */
+ restore_scu();
+ restore_gic_dist_common();
+
+ atomic_notifier_call_chain(&context_arm_notifier_list,
+ CONTEXT_ARM_COMMON_RESTORE, NULL);
+}
+
+/*
+ * Save core
+ *
+ * This function must be called once for each cpu core before going to deep
+ * sleep.
+ */
+void context_varm_save_core(void)
+{
+ int cpu = smp_processor_id();
+
+ atomic_notifier_call_chain(&context_arm_notifier_list,
+ CONTEXT_ARM_CORE_SAVE, NULL);
+
+ per_cpu(varm_cp15_pointer, cpu) = per_cpu(varm_cp15_backup_stack, cpu);
+
+ /* Save core */
+ twd_save();
+ save_gic_if_cpu(&per_cpu(context_gic_cpu, cpu));
+ save_gic_dist_cpu(&per_cpu(context_gic_dist_cpu, cpu));
+ context_save_cp15_registers(&per_cpu(varm_cp15_pointer, cpu));
+}
+
+/*
+ * Restore core
+ *
+ * This function must be called once for each cpu core when waking up from
+ * deep sleep.
+ */
+void context_varm_restore_core(void)
+{
+ int cpu = smp_processor_id();
+
+ /* Restore core */
+ context_restore_cp15_registers(&per_cpu(varm_cp15_pointer, cpu));
+ restore_gic_dist_cpu(&per_cpu(context_gic_dist_cpu, cpu));
+ restore_gic_if_cpu(&per_cpu(context_gic_cpu, cpu));
+ twd_restore();
+
+ atomic_notifier_call_chain(&context_arm_notifier_list,
+ CONTEXT_ARM_CORE_RESTORE, NULL);
+}
+
+/*
+ * Save CPU registers
+ *
+ * This function saves ARM registers.
+ */
+void context_save_cpu_registers(void)
+{
+ int cpu = smp_processor_id();
+
+ per_cpu(varm_registers_pointer, cpu) =
+ per_cpu(varm_registers_backup_stack, cpu);
+ context_save_arm_registers(&per_cpu(varm_registers_pointer, cpu));
+}
+
+/*
+ * Restore CPU registers
+ *
+ * This function restores ARM registers.
+ */
+void context_restore_cpu_registers(void)
+{
+ int cpu = smp_processor_id();
+
+ context_restore_arm_registers(&per_cpu(varm_registers_pointer, cpu));
+}
+
+/*
+ * This function stores the CP15 registers related to cache and MMU
+ * in backup SRAM. It also stores the stack pointer, CPSR and the
+ * return address for the PC in backup SRAM, and then executes a
+ * wait for interrupt (WFI).
+ */
+void context_save_to_sram_and_wfi(bool cleanL2cache)
+{
+ int cpu = smp_processor_id();
+
+ context_save_to_sram_and_wfi_internal(backup_sram_storage[cpu],
+ cleanL2cache);
+}
+
+static int __init context_init(void)
+{
+ int i;
+ void __iomem *ux500_backup_ptr;
+
+ /* allocate backup pointer for RAM data */
+ ux500_backup_ptr = (void *)__get_free_pages(GFP_KERNEL,
+ get_order(U8500_BACKUPRAM_SIZE));
+
+ if (!ux500_backup_ptr) {
+ pr_warning("context: could not allocate backup memory\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * ROM code addresses to store backup contents,
+ * pass the physical address of back up to ROM code
+ */
+ writel(virt_to_phys(ux500_backup_ptr),
+ IO_ADDRESS(U8500_EXT_RAM_LOC_BACKUPRAM_ADDR));
+
+ if (cpu_is_u5500()) {
+ writel(IO_ADDRESS(U5500_PUBLIC_BOOT_ROM_BASE),
+ IO_ADDRESS(U8500_CPU0_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));
+
+ writel(IO_ADDRESS(U5500_PUBLIC_BOOT_ROM_BASE),
+ IO_ADDRESS(U8500_CPU1_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));
+
+ context_tpiu.base = ioremap(U5500_TPIU_BASE, SZ_4K);
+ context_stm_ape.base = ioremap(U5500_STM_REG_BASE, SZ_4K);
+ context_scu.base = ioremap(U5500_SCU_BASE, SZ_4K);
+
+ context_prcc[0].base = ioremap(U5500_CLKRST1_BASE, SZ_4K);
+ context_prcc[1].base = ioremap(U5500_CLKRST2_BASE, SZ_4K);
+ context_prcc[2].base = ioremap(U5500_CLKRST3_BASE, SZ_4K);
+ context_prcc[3].base = ioremap(U5500_CLKRST5_BASE, SZ_4K);
+ context_prcc[4].base = ioremap(U5500_CLKRST6_BASE, SZ_4K);
+
+ context_gic_dist_common.base = ioremap(U5500_GIC_DIST_BASE, SZ_4K);
+ per_cpu(context_gic_cpu, 0).base = ioremap(U5500_GIC_CPU_BASE, SZ_4K);
+ } else if (cpu_is_u8500()) {
+		/* Store the boot ROM's logical address in backup RAM, for both CPUs */
+ writel(IO_ADDRESS(U8500_PUBLIC_BOOT_ROM_BASE),
+ IO_ADDRESS(U8500_CPU0_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));
+
+ writel(IO_ADDRESS(U8500_PUBLIC_BOOT_ROM_BASE),
+ IO_ADDRESS(U8500_CPU1_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));
+
+ context_tpiu.base = ioremap(U8500_TPIU_BASE, SZ_4K);
+ context_stm_ape.base = ioremap(U8500_STM_REG_BASE, SZ_4K);
+ context_scu.base = ioremap(U8500_SCU_BASE, SZ_4K);
+
+		/* PERIPH4 is always on, so no need to save its PRCC context */
+ context_prcc[0].base = ioremap(U8500_CLKRST1_BASE, SZ_4K);
+ context_prcc[1].base = ioremap(U8500_CLKRST2_BASE, SZ_4K);
+ context_prcc[2].base = ioremap(U8500_CLKRST3_BASE, SZ_4K);
+ context_prcc[3].base = ioremap(U8500_CLKRST5_BASE, SZ_4K);
+ context_prcc[4].base = ioremap(U8500_CLKRST6_BASE, SZ_4K);
+
+ context_gic_dist_common.base = ioremap(U8500_GIC_DIST_BASE, SZ_4K);
+ per_cpu(context_gic_cpu, 0).base = ioremap(U8500_GIC_CPU_BASE, SZ_4K);
+ }
+
+ per_cpu(context_gic_dist_cpu, 0).base = context_gic_dist_common.base;
+
+ for (i = 1; i < num_possible_cpus(); i++) {
+ per_cpu(context_gic_cpu, i).base
+ = per_cpu(context_gic_cpu, 0).base;
+ per_cpu(context_gic_dist_cpu, i).base
+ = per_cpu(context_gic_dist_cpu, 0).base;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(context_prcc); i++) {
+ const int clusters[] = {1, 2, 3, 5, 6};
+ char clkname[10];
+
+ snprintf(clkname, sizeof(clkname), "PERIPH%d", clusters[i]);
+
+ context_prcc[i].clk = clk_get_sys(clkname, NULL);
+ BUG_ON(IS_ERR(context_prcc[i].clk));
+ }
+
+ if (cpu_is_u8500()) {
+ u8500_context_init();
+ } else if (cpu_is_u5500()) {
+ u5500_context_init();
+ } else {
+ printk(KERN_ERR "context: unknown hardware!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+subsys_initcall(context_init);
diff --git a/arch/arm/mach-ux500/pm/context_arm.S b/arch/arm/mach-ux500/pm/context_arm.S
new file mode 100644
index 00000000000..edb894d6a35
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/context_arm.S
@@ -0,0 +1,409 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>
+ * Rickard Andersson <rickard.andersson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#include <linux/linkage.h>
+#include <mach/hardware.h>
+#include <asm/hardware/cache-l2x0.h>
+
+/*
+ * Save and increment macro
+ */
+.macro SAVE_AND_INCREMENT FROM_REG TO_REG
+ str \FROM_REG, [\TO_REG], #+4
+.endm
+
+/*
+ * Decrement and restore macro
+ */
+.macro DECREMENT_AND_RESTORE FROM_REG TO_REG
+ ldr \TO_REG, [\FROM_REG, #-4]!
+.endm
+
+/*
+ * Save ARM registers
+ *
+ * This function must be called in supervisor mode.
+ *
+ * r0 = address to backup stack pointer
+ *
+ * Backup stack operations:
+ * + {sp, lr}^
+ * + cpsr
+ * + {r3, r8-r14} (FIQ mode: r3=spsr)
+ * + {r3, r13, r14} (IRQ mode: r3=spsr)
+ * + {r3, r13, r14} (abort mode: r3=spsr)
+ * + {r3, r13, r14} (undef mode: r3=spsr)
+ */
+ .align
+ .section ".text", "ax"
+ENTRY(context_save_arm_registers)
+ stmfd sp!, {r1, r2, r3, lr} @ Save on stack
+ ldr r1, [r0] @ Read backup stack pointer
+
+ARM( stmia r1, {sp, lr}^ ) @ Store user mode sp and lr
+ @ registers
+ARM( add r1, r1, #8 ) @ Update backup pointer (not
+ @ done in previous instruction)
+THUMB( str sp, [r1], #+4 )
+THUMB( str lr, [r1], #+4 )
+
+ mrs r2, cpsr @ Get CPSR
+ SAVE_AND_INCREMENT r2 r1 @ Save CPSR register
+ orr r2, r2, #0xc0 @ Disable FIQ and IRQ
+ bic r2, r2, #0x1f @ Setup r2 to change mode
+
+	@ The suffix to CPSR refers to which field(s) of the CPSR is
+	@ referenced (you can specify one or more). Defined fields are:
+ @
+ @ c - control
+ @ x - extension
+ @ s - status
+ @ f - flags
+
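+	@ ARM processor mode encodings used for the mode switches below:
+	@ 0x11 = FIQ, 0x12 = IRQ, 0x13 = SVC, 0x17 = ABT, 0x1B = UND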
+ orr r3, r2, #0x11 @ Save FIQ mode registers
+ msr cpsr_cxsf, r3
+ mrs r3, spsr
+ARM( stmia r1!, {r3, r8-r14} )
+THUMB( stmia r1!, {r3, r8-r12, r14} )
+THUMB( str r13, [r1], #+4 )
+
+ orr r3, r2, #0x12 @ Save IRQ mode registers
+ msr cpsr_cxsf, r3
+ mrs r3, spsr
+ARM( stmia r1!, {r3, r13, r14} )
+THUMB( stmia r1!, {r3, r14} )
+THUMB( str r13, [r1], #+4 )
+
+ orr r3, r2, #0x17 @ Save abort mode registers +
+ @ common mode registers
+ msr cpsr_cxsf, r3
+ mrs r3, spsr
+ARM( stmia r1!, {r3, r13, r14} )
+THUMB( stmia r1!, {r3, r14} )
+THUMB( str r13, [r1], #+4 )
+
+ orr r3, r2, #0x1B @ Save undef mode registers
+ msr cpsr_cxsf, r3
+ mrs r3, spsr
+ARM( stmia r1!, {r3, r13, r14} )
+THUMB( stmia r1!, {r3, r14} )
+THUMB( str r13, [r1], #+4 )
+
+ orr r3, r2, #0x13 @ Return to supervisor mode
+ msr cpsr_cxsf, r3
+
+ str r1, [r0] @ Write backup stack pointer
+ ldmfd sp!, {r1, r2, r3, pc} @ Restore registers and return
+
+
+
+/*
+ * Restore ARM registers
+ *
+ * This function must be called in supervisor mode.
+ *
+ * r0 = address to backup stack pointer
+ *
+ * Backup stack operations:
+ * - {r3, r13, r14} (undef mode: spsr=r3)
+ * - {r3, r13, r14} (abort mode: spsr=r3)
+ * - {r3, r13, r14} (IRQ mode: spsr=r3)
+ * - {r3, r8-r14} (FIQ mode: spsr=r3)
+ * - cpsr
+ * - {sp, lr}^
+ */
+ .align
+ .section ".text", "ax"
+ENTRY(context_restore_arm_registers)
+ stmfd sp!, {r1, r2, r3, lr} @ Save on stack
+ ldr r1, [r0] @ Read backup stack pointer
+
+ mrs r2, cpsr @ Get CPSR
+ orr r2, r2, #0xc0 @ Disable FIQ and IRQ
+ bic r2, r2, #0x1f @ Setup r2 to change mode
+
+ orr r3, r2, #0x1b @ Restore undef mode registers
+ msr cpsr_cxsf, r3
+ARM( ldmdb r1!, {r3, r13, r14} )
+THUMB(	ldr	r13, [r1, #-4]!	)	@ pre-indexed writeback to match
+					@ the post-incremented save path
+THUMB( ldmdb r1!, {r3, r14} )
+ msr spsr_cxsf, r3
+
+ orr r3, r2, #0x17 @ Restore abort mode registers
+ msr cpsr_cxsf, r3
+ARM( ldmdb r1!, {r3, r13, r14} )
+THUMB(	ldr	r13, [r1, #-4]!	)
+THUMB( ldmdb r1!, {r3, r14} )
+ msr spsr_cxsf, r3
+
+ orr r3, r2, #0x12 @ Restore IRQ mode registers
+ msr cpsr_cxsf, r3
+ARM( ldmdb r1!, {r3, r13, r14} )
+THUMB(	ldr	r13, [r1, #-4]!	)
+THUMB( ldmdb r1!, {r3, r14} )
+ msr spsr_cxsf, r3
+
+ orr r3, r2, #0x11 @ Restore FIQ mode registers
+ msr cpsr_cxsf, r3
+ARM( ldmdb r1!, {r3, r8-r14} )
+THUMB(	ldr	r13, [r1, #-4]!	)
+THUMB( ldmdb r1!, {r3, r8-r12, r14} )
+
+ msr spsr_cxsf, r3
+
+ DECREMENT_AND_RESTORE r1 r3 @ Restore cpsr register
+ msr cpsr_cxsf, r3
+
+ARM( ldmdb r1, {sp, lr}^ ) @ Restore sp and lr registers
+ARM( sub r1, r1, #8 ) @ Update backup pointer (not
+ @ done in previous instruction)
+THUMB(	ldr	lr, [r1, #-4]!	)
+THUMB(	ldr	sp, [r1, #-4]!	)
+
+ str r1, [r0] @ Write backup stack pointer
+ ldmfd sp!, {r1, r2, r3, pc} @ Restore registers and return
+
+
+
+/*
+ * Save CP15 registers
+ *
+ * This function must be called in supervisor mode.
+ *
+ * r0 = address to backup stack pointer
+ *
+ * TTBR0, TTBR1, TTBCR, DACR CP15 registers are restored by boot ROM from SRAM.
+ */
+ .align 4
+ .section ".text", "ax"
+ENTRY(context_save_cp15_registers)
+ stmfd sp!, {r1, r2, r3, lr} @ Save on stack (r3 is saved due
+ @ to 8 byte aligned stack)
+ ldr r1, [r0] @ Read backup stack pointer
+
+ mrc p15, 0, r2, c12, c0, 0 @ Read Non-secure Vector Base
+ @ Address Register
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 0, r2, c10, c2, 0 @ Access primary memory region
+ @ remap register
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 0, r2, c10, c2, 1 @ Access normal memory region
+ @ remap register
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 0, r2, c13, c0, 1 @ Read Context ID Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c13, c0, 2 @ Read Thread ID registers,
+ @ this register is both user
+ @ and privileged R/W accessible
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c13, c0, 3 @ Read Thread ID registers,
+ @ this register is user
+ @ read-only and privileged R/W
+ @ accessible.
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c13, c0, 4 @ Read Thread ID registers,
+ @ this register is privileged
+ @ R/W accessible only.
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 2, r2, c0, c0, 0 @ Cache Size Selection Register
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 0, r2, c9, c12, 0 @ Read PMNC Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c12, 1 @ Read PMCNTENSET Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c12, 5 @ Read PMSELR Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c13, 0 @ Read PMCCNTR Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c13, 1 @ Read PMXEVTYPER Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c14, 0 @ Read PMUSERENR Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c14, 1 @ Read PMINTENSET Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c14, 2 @ Read PMINTENCLR Register
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 0, r2, c1, c0, 2 @ Read CPACR Register
+ SAVE_AND_INCREMENT r2 r1
+
+ str r1, [r0] @ Write backup stack pointer
+ ldmfd sp!, {r1, r2, r3, pc} @ Restore registers and return
+
+
+
+/*
+ * Restore CP15 registers
+ *
+ * This function must be called in supervisor mode.
+ *
+ * r0 = address to backup stack pointer
+ */
+ .align 4
+ .section ".text", "ax"
+ENTRY(context_restore_cp15_registers)
+ stmfd sp!, {r1, r2, r3, lr} @ Save on stack (r3 is saved due
+ @ to 8 byte aligned stack)
+ ldr r1, [r0] @ Read backup stack pointer
+
+ DECREMENT_AND_RESTORE r1 r2 @ Write CPACR register
+ mcr p15, 0, r2, c1, c0, 2
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c14, 2 @ Write PMINTENCLR Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c14, 1 @ Write PMINTENSET Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c14, 0 @ Write PMUSERENR Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c13, 1 @ Write PMXEVTYPER Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c13, 0 @ Write PMCCNTR Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c12, 5 @ Write PMSELR Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c12, 1 @ Write PMCNTENSET Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c12, 0 @ Write PMNC Register
+
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 2, r2, c0, c0, 0 @ Cache Size Selection Register
+
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c13, c0, 4 @ Write Thread ID registers,
+ @ this register is privileged
+ @ R/W accessible only
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c13, c0, 3 @ Write Thread ID registers,
+ @ this register is user
+ @ read-only and privileged R/W
+ @ accessible
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c13, c0, 2 @ Write Thread ID registers,
+ @ this register is both user
+ @ and privileged R/W accessible
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c13, c0, 1 @ Write Context ID Register
+
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c10, c2, 1 @ Access normal memory region
+ @ remap register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c10, c2, 0 @ Access primary memory region
+ @ remap register
+
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c12, c0, 0 @ Write Non-secure Vector Base
+ @ Address Register
+
+ str r1, [r0] @ Write backup stack pointer
+ ldmfd sp!, {r1, r2, r3, pc} @ Restore registers and return
+
+
+/*
+ * L1 cache clean function. Commit 'dirty' data from L1
+ * to L2 cache.
+ *
+ * r0, r1, r2, used locally
+ *
+ */
+ .align 4
+ .section ".text", "ax"
+ENTRY(context_clean_l1_cache_all)
+
+	mov	r0, #0			@ switch to cache level 0
+ @ (L1 cache)
+ mcr p15, 2, r0, c0, c0, 0 @ select current cache level
+ @ in cssr
+
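+	@ Assumes the Cortex-A9 L1 D-cache geometry used here: 4 ways,
+	@ 256 sets, 32-byte lines; in the set/way word the way index
+	@ goes in bits [31:30] and the set index in bits [12:5]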
+ dmb
+ mov r1, #0 @ r1 = way index
+wayLoopL1clean:
+ mov r0, #0 @ r0 = line index
+lineLoopL1clean:
+ mov r2, r1, lsl #30 @ TODO: OK to hard-code
+ @ SoC-specific L1 cache details?
+ mov r3, r0, lsl #5
+ add r2, r3
+ mcr p15, 0, r2, c7, c10, 2 @ Clean cache by set/way
+ add r0, r0, #1
+ cmp r0, #256 @ TODO: Ok with hard-coded
+ @ set/way sizes or do we have to
+ @ read them from ARM regs? Is it
+ @ set correctly in silicon?
+ bne lineLoopL1clean
+ add r1, r1, #1
+ cmp r1, #4 @ TODO: Ditto, sizes...
+ bne wayLoopL1clean
+
+ dsb
+ isb
+ mov pc, lr
+
+ENDPROC(context_clean_l1_cache_all)
+
+/*
+ * Last saves to backup RAM, cache clean and WFI
+ *
+ * r0 = address of the backup_sram_storage base
+ * r1 = indicate whether also L2 cache should be cleaned
+ */
+ .align 4
+ .section ".text", "ax"
+ENTRY(context_save_to_sram_and_wfi_internal)
+
+ stmfd sp!, {r2-r12, lr} @ save on stack.
+
+ mrc p15, 0, r2, c1, c0, 0 @ read cp15 system control
+ @ register
+ str r2, [r0, #0x00]
+ mrc p15, 0, r2, c2, c0, 0 @ read cp15 ttb0 register
+ str r2, [r0, #0x04]
+ mrc p15, 0, r2, c2, c0, 1 @ read cp15 ttb1 register
+ str r2, [r0, #0x08]
+ mrc p15, 0, r2, c2, c0, 2 @ read cp15 ttb control register
+ str r2, [r0, #0x0C]
+ mrc p15, 0, r2, c3, c0, 0 @ read domain access control
+ @ register
+ str r2, [r0, #0x10]
+
+ ldr r2, =return_here
+ str r2, [r0, #0x14] @ save program counter restore
+ @ value to backup_sram_storage
+ mrs r2, cpsr
+ str r2, [r0, #0x18] @ save cpsr to
+ @ backup_sram_storage
+ str sp, [r0, #0x1c] @ save sp to backup_sram_storage
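+	@ Offsets 0x00-0x1c above form the wakeup context that the resume
+	@ path consumes to get back to return_here below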
+
+	mov	r4, r1			@ Set r4 = cleanL2cache; r1
+					@ will be destroyed by
+					@ context_clean_l1_cache_all
+
+ bl context_clean_l1_cache_all @ Commit all dirty data in L1
+ @ cache to L2 without
+ @ invalidating
+
+ dsb @ data synchronization barrier
+ isb @ instruction synchronization
+ @ barrier
+ wfi @ wait for interrupt
+
+return_here: @ both cores return here
+					@ now we are out of deep sleep
+ @ with all the context lost
+ @ except pc, sp and cpsr
+
+ ldmfd sp!, {r2-r12, pc} @ restore from stack
+
diff --git a/arch/arm/mach-ux500/pm/performance.c b/arch/arm/mach-ux500/pm/performance.c
new file mode 100644
index 00000000000..04aca3cb5bd
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/performance.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Johan Rudholm <johan.rudholm@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/genhd.h>
+#include <linux/major.h>
+#include <linux/cdev.h>
+#include <linux/kernel_stat.h>
+#include <linux/workqueue.h>
+#include <linux/kernel.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/cpu.h>
+#include <linux/pm_qos.h>
+
+#include <mach/irqs.h>
+
+#define WLAN_PROBE_DELAY 3000 /* 3 seconds */
+#define WLAN_LIMIT	(3000/3) /* More than 1000 irqs per 3 s probe period */
+
+/*
+ * MMC TODO:
+ * o Develop a more power-aware algorithm
+ * o Make the parameters visible through debugfs
+ * o Get the value of CONFIG_MMC_BLOCK_MINORS in runtime instead, since
+ * it may be altered by drivers/mmc/card/block.c
+ */
+
+/* Sample reads and writes every n ms */
+#define PERF_MMC_PROBE_DELAY 1000
+/* Read threshold, sectors/second */
+#define PERF_MMC_LIMIT_READ 10240
+/* Write threshold, sectors/second */
+#define PERF_MMC_LIMIT_WRITE 8192
+/* Maximum number of MMC devices monitored */
+#define PERF_MMC_HOSTS 8
+
+/*
+ * Rescan for new MMC devices every
+ * PERF_MMC_PROBE_DELAY * PERF_MMC_RESCAN_CYCLES ms
+ */
+#define PERF_MMC_RESCAN_CYCLES 10
+
+#ifdef CONFIG_MMC_BLOCK
+static struct delayed_work work_mmc;
+#endif
+
+static struct delayed_work work_wlan_workaround;
+static struct pm_qos_request wlan_pm_qos_latency;
+static bool wlan_pm_qos_is_latency_0;
+
+static void wlan_load(struct work_struct *work)
+{
+ int cpu;
+ unsigned int num_irqs = 0;
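+	/* Starting at UINT_MAX keeps the very first sample from boosting */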
+ static unsigned int old_num_irqs = UINT_MAX;
+
+ for_each_online_cpu(cpu)
+ num_irqs += kstat_irqs_cpu(IRQ_DB8500_SDMMC1, cpu);
+
+ if ((num_irqs > old_num_irqs) &&
+ (num_irqs - old_num_irqs) > WLAN_LIMIT) {
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "wlan", 125);
+ if (!wlan_pm_qos_is_latency_0) {
+ /*
+ * The wake up latency is set to 0 to prevent
+ * the system from going to sleep. This improves
+ * the wlan throughput in DMA mode.
+ * The wake up latency from sleep adds ~5% overhead
+ * for TX in some cases.
+ * This change doesn't increase performance for wlan
+ * PIO since the CPU usage prevents sleep in this mode.
+ */
+ pm_qos_add_request(&wlan_pm_qos_latency,
+ PM_QOS_CPU_DMA_LATENCY, 0);
+ wlan_pm_qos_is_latency_0 = true;
+ }
+ } else {
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "wlan", 25);
+ if (wlan_pm_qos_is_latency_0) {
+ pm_qos_remove_request(&wlan_pm_qos_latency);
+ wlan_pm_qos_is_latency_0 = false;
+ }
+ }
+
+ old_num_irqs = num_irqs;
+
+ schedule_delayed_work_on(0,
+ &work_wlan_workaround,
+ msecs_to_jiffies(WLAN_PROBE_DELAY));
+}
+
+#ifdef CONFIG_MMC_BLOCK
+/*
+ * Loop through every CONFIG_MMC_BLOCK_MINORS'th minor device for
+ * MMC_BLOCK_MAJOR and get the struct gendisk for each device. Populates
+ * mmc_disks and returns the number of disks found.
+ */
+static int scan_mmc_devices(struct gendisk *mmc_disks[])
+{
+ dev_t devnr;
+ int i, j = 0, part;
+ struct gendisk *mmc_devices[256 / CONFIG_MMC_BLOCK_MINORS];
+
+ memset(&mmc_devices, 0, sizeof(mmc_devices));
+
+ for (i = 0; i * CONFIG_MMC_BLOCK_MINORS < 256; i++) {
+ devnr = MKDEV(MMC_BLOCK_MAJOR, i * CONFIG_MMC_BLOCK_MINORS);
+ mmc_devices[i] = get_gendisk(devnr, &part);
+
+		/* Skip missing devices and devices with no capacity */
+ if (!mmc_devices[i] || !get_capacity(mmc_devices[i]))
+ continue;
+
+ mmc_disks[j] = mmc_devices[i];
+ j++;
+
+ if (j == PERF_MMC_HOSTS)
+ break;
+ }
+
+ return j;
+}
+
+/*
+ * Sample sectors read and written to any MMC devices, update PRCMU
+ * qos requirement
+ */
+static void mmc_load(struct work_struct *work)
+{
+ static unsigned long long old_sectors_read[PERF_MMC_HOSTS];
+ static unsigned long long old_sectors_written[PERF_MMC_HOSTS];
+ static struct gendisk *mmc_disks[PERF_MMC_HOSTS];
+ static int cycle, nrdisk;
+ static bool old_mode;
+ unsigned long long sectors;
+ bool new_mode = false;
+ int i;
+
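+	/*
+	 * cycle is 0 on the first call and again after every
+	 * PERF_MMC_RESCAN_CYCLES samples, triggering a device rescan.
+	 */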
+ if (!cycle) {
+ memset(&mmc_disks, 0, sizeof(mmc_disks));
+ nrdisk = scan_mmc_devices(mmc_disks);
+ cycle = PERF_MMC_RESCAN_CYCLES;
+ }
+ cycle--;
+
+ for (i = 0; i < nrdisk; i++) {
+ sectors = part_stat_read(&(mmc_disks[i]->part0),
+ sectors[READ]);
+
+ if (old_sectors_read[i] &&
+ sectors > old_sectors_read[i] &&
+ (sectors - old_sectors_read[i]) >
+ PERF_MMC_LIMIT_READ)
+ new_mode = true;
+
+ old_sectors_read[i] = sectors;
+ sectors = part_stat_read(&(mmc_disks[i]->part0),
+ sectors[WRITE]);
+
+ if (old_sectors_written[i] &&
+ sectors > old_sectors_written[i] &&
+ (sectors - old_sectors_written[i]) >
+ PERF_MMC_LIMIT_WRITE)
+ new_mode = true;
+
+ old_sectors_written[i] = sectors;
+ }
+
+ if (!old_mode && new_mode)
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "mmc", 125);
+
+ if (old_mode && !new_mode)
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "mmc", 25);
+
+ old_mode = new_mode;
+
+ schedule_delayed_work(&work_mmc,
+ msecs_to_jiffies(PERF_MMC_PROBE_DELAY));
+}
+#endif /* CONFIG_MMC_BLOCK */
+
+static int __init performance_register(void)
+{
+ int ret;
+
+#ifdef CONFIG_MMC_BLOCK
+ ret = prcmu_qos_add_requirement(PRCMU_QOS_ARM_OPP, "mmc", 25);
+ if (ret) {
+ pr_err("%s: Failed to add PRCMU req for mmc\n", __func__);
+ goto out;
+ }
+
+ INIT_DELAYED_WORK_DEFERRABLE(&work_mmc, mmc_load);
+
+ schedule_delayed_work(&work_mmc,
+ msecs_to_jiffies(PERF_MMC_PROBE_DELAY));
+#endif
+
+ ret = prcmu_qos_add_requirement(PRCMU_QOS_ARM_OPP, "wlan", 25);
+ if (ret) {
+ pr_err("%s: Failed to add PRCMU req for wlan\n", __func__);
+ goto out;
+ }
+
+ INIT_DELAYED_WORK_DEFERRABLE(&work_wlan_workaround,
+ wlan_load);
+
+ schedule_delayed_work_on(0, &work_wlan_workaround,
+ msecs_to_jiffies(WLAN_PROBE_DELAY));
+out:
+ return ret;
+}
+late_initcall(performance_register);
diff --git a/arch/arm/mach-ux500/pm/pm.c b/arch/arm/mach-ux500/pm/pm.c
new file mode 100644
index 00000000000..691642e4200
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/pm.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/nomadik.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+#include <asm/hardware/gic.h>
+#include <asm/processor.h>
+
+#include <mach/hardware.h>
+#include <mach/pm.h>
+
+#define STABILIZATION_TIME 30 /* us */
+#define GIC_FREEZE_DELAY 1 /* us */
+
+#define PRCM_ARM_WFI_STANDBY_CPU0_WFI 0x8
+#define PRCM_ARM_WFI_STANDBY_CPU1_WFI 0x10
+
+/* Dual A9 core interrupt management unit registers */
+#define PRCM_A9_MASK_REQ 0x328
+#define PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ 0x1
+#define PRCM_A9_MASK_ACK 0x32c
+
+#define PRCM_ARMITMSK31TO0 0x11c
+#define PRCM_ARMITMSK63TO32 0x120
+#define PRCM_ARMITMSK95TO64 0x124
+#define PRCM_ARMITMSK127TO96 0x128
+#define PRCM_POWER_STATE_VAL 0x25C
+#define PRCM_ARMITVAL31TO0 0x260
+#define PRCM_ARMITVAL63TO32 0x264
+#define PRCM_ARMITVAL95TO64 0x268
+#define PRCM_ARMITVAL127TO96 0x26C
+
+/* ARM WFI Standby signal register */
+#define PRCM_ARM_WFI_STANDBY 0x130
+
+/* IO force */
+#define PRCM_IOCR 0x310
+#define PRCM_IOCR_IOFORCE 0x1
+
+#ifdef CONFIG_UX500_SUSPEND
+int ux500_console_uart_gpio_pin = CONFIG_UX500_CONSOLE_UART_GPIO_PIN;
+#endif
+
+static u32 u8500_gpio_banks[] = {U8500_GPIOBANK0_BASE,
+ U8500_GPIOBANK1_BASE,
+ U8500_GPIOBANK2_BASE,
+ U8500_GPIOBANK3_BASE,
+ U8500_GPIOBANK4_BASE,
+ U8500_GPIOBANK5_BASE,
+ U8500_GPIOBANK6_BASE,
+ U8500_GPIOBANK7_BASE,
+ U8500_GPIOBANK8_BASE};
+
+static u32 u5500_gpio_banks[] = {U5500_GPIOBANK0_BASE,
+ U5500_GPIOBANK1_BASE,
+ U5500_GPIOBANK2_BASE,
+ U5500_GPIOBANK3_BASE,
+ U5500_GPIOBANK4_BASE,
+ U5500_GPIOBANK5_BASE,
+ U5500_GPIOBANK6_BASE,
+ U5500_GPIOBANK7_BASE};
+
+static u32 ux500_gpio_wks[ARRAY_SIZE(u8500_gpio_banks)];
+
+inline int ux500_pm_arm_on_ext_clk(bool leave_arm_pll_on)
+{
+ return 0;
+}
+
+/* Decouple GIC from the interrupt bus */
+void ux500_pm_gic_decouple(void)
+{
+ prcmu_write_masked(PRCM_A9_MASK_REQ,
+ PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ,
+ PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ);
+
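+	/* Poll until the mask request has taken effect in the PRCMU */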
+ while (!prcmu_read(PRCM_A9_MASK_REQ))
+ cpu_relax();
+
+ /* TODO: Use the ack bit when possible */
+ udelay(GIC_FREEZE_DELAY); /* Wait for the GIC to freeze */
+}
+
+/* Recouple GIC with the interrupt bus */
+void ux500_pm_gic_recouple(void)
+{
+ prcmu_write_masked(PRCM_A9_MASK_REQ,
+ PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ,
+ 0);
+
+ /* TODO: Use the ack bit when possible */
+}
+
+#define GIC_NUMBER_REGS 5
+bool ux500_pm_gic_pending_interrupt(void)
+{
+ u32 pr; /* Pending register */
+ u32 er; /* Enable register */
+ int i;
+
+ /* 5 registers. STI & PPI not skipped */
+ for (i = 0; i < GIC_NUMBER_REGS; i++) {
+
+ pr = readl_relaxed(__io_address(U8500_GIC_DIST_BASE) +
+ GIC_DIST_PENDING_SET + i * 4);
+ er = readl_relaxed(__io_address(U8500_GIC_DIST_BASE) +
+ GIC_DIST_ENABLE_SET + i * 4);
+
+ if (pr & er)
+ return true; /* There is a pending interrupt */
+ }
+ return false;
+}
+
+#define GIC_NUMBER_SPI_REGS 4
+bool ux500_pm_prcmu_pending_interrupt(void)
+{
+ u32 it;
+ u32 im;
+ int i;
+
+ for (i = 0; i < GIC_NUMBER_SPI_REGS; i++) { /* There are 4 registers */
+
+ it = prcmu_read(PRCM_ARMITVAL31TO0 + i * 4);
+ im = prcmu_read(PRCM_ARMITMSK31TO0 + i * 4);
+
+ if (it & im)
+ return true; /* There is a pending interrupt */
+ }
+
+ return false;
+}
+
+void ux500_pm_prcmu_set_ioforce(bool enable)
+{
+ if (enable)
+ prcmu_write_masked(PRCM_IOCR,
+ PRCM_IOCR_IOFORCE,
+ PRCM_IOCR_IOFORCE);
+ else
+ prcmu_write_masked(PRCM_IOCR,
+ PRCM_IOCR_IOFORCE,
+ 0);
+}
+
+void ux500_pm_prcmu_copy_gic_settings(void)
+{
+ u32 er; /* Enable register */
+ int i;
+
+ for (i = 0; i < GIC_NUMBER_SPI_REGS; i++) { /* 4*32 SPI interrupts */
+ /* +1 due to skip STI and PPI */
+ er = readl_relaxed(__io_address(U8500_GIC_DIST_BASE) +
+ GIC_DIST_ENABLE_SET + (i + 1) * 4);
+ prcmu_write(PRCM_ARMITMSK31TO0 + i * 4, er);
+ }
+}
+
+void ux500_pm_gpio_save_wake_up_status(void)
+{
+ int num_banks;
+ u32 *banks;
+ int i;
+
+ if (cpu_is_u5500()) {
+ num_banks = ARRAY_SIZE(u5500_gpio_banks);
+ banks = u5500_gpio_banks;
+ } else {
+ num_banks = ARRAY_SIZE(u8500_gpio_banks);
+ banks = u8500_gpio_banks;
+ }
+
+ nmk_gpio_clocks_enable();
+
+ for (i = 0; i < num_banks; i++)
+ ux500_gpio_wks[i] = readl(__io_address(banks[i]) + NMK_GPIO_WKS);
+
+ nmk_gpio_clocks_disable();
+}
+
+u32 ux500_pm_gpio_read_wake_up_status(unsigned int bank_num)
+{
+ if (WARN_ON(cpu_is_u5500() && bank_num >=
+ ARRAY_SIZE(u5500_gpio_banks)))
+ return 0;
+
+ if (WARN_ON(cpu_is_u8500() && bank_num >=
+ ARRAY_SIZE(u8500_gpio_banks)))
+ return 0;
+
+ return ux500_gpio_wks[bank_num];
+}
+
+/* Check if the other CPU is in WFI */
+bool ux500_pm_other_cpu_wfi(void)
+{
+ if (smp_processor_id()) {
+ /* We are CPU 1 => check if CPU0 is in WFI */
+ if (prcmu_read(PRCM_ARM_WFI_STANDBY) &
+ PRCM_ARM_WFI_STANDBY_CPU0_WFI)
+ return true;
+ } else {
+ /* We are CPU 0 => check if CPU1 is in WFI */
+ if (prcmu_read(PRCM_ARM_WFI_STANDBY) &
+ PRCM_ARM_WFI_STANDBY_CPU1_WFI)
+ return true;
+ }
+
+ return false;
+}
diff --git a/arch/arm/mach-ux500/pm/prcmu-qos-power.c b/arch/arm/mach-ux500/pm/prcmu-qos-power.c
new file mode 100644
index 00000000000..a600a57dc13
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/prcmu-qos-power.c
@@ -0,0 +1,722 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Martin Persson
+ * Per Fransson <per.xx.fransson@stericsson.com>
+ *
+ * Quality of Service for the U8500 PRCM Unit interface driver
+ *
+ * Strongly influenced by kernel/pm_qos_params.c.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/cpufreq.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/cpufreq-dbx500.h>
+
+#include <mach/prcmu-debug.h>
+
+#define ARM_THRESHOLD_FREQ	(400000) /* kHz */
+
+static int qos_delayed_cpufreq_notifier(struct notifier_block *,
+ unsigned long, void *);
+
+static s32 cpufreq_requirement_queued;
+static s32 cpufreq_requirement_set;
+
+/*
+ * Locking rule: all changes to requirements or to the prcmu_qos_object
+ * requirement lists need to happen with prcmu_qos_lock held, taken
+ * with _irqsave. One lock to rule them all.
+ */
+struct requirement_list {
+ struct list_head list;
+ union {
+ s32 value;
+ s32 usec;
+ s32 kbps;
+ };
+ char *name;
+};
+
+static s32 max_compare(s32 v1, s32 v2);
+
+struct prcmu_qos_object {
+ struct requirement_list requirements;
+ struct blocking_notifier_head *notifiers;
+ struct miscdevice prcmu_qos_power_miscdev;
+ char *name;
+ s32 default_value;
+ s32 force_value;
+ atomic_t target_value;
+ s32 (*comparitor)(s32, s32);
+};
+
+static struct prcmu_qos_object null_qos;
+static BLOCKING_NOTIFIER_HEAD(prcmu_ape_opp_notifier);
+static BLOCKING_NOTIFIER_HEAD(prcmu_ddr_opp_notifier);
+
+static struct prcmu_qos_object ape_opp_qos = {
+ .requirements = {
+ LIST_HEAD_INIT(ape_opp_qos.requirements.list)
+ },
+ .notifiers = &prcmu_ape_opp_notifier,
+ .name = "ape_opp",
+ /* Target value in % APE OPP */
+ .default_value = 50,
+ .force_value = 0,
+ .target_value = ATOMIC_INIT(50),
+ .comparitor = max_compare
+};
+
+static struct prcmu_qos_object ddr_opp_qos = {
+ .requirements = {
+ LIST_HEAD_INIT(ddr_opp_qos.requirements.list)
+ },
+ .notifiers = &prcmu_ddr_opp_notifier,
+ .name = "ddr_opp",
+ /* Target value in % DDR OPP */
+ .default_value = 25,
+ .force_value = 0,
+ .target_value = ATOMIC_INIT(25),
+ .comparitor = max_compare
+};
+
+static struct prcmu_qos_object arm_opp_qos = {
+ .requirements = {
+ LIST_HEAD_INIT(arm_opp_qos.requirements.list)
+ },
+ /*
+ * No notifier on ARM opp qos request, since this won't actually
+ * do anything, except changing limits for cpufreq
+ */
+ .name = "arm_opp",
+ /* Target value in % ARM OPP, note can be 125% */
+ .default_value = 25,
+ .force_value = 0,
+ .target_value = ATOMIC_INIT(25),
+ .comparitor = max_compare
+};
+
+static struct prcmu_qos_object *prcmu_qos_array[] = {
+ &null_qos,
+ &ape_opp_qos,
+ &ddr_opp_qos,
+ &arm_opp_qos,
+};
+
+static DEFINE_MUTEX(prcmu_qos_mutex);
+static DEFINE_SPINLOCK(prcmu_qos_lock);
+
+static bool ape_opp_forced_to_50_partly_25;
+
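+/* Delay before OPP requirements follow a cpufreq transition: HZ/5 = 200 ms */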
+static unsigned long cpufreq_opp_delay = HZ / 5;
+
+unsigned long prcmu_qos_get_cpufreq_opp_delay(void)
+{
+ return cpufreq_opp_delay;
+}
+
+static struct notifier_block qos_delayed_cpufreq_notifier_block = {
+ .notifier_call = qos_delayed_cpufreq_notifier,
+};
+
+void prcmu_qos_set_cpufreq_opp_delay(unsigned long n)
+{
+ if (n == 0) {
+ cpufreq_unregister_notifier(&qos_delayed_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ prcmu_qos_update_requirement(PRCMU_QOS_DDR_OPP, "cpufreq",
+ PRCMU_QOS_DEFAULT_VALUE);
+ prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "cpufreq",
+ PRCMU_QOS_DEFAULT_VALUE);
+ cpufreq_requirement_set = PRCMU_QOS_DEFAULT_VALUE;
+ cpufreq_requirement_queued = PRCMU_QOS_DEFAULT_VALUE;
+	} else if (cpufreq_opp_delay == 0) {
+		/* Re-register the notifier that a previous zero delay removed */
+ cpufreq_register_notifier(&qos_delayed_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ }
+ cpufreq_opp_delay = n;
+}
+#ifdef CONFIG_CPU_FREQ
+static void update_cpu_limits(s32 extreme_value)
+{
+ int cpu;
+ struct cpufreq_policy policy;
+ int ret;
+ int min_freq, max_freq;
+
+ for_each_online_cpu(cpu) {
+ ret = cpufreq_get_policy(&policy, cpu);
+ if (ret) {
+ pr_err("prcmu qos: get cpufreq policy failed (cpu%d)\n",
+ cpu);
+ continue;
+ }
+
+ ret = dbx500_cpufreq_get_limits(cpu, extreme_value,
+ &min_freq, &max_freq);
+ if (ret)
+ continue;
+ /*
+ * cpufreq fw does not allow frequency change if
+ * "current min freq" > "new max freq" or
+ * "current max freq" < "new min freq".
+ * Thus the intermediate steps below.
+ */
+ if (policy.min > max_freq) {
+ ret = cpufreq_update_freq(cpu, min_freq, policy.max);
+ if (ret)
+ pr_err("prcmu qos: update min cpufreq failed (1)\n");
+ }
+ if (policy.max < min_freq) {
+ ret = cpufreq_update_freq(cpu, policy.min, max_freq);
+ if (ret)
+ pr_err("prcmu qos: update max cpufreq failed (2)\n");
+ }
+
+ ret = cpufreq_update_freq(cpu, min_freq, max_freq);
+ if (ret)
+ pr_err("prcmu qos: update max cpufreq failed (3)\n");
+ }
+
+}
+#else
+static inline void update_cpu_limits(s32 extreme_value) { }
+#endif
+/* Helper: aggregate qos requirements by taking the maximum */
+static s32 max_compare(s32 v1, s32 v2)
+{
+ return max(v1, v2);
+}
+
+static void update_target(int target)
+{
+ s32 extreme_value;
+ struct requirement_list *node;
+ unsigned long flags;
+ bool update = false;
+ u8 op;
+
+ mutex_lock(&prcmu_qos_mutex);
+
+ spin_lock_irqsave(&prcmu_qos_lock, flags);
+ extreme_value = prcmu_qos_array[target]->default_value;
+
+ if (prcmu_qos_array[target]->force_value != 0) {
+ extreme_value = prcmu_qos_array[target]->force_value;
+ update = true;
+ } else {
+ list_for_each_entry(node,
+ &prcmu_qos_array[target]->requirements.list,
+ list) {
+ extreme_value = prcmu_qos_array[target]->comparitor(
+ extreme_value, node->value);
+ }
+ if (atomic_read(&prcmu_qos_array[target]->target_value)
+ != extreme_value) {
+ update = true;
+ atomic_set(&prcmu_qos_array[target]->target_value,
+ extreme_value);
+ pr_debug("prcmu qos: new target for qos %d is %d\n",
+ target, atomic_read(
+ &prcmu_qos_array[target]->target_value
+ ));
+ }
+ }
+
+ spin_unlock_irqrestore(&prcmu_qos_lock, flags);
+
+ if (!update)
+ goto unlock_and_return;
+
+ if (prcmu_qos_array[target]->notifiers)
+ blocking_notifier_call_chain(prcmu_qos_array[target]->notifiers,
+ (unsigned long)extreme_value,
+ NULL);
+ switch (target) {
+ case PRCMU_QOS_DDR_OPP:
+ switch (extreme_value) {
+ case 50:
+ op = DDR_50_OPP;
+ pr_debug("prcmu qos: set ddr opp to 50%%\n");
+ break;
+ case 100:
+ op = DDR_100_OPP;
+ pr_debug("prcmu qos: set ddr opp to 100%%\n");
+ break;
+ case 25:
+ /* 25% DDR OPP is not supported on 5500 */
+ if (!cpu_is_u5500()) {
+ op = DDR_25_OPP;
+ pr_debug("prcmu qos: set ddr opp to 25%%\n");
+ break;
+ }
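+			/* Fall through: 25% is invalid on u5500 */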
+ default:
+ pr_err("prcmu qos: Incorrect ddr target value (%d)",
+ extreme_value);
+ goto unlock_and_return;
+ }
+ prcmu_set_ddr_opp(op);
+ prcmu_debug_ddr_opp_log(op);
+ break;
+ case PRCMU_QOS_APE_OPP:
+ switch (extreme_value) {
+ case 50:
+ op = APE_50_OPP;
+ pr_debug("prcmu qos: set ape opp to 50%%\n");
+ break;
+ case 100:
+ op = APE_100_OPP;
+ pr_debug("prcmu qos: set ape opp to 100%%\n");
+ break;
+ default:
+ pr_err("prcmu qos: Incorrect ape target value (%d)",
+ extreme_value);
+ goto unlock_and_return;
+ }
+
+ if (!ape_opp_forced_to_50_partly_25)
+ (void)prcmu_set_ape_opp(op);
+ prcmu_debug_ape_opp_log(op);
+ break;
+ case PRCMU_QOS_ARM_OPP:
+ {
+ mutex_unlock(&prcmu_qos_mutex);
+ /*
+ * We can't hold the mutex since changing cpufreq
+ * will trigger an prcmu fw callback.
+ */
+ update_cpu_limits(extreme_value);
+ /* Return since the lock is unlocked */
+ return;
+ }
+ default:
+ pr_err("prcmu qos: Incorrect target\n");
+ break;
+ }
+
+unlock_and_return:
+ mutex_unlock(&prcmu_qos_mutex);
+}
+
+void prcmu_qos_force_opp(int prcmu_qos_class, s32 i)
+{
+ prcmu_qos_array[prcmu_qos_class]->force_value = i;
+ update_target(prcmu_qos_class);
+}
+
+void prcmu_qos_voice_call_override(bool enable)
+{
+ u8 op;
+
+ mutex_lock(&prcmu_qos_mutex);
+
+ ape_opp_forced_to_50_partly_25 = enable;
+
+ if (enable) {
+ (void)prcmu_set_ape_opp(APE_50_PARTLY_25_OPP);
+ goto unlock_and_return;
+ }
+
+ /* Disable: set the OPP according to the current target value. */
+ switch (atomic_read(
+ &prcmu_qos_array[PRCMU_QOS_APE_OPP]->target_value)) {
+ case 50:
+ op = APE_50_OPP;
+ break;
+ case 100:
+ op = APE_100_OPP;
+ break;
+ default:
+ goto unlock_and_return;
+ }
+
+ (void)prcmu_set_ape_opp(op);
+
+unlock_and_return:
+ mutex_unlock(&prcmu_qos_mutex);
+}
+
+/**
+ * prcmu_qos_requirement - returns current prcmu qos expectation
+ * @prcmu_qos_class: identification of which qos value is requested
+ *
+ * This function returns the current target value in an atomic manner.
+ */
+int prcmu_qos_requirement(int prcmu_qos_class)
+{
+ return atomic_read(&prcmu_qos_array[prcmu_qos_class]->target_value);
+}
+EXPORT_SYMBOL_GPL(prcmu_qos_requirement);
+
+/**
+ * prcmu_qos_add_requirement - inserts new qos request into the list
+ * @prcmu_qos_class: identifies which list of qos requests to use
+ * @name: identifies the request
+ * @value: defines the qos request
+ *
+ * This function inserts a new entry in the prcmu_qos_class list of requested
+ * qos performance characteristics. It recomputes the aggregate QoS
+ * expectations for the prcmu_qos_class of parameters.
+ */
+int prcmu_qos_add_requirement(int prcmu_qos_class, char *name, s32 value)
+{
+ struct requirement_list *dep;
+ unsigned long flags;
+
+ dep = kzalloc(sizeof(struct requirement_list), GFP_KERNEL);
+ if (dep == NULL)
+ return -ENOMEM;
+
+ if (value == PRCMU_QOS_DEFAULT_VALUE)
+ dep->value = prcmu_qos_array[prcmu_qos_class]->default_value;
+ else
+ dep->value = value;
+ dep->name = kstrdup(name, GFP_KERNEL);
+ if (!dep->name)
+ goto cleanup;
+
+ spin_lock_irqsave(&prcmu_qos_lock, flags);
+ list_add(&dep->list,
+ &prcmu_qos_array[prcmu_qos_class]->requirements.list);
+ spin_unlock_irqrestore(&prcmu_qos_lock, flags);
+ update_target(prcmu_qos_class);
+
+ return 0;
+
+cleanup:
+ kfree(dep);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(prcmu_qos_add_requirement);
+
+/**
+ * prcmu_qos_update_requirement - modifies an existing qos request
+ * @prcmu_qos_class: identifies which list of qos requests to use
+ * @name: identifies the request
+ * @value: defines the qos request
+ *
+ * Updates an existing qos requirement for the prcmu_qos_class of parameters
+ * along with updating the target prcmu_qos_class value.
+ *
+ * If the named request isn't in the list then no change is made.
+ */
+int prcmu_qos_update_requirement(int prcmu_qos_class, char *name, s32 new_value)
+{
+ unsigned long flags;
+ struct requirement_list *node;
+ int pending_update = 0;
+
+ spin_lock_irqsave(&prcmu_qos_lock, flags);
+ list_for_each_entry(node,
+ &prcmu_qos_array[prcmu_qos_class]->requirements.list, list) {
+ if (strcmp(node->name, name) == 0) {
+ if (new_value == PRCMU_QOS_DEFAULT_VALUE)
+ node->value =
+ prcmu_qos_array[prcmu_qos_class]->default_value;
+ else
+ node->value = new_value;
+ pending_update = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&prcmu_qos_lock, flags);
+ if (pending_update)
+ update_target(prcmu_qos_class);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(prcmu_qos_update_requirement);
+
+/**
+ * prcmu_qos_remove_requirement - removes an existing qos request
+ * @prcmu_qos_class: identifies which list of qos requests to use
+ * @name: identifies the request
+ *
+ * Will remove named qos request from prcmu_qos_class list of parameters and
+ * recompute the current target value for the prcmu_qos_class.
+ */
+void prcmu_qos_remove_requirement(int prcmu_qos_class, char *name)
+{
+ unsigned long flags;
+ struct requirement_list *node;
+ int pending_update = 0;
+
+ spin_lock_irqsave(&prcmu_qos_lock, flags);
+ list_for_each_entry(node,
+ &prcmu_qos_array[prcmu_qos_class]->requirements.list, list) {
+ if (strcmp(node->name, name) == 0) {
+ kfree(node->name);
+ list_del(&node->list);
+ kfree(node);
+ pending_update = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&prcmu_qos_lock, flags);
+ if (pending_update)
+ update_target(prcmu_qos_class);
+}
+EXPORT_SYMBOL_GPL(prcmu_qos_remove_requirement);
+
+/**
+ * prcmu_qos_add_notifier - sets notification entry for changes to target value
+ * @prcmu_qos_class: identifies which qos target changes should be notified.
+ * @notifier: notifier block managed by caller.
+ *
+ * will register the notifier into a notification chain that gets called
+ * upon changes to the prcmu_qos_class target value.
+ */
+int prcmu_qos_add_notifier(int prcmu_qos_class, struct notifier_block *notifier)
+{
+ int retval = -EINVAL;
+
+ if (prcmu_qos_array[prcmu_qos_class]->notifiers)
+ retval = blocking_notifier_chain_register(
+ prcmu_qos_array[prcmu_qos_class]->notifiers, notifier);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(prcmu_qos_add_notifier);
+
+/**
+ * prcmu_qos_remove_notifier - deletes notification entry from chain.
+ * @prcmu_qos_class: identifies which qos target changes are notified.
+ * @notifier: notifier block to be removed.
+ *
+ * will remove the notifier from the notification chain that gets called
+ * upon changes to the prcmu_qos_class target value.
+ */
+int prcmu_qos_remove_notifier(int prcmu_qos_class,
+ struct notifier_block *notifier)
+{
+ int retval = -EINVAL;
+ if (prcmu_qos_array[prcmu_qos_class]->notifiers)
+ retval = blocking_notifier_chain_unregister(
+ prcmu_qos_array[prcmu_qos_class]->notifiers, notifier);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(prcmu_qos_remove_notifier);
+
+#define USER_QOS_NAME_LEN 32
+
+static int prcmu_qos_power_open(struct inode *inode, struct file *filp,
+ long prcmu_qos_class)
+{
+ int ret;
+ char name[USER_QOS_NAME_LEN];
+
+ filp->private_data = (void *)prcmu_qos_class;
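+	/* The struct file address gives each open a unique requirement name */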
+ snprintf(name, USER_QOS_NAME_LEN, "file_%08x", (unsigned int)filp);
+ ret = prcmu_qos_add_requirement(prcmu_qos_class, name,
+ PRCMU_QOS_DEFAULT_VALUE);
+ if (ret >= 0)
+ return 0;
+
+ return -EPERM;
+}
+
+
+static int prcmu_qos_ape_power_open(struct inode *inode, struct file *filp)
+{
+ return prcmu_qos_power_open(inode, filp, PRCMU_QOS_APE_OPP);
+}
+
+static int prcmu_qos_ddr_power_open(struct inode *inode, struct file *filp)
+{
+ return prcmu_qos_power_open(inode, filp, PRCMU_QOS_DDR_OPP);
+}
+
+static int prcmu_qos_arm_power_open(struct inode *inode, struct file *filp)
+{
+ return prcmu_qos_power_open(inode, filp, PRCMU_QOS_ARM_OPP);
+}
+
+static int prcmu_qos_power_release(struct inode *inode, struct file *filp)
+{
+ int prcmu_qos_class;
+ char name[USER_QOS_NAME_LEN];
+
+ prcmu_qos_class = (long)filp->private_data;
+ snprintf(name, USER_QOS_NAME_LEN, "file_%08x", (unsigned int)filp);
+ prcmu_qos_remove_requirement(prcmu_qos_class, name);
+
+ return 0;
+}
+
+static ssize_t prcmu_qos_power_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ s32 value;
+ int prcmu_qos_class;
+ char name[USER_QOS_NAME_LEN];
+
+ prcmu_qos_class = (long)filp->private_data;
+ if (count != sizeof(s32))
+ return -EINVAL;
+ if (copy_from_user(&value, buf, sizeof(s32)))
+ return -EFAULT;
+ snprintf(name, USER_QOS_NAME_LEN, "file_%08x", (unsigned int)filp);
+ prcmu_qos_update_requirement(prcmu_qos_class, name, value);
+
+ return sizeof(s32);
+}
+
+/* Functions to provide QoS to user space */
+static const struct file_operations prcmu_qos_ape_power_fops = {
+ .write = prcmu_qos_power_write,
+ .open = prcmu_qos_ape_power_open,
+ .release = prcmu_qos_power_release,
+};
+
+/* Functions to provide QoS to user space */
+static const struct file_operations prcmu_qos_ddr_power_fops = {
+ .write = prcmu_qos_power_write,
+ .open = prcmu_qos_ddr_power_open,
+ .release = prcmu_qos_power_release,
+};
+
+static const struct file_operations prcmu_qos_arm_power_fops = {
+ .write = prcmu_qos_power_write,
+ .open = prcmu_qos_arm_power_open,
+ .release = prcmu_qos_power_release,
+};
+
+static int register_prcmu_qos_misc(struct prcmu_qos_object *qos,
+ const struct file_operations *fops)
+{
+ qos->prcmu_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
+ qos->prcmu_qos_power_miscdev.name = qos->name;
+ qos->prcmu_qos_power_miscdev.fops = fops;
+
+ return misc_register(&qos->prcmu_qos_power_miscdev);
+}
+
+static void qos_delayed_work_up_fn(struct work_struct *work)
+{
+ prcmu_qos_update_requirement(PRCMU_QOS_DDR_OPP, "cpufreq", 100);
+ prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "cpufreq", 100);
+ cpufreq_requirement_set = 100;
+}
+
+static void qos_delayed_work_down_fn(struct work_struct *work)
+{
+ prcmu_qos_update_requirement(PRCMU_QOS_DDR_OPP, "cpufreq",
+ PRCMU_QOS_DEFAULT_VALUE);
+ prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "cpufreq",
+ PRCMU_QOS_DEFAULT_VALUE);
+ cpufreq_requirement_set = PRCMU_QOS_DEFAULT_VALUE;
+}
+
+static DECLARE_DELAYED_WORK(qos_delayed_work_up, qos_delayed_work_up_fn);
+static DECLARE_DELAYED_WORK(qos_delayed_work_down, qos_delayed_work_down_fn);
+
+static int qos_delayed_cpufreq_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ s32 new_ddr_target;
+
+	/* Only react once per transition and only for one core, i.e. core 0 */
+ if (event != CPUFREQ_POSTCHANGE || freq->cpu != 0)
+ return 0;
+
+ /*
+ * APE and DDR OPP are always handled together in this solution.
+ * Hence no need to check both DDR and APE opp in the code below.
+ */
+
+ /* Which DDR OPP are we aiming for? */
+ if (freq->new > ARM_THRESHOLD_FREQ)
+ new_ddr_target = 100;
+ else
+ new_ddr_target = PRCMU_QOS_DEFAULT_VALUE;
+
+ if (new_ddr_target == cpufreq_requirement_queued) {
+ /*
+ * We're already at, or going to, the target requirement.
+ * This is only a fluctuation within the interval
+ * corresponding to the same DDR requirement.
+ */
+ return 0;
+ }
+ cpufreq_requirement_queued = new_ddr_target;
+
+ if (freq->new > ARM_THRESHOLD_FREQ) {
+ cancel_delayed_work_sync(&qos_delayed_work_down);
+ /*
+ * Only schedule this requirement if it is not the current
+ * one.
+ */
+ if (new_ddr_target != cpufreq_requirement_set)
+ schedule_delayed_work(&qos_delayed_work_up,
+ cpufreq_opp_delay);
+ } else {
+ cancel_delayed_work_sync(&qos_delayed_work_up);
+ /*
+ * Only schedule this requirement if it is not the current
+ * one.
+ */
+ if (new_ddr_target != cpufreq_requirement_set)
+ schedule_delayed_work(&qos_delayed_work_down,
+ cpufreq_opp_delay);
+ }
+
+ return 0;
+}
+
+static int __init prcmu_qos_power_init(void)
+{
+ int ret;
+
+ /* 25% DDR OPP is not supported on u5500 */
+ if (cpu_is_u5500()) {
+ ddr_opp_qos.default_value = 50;
+ atomic_set(&ddr_opp_qos.target_value, 50);
+ }
+
+ ret = register_prcmu_qos_misc(&ape_opp_qos, &prcmu_qos_ape_power_fops);
+ if (ret < 0) {
+ pr_err("prcmu ape qos: setup failed\n");
+ return ret;
+ }
+
+ ret = register_prcmu_qos_misc(&ddr_opp_qos, &prcmu_qos_ddr_power_fops);
+ if (ret < 0) {
+ pr_err("prcmu ddr qos: setup failed\n");
+ return ret;
+ }
+
+ ret = register_prcmu_qos_misc(&arm_opp_qos, &prcmu_qos_arm_power_fops);
+ if (ret < 0) {
+ pr_err("prcmu arm qos: setup failed\n");
+ return ret;
+ }
+
+ prcmu_qos_add_requirement(PRCMU_QOS_DDR_OPP, "cpufreq",
+ PRCMU_QOS_DEFAULT_VALUE);
+ prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, "cpufreq",
+ PRCMU_QOS_DEFAULT_VALUE);
+ cpufreq_requirement_set = PRCMU_QOS_DEFAULT_VALUE;
+ cpufreq_requirement_queued = PRCMU_QOS_DEFAULT_VALUE;
+
+ cpufreq_register_notifier(&qos_delayed_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ return ret;
+}
+
+late_initcall(prcmu_qos_power_init);
diff --git a/arch/arm/mach-ux500/pm/runtime.c b/arch/arm/mach-ux500/pm/runtime.c
new file mode 100644
index 00000000000..710bd8aae3d
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/runtime.c
@@ -0,0 +1,509 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ *
+ * Based on:
+ * Runtime PM support code for SuperH Mobile ARM
+ * Copyright (C) 2009-2010 Magnus Damm
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+#include <linux/regulator/dbx500-prcmu.h>
+#include <linux/clk.h>
+#include <linux/gfp.h>
+#include <plat/pincfg.h>
+
+#include "../pins.h"
+
+#ifdef CONFIG_PM_RUNTIME
+#define BIT_ONCE 0
+#define BIT_ACTIVE 1
+#define BIT_ENABLED 2
+
+struct pm_runtime_data {
+ unsigned long flags;
+ struct ux500_regulator *regulator;
+ struct ux500_pins *pins;
+};
+
+static void __devres_release(struct device *dev, void *res)
+{
+ struct pm_runtime_data *prd = res;
+
+ dev_dbg(dev, "__devres_release()\n");
+
+ if (test_bit(BIT_ENABLED, &prd->flags)) {
+ if (prd->pins)
+ ux500_pins_disable(prd->pins);
+ if (prd->regulator)
+ ux500_regulator_atomic_disable(prd->regulator);
+ }
+
+ if (test_bit(BIT_ACTIVE, &prd->flags)) {
+ if (prd->pins)
+ ux500_pins_put(prd->pins);
+ if (prd->regulator)
+ ux500_regulator_put(prd->regulator);
+ }
+}
+
+static struct pm_runtime_data *__to_prd(struct device *dev)
+{
+ return devres_find(dev, __devres_release, NULL, NULL);
+}
+
+static void platform_pm_runtime_init(struct device *dev,
+ struct pm_runtime_data *prd)
+{
+ prd->pins = ux500_pins_get(dev_name(dev));
+
+ prd->regulator = ux500_regulator_get(dev);
+ if (IS_ERR(prd->regulator))
+ prd->regulator = NULL;
+
+ if (prd->pins || prd->regulator) {
+ dev_info(dev, "managed by runtime pm: %s%s\n",
+ prd->pins ? "pins " : "",
+ prd->regulator ? "regulator " : "");
+
+ set_bit(BIT_ACTIVE, &prd->flags);
+ }
+}
+
+static void platform_pm_runtime_bug(struct device *dev,
+ struct pm_runtime_data *prd)
+{
+ if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags))
+ dev_err(dev, "runtime pm suspend before resume\n");
+}
+
+static void platform_pm_runtime_used(struct device *dev,
+ struct pm_runtime_data *prd)
+{
+ if (prd)
+ set_bit(BIT_ONCE, &prd->flags);
+}
+
+static int ux500_pd_runtime_idle(struct device *dev)
+{
+ return pm_runtime_suspend(dev);
+}
+
+static void ux500_pd_disable(struct pm_runtime_data *prd)
+{
+ if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+
+ if (prd->pins)
+ ux500_pins_disable(prd->pins);
+
+ if (prd->regulator)
+ ux500_regulator_atomic_disable(prd->regulator);
+
+ clear_bit(BIT_ENABLED, &prd->flags);
+ }
+}
+
+static int ux500_pd_runtime_suspend(struct device *dev)
+{
+ int ret;
+ struct pm_runtime_data *prd = __to_prd(dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ platform_pm_runtime_bug(dev, prd);
+
+ ret = pm_generic_runtime_suspend(dev);
+ if (ret)
+ return ret;
+
+ ux500_pd_disable(prd);
+
+ return 0;
+}
+
+static void ux500_pd_enable(struct pm_runtime_data *prd)
+{
+ if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+ if (prd->pins)
+ ux500_pins_enable(prd->pins);
+
+ if (prd->regulator)
+ ux500_regulator_atomic_enable(prd->regulator);
+
+ set_bit(BIT_ENABLED, &prd->flags);
+ }
+}
+
+static int ux500_pd_runtime_resume(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ platform_pm_runtime_used(dev, prd);
+ ux500_pd_enable(prd);
+
+ return pm_generic_runtime_resume(dev);
+}
+
+static int ux500_pd_suspend_noirq(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ /* Only handle devices that use runtime pm */
+ if (!prd || !test_bit(BIT_ONCE, &prd->flags))
+ return 0;
+
+	/* Already runtime suspended? Nothing to do. */
+ if (pm_runtime_status_suspended(dev))
+ return 0;
+
+ /*
+ * We get here only if the device was not runtime suspended for some
+ * reason. We still need to do the power save stuff when going into
+ * suspend, so force it here.
+ */
+ return ux500_pd_runtime_suspend(dev);
+}
+
+static int ux500_pd_resume_noirq(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ /* Only handle devices that use runtime pm */
+ if (!prd || !test_bit(BIT_ONCE, &prd->flags))
+ return 0;
+
+ /*
+	 * Was it already runtime suspended? No need to resume here; runtime
+	 * resume will take care of it.
+ */
+ if (pm_runtime_status_suspended(dev))
+ return 0;
+
+ /*
+ * We get here only if the device was not runtime suspended,
+ * but we forced it down in suspend_noirq above. Bring it
+ * up since pm-runtime thinks it is not suspended.
+ */
+ return ux500_pd_runtime_resume(dev);
+}
+#ifdef CONFIG_UX500_SUSPEND
+static int ux500_pd_amba_suspend_noirq(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+ int (*callback)(struct device *) = NULL;
+ int ret = 0;
+ bool is_suspended = pm_runtime_status_suspended(dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ /*
+ * Do not bypass AMBA bus pm functions by calling generic
+ * pm directly. A future fix could be to implement a
+ * "pm_bus_generic_*" API which we can use instead.
+ */
+ if (dev->bus && dev->bus->pm)
+ callback = dev->bus->pm->suspend_noirq;
+
+ if (callback)
+ ret = callback(dev);
+ else
+ ret = pm_generic_suspend_noirq(dev);
+
+ if (!ret && !is_suspended)
+ ux500_pd_disable(prd);
+
+ return ret;
+}
+
+static int ux500_pd_amba_resume_noirq(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+ int (*callback)(struct device *) = NULL;
+ int ret = 0;
+ bool is_suspended = pm_runtime_status_suspended(dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ /*
+ * Do not bypass AMBA bus pm functions by calling generic
+ * pm directly. A future fix could be to implement a
+ * "pm_bus_generic_*" API which we can use instead.
+ */
+ if (dev->bus && dev->bus->pm)
+ callback = dev->bus->pm->resume_noirq;
+
+ if (callback)
+ ret = callback(dev);
+ else
+ ret = pm_generic_resume_noirq(dev);
+
+ if (!ret && !is_suspended)
+ ux500_pd_enable(prd);
+
+ return ret;
+}
+#else
+static int ux500_pd_amba_suspend_noirq(struct device *dev)
+{
+ return 0;
+}
+static int ux500_pd_amba_resume_noirq(struct device *dev)
+{
+ return 0;
+}
+#endif
+static int ux500_pd_amba_runtime_suspend(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+ int (*callback)(struct device *) = NULL;
+ int ret;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ /*
+	 * Do this first, to make sure the pins are not left in an undefined
+	 * state after the drivers have run their runtime suspend. This also
+	 * means that drivers cannot use their pins/regulators during runtime
+	 * suspend.
+ */
+ ux500_pd_disable(prd);
+
+ /*
+ * Do not bypass AMBA bus pm functions by calling generic
+ * pm directly. A future fix could be to implement a
+ * "pm_bus_generic_*" API which we can use instead.
+ */
+ if (dev->bus && dev->bus->pm)
+ callback = dev->bus->pm->runtime_suspend;
+
+ if (callback)
+ ret = callback(dev);
+ else
+ ret = pm_generic_runtime_suspend(dev);
+
+ if (ret)
+ ux500_pd_enable(prd);
+
+ return ret;
+}
+
+static int ux500_pd_amba_runtime_resume(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+ int (*callback)(struct device *) = NULL;
+ int ret;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ /*
+ * Do not bypass AMBA bus pm functions by calling generic
+ * pm directly. A future fix could be to implement a
+ * "pm_bus_generic_*" API which we can use instead.
+ */
+ if (dev->bus && dev->bus->pm)
+ callback = dev->bus->pm->runtime_resume;
+
+ if (callback)
+ ret = callback(dev);
+ else
+ ret = pm_generic_runtime_resume(dev);
+
+ /*
+	 * Restore pins/regulator after the drivers have runtime resumed,
+	 * since the pins must not be left in an undefined state. This also
+	 * means that drivers cannot use their pins/regulators during
+	 * runtime resume.
+ */
+ if (!ret)
+ ux500_pd_enable(prd);
+
+ return ret;
+}
+
+static int ux500_pd_amba_runtime_idle(struct device *dev)
+{
+ int (*callback)(struct device *) = NULL;
+ int ret;
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ /*
+ * Do not bypass AMBA bus runtime functions by calling generic runtime
+ * directly. A future fix could be to implement a
+ * "pm_bus_generic_runtime_*" API which we can use instead.
+ */
+ if (dev->bus && dev->bus->pm)
+ callback = dev->bus->pm->runtime_idle;
+
+ if (callback)
+ ret = callback(dev);
+ else
+ ret = pm_generic_runtime_idle(dev);
+
+ return ret;
+}
+
+static int ux500_pd_bus_notify(struct notifier_block *nb,
+ unsigned long action,
+ void *data,
+ bool enable)
+{
+ struct device *dev = data;
+ struct pm_runtime_data *prd;
+
+ dev_dbg(dev, "%s() %ld !\n", __func__, action);
+
+ if (action == BUS_NOTIFY_BIND_DRIVER) {
+ prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL);
+ if (prd) {
+ devres_add(dev, prd);
+ platform_pm_runtime_init(dev, prd);
+ if (enable)
+ ux500_pd_enable(prd);
+ } else
+ dev_err(dev, "unable to alloc memory for runtime pm\n");
+ }
+
+ return 0;
+}
+
+static int ux500_pd_plat_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ return ux500_pd_bus_notify(nb, action, data, false);
+}
+
+static int ux500_pd_amba_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ return ux500_pd_bus_notify(nb, action, data, true);
+}
+
+#else /* CONFIG_PM_RUNTIME */
+
+#define ux500_pd_suspend_noirq NULL
+#define ux500_pd_resume_noirq NULL
+#define ux500_pd_runtime_idle NULL
+#define ux500_pd_runtime_suspend NULL
+#define ux500_pd_runtime_resume NULL
+
+static int ux500_pd_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct ux500_regulator *regulator = NULL;
+ struct ux500_pins *pins = NULL;
+ struct device *dev = data;
+ const char *onoff = NULL;
+
+ dev_dbg(dev, "%s() %ld !\n", __func__, action);
+
+ switch (action) {
+ case BUS_NOTIFY_BIND_DRIVER:
+ pins = ux500_pins_get(dev_name(dev));
+ if (pins) {
+ ux500_pins_enable(pins);
+ ux500_pins_put(pins);
+ }
+
+ regulator = ux500_regulator_get(dev);
+ if (IS_ERR(regulator))
+ regulator = NULL;
+ else {
+ ux500_regulator_atomic_enable(regulator);
+ ux500_regulator_put(regulator);
+ }
+
+ onoff = "on";
+ break;
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ pins = ux500_pins_get(dev_name(dev));
+ if (pins) {
+ ux500_pins_disable(pins);
+ ux500_pins_put(pins);
+ }
+
+ regulator = ux500_regulator_get(dev);
+ if (IS_ERR(regulator))
+ regulator = NULL;
+ else {
+ ux500_regulator_atomic_disable(regulator);
+ ux500_regulator_put(regulator);
+ }
+
+ onoff = "off";
+ break;
+ }
+
+ if (pins || regulator) {
+ dev_info(dev, "runtime pm disabled, forced %s: %s%s\n",
+ onoff,
+ pins ? "pins " : "",
+ regulator ? "regulator " : "");
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_PM_RUNTIME */
+
+struct dev_pm_domain ux500_amba_dev_power_domain = {
+ .ops = {
+ SET_RUNTIME_PM_OPS(ux500_pd_amba_runtime_suspend,
+ ux500_pd_amba_runtime_resume,
+ ux500_pd_amba_runtime_idle)
+ USE_PLATFORM_PM_SLEEP_OPS
+ .suspend_noirq = ux500_pd_amba_suspend_noirq,
+ .resume_noirq = ux500_pd_amba_resume_noirq,
+ },
+};
+
+struct dev_pm_domain ux500_dev_power_domain = {
+ .ops = {
+ SET_RUNTIME_PM_OPS(ux500_pd_runtime_suspend,
+ ux500_pd_runtime_resume,
+ ux500_pd_runtime_idle)
+ USE_PLATFORM_PM_SLEEP_OPS
+ .suspend_noirq = ux500_pd_suspend_noirq,
+ .resume_noirq = ux500_pd_resume_noirq,
+ },
+};
+
+static struct notifier_block ux500_pd_platform_notifier = {
+ .notifier_call = ux500_pd_plat_bus_notify,
+};
+
+static struct notifier_block ux500_pd_amba_notifier = {
+ .notifier_call = ux500_pd_amba_bus_notify,
+};
+
+static int __init ux500_pm_runtime_platform_init(void)
+{
+ bus_register_notifier(&platform_bus_type, &ux500_pd_platform_notifier);
+ return 0;
+}
+core_initcall(ux500_pm_runtime_platform_init);
+
+/*
+ * The amba bus itself gets registered in a core_initcall, so we can't use
+ * that.
+ */
+static int __init ux500_pm_runtime_amba_init(void)
+{
+ bus_register_notifier(&amba_bustype, &ux500_pd_amba_notifier);
+ return 0;
+}
+arch_initcall(ux500_pm_runtime_amba_init);
diff --git a/arch/arm/mach-ux500/pm/scu.h b/arch/arm/mach-ux500/pm/scu.h
new file mode 100644
index 00000000000..a09e86a9d3c
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/scu.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2009 ST-Ericsson SA
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASMARM_ARCH_SCU_H
+#define __ASMARM_ARCH_SCU_H
+
+#include <mach/hardware.h>
+
+#define SCU_BASE U8500_SCU_BASE
+/*
+ * SCU registers
+ */
+#define SCU_CTRL 0x00
+#define SCU_CONFIG 0x04
+#define SCU_CPU_STATUS 0x08
+#define SCU_INVALIDATE 0x0c
+#define SCU_FPGA_REVISION 0x10
+
+#endif
diff --git a/arch/arm/mach-ux500/pm/suspend.c b/arch/arm/mach-ux500/pm/suspend.c
new file mode 100644
index 00000000000..c5cf6260fbd
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/suspend.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) STMicroelectronics 2009
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Authors: Rickard Andersson <rickard.andersson@stericsson.com>,
+ * Jonas Aaberg <jonas.aberg@stericsson.com>,
+ * Sundar Iyer for ST-Ericsson.
+ */
+
+#include <linux/suspend.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/gpio/nomadik.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/ab8500-debug.h>
+#include <linux/regulator/dbx500-prcmu.h>
+
+#include <mach/context.h>
+#include <mach/pm.h>
+#include <mach/id.h>
+
+#include "suspend_dbg.h"
+
+static void (*pins_suspend_force)(void);
+static void (*pins_suspend_force_mux)(void);
+
+static suspend_state_t suspend_state = PM_SUSPEND_ON;
+
+void suspend_set_pins_force_fn(void (*force)(void), void (*force_mux)(void))
+{
+ pins_suspend_force = force;
+ pins_suspend_force_mux = force_mux;
+}
+
+static atomic_t block_sleep = ATOMIC_INIT(0);
+
+void suspend_block_sleep(void)
+{
+ atomic_inc(&block_sleep);
+}
+
+void suspend_unblock_sleep(void)
+{
+ atomic_dec(&block_sleep);
+}
+
+static bool sleep_is_blocked(void)
+{
+ return (atomic_read(&block_sleep) != 0);
+}
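+
+/*
+ * Typical (hypothetical) usage from e.g. the modem driver: bracket a
+ * critical section during which ApSleep/ApDeepSleep must not be entered:
+ *
+ *	suspend_block_sleep();
+ *	... talk to the modem ...
+ *	suspend_unblock_sleep();
+ *
+ * The counter is an atomic_t, so calls may nest and may come from
+ * different contexts.
+ */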
+
+static int suspend(bool do_deepsleep)
+{
+ bool pins_force = pins_suspend_force_mux && pins_suspend_force;
+ int ret = 0;
+
+ if (sleep_is_blocked()) {
+ pr_info("suspend/resume: interrupted by modem.\n");
+ return -EBUSY;
+ }
+
+ nmk_gpio_clocks_enable();
+
+ ux500_suspend_dbg_add_wake_on_uart();
+
+ nmk_gpio_wakeups_suspend();
+
+ /* Configure the PRCMU for a sleep wakeup */
+ if (cpu_is_u9500())
+ prcmu_enable_wakeups(PRCMU_WAKEUP(ABB) | PRCMU_WAKEUP(HSI0));
+ else
+ prcmu_enable_wakeups(PRCMU_WAKEUP(ABB));
+
+ context_vape_save();
+
+ if (pins_force) {
+ /*
+ * Save GPIO settings before applying power save
+ * settings
+ */
+ context_gpio_save();
+
+ /* Apply GPIO power save mux settings */
+ context_gpio_mux_safe_switch(true);
+ pins_suspend_force_mux();
+ context_gpio_mux_safe_switch(false);
+
+ /* Apply GPIO power save settings */
+ pins_suspend_force();
+ }
+
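+ /*
+ * Decouple the GIC from the interrupt bus before checking for
+ * pending interrupts: if one is already pending, the suspend
+ * attempt must be aborted (the -EBUSY exit below) instead of
+ * racing into sleep with a wakeup outstanding.
+ */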
+ ux500_pm_gic_decouple();
+
+ if (ux500_pm_gic_pending_interrupt()) {
+ pr_info("suspend/resume: pending interrupt\n");
+
+ /* Recouple GIC with the interrupt bus */
+ ux500_pm_gic_recouple();
+ ret = -EBUSY;
+
+ goto exit;
+ }
+ ux500_pm_prcmu_set_ioforce(true);
+
+ if (do_deepsleep) {
+ context_varm_save_common();
+ context_varm_save_core();
+ context_gic_dist_disable_unneeded_irqs();
+ context_save_cpu_registers();
+
+ /*
+ * Since we only have 100 us between requesting a power state
+ * and wfi, we clean the cache here as well so that the final
+ * cache clean before wfi has as little as possible left to do.
+ */
+ context_clean_l1_cache_all();
+
+ (void) prcmu_set_power_state(PRCMU_AP_DEEP_SLEEP,
+ false, false);
+ context_save_to_sram_and_wfi(true);
+
+ context_restore_cpu_registers();
+ context_varm_restore_core();
+ context_varm_restore_common();
+
+ } else {
+
+ context_clean_l1_cache_all();
+ (void) prcmu_set_power_state(APEXECUTE_TO_APSLEEP,
+ false, false);
+ dsb();
+ __asm__ __volatile__("wfi\n\t" : : : "memory");
+ }
+
+ context_vape_restore();
+
+ /* If GPIO woke us up then save the pins that caused the wake up */
+ ux500_pm_gpio_save_wake_up_status();
+
+ ux500_suspend_dbg_sleep_status(do_deepsleep);
+
+ /* APE was turned off, restore IO ring */
+ ux500_pm_prcmu_set_ioforce(false);
+
+exit:
+ if (pins_force) {
+ /* Restore gpio settings */
+ context_gpio_mux_safe_switch(true);
+ context_gpio_restore_mux();
+ context_gpio_mux_safe_switch(false);
+ context_gpio_restore();
+ }
+
+ /* This is what cpuidle wants */
+ if (cpu_is_u9500())
+ prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
+ PRCMU_WAKEUP(ABB) | PRCMU_WAKEUP(HSI0));
+ else
+ prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
+ PRCMU_WAKEUP(ABB));
+
+ nmk_gpio_wakeups_resume();
+
+ ux500_suspend_dbg_remove_wake_on_uart();
+
+ nmk_gpio_clocks_disable();
+
+ return ret;
+}
+
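+/*
+ * "mem" attempts ApDeepSleep when deep sleep is enabled; otherwise it falls
+ * back to the same ApSleep path as "standby". If suspend is disabled
+ * altogether, only a plain wfi (with optional UART wake debug) is done.
+ */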
+static int ux500_suspend_enter(suspend_state_t state)
+{
+ if (ux500_suspend_enabled()) {
+ if (ux500_suspend_deepsleep_enabled() &&
+ state == PM_SUSPEND_MEM)
+ return suspend(true);
+ if (ux500_suspend_sleep_enabled())
+ return suspend(false);
+ }
+
+ ux500_suspend_dbg_add_wake_on_uart();
+ /*
+ * Set IOFORCE in order to wake on GPIO the same way
+ * as in deeper sleep.
+ * (U5500 is not ready for IOFORCE)
+ */
+ if (!cpu_is_u5500())
+ ux500_pm_prcmu_set_ioforce(true);
+
+ dsb();
+ __asm__ __volatile__("wfi\n\t" : : : "memory");
+
+ if (!cpu_is_u5500())
+ ux500_pm_prcmu_set_ioforce(false);
+ ux500_suspend_dbg_remove_wake_on_uart();
+
+ return 0;
+}
+
+static int ux500_suspend_valid(suspend_state_t state)
+{
+ return state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY;
+}
+
+static int ux500_suspend_prepare(void)
+{
+ int ret;
+
+ ret = regulator_suspend_prepare(suspend_state);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int ux500_suspend_prepare_late(void)
+{
+ /* ESRAM to retention instead of OFF until ROM is fixed */
+ (void) prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
+
+ ab8500_regulator_debug_force();
+ ux500_regulator_suspend_debug();
+ return 0;
+}
+
+static void ux500_suspend_wake(void)
+{
+ ux500_regulator_resume_debug();
+ ab8500_regulator_debug_restore();
+ (void) prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
+}
+
+static void ux500_suspend_finish(void)
+{
+ (void)regulator_suspend_finish();
+}
+
+static int ux500_suspend_begin(suspend_state_t state)
+{
+ (void) prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "suspend", 125);
+ suspend_state = state;
+ return ux500_suspend_dbg_begin(state);
+}
+
+static void ux500_suspend_end(void)
+{
+ (void) prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "suspend", 25);
+ suspend_state = PM_SUSPEND_ON;
+}
+
+static struct platform_suspend_ops ux500_suspend_ops = {
+ .enter = ux500_suspend_enter,
+ .valid = ux500_suspend_valid,
+ .prepare = ux500_suspend_prepare,
+ .prepare_late = ux500_suspend_prepare_late,
+ .wake = ux500_suspend_wake,
+ .finish = ux500_suspend_finish,
+ .begin = ux500_suspend_begin,
+ .end = ux500_suspend_end,
+};
+
+static __init int ux500_suspend_init(void)
+{
+ ux500_suspend_dbg_init();
+ prcmu_qos_add_requirement(PRCMU_QOS_ARM_OPP, "suspend", 25);
+ suspend_set_ops(&ux500_suspend_ops);
+ return 0;
+}
+device_initcall(ux500_suspend_init);
diff --git a/arch/arm/mach-ux500/pm/suspend_dbg.c b/arch/arm/mach-ux500/pm/suspend_dbg.c
new file mode 100644
index 00000000000..1b7d871ba52
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/suspend_dbg.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>,
+ * Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/suspend.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/mfd/dbx500-prcmu.h>
+
+#include <mach/pm.h>
+
+#ifdef CONFIG_UX500_SUSPEND_STANDBY
+static u32 sleep_enabled = 1;
+#else
+static u32 sleep_enabled;
+#endif
+
+#ifdef CONFIG_UX500_SUSPEND_MEM
+static u32 deepsleep_enabled = 1;
+#else
+static u32 deepsleep_enabled;
+#endif
+
+static u32 suspend_enabled = 1;
+
+static u32 deepsleeps_done;
+static u32 deepsleeps_failed;
+static u32 sleeps_done;
+static u32 sleeps_failed;
+static u32 suspend_count;
+
+#ifdef CONFIG_UX500_SUSPEND_DBG_WAKE_ON_UART
+void ux500_suspend_dbg_add_wake_on_uart(void)
+{
+ irq_set_irq_wake(GPIO_TO_IRQ(ux500_console_uart_gpio_pin), 1);
+ irq_set_irq_type(GPIO_TO_IRQ(ux500_console_uart_gpio_pin),
+ IRQ_TYPE_EDGE_BOTH);
+}
+
+void ux500_suspend_dbg_remove_wake_on_uart(void)
+{
+ irq_set_irq_wake(GPIO_TO_IRQ(ux500_console_uart_gpio_pin), 0);
+}
+#endif
+
+bool ux500_suspend_enabled(void)
+{
+ return suspend_enabled != 0;
+}
+
+bool ux500_suspend_sleep_enabled(void)
+{
+ return sleep_enabled != 0;
+}
+
+bool ux500_suspend_deepsleep_enabled(void)
+{
+ return deepsleep_enabled != 0;
+}
+
+void ux500_suspend_dbg_sleep_status(bool is_deepsleep)
+{
+ enum prcmu_power_status prcmu_status;
+
+ prcmu_status = prcmu_get_power_state_result();
+
+ if (is_deepsleep) {
+ pr_info("Returning from ApDeepSleep. PRCMU ret: 0x%x - %s\n",
+ prcmu_status,
+ prcmu_status == PRCMU_DEEP_SLEEP_OK ?
+ "Success" : "Fail!");
+ if (prcmu_status == PRCMU_DEEP_SLEEP_OK)
+ deepsleeps_done++;
+ else
+ deepsleeps_failed++;
+ } else {
+ pr_info("Returning from ApSleep. PRCMU ret: 0x%x - %s\n",
+ prcmu_status,
+ prcmu_status == PRCMU_SLEEP_OK ? "Success" : "Fail!");
+ if (prcmu_status == PRCMU_SLEEP_OK)
+ sleeps_done++;
+ else
+ sleeps_failed++;
+ }
+}
+
+int ux500_suspend_dbg_begin(suspend_state_t state)
+{
+ suspend_count++;
+ return 0;
+}
+
+void ux500_suspend_dbg_init(void)
+{
+ struct dentry *suspend_dir;
+ struct dentry *file;
+
+ suspend_dir = debugfs_create_dir("suspend", NULL);
+ if (IS_ERR_OR_NULL(suspend_dir))
+ return;
+
+ file = debugfs_create_bool("sleep", S_IWUGO | S_IRUGO,
+ suspend_dir,
+ &sleep_enabled);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ file = debugfs_create_bool("deepsleep", S_IWUGO | S_IRUGO,
+ suspend_dir,
+ &deepsleep_enabled);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ file = debugfs_create_bool("enable", S_IWUGO | S_IRUGO,
+ suspend_dir,
+ &suspend_enabled);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ file = debugfs_create_u32("count", S_IRUGO,
+ suspend_dir,
+ &suspend_count);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ file = debugfs_create_u32("sleep_count", S_IRUGO,
+ suspend_dir,
+ &sleeps_done);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ file = debugfs_create_u32("deepsleep_count", S_IRUGO,
+ suspend_dir,
+ &deepsleeps_done);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ file = debugfs_create_u32("sleep_failed", S_IRUGO,
+ suspend_dir,
+ &sleeps_failed);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ file = debugfs_create_u32("deepsleep_failed", S_IRUGO,
+ suspend_dir,
+ &deepsleeps_failed);
+ if (IS_ERR_OR_NULL(file))
+ goto error;
+
+ return;
+error:
+ if (!IS_ERR_OR_NULL(suspend_dir))
+ debugfs_remove_recursive(suspend_dir);
+}
diff --git a/arch/arm/mach-ux500/pm/suspend_dbg.h b/arch/arm/mach-ux500/pm/suspend_dbg.h
new file mode 100644
index 00000000000..29bfec7e269
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/suspend_dbg.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ */
+
+#ifndef UX500_SUSPEND_DBG_H
+#define UX500_SUSPEND_DBG_H
+
+#include <linux/kernel.h>
+#include <linux/suspend.h>
+
+#ifdef CONFIG_UX500_SUSPEND_DBG_WAKE_ON_UART
+void ux500_suspend_dbg_add_wake_on_uart(void);
+void ux500_suspend_dbg_remove_wake_on_uart(void);
+#else
+static inline void ux500_suspend_dbg_add_wake_on_uart(void) { }
+static inline void ux500_suspend_dbg_remove_wake_on_uart(void) { }
+#endif
+
+#ifdef CONFIG_UX500_SUSPEND_DBG
+bool ux500_suspend_enabled(void);
+bool ux500_suspend_sleep_enabled(void);
+bool ux500_suspend_deepsleep_enabled(void);
+void ux500_suspend_dbg_sleep_status(bool is_deepsleep);
+void ux500_suspend_dbg_init(void);
+int ux500_suspend_dbg_begin(suspend_state_t state);
+
+#else
+static inline bool ux500_suspend_enabled(void)
+{
+ return true;
+}
+static inline bool ux500_suspend_sleep_enabled(void)
+{
+#ifdef CONFIG_UX500_SUSPEND_STANDBY
+ return true;
+#else
+ return false;
+#endif
+}
+static inline bool ux500_suspend_deepsleep_enabled(void)
+{
+#ifdef CONFIG_UX500_SUSPEND_MEM
+ return true;
+#else
+ return false;
+#endif
+}
+static inline void ux500_suspend_dbg_sleep_status(bool is_deepsleep) { }
+static inline void ux500_suspend_dbg_init(void) { }
+
+static inline int ux500_suspend_dbg_begin(suspend_state_t state)
+{
+ return 0;
+}
+
+#endif
+
+#endif
diff --git a/arch/arm/mach-ux500/pm/timer.c b/arch/arm/mach-ux500/pm/timer.c
new file mode 100644
index 00000000000..61f92bf73da
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/timer.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ *
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * The RTC timer block is an ST Microelectronics variant of the ARM PL031.
+ * The clockwatch part is the same as in the PL031, while the timer part
+ * is only present on the ST Microelectronics variant.
+ * Here only the timer part is used.
+ *
+ * The timer part is quite troublesome to program correctly: plenty of
+ * long delays are needed to guarantee that what you wrote actually took
+ * effect.
+ *
+ * In other words, this timer is, and should only be, used from cpuidle
+ * under special conditions where the surroundings are known, so that the
+ * number of delays can be reduced.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+#include <linux/delay.h>
+
+#include <asm/errno.h>
+
+#include <mach/hardware.h>
+
+#define RTC_IMSC 0x10
+#define RTC_MIS 0x18
+#define RTC_ICR 0x1C
+#define RTC_TDR 0x20
+#define RTC_TLR1 0x24
+#define RTC_TCR 0x28
+
+#define RTC_TLR2 0x2C
+#define RTC_TPR1 0x3C
+
+#define RTC_TCR_RTTOS (1 << 0)
+#define RTC_TCR_RTTEN (1 << 1)
+#define RTC_TCR_RTTSS (1 << 2)
+
+#define RTC_IMSC_TIMSC (1 << 1)
+#define RTC_ICR_TIC (1 << 1)
+#define RTC_MIS_RTCTMIS (1 << 1)
+
+#define RTC_TCR_RTTPS_2 (1 << 4)
+#define RTC_TCR_RTTPS_3 (2 << 4)
+#define RTC_TCR_RTTPS_4 (3 << 4)
+#define RTC_TCR_RTTPS_5 (4 << 4)
+#define RTC_TCR_RTTPS_6 (5 << 4)
+#define RTC_TCR_RTTPS_7 (6 << 4)
+#define RTC_TCR_RTTPS_8 (7 << 4)
+
+#define WRITE_DELAY 130 /* 4 cycles plus margin */
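+
+/*
+ * Sanity check on the 130 us figure: the RTT runs off the 32.768 kHz
+ * clock, so four cycles take 4 * 1000000 / 32768 ~= 122 us; 130 us adds
+ * a little margin on top.
+ */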
+
+/*
+ * Count-down measure point. It just has to be high enough to differ
+ * from scheduled values.
+ */
+#define MEASURE_VAL 0xffffffff
+
+/* Just a value bigger than any reasonable scheduled timeout. */
+#define MEASURE_VAL_LIMIT 0xf0000000
+
+#define TICKS_TO_NS(x) ((s64)(x) * 30518)
+#define US_TO_TICKS(x) ((u32)((1000 * (x)) / 30518))
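+
+/*
+ * One 32.768 kHz tick is 10^9 / 32768 ~= 30518 ns, hence the constant in
+ * both conversions above. For example, US_TO_TICKS(1000000) ~= 32767
+ * ticks, i.e. roughly one second.
+ */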
+
+static void __iomem *rtc_base;
+static bool measure_latency;
+
+#ifdef CONFIG_UX500_CPUIDLE_DEBUG
+
+/*
+ * The plan here is to be able to measure the ApSleep/ApDeepSleep exit
+ * latency by using a known timer pattern.
+ * The first entry in the pattern, LR1, is the value that the scheduler
+ * wants us to sleep. The second entry is a high value, too large to be
+ * scheduled, so that a running scheduled value can be told apart from a
+ * time-measure value.
+ * When an RTT interrupt has occurred, the block automatically starts
+ * counting down the measure value in LR2, and once the ARM is awake, it
+ * reads how far the RTT has decreased the value loaded from LR2 and from
+ * that calculates how long the wake-up took.
+ */
+ktime_t u8500_rtc_exit_latency_get(void)
+{
+ u32 ticks;
+
+ if (measure_latency) {
+ ticks = MEASURE_VAL - readl(rtc_base + RTC_TDR);
+
+ /*
+ * Check if we are actually counting on an LR2 value.
+ * If not, we have woken on another interrupt.
+ */
+ if (ticks < MEASURE_VAL_LIMIT) {
+ /* convert 32 kHz ticks to ns */
+ return ktime_set(0, TICKS_TO_NS(ticks));
+ }
+ }
+ return ktime_set(0, 0);
+}
+
+static void measure_latency_start(void)
+{
+ udelay(WRITE_DELAY);
+ /*
+ * Disable RTT and clear self-start, since we want to restart
+ * rather than continue from the current pattern (see below).
+ */
+ writel(0, rtc_base + RTC_TCR);
+ udelay(WRITE_DELAY);
+
+ /*
+ * Program LR2 (load register two) to maximum value to ease
+ * identification of timer interrupt vs other.
+ */
+ writel(MEASURE_VAL, rtc_base + RTC_TLR2);
+ /*
+ * Set the load register execution pattern: a cleared bit
+ * means pick LR1, a set bit means LR2.
+ * 0xfe, binary 11111110, means do LR1 first, then LR2
+ * seven times.
+ */
+ writel(0xfe, rtc_base + RTC_TPR1);
+
+ udelay(WRITE_DELAY);
+
+ /*
+ * Enable self-start, plus a pattern of eight.
+ */
+ writel(RTC_TCR_RTTSS | RTC_TCR_RTTPS_8,
+ rtc_base + RTC_TCR);
+ udelay(WRITE_DELAY);
+}
+
+void ux500_rtcrtt_measure_latency(bool enable)
+{
+ if (enable) {
+ measure_latency_start();
+ } else {
+ writel(RTC_TCR_RTTSS | RTC_TCR_RTTOS, rtc_base + RTC_TCR);
+ writel(RTC_ICR_TIC, rtc_base + RTC_ICR);
+ writel(RTC_IMSC_TIMSC, rtc_base + RTC_IMSC);
+ }
+ measure_latency = enable;
+}
+#else
+static inline void measure_latency_start(void) { }
+static inline void ux500_rtcrtt_measure_latency(bool enable)
+{
+ writel(RTC_TCR_RTTSS | RTC_TCR_RTTOS, rtc_base + RTC_TCR);
+ writel(RTC_ICR_TIC, rtc_base + RTC_ICR);
+ writel(RTC_IMSC_TIMSC, rtc_base + RTC_IMSC);
+}
+#endif
+
+void ux500_rtcrtt_off(void)
+{
+ if (measure_latency) {
+ measure_latency_start();
+ } else {
+ /* Clear any pending interrupts */
+ if (readl(rtc_base + RTC_MIS) & RTC_MIS_RTCTMIS)
+ writel(RTC_ICR_TIC, rtc_base + RTC_ICR);
+
+ /* Disable the timer; keep self-start and one-shot mode set */
+ writel(RTC_TCR_RTTSS | RTC_TCR_RTTOS, rtc_base + RTC_TCR);
+ }
+}
+
+void ux500_rtcrtt_next(u32 time_us)
+{
+ writel(US_TO_TICKS(time_us), rtc_base + RTC_TLR1);
+}
+
+static int __init ux500_rtcrtt_init(void)
+{
+ if (cpu_is_u8500()) {
+ rtc_base = __io_address(U8500_RTC_BASE);
+ } else if (cpu_is_u5500()) {
+ rtc_base = __io_address(U5500_RTC_BASE);
+ } else {
+ pr_err("timer-rtt: Unknown DB Asic!\n");
+ return -EINVAL;
+ }
+ ux500_rtcrtt_measure_latency(false);
+ return 0;
+}
+subsys_initcall(ux500_rtcrtt_init);
diff --git a/arch/arm/mach-ux500/pm/usecase_gov.c b/arch/arm/mach-ux500/pm/usecase_gov.c
new file mode 100644
index 00000000000..5c7fe403c2f
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/usecase_gov.c
@@ -0,0 +1,973 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@stericsson.com> for ST-Ericsson
+ * Author: Vincent Guittot <vincent.guittot@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/io.h>
+#include <linux/earlysuspend.h>
+#include <linux/cpu.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/kernel_stat.h>
+#include <linux/ktime.h>
+#include <linux/cpufreq.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/cpufreq-dbx500.h>
+
+#include "../../../../drivers/cpuidle/cpuidle-dbx500.h"
+
+#define CPULOAD_MEAS_DELAY 3000 /* 3 seconds of delta */
+
+/* debug */
+static unsigned long debug;
+
+#define hp_printk(fmt...) \
+ do { \
+ if (debug) \
+ printk(fmt); \
+ } while (0)
+
+enum ux500_uc {
+ UX500_UC_NORMAL = 0,
+ UX500_UC_AUTO, /* Add use case below this. */
+ UX500_UC_VC,
+ UX500_UC_LPA,
+ UX500_UC_USER, /* Add use case above this. */
+ UX500_UC_MAX,
+};
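+
+/*
+ * The use cases between UX500_UC_AUTO and UX500_UC_USER are the ones that
+ * can be enabled individually from sysfs; usecase_update_user_config()
+ * aggregates all enabled ones into the synthetic UX500_UC_USER entry.
+ */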
+
+/* cpu load monitor struct */
+#define LOAD_MONITOR 4
+struct hotplug_cpu_info {
+ cputime64_t prev_cpu_wall;
+ cputime64_t prev_cpu_idle;
+ cputime64_t prev_cpu_io;
+ unsigned int load[LOAD_MONITOR];
+ unsigned int io[LOAD_MONITOR];
+ unsigned int idx;
+};
+
+static DEFINE_PER_CPU(struct hotplug_cpu_info, hotplug_info);
+
+/* Auto trigger criteria */
+/* loadavg threshold */
+static unsigned long lower_threshold = 175;
+static unsigned long upper_threshold = 450;
+/* load balancing */
+static unsigned long max_unbalance = 210;
+/* trend load */
+static unsigned long trend_unbalance = 40;
+static unsigned long min_trend = 5;
+/* instant load */
+static unsigned long max_instant = 85;
+
+/* Number of interrupts per second before exiting auto mode */
+static u32 exit_irq_per_s = 1000;
+static u64 old_num_irqs;
+
+static DEFINE_MUTEX(usecase_mutex);
+static bool user_config_updated;
+static enum ux500_uc current_uc = UX500_UC_MAX;
+static bool is_work_scheduled;
+static bool is_early_suspend;
+static bool uc_master_enable = true;
+
+static unsigned int cpuidle_deepest_state;
+
+struct usecase_config {
+ char *name;
+ /* Minimum required ARM OPP; if there is no requirement, set 25. */
+ unsigned int min_arm_opp;
+ /* Only use max_arm_opp if you know what you're doing */
+ unsigned int max_arm_opp;
+ unsigned long cpuidle_multiplier;
+ bool second_cpu_online;
+ bool l2_prefetch_en;
+ bool enable;
+ unsigned int forced_state; /* Forced cpu idle state. */
+ bool vc_override; /* QOS override for voice-call. */
+};
+
+static struct usecase_config usecase_conf[UX500_UC_MAX] = {
+ [UX500_UC_NORMAL] = {
+ .name = "normal",
+ .min_arm_opp = 25,
+ .cpuidle_multiplier = 1024,
+ .second_cpu_online = true,
+ .l2_prefetch_en = true,
+ .enable = true,
+ .forced_state = 0,
+ .vc_override = false,
+ },
+ [UX500_UC_AUTO] = {
+ .name = "auto",
+ .min_arm_opp = 25,
+ .cpuidle_multiplier = 0,
+ .second_cpu_online = false,
+ .l2_prefetch_en = true,
+ .enable = false,
+ .forced_state = 0,
+ .vc_override = false,
+ },
+ [UX500_UC_VC] = {
+ .name = "voice-call",
+ .min_arm_opp = 50,
+ .cpuidle_multiplier = 0,
+ .second_cpu_online = true,
+ .l2_prefetch_en = false,
+ .enable = false,
+ .forced_state = 0,
+ .vc_override = true,
+ },
+ [UX500_UC_LPA] = {
+ .name = "low-power-audio",
+ .min_arm_opp = 50,
+ .cpuidle_multiplier = 0,
+ .second_cpu_online = false,
+ .l2_prefetch_en = false,
+ .enable = false,
+ .forced_state = 0, /* Updated dynamically */
+ .vc_override = false,
+ },
+};
+
+/* daemon */
+static struct delayed_work work_usecase;
+static struct early_suspend usecase_early_suspend;
+
+/* calculate loadavg */
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
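+
+/*
+ * get_avenrun() returns fixed-point loads with FSHIFT (11) fraction bits.
+ * LOAD_INT()/LOAD_FRAC() split such a value into its integer part and two
+ * decimal digits, and determine_loadavg() recombines them scaled by 100:
+ * a 1-minute loadavg of 1.75 becomes 175, the scale on which
+ * lower_threshold and upper_threshold are defined.
+ */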
+
+extern int cpufreq_update_freq(int cpu, unsigned int min, unsigned int max);
+extern int cpuidle_set_multiplier(unsigned int value);
+extern int cpuidle_force_state(unsigned int state);
+
+static unsigned long determine_loadavg(void)
+{
+ unsigned long avg = 0;
+ unsigned long avnrun[3];
+
+ get_avenrun(avnrun, FIXED_1 / 200, 0);
+ avg += (LOAD_INT(avnrun[0]) * 100) + (LOAD_FRAC(avnrun[0]) % 100);
+
+ return avg;
+}
+
+static unsigned long determine_cpu_load(void)
+{
+ int i;
+ unsigned long total_load = 0;
+
+ /* get cpu load of each cpu */
+ for_each_online_cpu(i) {
+ unsigned int load, iowait;
+ unsigned int idle_time, iowait_time, wall_time;
+ cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
+ struct hotplug_cpu_info *info;
+
+ info = &per_cpu(hotplug_info, i);
+
+ /* update both cur_idle_time and cur_wall_time */
+ cur_idle_time = get_cpu_idle_time_us(i, &cur_wall_time);
+ cur_iowait_time = get_cpu_iowait_time_us(i, &cur_wall_time);
+
+ /* how much wall time has passed since last iteration? */
+ wall_time = (unsigned int) cputime64_sub(cur_wall_time,
+ info->prev_cpu_wall);
+ info->prev_cpu_wall = cur_wall_time;
+
+ /* how much idle time has passed since last iteration? */
+ idle_time = (unsigned int) cputime64_sub(cur_idle_time,
+ info->prev_cpu_idle);
+ info->prev_cpu_idle = cur_idle_time;
+
+ /* how much io wait time has passed since last iteration? */
+ iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
+ info->prev_cpu_io);
+ info->prev_cpu_io = cur_iowait_time;
+
+ if (unlikely(!wall_time || wall_time < idle_time))
+ continue;
+
+ /* load is the percentage of time not spent in idle */
+ load = 100 * (wall_time - idle_time) / wall_time;
+ info->load[info->idx] = load;
+ hp_printk("cpu %d load %u ", i, load);
+
+ /* iowait is the percentage of time spent in io wait */
+ iowait = 100 * iowait_time / wall_time;
+ info->io[info->idx++] = iowait;
+ hp_printk("iowait %u\n", iowait);
+
+ if (info->idx >= LOAD_MONITOR)
+ info->idx = 0;
+
+ total_load += load;
+ }
+
+ return total_load;
+}
+
+static unsigned long determine_cpu_load_trend(void)
+{
+ int i, j, k;
+ unsigned long total_load = 0;
+
+ /* Get cpu load of each cpu */
+ for_each_online_cpu(i) {
+ unsigned int load = 0;
+ struct hotplug_cpu_info *info;
+
+ info = &per_cpu(hotplug_info, i);
+
+ for (k = 0, j = info->idx; k < LOAD_MONITOR; k++, j++)
+ load += info->load[j];
+
+ load /= LOAD_MONITOR;
+
+ hp_printk("cpu %d load trend %u\n", i, load);
+
+ total_load += load;
+ }
+
+ return total_load;
+}
+
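+/*
+ * The balance metric computed below is the sum of the per-cpu trend loads
+ * scaled by 100 / min_load. With two CPUs perfectly balanced this is about
+ * 200, so the max_unbalance default of 210 tolerates only a small skew.
+ * When min_load is below min_trend, the fallback 50 << num_online_cpus()
+ * (100 or 200) avoids dividing by a tiny or zero load.
+ */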
+static unsigned long determine_cpu_balance_trend(void)
+{
+ int i, j, k;
+ unsigned long total_load = 0;
+ unsigned long min_load = (unsigned long) (-1);
+
+ /* Get cpu load of each cpu */
+ for_each_online_cpu(i) {
+ unsigned int load = 0;
+ struct hotplug_cpu_info *info;
+
+ info = &per_cpu(hotplug_info, i);
+
+ for (k = 0, j = info->idx; k < LOAD_MONITOR; k++, j++)
+ load += info->load[j];
+
+ load /= LOAD_MONITOR;
+
+ if (min_load > load)
+ min_load = load;
+ total_load += load;
+ }
+
+ if (min_load > min_trend)
+ total_load = (100 * total_load) / min_load;
+ else
+ total_load = 50 << num_online_cpus();
+
+ return total_load;
+}
+
+static void init_cpu_load_trend(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct hotplug_cpu_info *info;
+ int j;
+
+ info = &per_cpu(hotplug_info, i);
+
+ info->prev_cpu_idle = get_cpu_idle_time_us(i,
+ &(info->prev_cpu_wall));
+ info->prev_cpu_io = get_cpu_iowait_time_us(i,
+ &(info->prev_cpu_wall));
+
+ for (j = 0; j < LOAD_MONITOR; j++) {
+ info->load[j] = 100;
+ info->io[j] = 100;
+ }
+ info->idx = 0;
+ }
+}
+
+static u32 get_num_interrupts_per_s(void)
+{
+ int cpu;
+ int i;
+ u64 num_irqs = 0;
+ ktime_t now;
+ static ktime_t last;
+ unsigned int delta;
+ u32 irqs = 0;
+
+ now = ktime_get();
+
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < NR_IRQS; i++)
+ num_irqs += kstat_irqs_cpu(i, cpu);
+ }
+ pr_debug("%s: total num irqs: %llu, previous %llu\n",
+ __func__, num_irqs, old_num_irqs);
+
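+ /*
+ * The delta is computed in whole seconds, which assumes calls to
+ * this function are at least one second apart (the governor work
+ * runs every CPULOAD_MEAS_DELAY ms); otherwise the division below
+ * would be by zero.
+ */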
+ if (old_num_irqs > 0) {
+ delta = (u32)ktime_to_ms(ktime_sub(now, last)) / 1000;
+ irqs = ((u32)(num_irqs - old_num_irqs)) / delta;
+ }
+
+ old_num_irqs = num_irqs;
+ last = now;
+
+ pr_debug("delta irqs per sec:%d\n", irqs);
+
+ return irqs;
+}
+
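+/*
+ * Widen the cpufreq policy in two steps: if the current policy conflicts
+ * with the requested range (min above the new max, or max below the new
+ * min), first relax the conflicting bound, then apply the final min/max
+ * pair.
+ */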
+static int set_cpufreq(int cpu, int min_freq, int max_freq)
+{
+ int ret;
+ struct cpufreq_policy policy;
+
+ ret = cpufreq_get_policy(&policy, cpu);
+ if (ret < 0) {
+ pr_err("usecase-gov: failed to read policy\n");
+ return ret;
+ }
+
+ if (policy.min > max_freq) {
+ ret = cpufreq_update_freq(cpu, min_freq, policy.max);
+ if (ret)
+ pr_err("usecase-gov: update min cpufreq failed (1)\n");
+ }
+ if (policy.max < min_freq) {
+ ret = cpufreq_update_freq(cpu, policy.min, max_freq);
+ if (ret)
+ pr_err("usecase-gov: update max cpufreq failed (2)\n");
+ }
+
+ ret = cpufreq_update_freq(cpu, min_freq, max_freq);
+ if (ret)
+ pr_err("usecase-gov: update min-max cpufreq failed\n");
+
+ return ret;
+}
+
+static void set_cpu_config(enum ux500_uc new_uc)
+{
+ bool update = false;
+ int ret;
+ int cpu;
+ static struct cpufreq_policy original_cpufreq_policy;
+
+ if (new_uc != current_uc)
+ update = true;
+ else if ((user_config_updated) && (new_uc == UX500_UC_USER))
+ update = true;
+
+ pr_debug("%s: new_usecase=%d, current_usecase=%d, update=%d\n",
+ __func__, new_uc, current_uc, update);
+
+ if (!update)
+ goto exit;
+
+ /* Cpu hotplug */
+ if (!(usecase_conf[new_uc].second_cpu_online) &&
+ (num_online_cpus() > 1))
+ cpu_down(1);
+ else if ((usecase_conf[new_uc].second_cpu_online) &&
+ (num_online_cpus() < 2))
+ cpu_up(1);
+
+ if (usecase_conf[new_uc].max_arm_opp) {
+ int max_freq;
+
+ max_freq = dbx500_cpufreq_percent2freq(usecase_conf[new_uc].max_arm_opp);
+
+ ret = cpufreq_get_policy(&original_cpufreq_policy, 0);
+ if (ret < 0)
+ pr_err("usecase-gov: failed to get cpufreq policy\n");
+
+ for_each_online_cpu(cpu) {
+ set_cpufreq(cpu,
+ original_cpufreq_policy.min,
+ max_freq);
+ }
+ }
+
+ if (new_uc == UX500_UC_NORMAL &&
+ usecase_conf[current_uc].max_arm_opp) {
+ /*
+ * Reset the cpufreq limits to what they were before. Yes, this
+ * overwrites any changes done outside the usecase governor's
+ * control.
+ */
+ for_each_online_cpu(cpu) {
+ set_cpufreq(cpu,
+ original_cpufreq_policy.min,
+ original_cpufreq_policy.max);
+ }
+ }
+
+ prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP,
+ "usecase", usecase_conf[new_uc].min_arm_opp);
+
+ /* Cpu idle */
+ cpuidle_set_multiplier(usecase_conf[new_uc].cpuidle_multiplier);
+
+ /* L2 prefetch */
+ if (usecase_conf[new_uc].l2_prefetch_en)
+ outer_prefetch_enable();
+ else
+ outer_prefetch_disable();
+
+ /* Force cpuidle state */
+ cpuidle_force_state(usecase_conf[new_uc].forced_state);
+
+ /* QOS override */
+ prcmu_qos_voice_call_override(usecase_conf[new_uc].vc_override);
+
+ current_uc = new_uc;
+
+exit:
+ /* It's OK to clear this even if new_uc != UX500_UC_USER */
+ user_config_updated = false;
+}
+
+void usecase_update_governor_state(void)
+{
+ bool cancel_work = false;
+
+ mutex_lock(&usecase_mutex);
+
+ if (uc_master_enable && (usecase_conf[UX500_UC_AUTO].enable ||
+ usecase_conf[UX500_UC_USER].enable)) {
+ /*
+ * Use cases are enabled. If we are in early suspend, put
+ * the governor to work.
+ */
+ if (is_early_suspend && !is_work_scheduled) {
+ schedule_delayed_work_on(0, &work_usecase,
+ msecs_to_jiffies(CPULOAD_MEAS_DELAY));
+ is_work_scheduled = true;
+ } else if (!is_early_suspend && is_work_scheduled) {
+ /* Exiting from early suspend. */
+ cancel_work = true;
+ }
+
+ } else if (is_work_scheduled) {
+ /* No usecase enabled or governor is not enabled. */
+ cancel_work = true;
+ }
+
+ if (cancel_work) {
+ cancel_delayed_work_sync(&work_usecase);
+ is_work_scheduled = false;
+
+ /* Set the default settings before exiting. */
+ set_cpu_config(UX500_UC_NORMAL);
+ }
+
+ mutex_unlock(&usecase_mutex);
+}
+
+/*
+ * Start load measurement every 3 s (CPULOAD_MEAS_DELAY) in order to
+ * determine whether one CPU can be unplugged. To avoid corrupting the
+ * measurement, the load history is reset in this early suspend callback
+ * instead of computing a first average here.
+ */
+static void usecase_earlysuspend_callback(struct early_suspend *h)
+{
+ init_cpu_load_trend();
+
+ is_early_suspend = true;
+
+ usecase_update_governor_state();
+}
+
+/* Stop measurement; called at late resume (LCD back on) */
+static void usecase_lateresume_callback(struct early_suspend *h)
+{
+ is_early_suspend = false;
+
+ usecase_update_governor_state();
+}
+
+static void delayed_usecase_work(struct work_struct *work)
+{
+ unsigned long avg, load, trend, balance;
+ bool inc_perf = false;
+ bool dec_perf = false;
+ u32 irqs_per_s;
+
+ /* determine loadavg */
+ avg = determine_loadavg();
+ hp_printk("loadavg = %lu lower th %lu upper th %lu\n",
+ avg, lower_threshold, upper_threshold);
+
+ /* determine instant load */
+ load = determine_cpu_load();
+ hp_printk("cpu instant load = %lu max %lu\n", load, max_instant);
+
+ /* determine load trend */
+ trend = determine_cpu_load_trend();
+ hp_printk("cpu load trend = %lu min %lu unbal %lu\n",
+ trend, min_trend, trend_unbalance);
+
+ /* determine load balancing */
+ balance = determine_cpu_balance_trend();
+ hp_printk("load balancing trend = %lu min %lu\n",
+ balance, max_unbalance);
+
+ irqs_per_s = get_num_interrupts_per_s();
+
+ /* Don't let the configuration change in the middle of our calculations. */
+ mutex_lock(&usecase_mutex);
+
+ /* detect "instant" load increase */
+ if (load > max_instant || irqs_per_s > exit_irq_per_s) {
+ inc_perf = true;
+ } else if (!usecase_conf[UX500_UC_USER].enable &&
+ usecase_conf[UX500_UC_AUTO].enable) {
+ /* detect high loadavg use case */
+ if (avg > upper_threshold)
+ inc_perf = true;
+ /* detect idle use case */
+ else if (trend < min_trend)
+ dec_perf = true;
+ /* detect unbalanced low cpu load use case */
+ else if ((balance > max_unbalance) && (trend < trend_unbalance))
+ dec_perf = true;
+ /* detect low loadavg use case */
+ else if (avg < lower_threshold)
+ dec_perf = true;
+ /*
+ * All user use cases are disabled and the current load does
+ * not trigger any change, but the updated configuration
+ * still needs to be applied.
+ */
+ else if (user_config_updated)
+ dec_perf = true;
+ } else {
+ dec_perf = true;
+ }
+
+ /*
+ * set_cpu_config() will not update the config unless it has been
+ * changed.
+ */
+ if (dec_perf) {
+ if (usecase_conf[UX500_UC_USER].enable)
+ set_cpu_config(UX500_UC_USER);
+ else if (usecase_conf[UX500_UC_AUTO].enable)
+ set_cpu_config(UX500_UC_AUTO);
+ } else if (inc_perf) {
+ set_cpu_config(UX500_UC_NORMAL);
+ }
+
+ mutex_unlock(&usecase_mutex);
+
+ /* reprogram the scheduled work */
+ schedule_delayed_work_on(0, &work_usecase,
+ msecs_to_jiffies(CPULOAD_MEAS_DELAY));
+}
+
+static struct dentry *usecase_dir;
+
+#ifdef CONFIG_DEBUG_FS
+#define define_set(_name) \
+static ssize_t set_##_name(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ int err; \
+ long unsigned i; \
+ \
+ err = kstrtoul_from_user(user_buf, count, 0, &i); \
+ \
+ if (err) \
+ return err; \
+ \
+ _name = i; \
+ hp_printk("New value : %lu\n", _name); \
+ \
+ return count; \
+}
+
+define_set(upper_threshold);
+define_set(lower_threshold);
+define_set(max_unbalance);
+define_set(trend_unbalance);
+define_set(min_trend);
+define_set(max_instant);
+define_set(debug);
+
+#define define_print(_name) \
+static int print_##_name(struct seq_file *s, void *p) \
+{ \
+ return seq_printf(s, "%lu\n", _name); \
+}
+
+define_print(upper_threshold);
+define_print(lower_threshold);
+define_print(max_unbalance);
+define_print(trend_unbalance);
+define_print(min_trend);
+define_print(max_instant);
+define_print(debug);
+
+#define define_open(_name) \
+static int open_##_name(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, print_##_name, inode->i_private); \
+}
+
+define_open(upper_threshold);
+define_open(lower_threshold);
+define_open(max_unbalance);
+define_open(trend_unbalance);
+define_open(min_trend);
+define_open(max_instant);
+define_open(debug);
+
+#define define_dbg_file(_name) \
+static const struct file_operations fops_##_name = { \
+ .open = open_##_name, \
+ .write = set_##_name, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ .owner = THIS_MODULE, \
+}; \
+static struct dentry *file_##_name;
+
+define_dbg_file(upper_threshold);
+define_dbg_file(lower_threshold);
+define_dbg_file(max_unbalance);
+define_dbg_file(trend_unbalance);
+define_dbg_file(min_trend);
+define_dbg_file(max_instant);
+define_dbg_file(debug);
+
+struct dbg_file {
+ struct dentry **file;
+ const struct file_operations *fops;
+ const char *name;
+};
+
+#define define_dbg_entry(_name) \
+{ \
+ .file = &file_##_name, \
+ .fops = &fops_##_name, \
+ .name = #_name \
+}
+
+static struct dbg_file debug_entry[] = {
+ define_dbg_entry(upper_threshold),
+ define_dbg_entry(lower_threshold),
+ define_dbg_entry(max_unbalance),
+ define_dbg_entry(trend_unbalance),
+ define_dbg_entry(min_trend),
+ define_dbg_entry(max_instant),
+ define_dbg_entry(debug),
+};
+
+static int setup_debugfs(void)
+{
+ int i;
+ usecase_dir = debugfs_create_dir("usecase", NULL);
+
+ if (IS_ERR_OR_NULL(usecase_dir))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(debug_entry); i++) {
+ if (IS_ERR_OR_NULL(debugfs_create_file(debug_entry[i].name,
+ S_IWUGO | S_IRUGO,
+ usecase_dir,
+ NULL,
+ debug_entry[i].fops)))
+ goto fail;
+ }
+
+ if (IS_ERR_OR_NULL(debugfs_create_u32("exit_irq_per_s",
+ S_IWUGO | S_IRUGO, usecase_dir,
+ &exit_irq_per_s)))
+ goto fail;
+ return 0;
+fail:
+ debugfs_remove_recursive(usecase_dir);
+ return -EINVAL;
+}
+#else
+static int setup_debugfs(void)
+{
+ return 0;
+}
+#endif
+
+static void usecase_update_user_config(void)
+{
+ int i;
+ bool config_enable = false;
+ struct usecase_config *user_conf = &usecase_conf[UX500_UC_USER];
+
+ mutex_lock(&usecase_mutex);
+
+ user_conf->min_arm_opp = 25;
+ user_conf->max_arm_opp = 0;
+ user_conf->cpuidle_multiplier = 0;
+ user_conf->second_cpu_online = false;
+ user_conf->l2_prefetch_en = false;
+ user_conf->forced_state = cpuidle_deepest_state;
+ user_conf->vc_override = true; /* A single false will clear it. */
+
+ /* Don't include the auto and normal modes in this */
+ for (i = (UX500_UC_AUTO + 1); i < UX500_UC_USER; i++) {
+ if (!usecase_conf[i].enable)
+ continue;
+
+ config_enable = true;
+
+ /* It's the highest arm opp requirement that should be used */
+ if (usecase_conf[i].min_arm_opp > user_conf->min_arm_opp)
+ user_conf->min_arm_opp = usecase_conf[i].min_arm_opp;
+
+ if (usecase_conf[i].max_arm_opp > user_conf->max_arm_opp)
+ user_conf->max_arm_opp = usecase_conf[i].max_arm_opp;
+
+ if (usecase_conf[i].cpuidle_multiplier >
+ user_conf->cpuidle_multiplier)
+ user_conf->cpuidle_multiplier =
+ usecase_conf[i].cpuidle_multiplier;
+
+ user_conf->second_cpu_online |=
+ usecase_conf[i].second_cpu_online;
+
+ user_conf->l2_prefetch_en |=
+ usecase_conf[i].l2_prefetch_en;
+
+ /* Take the shallowest state. */
+ if (usecase_conf[i].forced_state < user_conf->forced_state)
+ user_conf->forced_state = usecase_conf[i].forced_state;
+
+ /* Only override QOS if all enabled configurations are
+ * requesting it.
+ */
+ if (!usecase_conf[i].vc_override)
+ user_conf->vc_override = false;
+ }
+
+ user_conf->enable = config_enable;
+ user_config_updated = true;
+
+ mutex_unlock(&usecase_mutex);
+}
+
+struct usecase_devclass_attr {
+ struct sysdev_class_attribute class_attr;
+ u32 index;
+};
+
+/* One node for each use case except "user", plus "current" and "enable" */
+#define UX500_NUM_SYSFS_NODES (UX500_UC_USER + 2)
+#define UX500_CURRENT_NODE_INDEX (UX500_NUM_SYSFS_NODES - 1)
+#define UX500_ENABLE_NODE_INDEX (UX500_NUM_SYSFS_NODES - 2)
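+
+/*
+ * With UX500_UC_USER == 4 this gives six nodes: indices 0..3 for the
+ * "normal", "auto", "voice-call" and "low-power-audio" use cases, index 4
+ * for "enable" and index 5 for "current".
+ */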
+
+static struct usecase_devclass_attr usecase_dc_attr[UX500_NUM_SYSFS_NODES];
+
+static struct attribute *dbs_attributes[UX500_NUM_SYSFS_NODES + 1] = {NULL};
+
+static struct attribute_group dbs_attr_group = {
+ .attrs = dbs_attributes,
+ .name = "usecase",
+};
+
+static ssize_t show_current(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ enum ux500_uc display_uc = (current_uc == UX500_UC_MAX) ?
+ UX500_UC_NORMAL : current_uc;
+
+ return sprintf(buf, "min_arm_opp: %d\n"
+ "max_arm_opp: %d\n"
+ "cpuidle_multiplier: %ld\n"
+ "second_cpu_online: %s\n"
+ "l2_prefetch_en: %s\n"
+ "forced_state: %d\n"
+ "vc_override: %s\n",
+ usecase_conf[display_uc].min_arm_opp,
+ usecase_conf[display_uc].max_arm_opp,
+ usecase_conf[display_uc].cpuidle_multiplier,
+ usecase_conf[display_uc].second_cpu_online ? "true" : "false",
+ usecase_conf[display_uc].l2_prefetch_en ? "true" : "false",
+ usecase_conf[display_uc].forced_state,
+ usecase_conf[display_uc].vc_override ? "true" : "false");
+}
+
+static ssize_t show_enable(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", uc_master_enable);
+}
+
+static ssize_t store_enable(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ uc_master_enable = (bool) input;
+
+ usecase_update_governor_state();
+
+ return count;
+}
+
+static ssize_t show_dc_attr(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
+{
+ struct usecase_devclass_attr *uattr =
+ container_of(attr, struct usecase_devclass_attr, class_attr);
+
+ return sprintf(buf, "%u\n",
+ usecase_conf[uattr->index].enable);
+}
+
+static ssize_t store_dc_attr(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ struct usecase_devclass_attr *uattr =
+ container_of(attr, struct usecase_devclass_attr, class_attr);
+
+ ret = sscanf(buf, "%u", &input);
+
+ /* Normal mode can't be changed. */
+ if ((ret != 1) || (uattr->index == 0))
+ return -EINVAL;
+
+ usecase_conf[uattr->index].enable = (bool)input;
+
+ usecase_update_user_config();
+
+ usecase_update_governor_state();
+
+ return count;
+}
+
+static int usecase_sysfs_init(void)
+{
+ int err;
+ int i;
+
+ /* Last two nodes are not based on usecase configurations */
+ for (i = 0; i < (UX500_NUM_SYSFS_NODES - 2); i++) {
+ usecase_dc_attr[i].class_attr.attr.name = usecase_conf[i].name;
+ usecase_dc_attr[i].class_attr.attr.mode = 0644;
+ usecase_dc_attr[i].class_attr.show = show_dc_attr;
+ usecase_dc_attr[i].class_attr.store = store_dc_attr;
+ usecase_dc_attr[i].index = i;
+
+ dbs_attributes[i] = &(usecase_dc_attr[i].class_attr.attr);
+ }
+
+ /* sysfs current */
+ usecase_dc_attr[UX500_CURRENT_NODE_INDEX].class_attr.attr.name =
+ "current";
+ usecase_dc_attr[UX500_CURRENT_NODE_INDEX].class_attr.attr.mode =
+ 0644;
+ usecase_dc_attr[UX500_CURRENT_NODE_INDEX].class_attr.show =
+ show_current;
+ usecase_dc_attr[UX500_CURRENT_NODE_INDEX].class_attr.store =
+ NULL;
+ usecase_dc_attr[UX500_CURRENT_NODE_INDEX].index =
+ 0;
+ dbs_attributes[UX500_CURRENT_NODE_INDEX] =
+ &(usecase_dc_attr[UX500_CURRENT_NODE_INDEX].class_attr.attr);
+
+ /* sysfs enable */
+ usecase_dc_attr[UX500_ENABLE_NODE_INDEX].class_attr.attr.name =
+ "enable";
+ usecase_dc_attr[UX500_ENABLE_NODE_INDEX].class_attr.attr.mode =
+ 0644;
+ usecase_dc_attr[UX500_ENABLE_NODE_INDEX].class_attr.show =
+ show_enable;
+ usecase_dc_attr[UX500_ENABLE_NODE_INDEX].class_attr.store =
+ store_enable;
+ usecase_dc_attr[UX500_ENABLE_NODE_INDEX].index =
+ 0;
+ dbs_attributes[UX500_ENABLE_NODE_INDEX] =
+ &(usecase_dc_attr[UX500_ENABLE_NODE_INDEX].class_attr.attr);
+
+ err = sysfs_create_group(&(cpu_sysdev_class.kset.kobj),
+ &dbs_attr_group);
+ if (err)
+ pr_err("usecase-gov: sysfs_create_group"
+ " failed with error = %d\n", err);
+
+ return err;
+}
+
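+/*
+ * Pick the shallowest cpuidle state with the APE off but the ARM retained
+ * as the forced state for low-power-audio, and remember the deepest
+ * available state for the aggregated user configuration.
+ */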
+static void usecase_cpuidle_init(void)
+{
+ int max_states;
+ int i;
+ struct cstate *state = ux500_ci_get_cstates(&max_states);
+
+ for (i = 0; i < max_states; i++)
+ if ((state[i].APE == APE_OFF) && (state[i].ARM == ARM_RET))
+ break;
+
+ usecase_conf[UX500_UC_LPA].forced_state = i;
+
+ cpuidle_deepest_state = max_states - 1;
+}
+
+/* initialize devices */
+static int __init init_usecase_devices(void)
+{
+ int err;
+
+ pr_info("Use-case governor initialized\n");
+
+ /* add early_suspend callback */
+ usecase_early_suspend.level = 200;
+ usecase_early_suspend.suspend = usecase_earlysuspend_callback;
+ usecase_early_suspend.resume = usecase_lateresume_callback;
+ register_early_suspend(&usecase_early_suspend);
+
+ /* register deferrable delayed work */
+ INIT_DELAYED_WORK_DEFERRABLE(&work_usecase,
+ delayed_usecase_work);
+
+ init_cpu_load_trend();
+
+ err = setup_debugfs();
+ if (err)
+ goto error;
+ err = usecase_sysfs_init();
+ if (err)
+ goto error2;
+
+ usecase_cpuidle_init();
+
+ prcmu_qos_add_requirement(PRCMU_QOS_ARM_OPP, "usecase", 25);
+
+ return 0;
+error2:
+ debugfs_remove_recursive(usecase_dir);
+error:
+ unregister_early_suspend(&usecase_early_suspend);
+ return err;
+}
+
+device_initcall(init_usecase_devices);