author    Philippe Langlais <philippe.langlais@linaro.org>    2011-04-04 16:51:20 +0200
committer Ulf Hansson <ulf.hansson@stericsson.com>    2011-09-19 15:14:45 +0200
commit    c4ab5dcf04fced0bb048fc8b5969a6d50884c5d0 (patch)
tree      ac4bff4912c62249a31447441f15df8478faeb22 /arch/arm
parent    7c3bcf59903b5ca497ff3c58b3aac8d793e36997 (diff)
mach-ux500: pm: merge all 2.6.35 power management features
Add cpu idle support and re-introduce a temporary implementation of atomic regulators.

Signed-off-by: Philippe Langlais <philippe.langlais@linaro.org>
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/mach-ux500/include/mach/regulator.h | 88
-rw-r--r--  arch/arm/mach-ux500/pm/Kconfig | 68
-rw-r--r--  arch/arm/mach-ux500/pm/Makefile | 7
-rw-r--r--  arch/arm/mach-ux500/pm/context-db5500.c | 395
-rw-r--r--  arch/arm/mach-ux500/pm/context-db8500.c | 467
-rw-r--r--  arch/arm/mach-ux500/pm/context.c | 740
-rw-r--r--  arch/arm/mach-ux500/pm/context.h | 83
-rwxr-xr-x  arch/arm/mach-ux500/pm/context_arm.S | 427
-rw-r--r--  arch/arm/mach-ux500/pm/cpuidle.c | 878
-rw-r--r--  arch/arm/mach-ux500/pm/cpuidle.h | 75
-rw-r--r--  arch/arm/mach-ux500/pm/cpuidle_dbg.c | 568
-rw-r--r--  arch/arm/mach-ux500/pm/cpuidle_dbg.h | 59
-rw-r--r--  arch/arm/mach-ux500/pm/pm.c | 346
-rw-r--r--  arch/arm/mach-ux500/pm/pm.h | 110
-rw-r--r--  arch/arm/mach-ux500/pm/runtime.c | 297
-rw-r--r--  arch/arm/mach-ux500/pm/suspend.c | 172
-rw-r--r--  arch/arm/mach-ux500/pm/suspend_dbg.c | 199
-rw-r--r--  arch/arm/mach-ux500/pm/suspend_dbg.h | 60
-rw-r--r--  arch/arm/mach-ux500/regulator-db8500.c | 82
19 files changed, 5121 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mach-ux500/include/mach/regulator.h b/arch/arm/mach-ux500/include/mach/regulator.h
new file mode 100644
index 00000000000..75ff3340359
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/regulator.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Bengt Jonsson <bengt.jonsson@stericsson.com> for ST-Ericsson,
+ * Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ * License Terms: GNU General Public License v2
+ *
+ */
+
+#ifndef MACH_UX500_REGULATOR_H
+#define MACH_UX500_REGULATOR_H
+
+#include <linux/device.h>
+
+struct ux500_regulator;
+
+#ifdef CONFIG_REGULATOR
+/*
+ * NOTE! The device will be connected to the correct regulator by this
+ * new framework. A list of connections matches up dev_name(dev)
+ * with the specific regulator. This follows the same principle as the
+ * normal regulator framework.
+ *
+ * This framework shall only be used in special cases when a regulator
+ * has to be enabled/disabled in atomic context.
+ */
+
+/**
+ * ux500_regulator_get()
+ *
+ * @dev: the driver's device struct
+ *
+ * Returns a ux500_regulator struct to be used as the argument for
+ * ux500_regulator_atomic_enable/disable calls.
+ * Returns ERR_PTR(-EINVAL) if no matching regulator is found.
+ */
+struct ux500_regulator *__must_check ux500_regulator_get(struct device *dev);
+
+/**
+ * ux500_regulator_atomic_enable()
+ *
+ * @regulator: Regulator handle, provided from ux500_regulator_get.
+ *
+ * The enable/disable functions keep an internal counter, so every
+ * enable must be paired with a disable in order to turn the regulator off.
+ */
+int ux500_regulator_atomic_enable(struct ux500_regulator *regulator);
+
+/**
+ * ux500_regulator_atomic_disable()
+ *
+ * @regulator: Regulator handle, provided from ux500_regulator_get.
+ *
+ */
+int ux500_regulator_atomic_disable(struct ux500_regulator *regulator);
+
+/**
+ * ux500_regulator_put()
+ *
+ * @regulator: Regulator handle, provided from ux500_regulator_get.
+ */
+void ux500_regulator_put(struct ux500_regulator *regulator);
+#else
+static inline struct ux500_regulator *__must_check
+ux500_regulator_get(struct device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int
+ux500_regulator_atomic_enable(struct ux500_regulator *regulator)
+{
+ return -EINVAL;
+}
+
+static inline int
+ux500_regulator_atomic_disable(struct ux500_regulator *regulator)
+{
+ return -EINVAL;
+}
+
+static inline void ux500_regulator_put(struct ux500_regulator *regulator)
+{
+}
+#endif
+
+#endif
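
A minimal usage sketch of the interface above, from a hypothetical "foo" driver (the foo_* names are invented for illustration; only the four ux500_regulator_* functions come from this header):

/* Illustrative only, not part of the patch. */
#include <linux/device.h>
#include <linux/err.h>
#include <mach/regulator.h>

static struct ux500_regulator *foo_regu;

static int foo_probe(struct device *dev)
{
	/* The framework matches dev_name(dev) against its connection list. */
	foo_regu = ux500_regulator_get(dev);
	if (IS_ERR(foo_regu))
		return PTR_ERR(foo_regu);
	return 0;
}

/* Safe to call in atomic context, e.g. under a spinlock. */
static void foo_do_atomic_work(void)
{
	if (ux500_regulator_atomic_enable(foo_regu))
		return;
	/* ... hardware access that needs the supply ... */
	ux500_regulator_atomic_disable(foo_regu);	/* pairs with the enable */
}

static void foo_remove(void)
{
	ux500_regulator_put(foo_regu);
}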
diff --git a/arch/arm/mach-ux500/pm/Kconfig b/arch/arm/mach-ux500/pm/Kconfig
index 22171f843ba..74d9db42279 100644
--- a/arch/arm/mach-ux500/pm/Kconfig
+++ b/arch/arm/mach-ux500/pm/Kconfig
@@ -4,3 +4,71 @@ config U8500_CPUFREQ
default y
help
Add support for CPU Frequency scaling for U8500.
+
+config U8500_CPUIDLE
+ tristate "CPUIdle support"
+ depends on UX500_SOC_DB8500 && CPU_IDLE && U8500_PRCMU
+ default y
+ select GENERIC_CLOCKEVENTS_BROADCAST
+ select UX500_CONTEXT
+ help
+ Add support for CPUIdle for U8500.
+
+config U8500_CPUIDLE_DEEPEST_STATE
+ int "Deepest sleep state"
+ default 2
+ depends on U8500_CPUIDLE
+ help
Set the deepest sleep state. See the cstate struct in cpuidle.c.
+
+config U8500_CPUIDLE_DEBUG
+ bool "CPUIdle debug"
+ depends on U8500_CPUIDLE && DEBUG_FS
+ default y
+ help
+ Add debugging support for CPUIdle for U8500.
+
+config UX500_SUSPEND
+ bool "Suspend to mem and standby support"
+ depends on UX500_SOC_DB8500 && PM
+ select UX500_CONTEXT
+ help
+ Add support for suspend.
+
+config UX500_SUSPEND_STANDBY
+ bool "Suspend Standby goes to ApSleep"
+ depends on UX500_SUSPEND
+ help
+ If yes, echo standby > /sys/power/state puts the system into ApSleep.
+
+config UX500_SUSPEND_MEM
+ bool "Suspend Mem goes to ApDeepSleep"
+ depends on UX500_SUSPEND
+ help
If yes, echo mem > /sys/power/state puts the system into ApDeepSleep;
otherwise it behaves the same as echo standby > /sys/power/state.
+
+config UX500_SUSPEND_DBG
+ bool "Suspend debug"
+ depends on UX500_SUSPEND && DEBUG_FS
+ help
+ Add debug support for suspend.
+
+config UX500_SUSPEND_DBG_WAKE_ON_UART
+ bool "Suspend wakes on console UART"
+ depends on UX500_SUSPEND_DBG
+ help
Wake up on UART interrupts. Makes it possible for the console to wake up the system.
+
+config UX500_CONSOLE_UART_GPIO_PIN
+ int "The pin number of the console UART GPIO pin"
+ default 29
+ depends on UX500_SUSPEND_DBG_WAKE_ON_UART || U8500_CPUIDLE_DEBUG
+ help
Number of the GPIO pin connected to the console UART RX line.
+
+config UX500_CONTEXT
+ bool "Context save/restore support for UX500"
+ depends on UX500_SOC_DB8500 || UX500_SOC_DB5500
+ help
+ This is needed for ApSleep and deeper sleep states.
diff --git a/arch/arm/mach-ux500/pm/Makefile b/arch/arm/mach-ux500/pm/Makefile
index 1e843409489..7f5f5930a0e 100644
--- a/arch/arm/mach-ux500/pm/Makefile
+++ b/arch/arm/mach-ux500/pm/Makefile
@@ -1,4 +1,11 @@
#
# Power save related files
#
+obj-y := pm.o runtime.o
+
+obj-$(CONFIG_U8500_CPUIDLE) += cpuidle.o
+obj-$(CONFIG_U8500_CPUIDLE_DEBUG) += cpuidle_dbg.o
+obj-$(CONFIG_UX500_CONTEXT) += context.o context_arm.o context-db8500.o context-db5500.o
obj-$(CONFIG_U8500_CPUFREQ) += cpufreq.o
+obj-$(CONFIG_UX500_SUSPEND) += suspend.o
+obj-$(CONFIG_UX500_SUSPEND_DBG) += suspend_dbg.o
diff --git a/arch/arm/mach-ux500/pm/context-db5500.c b/arch/arm/mach-ux500/pm/context-db5500.c
new file mode 100644
index 00000000000..f9a8376620f
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/context-db5500.c
@@ -0,0 +1,395 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>,
+ * Rickard Andersson <rickard.andersson@stericsson.com>,
+ * Sundar Iyer <sundar.iyer@stericsson.com>,
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+
+#include "context.h"
+
+/* These registers are DB5500 specific */
+#define NODE_HIBW1_ESRAM_IN_0_PRIORITY 0x0
+#define NODE_HIBW1_ESRAM_IN_1_PRIORITY 0x4
+
+#define NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT 0x18
+#define NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT 0x1C
+#define NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT 0x20
+
+#define NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT 0x24
+#define NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT 0x28
+#define NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT 0x2C
+
+#define NODE_HIBW1_DDR_IN_0_PRIORITY 0x400
+#define NODE_HIBW1_DDR_IN_1_PRIORITY 0x404
+#define NODE_HIBW1_DDR_IN_2_PRIORITY 0x408
+
+#define NODE_HIBW1_DDR_IN_0_LIMIT 0x424
+#define NODE_HIBW1_DDR_IN_1_LIMIT 0x428
+#define NODE_HIBW1_DDR_IN_2_LIMIT 0x42C
+
+#define NODE_HIBW1_DDR_OUT_0_PRIORITY 0x430
+
+#define NODE_HIBW2_ESRAM_IN_0_PRIORITY 0x800
+#define NODE_HIBW2_ESRAM_IN_1_PRIORITY 0x804
+
+#define NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT 0x818
+#define NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT 0x81C
+#define NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT 0x820
+
+#define NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT 0x824
+#define NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT 0x828
+#define NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT 0x82C
+
+#define NODE_HIBW2_DDR_IN_0_PRIORITY 0xC00
+#define NODE_HIBW2_DDR_IN_1_PRIORITY 0xC04
+#define NODE_HIBW2_DDR_IN_2_PRIORITY 0xC08
+
+#define NODE_HIBW2_DDR_IN_0_LIMIT 0xC24
+#define NODE_HIBW2_DDR_IN_1_LIMIT 0xC28
+#define NODE_HIBW2_DDR_IN_2_LIMIT 0xC2C
+
+#define NODE_HIBW2_DDR_OUT_0_PRIORITY 0xC30
+
+#define NODE_ESRAM0_IN_0_PRIORITY 0x1000
+#define NODE_ESRAM0_IN_1_PRIORITY 0x1004
+#define NODE_ESRAM0_IN_2_PRIORITY 0x1008
+
+#define NODE_ESRAM0_IN_0_LIMIT 0x1024
+#define NODE_ESRAM0_IN_1_LIMIT 0x1028
+#define NODE_ESRAM0_IN_2_LIMIT 0x102C
+#define NODE_ESRAM0_OUT_0_PRIORITY 0x1030
+
+#define NODE_ESRAM1_2_IN_0_PRIORITY 0x1400
+#define NODE_ESRAM1_2_IN_1_PRIORITY 0x1404
+#define NODE_ESRAM1_2_IN_2_PRIORITY 0x1408
+
+#define NODE_ESRAM1_2_IN_0_ARB_1_LIMIT 0x1424
+#define NODE_ESRAM1_2_IN_1_ARB_1_LIMIT 0x1428
+#define NODE_ESRAM1_2_IN_2_ARB_1_LIMIT 0x142C
+#define NODE_ESRAM1_2_OUT_0_PRIORITY 0x1430
+
+#define NODE_ESRAM3_4_IN_0_PRIORITY 0x1800
+#define NODE_ESRAM3_4_IN_1_PRIORITY 0x1804
+#define NODE_ESRAM3_4_IN_2_PRIORITY 0x1808
+
+#define NODE_ESRAM3_4_IN_0_ARB_1_LIMIT 0x1824
+#define NODE_ESRAM3_4_IN_1_ARB_1_LIMIT 0x1828
+#define NODE_ESRAM3_4_IN_2_ARB_1_LIMIT 0x182C
+#define NODE_ESRAM3_4_OUT_0_PRIORITY 0x1830
+
+/*
+ * Save the ICN (interconnect node) configuration registers.
+ * TODO: This can be optimized, for example if we have
+ * a static ICN configuration.
+ */
+
+static struct {
+ void __iomem *base;
+ u32 hibw1_esram_in_pri[2];
+ u32 hibw1_esram_in0_arb[3];
+ u32 hibw1_esram_in1_arb[3];
+ u32 hibw1_ddr_in_prio[3];
+ u32 hibw1_ddr_in_limit[3];
+ u32 hibw1_ddr_out_prio_reg;
+
+ /* HiBw2 node registers */
+ u32 hibw2_esram_in_pri[2];
+ u32 hibw2_esram_in0_arblimit[3];
+ u32 hibw2_esram_in1_arblimit[3];
+ u32 hibw2_ddr_in_prio[3];
+ u32 hibw2_ddr_in_limit[3];
+ u32 hibw2_ddr_out_prio_reg;
+
+ /* ESRAM node registers */
+ u32 esram_in_prio[3];
+ u32 esram_in_lim[3];
+ u32 esram_out_prio_reg;
+
+ u32 esram12_in_prio[3];
+ u32 esram12_in_arb_lim[3];
+ u32 esram12_out_prio_reg;
+
+ u32 esram34_in_prio[3];
+ u32 esram34_in_arb_lim[3];
+ u32 esram34_out_prio;
+} context_icn;
+
+
+void u5500_context_save_icn(void)
+{
+ /* hibw1 */
+ context_icn.hibw1_esram_in_pri[0] =
+ readl(context_icn.base + NODE_HIBW1_ESRAM_IN_0_PRIORITY);
+ context_icn.hibw1_esram_in_pri[1] =
+ readl(context_icn.base + NODE_HIBW1_ESRAM_IN_1_PRIORITY);
+
+ context_icn.hibw1_esram_in0_arb[0] =
+ readl(context_icn.base + NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT);
+ context_icn.hibw1_esram_in0_arb[1] =
+ readl(context_icn.base + NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT);
+ context_icn.hibw1_esram_in0_arb[2] =
+ readl(context_icn.base + NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT);
+
+ context_icn.hibw1_esram_in1_arb[0] =
+ readl(context_icn.base + NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT);
+ context_icn.hibw1_esram_in1_arb[1] =
+ readl(context_icn.base + NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT);
+ context_icn.hibw1_esram_in1_arb[2] =
+ readl(context_icn.base + NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT);
+
+ context_icn.hibw1_ddr_in_prio[0] =
+ readl(context_icn.base + NODE_HIBW1_DDR_IN_0_PRIORITY);
+ context_icn.hibw1_ddr_in_prio[1] =
+ readl(context_icn.base + NODE_HIBW1_DDR_IN_1_PRIORITY);
+ context_icn.hibw1_ddr_in_prio[2] =
+ readl(context_icn.base + NODE_HIBW1_DDR_IN_2_PRIORITY);
+
+ context_icn.hibw1_ddr_in_limit[0] =
+ readl(context_icn.base + NODE_HIBW1_DDR_IN_0_LIMIT);
+ context_icn.hibw1_ddr_in_limit[1] =
+ readl(context_icn.base + NODE_HIBW1_DDR_IN_1_LIMIT);
+ context_icn.hibw1_ddr_in_limit[2] =
+ readl(context_icn.base + NODE_HIBW1_DDR_IN_2_LIMIT);
+
+ context_icn.hibw1_ddr_out_prio_reg =
+ readl(context_icn.base + NODE_HIBW1_DDR_OUT_0_PRIORITY);
+
+ /* hibw2 */
+ context_icn.hibw2_esram_in_pri[0] =
+ readl(context_icn.base + NODE_HIBW2_ESRAM_IN_0_PRIORITY);
+ context_icn.hibw2_esram_in_pri[1] =
+ readl(context_icn.base + NODE_HIBW2_ESRAM_IN_1_PRIORITY);
+
+ context_icn.hibw2_esram_in0_arblimit[0] =
+ readl(context_icn.base + NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT);
+ context_icn.hibw2_esram_in0_arblimit[1] =
+ readl(context_icn.base + NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT);
+ context_icn.hibw2_esram_in0_arblimit[2] =
+ readl(context_icn.base + NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT);
+
+ context_icn.hibw2_esram_in1_arblimit[0] =
+ readl(context_icn.base + NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT);
+ context_icn.hibw2_esram_in1_arblimit[1] =
+ readl(context_icn.base + NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT);
+ context_icn.hibw2_esram_in1_arblimit[2] =
+ readl(context_icn.base + NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT);
+
+ context_icn.hibw2_ddr_in_prio[0] =
+ readl(context_icn.base + NODE_HIBW2_DDR_IN_0_PRIORITY);
+ context_icn.hibw2_ddr_in_prio[1] =
+ readl(context_icn.base + NODE_HIBW2_DDR_IN_1_PRIORITY);
+ context_icn.hibw2_ddr_in_prio[2] =
+ readl(context_icn.base + NODE_HIBW2_DDR_IN_2_PRIORITY);
+
+ context_icn.hibw2_ddr_in_limit[0] =
+ readl(context_icn.base + NODE_HIBW2_DDR_IN_0_LIMIT);
+ context_icn.hibw2_ddr_in_limit[1] =
+ readl(context_icn.base + NODE_HIBW2_DDR_IN_1_LIMIT);
+ context_icn.hibw2_ddr_in_limit[2] =
+ readl(context_icn.base + NODE_HIBW2_DDR_IN_2_LIMIT);
+
+ context_icn.hibw2_ddr_out_prio_reg =
+ readl(context_icn.base + NODE_HIBW2_DDR_OUT_0_PRIORITY);
+
+ /* ESRAM0 */
+ context_icn.esram_in_prio[0] =
+ readl(context_icn.base + NODE_ESRAM0_IN_0_PRIORITY);
+ context_icn.esram_in_prio[1] =
+ readl(context_icn.base + NODE_ESRAM0_IN_1_PRIORITY);
+ context_icn.esram_in_prio[2] =
+ readl(context_icn.base + NODE_ESRAM0_IN_2_PRIORITY);
+
+ context_icn.esram_in_lim[0] =
+ readl(context_icn.base + NODE_ESRAM0_IN_0_LIMIT);
+ context_icn.esram_in_lim[1] =
+ readl(context_icn.base + NODE_ESRAM0_IN_1_LIMIT);
+ context_icn.esram_in_lim[2] =
+ readl(context_icn.base + NODE_ESRAM0_IN_2_LIMIT);
+
+ context_icn.esram_out_prio_reg =
+ readl(context_icn.base + NODE_ESRAM0_OUT_0_PRIORITY);
+
+ /* ESRAM1-2 */
+ context_icn.esram12_in_prio[0] =
+ readl(context_icn.base + NODE_ESRAM1_2_IN_0_PRIORITY);
+ context_icn.esram12_in_prio[1] =
+ readl(context_icn.base + NODE_ESRAM1_2_IN_1_PRIORITY);
+ context_icn.esram12_in_prio[2] =
+ readl(context_icn.base + NODE_ESRAM1_2_IN_2_PRIORITY);
+
+ context_icn.esram12_in_arb_lim[0] =
+ readl(context_icn.base + NODE_ESRAM1_2_IN_0_ARB_1_LIMIT);
+ context_icn.esram12_in_arb_lim[1] =
+ readl(context_icn.base + NODE_ESRAM1_2_IN_1_ARB_1_LIMIT);
+ context_icn.esram12_in_arb_lim[2] =
+ readl(context_icn.base + NODE_ESRAM1_2_IN_2_ARB_1_LIMIT);
+
+ context_icn.esram12_out_prio_reg =
+ readl(context_icn.base + NODE_ESRAM1_2_OUT_0_PRIORITY);
+
+ /* ESRAM3-4 */
+ context_icn.esram34_in_prio[0] =
+ readl(context_icn.base + NODE_ESRAM3_4_IN_0_PRIORITY);
+ context_icn.esram34_in_prio[1] =
+ readl(context_icn.base + NODE_ESRAM3_4_IN_1_PRIORITY);
+ context_icn.esram34_in_prio[2] =
+ readl(context_icn.base + NODE_ESRAM3_4_IN_2_PRIORITY);
+
+ context_icn.esram34_in_arb_lim[0] =
+ readl(context_icn.base + NODE_ESRAM3_4_IN_0_ARB_1_LIMIT);
+ context_icn.esram34_in_arb_lim[1] =
+ readl(context_icn.base + NODE_ESRAM3_4_IN_1_ARB_1_LIMIT);
+ context_icn.esram34_in_arb_lim[2] =
+ readl(context_icn.base + NODE_ESRAM3_4_IN_2_ARB_1_LIMIT);
+
+ context_icn.esram34_out_prio =
+ readl(context_icn.base + NODE_ESRAM3_4_OUT_0_PRIORITY);
+}
+
+/*
+ * Restore ICN configuration registers
+ */
+void u5500_context_restore_icn(void)
+{
+	/* hibw1 */
+ writel(context_icn.hibw1_esram_in_pri[0],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_0_PRIORITY);
+ writel(context_icn.hibw1_esram_in_pri[1],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_1_PRIORITY);
+
+ writel(context_icn.hibw1_esram_in0_arb[0],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT);
+ writel(context_icn.hibw1_esram_in0_arb[1],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT);
+ writel(context_icn.hibw1_esram_in0_arb[2],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT);
+
+ writel(context_icn.hibw1_esram_in1_arb[0],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT);
+ writel(context_icn.hibw1_esram_in1_arb[1],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT);
+ writel(context_icn.hibw1_esram_in1_arb[2],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT);
+
+ writel(context_icn.hibw1_ddr_in_prio[0],
+ context_icn.base + NODE_HIBW1_DDR_IN_0_PRIORITY);
+ writel(context_icn.hibw1_ddr_in_prio[1],
+ context_icn.base + NODE_HIBW1_DDR_IN_1_PRIORITY);
+ writel(context_icn.hibw1_ddr_in_prio[2],
+ context_icn.base + NODE_HIBW1_DDR_IN_2_PRIORITY);
+
+ writel(context_icn.hibw1_ddr_in_limit[0],
+ context_icn.base + NODE_HIBW1_DDR_IN_0_LIMIT);
+ writel(context_icn.hibw1_ddr_in_limit[1],
+ context_icn.base + NODE_HIBW1_DDR_IN_1_LIMIT);
+ writel(context_icn.hibw1_ddr_in_limit[2],
+ context_icn.base + NODE_HIBW1_DDR_IN_2_LIMIT);
+
+ writel(context_icn.hibw1_ddr_out_prio_reg,
+ context_icn.base + NODE_HIBW1_DDR_OUT_0_PRIORITY);
+
+ /* hibw2 */
+ writel(context_icn.hibw2_esram_in_pri[0],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_0_PRIORITY);
+ writel(context_icn.hibw2_esram_in_pri[1],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_1_PRIORITY);
+
+ writel(context_icn.hibw2_esram_in0_arblimit[0],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT);
+ writel(context_icn.hibw2_esram_in0_arblimit[1],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT);
+ writel(context_icn.hibw2_esram_in0_arblimit[2],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT);
+
+ writel(context_icn.hibw2_esram_in1_arblimit[0],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT);
+ writel(context_icn.hibw2_esram_in1_arblimit[1],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT);
+ writel(context_icn.hibw2_esram_in1_arblimit[2],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT);
+
+ writel(context_icn.hibw2_ddr_in_prio[0],
+ context_icn.base + NODE_HIBW2_DDR_IN_0_PRIORITY);
+ writel(context_icn.hibw2_ddr_in_prio[1],
+ context_icn.base + NODE_HIBW2_DDR_IN_1_PRIORITY);
+ writel(context_icn.hibw2_ddr_in_prio[2],
+ context_icn.base + NODE_HIBW2_DDR_IN_2_PRIORITY);
+
+ writel(context_icn.hibw2_ddr_in_limit[0],
+ context_icn.base + NODE_HIBW2_DDR_IN_0_LIMIT);
+ writel(context_icn.hibw2_ddr_in_limit[1],
+ context_icn.base + NODE_HIBW2_DDR_IN_1_LIMIT);
+ writel(context_icn.hibw2_ddr_in_limit[2],
+ context_icn.base + NODE_HIBW2_DDR_IN_2_LIMIT);
+
+ writel(context_icn.hibw2_ddr_out_prio_reg,
+ context_icn.base + NODE_HIBW2_DDR_OUT_0_PRIORITY);
+
+ /* ESRAM0 */
+ writel(context_icn.esram_in_prio[0],
+ context_icn.base + NODE_ESRAM0_IN_0_PRIORITY);
+ writel(context_icn.esram_in_prio[1],
+ context_icn.base + NODE_ESRAM0_IN_1_PRIORITY);
+ writel(context_icn.esram_in_prio[2],
+ context_icn.base + NODE_ESRAM0_IN_2_PRIORITY);
+
+ writel(context_icn.esram_in_lim[0],
+ context_icn.base + NODE_ESRAM0_IN_0_LIMIT);
+ writel(context_icn.esram_in_lim[1],
+ context_icn.base + NODE_ESRAM0_IN_1_LIMIT);
+ writel(context_icn.esram_in_lim[2],
+ context_icn.base + NODE_ESRAM0_IN_2_LIMIT);
+
+ writel(context_icn.esram_out_prio_reg,
+ context_icn.base + NODE_ESRAM0_OUT_0_PRIORITY);
+
+ /* ESRAM1-2 */
+ writel(context_icn.esram12_in_prio[0],
+ context_icn.base + NODE_ESRAM1_2_IN_0_PRIORITY);
+ writel(context_icn.esram12_in_prio[1],
+ context_icn.base + NODE_ESRAM1_2_IN_1_PRIORITY);
+ writel(context_icn.esram12_in_prio[2],
+ context_icn.base + NODE_ESRAM1_2_IN_2_PRIORITY);
+
+ writel(context_icn.esram12_in_arb_lim[0],
+ context_icn.base + NODE_ESRAM1_2_IN_0_ARB_1_LIMIT);
+ writel(context_icn.esram12_in_arb_lim[1],
+ context_icn.base + NODE_ESRAM1_2_IN_1_ARB_1_LIMIT);
+ writel(context_icn.esram12_in_arb_lim[2],
+ context_icn.base + NODE_ESRAM1_2_IN_2_ARB_1_LIMIT);
+
+ writel(context_icn.esram12_out_prio_reg,
+ context_icn.base + NODE_ESRAM1_2_OUT_0_PRIORITY);
+
+ /* ESRAM3-4 */
+ writel(context_icn.esram34_in_prio[0],
+ context_icn.base + NODE_ESRAM3_4_IN_0_PRIORITY);
+ writel(context_icn.esram34_in_prio[1],
+ context_icn.base + NODE_ESRAM3_4_IN_1_PRIORITY);
+ writel(context_icn.esram34_in_prio[2],
+ context_icn.base + NODE_ESRAM3_4_IN_2_PRIORITY);
+
+ writel(context_icn.esram34_in_arb_lim[0],
+ context_icn.base + NODE_ESRAM3_4_IN_0_ARB_1_LIMIT);
+ writel(context_icn.esram34_in_arb_lim[1],
+ context_icn.base + NODE_ESRAM3_4_IN_1_ARB_1_LIMIT);
+ writel(context_icn.esram34_in_arb_lim[2],
+ context_icn.base + NODE_ESRAM3_4_IN_2_ARB_1_LIMIT);
+
+ writel(context_icn.esram34_out_prio,
+ context_icn.base + NODE_ESRAM3_4_OUT_0_PRIORITY);
+}
+
+void u5500_context_init(void)
+{
+ context_icn.base = ioremap(U5500_ICN_BASE, SZ_4K);
+}
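
One way to realize the TODO at the top of this file would be to drive the save/restore from a static offset table instead of one readl()/writel() pair per register. A sketch, not part of the patch (u5500_icn_offsets and icn_regs are invented names; the NODE_* offsets are the ones defined above):

/* Illustrative only: table-driven ICN context save/restore. */
static const u16 u5500_icn_offsets[] = {
	NODE_HIBW1_ESRAM_IN_0_PRIORITY,
	NODE_HIBW1_ESRAM_IN_1_PRIORITY,
	NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT,
	/* ... every offset listed at the top of this file ... */
	NODE_ESRAM3_4_OUT_0_PRIORITY,
};

static u32 icn_regs[ARRAY_SIZE(u5500_icn_offsets)];

static void u5500_icn_save_table(void __iomem *base)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(u5500_icn_offsets); i++)
		icn_regs[i] = readl(base + u5500_icn_offsets[i]);
}

static void u5500_icn_restore_table(void __iomem *base)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(u5500_icn_offsets); i++)
		writel(icn_regs[i], base + u5500_icn_offsets[i]);
}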
diff --git a/arch/arm/mach-ux500/pm/context-db8500.c b/arch/arm/mach-ux500/pm/context-db8500.c
new file mode 100644
index 00000000000..f43a2a81f8e
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/context-db8500.c
@@ -0,0 +1,467 @@
+/*
+ * Copyright (C) STMicroelectronics 2009
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Sundar Iyer <sundar.iyer@stericsson.com>
+ *
+ */
+
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+
+#include "context.h"
+
+/*
+ * ST-Interconnect context
+ */
+
+/* priority, bw limiter register offsets */
+#define NODE_HIBW1_ESRAM_IN_0_PRIORITY 0x00
+#define NODE_HIBW1_ESRAM_IN_1_PRIORITY 0x04
+#define NODE_HIBW1_ESRAM_IN_2_PRIORITY 0x08
+#define NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT 0x24
+#define NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT 0x28
+#define NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT 0x2C
+#define NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT 0x30
+#define NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT 0x34
+#define NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT 0x38
+#define NODE_HIBW1_ESRAM_IN_2_ARB_1_LIMIT 0x3C
+#define NODE_HIBW1_ESRAM_IN_2_ARB_2_LIMIT 0x40
+#define NODE_HIBW1_ESRAM_IN_2_ARB_3_LIMIT 0x44
+#define NODE_HIBW1_DDR_IN_0_PRIORITY 0x400
+#define NODE_HIBW1_DDR_IN_1_PRIORITY 0x404
+#define NODE_HIBW1_DDR_IN_2_PRIORITY 0x408
+#define NODE_HIBW1_DDR_IN_0_LIMIT 0x424
+#define NODE_HIBW1_DDR_IN_1_LIMIT 0x428
+#define NODE_HIBW1_DDR_IN_2_LIMIT 0x42C
+#define NODE_HIBW1_DDR_OUT_0_PRIORITY 0x430
+#define NODE_HIBW2_ESRAM_IN_0_PRIORITY 0x800
+#define NODE_HIBW2_ESRAM_IN_1_PRIORITY 0x804
+#define NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT 0x818
+#define NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT 0x81C
+#define NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT 0x820
+#define NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT 0x824
+#define NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT 0x828
+#define NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT 0x82C
+#define NODE_HIBW2_DDR_IN_0_PRIORITY 0xC00
+#define NODE_HIBW2_DDR_IN_1_PRIORITY 0xC04
+#define NODE_HIBW2_DDR_IN_2_PRIORITY 0xC08
+/* only in v1 */
+#define NODE_HIBW2_DDR_IN_3_PRIORITY 0xC0C
+/* address update between v1 and v2 */
+#define NODE_HIBW2_DDR_IN_0_LIMIT_V1 0xC30
+#define NODE_HIBW2_DDR_IN_1_LIMIT_V1 0xC34
+#define NODE_HIBW2_DDR_IN_0_LIMIT 0xC24
+#define NODE_HIBW2_DDR_IN_1_LIMIT 0xC28
+/* only in v2 */
+#define NODE_HIBW2_DDR_IN_2_LIMIT 0xC2C
+#define NODE_HIBW2_DDR_OUT_0_PRIORITY 0xC30
+#define NODE_ESRAM0_IN_0_PRIORITY 0x1000
+#define NODE_ESRAM0_IN_1_PRIORITY 0x1004
+#define NODE_ESRAM0_IN_2_PRIORITY 0x1008
+#define NODE_ESRAM0_IN_3_PRIORITY 0x100C
+#define NODE_ESRAM0_IN_0_LIMIT 0x1030
+#define NODE_ESRAM0_IN_1_LIMIT 0x1034
+#define NODE_ESRAM0_IN_2_LIMIT 0x1038
+#define NODE_ESRAM0_IN_3_LIMIT 0x103C
+/* common */
+#define NODE_ESRAM1_2_IN_0_PRIORITY 0x1400
+#define NODE_ESRAM1_2_IN_1_PRIORITY 0x1404
+#define NODE_ESRAM1_2_IN_2_PRIORITY 0x1408
+#define NODE_ESRAM1_2_IN_3_PRIORITY 0x140C
+#define NODE_ESRAM1_2_IN_0_ARB_1_LIMIT 0x1430
+#define NODE_ESRAM1_2_IN_0_ARB_2_LIMIT 0x1434
+#define NODE_ESRAM1_2_IN_1_ARB_1_LIMIT 0x1438
+#define NODE_ESRAM1_2_IN_1_ARB_2_LIMIT 0x143C
+#define NODE_ESRAM1_2_IN_2_ARB_1_LIMIT 0x1440
+#define NODE_ESRAM1_2_IN_2_ARB_2_LIMIT 0x1444
+#define NODE_ESRAM1_2_IN_3_ARB_1_LIMIT 0x1448
+#define NODE_ESRAM1_2_IN_3_ARB_2_LIMIT 0x144C
+#define NODE_ESRAM3_4_IN_0_PRIORITY 0x1800
+#define NODE_ESRAM3_4_IN_1_PRIORITY 0x1804
+#define NODE_ESRAM3_4_IN_2_PRIORITY 0x1808
+#define NODE_ESRAM3_4_IN_3_PRIORITY 0x180C
+#define NODE_ESRAM3_4_IN_0_ARB_1_LIMIT 0x1830
+#define NODE_ESRAM3_4_IN_0_ARB_2_LIMIT 0x1834
+#define NODE_ESRAM3_4_IN_1_ARB_1_LIMIT 0x1838
+#define NODE_ESRAM3_4_IN_1_ARB_2_LIMIT 0x183C
+#define NODE_ESRAM3_4_IN_2_ARB_1_LIMIT 0x1840
+#define NODE_ESRAM3_4_IN_2_ARB_2_LIMIT 0x1844
+#define NODE_ESRAM3_4_IN_3_ARB_1_LIMIT 0x1848
+#define NODE_ESRAM3_4_IN_3_ARB_2_LIMIT 0x184C
+
+static struct {
+ void __iomem *base;
+ u32 hibw1_esram_in_pri[3];
+ u32 hibw1_esram_in0_arb[3];
+ u32 hibw1_esram_in1_arb[3];
+ u32 hibw1_esram_in2_arb[3];
+ u32 hibw1_ddr_in_prio[3];
+ u32 hibw1_ddr_in_limit[3];
+ u32 hibw1_ddr_out_prio;
+
+ /* HiBw2 node registers */
+ u32 hibw2_esram_in_pri[2];
+ u32 hibw2_esram_in0_arblimit[3];
+ u32 hibw2_esram_in1_arblimit[3];
+ u32 hibw2_ddr_in_prio[4];
+ u32 hibw2_ddr_in_limit[4];
+ u32 hibw2_ddr_out_prio;
+
+ /* ESRAM node registers */
+ u32 esram_in_prio[4];
+ u32 esram_in_lim[4];
+ u32 esram0_in_prio[4];
+ u32 esram0_in_lim[4];
+ u32 esram12_in_prio[4];
+ u32 esram12_in_arb_lim[8];
+ u32 esram34_in_prio[4];
+ u32 esram34_in_arb_lim[8];
+} context_icn;
+
+/**
+ * u8500_context_save_icn() - save ICN context
+ *
+ */
+void u8500_context_save_icn(void)
+{
+	context_icn.hibw1_esram_in_pri[0] =
+		readl(context_icn.base + NODE_HIBW1_ESRAM_IN_0_PRIORITY);
+	context_icn.hibw1_esram_in_pri[1] =
+		readl(context_icn.base + NODE_HIBW1_ESRAM_IN_1_PRIORITY);
+	context_icn.hibw1_esram_in_pri[2] =
+		readl(context_icn.base + NODE_HIBW1_ESRAM_IN_2_PRIORITY);
+
+	context_icn.hibw1_esram_in0_arb[0] =
+		readl(context_icn.base + NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT);
+	context_icn.hibw1_esram_in0_arb[1] =
+		readl(context_icn.base + NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT);
+	context_icn.hibw1_esram_in0_arb[2] =
+		readl(context_icn.base + NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT);
+
+	context_icn.hibw1_esram_in1_arb[0] =
+		readl(context_icn.base + NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT);
+	context_icn.hibw1_esram_in1_arb[1] =
+		readl(context_icn.base + NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT);
+	context_icn.hibw1_esram_in1_arb[2] =
+		readl(context_icn.base + NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT);
+
+	context_icn.hibw1_esram_in2_arb[0] =
+		readl(context_icn.base + NODE_HIBW1_ESRAM_IN_2_ARB_1_LIMIT);
+	context_icn.hibw1_esram_in2_arb[1] =
+		readl(context_icn.base + NODE_HIBW1_ESRAM_IN_2_ARB_2_LIMIT);
+	context_icn.hibw1_esram_in2_arb[2] =
+		readl(context_icn.base + NODE_HIBW1_ESRAM_IN_2_ARB_3_LIMIT);
+
+	context_icn.hibw1_ddr_in_prio[0] =
+		readl(context_icn.base + NODE_HIBW1_DDR_IN_0_PRIORITY);
+	context_icn.hibw1_ddr_in_prio[1] =
+		readl(context_icn.base + NODE_HIBW1_DDR_IN_1_PRIORITY);
+	context_icn.hibw1_ddr_in_prio[2] =
+		readl(context_icn.base + NODE_HIBW1_DDR_IN_2_PRIORITY);
+
+	context_icn.hibw1_ddr_in_limit[0] =
+		readl(context_icn.base + NODE_HIBW1_DDR_IN_0_LIMIT);
+	context_icn.hibw1_ddr_in_limit[1] =
+		readl(context_icn.base + NODE_HIBW1_DDR_IN_1_LIMIT);
+	context_icn.hibw1_ddr_in_limit[2] =
+		readl(context_icn.base + NODE_HIBW1_DDR_IN_2_LIMIT);
+
+	context_icn.hibw1_ddr_out_prio =
+		readl(context_icn.base + NODE_HIBW1_DDR_OUT_0_PRIORITY);
+
+	context_icn.hibw2_esram_in_pri[0] =
+		readl(context_icn.base + NODE_HIBW2_ESRAM_IN_0_PRIORITY);
+	context_icn.hibw2_esram_in_pri[1] =
+		readl(context_icn.base + NODE_HIBW2_ESRAM_IN_1_PRIORITY);
+
+	context_icn.hibw2_esram_in0_arblimit[0] =
+		readl(context_icn.base + NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT);
+	context_icn.hibw2_esram_in0_arblimit[1] =
+		readl(context_icn.base + NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT);
+	context_icn.hibw2_esram_in0_arblimit[2] =
+		readl(context_icn.base + NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT);
+
+	context_icn.hibw2_esram_in1_arblimit[0] =
+		readl(context_icn.base + NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT);
+	context_icn.hibw2_esram_in1_arblimit[1] =
+		readl(context_icn.base + NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT);
+	context_icn.hibw2_esram_in1_arblimit[2] =
+		readl(context_icn.base + NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT);
+
+	context_icn.hibw2_ddr_in_prio[0] =
+		readl(context_icn.base + NODE_HIBW2_DDR_IN_0_PRIORITY);
+	context_icn.hibw2_ddr_in_prio[1] =
+		readl(context_icn.base + NODE_HIBW2_DDR_IN_1_PRIORITY);
+	context_icn.hibw2_ddr_in_prio[2] =
+		readl(context_icn.base + NODE_HIBW2_DDR_IN_2_PRIORITY);
+
+	if (cpu_is_u8500v1()) {
+		context_icn.hibw2_ddr_in_prio[3] =
+			readl(context_icn.base + NODE_HIBW2_DDR_IN_3_PRIORITY);
+
+		context_icn.hibw2_ddr_in_limit[0] =
+			readl(context_icn.base + NODE_HIBW2_DDR_IN_0_LIMIT_V1);
+		context_icn.hibw2_ddr_in_limit[1] =
+			readl(context_icn.base + NODE_HIBW2_DDR_IN_1_LIMIT_V1);
+	}
+
+	if (cpu_is_u8500v2()) {
+		context_icn.hibw2_ddr_in_limit[0] =
+			readl(context_icn.base + NODE_HIBW2_DDR_IN_0_LIMIT);
+		context_icn.hibw2_ddr_in_limit[1] =
+			readl(context_icn.base + NODE_HIBW2_DDR_IN_1_LIMIT);
+
+		context_icn.hibw2_ddr_in_limit[2] =
+			readl(context_icn.base + NODE_HIBW2_DDR_IN_2_LIMIT);
+
+		context_icn.hibw2_ddr_out_prio =
+			readl(context_icn.base +
+			      NODE_HIBW2_DDR_OUT_0_PRIORITY);
+
+		context_icn.esram0_in_prio[0] =
+			readl(context_icn.base + NODE_ESRAM0_IN_0_PRIORITY);
+		context_icn.esram0_in_prio[1] =
+			readl(context_icn.base + NODE_ESRAM0_IN_1_PRIORITY);
+		context_icn.esram0_in_prio[2] =
+			readl(context_icn.base + NODE_ESRAM0_IN_2_PRIORITY);
+		context_icn.esram0_in_prio[3] =
+			readl(context_icn.base + NODE_ESRAM0_IN_3_PRIORITY);
+
+		context_icn.esram0_in_lim[0] =
+			readl(context_icn.base + NODE_ESRAM0_IN_0_LIMIT);
+		context_icn.esram0_in_lim[1] =
+			readl(context_icn.base + NODE_ESRAM0_IN_1_LIMIT);
+		context_icn.esram0_in_lim[2] =
+			readl(context_icn.base + NODE_ESRAM0_IN_2_LIMIT);
+		context_icn.esram0_in_lim[3] =
+			readl(context_icn.base + NODE_ESRAM0_IN_3_LIMIT);
+	}
+	context_icn.esram12_in_prio[0] =
+		readl(context_icn.base + NODE_ESRAM1_2_IN_0_PRIORITY);
+	context_icn.esram12_in_prio[1] =
+		readl(context_icn.base + NODE_ESRAM1_2_IN_1_PRIORITY);
+	context_icn.esram12_in_prio[2] =
+		readl(context_icn.base + NODE_ESRAM1_2_IN_2_PRIORITY);
+	context_icn.esram12_in_prio[3] =
+		readl(context_icn.base + NODE_ESRAM1_2_IN_3_PRIORITY);
+
+	context_icn.esram12_in_arb_lim[0] =
+		readl(context_icn.base + NODE_ESRAM1_2_IN_0_ARB_1_LIMIT);
+	context_icn.esram12_in_arb_lim[1] =
+		readl(context_icn.base + NODE_ESRAM1_2_IN_0_ARB_2_LIMIT);
+	context_icn.esram12_in_arb_lim[2] =
+		readl(context_icn.base + NODE_ESRAM1_2_IN_1_ARB_1_LIMIT);
+	context_icn.esram12_in_arb_lim[3] =
+		readl(context_icn.base + NODE_ESRAM1_2_IN_1_ARB_2_LIMIT);
+	context_icn.esram12_in_arb_lim[4] =
+		readl(context_icn.base + NODE_ESRAM1_2_IN_2_ARB_1_LIMIT);
+	context_icn.esram12_in_arb_lim[5] =
+		readl(context_icn.base + NODE_ESRAM1_2_IN_2_ARB_2_LIMIT);
+	context_icn.esram12_in_arb_lim[6] =
+		readl(context_icn.base + NODE_ESRAM1_2_IN_3_ARB_1_LIMIT);
+	context_icn.esram12_in_arb_lim[7] =
+		readl(context_icn.base + NODE_ESRAM1_2_IN_3_ARB_2_LIMIT);
+
+	context_icn.esram34_in_prio[0] =
+		readl(context_icn.base + NODE_ESRAM3_4_IN_0_PRIORITY);
+	context_icn.esram34_in_prio[1] =
+		readl(context_icn.base + NODE_ESRAM3_4_IN_1_PRIORITY);
+	context_icn.esram34_in_prio[2] =
+		readl(context_icn.base + NODE_ESRAM3_4_IN_2_PRIORITY);
+	context_icn.esram34_in_prio[3] =
+		readl(context_icn.base + NODE_ESRAM3_4_IN_3_PRIORITY);
+
+	context_icn.esram34_in_arb_lim[0] =
+		readl(context_icn.base + NODE_ESRAM3_4_IN_0_ARB_1_LIMIT);
+	context_icn.esram34_in_arb_lim[1] =
+		readl(context_icn.base + NODE_ESRAM3_4_IN_0_ARB_2_LIMIT);
+	context_icn.esram34_in_arb_lim[2] =
+		readl(context_icn.base + NODE_ESRAM3_4_IN_1_ARB_1_LIMIT);
+	context_icn.esram34_in_arb_lim[3] =
+		readl(context_icn.base + NODE_ESRAM3_4_IN_1_ARB_2_LIMIT);
+	context_icn.esram34_in_arb_lim[4] =
+		readl(context_icn.base + NODE_ESRAM3_4_IN_2_ARB_1_LIMIT);
+	context_icn.esram34_in_arb_lim[5] =
+		readl(context_icn.base + NODE_ESRAM3_4_IN_2_ARB_2_LIMIT);
+	context_icn.esram34_in_arb_lim[6] =
+		readl(context_icn.base + NODE_ESRAM3_4_IN_3_ARB_1_LIMIT);
+	context_icn.esram34_in_arb_lim[7] =
+		readl(context_icn.base + NODE_ESRAM3_4_IN_3_ARB_2_LIMIT);
+}
+
+/**
+ * u8500_context_restore_icn() - restore ICN context
+ *
+ */
+void u8500_context_restore_icn(void)
+{
+ writel(context_icn.hibw1_esram_in_pri[0],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_0_PRIORITY);
+ writel(context_icn.hibw1_esram_in_pri[1],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_1_PRIORITY);
+ writel(context_icn.hibw1_esram_in_pri[2],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_2_PRIORITY);
+
+ writel(context_icn.hibw1_esram_in0_arb[0],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_0_ARB_1_LIMIT);
+ writel(context_icn.hibw1_esram_in0_arb[1],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_0_ARB_2_LIMIT);
+ writel(context_icn.hibw1_esram_in0_arb[2],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_0_ARB_3_LIMIT);
+
+ writel(context_icn.hibw1_esram_in1_arb[0],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_1_ARB_1_LIMIT);
+ writel(context_icn.hibw1_esram_in1_arb[1],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_1_ARB_2_LIMIT);
+ writel(context_icn.hibw1_esram_in1_arb[2],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_1_ARB_3_LIMIT);
+
+ writel(context_icn.hibw1_esram_in2_arb[0],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_2_ARB_1_LIMIT);
+ writel(context_icn.hibw1_esram_in2_arb[1],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_2_ARB_2_LIMIT);
+ writel(context_icn.hibw1_esram_in2_arb[2],
+ context_icn.base + NODE_HIBW1_ESRAM_IN_2_ARB_3_LIMIT);
+
+ writel(context_icn.hibw1_ddr_in_prio[0],
+ context_icn.base + NODE_HIBW1_DDR_IN_0_PRIORITY);
+ writel(context_icn.hibw1_ddr_in_prio[1],
+ context_icn.base + NODE_HIBW1_DDR_IN_1_PRIORITY);
+ writel(context_icn.hibw1_ddr_in_prio[2],
+ context_icn.base + NODE_HIBW1_DDR_IN_2_PRIORITY);
+
+ writel(context_icn.hibw1_ddr_in_limit[0],
+ context_icn.base + NODE_HIBW1_DDR_IN_0_LIMIT);
+ writel(context_icn.hibw1_ddr_in_limit[1],
+ context_icn.base + NODE_HIBW1_DDR_IN_1_LIMIT);
+ writel(context_icn.hibw1_ddr_in_limit[2],
+ context_icn.base + NODE_HIBW1_DDR_IN_2_LIMIT);
+
+ writel(context_icn.hibw1_ddr_out_prio,
+ context_icn.base + NODE_HIBW1_DDR_OUT_0_PRIORITY);
+
+ writel(context_icn.hibw2_esram_in_pri[0],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_0_PRIORITY);
+ writel(context_icn.hibw2_esram_in_pri[1],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_1_PRIORITY);
+
+ writel(context_icn.hibw2_esram_in0_arblimit[0],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_0_ARB_1_LIMIT);
+ writel(context_icn.hibw2_esram_in0_arblimit[1],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_0_ARB_2_LIMIT);
+ writel(context_icn.hibw2_esram_in0_arblimit[2],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_0_ARB_3_LIMIT);
+
+ writel(context_icn.hibw2_esram_in1_arblimit[0],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_1_ARB_1_LIMIT);
+ writel(context_icn.hibw2_esram_in1_arblimit[1],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_1_ARB_2_LIMIT);
+ writel(context_icn.hibw2_esram_in1_arblimit[2],
+ context_icn.base + NODE_HIBW2_ESRAM_IN_1_ARB_3_LIMIT);
+
+ writel(context_icn.hibw2_ddr_in_prio[0],
+ context_icn.base + NODE_HIBW2_DDR_IN_0_PRIORITY);
+ writel(context_icn.hibw2_ddr_in_prio[1],
+ context_icn.base + NODE_HIBW2_DDR_IN_1_PRIORITY);
+ writel(context_icn.hibw2_ddr_in_prio[2],
+ context_icn.base + NODE_HIBW2_DDR_IN_2_PRIORITY);
+ if (cpu_is_u8500v1()) {
+ writel(context_icn.hibw2_ddr_in_prio[3],
+ context_icn.base + NODE_HIBW2_DDR_IN_3_PRIORITY);
+ writel(context_icn.hibw2_ddr_in_limit[0],
+ context_icn.base + NODE_HIBW2_DDR_IN_0_LIMIT_V1);
+ writel(context_icn.hibw2_ddr_in_limit[1],
+ context_icn.base + NODE_HIBW2_DDR_IN_1_LIMIT_V1);
+ }
+ if (cpu_is_u8500v2()) {
+ writel(context_icn.hibw2_ddr_in_limit[0],
+ context_icn.base + NODE_HIBW2_DDR_IN_0_LIMIT);
+ writel(context_icn.hibw2_ddr_in_limit[1],
+ context_icn.base + NODE_HIBW2_DDR_IN_1_LIMIT);
+ writel(context_icn.hibw2_ddr_in_limit[2],
+ context_icn.base + NODE_HIBW2_DDR_IN_2_LIMIT);
+ writel(context_icn.hibw2_ddr_out_prio,
+ context_icn.base + NODE_HIBW2_DDR_OUT_0_PRIORITY);
+
+ writel(context_icn.esram0_in_prio[0],
+ context_icn.base + NODE_ESRAM0_IN_0_PRIORITY);
+ writel(context_icn.esram0_in_prio[1],
+ context_icn.base + NODE_ESRAM0_IN_1_PRIORITY);
+ writel(context_icn.esram0_in_prio[2],
+ context_icn.base + NODE_ESRAM0_IN_2_PRIORITY);
+ writel(context_icn.esram0_in_prio[3],
+ context_icn.base + NODE_ESRAM0_IN_3_PRIORITY);
+
+ writel(context_icn.esram0_in_lim[0],
+ context_icn.base + NODE_ESRAM0_IN_0_LIMIT);
+ writel(context_icn.esram0_in_lim[1],
+ context_icn.base + NODE_ESRAM0_IN_1_LIMIT);
+ writel(context_icn.esram0_in_lim[2],
+ context_icn.base + NODE_ESRAM0_IN_2_LIMIT);
+ writel(context_icn.esram0_in_lim[3],
+ context_icn.base + NODE_ESRAM0_IN_3_LIMIT);
+ }
+
+ writel(context_icn.esram12_in_prio[0],
+ context_icn.base + NODE_ESRAM1_2_IN_0_PRIORITY);
+ writel(context_icn.esram12_in_prio[1],
+ context_icn.base + NODE_ESRAM1_2_IN_1_PRIORITY);
+ writel(context_icn.esram12_in_prio[2],
+ context_icn.base + NODE_ESRAM1_2_IN_2_PRIORITY);
+ writel(context_icn.esram12_in_prio[3],
+ context_icn.base + NODE_ESRAM1_2_IN_3_PRIORITY);
+
+ writel(context_icn.esram12_in_arb_lim[0],
+ context_icn.base + NODE_ESRAM1_2_IN_0_ARB_1_LIMIT);
+ writel(context_icn.esram12_in_arb_lim[1],
+ context_icn.base + NODE_ESRAM1_2_IN_0_ARB_2_LIMIT);
+ writel(context_icn.esram12_in_arb_lim[2],
+ context_icn.base + NODE_ESRAM1_2_IN_1_ARB_1_LIMIT);
+ writel(context_icn.esram12_in_arb_lim[3],
+ context_icn.base + NODE_ESRAM1_2_IN_1_ARB_2_LIMIT);
+ writel(context_icn.esram12_in_arb_lim[4],
+ context_icn.base + NODE_ESRAM1_2_IN_2_ARB_1_LIMIT);
+ writel(context_icn.esram12_in_arb_lim[5],
+ context_icn.base + NODE_ESRAM1_2_IN_2_ARB_2_LIMIT);
+ writel(context_icn.esram12_in_arb_lim[6],
+ context_icn.base + NODE_ESRAM1_2_IN_3_ARB_1_LIMIT);
+ writel(context_icn.esram12_in_arb_lim[7],
+ context_icn.base + NODE_ESRAM1_2_IN_3_ARB_2_LIMIT);
+
+ writel(context_icn.esram34_in_prio[0],
+ context_icn.base + NODE_ESRAM3_4_IN_0_PRIORITY);
+ writel(context_icn.esram34_in_prio[1],
+ context_icn.base + NODE_ESRAM3_4_IN_1_PRIORITY);
+ writel(context_icn.esram34_in_prio[2],
+ context_icn.base + NODE_ESRAM3_4_IN_2_PRIORITY);
+ writel(context_icn.esram34_in_prio[3],
+ context_icn.base + NODE_ESRAM3_4_IN_3_PRIORITY);
+
+ writel(context_icn.esram34_in_arb_lim[0],
+ context_icn.base + NODE_ESRAM3_4_IN_0_ARB_1_LIMIT);
+ writel(context_icn.esram34_in_arb_lim[1],
+ context_icn.base + NODE_ESRAM3_4_IN_0_ARB_2_LIMIT);
+ writel(context_icn.esram34_in_arb_lim[2],
+ context_icn.base + NODE_ESRAM3_4_IN_1_ARB_1_LIMIT);
+ writel(context_icn.esram34_in_arb_lim[3],
+ context_icn.base + NODE_ESRAM3_4_IN_1_ARB_2_LIMIT);
+ writel(context_icn.esram34_in_arb_lim[4],
+ context_icn.base + NODE_ESRAM3_4_IN_2_ARB_1_LIMIT);
+ writel(context_icn.esram34_in_arb_lim[5],
+ context_icn.base + NODE_ESRAM3_4_IN_2_ARB_2_LIMIT);
+ writel(context_icn.esram34_in_arb_lim[6],
+ context_icn.base + NODE_ESRAM3_4_IN_3_ARB_1_LIMIT);
+ writel(context_icn.esram34_in_arb_lim[7],
+ context_icn.base + NODE_ESRAM3_4_IN_3_ARB_2_LIMIT);
+}
+
+void u8500_context_init(void)
+{
+ context_icn.base = ioremap(U8500_ICN_BASE, SZ_8K);
+}
diff --git a/arch/arm/mach-ux500/pm/context.c b/arch/arm/mach-ux500/pm/context.c
new file mode 100644
index 00000000000..48bb03eafb1
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/context.c
@@ -0,0 +1,740 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010-2011
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>,
+ * Rickard Andersson <rickard.andersson@stericsson.com>,
+ * Jonas Aaberg <jonas.aberg@stericsson.com>,
+ * Sundar Iyer for ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+
+#include <mach/hardware.h>
+#include <mach/irqs.h>
+#include <mach/scu.h>
+
+#include "context.h"
+
+/*
+ * TODO:
+ * - Use the "UX500*"-macros instead where possible
+ */
+
+#define U8500_BACKUPRAM_SIZE SZ_64K
+
+#define U8500_PUBLIC_BOOT_ROM_BASE (U8500_BOOT_ROM_BASE + 0x17000)
+
+/* Special dedicated addresses in backup RAM */
+#define U8500_EXT_RAM_LOC_BACKUPRAM_ADDR 0x80151FDC
+#define U8500_CPU0_CP15_CR_BACKUPRAM_ADDR 0x80151F80
+#define U8500_CPU1_CP15_CR_BACKUPRAM_ADDR 0x80151FA0
+
+/* For v1.x */
+#define U8500_CPU0_BACKUPRAM_ADDR_BACKUPRAM_LOG_ADDR 0x80151FD8
+#define U8500_CPU1_BACKUPRAM_ADDR_BACKUPRAM_LOG_ADDR 0x80151FE0
+
+/* For v2.0 and later */
+#define U8500_CPU0_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR 0x80151FD8
+#define U8500_CPU1_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR 0x80151FE0
+
+#define GIC_DIST_ENABLE_NS 0x0
+
+/* 32 interrupts fit in 4 bytes */
+#define GIC_DIST_ENABLE_SET_COMMON_NUM ((DBX500_NR_INTERNAL_IRQS - \
+ IRQ_SHPI_START) / 32)
+#define GIC_DIST_ENABLE_SET_CPU_NUM (IRQ_SHPI_START / 32)
+#define GIC_DIST_ENABLE_SET_SPI0 GIC_DIST_ENABLE_SET
+#define GIC_DIST_ENABLE_SET_SPI32 (GIC_DIST_ENABLE_SET + IRQ_SHPI_START / 8)
+
+#define GIC_DIST_PRI_COMMON_NUM ((DBX500_NR_INTERNAL_IRQS - IRQ_SHPI_START) / 4)
+#define GIC_DIST_PRI_CPU_NUM (IRQ_SHPI_START / 4)
+#define GIC_DIST_PRI_SPI0 GIC_DIST_PRI
+#define GIC_DIST_PRI_SPI32 (GIC_DIST_PRI + IRQ_SHPI_START)
+
+#define GIC_DIST_SPI_TARGET_COMMON_NUM ((DBX500_NR_INTERNAL_IRQS - \
+ IRQ_SHPI_START) / 4)
+#define GIC_DIST_SPI_TARGET_CPU_NUM (IRQ_SHPI_START / 4)
+#define GIC_DIST_SPI_TARGET_SPI0 GIC_DIST_TARGET
+#define GIC_DIST_SPI_TARGET_SPI32 (GIC_DIST_TARGET + IRQ_SHPI_START)
+
+/* 16 interrupts per 4 bytes */
+#define GIC_DIST_CONFIG_COMMON_NUM ((DBX500_NR_INTERNAL_IRQS - IRQ_SHPI_START) \
+ / 16)
+#define GIC_DIST_CONFIG_CPU_NUM (IRQ_SHPI_START / 16)
+#define GIC_DIST_CONFIG_SPI0 GIC_DIST_CONFIG
+#define GIC_DIST_CONFIG_SPI32 (GIC_DIST_CONFIG + IRQ_SHPI_START / 4)
+
+/* TODO! Move STM reg offsets to suitable place */
+#define STM_CR_OFFSET 0x00
+#define STM_MMC_OFFSET 0x08
+#define STM_TER_OFFSET 0x10
+
+#define TPIU_PORT_SIZE 0x4
+#define TPIU_TRIGGER_COUNTER 0x104
+#define TPIU_TRIGGER_MULTIPLIER 0x108
+#define TPIU_CURRENT_TEST_PATTERN 0x204
+#define TPIU_TEST_PATTERN_REPEAT 0x208
+#define TPIU_FORMATTER 0x304
+#define TPIU_FORMATTER_SYNC 0x308
+#define TPIU_LOCK_ACCESS_REGISTER 0xFB0
+
+#define TPIU_UNLOCK_CODE 0xc5acce55
+
+#define SCU_FILTER_STARTADDR 0x40
+#define SCU_FILTER_ENDADDR 0x44
+#define SCU_ACCESS_CTRL_SAC 0x50
+
+/*
+ * Periph clock cluster context
+ */
+#define PRCC_BCK_EN 0x00
+#define PRCC_KCK_EN 0x08
+#define PRCC_BCK_STATUS 0x10
+#define PRCC_KCK_STATUS 0x14
+
+/* The context of the Trace Port Interface Unit (TPIU) */
+static struct {
+ void __iomem *base;
+ u32 port_size;
+ u32 trigger_counter;
+ u32 trigger_multiplier;
+ u32 current_test_pattern;
+ u32 test_pattern_repeat;
+ u32 formatter;
+ u32 formatter_sync;
+} context_tpiu;
+
+static struct {
+ void __iomem *base;
+ u32 cr;
+ u32 mmc;
+ u32 ter;
+} context_stm_ape;
+
+struct context_gic_cpu {
+ void __iomem *base;
+ u32 ctrl;
+ u32 primask;
+ u32 binpoint;
+};
+static DEFINE_PER_CPU(struct context_gic_cpu, context_gic_cpu);
+
+static struct {
+ void __iomem *base;
+ u32 ns;
+	u32 enable_set[GIC_DIST_ENABLE_SET_COMMON_NUM]; /* IRQ 32 to 159 */
+ u32 priority_level[GIC_DIST_PRI_COMMON_NUM];
+ u32 spi_target[GIC_DIST_SPI_TARGET_COMMON_NUM];
+ u32 config[GIC_DIST_CONFIG_COMMON_NUM];
+} context_gic_dist_common;
+
+struct context_gic_dist_cpu {
+ void __iomem *base;
+ u32 enable_set[GIC_DIST_ENABLE_SET_CPU_NUM]; /* IRQ 0 to 31 */
+ u32 priority_level[GIC_DIST_PRI_CPU_NUM];
+ u32 spi_target[GIC_DIST_SPI_TARGET_CPU_NUM];
+ u32 config[GIC_DIST_CONFIG_CPU_NUM];
+};
+static DEFINE_PER_CPU(struct context_gic_dist_cpu, context_gic_dist_cpu);
+
+static struct {
+ void __iomem *base;
+ u32 ctrl;
+ u32 cpu_pwrstatus;
+ u32 inv_all_nonsecure;
+ u32 filter_start_addr;
+ u32 filter_end_addr;
+ u32 access_ctrl_sac;
+} context_scu;
+
+#define UX500_NR_PRCC_BANKS 5
+static struct {
+ void __iomem *base;
+ u32 bus_clk;
+ u32 kern_clk;
+} context_prcc[UX500_NR_PRCC_BANKS];
+
+static u32 backup_sram_storage[NR_CPUS] = {
+ IO_ADDRESS(U8500_CPU0_CP15_CR_BACKUPRAM_ADDR),
+ IO_ADDRESS(U8500_CPU1_CP15_CR_BACKUPRAM_ADDR),
+};
+
+/*
+ * Stacks and stack pointers
+ */
+static DEFINE_PER_CPU(u32, varm_registers_backup_stack[1024]);
+static DEFINE_PER_CPU(u32 *, varm_registers_pointer);
+
+static DEFINE_PER_CPU(u32, varm_cp15_backup_stack[1024]);
+static DEFINE_PER_CPU(u32 *, varm_cp15_pointer);
+
+static ATOMIC_NOTIFIER_HEAD(context_ape_notifier_list);
+static ATOMIC_NOTIFIER_HEAD(context_arm_notifier_list);
+
+/*
+ * Register a simple callback for handling vape context save/restore
+ */
+int context_ape_notifier_register(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&context_ape_notifier_list, nb);
+}
+EXPORT_SYMBOL(context_ape_notifier_register);
+
+/*
+ * Remove a previously registered callback
+ */
+int context_ape_notifier_unregister(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&context_ape_notifier_list,
+ nb);
+}
+EXPORT_SYMBOL(context_ape_notifier_unregister);
+
+/*
+ * Register a simple callback for handling varm context save/restore
+ */
+int context_arm_notifier_register(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&context_arm_notifier_list, nb);
+}
+EXPORT_SYMBOL(context_arm_notifier_register);
+
+/*
+ * Remove a previously registered callback
+ */
+int context_arm_notifier_unregister(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&context_arm_notifier_list,
+ nb);
+}
+EXPORT_SYMBOL(context_arm_notifier_unregister);
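
A sketch of how a client driver might hook these chains (the foo_* names and register are invented; the notifier functions and CONTEXT_APE_* actions are the ones defined in this patch):

/* Illustrative only, not part of the patch. */
#include <linux/io.h>
#include <linux/notifier.h>
#include "context.h"

#define FOO_CTRL	0x0		/* invented register offset */
static void __iomem *foo_base;		/* mapped at probe time (not shown) */
static u32 foo_saved_ctrl;

static int foo_context_notify(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	/* Called from an atomic notifier chain: must not sleep. */
	if (action == CONTEXT_APE_SAVE)
		foo_saved_ctrl = readl(foo_base + FOO_CTRL);
	else if (action == CONTEXT_APE_RESTORE)
		writel(foo_saved_ctrl, foo_base + FOO_CTRL);
	return NOTIFY_OK;
}

static struct notifier_block foo_context_nb = {
	.notifier_call = foo_context_notify,
};

/* At init: context_ape_notifier_register(&foo_context_nb); */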
+
+static void save_prcc(void)
+{
+ int i;
+
+ for (i = 0; i < UX500_NR_PRCC_BANKS; i++) {
+ context_prcc[i].bus_clk =
+ readl(context_prcc[i].base + PRCC_BCK_STATUS);
+ context_prcc[i].kern_clk =
+ readl(context_prcc[i].base + PRCC_KCK_STATUS);
+ }
+}
+
+static void restore_prcc(void)
+{
+ int i;
+
+ for (i = 0; i < UX500_NR_PRCC_BANKS; i++) {
+ writel(context_prcc[i].bus_clk,
+ context_prcc[i].base + PRCC_BCK_EN);
+ writel(context_prcc[i].kern_clk,
+ context_prcc[i].base + PRCC_KCK_EN);
+ }
+}
+
+static void save_stm_ape(void)
+{
+ /*
+ * TODO: Check with PRCMU developers how STM is handled by PRCMU
+ * firmware. According to DB5500 design spec there is a "flush"
+ * mechanism supposed to be used by the PRCMU before power down,
+ * PRCMU fw might save/restore the following three registers
+ * at the same time.
+ */
+ context_stm_ape.cr = readl(context_stm_ape.base +
+ STM_CR_OFFSET);
+ context_stm_ape.mmc = readl(context_stm_ape.base +
+ STM_MMC_OFFSET);
+ context_stm_ape.ter = readl(context_stm_ape.base +
+ STM_TER_OFFSET);
+}
+
+static void restore_stm_ape(void)
+{
+ writel(context_stm_ape.ter,
+ context_stm_ape.base + STM_TER_OFFSET);
+ writel(context_stm_ape.mmc,
+ context_stm_ape.base + STM_MMC_OFFSET);
+ writel(context_stm_ape.cr,
+ context_stm_ape.base + STM_CR_OFFSET);
+}
+
+/*
+ * Save the context of the Trace Port Interface Unit (TPIU).
+ * Saving/restoring is needed for the PTM tracing to work together
+ * with the sleep states ApSleep and ApDeepSleep.
+ */
+static void save_tpiu(void)
+{
+ context_tpiu.port_size = readl(context_tpiu.base +
+ TPIU_PORT_SIZE);
+ context_tpiu.trigger_counter = readl(context_tpiu.base +
+ TPIU_TRIGGER_COUNTER);
+ context_tpiu.trigger_multiplier = readl(context_tpiu.base +
+ TPIU_TRIGGER_MULTIPLIER);
+ context_tpiu.current_test_pattern = readl(context_tpiu.base +
+ TPIU_CURRENT_TEST_PATTERN);
+ context_tpiu.test_pattern_repeat = readl(context_tpiu.base +
+ TPIU_TEST_PATTERN_REPEAT);
+ context_tpiu.formatter = readl(context_tpiu.base +
+ TPIU_FORMATTER);
+ context_tpiu.formatter_sync = readl(context_tpiu.base +
+ TPIU_FORMATTER_SYNC);
+}
+
+/*
+ * Restore the context of the Trace Port Interface Unit (TPIU).
+ * Saving/restoring is needed for the PTM tracing to work together
+ * with the sleep states ApSleep and ApDeepSleep.
+ */
+static void restore_tpiu(void)
+{
+ writel(TPIU_UNLOCK_CODE,
+ context_tpiu.base + TPIU_LOCK_ACCESS_REGISTER);
+
+ writel(context_tpiu.port_size,
+ context_tpiu.base + TPIU_PORT_SIZE);
+ writel(context_tpiu.trigger_counter,
+ context_tpiu.base + TPIU_TRIGGER_COUNTER);
+ writel(context_tpiu.trigger_multiplier,
+ context_tpiu.base + TPIU_TRIGGER_MULTIPLIER);
+ writel(context_tpiu.current_test_pattern,
+ context_tpiu.base + TPIU_CURRENT_TEST_PATTERN);
+ writel(context_tpiu.test_pattern_repeat,
+ context_tpiu.base + TPIU_TEST_PATTERN_REPEAT);
+ writel(context_tpiu.formatter,
+ context_tpiu.base + TPIU_FORMATTER);
+ writel(context_tpiu.formatter_sync,
+ context_tpiu.base + TPIU_FORMATTER_SYNC);
+}
+
+/*
+ * Save GIC CPU IF registers
+ *
+ * This is per cpu so it needs to be called for each one.
+ */
+
+static void save_gic_if_cpu(struct context_gic_cpu *c_gic_cpu)
+{
+ c_gic_cpu->ctrl = readl(c_gic_cpu->base + GIC_CPU_CTRL);
+ c_gic_cpu->primask = readl(c_gic_cpu->base + GIC_CPU_PRIMASK);
+ c_gic_cpu->binpoint = readl(c_gic_cpu->base + GIC_CPU_BINPOINT);
+}
+
+/*
+ * Restore GIC CPU IF registers
+ *
+ * This is per cpu so it needs to be called for each one.
+ */
+static void restore_gic_if_cpu(struct context_gic_cpu *c_gic_cpu)
+{
+ writel(c_gic_cpu->ctrl, c_gic_cpu->base + GIC_CPU_CTRL);
+ writel(c_gic_cpu->primask, c_gic_cpu->base + GIC_CPU_PRIMASK);
+ writel(c_gic_cpu->binpoint, c_gic_cpu->base + GIC_CPU_BINPOINT);
+}
+
+/*
+ * Save GIC Distributor Common registers
+ *
+ * This context is common. Only one CPU needs to call it.
+ *
+ * Save SPI (Shared Peripheral Interrupt) settings, IRQ 32-159.
+ */
+
+static void save_gic_dist_common(void)
+{
+ int i;
+
+ context_gic_dist_common.ns = readl(context_gic_dist_common.base +
+ GIC_DIST_ENABLE_NS);
+
+ for (i = 0; i < GIC_DIST_ENABLE_SET_COMMON_NUM; i++)
+ context_gic_dist_common.enable_set[i] =
+ readl(context_gic_dist_common.base +
+ GIC_DIST_ENABLE_SET_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_PRI_COMMON_NUM; i++)
+ context_gic_dist_common.priority_level[i] =
+ readl(context_gic_dist_common.base +
+ GIC_DIST_PRI_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_SPI_TARGET_COMMON_NUM; i++)
+ context_gic_dist_common.spi_target[i] =
+ readl(context_gic_dist_common.base +
+ GIC_DIST_SPI_TARGET_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_CONFIG_COMMON_NUM; i++)
+		context_gic_dist_common.config[i] =
+ readl(context_gic_dist_common.base +
+ GIC_DIST_CONFIG_SPI32 + i * 4);
+}
+
+/*
+ * Restore GIC Distributor Common registers
+ *
+ * This context is common. Only one CPU needs to call it.
+ *
+ * Restore SPI (Shared Peripheral Interrupt) settings, IRQ 32-159.
+ */
+static void restore_gic_dist_common(void)
+{
+	int i;
+
+ for (i = 0; i < GIC_DIST_CONFIG_COMMON_NUM; i++)
+		writel(context_gic_dist_common.config[i],
+ context_gic_dist_common.base +
+ GIC_DIST_CONFIG_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_SPI_TARGET_COMMON_NUM; i++)
+ writel(context_gic_dist_common.spi_target[i],
+ context_gic_dist_common.base +
+ GIC_DIST_SPI_TARGET_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_PRI_COMMON_NUM; i++)
+ writel(context_gic_dist_common.priority_level[i],
+ context_gic_dist_common.base +
+ GIC_DIST_PRI_SPI32 + i * 4);
+
+ for (i = 0; i < GIC_DIST_ENABLE_SET_COMMON_NUM; i++)
+ writel(context_gic_dist_common.enable_set[i],
+ context_gic_dist_common.base +
+ GIC_DIST_ENABLE_SET_SPI32 + i * 4);
+
+ writel(context_gic_dist_common.ns,
+ context_gic_dist_common.base + GIC_DIST_ENABLE_NS);
+}
+
+/*
+ * Save GIC Dist CPU registers
+ *
+ * This must be called on each CPU; it saves only the banked GIC
+ * registers, while save_gic_dist_common() handles the shared ones.
+ */
+static void save_gic_dist_cpu(struct context_gic_dist_cpu *c_gic)
+{
+ int i;
+
+ for (i = 0; i < GIC_DIST_ENABLE_SET_CPU_NUM; i++)
+ c_gic->enable_set[i] =
+ readl(c_gic->base +
+ GIC_DIST_ENABLE_SET_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_PRI_CPU_NUM; i++)
+ c_gic->priority_level[i] =
+ readl(c_gic->base +
+ GIC_DIST_PRI_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_SPI_TARGET_CPU_NUM; i++)
+ c_gic->spi_target[i] =
+ readl(c_gic->base +
+ GIC_DIST_SPI_TARGET_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_CONFIG_CPU_NUM; i++)
+		c_gic->config[i] =
+ readl(c_gic->base +
+ GIC_DIST_CONFIG_SPI0 + i * 4);
+}
+
+/*
+ * Restore GIC Dist CPU registers
+ *
+ * This must be called on each CPU; it restores only the banked GIC
+ * registers, while restore_gic_dist_common() handles the shared ones.
+ */
+static void restore_gic_dist_cpu(struct context_gic_dist_cpu *c_gic)
+{
+	int i;
+
+ for (i = 0; i < GIC_DIST_CONFIG_CPU_NUM; i++)
+		writel(c_gic->config[i],
+ c_gic->base +
+ GIC_DIST_CONFIG_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_SPI_TARGET_CPU_NUM; i++)
+ writel(c_gic->spi_target[i],
+ c_gic->base +
+ GIC_DIST_SPI_TARGET_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_PRI_CPU_NUM; i++)
+ writel(c_gic->priority_level[i],
+ c_gic->base +
+ GIC_DIST_PRI_SPI0 + i * 4);
+
+ for (i = 0; i < GIC_DIST_ENABLE_SET_CPU_NUM; i++)
+ writel(c_gic->enable_set[i],
+ c_gic->base +
+ GIC_DIST_ENABLE_SET_SPI0 + i * 4);
+}
+
+static void save_scu(void)
+{
+ context_scu.ctrl =
+ readl(context_scu.base + SCU_CTRL);
+ context_scu.cpu_pwrstatus =
+ readl(context_scu.base + SCU_CPU_STATUS);
+ context_scu.inv_all_nonsecure =
+ readl(context_scu.base + SCU_INVALIDATE);
+ context_scu.filter_start_addr =
+ readl(context_scu.base + SCU_FILTER_STARTADDR);
+ context_scu.filter_end_addr =
+ readl(context_scu.base + SCU_FILTER_ENDADDR);
+ context_scu.access_ctrl_sac =
+ readl(context_scu.base + SCU_ACCESS_CTRL_SAC);
+}
+
+static void restore_scu(void)
+{
+ writel(context_scu.ctrl,
+ context_scu.base + SCU_CTRL);
+ writel(context_scu.cpu_pwrstatus,
+ context_scu.base + SCU_CPU_STATUS);
+ writel(context_scu.inv_all_nonsecure,
+ context_scu.base + SCU_INVALIDATE);
+ writel(context_scu.filter_start_addr,
+ context_scu.base + SCU_FILTER_STARTADDR);
+ writel(context_scu.filter_end_addr,
+ context_scu.base + SCU_FILTER_ENDADDR);
+ writel(context_scu.access_ctrl_sac,
+ context_scu.base + SCU_ACCESS_CTRL_SAC);
+}
+
+/*
+ * Save VAPE context
+ */
+void context_vape_save(void)
+{
+ atomic_notifier_call_chain(&context_ape_notifier_list,
+ CONTEXT_APE_SAVE, NULL);
+
+ if (cpu_is_u5500())
+ u5500_context_save_icn();
+ if (cpu_is_u8500())
+ u8500_context_save_icn();
+
+ save_stm_ape();
+
+ save_tpiu();
+
+ save_prcc();
+}
+
+/*
+ * Restore VAPE context
+ */
+void context_vape_restore(void)
+{
+ restore_prcc();
+
+ restore_tpiu();
+
+ restore_stm_ape();
+
+ if (cpu_is_u5500())
+ u5500_context_restore_icn();
+ if (cpu_is_u8500())
+ u8500_context_restore_icn();
+
+ atomic_notifier_call_chain(&context_ape_notifier_list,
+ CONTEXT_APE_RESTORE, NULL);
+}
+
+/*
+ * Save common
+ *
+ * This function must be called once for all cores before going to deep sleep.
+ */
+void context_varm_save_common(void)
+{
+ atomic_notifier_call_chain(&context_arm_notifier_list,
+ CONTEXT_ARM_COMMON_SAVE, NULL);
+
+ /* Save common parts */
+ save_gic_dist_common();
+ save_scu();
+}
+
+/*
+ * Restore common
+ *
+ * This function must be called once for all cores when waking up from deep
+ * sleep.
+ */
+void context_varm_restore_common(void)
+{
+ /* Restore common parts */
+ restore_scu();
+ restore_gic_dist_common();
+
+ atomic_notifier_call_chain(&context_arm_notifier_list,
+ CONTEXT_ARM_COMMON_RESTORE, NULL);
+}
+
+/*
+ * Save core
+ *
+ * This function must be called once for each cpu core before going to deep
+ * sleep.
+ */
+void context_varm_save_core(void)
+{
+ int cpu = smp_processor_id();
+
+ atomic_notifier_call_chain(&context_arm_notifier_list,
+ CONTEXT_ARM_CORE_SAVE, NULL);
+
+ per_cpu(varm_cp15_pointer, cpu) = per_cpu(varm_cp15_backup_stack, cpu);
+
+ /* Save core */
+ save_gic_if_cpu(&per_cpu(context_gic_cpu, cpu));
+ save_gic_dist_cpu(&per_cpu(context_gic_dist_cpu, cpu));
+ context_save_cp15_registers(&per_cpu(varm_cp15_pointer, cpu));
+}
+
+/*
+ * Restore core
+ *
+ * This function must be called once for each cpu core when waking up from
+ * deep sleep.
+ */
+void context_varm_restore_core(void)
+{
+ int cpu = smp_processor_id();
+
+ /* Restore core */
+ context_restore_cp15_registers(&per_cpu(varm_cp15_pointer, cpu));
+ restore_gic_dist_cpu(&per_cpu(context_gic_dist_cpu, cpu));
+ restore_gic_if_cpu(&per_cpu(context_gic_cpu, cpu));
+
+ atomic_notifier_call_chain(&context_arm_notifier_list,
+ CONTEXT_ARM_CORE_RESTORE, NULL);
+}
+
+/*
+ * Save CPU registers
+ *
+ * This function saves ARM registers.
+ */
+void context_save_cpu_registers(void)
+{
+ int cpu = smp_processor_id();
+
+ per_cpu(varm_registers_pointer, cpu) =
+ per_cpu(varm_registers_backup_stack, cpu);
+ context_save_arm_registers(&per_cpu(varm_registers_pointer, cpu));
+}
+
+/*
+ * Restore CPU registers
+ *
+ * This function restores ARM registers.
+ */
+void context_restore_cpu_registers(void)
+{
+ int cpu = smp_processor_id();
+
+ context_restore_arm_registers(&per_cpu(varm_registers_pointer, cpu));
+}
+
+/*
+ * This function stores CP15 registers related to cache and mmu
+ * in backup SRAM. It also stores stack pointer, CPSR
+ * and return address for the PC in backup SRAM and
+ * does wait for interrupt.
+ */
+void context_save_to_sram_and_wfi(bool cleanL2cache)
+{
+ int cpu = smp_processor_id();
+
+ context_save_to_sram_and_wfi_internal(backup_sram_storage[cpu],
+ cleanL2cache);
+}
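+
+/*
+ * For reference, a sketch (assumed caller code, not part of this file) of
+ * how the helpers above are meant to be combined on the deep sleep path;
+ * the surrounding idle/suspend logic is left out:
+ *
+ *	context_vape_save();			(once, if APE powers off)
+ *	context_varm_save_common();		(once for all cores)
+ *	context_varm_save_core();		(on each core)
+ *	context_save_cpu_registers();		(on each core)
+ *	context_save_to_sram_and_wfi(true);	(last core cleans L2)
+ *	-- deep sleep, then wake up --
+ *	context_restore_cpu_registers();
+ *	context_varm_restore_core();
+ *	context_varm_restore_common();		(once for all cores)
+ *	context_vape_restore();			(once, if APE was off)
+ */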
+
+static int __init context_init(void)
+{
+ int i;
+ void __iomem *ux500_backup_ptr;
+
+ /* allocate backup pointer for RAM data */
+ ux500_backup_ptr = (void *)__get_free_pages(GFP_KERNEL,
+ get_order(U8500_BACKUPRAM_SIZE));
+
+ if (!ux500_backup_ptr) {
+ pr_warning("context: could not allocate backup memory\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * ROM code addresses to store backup contents,
+ * pass the physical address of back up to ROM code
+ */
+ writel(virt_to_phys(ux500_backup_ptr),
+ IO_ADDRESS(U8500_EXT_RAM_LOC_BACKUPRAM_ADDR));
+
+ /* Give logical address to backup RAM. For both CPUs */
+ if (cpu_is_u8500v20_or_later()) {
+ writel(IO_ADDRESS(U8500_PUBLIC_BOOT_ROM_BASE),
+ IO_ADDRESS(U8500_CPU0_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));
+
+ writel(IO_ADDRESS(U8500_PUBLIC_BOOT_ROM_BASE),
+ IO_ADDRESS(U8500_CPU1_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));
+ } else {
+ writel(IO_ADDRESS(U8500_BACKUPRAM0_BASE),
+ IO_ADDRESS(U8500_CPU0_BACKUPRAM_ADDR_BACKUPRAM_LOG_ADDR));
+
+ writel(IO_ADDRESS(U8500_BACKUPRAM0_BASE),
+ IO_ADDRESS(U8500_CPU1_BACKUPRAM_ADDR_BACKUPRAM_LOG_ADDR));
+ }
+
+ /* FIXME: To UX500 */
+ context_tpiu.base = ioremap(U8500_TPIU_BASE, SZ_4K);
+ context_stm_ape.base = ioremap(U8500_STM_REG_BASE, SZ_4K);
+ context_scu.base = ioremap(U8500_SCU_BASE, SZ_4K);
+
+ for (i = 0; i < num_possible_cpus(); i++) {
+ per_cpu(context_gic_cpu, i).base = ioremap(U8500_GIC_CPU_BASE,
+ SZ_4K);
+ per_cpu(context_gic_dist_cpu, i).base =
+ ioremap(U8500_GIC_DIST_BASE,
+ SZ_4K);
+ }
+
+ context_gic_dist_common.base = ioremap(U8500_GIC_DIST_BASE, SZ_4K);
+
+ /* PERIPH4 is always on, so no need to save its prcc */
+ context_prcc[0].base = ioremap(U8500_CLKRST1_BASE, SZ_4K);
+ context_prcc[1].base = ioremap(U8500_CLKRST2_BASE, SZ_4K);
+ context_prcc[2].base = ioremap(U8500_CLKRST3_BASE, SZ_4K);
+ context_prcc[3].base = ioremap(U8500_CLKRST5_BASE, SZ_4K);
+ context_prcc[4].base = ioremap(U8500_CLKRST6_BASE, SZ_4K);
+
+ if (cpu_is_u8500()) {
+ u8500_context_init();
+ } else if (cpu_is_u5500()) {
+ u5500_context_init();
+ } else {
+ printk(KERN_ERR "context: unknown hardware!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+subsys_initcall(context_init);
+
diff --git a/arch/arm/mach-ux500/pm/context.h b/arch/arm/mach-ux500/pm/context.h
new file mode 100644
index 00000000000..d043f8c805d
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/context.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>
+ * Rickard Andersson <rickard.andersson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+#ifndef CONTEXT_H
+#define CONTEXT_H
+
+#include <linux/notifier.h>
+
+#ifdef CONFIG_UX500_CONTEXT
+
+/* Defines to be used with
+ * context_ape_notifier_register()
+ */
+#define CONTEXT_APE_SAVE 0 /* APE save */
+#define CONTEXT_APE_RESTORE 1 /* APE restore */
+
+/* Defines to be used with
+ * context_arm_notifier_register()
+ */
+#define CONTEXT_ARM_CORE_SAVE 0 /* Called for each ARM core */
+#define CONTEXT_ARM_CORE_RESTORE 1 /* Called for each ARM core */
+#define CONTEXT_ARM_COMMON_SAVE 2 /* Called when ARM common is saved */
+#define CONTEXT_ARM_COMMON_RESTORE 3 /* Called when ARM common is restored */
+
+int context_ape_notifier_register(struct notifier_block *nb);
+int context_ape_notifier_unregister(struct notifier_block *nb);
+
+int context_arm_notifier_register(struct notifier_block *nb);
+int context_arm_notifier_unregister(struct notifier_block *nb);
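+
+/*
+ * Usage sketch (hypothetical driver code; the foo_* names are made up):
+ * a driver whose registers are lost while VAPE is off can hook the APE
+ * chain to save and restore them around the power cycle:
+ *
+ *	static int foo_context_notify(struct notifier_block *nb,
+ *				      unsigned long event, void *data)
+ *	{
+ *		if (event == CONTEXT_APE_SAVE)
+ *			foo_save_regs();
+ *		else if (event == CONTEXT_APE_RESTORE)
+ *			foo_restore_regs();
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block foo_nb = {
+ *		.notifier_call = foo_context_notify,
+ *	};
+ *
+ *	context_ape_notifier_register(&foo_nb);
+ */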
+
+void context_vape_save(void);
+void context_vape_restore(void);
+
+void context_varm_save_common(void);
+void context_varm_restore_common(void);
+
+void context_varm_save_core(void);
+void context_varm_restore_core(void);
+
+void context_save_cpu_registers(void);
+void context_restore_cpu_registers(void);
+
+/*
+ * cleanL2cache - Indicate if L2 cache should be cleaned.
+ * Note that L1 cache is always cleaned.
+ */
+void context_save_to_sram_and_wfi(bool cleanL2cache);
+
+void context_clean_l1_cache_all(void);
+void context_save_arm_registers(u32 **backup_stack);
+void context_restore_arm_registers(u32 **backup_stack);
+
+void context_save_cp15_registers(u32 **backup_stack);
+void context_restore_cp15_registers(u32 **backup_stack);
+
+void context_save_to_sram_and_wfi_internal(u32 backup_sram_storage,
+ bool cleanL2cache);
+
+/* DB specific functions in either context-db8500 or context-db5500 */
+void u8500_context_save_icn(void);
+void u8500_context_restore_icn(void);
+void u8500_context_init(void);
+
+void u5500_context_save_icn(void);
+void u5500_context_restore_icn(void);
+void u5500_context_init(void);
+
+#else
+
+static inline void context_varm_save_core(void) {}
+static inline void context_save_cpu_registers(void) {}
+static inline void context_save_to_sram_and_wfi(bool cleanL2cache) {}
+static inline void context_restore_cpu_registers(void) {}
+static inline void context_varm_restore_core(void) {}
+
+#endif
+
+#endif
diff --git a/arch/arm/mach-ux500/pm/context_arm.S b/arch/arm/mach-ux500/pm/context_arm.S
new file mode 100755
index 00000000000..c1db2d8304c
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/context_arm.S
@@ -0,0 +1,427 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>
+ * Rickard Andersson <rickard.andersson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#include <linux/linkage.h>
+#include <mach/hardware.h>
+#include <asm/hardware/cache-l2x0.h>
+
+/*
+ * Save and increment macro
+ */
+.macro SAVE_AND_INCREMENT FROM_REG TO_REG
+ str \FROM_REG, [\TO_REG], #+4
+.endm
+
+/*
+ * Decrement and restore macro
+ */
+.macro DECREMENT_AND_RESTORE FROM_REG TO_REG
+ ldr \TO_REG, [\FROM_REG, #-4]!
+.endm
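+
+@ Illustration only: "SAVE_AND_INCREMENT r2 r1" expands to
+@ "str r2, [r1], #+4", i.e. store r2 at the address in r1 and then
+@ post-increment r1 by one word. "DECREMENT_AND_RESTORE r1 r2" is the
+@ mirror image, "ldr r2, [r1, #-4]!", which is why every restore below
+@ runs in the reverse order of the corresponding save.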
+
+/*
+ * Save ARM registers
+ *
+ * This function must be called in supervisor mode.
+ *
+ * r0 = address to backup stack pointer
+ *
+ * Backup stack operations:
+ * + {sp, lr}^
+ * + cpsr
+ * + {r3, r8-r14} (FIQ mode: r3=spsr)
+ * + {r3, r13, r14} (IRQ mode: r3=spsr)
+ * + {r3, r13, r14} (abort mode: r3=spsr)
+ * + {r3, r13, r14} (undef mode: r3=spsr)
+ */
+ .align
+ .section ".text", "ax"
+ENTRY(context_save_arm_registers)
+ stmfd sp!, {r1, r2, r3, lr} @ Save on stack
+ ldr r1, [r0] @ Read backup stack pointer
+
+ stmia r1, {sp, lr}^ @ Store user mode sp and lr
+ @ registers
+ add r1, r1, #8 @ Update backup pointer (not
+ @ done in previous instruction)
+
+ mrs r2, cpsr @ Get CPSR
+ SAVE_AND_INCREMENT r2 r1 @ Save CPSR register
+ orr r2, r2, #0xc0 @ Disable FIQ and IRQ
+ bic r2, r2, #0x1f @ Setup r2 to change mode
+
+ @ The suffix to CPSR refers to which field(s) of the CPSR are
+ @ referenced (you can specify one or more). Defined fields are:
+ @
+ @ c - control
+ @ x - extension
+ @ s - status
+ @ f - flags
+
+ orr r3, r2, #0x11 @ Save FIQ mode registers
+ msr cpsr_cxsf, r3
+ mrs r3, spsr
+ stmia r1!, {r3, r8-r14}
+
+ orr r3, r2, #0x12 @ Save IRQ mode registers
+ msr cpsr_cxsf, r3
+ mrs r3, spsr
+ stmia r1!, {r3, r13, r14}
+
+ orr r3, r2, #0x17 @ Save abort mode registers +
+ @ common mode registers
+ msr cpsr_cxsf, r3
+ mrs r3, spsr
+ stmia r1!, {r3, r13, r14}
+
+ orr r3, r2, #0x1B @ Save undef mode registers
+ msr cpsr_cxsf, r3
+ mrs r3, spsr
+ stmia r1!, {r3, r13, r14}
+
+ orr r3, r2, #0x13 @ Return to supervisor mode
+ msr cpsr_cxsf, r3
+
+ str r1, [r0] @ Write backup stack pointer
+ ldmfd sp!, {r1, r2, r3, pc} @ Restore registers and return
+
+
+
+/*
+ * Restore ARM registers
+ *
+ * This function must be called in supervisor mode.
+ *
+ * r0 = address to backup stack pointer
+ *
+ * Backup stack operations:
+ * - {r3, r13, r14} (undef mode: spsr=r3)
+ * - {r3, r13, r14} (abort mode: spsr=r3)
+ * - {r3, r13, r14} (IRQ mode: spsr=r3)
+ * - {r3, r8-r14} (FIQ mode: spsr=r3)
+ * - cpsr
+ * - {sp, lr}^
+ */
+ .align
+ .section ".text", "ax"
+ENTRY(context_restore_arm_registers)
+ stmfd sp!, {r1, r2, r3, lr} @ Save on stack
+ ldr r1, [r0] @ Read backup stack pointer
+
+ mrs r2, cpsr @ Get CPSR
+ orr r2, r2, #0xc0 @ Disable FIQ and IRQ
+ bic r2, r2, #0x1f @ Setup r2 to change mode
+
+ orr r3, r2, #0x1b @ Restore undef mode registers
+ msr cpsr_cxsf, r3
+ ldmdb r1!, {r3, r13, r14}
+ msr spsr_cxsf, r3
+
+ orr r3, r2, #0x17 @ Restore abort mode registers
+ msr cpsr_cxsf, r3
+ ldmdb r1!, {r3, r13, r14}
+ msr spsr_cxsf, r3
+
+ orr r3, r2, #0x12 @ Restore IRQ mode registers
+ msr cpsr_cxsf, r3
+ ldmdb r1!, {r3, r13, r14}
+ msr spsr_cxsf, r3
+
+ orr r3, r2, #0x11 @ Restore FIQ mode registers
+ msr cpsr_cxsf, r3
+ ldmdb r1!, {r3, r8-r14}
+ msr spsr_cxsf, r3
+
+ DECREMENT_AND_RESTORE r1 r3 @ Restore cpsr register
+ msr cpsr_cxsf, r3
+
+ ldmdb r1, {sp, lr}^ @ Restore sp and lr registers
+ sub r1, r1, #8 @ Update backup pointer (not
+ @ done in previous instruction)
+
+ str r1, [r0] @ Write backup stack pointer
+ ldmfd sp!, {r1, r2, r3, pc} @ Restore registers and return
+
+
+
+/*
+ * Save CP15 registers
+ *
+ * This function must be called in supervisor mode.
+ *
+ * r0 = address to backup stack pointer
+ *
+ * TTBR0, TTBR1, TTBCR and DACR CP15 registers are restored by the boot ROM from backup SRAM.
+ */
+ .align 4
+ .section ".text", "ax"
+ENTRY(context_save_cp15_registers)
+ stmfd sp!, {r1, r2, lr} @ Save on stack
+ ldr r1, [r0] @ Read backup stack pointer
+
+ mrc p15, 0, r2, c12, c0, 0 @ Read Non-secure Vector Base
+ @ Address Register
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 0, r2, c10, c2, 0 @ Access primary memory region
+ @ remap register
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 0, r2, c10, c2, 1 @ Access normal memory region
+ @ remap register
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 0, r2, c13, c0, 1 @ Read Context ID Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c13, c0, 2 @ Read Thread ID registers,
+ @ this register is both user
+ @ and privileged R/W accessible
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c13, c0, 3 @ Read Thread ID registers,
+ @ this register is user
+ @ read-only and privileged R/W
+ @ accessible.
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c13, c0, 4 @ Read Thread ID registers,
+ @ this register is privileged
+ @ R/W accessible only.
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 2, r2, c0, c0, 0 @ Cache Size Selection Register
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 0, r2, c9, c12, 0 @ Read PMNC Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c12, 1 @ Read PMCNTENSET Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c12, 5 @ Read PMSELR Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c13, 0 @ Read PMCCNTR Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c13, 1 @ Read PMXEVTYPER Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c14, 0 @ Read PMUSERENR Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c14, 1 @ Read PMINTENSET Register
+ SAVE_AND_INCREMENT r2 r1
+ mrc p15, 0, r2, c9, c14, 2 @ Read PMINTENCLR Register
+ SAVE_AND_INCREMENT r2 r1
+
+ mrc p15, 0, r2, c1, c0, 2 @ Read CPACR Register
+ SAVE_AND_INCREMENT r2 r1
+
+ str r1, [r0] @ Write backup stack pointer
+ ldmfd sp!, {r1, r2, pc} @ Restore registers and return
+
+
+
+/*
+ * Restore CP15 registers
+ *
+ * This function must be called in supervisor mode.
+ *
+ * r0 = address to backup stack pointer
+ */
+ .align 4
+ .section ".text", "ax"
+ENTRY(context_restore_cp15_registers)
+ stmfd sp!, {r1, r2, lr} @ Save on stack
+ ldr r1, [r0] @ Read backup stack pointer
+
+ DECREMENT_AND_RESTORE r1 r2 @ Write CPACR register
+ mcr p15, 0, r2, c1, c0, 2
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c14, 2 @ Write PMINTENCLR Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c14, 1 @ Write PMINTENSET Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c14, 0 @ Write PMUSERENR Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c13, 1 @ Write PMXEVTYPER Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c13, 0 @ Write PMCCNTR Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c12, 5 @ Write PMSELR Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c12, 1 @ Write PMCNTENSET Register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c9, c12, 0 @ Write PMNC Register
+
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 2, r2, c0, c0, 0 @ Cache Size Selection Register
+
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c13, c0, 4 @ Write Thread ID registers,
+ @ this register is privileged
+ @ R/W accessible only
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c13, c0, 3 @ Write Thread ID registers,
+ @ this register is user
+ @ read-only and privileged R/W
+ @ accessible
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c13, c0, 2 @ Write Thread ID registers,
+ @ this register is both user
+ @ and privileged R/W accessible
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c13, c0, 1 @ Write Context ID Register
+
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c10, c2, 1 @ Access normal memory region
+ @ remap register
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c10, c2, 0 @ Access primary memory region
+ @ remap register
+
+ DECREMENT_AND_RESTORE r1 r2
+ mcr p15, 0, r2, c12, c0, 0 @ Write Non-secure Vector Base
+ @ Address Register
+
+ str r1, [r0] @ Write backup stack pointer
+ ldmfd sp!, {r1, r2, pc} @ Restore registers and return
+
+
+/*
+ * L1 cache clean function. Commit 'dirty' data from L1
+ * to L2 cache.
+ *
+ * r0, r1, r2 used locally
+ *
+ */
+ .align 4
+ .section ".text", "ax"
+ENTRY(context_clean_l1_cache_all)
+
+ mov r0, #0 @ switch to cache level 0
+ @ (L1 cache)
+ mcr p15, 2, r0, c0, c0, 0 @ select current cache level
+ @ in cssr
+
+ dmb
+ mov r1, #0 @ r1 = way index
+wayLoopL1clean:
+ mov r0, #0 @ r0 = line index
+lineLoopL1clean:
+ mov r2, r1, lsl #30 @ TODO: OK to hard-code
+ @ SoC-specific L1 cache details?
+ add r2, r0, lsl #5
+ mcr p15, 0, r2, c7, c10, 2 @ Clean cache by set/way
+ add r0, r0, #1
+ cmp r0, #256 @ TODO: Ok with hard-coded
+ @ set/way sizes or do we have to
+ @ read them from ARM regs? Is it
+ @ set correctly in silicon?
+ bne lineLoopL1clean
+ add r1, r1, #1
+ cmp r1, #4 @ TODO: Ditto, sizes...
+ bne wayLoopL1clean
+
+ dsb
+ isb
+ mov pc, lr
+
+ENDPROC(context_clean_l1_cache_all)
+
+
+/*
+ * L2 cache clean function. Commit from PL310 L2 cache
+ * controller to DDR SDRAM.
+ *
+ * r0, r1 used locally
+ *
+ */
+ .align 4
+ .section ".text", "ax"
+ENTRY(ux500_clean_l2_cache_all)
+
+ ldr r0, =IO_ADDRESS(U8500_L2CC_BASE)
+
+ ldr r1, =0xff @ TODO: Ok to assume 8-way cache
+ @ on Ux500?
+ str r1, [r0, #L2X0_CLEAN_WAY]
+ L2busywaitLoopClean:
+ ldr r1, [r0, #L2X0_CLEAN_WAY]
+ cmp r1, #0 @ All bits in L2X0_CLEAN_WAY
+ @ will be zero once clean is
+ @ finished
+ bne L2busywaitLoopClean
+
+ ldr r1, =0x0
+ str r1, [r0, #L2X0_CACHE_SYNC]
+ @ The l2x0 C code busy-waits
+ @ here to ensure that no
+ @ background op is running.
+ @ In our case we have already
+ @ checked this after the cache
+ @ clean, and CACHE_SYNC is atomic
+ @ according to the reference
+ @ manual
+ mov pc, lr
+
+
+/*
+ * Last saves and WFI
+ *
+ * r0 = backup_sram_storage base address
+ * r1 = indicate whether L2 cache should be cleaned
+ */
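+@
+@ Words stored below, in backup_sram_storage offset order (a summary of
+@ this routine, for reference):
+@   0x00 SCTLR, 0x04 TTBR0, 0x08 TTBR1, 0x0c TTBCR, 0x10 DACR,
+@   0x14 return PC, 0x18 CPSR, 0x1c SP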
+ .align 4
+ .section ".text", "ax"
+ENTRY(context_save_to_sram_and_wfi_internal)
+
+ stmfd sp!, {r2-r12, lr} @ save on stack.
+
+ mrc p15, 0, r2, c1, c0, 0 @ read cp15 system control
+ @ register
+ str r2, [r0, #0x00]
+ mrc p15, 0, r2, c2, c0, 0 @ read cp15 ttb0 register
+ str r2, [r0, #0x04]
+ mrc p15, 0, r2, c2, c0, 1 @ read cp15 ttb1 register
+ str r2, [r0, #0x08]
+ mrc p15, 0, r2, c2, c0, 2 @ read cp15 ttb control register
+ str r2, [r0, #0x0C]
+ mrc p15, 0, r2, c3, c0, 0 @ read domain access control
+ @ register
+ str r2, [r0, #0x10]
+
+ ldr r2, =return_here
+ str r2, [r0, #0x14] @ save program counter restore
+ @ value to backup_sram_storage
+ mrs r2, cpsr
+ str r2, [r0, #0x18] @ save cpsr to
+ @ backup_sram_storage
+ str sp, [r0, #0x1c] @ save sp to backup_sram_storage
+
+ mov r4, r1 @ Set r4 = cleanL2cache; r1
+ @ will be clobbered by
+ @ context_clean_l1_cache_all
+
+ bl context_clean_l1_cache_all @ Commit all dirty data in L1
+ @ cache to L2 without
+ @ invalidating
+
+ dsb
+ cmp r4, #0
+
+ blne ux500_clean_l2_cache_all @ If r4 != 0 then clean all
+ @ dirty data in L2 cache, no
+ @ invalidate
+
+ dsb @ data synchronization barrier
+ isb @ instruction synchronization
+ @ barrier
+ wfi @ wait for interrupt
+
+return_here: @ both cores return here
+ @ now we are out of deep sleep
+ @ with all the context lost
+ @ except pc, sp and cpsr
+
+ ldmfd sp!, {r2-r12, pc} @ restore from stack
+
diff --git a/arch/arm/mach-ux500/pm/cpuidle.c b/arch/arm/mach-ux500/pm/cpuidle.c
new file mode 100644
index 00000000000..564990d8803
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/cpuidle.c
@@ -0,0 +1,878 @@
+/*
+ * Copyright (C) STMicroelectronics 2009
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com> for
+ * ST-Ericsson. Loosely based on cpuidle.c by Sundar Iyer.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+
+#include <plat/gpio.h>
+
+#include <mach/prcmu-fw-api.h>
+
+#include "cpuidle.h"
+#include "cpuidle_dbg.h"
+#include "context.h"
+#include "pm.h"
+#include "../regulator-u8500.h"
+#include "../timer-rtt.h"
+
+#define DEEP_SLEEP_WAKE_UP_LATENCY 8500
+#define SLEEP_WAKE_UP_LATENCY 800
+#define UL_PLL_START_UP_LATENCY 8000 /* us */
+#define RTC_PROGRAM_TIME 400 /* us */
+
+static struct cstate cstates[] = {
+ {
+ .enter_latency = 0,
+ .exit_latency = 0,
+ .threshold = 0,
+ .power_usage = 10,
+ .APE = APE_ON,
+ .ARM = ARM_ON,
+ .ARM_PLL = ARM_PLL_ON,
+ .UL_PLL = UL_PLL_ON,
+ .ESRAM = ESRAM_RET,
+ .flags = CPUIDLE_FLAG_SHALLOW | CPUIDLE_FLAG_TIME_VALID,
+ .state = CI_WFI,
+ .desc = "Wait for interrupt ",
+ },
+ {
+ .enter_latency = RTC_PROGRAM_TIME,
+ .exit_latency = 450,
+ .threshold = 500 + RTC_PROGRAM_TIME,
+ .power_usage = 5,
+ .APE = APE_ON,
+ .ARM = ARM_RET,
+ .ARM_PLL = ARM_PLL_ON,
+ .UL_PLL = UL_PLL_ON,
+ .ESRAM = ESRAM_RET,
+ .flags = CPUIDLE_FLAG_SHALLOW | CPUIDLE_FLAG_TIME_VALID,
+ .state = CI_IDLE,
+ .desc = "ApIdle ",
+ },
+ {
+ .enter_latency = RTC_PROGRAM_TIME,
+ .exit_latency = 570,
+ .threshold = 600 + RTC_PROGRAM_TIME,
+ .power_usage = 4,
+ .APE = APE_ON,
+ .ARM = ARM_RET,
+ .ARM_PLL = ARM_PLL_OFF,
+ .UL_PLL = UL_PLL_ON,
+ .ESRAM = ESRAM_RET,
+ .flags = CPUIDLE_FLAG_SHALLOW | CPUIDLE_FLAG_TIME_VALID,
+ .state = CI_IDLE,
+ .desc = "ApIdle, ARM PLL off ",
+ },
+ {
+ .enter_latency = RTC_PROGRAM_TIME + 50,
+ .exit_latency = SLEEP_WAKE_UP_LATENCY,
+ .threshold = 800 + RTC_PROGRAM_TIME,
+ .power_usage = 3,
+ .APE = APE_OFF,
+ .ARM = ARM_RET,
+ .ARM_PLL = ARM_PLL_OFF,
+ .UL_PLL = UL_PLL_ON,
+ .ESRAM = ESRAM_RET,
+ .flags = CPUIDLE_FLAG_BALANCED | CPUIDLE_FLAG_TIME_VALID,
+ .state = CI_SLEEP,
+ .desc = "ApSleep ",
+ },
+ {
+ .enter_latency = RTC_PROGRAM_TIME + 50,
+ .exit_latency = (SLEEP_WAKE_UP_LATENCY +
+ UL_PLL_START_UP_LATENCY),
+ .threshold = (2 * (SLEEP_WAKE_UP_LATENCY +
+ UL_PLL_START_UP_LATENCY + 50) +
+ RTC_PROGRAM_TIME),
+ .power_usage = 2,
+ .APE = APE_OFF,
+ .ARM = ARM_RET,
+ .ARM_PLL = ARM_PLL_OFF,
+ .UL_PLL = UL_PLL_OFF,
+ .ESRAM = ESRAM_RET,
+ .flags = CPUIDLE_FLAG_BALANCED | CPUIDLE_FLAG_TIME_VALID,
+ .state = CI_SLEEP,
+ .desc = "ApSleep, UL PLL off ",
+ },
+#ifdef ENABLE_AP_DEEP_IDLE
+ {
+ .enter_latency = RTC_PROGRAM_TIME + 200,
+ .exit_latency = (DEEP_SLEEP_WAKE_UP_LATENCY +
+ RTC_PROGRAM_TIME),
+ .threshold = 8700,
+ .power_usage = 2,
+ .APE = APE_ON,
+ .ARM = ARM_OFF,
+ .ARM_PLL = ARM_PLL_OFF,
+ .UL_PLL = UL_PLL_ON,
+ .ESRAM = ESRAM_RET,
+ .flags = CPUIDLE_FLAG_DEEP | CPUIDLE_FLAG_TIME_VALID,
+ .state = CI_DEEP_IDLE,
+ .desc = "ApDeepIdle, UL PLL off ",
+ },
+#endif
+ {
+ .enter_latency = RTC_PROGRAM_TIME + 250,
+ .exit_latency = (DEEP_SLEEP_WAKE_UP_LATENCY +
+ RTC_PROGRAM_TIME),
+ .threshold = 9000,
+ .power_usage = 1,
+ .APE = APE_OFF,
+ .ARM = ARM_OFF,
+ .ARM_PLL = ARM_PLL_OFF,
+ .UL_PLL = UL_PLL_OFF,
+ .ESRAM = ESRAM_RET,
+ .flags = CPUIDLE_FLAG_DEEP | CPUIDLE_FLAG_TIME_VALID,
+ .state = CI_DEEP_SLEEP,
+ .desc = "ApDeepsleep, UL PLL off",
+ },
+};
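+
+/*
+ * Reading the table above (a worked example, not extra configuration):
+ * a state is only eligible when the predicted idle time exceeds its
+ * threshold. Plain ApSleep needs 800 + RTC_PROGRAM_TIME = 1200 us,
+ * while ApSleep with UL PLL off needs
+ * 2 * (800 + 8000 + 50) + 400 = 18100 us, since the UL PLL alone takes
+ * 8 ms to start up again.
+ */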
+
+struct cpu_state {
+ bool restore_arm_core;
+ bool ready_deep_sleep;
+ bool always_on_timer_migrated;
+ ktime_t sched_wake_up;
+ struct cpuidle_device dev;
+ int this_cpu;
+};
+
+static DEFINE_PER_CPU(struct cpu_state, *cpu_state);
+
+static DEFINE_SPINLOCK(cpuidle_lock);
+static bool restore_ape; /* protected by cpuidle_lock */
+static bool restore_arm_common; /* protected by cpuidle_lock */
+
+static atomic_t idle_cpus_counter = ATOMIC_INIT(0);
+
+struct cstate *ux500_ci_get_cstates(int *len)
+{
+ if (len != NULL)
+ (*len) = ARRAY_SIZE(cstates);
+ return cstates;
+}
+
+static void do_nothing(void *unused)
+{
+}
+
+/*
+ * cpuidle & hotplug - plug or unplug a cpu in idle sequence
+ */
+void ux500_cpuidle_plug(int cpu)
+{
+ atomic_dec(&idle_cpus_counter);
+ wmb();
+}
+void ux500_cpuidle_unplug(int cpu)
+{
+ atomic_inc(&idle_cpus_counter);
+ wmb();
+}
+
+/*
+ * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
+ * pm_idle and update to new pm_idle value. Required while changing pm_idle
+ * handler on SMP systems.
+ *
+ * Caller must have changed pm_idle to the new value before the call. Old
+ * pm_idle value will not be used by any CPU after the return of this function.
+ */
+void cpu_idle_wait(void)
+{
+ smp_mb();
+ /* kick all the CPUs so that they exit out of pm_idle */
+ smp_call_function(do_nothing, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(cpu_idle_wait);
+
+static void migrate_to_always_on_timer(struct cpu_state *state)
+{
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
+ &state->this_cpu);
+
+ state->always_on_timer_migrated = true;
+ smp_wmb();
+}
+
+static void migrate_to_local_timer(struct cpu_state *state)
+{
+ /* Use the ARM local timer for this cpu */
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
+ &state->this_cpu);
+
+ state->always_on_timer_migrated = false;
+ smp_wmb();
+}
+
+static void restore_sequence(struct cpu_state *state)
+{
+ unsigned long iflags;
+ ktime_t t;
+
+ spin_lock_irqsave(&cpuidle_lock, iflags);
+
+ /*
+ * Remove the wake-up time, i.e. set the wake-up
+ * point far ahead
+ */
+ t = ktime_add_us(ktime_get(), 1000000000); /* ~16.7 minutes ahead */
+ state->sched_wake_up = t;
+ smp_wmb();
+
+ smp_rmb();
+ if (state->restore_arm_core) {
+
+ state->restore_arm_core = false;
+ smp_wmb();
+
+ context_varm_restore_core();
+ }
+
+ smp_rmb();
+ if (restore_arm_common) {
+
+ restore_arm_common = false;
+ smp_wmb();
+
+ /* Restore gic settings */
+ context_varm_restore_common();
+ }
+
+ smp_rmb();
+ if (restore_ape) {
+
+ restore_ape = false;
+ smp_wmb();
+
+ /*
+ * APE has been turned off. Save GPIO wake up cause before
+ * clearing ioforce.
+ */
+ context_vape_restore();
+
+ ux500_pm_gpio_save_wake_up_status();
+
+ /* Restore IO ring */
+ ux500_pm_prcmu_set_ioforce(false);
+
+ ux500_ci_dbg_console_handle_ape_resume();
+
+ }
+
+ spin_unlock_irqrestore(&cpuidle_lock, iflags);
+
+ smp_rmb();
+ if (state->always_on_timer_migrated)
+ migrate_to_local_timer(state);
+}
+
+/**
+ * get_remaining_sleep_time() - returns remaining sleep time in
+ * microseconds (us)
+ */
+static int get_remaining_sleep_time(void)
+{
+ ktime_t now;
+ int cpu;
+ unsigned long iflags;
+ int t;
+ int remaining_sleep_time = INT_MAX;
+
+ now = ktime_get();
+
+ /*
+ * Check the next scheduled expiry, considering
+ * all online cpus
+ */
+ spin_lock_irqsave(&cpuidle_lock, iflags);
+ for_each_online_cpu(cpu) {
+ t = ktime_to_us(ktime_sub(per_cpu(cpu_state,
+ cpu)->sched_wake_up,
+ now));
+ if (t < remaining_sleep_time)
+ remaining_sleep_time = t;
+ }
+ spin_unlock_irqrestore(&cpuidle_lock, iflags);
+
+ return remaining_sleep_time;
+}
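+
+/*
+ * E.g. (illustrative) with CPU0 scheduled to wake up in 700 us and CPU1
+ * in 3 ms, the function above returns 700: the earliest wake-up bounds
+ * how deep the system may sleep.
+ */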
+
+static bool cores_ready_deep_sleep(void)
+{
+ int cpu;
+ int this_cpu;
+
+ this_cpu = smp_processor_id();
+
+ for_each_online_cpu(cpu) {
+ if (cpu != this_cpu) {
+ smp_rmb();
+ if (!per_cpu(cpu_state, cpu)->ready_deep_sleep)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool cores_timer_migrated(void)
+{
+ int cpu;
+ int this_cpu;
+
+ this_cpu = smp_processor_id();
+
+ for_each_online_cpu(cpu) {
+ if (cpu != this_cpu) {
+ smp_rmb();
+ if (!per_cpu(cpu_state, cpu)->always_on_timer_migrated)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int determine_sleep_state(int idle_cpus,
+ int gov_cstate)
+{
+ int i;
+ int sleep_time;
+ bool power_state_req;
+
+ /* If first cpu to sleep, go to most shallow sleep state */
+ if (idle_cpus < num_online_cpus())
+ return 0;
+
+ /* If the other CPU is on its way to WFI but not yet there, wait. */
+ while (1) {
+ if (ux500_pm_other_cpu_wfi())
+ break;
+
+ if (ux500_pm_gic_pending_interrupt())
+ return -1;
+
+ if (atomic_read(&idle_cpus_counter) < num_online_cpus())
+ return 0;
+ }
+
+ power_state_req = power_state_active_is_enabled() ||
+ prcmu_is_ac_wake_requested();
+
+ sleep_time = get_remaining_sleep_time();
+
+ /*
+ * Never go deeper than the governor recommends, even though the
+ * scheduled wake-up might allow it. Search downwards from the
+ * recommended state (capped by the debug interface) for the deepest
+ * sleep state whose constraints are all met; after the loop, "i"
+ * holds that index.
+ */
+
+ for (i = min(gov_cstate, ux500_ci_dbg_deepest_state()); i > 0; i--) {
+
+ if (sleep_time <= cstates[i].threshold)
+ continue;
+
+ if ((cstates[i].ARM != ARM_ON) &&
+ !cores_timer_migrated())
+ /*
+ * This sleep state needs timer migration,
+ * but the other cpu has not migrated its timer
+ */
+ continue;
+
+ if (cstates[i].APE == APE_OFF) {
+ /* This state says APE should be off */
+ if (power_state_req ||
+ ux500_ci_dbg_force_ape_on())
+ continue;
+ }
+
+ if ((cstates[i].ARM == ARM_OFF) &&
+ (!cores_ready_deep_sleep()))
+ continue;
+
+ /* OK state */
+ break;
+ }
+
+ return max(0, i);
+}
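+
+/*
+ * Worked example (illustrative): with both cpus idle, the governor
+ * recommending ApSleep with UL PLL off and a predicted sleep of 2 ms,
+ * determine_sleep_state() rejects that state (threshold 18100 us) and
+ * settles on plain ApSleep (threshold 1200 us), provided no power-state
+ * requirement is active and the other core has migrated its timer.
+ */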
+
+static int enter_sleep(struct cpuidle_device *dev,
+ struct cpuidle_state *ci_state)
+{
+ ktime_t t1, t2;
+ s64 diff;
+ int ret;
+ bool pending_int;
+ int cpu;
+ int target;
+ int gov_cstate;
+ struct cpu_state *state;
+
+ unsigned long iflags;
+ int idle_cpus;
+ u32 divps_rate;
+
+ local_irq_disable();
+
+ t1 = ktime_get(); /* Time now */
+
+ state = per_cpu(cpu_state, smp_processor_id());
+
+ /* Save scheduled wake up for this cpu */
+ spin_lock_irqsave(&cpuidle_lock, iflags);
+ state->sched_wake_up = ktime_add(t1, tick_nohz_get_sleep_length());
+ spin_unlock_irqrestore(&cpuidle_lock, iflags);
+
+ /*
+ * Retrieve the cstate that the governor recommends
+ * for this CPU
+ */
+ gov_cstate = (int) cpuidle_get_statedata(ci_state);
+
+ idle_cpus = atomic_inc_return(&idle_cpus_counter);
+
+ /*
+ * Determine sleep state considering both CPUs and
+ * shared resources like e.g. VAPE
+ */
+ target = determine_sleep_state(idle_cpus,
+ gov_cstate);
+ if (target < 0) {
+ /* "target" will be last_state in the cpuidle framework */
+ target = 0;
+ goto exit;
+ }
+
+ if (cstates[target].ARM == ARM_ON) {
+
+ if (cstates[gov_cstate].ARM == ARM_OFF) {
+
+ ux500_ci_dbg_msg("WFI_prep");
+
+ /*
+ * Cannot turn off the ARM now, but it might be
+ * possible later, so prepare for it by saving
+ * the cpu context etc. already now.
+ */
+
+ /*
+ * ARM timers will stop during ARM retention or
+ * ARM off mode. Use always-on-timer instead.
+ */
+ migrate_to_always_on_timer(state);
+
+ context_varm_save_core();
+ context_save_cpu_registers();
+ state->ready_deep_sleep = true;
+ smp_wmb();
+
+ /*
+ * Save return address to SRAM and set this
+ * CPU in WFI
+ */
+ ux500_ci_dbg_log(CI_WFI, smp_processor_id());
+ context_save_to_sram_and_wfi(false);
+
+ state->ready_deep_sleep = false;
+ smp_wmb();
+
+ context_restore_cpu_registers();
+
+ } else if (cstates[gov_cstate].ARM != ARM_ON) {
+
+ /*
+ * Cannot go to ApIdle or deeper now, but it
+ * might be possible later, so prepare for it
+ */
+
+ /*
+ * ARM timers will stop during ARM retention or
+ * ARM off mode. Use always-on-timer instead.
+ */
+ migrate_to_always_on_timer(state);
+
+ ux500_ci_dbg_msg("WFI_prep2");
+ ux500_ci_dbg_log(CI_WFI, smp_processor_id());
+ __asm__ __volatile__
+ ("dsb\n\t" "wfi\n\t" : : : "memory");
+
+ } else { /* Just WFI */
+
+ ux500_ci_dbg_msg("WFI");
+ ux500_ci_dbg_log(CI_WFI, smp_processor_id());
+ __asm__ __volatile__
+ ("dsb\n\t" "wfi\n\t" : : : "memory");
+ }
+
+ restore_sequence(state);
+ goto exit;
+ }
+
+ /* Decouple GIC from the interrupt bus */
+ ux500_pm_gic_decouple();
+
+ if (!ux500_pm_other_cpu_wfi()) {
+ /* Other CPU was not in WFI => abort */
+ ux500_pm_gic_recouple();
+ migrate_to_local_timer(state);
+
+ goto exit;
+ }
+
+ prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
+ PRCMU_WAKEUP(ABB));
+
+ migrate_to_always_on_timer(state);
+
+ /* Check pending interrupts */
+ pending_int = ux500_pm_gic_pending_interrupt();
+
+ idle_cpus = atomic_read(&idle_cpus_counter);
+
+ /*
+ * Check if we have a pending interrupt or if sleep
+ * state has changed after GIC has been frozen
+ */
+ if (pending_int ||
+ (target != determine_sleep_state(idle_cpus, gov_cstate))) {
+ /* Pending interrupt or sleep state has changed => abort */
+
+ /* Recouple GIC with the interrupt bus */
+ ux500_pm_gic_recouple();
+ migrate_to_local_timer(state);
+
+ goto exit;
+ }
+
+ /*
+ * Copy GIC interrupt settings to
+ * PRCMU interrupt settings
+ */
+ ux500_pm_prcmu_copy_gic_settings();
+
+ /* Clean the cache before slowing down cpu frequency */
+ context_clean_l1_cache_all();
+
+ divps_rate = ux500_pm_arm_on_ext_clk(cstates[target].ARM_PLL);
+
+ if (ux500_pm_prcmu_pending_interrupt()) {
+
+ /* An interrupt found => abort */
+
+ ux500_pm_arm_on_arm_pll(divps_rate);
+
+ /* Recouple GIC with the interrupt bus */
+ ux500_pm_gic_recouple();
+
+ migrate_to_local_timer(state);
+
+ goto exit;
+ }
+
+ /*
+ * No PRCMU interrupt was pending => continue
+ * the sleeping stages
+ */
+
+ /* Compensate for ULPLL start up time */
+ if (cstates[target].UL_PLL == UL_PLL_OFF)
+ (void) u8500_rtc_adjust_next_wakeup(-UL_PLL_START_UP_LATENCY);
+
+ if (cstates[target].APE == APE_OFF) {
+
+ /*
+ * We are going to sleep or deep sleep =>
+ * prepare for it
+ */
+
+ context_vape_save();
+
+ ux500_ci_dbg_console_handle_ape_suspend();
+ ux500_pm_prcmu_set_ioforce(true);
+
+ spin_lock_irqsave(&cpuidle_lock, iflags);
+ restore_ape = true;
+ smp_wmb();
+ spin_unlock_irqrestore(&cpuidle_lock, iflags);
+ }
+
+ ux500_ci_dbg_log(target, smp_processor_id());
+
+ if (cstates[target].ARM == ARM_OFF) {
+
+ /* We are going to ApDeepSleep or ApDeepIdle */
+
+ /* Save gic settings */
+ context_varm_save_common();
+
+ context_varm_save_core();
+
+ spin_lock_irqsave(&cpuidle_lock, iflags);
+
+ restore_arm_common = true;
+ for_each_online_cpu(cpu) {
+ per_cpu(cpu_state, cpu)->restore_arm_core = true;
+ smp_wmb();
+ }
+
+ spin_unlock_irqrestore(&cpuidle_lock, iflags);
+
+ context_save_cpu_registers();
+
+ state->ready_deep_sleep = true;
+ smp_wmb();
+
+ if (cstates[target].APE == APE_OFF)
+ ux500_ci_dbg_msg("ApDeepSleep");
+ else
+ ux500_ci_dbg_msg("ApDeepIdle");
+
+ /*
+ * Because there are only 100 us between requesting a
+ * power state and wfi, we clean the cache here as
+ * well so that the final cache clean before wfi
+ * has as little as possible to do.
+ */
+ context_clean_l1_cache_all();
+
+ if (cstates[target].APE == APE_OFF) {
+ /* ApDeepSleep */
+ prcmu_set_power_state(PRCMU_AP_DEEP_SLEEP,
+ cstates[target].UL_PLL,
+ /* Is actually the AP PLL */
+ cstates[target].UL_PLL);
+ } else {
+ /* ApDeepIdle */
+ prcmu_set_power_state(PRCMU_AP_DEEP_IDLE,
+ cstates[target].UL_PLL,
+ /* Is actually the AP PLL */
+ cstates[target].UL_PLL);
+ }
+
+ /*
+ * Save return address to SRAM and set this CPU in WFI.
+ * This is last core to enter sleep, so we need to
+ * clean both L2 and L1 caches
+ */
+ context_save_to_sram_and_wfi(true);
+
+ } else if (cstates[target].APE == APE_OFF) {
+
+ /*
+ * Prepare for a possible future deep sleep. We do not
+ * need to save the varm common context at this stage,
+ * because we cannot go from ApSleep directly to
+ * ApDeepSleep without first waking up a CPU
+ */
+
+ context_varm_save_core();
+
+ context_save_cpu_registers();
+
+ state->ready_deep_sleep = true;
+ smp_wmb();
+
+ ux500_ci_dbg_msg("ApSleep");
+
+ /*
+ * Because there are only 100 us between requesting a
+ * power state and wfi, we clean the cache here as
+ * well so that the final cache clean before wfi
+ * has as little as possible to do.
+ */
+ context_clean_l1_cache_all();
+
+ /* ApSleep */
+ prcmu_set_power_state(PRCMU_AP_SLEEP,
+ cstates[target].UL_PLL,
+ /* Is actually the AP PLL */
+ cstates[target].UL_PLL);
+
+ /*
+ * Handle DDR, ULPLL, SOC PLL and ARM PLL via
+ * prcmu-API.
+ */
+
+ context_save_to_sram_and_wfi(false);
+
+ } else { /* We are going to Idle state */
+
+ context_varm_save_core();
+
+ context_save_cpu_registers();
+
+ state->ready_deep_sleep = true;
+ smp_wmb();
+
+ ux500_ci_dbg_msg("ApIdle");
+
+ /*
+ * Because there are only 100 us between requesting a
+ * power state and wfi, we clean the cache here as
+ * well so that the final cache clean before wfi
+ * has as little as possible to do.
+ */
+ context_clean_l1_cache_all();
+
+ /* ApIdle */
+ prcmu_set_power_state(PRCMU_AP_IDLE, true, true);
+
+ context_save_to_sram_and_wfi(false);
+
+ }
+
+ /* The PRCMU restores ARM PLL and recouples the GIC */
+
+ state->ready_deep_sleep = false;
+ smp_wmb();
+
+ context_restore_cpu_registers();
+ restore_sequence(state);
+
+exit:
+ prcmu_disable_wakeups();
+
+ ux500_ci_dbg_log(CI_RUNNING, smp_processor_id());
+
+ atomic_dec(&idle_cpus_counter);
+
+ /*
+ * We might have chosen another state than what the
+ * governor recommended
+ */
+ if (target != gov_cstate)
+ /* Update last state pointer used by CPUIDLE subsystem */
+ dev->last_state = &(dev->states[target]);
+
+ t2 = ktime_get();
+ diff = ktime_to_us(ktime_sub(t2, t1));
+ if (diff > INT_MAX)
+ diff = INT_MAX;
+
+ ret = (int)diff;
+
+ ux500_ci_dbg_console_check_uart();
+
+ local_irq_enable();
+
+ ux500_ci_dbg_console();
+
+ return ret;
+}
+
+static void init_cstates(struct cpu_state *state)
+{
+ int i;
+ struct cpuidle_state *ci_state;
+ struct cpuidle_device *dev;
+
+ dev = &state->dev;
+ dev->cpu = state->this_cpu;
+
+ for (i = 0; i < ARRAY_SIZE(cstates); i++) {
+
+ ci_state = &dev->states[i];
+
+ cpuidle_set_statedata(ci_state, (void *)i);
+
+ ci_state->exit_latency = cstates[i].exit_latency;
+ ci_state->target_residency = cstates[i].threshold;
+ ci_state->flags = cstates[i].flags;
+ ci_state->enter = enter_sleep;
+ ci_state->power_usage = cstates[i].power_usage;
+ snprintf(ci_state->name, CPUIDLE_NAME_LEN, "C%d", i);
+ strncpy(ci_state->desc, cstates[i].desc, CPUIDLE_DESC_LEN);
+ }
+
+ dev->state_count = ARRAY_SIZE(cstates);
+
+ dev->safe_state = &dev->states[0]; /* Currently not used */
+
+ if (cpuidle_register_device(dev)) {
+ printk(KERN_ERR "cpuidle %s: register device failed\n",
+ __func__);
+ return;
+ }
+
+ pr_debug("cpuidle driver initiated for CPU%d.\n", state->this_cpu);
+}
+
+struct cpuidle_driver cpuidle_drv = {
+ .name = "cpuidle_driver",
+ .owner = THIS_MODULE,
+};
+
+static int __init cpuidle_driver_init(void)
+{
+ int result = 0;
+ int cpu;
+
+ if (ux500_is_svp())
+ return -ENODEV;
+
+ ux500_ci_dbg_init();
+
+ for_each_possible_cpu(cpu) {
+ per_cpu(cpu_state, cpu) = kzalloc(sizeof(struct cpu_state),
+ GFP_KERNEL);
+ if (!per_cpu(cpu_state, cpu))
+ return -ENOMEM;
+ per_cpu(cpu_state, cpu)->this_cpu = cpu;
+ }
+
+ result = cpuidle_register_driver(&cpuidle_drv);
+ if (result < 0)
+ return result;
+
+ for_each_online_cpu(cpu)
+ init_cstates(per_cpu(cpu_state, cpu));
+
+ return result;
+}
+
+static void __exit cpuidle_driver_exit(void)
+{
+ int cpu;
+ struct cpuidle_device *dev;
+
+ ux500_ci_dbg_remove();
+
+ for_each_possible_cpu(cpu) {
+ dev = &per_cpu(cpu_state, cpu)->dev;
+ cpuidle_unregister_device(dev);
+ }
+
+ for_each_possible_cpu(cpu)
+ kfree(per_cpu(cpu_state, cpu));
+
+ cpuidle_unregister_driver(&cpuidle_drv);
+}
+
+module_init(cpuidle_driver_init);
+module_exit(cpuidle_driver_exit);
+
+MODULE_DESCRIPTION("U8500 cpuidle driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rickard Andersson <rickard.andersson@stericsson.com>");
diff --git a/arch/arm/mach-ux500/pm/cpuidle.h b/arch/arm/mach-ux500/pm/cpuidle.h
new file mode 100644
index 00000000000..6f3e713cf34
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/cpuidle.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com> for
+ * ST-Ericsson. Loosely based on cpuidle.c by Sundar Iyer.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#ifndef __CPUIDLE_H
+#define __CPUIDLE_H
+
+#include <linux/cpuidle.h>
+
+#include <mach/prcmu-fw-defs_v1.h>
+
+enum ARM {
+ ARM_OFF,
+ ARM_RET,
+ ARM_ON
+};
+
+enum APE {
+ APE_OFF,
+ APE_ON
+};
+
+enum ARM_PLL {
+ ARM_PLL_OFF = 0,
+ ARM_PLL_ON = 1
+};
+
+enum UL_PLL {
+ UL_PLL_OFF,
+ UL_PLL_ON
+};
+
+enum ESRAM {
+ ESRAM_OFF,
+ ESRAM_RET
+};
+
+enum ci_pwrst {
+ CI_WFI = 0,
+ CI_IDLE,
+ CI_SLEEP,
+ CI_DEEP_IDLE,
+ CI_DEEP_SLEEP,
+ CI_RUNNING = 255,
+};
+
+struct cstate {
+ /* Required state of different hardwares */
+ enum ARM ARM;
+ enum APE APE;
+ enum ARM_PLL ARM_PLL;
+ enum UL_PLL UL_PLL;
+ /* ESRAM = ESRAM_RET means that the ESRAM context is to be kept */
+ enum ESRAM ESRAM;
+
+ u32 enter_latency;
+ u32 exit_latency;
+ u32 power_usage;
+ u32 threshold;
+ u32 flags;
+ /* Only used for debugging purposes */
+ enum ci_pwrst state;
+ char desc[CPUIDLE_DESC_LEN];
+};
+
+struct cstate *ux500_ci_get_cstates(int *len);
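+
+/*
+ * Example (a sketch; caller code assumed, cpuidle_dbg.c does the same):
+ *
+ *	int len, i;
+ *	struct cstate *cs = ux500_ci_get_cstates(&len);
+ *
+ *	for (i = 0; i < len; i++)
+ *		pr_info("C%d: %s\n", i, cs[i].desc);
+ */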
+
+void ux500_cpuidle_plug(int cpu);
+void ux500_cpuidle_unplug(int cpu);
+
+#endif
diff --git a/arch/arm/mach-ux500/pm/cpuidle_dbg.c b/arch/arm/mach-ux500/pm/cpuidle_dbg.c
new file mode 100644
index 00000000000..6490827b4ac
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/cpuidle_dbg.c
@@ -0,0 +1,568 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com> for ST-Ericsson
+ * Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ */
+
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <plat/gpio.h>
+#include <asm/hardware/gic.h>
+
+#include "cpuidle.h"
+#include "pm.h"
+
+#define DBG_BUF_SIZE 5000
+#define APE_ON_TIMER_INTERVAL 5 /* Seconds */
+
+#define UART_RX_GPIO_PIN_MASK (1 << (CONFIG_UX500_CONSOLE_UART_GPIO_PIN % 32))
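+
+/*
+ * E.g. assuming CONFIG_UX500_CONSOLE_UART_GPIO_PIN = 29 (an example
+ * value), the mask above selects bit 29: the pin's position within its
+ * 32-pin bank's wake-up status register.
+ */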
+
+#define UART011_MIS_RTIS (1 << 6) /* receive timeout interrupt status */
+#define UART011_MIS_RXIS (1 << 4) /* receive interrupt status */
+#define UART011_MIS 0x40 /* Masked interrupt status register */
+
+struct state_history {
+ ktime_t start;
+ u32 state;
+ u32 *counter;
+ ktime_t *time;
+ spinlock_t lock;
+};
+static DEFINE_PER_CPU(struct state_history, *state_history);
+
+static struct delayed_work cpuidle_work;
+static u32 dbg_console_enable = 1;
+static void __iomem *uart_base;
+
+/* Blocks ApSleep and ApDeepSleep */
+static bool force_APE_on;
+static bool reset_timer;
+static int deepest_allowed_state = CONFIG_U8500_CPUIDLE_DEEPEST_STATE;
+
+static struct cstate *cstates;
+static int cstates_len;
+static DEFINE_SPINLOCK(dbg_lock);
+
+#ifdef U8500_CPUIDLE_EXTRA_DBG
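+/*
+ * Trace ring buffer. As an illustration (contents invented): after cpu0
+ * logs "WFI" and cpu1 then logs "ApSleep", dbg_buf holds
+ *
+ *	"\n 0:WFI\n 1:ApSleep:HEAD->"
+ *
+ * The ":HEAD->" marker is rewound and rewritten on every call, so it
+ * always tags the most recent entry.
+ */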
+void ux500_ci_dbg_msg(char *dbg_string)
+{
+ static char dbg_buf[DBG_BUF_SIZE];
+ static int index; /* protected by dbg_lock */
+ int str_len;
+ int smp_no_len;
+ int head_len;
+ unsigned long flags;
+ static const char * const smp_no_str = "\n %d:";
+ static const char * const head_str = ":HEAD->";
+
+ spin_lock_irqsave(&dbg_lock, flags);
+
+ str_len = strlen(dbg_string);
+ smp_no_len = strlen(smp_no_str);
+ head_len = strlen(head_str);
+
+ if (index > head_len)
+ /* Remove last head printing */
+ index -= head_len;
+
+ if ((index + str_len + smp_no_len + head_len) > DBG_BUF_SIZE)
+ index = 0; /* Imperfect wrapping... */
+
+ sprintf(&dbg_buf[index], smp_no_str, smp_processor_id());
+ index += smp_no_len;
+
+ strcpy(&dbg_buf[index], dbg_string);
+ index += str_len;
+
+ strcpy(&dbg_buf[index], head_str);
+ index += head_len;
+
+ spin_unlock_irqrestore(&dbg_lock, flags);
+}
+#endif
+
+bool ux500_ci_dbg_force_ape_on(void)
+{
+ return force_APE_on;
+}
+
+int ux500_ci_dbg_deepest_state(void)
+{
+ return deepest_allowed_state;
+}
+
+void ux500_ci_dbg_console_handle_ape_suspend(void)
+{
+ if (!dbg_console_enable)
+ return;
+
+ set_irq_wake(GPIO_TO_IRQ(CONFIG_UX500_CONSOLE_UART_GPIO_PIN), 1);
+ set_irq_type(GPIO_TO_IRQ(CONFIG_UX500_CONSOLE_UART_GPIO_PIN),
+ IRQ_TYPE_EDGE_BOTH);
+}
+
+void ux500_ci_dbg_console_handle_ape_resume(void)
+{
+ unsigned long flags;
+ u32 WKS_reg_value;
+
+ if (!dbg_console_enable)
+ return;
+
+ WKS_reg_value = ux500_pm_gpio_read_wake_up_status(0);
+
+ if (WKS_reg_value & UART_RX_GPIO_PIN_MASK) {
+ spin_lock_irqsave(&dbg_lock, flags);
+ reset_timer = true;
+ spin_unlock_irqrestore(&dbg_lock, flags);
+ }
+ set_irq_wake(GPIO_TO_IRQ(CONFIG_UX500_CONSOLE_UART_GPIO_PIN), 0);
+}
+
+void ux500_ci_dbg_console_check_uart(void)
+{
+ unsigned long flags;
+ u32 status;
+
+ if (!dbg_console_enable)
+ return;
+
+ spin_lock_irqsave(&dbg_lock, flags);
+ status = readw(uart_base + UART011_MIS);
+
+ if (status & (UART011_MIS_RTIS | UART011_MIS_RXIS)) {
+ reset_timer = true;
+ spin_unlock_irqrestore(&dbg_lock, flags);
+ } else {
+ spin_unlock_irqrestore(&dbg_lock, flags);
+ }
+}
+
+void ux500_ci_dbg_console(void)
+{
+ unsigned long flags;
+
+ if (!dbg_console_enable)
+ return;
+
+ spin_lock_irqsave(&dbg_lock, flags);
+ if (reset_timer) {
+ reset_timer = false;
+ spin_unlock_irqrestore(&dbg_lock, flags);
+
+ cancel_delayed_work(&cpuidle_work);
+ force_APE_on = true;
+ schedule_delayed_work(&cpuidle_work,
+ msecs_to_jiffies(APE_ON_TIMER_INTERVAL *
+ 1000));
+ } else {
+ spin_unlock_irqrestore(&dbg_lock, flags);
+ }
+}
+
+
+static void dbg_cpuidle_work_function(struct work_struct *work)
+{
+ force_APE_on = false;
+}
+
+void ux500_ci_dbg_log(int state, int this_cpu)
+{
+ int i;
+ ktime_t now;
+ ktime_t dtime;
+ unsigned long flags;
+ struct state_history *sh;
+ struct state_history *sh_other;
+
+ now = ktime_get();
+
+ sh = per_cpu(state_history, this_cpu);
+
+ spin_lock_irqsave(&sh->lock, flags);
+
+ dtime = ktime_sub(now, sh->start);
+ sh->time[sh->state] = ktime_add(sh->time[sh->state], dtime);
+
+ sh->start = now;
+
+ if (state == CI_RUNNING)
+ sh->state = cstates_len;
+ else
+ sh->state = state;
+ sh->counter[sh->state]++;
+
+ /*
+ * Update the other cpus (this_cpu = A, any other cpu = B):
+ * - A = running and B is neither WFI nor running: set B to WFI
+ * - A = WFI: B must be running, no changes
+ * - A neither WFI nor running: B must be WFI, so B is set to A's state
+ */
+
+ if (sh->state == CI_WFI)
+ goto done;
+
+ for_each_possible_cpu(i) {
+
+ if (this_cpu == i)
+ continue;
+
+ sh_other = per_cpu(state_history, i);
+
+ /* Same state, continue */
+ if (sh_other->state == sh->state)
+ continue;
+
+ if (state == CI_RUNNING && sh_other->state != CI_WFI) {
+ ux500_ci_dbg_log(CI_WFI, i);
+ continue;
+ }
+ /*
+ * This cpu is in a state other than running or WFI; both cpus
+ * must then be in the same state.
+ */
+ ux500_ci_dbg_log(state, i);
+ }
+done:
+ spin_unlock_irqrestore(&sh->lock, flags);
+}
+
+static ssize_t set_deepest_state(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ ssize_t buf_size;
+ unsigned long i;
+
+ /* Get userspace string and assure termination */
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ if (strict_strtoul(buf, 0, &i) != 0)
+ return buf_size;
+
+ if (i > cstates_len - 1)
+ i = cstates_len - 1;
+
+ deepest_allowed_state = i;
+
+ pr_debug("cpuidle: changed deepest allowed sleep state to %d.\n",
+ deepest_allowed_state);
+
+ return buf_size;
+}
+
+static int deepest_state_print(struct seq_file *s, void *p)
+{
+ seq_printf(s, "Deepest allowed sleep state is %d\n",
+ deepest_allowed_state);
+
+ return 0;
+}
+
+static ssize_t stats_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long flags;
+ unsigned int cpu;
+ int i;
+ struct state_history *sh;
+
+ printk(KERN_INFO "\nreset\n");
+
+ for_each_possible_cpu(cpu) {
+ sh = per_cpu(state_history, cpu);
+ spin_lock_irqsave(&sh->lock, flags);
+ for (i = 0; i <= cstates_len; i++) {
+ sh->counter[i] = 0;
+ sh->time[i] = ktime_set(0, 0);
+ }
+
+ for (i = 0; i <= cstates_len; i++)
+ sh->start = ktime_get();
+ sh->state = CI_RUNNING;
+ spin_unlock_irqrestore(&sh->lock, flags);
+ }
+
+ return count;
+}
+
+static int stats_print(struct seq_file *s, void *p)
+{
+ int cpu;
+ int i;
+ unsigned long flags;
+ struct state_history *sh;
+ ktime_t total;
+ s64 t_us;
+ s64 perc;
+ s64 total_us;
+
+ for_each_possible_cpu(cpu) {
+ sh = per_cpu(state_history, cpu);
+ spin_lock_irqsave(&sh->lock, flags);
+ seq_printf(s, "\nCPU%d\n", cpu);
+
+ total = ktime_set(0, 0);
+
+ for (i = 0; i <= cstates_len; i++)
+ total = ktime_add(total, sh->time[i]);
+ total_us = ktime_to_us(total);
+ do_div(total_us, 100);
+ if (!total_us)
+ total_us = 1; /* avoid division by zero right after a reset */
+
+ for (i = 0; i <= cstates_len; i++) {
+
+ t_us = ktime_to_us(sh->time[i]);
+ perc = ktime_to_us(sh->time[i]);
+ do_div(t_us, 1000); /* to ms */
+ do_div(perc, total_us);
+
+ if (i == cstates_len)
+ seq_printf(s, " - Running : # "
+ "%d in %d ms %d%%\n",
+ sh->counter[cstates_len],
+ (u32) t_us, (u32)perc);
+ else
+ seq_printf(s, "%d - %s: # %u in %d ms %d%%\n",
+ i, cstates[i].desc,
+ sh->counter[i],
+ (u32) t_us, (u32)perc);
+ }
+ spin_unlock_irqrestore(&sh->lock, flags);
+ }
+ return 0;
+}
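+
+/*
+ * Illustrative "stats" output (numbers invented), one block per cpu:
+ *
+ *	CPU0
+ *	0 - Wait for interrupt : # 1024 in 5021 ms 42%
+ *	...
+ *	 - Running : # 1337 in 6900 ms 57%
+ */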
+
+
+static int ap_family_show(struct seq_file *s, void *iter)
+{
+ int i;
+ u32 count = 0;
+ unsigned long flags;
+ struct state_history *sh;
+
+ sh = per_cpu(state_history, 0);
+ spin_lock_irqsave(&sh->lock, flags);
+
+ for (i = 0 ; i < cstates_len; i++) {
+ if (cstates[i].state == (enum ci_pwrst)s->private)
+ count += sh->counter[i];
+ }
+
+ seq_printf(s, "%u\n", count);
+ spin_unlock_irqrestore(&sh->lock, flags);
+
+ return 0;
+}
+
+static int deepest_state_open_file(struct inode *inode, struct file *file)
+{
+ return single_open(file, deepest_state_print, inode->i_private);
+}
+
+static int stats_open_file(struct inode *inode, struct file *file)
+{
+ return single_open(file, stats_print, inode->i_private);
+}
+
+
+static int ap_family_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ap_family_show, inode->i_private);
+}
+
+static const struct file_operations deepest_state_fops = {
+ .open = deepest_state_open_file,
+ .write = set_deepest_state,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations stats_fops = {
+ .open = stats_open_file,
+ .write = stats_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct file_operations ap_family_fops = {
+ .open = ap_family_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static struct dentry *cpuidle_dir;
+static struct dentry *deepest_state_file;
+static struct dentry *stats_file;
+static struct dentry *dbg_console_file;
+static struct dentry *apidle_file;
+static struct dentry *apsleep_file;
+static struct dentry *apdeepidle_file;
+static struct dentry *apdeepsleep_file;
+
+static void remove_debugfs(void)
+{
+ if (!IS_ERR_OR_NULL(dbg_console_file))
+ debugfs_remove(dbg_console_file);
+ if (!IS_ERR_OR_NULL(stats_file))
+ debugfs_remove(stats_file);
+ if (!IS_ERR_OR_NULL(deepest_state_file))
+ debugfs_remove(deepest_state_file);
+ if (!IS_ERR_OR_NULL(apidle_file))
+ debugfs_remove(apidle_file);
+ if (!IS_ERR_OR_NULL(apsleep_file))
+ debugfs_remove(apsleep_file);
+ if (!IS_ERR_OR_NULL(apdeepidle_file))
+ debugfs_remove(apdeepidle_file);
+ if (!IS_ERR_OR_NULL(apdeepsleep_file))
+ debugfs_remove(apdeepsleep_file);
+
+ if (!IS_ERR_OR_NULL(cpuidle_dir))
+ debugfs_remove(cpuidle_dir);
+}
+
+static void setup_debugfs(void)
+{
+ cpuidle_dir = debugfs_create_dir("cpuidle", NULL);
+ if (IS_ERR_OR_NULL(cpuidle_dir))
+ goto fail;
+
+ deepest_state_file = debugfs_create_file("deepest_state",
+ S_IWUGO | S_IRUGO, cpuidle_dir,
+ NULL, &deepest_state_fops);
+ if (IS_ERR_OR_NULL(deepest_state_file))
+ goto fail;
+
+ stats_file = debugfs_create_file("stats",
+ S_IRUGO, cpuidle_dir, NULL,
+ &stats_fops);
+ if (IS_ERR_OR_NULL(stats_file))
+ goto fail;
+
+ dbg_console_file = debugfs_create_bool("dbg_console_enable",
+ S_IWUGO | S_IRUGO, cpuidle_dir,
+ &dbg_console_enable);
+ if (IS_ERR_OR_NULL(dbg_console_file))
+ goto fail;
+
+ apidle_file = debugfs_create_file("ap_idle", S_IRUGO,
+ cpuidle_dir,
+ (void *)CI_IDLE,
+ &ap_family_fops);
+ if (IS_ERR_OR_NULL(apidle_file))
+ goto fail;
+
+ apsleep_file = debugfs_create_file("ap_sleep", S_IRUGO,
+ cpuidle_dir,
+ (void *)CI_SLEEP,
+ &ap_family_fops);
+ if (IS_ERR_OR_NULL(apsleep_file))
+ goto fail;
+
+ apdeepidle_file = debugfs_create_file("ap_deepidle", S_IRUGO,
+ cpuidle_dir,
+ (void *)CI_DEEP_IDLE,
+ &ap_family_fops);
+ if (IS_ERR_OR_NULL(apdeepidle_file))
+ goto fail;
+
+ apdeepsleep_file = debugfs_create_file("ap_deepsleep", S_IRUGO,
+ cpuidle_dir,
+ (void *)CI_DEEP_SLEEP,
+ &ap_family_fops);
+ if (IS_ERR_OR_NULL(apdeepsleep_file))
+ goto fail;
+
+ return;
+fail:
+ remove_debugfs();
+}
+
+void ux500_ci_dbg_init(void)
+{
+ int cpu;
+ struct state_history *sh;
+
+ cstates = ux500_ci_get_cstates(&cstates_len);
+
+ for_each_possible_cpu(cpu) {
+ per_cpu(state_history, cpu) = kzalloc(sizeof(struct state_history),
+ GFP_KERNEL);
+ sh = per_cpu(state_history, cpu);
+ sh->counter = kzalloc(sizeof(u32) * (cstates_len + 1),
+ GFP_KERNEL);
+ sh->time = kzalloc(sizeof(ktime_t) * (cstates_len + 1),
+ GFP_KERNEL);
+
+ spin_lock_init(&sh->lock);
+ /* Only first CPU used during boot */
+ if (cpu == 0)
+ sh->state = CI_RUNNING;
+ else
+ sh->state = CI_WFI;
+ sh->start = ktime_get();
+ }
+
+ setup_debugfs();
+
+ /* Uart debug init */
+ switch (CONFIG_UX500_DEBUG_UART) {
+ case 0:
+ uart_base = ioremap(U8500_UART0_BASE, SZ_4K);
+ break;
+ case 1:
+ uart_base = ioremap(U8500_UART1_BASE, SZ_4K);
+ break;
+ case 2:
+ uart_base = ioremap(U8500_UART2_BASE, SZ_4K);
+ break;
+ default:
+ uart_base = ioremap(U8500_UART2_BASE, SZ_4K);
+ break;
+ }
+
+ INIT_DELAYED_WORK(&cpuidle_work, dbg_cpuidle_work_function);
+}
+
+void ux500_ci_dbg_remove(void)
+{
+ int cpu;
+ struct state_history *sh;
+
+ remove_debugfs();
+
+ for_each_possible_cpu(cpu) {
+ sh = per_cpu(state_history, cpu);
+ kfree(sh->time);
+ kfree(sh->counter);
+ kfree(sh);
+ }
+
+ iounmap(uart_base);
+}
diff --git a/arch/arm/mach-ux500/pm/cpuidle_dbg.h b/arch/arm/mach-ux500/pm/cpuidle_dbg.h
new file mode 100644
index 00000000000..e614910e9bc
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/cpuidle_dbg.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com> for ST-Ericsson
+ * Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ */
+
+#ifndef CPUIDLE_DBG_H
+#define CPUIDLE_DBG_H
+
+#ifdef CONFIG_U8500_CPUIDLE_DEBUG
+void ux500_ci_dbg_init(void);
+void ux500_ci_dbg_remove(void);
+
+void ux500_ci_dbg_log(int state, int this_cpu);
+
+bool ux500_ci_dbg_force_ape_on(void);
+int ux500_ci_dbg_deepest_state(void);
+
+void ux500_ci_dbg_console(void);
+void ux500_ci_dbg_console_check_uart(void);
+void ux500_ci_dbg_console_handle_ape_resume(void);
+void ux500_ci_dbg_console_handle_ape_suspend(void);
+
+#ifdef U8500_CPUIDLE_EXTRA_DBG
+void ux500_ci_dbg_msg(char *dbg_string);
+#else
+static inline void ux500_ci_dbg_msg(char *dbg_string) { }
+#endif
+
+#else
+
+static inline void ux500_ci_dbg_init(void) { }
+static inline void ux500_ci_dbg_remove(void) { }
+
+static inline void ux500_ci_dbg_log(int state, int this_cpu) { }
+
+static inline bool ux500_ci_dbg_force_ape_on(void)
+{
+ return false;
+}
+
+static inline int ux500_ci_dbg_deepest_state(void)
+{
+ /* This means no lower sleep state than ApIdle */
+ return CONFIG_U8500_CPUIDLE_DEEPEST_STATE;
+}
+
+static inline void ux500_ci_dbg_console(void) { }
+static inline void ux500_ci_dbg_console_check_uart(void) { }
+static inline void ux500_ci_dbg_console_handle_ape_resume(void) { }
+static inline void ux500_ci_dbg_console_handle_ape_suspend(void) { }
+static inline void ux500_ci_dbg_msg(char *dbg_string) { }
+
+#endif
+#endif
diff --git a/arch/arm/mach-ux500/pm/pm.c b/arch/arm/mach-ux500/pm/pm.c
new file mode 100644
index 00000000000..06b8c270289
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/pm.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <asm/hardware/gic.h>
+
+#include <mach/hardware.h>
+#include <mach/prcmu-regs.h>
+#include <mach/gpio.h>
+
+#define STABILIZATION_TIME 30 /* us */
+
+#define PRCM_ARM_WFI_STANDBY_CPU0_WFI 0x8
+#define PRCM_ARM_WFI_STANDBY_CPU1_WFI 0x10
+
+static u32 u8500_gpio_banks[] = {U8500_GPIOBANK0_BASE,
+ U8500_GPIOBANK1_BASE,
+ U8500_GPIOBANK2_BASE,
+ U8500_GPIOBANK3_BASE,
+ U8500_GPIOBANK4_BASE,
+ U8500_GPIOBANK5_BASE,
+ U8500_GPIOBANK6_BASE,
+ U8500_GPIOBANK7_BASE,
+ U8500_GPIOBANK8_BASE};
+
+static u32 u5500_gpio_banks[] = {U5500_GPIOBANK0_BASE,
+ U5500_GPIOBANK1_BASE,
+ U5500_GPIOBANK2_BASE,
+ U5500_GPIOBANK3_BASE,
+ U5500_GPIOBANK4_BASE,
+ U5500_GPIOBANK5_BASE,
+ U5500_GPIOBANK6_BASE,
+ U5500_GPIOBANK7_BASE};
+
+static u32 ux500_gpio_wks[ARRAY_SIZE(u8500_gpio_banks)];
+#ifdef ENABLE_ARM_FREQ_RAMP
+/*
+ * Ramp down the ARM frequency in order to reduce voltage
+ * overshoot/undershoot
+ */
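+/*
+ * For example (v2.0 silicon, illustrative values): if divps_rate reads
+ * back as 15, the writes below step it to 11, then 5, then 2, pausing
+ * STABILIZATION_TIME (30 us) after each step so that the supply settles
+ * before the cpu is finally handed over to the external clock.
+ */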
+int ux500_pm_arm_on_ext_clk(bool leave_arm_pll_on)
+{
+ u32 val;
+ int divps_rate;
+
+ /*
+ * TODO: we should check that there is no ongoing OPP change,
+ * because then our writes could collide with the PRCMU's.
+ */
+
+ val = readl(PRCM_ARM_CHGCLKREQ);
+
+ if (val & PRCM_ARM_CHGCLKREQ_PRCM_ARM_CHGCLKREQ)
+ return -EINVAL;
+
+ val = readl(PRCM_ARM_PLLDIVPS);
+
+ /*
+ * TODO: Investigate if ramp down should start
+ * from current frequency.
+ */
+ if (cpu_is_u8500v20_or_later()) {
+
+ /*
+ * Store the current rate value. Is needed if
+ * we need to restore the frequency
+ */
+ divps_rate = val & PRCM_ARM_PLLDIVPS_ARM_BRM_RATE;
+ val &= ~PRCM_ARM_PLLDIVPS_ARM_BRM_RATE;
+
+ /* Slow down the CPUs */
+ if (divps_rate > 11) {
+ writel(val | 11, PRCM_ARM_PLLDIVPS);
+ /* Wait for voltage to stabilize */
+ udelay(STABILIZATION_TIME);
+ }
+ if (divps_rate > 5) {
+ writel(val | 5, PRCM_ARM_PLLDIVPS);
+ /* Wait for voltage to stabilize */
+ udelay(STABILIZATION_TIME);
+ }
+ if (divps_rate > 2)
+ writel(val | 2, PRCM_ARM_PLLDIVPS);
+ } else {
+
+ divps_rate = val & PRCM_ARM_PLLDIVPS_MAX_MASK;
+ val &= ~PRCM_ARM_PLLDIVPS_MAX_MASK;
+
+ /* Slow down the CPUs */
+ if (divps_rate < 3) {
+ writel(val | 3, PRCM_ARM_PLLDIVPS);
+ /* Wait for voltage to stabilize */
+ udelay(STABILIZATION_TIME);
+ }
+ if (divps_rate < 7) {
+ writel(val | 7, PRCM_ARM_PLLDIVPS);
+ /* Wait for voltage to stabilize */
+ udelay(STABILIZATION_TIME);
+ }
+ if (divps_rate < 15)
+ writel(val | 15, PRCM_ARM_PLLDIVPS);
+ }
+
+ /* switch to external clock */
+ writel(readl(PRCM_ARM_CHGCLKREQ) |
+ PRCM_ARM_CHGCLKREQ_PRCM_ARM_CHGCLKREQ,
+ PRCM_ARM_CHGCLKREQ);
+
+ val = readl(PRCM_PLLARM_ENABLE);
+
+ if (leave_arm_pll_on)
+ /* Leave ARM PLL on */
+ writel(val & (~PRCM_PLLARM_ENABLE_PRCM_PLLARM_COUNTON),
+ PRCM_PLLARM_ENABLE);
+ else
+ /* Stop ARM PLL */
+ writel(val & (~PRCM_PLLARM_ENABLE_PRCM_PLLARM_ENABLE),
+ PRCM_PLLARM_ENABLE);
+ return divps_rate;
+}
+#else
+inline int ux500_pm_arm_on_ext_clk(bool leave_arm_pll_on)
+{
+ return 0;
+}
+#endif
+
+#ifdef ENABLE_ARM_FREQ_RAMP
+void ux500_pm_arm_on_arm_pll(int divps_rate)
+{
+ u32 pll_arm;
+ u32 clk_req;
+ u32 val;
+
+ if (divps_rate < 0)
+ return;
+
+ clk_req = readl(PRCM_ARM_CHGCLKREQ);
+
+ /* Return, if not running on external pll */
+ if (!(clk_req & PRCM_ARM_CHGCLKREQ_PRCM_ARM_CHGCLKREQ))
+ return;
+
+ pll_arm = readl(PRCM_PLLARM_ENABLE);
+
+ if (pll_arm & PRCM_PLLARM_ENABLE_PRCM_PLLARM_ENABLE) {
+ /* ARM PLL is still on, set "counton" bit */
+ writel(pll_arm | PRCM_PLLARM_ENABLE_PRCM_PLLARM_COUNTON,
+ PRCM_PLLARM_ENABLE);
+ } else {
+ /* ARM PLL was stopped => turn on */
+ writel(pll_arm | PRCM_PLLARM_ENABLE_PRCM_PLLARM_ENABLE,
+ PRCM_PLLARM_ENABLE);
+
+ /* Wait for PLL to lock */
+ while (!(readl(PRCM_PLLARM_LOCKP) &
+ PRCM_PLLARM_LOCKP_PRCM_PLLARM_LOCKP3))
+ cpu_relax();
+ }
+
+ writel(clk_req & ~PRCM_ARM_CHGCLKREQ_PRCM_ARM_CHGCLKREQ,
+ PRCM_ARM_CHGCLKREQ);
+
+ val = readl(PRCM_ARM_PLLDIVPS);
+
+ if (cpu_is_u8500v20_or_later()) {
+ val &= ~PRCM_ARM_PLLDIVPS_ARM_BRM_RATE;
+
+		/* Ramp the ARM frequency back up */
+ if (divps_rate >= 2) {
+ writel(val | 2, PRCM_ARM_PLLDIVPS);
+ /* Wait for voltage to stabilize */
+ udelay(STABILIZATION_TIME);
+ }
+ if (divps_rate >= 5) {
+ writel(val | 5, PRCM_ARM_PLLDIVPS);
+ /* Wait for voltage to stabilize */
+ udelay(STABILIZATION_TIME);
+ }
+ if (divps_rate >= 11)
+ writel(val | 11, PRCM_ARM_PLLDIVPS);
+ } else {
+ val &= ~PRCM_ARM_PLLDIVPS_MAX_MASK;
+		/* Ramp the ARM frequency back up */
+ if (divps_rate <= 15) {
+ writel(val | 15, PRCM_ARM_PLLDIVPS);
+ /* Wait for voltage to stabilize */
+ udelay(STABILIZATION_TIME);
+ }
+ if (divps_rate <= 7) {
+ writel(val | 7, PRCM_ARM_PLLDIVPS);
+ /* Wait for voltage to stabilize */
+ udelay(STABILIZATION_TIME);
+ }
+ if (divps_rate <= 3)
+ writel(val | 3, PRCM_ARM_PLLDIVPS);
+ }
+}
+#else
+int ux500_pm_arm_on_ext_clk(bool leave_arm_pll_on)
+{
+	return 0;
+}
+
+void ux500_pm_arm_on_arm_pll(int divps_rate)
+{
+}
+#endif
+
+/* Decouple GIC from the interrupt bus */
+void ux500_pm_gic_decouple(void)
+{
+ writel(readl(PRCM_A9_MASK_REQ) | PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ,
+ PRCM_A9_MASK_REQ);
+
+ while (!readl(PRCM_A9_MASK_REQ))
+ cpu_relax();
+
+ /* TODO: Use the ack bit when possible */
+ udelay(100);
+}
+
+/* Recouple GIC with the interrupt bus */
+void ux500_pm_gic_recouple(void)
+{
+ writel((readl(PRCM_A9_MASK_REQ) & ~PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ),
+ PRCM_A9_MASK_REQ);
+
+ /* TODO: Use the ack bit when possible */
+}
+
+#define GIC_NUMBER_REGS 5
+bool ux500_pm_gic_pending_interrupt(void)
+{
+ u32 pr; /* Pending register */
+ u32 er; /* Enable register */
+ int i;
+
+	/* 5 registers; STI & PPI are not skipped */
+ for (i = 0; i < GIC_NUMBER_REGS; i++) {
+ pr = readl(__io_address(U8500_GIC_DIST_BASE) +
+ GIC_DIST_PENDING_SET + i * 4);
+ er = readl(__io_address(U8500_GIC_DIST_BASE) +
+ GIC_DIST_ENABLE_SET + i * 4);
+
+ if (pr & er)
+ return true; /* There is a pending interrupt */
+ }
+
+ return false;
+}
+
+#define GIC_NUMBER_SPI_REGS 4
+bool ux500_pm_prcmu_pending_interrupt(void)
+{
+ u32 it;
+ u32 im;
+ int i;
+
+ for (i = 0; i < GIC_NUMBER_SPI_REGS; i++) { /* There are 4 registers */
+ it = readl(PRCM_ARMITVAL31TO0 + i * 4);
+ im = readl(PRCM_ARMITMSK31TO0 + i * 4);
+
+ if (it & im)
+ return true; /* There is a pending interrupt */
+ }
+
+ return false;
+}
+
+void ux500_pm_prcmu_set_ioforce(bool enable)
+{
+ if (enable)
+ writel(readl(PRCM_IOCR) | PRCM_IOCR_IOFORCE, PRCM_IOCR);
+ else
+ writel(readl(PRCM_IOCR) & ~PRCM_IOCR_IOFORCE, PRCM_IOCR);
+}
+
+void ux500_pm_prcmu_copy_gic_settings(void)
+{
+ u32 er; /* Enable register */
+ int i;
+
+ for (i = 0; i < GIC_NUMBER_SPI_REGS; i++) { /* 4*32 SPI interrupts */
+		/* +1 since we skip STI and PPI */
+ er = readl(IO_ADDRESS(U8500_GIC_DIST_BASE) +
+ GIC_DIST_ENABLE_SET + (i + 1) * 4);
+ writel(er, PRCM_ARMITMSK31TO0 + i * 4);
+ }
+}
+
+void ux500_pm_gpio_save_wake_up_status(void)
+{
+ int num_banks;
+ u32 *banks;
+ int i;
+
+ if (cpu_is_u5500()) {
+ num_banks = ARRAY_SIZE(u5500_gpio_banks);
+ banks = u5500_gpio_banks;
+ } else {
+ num_banks = ARRAY_SIZE(u8500_gpio_banks);
+ banks = u8500_gpio_banks;
+ }
+
+ for (i = 0; i < num_banks; i++)
+ ux500_gpio_wks[i] = readl(IO_ADDRESS(banks[i]) + NMK_GPIO_WKS);
+}
+
+u32 ux500_pm_gpio_read_wake_up_status(unsigned int bank_num)
+{
+ if (WARN_ON(cpu_is_u5500() && bank_num >=
+ ARRAY_SIZE(u5500_gpio_banks)))
+ return 0;
+
+ if (WARN_ON(cpu_is_u8500() && bank_num >=
+ ARRAY_SIZE(u8500_gpio_banks)))
+ return 0;
+
+ return ux500_gpio_wks[bank_num];
+}
+
+/* Check if the other CPU is in WFI */
+bool ux500_pm_other_cpu_wfi(void)
+{
+ if (smp_processor_id()) {
+ /* We are CPU 1 => check if CPU0 is in WFI */
+ if (readl(PRCM_ARM_WFI_STANDBY) &
+ PRCM_ARM_WFI_STANDBY_CPU0_WFI)
+ return true;
+ } else {
+ /* We are CPU 0 => check if CPU1 is in WFI */
+ if (readl(PRCM_ARM_WFI_STANDBY) &
+ PRCM_ARM_WFI_STANDBY_CPU1_WFI)
+ return true;
+ }
+
+ return false;
+}
diff --git a/arch/arm/mach-ux500/pm/pm.h b/arch/arm/mach-ux500/pm/pm.h
new file mode 100644
index 00000000000..e8f67183682
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/pm.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com> for
+ * ST-Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#ifndef PM_COMMON_H
+#define PM_COMMON_H
+
+#ifdef CONFIG_PM
+/**
+ * ux500_pm_arm_on_ext_clk()
+ *
+ * @leave_arm_pll_on: True to leave the ARM PLL on.
+ *
+ * Returns the divps rate, to be used as input to ux500_pm_arm_on_arm_pll(),
+ * or -EINVAL if already running on the external clock.
+ */
+int ux500_pm_arm_on_ext_clk(bool leave_arm_pll_on);
+
+/**
+ * ux500_pm_arm_on_arm_pll()
+ *
+ * @divps_rate: Rate returned by ux500_pm_arm_on_ext_clk().
+ *
+ * Restores the previous ARM PLL settings.
+ */
+void ux500_pm_arm_on_arm_pll(int divps_rate);
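+
+/*
+ * Sketch of how the two calls above pair up (based on the suspend path;
+ * illustrative only, error handling elided):
+ *
+ *	int divps_rate = ux500_pm_arm_on_ext_clk(false);
+ *
+ *	if (divps_rate >= 0) {
+ *		... enter the low power state ...
+ *		ux500_pm_arm_on_arm_pll(divps_rate);
+ *	}
+ */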
+
+/**
+ * ux500_pm_gic_decouple()
+ *
+ * Decouple GIC from the interrupt bus.
+ */
+void ux500_pm_gic_decouple(void);
+
+/**
+ * ux500_pm_gic_recouple()
+ *
+ * Recouple GIC with the interrupt bus.
+ */
+void ux500_pm_gic_recouple(void);
+
+/**
+ * ux500_pm_gic_pending_interrupt()
+ *
+ * Returns true if an interrupt is pending in the GIC.
+ */
+bool ux500_pm_gic_pending_interrupt(void);
+
+/**
+ * ux500_pm_prcmu_pending_interrupt()
+ *
+ * Returns true if an interrupt is pending in the PRCMU.
+ */
+bool ux500_pm_prcmu_pending_interrupt(void);
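+
+/*
+ * Sketch of the race-free sleep entry used by the suspend path: decouple
+ * the GIC, then abort if a wakeup is already pending (illustration only):
+ *
+ *	ux500_pm_gic_decouple();
+ *	if (ux500_pm_gic_pending_interrupt()) {
+ *		ux500_pm_gic_recouple();
+ *		return -EBUSY;
+ *	}
+ *	... request power state and do wfi ...
+ */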
+
+/**
+ * ux500_pm_prcmu_set_ioforce()
+ *
+ * @enable: True to enable, false to disable.
+ *
+ * Enable/disable the GPIO ring (ioforce).
+ */
+void ux500_pm_prcmu_set_ioforce(bool enable);
+
+/**
+ * ux500_pm_prcmu_copy_gic_settings()
+ *
+ * This function copies all the GIC interrupt settings to the PRCMU.
+ * This is needed for the system to catch interrupts in ApIdle.
+ */
+void ux500_pm_prcmu_copy_gic_settings(void);
+
+/**
+ * ux500_pm_gpio_save_wake_up_status()
+ *
+ * This function is called when the PRCMU has woken the ARM,
+ * but before ioforce is disabled.
+ */
+void ux500_pm_gpio_save_wake_up_status(void);
+
+/**
+ * ux500_pm_gpio_read_wake_up_status()
+ *
+ * @bank_number: The GPIO bank.
+ *
+ * Returns the WKS register contents for the given bank number.
+ * The WKS register is cleared when ioforce is released, which is
+ * why this function is needed.
+ */
+u32 ux500_pm_gpio_read_wake_up_status(unsigned int bank_number);
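+
+/*
+ * Illustration (hypothetical resume code; "pin" is an assumed variable):
+ *
+ *	u32 wks = ux500_pm_gpio_read_wake_up_status(0);
+ *
+ *	if (wks & BIT(pin))
+ *		... a pin in bank 0 caused the wake up ...
+ */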
+
+/**
+ * ux500_pm_other_cpu_wfi()
+ *
+ * Returns true if the other CPU is in WFI.
+ */
+bool ux500_pm_other_cpu_wfi(void);
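+
+/*
+ * Sketch: cpuidle can use this to decide whether a deeper, chip-wide
+ * state may be requested (illustration only):
+ *
+ *	if (ux500_pm_other_cpu_wfi())
+ *		... both CPUs idle, a deeper state is possible ...
+ */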
+
+#else
+static inline u32 ux500_pm_gpio_read_wake_up_status(unsigned int bank_number)
+{
+	return 0;
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-ux500/pm/runtime.c b/arch/arm/mach-ux500/pm/runtime.c
new file mode 100644
index 00000000000..cb6801b04b1
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/runtime.c
@@ -0,0 +1,297 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ *
+ * Based on:
+ * Runtime PM support code for SuperH Mobile ARM
+ * Copyright (C) 2009-2010 Magnus Damm
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+#include <linux/clk.h>
+#include <plat/pincfg.h>
+#include <mach/regulator.h>
+
+#include "../pins.h"
+
+#ifdef CONFIG_PM_RUNTIME
+/* Bits in pm_runtime_data.flags */
+#define BIT_ONCE	0	/* Init has run once for this device */
+#define BIT_ACTIVE	1	/* Device has pins and/or a regulator to manage */
+#define BIT_ENABLED	2	/* Pins/regulator are currently enabled */
+
+struct pm_runtime_data {
+ unsigned long flags;
+ struct ux500_regulator *regulator;
+ struct ux500_pins *pins;
+};
+
+static void __devres_release(struct device *dev, void *res)
+{
+ struct pm_runtime_data *prd = res;
+
+ dev_dbg(dev, "__devres_release()\n");
+
+ if (test_bit(BIT_ENABLED, &prd->flags)) {
+ if (prd->pins)
+ ux500_pins_disable(prd->pins);
+ if (prd->regulator)
+ ux500_regulator_atomic_disable(prd->regulator);
+ }
+
+ if (test_bit(BIT_ACTIVE, &prd->flags)) {
+ if (prd->pins)
+ ux500_pins_put(prd->pins);
+ if (prd->regulator)
+ ux500_regulator_put(prd->regulator);
+ }
+}
+
+static struct pm_runtime_data *__to_prd(struct device *dev)
+{
+ return devres_find(dev, __devres_release, NULL, NULL);
+}
+
+static void platform_pm_runtime_init(struct device *dev,
+ struct pm_runtime_data *prd)
+{
+ if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) {
+ prd->pins = ux500_pins_get(dev_name(dev));
+
+ prd->regulator = ux500_regulator_get(dev);
+ if (IS_ERR(prd->regulator))
+ prd->regulator = NULL;
+
+ if (prd->pins || prd->regulator) {
+ dev_info(dev, "managed by runtime pm: %s%s\n",
+ prd->pins ? "pins " : "",
+ prd->regulator ? "regulator " : "");
+
+ set_bit(BIT_ACTIVE, &prd->flags);
+ }
+ }
+}
+
+static void platform_pm_runtime_bug(struct device *dev,
+ struct pm_runtime_data *prd)
+{
+ if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags))
+ dev_err(dev, "runtime pm suspend before resume\n");
+}
+
+static int ux500_pd_runtime_suspend(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ platform_pm_runtime_bug(dev, prd);
+
+ if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+ if (prd->pins)
+ ux500_pins_disable(prd->pins);
+
+ if (prd->regulator)
+ ux500_regulator_atomic_disable(prd->regulator);
+
+ clear_bit(BIT_ENABLED, &prd->flags);
+ }
+
+ return 0;
+}
+
+static int ux500_pd_runtime_resume(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ platform_pm_runtime_init(dev, prd);
+
+ if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+ if (prd->pins)
+ ux500_pins_enable(prd->pins);
+
+ if (prd->regulator)
+ ux500_regulator_atomic_enable(prd->regulator);
+
+ set_bit(BIT_ENABLED, &prd->flags);
+ }
+
+ return 0;
+}
+
+static int ux500_pd_suspend_noirq(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ /* Only handle devices that use runtime pm */
+ if (!prd || !test_bit(BIT_ONCE, &prd->flags))
+ return 0;
+
+	/* Already runtime suspended? Nothing to do. */
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+	/*
+	 * We get here only if the device was not runtime suspended for
+	 * some reason.  We still need to do the power saving when going
+	 * into system suspend, so force it here.
+	 */
+ return ux500_pd_runtime_suspend(dev);
+}
+
+static int ux500_pd_resume_noirq(struct device *dev)
+{
+ struct pm_runtime_data *prd = __to_prd(dev);
+
+ dev_vdbg(dev, "%s()\n", __func__);
+
+ /* Only handle devices that use runtime pm */
+ if (!prd || !test_bit(BIT_ONCE, &prd->flags))
+ return 0;
+
+	/*
+	 * Was the device already runtime suspended?  No need to resume
+	 * here; runtime resume will take care of it.
+	 */
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+	/*
+	 * We get here only if the device was not runtime suspended,
+	 * but we forced it down in suspend_noirq above.  Bring it back
+	 * up, since the runtime PM core thinks it is not suspended.
+	 */
+ return ux500_pd_runtime_resume(dev);
+}
+
+static int ux500_pd_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct pm_runtime_data *prd;
+
+ dev_dbg(dev, "%s() %ld !\n", __func__, action);
+
+ if (action == BUS_NOTIFY_BIND_DRIVER) {
+ prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL);
+ if (prd)
+ devres_add(dev, prd);
+ else
+ dev_err(dev, "unable to alloc memory for runtime pm\n");
+ }
+
+ return 0;
+}
+
+#else /* CONFIG_PM_RUNTIME */
+
+#define ux500_pd_suspend_noirq NULL
+#define ux500_pd_resume_noirq NULL
+#define ux500_pd_runtime_suspend NULL
+#define ux500_pd_runtime_resume NULL
+
+static int ux500_pd_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct ux500_regulator *regulator = NULL;
+ struct ux500_pins *pins = NULL;
+ struct device *dev = data;
+ const char *onoff = NULL;
+
+ dev_dbg(dev, "%s() %ld !\n", __func__, action);
+
+ switch (action) {
+ case BUS_NOTIFY_BIND_DRIVER:
+ pins = ux500_pins_get(dev_name(dev));
+ if (pins) {
+ ux500_pins_enable(pins);
+ ux500_pins_put(pins);
+ }
+
+ regulator = ux500_regulator_get(dev);
+ if (IS_ERR(regulator))
+ regulator = NULL;
+ else {
+ ux500_regulator_atomic_enable(regulator);
+ ux500_regulator_put(regulator);
+ }
+
+ onoff = "on";
+ break;
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ pins = ux500_pins_get(dev_name(dev));
+ if (pins) {
+ ux500_pins_disable(pins);
+ ux500_pins_put(pins);
+ }
+
+ regulator = ux500_regulator_get(dev);
+ if (IS_ERR(regulator))
+ regulator = NULL;
+ else {
+ ux500_regulator_atomic_disable(regulator);
+ ux500_regulator_put(regulator);
+ }
+
+ onoff = "off";
+ break;
+ }
+
+ if (pins || regulator) {
+ dev_info(dev, "runtime pm disabled, forced %s: %s%s\n",
+ onoff,
+ pins ? "pins " : "",
+ regulator ? "regulator " : "");
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_PM_RUNTIME */
+
+struct dev_power_domain ux500_dev_power_domain = {
+ .ops = {
+ .suspend_noirq = ux500_pd_suspend_noirq,
+ .resume_noirq = ux500_pd_resume_noirq,
+ .runtime_suspend = ux500_pd_runtime_suspend,
+ .runtime_resume = ux500_pd_runtime_resume,
+ },
+};
+
+static struct notifier_block ux500_pd_platform_notifier = {
+ .notifier_call = ux500_pd_bus_notify,
+};
+
+static struct notifier_block ux500_pd_amba_notifier = {
+ .notifier_call = ux500_pd_bus_notify,
+};
+
+static int __init ux500_pm_runtime_platform_init(void)
+{
+ bus_register_notifier(&platform_bus_type, &ux500_pd_platform_notifier);
+ return 0;
+}
+core_initcall(ux500_pm_runtime_platform_init);
+
+/*
+ * The AMBA bus itself gets registered in a core_initcall, so we cannot
+ * use that here.
+ */
+static int __init ux500_pm_runtime_amba_init(void)
+{
+ bus_register_notifier(&amba_bustype, &ux500_pd_amba_notifier);
+ return 0;
+}
+arch_initcall(ux500_pm_runtime_amba_init);
diff --git a/arch/arm/mach-ux500/pm/suspend.c b/arch/arm/mach-ux500/pm/suspend.c
new file mode 100644
index 00000000000..02d5dd085e1
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/suspend.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) STMicroelectronics 2009
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Authors: Rickard Andersson <rickard.andersson@stericsson.com>,
+ * Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ *
+ */
+
+#include <linux/suspend.h>
+#include <linux/gpio.h>
+
+#include <linux/regulator/ab8500-debug.h>
+
+#include <mach/prcmu-fw-api.h>
+#include <mach/prcmu-regs.h>
+
+#include "context.h"
+#include "pm.h"
+#include "suspend_dbg.h"
+
+static atomic_t block_sleep = ATOMIC_INIT(0);
+
+void suspend_block_sleep(void)
+{
+ atomic_inc(&block_sleep);
+}
+
+void suspend_unblock_sleep(void)
+{
+ atomic_dec(&block_sleep);
+}
+
+static bool sleep_is_blocked(void)
+{
+ return (atomic_read(&block_sleep) != 0);
+}
+
+static int suspend(bool do_deepsleep)
+{
+	int divps_rate;
+
+ if (sleep_is_blocked()) {
+ pr_info("suspend/resume: interrupted by modem.\n");
+ return -EBUSY;
+ }
+
+ ux500_suspend_dbg_add_wake_on_uart();
+ nmk_gpio_wakeups_suspend();
+
+	/* Configure the PRCMU for wakeup from sleep */
+ prcmu_enable_wakeups(PRCMU_WAKEUP(ABB));
+
+ context_vape_save();
+
+ ux500_pm_gic_decouple();
+
+ divps_rate = ux500_pm_arm_on_ext_clk(false);
+
+ if (ux500_pm_gic_pending_interrupt()) {
+ prcmu_disable_wakeups();
+ nmk_gpio_wakeups_resume();
+ ux500_suspend_dbg_remove_wake_on_uart();
+
+ ux500_pm_arm_on_arm_pll(divps_rate);
+ /* Recouple GIC with the interrupt bus */
+ ux500_pm_gic_recouple();
+ pr_info("suspend/resume: pending interrupt\n");
+ return -EBUSY;
+ }
+ ux500_pm_prcmu_set_ioforce(true);
+
+ if (do_deepsleep) {
+ context_varm_save_common();
+ context_varm_save_core();
+ context_save_cpu_registers();
+
+		/*
+		 * Since there are only 100 us between requesting a power
+		 * state and wfi, we clean the cache here as well, so that
+		 * the final cache clean before wfi has as little as
+		 * possible to do.
+		 */
+ context_clean_l1_cache_all();
+
+ (void) prcmu_set_power_state(PRCMU_AP_DEEP_SLEEP,
+ false, false);
+ context_save_to_sram_and_wfi(true);
+
+ context_restore_cpu_registers();
+ context_varm_restore_core();
+ context_varm_restore_common();
+ } else {
+ context_clean_l1_cache_all();
+		(void) prcmu_set_power_state(PRCMU_AP_SLEEP,
+				false, false);
+ dsb();
+ __asm__ __volatile__("wfi\n\t" : : : "memory");
+ }
+
+ context_vape_restore();
+
+ /* If GPIO woke us up then save the pins that caused the wake up */
+ ux500_pm_gpio_save_wake_up_status();
+
+ ux500_suspend_dbg_sleep_status(do_deepsleep);
+
+ /* APE was turned off, restore IO ring */
+ ux500_pm_prcmu_set_ioforce(false);
+ prcmu_disable_wakeups();
+
+ nmk_gpio_wakeups_resume();
+ ux500_suspend_dbg_remove_wake_on_uart();
+
+ return 0;
+}
+
+static int ux500_suspend_enter(suspend_state_t state)
+{
+ if (ux500_suspend_enabled()) {
+ if (ux500_suspend_deepsleep_enabled() &&
+ state == PM_SUSPEND_MEM)
+ return suspend(true);
+ if (ux500_suspend_sleep_enabled())
+ return suspend(false);
+		/* For debugging: if Sleep and DeepSleep are disabled, do ApIdle */
+ prcmu_set_power_state(PRCMU_AP_IDLE, true, true);
+ }
+
+ dsb();
+ __asm__ __volatile__("wfi\n\t" : : : "memory");
+ return 0;
+}
+
+static int ux500_suspend_valid(suspend_state_t state)
+{
+ return state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY;
+}
+
+static int ux500_suspend_prepare_late(void)
+{
+ ab8500_regulator_debug_force();
+
+ return 0;
+}
+
+static void ux500_suspend_wake(void)
+{
+ ab8500_regulator_debug_restore();
+}
+
+static const struct platform_suspend_ops ux500_suspend_ops = {
+ .enter = ux500_suspend_enter,
+ .valid = ux500_suspend_valid,
+ .prepare_late = ux500_suspend_prepare_late,
+ .wake = ux500_suspend_wake,
+ .begin = ux500_suspend_dbg_begin,
+};
+
+static int __init ux500_suspend_init(void)
+{
+ ux500_suspend_dbg_init();
+ suspend_set_ops(&ux500_suspend_ops);
+ return 0;
+}
+
+device_initcall(ux500_suspend_init);
diff --git a/arch/arm/mach-ux500/pm/suspend_dbg.c b/arch/arm/mach-ux500/pm/suspend_dbg.c
new file mode 100644
index 00000000000..fce77b5cf73
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/suspend_dbg.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>,
+ * Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/suspend.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+#define DEEP_SLEEP_OK 0xf6
+#define SLEEP_OK 0xf3
+
+#define PRCMU_STATUS_REGISTER_V1 0x8015fe08
+#define PRCMU_STATUS_REGISTER_V2 0x801b8e08
+
+static void __iomem *prcmu_status_reg;
+
+#ifdef CONFIG_UX500_SUSPEND_STANDBY
+static u32 sleep_enabled = 1;
+#else
+static u32 sleep_enabled;
+#endif
+
+#ifdef CONFIG_UX500_SUSPEND_MEM
+static u32 deepsleep_enabled = 1;
+#else
+static u32 deepsleep_enabled;
+#endif
+
+static u32 suspend_enabled = 1;
+
+static u32 deepsleeps_done;
+static u32 deepsleeps_failed;
+static u32 sleeps_done;
+static u32 sleeps_failed;
+static u32 suspend_count;
+
+#ifdef CONFIG_UX500_SUSPEND_DBG_WAKE_ON_UART
+void ux500_suspend_dbg_add_wake_on_uart(void)
+{
+ set_irq_wake(GPIO_TO_IRQ(CONFIG_UX500_CONSOLE_UART_GPIO_PIN), 1);
+ set_irq_type(GPIO_TO_IRQ(CONFIG_UX500_CONSOLE_UART_GPIO_PIN),
+ IRQ_TYPE_EDGE_BOTH);
+}
+
+void ux500_suspend_dbg_remove_wake_on_uart(void)
+{
+ set_irq_wake(GPIO_TO_IRQ(CONFIG_UX500_CONSOLE_UART_GPIO_PIN), 0);
+}
+#endif
+
+bool ux500_suspend_enabled(void)
+{
+ return suspend_enabled != 0;
+}
+
+bool ux500_suspend_sleep_enabled(void)
+{
+ return sleep_enabled != 0;
+}
+
+bool ux500_suspend_deepsleep_enabled(void)
+{
+ return deepsleep_enabled != 0;
+}
+
+void ux500_suspend_dbg_sleep_status(bool is_deepsleep)
+{
+ u32 prcmu_status;
+
+ prcmu_status = readl(prcmu_status_reg) & 0xff;
+
+ if (is_deepsleep) {
+ pr_info("Returning from ApDeepSleep. PRCMU ret: 0x%x - %s\n",
+ prcmu_status,
+ prcmu_status == DEEP_SLEEP_OK ? "Success" : "Fail!");
+ if (prcmu_status == DEEP_SLEEP_OK)
+ deepsleeps_done++;
+ else
+ deepsleeps_failed++;
+ } else {
+ pr_info("Returning from ApSleep. PRCMU ret: 0x%x - %s\n",
+ prcmu_status,
+ prcmu_status == SLEEP_OK ? "Success" : "Fail!");
+ if (prcmu_status == SLEEP_OK)
+ sleeps_done++;
+ else
+ sleeps_failed++;
+ }
+}
+
+int ux500_suspend_dbg_begin(suspend_state_t state)
+{
+ suspend_count++;
+ return 0;
+}
+
+void ux500_suspend_dbg_init(void)
+{
+ struct dentry *suspend_dir = NULL;
+ struct dentry *sleep_file = NULL;
+ struct dentry *deepsleep_file = NULL;
+ struct dentry *enable_file = NULL;
+ struct dentry *suspend_count_file = NULL;
+ struct dentry *sleeps_done_file = NULL;
+ struct dentry *deepsleeps_done_file = NULL;
+ struct dentry *sleeps_failed_file = NULL;
+ struct dentry *deepsleeps_failed_file = NULL;
+
+	if (cpu_is_u8500v20_or_later())
+		prcmu_status_reg =
+			(void __iomem *)IO_ADDRESS(PRCMU_STATUS_REGISTER_V2);
+	else
+		prcmu_status_reg =
+			(void __iomem *)IO_ADDRESS(PRCMU_STATUS_REGISTER_V1);
+
+ suspend_dir = debugfs_create_dir("suspend", NULL);
+ if (IS_ERR_OR_NULL(suspend_dir))
+ return;
+
+ sleep_file = debugfs_create_bool("sleep", S_IWUGO | S_IRUGO,
+ suspend_dir,
+ &sleep_enabled);
+ if (IS_ERR_OR_NULL(sleep_file))
+ goto error;
+
+ deepsleep_file = debugfs_create_bool("deepsleep", S_IWUGO | S_IRUGO,
+ suspend_dir,
+ &deepsleep_enabled);
+ if (IS_ERR_OR_NULL(deepsleep_file))
+ goto error;
+
+ enable_file = debugfs_create_bool("enable", S_IWUGO | S_IRUGO,
+ suspend_dir,
+ &suspend_enabled);
+ if (IS_ERR_OR_NULL(enable_file))
+ goto error;
+
+ suspend_count_file = debugfs_create_u32("count", S_IRUGO,
+ suspend_dir,
+ &suspend_count);
+ if (IS_ERR_OR_NULL(suspend_count_file))
+ goto error;
+
+ sleeps_done_file = debugfs_create_u32("sleep_count", S_IRUGO,
+ suspend_dir,
+ &sleeps_done);
+ if (IS_ERR_OR_NULL(sleeps_done_file))
+ goto error;
+
+ deepsleeps_done_file = debugfs_create_u32("deepsleep_count", S_IRUGO,
+ suspend_dir,
+ &deepsleeps_done);
+ if (IS_ERR_OR_NULL(deepsleeps_done_file))
+ goto error;
+
+ sleeps_failed_file = debugfs_create_u32("sleep_failed", S_IRUGO,
+ suspend_dir,
+ &sleeps_failed);
+ if (IS_ERR_OR_NULL(sleeps_failed_file))
+ goto error;
+
+ deepsleeps_failed_file = debugfs_create_u32("deepsleep_failed", S_IRUGO,
+ suspend_dir,
+ &deepsleeps_failed);
+ if (IS_ERR_OR_NULL(deepsleeps_failed_file))
+ goto error;
+
+ return;
+error:
+	debugfs_remove_recursive(suspend_dir);
+}
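+
+/*
+ * The knobs above end up under <debugfs>/suspend/, e.g. (assuming debugfs
+ * is mounted at /sys/kernel/debug):
+ *
+ *	echo 0 > /sys/kernel/debug/suspend/enable	# block sleep states
+ *	cat /sys/kernel/debug/suspend/deepsleep_count
+ */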
diff --git a/arch/arm/mach-ux500/pm/suspend_dbg.h b/arch/arm/mach-ux500/pm/suspend_dbg.h
new file mode 100644
index 00000000000..46cbe6e7014
--- /dev/null
+++ b/arch/arm/mach-ux500/pm/suspend_dbg.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ *
+ */
+
+#ifndef UX500_SUSPEND_DBG_H
+#define UX500_SUSPEND_DBG_H
+
+#include <linux/kernel.h>
+#include <linux/suspend.h>
+
+#ifdef CONFIG_UX500_SUSPEND_DBG_WAKE_ON_UART
+void ux500_suspend_dbg_add_wake_on_uart(void);
+void ux500_suspend_dbg_remove_wake_on_uart(void);
+#else
+static inline void ux500_suspend_dbg_add_wake_on_uart(void) { }
+static inline void ux500_suspend_dbg_remove_wake_on_uart(void) { }
+#endif
+
+#ifdef CONFIG_UX500_SUSPEND_DBG
+bool ux500_suspend_enabled(void);
+bool ux500_suspend_sleep_enabled(void);
+bool ux500_suspend_deepsleep_enabled(void);
+void ux500_suspend_dbg_sleep_status(bool is_deepsleep);
+void ux500_suspend_dbg_init(void);
+int ux500_suspend_dbg_begin(suspend_state_t state);
+
+#else
+static inline bool ux500_suspend_enabled(void)
+{
+ return true;
+}
+static inline bool ux500_suspend_sleep_enabled(void)
+{
+#ifdef CONFIG_UX500_SUSPEND_STANDBY
+ return true;
+#else
+ return false;
+#endif
+}
+static inline bool ux500_suspend_deepsleep_enabled(void)
+{
+#ifdef CONFIG_UX500_SUSPEND_MEM
+ return true;
+#else
+ return false;
+#endif
+}
+static inline void ux500_suspend_dbg_sleep_status(bool is_deepsleep) { }
+static inline void ux500_suspend_dbg_init(void) { }
+
+#define ux500_suspend_dbg_begin NULL
+
+#endif
+
+#endif
diff --git a/arch/arm/mach-ux500/regulator-db8500.c b/arch/arm/mach-ux500/regulator-db8500.c
index edeef2def2c..c8d1670ab01 100644
--- a/arch/arm/mach-ux500/regulator-db8500.c
+++ b/arch/arm/mach-ux500/regulator-db8500.c
@@ -62,6 +62,88 @@ int power_state_active_is_enabled(void)
return (power_state_active_cnt > 0);
}
+struct ux500_regulator {
+ char *name;
+ void (*enable)(void);
+ int (*disable)(void);
+};
+
+/*
+ * Do not add any clients to this table without checking with the
+ * regulator maintainer!
+ */
+static struct ux500_regulator ux500_atomic_regulators[] = {
+ {
+ .name = "dma40.0",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "ssp0",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "ssp1",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "spi0",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "spi1",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "spi2",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+ {
+ .name = "spi3",
+ .enable = power_state_active_enable,
+ .disable = power_state_active_disable,
+ },
+};
+
+struct ux500_regulator *__must_check ux500_regulator_get(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ux500_atomic_regulators); i++) {
+ if (!strcmp(dev_name(dev), ux500_atomic_regulators[i].name))
+ return &ux500_atomic_regulators[i];
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+int ux500_regulator_atomic_enable(struct ux500_regulator *regulator)
+{
+ if (regulator) {
+ regulator->enable();
+ return 0;
+ }
+ return -EINVAL;
+}
+
+int ux500_regulator_atomic_disable(struct ux500_regulator *regulator)
+{
+ if (regulator)
+ return regulator->disable();
+ else
+ return -EINVAL;
+}
+
+void ux500_regulator_put(struct ux500_regulator *regulator)
+{
+	/* Here for symmetry and for possible future use */
+}
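+
+/*
+ * Typical client usage (a sketch mirroring pm/runtime.c; the "prd"
+ * pointer and error handling are illustrative):
+ *
+ *	prd->regulator = ux500_regulator_get(dev);
+ *	if (IS_ERR(prd->regulator))
+ *		prd->regulator = NULL;
+ *	...
+ *	ux500_regulator_atomic_enable(prd->regulator);
+ *	...
+ *	ux500_regulator_atomic_disable(prd->regulator);
+ *	ux500_regulator_put(prd->regulator);
+ */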
+
/**
* struct db8500_regulator_info - db8500 regulator information
* @dev: device pointer