From 8659f7569ca9d705059940ae86544cfafb95bac8 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Wed, 3 Aug 2011 18:12:05 +0100 Subject: ARM: 7009/1: l2x0: Add OF based initialization This adds probing for ARM L2x0 cache controllers via device tree. Support includes the L210, L220, and PL310 controllers. The binding allows setting up cache RAM latencies and filter addresses (PL310 only). Signed-off-by: Rob Herring Acked-by: Grant Likely Acked-by: Arnd Bergmann Acked-by: Olof Johansson Acked-by: Barry Song <21cnbao@gmail.com> Signed-off-by: Russell King --- Documentation/devicetree/bindings/arm/l2cc.txt | 42 ++++++++++ arch/arm/include/asm/hardware/cache-l2x0.h | 17 ++++ arch/arm/mm/cache-l2x0.c | 103 +++++++++++++++++++++++++ 3 files changed, 162 insertions(+) create mode 100644 Documentation/devicetree/bindings/arm/l2cc.txt diff --git a/Documentation/devicetree/bindings/arm/l2cc.txt b/Documentation/devicetree/bindings/arm/l2cc.txt new file mode 100644 index 00000000000..f50e021a099 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/l2cc.txt @@ -0,0 +1,42 @@ +* ARM L2 Cache Controller + +ARM cores often have a separate level 2 cache controller. There are various +implementations of the L2 cache controller with compatible programming models. +The ARM L2 cache representation in the device tree should be done as follows: + +Required properties: + +- compatible : should be one of: + "arm,pl310-cache" + "arm,l220-cache" + "arm,l210-cache" +- cache-unified : Specifies the cache is a unified cache. +- cache-level : Should be set to 2 for a level 2 cache. +- reg : Physical base address and size of cache controller's memory mapped + registers. + +Optional properties: + +- arm,data-latency : Cycles of latency for Data RAM accesses. Specifies 3 cells of + read, write and setup latencies. Minimum valid values are 1. Controllers + without setup latency control should use a value of 0. +- arm,tag-latency : Cycles of latency for Tag RAM accesses. Specifies 3 cells of + read, write and setup latencies. Controllers without setup latency control + should use 0. Controllers without separate read and write Tag RAM latency + values should only use the first cell. +- arm,dirty-latency : Cycles of latency for Dirty RAMs. This is a single cell. +- arm,filter-ranges : Starting address and length of window to + filter. Addresses in the filter window are directed to the M1 port. Other + addresses will go to the M0 port. 
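As a concrete illustration of the filter window (a sketch only; the addresses are made up), a board steering the upper region of the address map to the M1 port would add:

    arm,filter-ranges = <0x80000000 0x8000000>;

The PL310 filter registers work at 1MB granularity, so the driver rounds the start address down and the end of the window up to 1MB boundaries.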
+ +Example: + +L2: cache-controller { + compatible = "arm,pl310-cache"; + reg = <0xfff12000 0x1000>; + arm,data-latency = <1 1 1>; + arm,tag-latency = <2 2 2>; + arm,filter-latency = <0x80000000 0x8000000>; + cache-unified; + cache-level = <2>; +}; diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index 99a6ed7e1bf..c48cb1e1c46 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h @@ -52,6 +52,8 @@ #define L2X0_LOCKDOWN_WAY_D_BASE 0x900 #define L2X0_LOCKDOWN_WAY_I_BASE 0x904 #define L2X0_LOCKDOWN_STRIDE 0x08 +#define L2X0_ADDR_FILTER_START 0xC00 +#define L2X0_ADDR_FILTER_END 0xC04 #define L2X0_TEST_OPERATION 0xF00 #define L2X0_LINE_DATA 0xF10 #define L2X0_LINE_TAG 0xF30 @@ -67,6 +69,14 @@ #define L2X0_CACHE_ID_PART_L310 (3 << 6) #define L2X0_AUX_CTRL_MASK 0xc0000fff +#define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0 +#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK 0x7 +#define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT 3 +#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (0x7 << 3) +#define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT 6 +#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (0x7 << 6) +#define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT 9 +#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (0x7 << 9) #define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16 #define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17 #define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17) @@ -77,8 +87,15 @@ #define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29 #define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30 +#define L2X0_LATENCY_CTRL_SETUP_SHIFT 0 +#define L2X0_LATENCY_CTRL_RD_SHIFT 4 +#define L2X0_LATENCY_CTRL_WR_SHIFT 8 + +#define L2X0_ADDR_FILTER_EN 1 + #ifndef __ASSEMBLY__ extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask); +extern int l2x0_of_init(__u32 aux_val, __u32 aux_mask); #endif #endif diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 9ecfdb51195..db4484f5bf9 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -16,9 +16,12 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include #include #include #include +#include +#include #include #include @@ -372,3 +375,103 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n", ways, cache_id, aux, l2x0_size); } + +#ifdef CONFIG_OF +static void __init l2x0_of_setup(const struct device_node *np, + __u32 *aux_val, __u32 *aux_mask) +{ + u32 data[2] = { 0, 0 }; + u32 tag = 0; + u32 dirty = 0; + u32 val = 0, mask = 0; + + of_property_read_u32(np, "arm,tag-latency", &tag); + if (tag) { + mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK; + val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT; + } + + of_property_read_u32_array(np, "arm,data-latency", + data, ARRAY_SIZE(data)); + if (data[0] && data[1]) { + mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK | + L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK; + val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) | + ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT); + } + + of_property_read_u32(np, "arm,dirty-latency", &dirty); + if (dirty) { + mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK; + val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT; + } + + *aux_val &= ~mask; + *aux_val |= val; + *aux_mask &= ~mask; +} + +static void __init pl310_of_setup(const struct device_node *np, + __u32 *aux_val, __u32 *aux_mask) +{ + u32 data[3] = { 0, 0, 0 }; + u32 tag[3] = { 0, 0, 0 
}; + u32 filter[2] = { 0, 0 }; + + of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); + if (tag[0] && tag[1] && tag[2]) + writel_relaxed( + ((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) | + ((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) | + ((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT), + l2x0_base + L2X0_TAG_LATENCY_CTRL); + + of_property_read_u32_array(np, "arm,data-latency", + data, ARRAY_SIZE(data)); + if (data[0] && data[1] && data[2]) + writel_relaxed( + ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) | + ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) | + ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT), + l2x0_base + L2X0_DATA_LATENCY_CTRL); + + of_property_read_u32_array(np, "arm,filter-ranges", + filter, ARRAY_SIZE(filter)); + if (filter[0] && filter[1]) { + writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M), + l2x0_base + L2X0_ADDR_FILTER_END); + writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN, + l2x0_base + L2X0_ADDR_FILTER_START); + } +} + +static const struct of_device_id l2x0_ids[] __initconst = { + { .compatible = "arm,pl310-cache", .data = pl310_of_setup }, + { .compatible = "arm,l220-cache", .data = l2x0_of_setup }, + { .compatible = "arm,l210-cache", .data = l2x0_of_setup }, + {} +}; + +int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask) +{ + struct device_node *np; + void (*l2_setup)(const struct device_node *np, + __u32 *aux_val, __u32 *aux_mask); + + np = of_find_matching_node(NULL, l2x0_ids); + if (!np) + return -ENODEV; + l2x0_base = of_iomap(np, 0); + if (!l2x0_base) + return -ENOMEM; + + /* L2 configuration can only be changed if the cache is disabled */ + if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { + l2_setup = of_match_node(l2x0_ids, np)->data; + if (l2_setup) + l2_setup(np, &aux_val, &aux_mask); + } + l2x0_init(l2x0_base, aux_val, aux_mask); + return 0; +} +#endif -- cgit v1.2.3 From 9ce6bc6fbdac53391958cb3a9b12006d2ce98986 Mon Sep 17 00:00:00 2001 From: Barry Song <21cnbao@gmail.com> Date: Fri, 9 Sep 2011 10:30:34 +0100 Subject: ARM: 7089/1: L2X0: add explicit cpu_relax() for busy wait loop using cpu_relax in busy loops is a well-known idiom in the kernel. It's more for documentation purposes than technically needed here. 
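After the change the wait loop reads (as in the hunk below):

    while (readl_relaxed(reg) & mask)
        cpu_relax();

On ARM cpu_relax() is essentially a compiler barrier, so this is a readability change rather than a functional one.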
Signed-off-by: Barry Song Acked-by: Arnd Bergmann Reviewed-by: Jamie Iles Signed-off-by: Russell King --- arch/arm/mm/cache-l2x0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index db4484f5bf9..a78044885ed 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -37,7 +37,7 @@ static inline void cache_wait_way(void __iomem *reg, unsigned long mask) { /* wait for cache operation by line or way to complete */ while (readl_relaxed(reg) & mask) - ; + cpu_relax(); } #ifdef CONFIG_CACHE_PL310 -- cgit v1.2.3 From 730003001e05275d858934d35e66b045eb25c835 Mon Sep 17 00:00:00 2001 From: Barry Song <21cnbao@gmail.com> Date: Wed, 14 Sep 2011 03:20:01 +0100 Subject: ARM: 7090/1: CACHE-L2X0: filter start address can be 0 and is often 0 this patch fixes the error in Rob Herring's ARM: 7009/1: l2x0: Add OF based initialization http://www.spinics.net/lists/arm-kernel/msg131123.html it has been in rmk/for-next with commit 41c86ff5b Cc: Shawn Guo Cc: Arnd Bergmann Signed-off-by: Barry Song Acked-by: Rob Herring Signed-off-by: Russell King --- arch/arm/mm/cache-l2x0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index a78044885ed..0d85d221d7b 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -437,7 +437,7 @@ static void __init pl310_of_setup(const struct device_node *np, of_property_read_u32_array(np, "arm,filter-ranges", filter, ARRAY_SIZE(filter)); - if (filter[0] && filter[1]) { + if (filter[1]) { writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M), l2x0_base + L2X0_ADDR_FILTER_END); writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN, -- cgit v1.2.3 From 852f9d1db7401d6b44c646171d2238ca126487cf Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 17 Aug 2011 18:03:17 +0100 Subject: ARM: 7023/1: L2x0: Add interrupts property to OF binding Following the discussion here: http://lists.ozlabs.org/pipermail/devicetree-discuss/2011-August/007301.html The L2x0 L2 Cache Controllers support a combined interrupt line which can be used for several events (e.g. read/write/parity errors on tag/data RAM, event counter increment/overflow). Unfortunately the OF binding added in c519ecf2 ("ARM: 7009/1: l2x0: Add OF based initialization") does not represent the interrupt. This patch adds an "interrupts" property to the L2x0 OF binding, representing the combined interrupt line. Signed-off-by: Mark Rutland Acked-by: Rob Herring Acked-by: Will Deacon Cc: Grant Likely Cc: Arnd Bergmann Cc: Olof Johansson Cc: Barry Song <21cnbao@gmail.com> Signed-off-by: Russell King --- Documentation/devicetree/bindings/arm/l2cc.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/devicetree/bindings/arm/l2cc.txt b/Documentation/devicetree/bindings/arm/l2cc.txt index f50e021a099..7ca52161e7a 100644 --- a/Documentation/devicetree/bindings/arm/l2cc.txt +++ b/Documentation/devicetree/bindings/arm/l2cc.txt @@ -28,6 +28,7 @@ Optional properties: - arm,filter-ranges : Starting address and length of window to filter. Addresses in the filter window are directed to the M1 port. Other addresses will go to the M0 port. +- interrupts : 1 combined interrupt. 
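A platform that wants to service the combined line (for example to report tag/data RAM parity errors) would map and request it roughly as follows; this is a hedged sketch, np is the cache controller node, and l2x0_irq_handler is a hypothetical handler that is not part of this series:

    unsigned int irq = irq_of_parse_and_map(np, 0);
    if (irq)
        request_irq(irq, l2x0_irq_handler, 0, "l2x0", NULL);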
Example: @@ -39,4 +40,5 @@ L2: cache-controller { arm,filter-latency = <0x80000000 0x8000000>; cache-unified; cache-level = <2>; + interrupts = <45>; }; -- cgit v1.2.3 From fa5a990e62788b12c2c5eaedf4016ab904c4bf55 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Fri, 30 Sep 2011 14:43:12 +0100 Subject: ARM: 7114/1: cache-l2x0: add resume entry for l2 in secure mode we save the l2x0 registers at the first initialization, and platform codes can get them to restore l2x0 status after wakeup. Cc: Lorenzo Pieralisi Signed-off-by: Barry Song Reviewed-by: Santosh Shilimkar Tested-by: Shawn Guo Signed-off-by: Russell King --- arch/arm/include/asm/hardware/cache-l2x0.h | 25 ++++++ arch/arm/include/asm/outercache.h | 7 ++ arch/arm/kernel/asm-offsets.c | 12 +++ arch/arm/mm/cache-l2x0.c | 129 ++++++++++++++++++++++++++--- 4 files changed, 163 insertions(+), 10 deletions(-) diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index c48cb1e1c46..434edccdf7f 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h @@ -67,6 +67,13 @@ #define L2X0_CACHE_ID_PART_MASK (0xf << 6) #define L2X0_CACHE_ID_PART_L210 (1 << 6) #define L2X0_CACHE_ID_PART_L310 (3 << 6) +#define L2X0_CACHE_ID_RTL_MASK 0x3f +#define L2X0_CACHE_ID_RTL_R0P0 0x0 +#define L2X0_CACHE_ID_RTL_R1P0 0x2 +#define L2X0_CACHE_ID_RTL_R2P0 0x4 +#define L2X0_CACHE_ID_RTL_R3P0 0x5 +#define L2X0_CACHE_ID_RTL_R3P1 0x6 +#define L2X0_CACHE_ID_RTL_R3P2 0x8 #define L2X0_AUX_CTRL_MASK 0xc0000fff #define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0 @@ -96,6 +103,24 @@ #ifndef __ASSEMBLY__ extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask); extern int l2x0_of_init(__u32 aux_val, __u32 aux_mask); + +struct l2x0_regs { + unsigned long phy_base; + unsigned long aux_ctrl; + /* + * Whether the following registers need to be saved/restored + * depends on platform + */ + unsigned long tag_latency; + unsigned long data_latency; + unsigned long filter_start; + unsigned long filter_end; + unsigned long prefetch_ctrl; + unsigned long pwr_ctrl; +}; + +extern struct l2x0_regs l2x0_saved_regs; + #endif #endif diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h index d8387437ec5..53426c66352 100644 --- a/arch/arm/include/asm/outercache.h +++ b/arch/arm/include/asm/outercache.h @@ -34,6 +34,7 @@ struct outer_cache_fns { void (*sync)(void); #endif void (*set_debug)(unsigned long); + void (*resume)(void); }; #ifdef CONFIG_OUTER_CACHE @@ -74,6 +75,12 @@ static inline void outer_disable(void) outer_cache.disable(); } +static inline void outer_resume(void) +{ + if (outer_cache.resume) + outer_cache.resume(); +} + #else static inline void outer_inv_range(phys_addr_t start, phys_addr_t end) diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 16baba2e436..1429d8989fb 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -20,6 +20,7 @@ #include #include #include +#include #include /* @@ -92,6 +93,17 @@ int main(void) DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0)); DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs)); BLANK(); +#ifdef CONFIG_CACHE_L2X0 + DEFINE(L2X0_R_PHY_BASE, offsetof(struct l2x0_regs, phy_base)); + DEFINE(L2X0_R_AUX_CTRL, offsetof(struct l2x0_regs, aux_ctrl)); + DEFINE(L2X0_R_TAG_LATENCY, offsetof(struct l2x0_regs, tag_latency)); + DEFINE(L2X0_R_DATA_LATENCY, offsetof(struct l2x0_regs, data_latency)); + DEFINE(L2X0_R_FILTER_START, offsetof(struct l2x0_regs, 
filter_start)); + DEFINE(L2X0_R_FILTER_END, offsetof(struct l2x0_regs, filter_end)); + DEFINE(L2X0_R_PREFETCH_CTRL, offsetof(struct l2x0_regs, prefetch_ctrl)); + DEFINE(L2X0_R_PWR_CTRL, offsetof(struct l2x0_regs, pwr_ctrl)); + BLANK(); +#endif #ifdef CONFIG_CPU_HAS_ASID DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id)); BLANK(); diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 0d85d221d7b..3f9b9980478 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -33,6 +33,14 @@ static DEFINE_SPINLOCK(l2x0_lock); static uint32_t l2x0_way_mask; /* Bitmask of active ways */ static uint32_t l2x0_size; +struct l2x0_regs l2x0_saved_regs; + +struct l2x0_of_data { + void (*setup)(const struct device_node *, __u32 *, __u32 *); + void (*save)(void); + void (*resume)(void); +}; + static inline void cache_wait_way(void __iomem *reg, unsigned long mask) { /* wait for cache operation by line or way to complete */ @@ -280,7 +288,7 @@ static void l2x0_disable(void) spin_unlock_irqrestore(&l2x0_lock, flags); } -static void __init l2x0_unlock(__u32 cache_id) +static void l2x0_unlock(__u32 cache_id) { int lockregs; int i; @@ -356,6 +364,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) /* l2x0 controller is disabled */ writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL); + l2x0_saved_regs.aux_ctrl = aux; + l2x0_inv_all(); /* enable L2X0 */ @@ -445,33 +455,132 @@ static void __init pl310_of_setup(const struct device_node *np, } } +static void __init pl310_save(void) +{ + u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) & + L2X0_CACHE_ID_RTL_MASK; + + l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base + + L2X0_TAG_LATENCY_CTRL); + l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base + + L2X0_DATA_LATENCY_CTRL); + l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base + + L2X0_ADDR_FILTER_END); + l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base + + L2X0_ADDR_FILTER_START); + + if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) { + /* + * From r2p0, there is Prefetch offset/control register + */ + l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base + + L2X0_PREFETCH_CTRL); + /* + * From r3p0, there is Power control register + */ + if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0) + l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base + + L2X0_POWER_CTRL); + } +} + +static void l2x0_resume(void) +{ + if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { + /* restore aux ctrl and enable l2 */ + l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID)); + + writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base + + L2X0_AUX_CTRL); + + l2x0_inv_all(); + + writel_relaxed(1, l2x0_base + L2X0_CTRL); + } +} + +static void pl310_resume(void) +{ + u32 l2x0_revision; + + if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { + /* restore pl310 setup */ + writel_relaxed(l2x0_saved_regs.tag_latency, + l2x0_base + L2X0_TAG_LATENCY_CTRL); + writel_relaxed(l2x0_saved_regs.data_latency, + l2x0_base + L2X0_DATA_LATENCY_CTRL); + writel_relaxed(l2x0_saved_regs.filter_end, + l2x0_base + L2X0_ADDR_FILTER_END); + writel_relaxed(l2x0_saved_regs.filter_start, + l2x0_base + L2X0_ADDR_FILTER_START); + + l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) & + L2X0_CACHE_ID_RTL_MASK; + + if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) { + writel_relaxed(l2x0_saved_regs.prefetch_ctrl, + l2x0_base + L2X0_PREFETCH_CTRL); + if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0) + writel_relaxed(l2x0_saved_regs.pwr_ctrl, + l2x0_base + L2X0_POWER_CTRL); + } + } + + l2x0_resume(); +} + 
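/*
 * Minimal sketch of the intended consumer: platform wakeup code, once it
 * has regained control, re-enables the L2 through the hook registered by
 * l2x0_of_init() below.  my_platform_finish_resume() is hypothetical; the
 * real callers live in each platform's PM/resume code.
 */
static void my_platform_finish_resume(void)
{
	outer_resume();		/* dispatches to l2x0_resume()/pl310_resume() */
}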
+static const struct l2x0_of_data pl310_data = { + pl310_of_setup, + pl310_save, + pl310_resume, +}; + +static const struct l2x0_of_data l2x0_data = { + l2x0_of_setup, + NULL, + l2x0_resume, +}; + static const struct of_device_id l2x0_ids[] __initconst = { - { .compatible = "arm,pl310-cache", .data = pl310_of_setup }, - { .compatible = "arm,l220-cache", .data = l2x0_of_setup }, - { .compatible = "arm,l210-cache", .data = l2x0_of_setup }, + { .compatible = "arm,pl310-cache", .data = (void *)&pl310_data }, + { .compatible = "arm,l220-cache", .data = (void *)&l2x0_data }, + { .compatible = "arm,l210-cache", .data = (void *)&l2x0_data }, {} }; int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask) { struct device_node *np; - void (*l2_setup)(const struct device_node *np, - __u32 *aux_val, __u32 *aux_mask); + struct l2x0_of_data *data; + struct resource res; np = of_find_matching_node(NULL, l2x0_ids); if (!np) return -ENODEV; - l2x0_base = of_iomap(np, 0); + + if (of_address_to_resource(np, 0, &res)) + return -ENODEV; + + l2x0_base = ioremap(res.start, resource_size(&res)); if (!l2x0_base) return -ENOMEM; + l2x0_saved_regs.phy_base = res.start; + + data = of_match_node(l2x0_ids, np)->data; + /* L2 configuration can only be changed if the cache is disabled */ if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { - l2_setup = of_match_node(l2x0_ids, np)->data; - if (l2_setup) - l2_setup(np, &aux_val, &aux_mask); + if (data->setup) + data->setup(np, &aux_val, &aux_mask); } + + if (data->save) + data->save(); + l2x0_init(l2x0_base, aux_val, aux_mask); + + outer_cache.resume = data->resume; return 0; } #endif -- cgit v1.2.3 From 795978a9db59b0c5273af2ca32d430a3d07f304b Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Sun, 25 Sep 2011 08:25:43 +0100 Subject: ARM: 7100/1: smp_scu: remove __init annotation from scu_enable() When Cortex-A9 MPCore resumes from Dormant or Shutdown modes, SCU needs to be re-enabled. This patch removes __init annotation from function scu_enable(), so that platform resume procedure can call it to re-enable SCU. Signed-off-by: Shawn Guo Signed-off-by: Russell King --- arch/arm/kernel/smp_scu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c index 7fcddb75c87..8f5dd796335 100644 --- a/arch/arm/kernel/smp_scu.c +++ b/arch/arm/kernel/smp_scu.c @@ -34,7 +34,7 @@ unsigned int __init scu_get_core_count(void __iomem *scu_base) /* * Enable the SCU */ -void __init scu_enable(void __iomem *scu_base) +void scu_enable(void __iomem *scu_base) { u32 scu_ctrl; -- cgit v1.2.3 From adbb56c1e6bbbd1ee7b93b195c6f40ecadaab19a Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Fri, 11 Nov 2011 13:45:47 +0530 Subject: ARM: exynos4: Add support for AFTR mode cpuidle state This patch adds support for AFTR(ARM OFF TOP RUNNING) mode in cpuidle driver for EXYNOS4210. L2 cache keeps their data in this mode. This patch ports the code to the latest interfaces to save/restore CPU state inclusive of CPU PM notifiers, l2 resume and cpu_suspend/resume. 
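The entry path reduces to the standard power-down bracket (a condensed sketch of exynos4_enter_core0_aftr() in the diff below; wakeup-mask setup, register save/restore and the PMU error path are omitted):

    cpu_pm_enter();
    cpu_cluster_pm_enter();		/* CPU PM notifiers (GIC etc.) save state */
    cpu_suspend(0, idle_finisher);	/* core powers down; returns here on wakeup */
    scu_enable(S5P_VA_SCU);
    cpu_cluster_pm_exit();
    cpu_pm_exit();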
Signed-off-by: Jaecheol Lee Signed-off-by: Lorenzo Pieralisi Signed-off-by: Amit Daniel Kachhap --- arch/arm/mach-exynos4/cpuidle.c | 152 ++++++++++++++++++++++++++++++- arch/arm/mach-exynos4/include/mach/pmu.h | 2 + 2 files changed, 151 insertions(+), 3 deletions(-) diff --git a/arch/arm/mach-exynos4/cpuidle.c b/arch/arm/mach-exynos4/cpuidle.c index bf7e96f2793..6d527610c5a 100644 --- a/arch/arm/mach-exynos4/cpuidle.c +++ b/arch/arm/mach-exynos4/cpuidle.c @@ -11,22 +11,48 @@ #include #include #include +#include #include - +#include +#include #include +#include +#include +#include +#include +#include + +#include +#include + +#define REG_DIRECTGO_ADDR (exynos4_subrev() == 0 ?\ + (S5P_VA_SYSRAM + 0x24) : S5P_INFORM7) +#define REG_DIRECTGO_FLAG (exynos4_subrev() == 0 ?\ + (S5P_VA_SYSRAM + 0x20) : S5P_INFORM6) static int exynos4_enter_idle(struct cpuidle_device *dev, struct cpuidle_state *state); +static int exynos4_enter_lowpower(struct cpuidle_device *dev, + struct cpuidle_state *state); + static struct cpuidle_state exynos4_cpuidle_set[] = { [0] = { .enter = exynos4_enter_idle, .exit_latency = 1, .target_residency = 100000, .flags = CPUIDLE_FLAG_TIME_VALID, - .name = "IDLE", + .name = "C0", .desc = "ARM clock gating(WFI)", }, + [1] = { + .enter = exynos4_enter_lowpower, + .exit_latency = 300, + .target_residency = 100000, + .flags = CPUIDLE_FLAG_TIME_VALID, + .name = "C1", + .desc = "ARM power down", + }, }; static DEFINE_PER_CPU(struct cpuidle_device, exynos4_cpuidle_device); @@ -36,6 +62,96 @@ static struct cpuidle_driver exynos4_idle_driver = { .owner = THIS_MODULE, }; +/* Ext-GIC nIRQ/nFIQ is the only wakeup source in AFTR */ +static void exynos4_set_wakeupmask(void) +{ + __raw_writel(0x0000ff3e, S5P_WAKEUP_MASK); +} + +static unsigned int g_pwr_ctrl, g_diag_reg; + +static void save_cpu_arch_register(void) +{ + /*read power control register*/ + asm("mrc p15, 0, %0, c15, c0, 0" : "=r"(g_pwr_ctrl) : : "cc"); + /*read diagnostic register*/ + asm("mrc p15, 0, %0, c15, c0, 1" : "=r"(g_diag_reg) : : "cc"); + return; +} + +static void restore_cpu_arch_register(void) +{ + /*write power control register*/ + asm("mcr p15, 0, %0, c15, c0, 0" : : "r"(g_pwr_ctrl) : "cc"); + /*write diagnostic register*/ + asm("mcr p15, 0, %0, c15, c0, 1" : : "r"(g_diag_reg) : "cc"); + return; +} + +static int idle_finisher(unsigned long flags) +{ + cpu_do_idle(); + return 1; +} + +static int exynos4_enter_core0_aftr(struct cpuidle_device *dev, + struct cpuidle_state *state) +{ + struct timeval before, after; + int idle_time; + unsigned long tmp; + + local_irq_disable(); + do_gettimeofday(&before); + + exynos4_set_wakeupmask(); + + /* Set value of power down register for aftr mode */ + exynos4_sys_powerdown_conf(SYS_AFTR); + + save_cpu_arch_register(); + + /* Setting Central Sequence Register for power down mode */ + tmp = __raw_readl(S5P_CENTRAL_SEQ_CONFIGURATION); + tmp &= ~S5P_CENTRAL_LOWPWR_CFG; + __raw_writel(tmp, S5P_CENTRAL_SEQ_CONFIGURATION); + + cpu_pm_enter(); + cpu_cluster_pm_enter(); + + cpu_suspend(0, idle_finisher); + + scu_enable(S5P_VA_SCU); + + cpu_cluster_pm_exit(); + cpu_pm_exit(); + + restore_cpu_arch_register(); + + /* + * If PMU failed while entering sleep mode, WFI will be + * ignored by PMU and then exiting cpu_do_idle(). + * S5P_CENTRAL_LOWPWR_CFG bit will not be set automatically + * in this situation. 
+ */ + tmp = __raw_readl(S5P_CENTRAL_SEQ_CONFIGURATION); + if (!(tmp & S5P_CENTRAL_LOWPWR_CFG)) { + tmp |= S5P_CENTRAL_LOWPWR_CFG; + __raw_writel(tmp, S5P_CENTRAL_SEQ_CONFIGURATION); + } + + /* Clear wakeup state register */ + __raw_writel(0x0, S5P_WAKEUP_STAT); + + do_gettimeofday(&after); + + local_irq_enable(); + idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + + (after.tv_usec - before.tv_usec); + + return idle_time; +} + static int exynos4_enter_idle(struct cpuidle_device *dev, struct cpuidle_state *state) { @@ -55,6 +171,26 @@ static int exynos4_enter_idle(struct cpuidle_device *dev, return idle_time; } +static int exynos4_enter_lowpower(struct cpuidle_device *dev, + struct cpuidle_state *state) +{ + struct cpuidle_state *new_state = state; + + /* This mode only can be entered when Core1 is offline */ + if (num_online_cpus() > 1) { + BUG_ON(!dev->safe_state); + new_state = dev->safe_state; + } + dev->last_state = new_state; + + if (new_state == &dev->states[0]) + return exynos4_enter_idle(dev, new_state); + else + return exynos4_enter_core0_aftr(dev, new_state); + + return exynos4_enter_idle(dev, new_state); +} + static int __init exynos4_init_cpuidle(void) { int i, max_cpuidle_state, cpu_id; @@ -66,8 +202,11 @@ static int __init exynos4_init_cpuidle(void) device = &per_cpu(exynos4_cpuidle_device, cpu_id); device->cpu = cpu_id; - device->state_count = (sizeof(exynos4_cpuidle_set) / + if (cpu_id == 0) + device->state_count = (sizeof(exynos4_cpuidle_set) / sizeof(struct cpuidle_state)); + else + device->state_count = 1; /* Support IDLE only */ max_cpuidle_state = device->state_count; @@ -76,11 +215,18 @@ static int __init exynos4_init_cpuidle(void) sizeof(struct cpuidle_state)); } + device->safe_state = &device->states[0]; + if (cpuidle_register_device(device)) { printk(KERN_ERR "CPUidle register device failed\n,"); return -EIO; } } + + __raw_writel(BSYM(virt_to_phys(s3c_cpu_resume)), + REG_DIRECTGO_ADDR); + __raw_writel(0xfcba0d10, REG_DIRECTGO_FLAG); + return 0; } device_initcall(exynos4_init_cpuidle); diff --git a/arch/arm/mach-exynos4/include/mach/pmu.h b/arch/arm/mach-exynos4/include/mach/pmu.h index a952904b010..31d1ed8e3c6 100644 --- a/arch/arm/mach-exynos4/include/mach/pmu.h +++ b/arch/arm/mach-exynos4/include/mach/pmu.h @@ -20,6 +20,8 @@ enum sys_powerdown { NUM_SYS_POWERDOWN, }; +extern unsigned long l2x0_regs_phys; extern void exynos4_sys_powerdown_conf(enum sys_powerdown mode); +extern void s3c_cpu_resume(void); #endif /* __ASM_ARCH_PMU_H */ -- cgit v1.2.3 From 0ff7354f3889a6fe34384d867c94030119dd9d27 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Fri, 14 Oct 2011 11:51:45 +0100 Subject: ARM: exynos4: remove useless churn in sleep.S This patch cleans up sleep code in preparation for L2 resume code and hotplug functions Signed-off-by: Lorenzo Pieralisi --- arch/arm/mach-exynos4/sleep.S | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/arm/mach-exynos4/sleep.S b/arch/arm/mach-exynos4/sleep.S index 0984078f1eb..c19527bf301 100644 --- a/arch/arm/mach-exynos4/sleep.S +++ b/arch/arm/mach-exynos4/sleep.S @@ -27,8 +27,6 @@ */ #include -#include -#include .text @@ -52,3 +50,4 @@ ENTRY(s3c_cpu_resume) b cpu_resume +ENDPROC(s3c_cpu_resume) -- cgit v1.2.3 From 9caa7be09f1c7641ded024fa1fa6ee45b8b7d87f Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Fri, 11 Nov 2011 13:11:32 +0530 Subject: ARM: exynos4: add L2 early resume code This patch adds code to save L2 register configuration at boot, and to resume L2 before MMU is enabled in 
suspend and cpuidle resume paths. Signed-off-by: Lorenzo Pieralisi Signed-off-by: Amit Daniel Kachhap --- arch/arm/mach-exynos4/cpu.c | 36 ++++++++++++++++++++++++++++-------- arch/arm/mach-exynos4/sleep.S | 26 ++++++++++++++++++++++++++ 2 files changed, 54 insertions(+), 8 deletions(-) diff --git a/arch/arm/mach-exynos4/cpu.c b/arch/arm/mach-exynos4/cpu.c index 746d6fc6d39..979fb5d4367 100644 --- a/arch/arm/mach-exynos4/cpu.c +++ b/arch/arm/mach-exynos4/cpu.c @@ -11,6 +11,7 @@ #include #include +#include #include #include @@ -31,6 +32,7 @@ #include #include +#include extern int combiner_init(unsigned int combiner_nr, void __iomem *base, unsigned int irq_start); @@ -221,16 +223,34 @@ core_initcall(exynos4_core_init); #ifdef CONFIG_CACHE_L2X0 static int __init exynos4_l2x0_cache_init(void) { - /* TAG, Data Latency Control: 2cycle */ - __raw_writel(0x110, S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL); - __raw_writel(0x110, S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL); + if (!(__raw_readl(S5P_VA_L2CC + L2X0_CTRL) & 0x1)) { - /* L2X0 Prefetch Control */ - __raw_writel(0x30000007, S5P_VA_L2CC + L2X0_PREFETCH_CTRL); + l2x0_saved_regs.phy_base = EXYNOS4_PA_L2CC; + /* TAG, Data Latency Control: 2 cycles */ + l2x0_saved_regs.tag_latency = 0x110; + l2x0_saved_regs.data_latency = 0x110; + l2x0_saved_regs.prefetch_ctrl = 0x30000007; + l2x0_saved_regs.pwr_ctrl = + (L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN); - /* L2X0 Power Control */ - __raw_writel(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN, - S5P_VA_L2CC + L2X0_POWER_CTRL); + l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs); + + __raw_writel(l2x0_saved_regs.tag_latency, + S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL); + __raw_writel(l2x0_saved_regs.data_latency, + S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL); + + /* L2X0 Prefetch Control */ + __raw_writel(l2x0_saved_regs.prefetch_ctrl, + S5P_VA_L2CC + L2X0_PREFETCH_CTRL); + + /* L2X0 Power Control */ + __raw_writel(l2x0_saved_regs.pwr_ctrl, + S5P_VA_L2CC + L2X0_POWER_CTRL); + + clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long)); + clean_dcache_area(&l2x0_saved_regs, sizeof(struct l2x0_regs)); + } l2x0_init(S5P_VA_L2CC, 0x7C470001, 0xC200ffff); diff --git a/arch/arm/mach-exynos4/sleep.S b/arch/arm/mach-exynos4/sleep.S index c19527bf301..3284213a1a3 100644 --- a/arch/arm/mach-exynos4/sleep.S +++ b/arch/arm/mach-exynos4/sleep.S @@ -27,6 +27,8 @@ */ #include +#include +#include .text @@ -47,7 +49,31 @@ * other way of restoring the stack pointer after sleep, and we * must not write to the code segment (code is read-only) */ + .align + .data ENTRY(s3c_cpu_resume) + adr r0, l2x0_regs_phys + ldr r0, [r0] + ldr r1, [r0, #L2X0_R_PHY_BASE] + ldr r2, [r1, #L2X0_CTRL] + tst r2, #0x1 + bne resume_l2on + ldr r2, [r0, #L2X0_R_AUX_CTRL] + str r2, [r1, #L2X0_AUX_CTRL] + ldr r2, [r0, #L2X0_R_TAG_LATENCY] + str r2, [r1, #L2X0_TAG_LATENCY_CTRL] + ldr r2, [r0, #L2X0_R_DATA_LATENCY] + str r2, [r1, #L2X0_DATA_LATENCY_CTRL] + ldr r2, [r0, #L2X0_R_PREFETCH_CTRL] + str r2, [r1, #L2X0_PREFETCH_CTRL] + ldr r2, [r0, #L2X0_R_PWR_CTRL] + str r2, [r1, #L2X0_POWER_CTRL] + mov r2, #1 + str r2, [r1, #L2X0_CTRL] +resume_l2on: b cpu_resume ENDPROC(s3c_cpu_resume) + .globl l2x0_regs_phys +l2x0_regs_phys: + .long 0 -- cgit v1.2.3 From 41518122dfdff24f5c4d60277af9b1162f0c7b6c Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Fri, 11 Nov 2011 13:13:43 +0530 Subject: ARM: exynos4: remove useless code to save/restore L2 and GIC state Following the merge of CPU PM notifiers and L2 resume code, this patch removes useless code to save and restore L2 
and GIC registers. This is now automatically covered by suspend calls which integrated CPU PM notifiers and new sleep code that allows to resume L2 before MMU is turned on. Signed-off-by: Lorenzo Pieralisi Signed-off-by: Amit Daniel Kachhap --- arch/arm/mach-exynos4/pm.c | 87 ---------------------------------------------- 1 file changed, 87 deletions(-) diff --git a/arch/arm/mach-exynos4/pm.c b/arch/arm/mach-exynos4/pm.c index bc6ca9482de..47d5428887d 100644 --- a/arch/arm/mach-exynos4/pm.c +++ b/arch/arm/mach-exynos4/pm.c @@ -131,78 +131,6 @@ static struct sleep_save exynos4_core_save[] = { SAVE_ITEM(S5P_CLKGATE_SCLKCPU), SAVE_ITEM(S5P_CLKGATE_IP_CPU), - /* GIC side */ - SAVE_ITEM(S5P_VA_GIC_CPU + 0x000), - SAVE_ITEM(S5P_VA_GIC_CPU + 0x004), - SAVE_ITEM(S5P_VA_GIC_CPU + 0x008), - SAVE_ITEM(S5P_VA_GIC_CPU + 0x00C), - SAVE_ITEM(S5P_VA_GIC_CPU + 0x014), - SAVE_ITEM(S5P_VA_GIC_CPU + 0x018), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x000), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x004), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x100), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x104), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x108), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x300), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x304), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x308), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x400), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x404), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x408), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x40C), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x410), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x414), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x418), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x41C), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x420), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x424), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x428), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x42C), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x430), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x434), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x438), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x43C), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x440), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x444), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x448), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x44C), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x450), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x454), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x458), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x45C), - - SAVE_ITEM(S5P_VA_GIC_DIST + 0x800), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x804), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x808), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x80C), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x810), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x814), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x818), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x81C), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x820), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x824), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x828), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x82C), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x830), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x834), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x838), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x83C), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x840), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x844), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x848), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x84C), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x850), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x854), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x858), - SAVE_ITEM(S5P_VA_GIC_DIST + 0x85C), - - SAVE_ITEM(S5P_VA_GIC_DIST + 0xC00), - SAVE_ITEM(S5P_VA_GIC_DIST + 0xC04), - SAVE_ITEM(S5P_VA_GIC_DIST + 0xC08), - SAVE_ITEM(S5P_VA_GIC_DIST + 0xC0C), - SAVE_ITEM(S5P_VA_GIC_DIST + 0xC10), - SAVE_ITEM(S5P_VA_GIC_DIST + 0xC14), - SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x000), SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x010), SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x020), @@ -222,13 +150,6 @@ static struct sleep_save exynos4_core_save[] = { SAVE_ITEM(S5P_SROM_BC3), }; -static struct sleep_save exynos4_l2cc_save[] = { - 
SAVE_ITEM(S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL), - SAVE_ITEM(S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL), - SAVE_ITEM(S5P_VA_L2CC + L2X0_PREFETCH_CTRL), - SAVE_ITEM(S5P_VA_L2CC + L2X0_POWER_CTRL), - SAVE_ITEM(S5P_VA_L2CC + L2X0_AUX_CTRL), -}; /* For Cortex-A9 Diagnostic and Power control register */ static unsigned int save_arm_register[2]; @@ -249,7 +170,6 @@ static void exynos4_pm_prepare(void) u32 tmp; s3c_pm_do_save(exynos4_core_save, ARRAY_SIZE(exynos4_core_save)); - s3c_pm_do_save(exynos4_l2cc_save, ARRAY_SIZE(exynos4_l2cc_save)); s3c_pm_do_save(exynos4_epll_save, ARRAY_SIZE(exynos4_epll_save)); s3c_pm_do_save(exynos4_vpll_save, ARRAY_SIZE(exynos4_vpll_save)); @@ -462,13 +382,6 @@ static void exynos4_pm_resume(void) exynos4_scu_enable(S5P_VA_SCU); -#ifdef CONFIG_CACHE_L2X0 - s3c_pm_do_restore_core(exynos4_l2cc_save, ARRAY_SIZE(exynos4_l2cc_save)); - outer_inv_all(); - /* enable L2X0*/ - writel_relaxed(1, S5P_VA_L2CC + L2X0_CTRL); -#endif - early_wakeup: return; } -- cgit v1.2.3 From e982cd3347519e59a7499644603f8402b02bcba8 Mon Sep 17 00:00:00 2001 From: Amit Daniel Kachhap Date: Fri, 11 Nov 2011 13:49:57 +0530 Subject: ARM: EXYNOS4: Added function to read chip id This adds a function to get the revision id. This is a temporary workaround patch and may get dropped later. Signed-off-by: Jaecheol Lee Signed-off-by: Changhwan Youn Signed-off-by: Amit Daniel Kachhap --- arch/arm/mach-exynos4/cpu.c | 9 +++++++++ arch/arm/mach-exynos4/platsmp.c | 7 +++++-- arch/arm/plat-s5p/include/plat/exynos4.h | 1 + 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/arch/arm/mach-exynos4/cpu.c b/arch/arm/mach-exynos4/cpu.c index 979fb5d4367..c1cb39df589 100644 --- a/arch/arm/mach-exynos4/cpu.c +++ b/arch/arm/mach-exynos4/cpu.c @@ -260,6 +260,15 @@ static int __init exynos4_l2x0_cache_init(void) early_initcall(exynos4_l2x0_cache_init); #endif +int exynos4_subrev(void) +{ + static int subrev = -1; + if (unlikely(subrev < 0)) + subrev = readl(S5P_VA_CHIPID) & 0xf; + + return subrev; +} + int __init exynos4_init(void) { printk(KERN_INFO "EXYNOS4: Initializing architecture\n"); diff --git a/arch/arm/mach-exynos4/platsmp.c b/arch/arm/mach-exynos4/platsmp.c index df6ef1b2f98..ca01370840d 100644 --- a/arch/arm/mach-exynos4/platsmp.c +++ b/arch/arm/mach-exynos4/platsmp.c @@ -30,9 +30,11 @@ #include #include +#include + extern void exynos4_secondary_startup(void); -#define CPU1_BOOT_REG S5P_VA_SYSRAM +#define CPU1_BOOT_REG (exynos4_subrev() == 0 ? S5P_VA_SYSRAM : S5P_INFORM5) /* * control for which core is the next to come out of the secondary @@ -218,5 +220,6 @@ void __init platform_smp_prepare_cpus(unsigned int max_cpus) * until it receives a soft interrupt, and then the * secondary CPU branches to this address. */ - __raw_writel(BSYM(virt_to_phys(exynos4_secondary_startup)), S5P_VA_SYSRAM); + __raw_writel(BSYM(virt_to_phys(exynos4_secondary_startup)), + CPU1_BOOT_REG); } diff --git a/arch/arm/plat-s5p/include/plat/exynos4.h b/arch/arm/plat-s5p/include/plat/exynos4.h index 907caab53dc..d62f7f74a80 100644 --- a/arch/arm/plat-s5p/include/plat/exynos4.h +++ b/arch/arm/plat-s5p/include/plat/exynos4.h @@ -15,6 +15,7 @@ extern void exynos4_common_init_uarts(struct s3c2410_uartcfg *cfg, int no); extern void exynos4_register_clocks(void); extern void exynos4_setup_clocks(void); +extern int exynos4_subrev(void); #ifdef CONFIG_CPU_EXYNOS4210 -- cgit v1.2.3
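Taken together, a device-tree platform ends up with almost no L2 glue of its own: once the cache node from the binding above is present, machine init can simply call the new OF entry point (a sketch; where this call sits is platform specific):

#ifdef CONFIG_CACHE_L2X0
	l2x0_of_init(0, ~0UL);	/* keep the boot-time AUX_CTRL, let DT properties override it */
#endif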