author     Linus Torvalds <torvalds@linux-foundation.org>  2012-01-14 13:05:21 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-01-14 13:05:21 -0800
commit     4964e0664c80680fa6b28ef91381c076a5b25c2c (patch)
tree       62099c5aaeee7274bcc66bcfba35d479affa97cf /arch/mips/kernel
parent     0a80939b3e6af4b0dc93bf88ec02fd7e90a16f1b (diff)
parent     7bf6612e8a9d6a0b3b82e8e2611942be1258b307 (diff)
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (119 commits)
  MIPS: Delete unused function add_temporary_entry.
  MIPS: Set default pci cache line size.
  MIPS: Flush huge TLB
  MIPS: Octeon: Remove SYS_SUPPORTS_HIGHMEM.
  MIPS: Octeon: Add support for OCTEON II PCIe
  MIPS: Octeon: Update PCI Latency timer and enable more error reporting.
  MIPS: Alchemy: Update cpu-feature-overrides
  MIPS: Alchemy: db1200: Improve PB1200 detection.
  MIPS: Alchemy: merge Au1000 and Au1300-style IRQ controller code.
  MIPS: Alchemy: chain IRQ controllers to MIPS IRQ controller
  MIPS: Alchemy: irq: register pm at irq init time
  MIPS: Alchemy: Touchscreen support on DB1100
  MIPS: Alchemy: Hook up IrDA on DB1000/DB1100
  net/irda: convert au1k_ir to platform driver.
  MIPS: Alchemy: remove unused board headers
  MTD: nand: make au1550nd.c a platform_driver
  MIPS: Netlogic: Mark Netlogic chips as SMT capable
  MIPS: Netlogic: Add support for XLP 3XX cores
  MIPS: Netlogic: Merge some of XLR/XLP wakup code
  MIPS: Netlogic: Add default XLP config.
  ...

Fix up trivial conflicts in arch/mips/kernel/{perf_event_mipsxx.c, traps.c}
and drivers/tty/serial/Makefile
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile             |    2
-rw-r--r--  arch/mips/kernel/bmips_vec.S          |  255
-rw-r--r--  arch/mips/kernel/branch.c             |  128
-rw-r--r--  arch/mips/kernel/cevt-bcm1480.c       |    2
-rw-r--r--  arch/mips/kernel/cevt-ds1287.c        |    2
-rw-r--r--  arch/mips/kernel/cevt-gt641xx.c       |    2
-rw-r--r--  arch/mips/kernel/cevt-r4k.c           |    2
-rw-r--r--  arch/mips/kernel/cevt-sb1250.c        |    2
-rw-r--r--  arch/mips/kernel/cevt-txx9.c          |    2
-rw-r--r--  arch/mips/kernel/cpu-probe.c          |   28
-rw-r--r--  arch/mips/kernel/i8253.c              |    2
-rw-r--r--  arch/mips/kernel/kprobes.c            |  177
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c  |   72
-rw-r--r--  arch/mips/kernel/rtlx.c               |    1
-rw-r--r--  arch/mips/kernel/setup.c              |   43
-rw-r--r--  arch/mips/kernel/smp-bmips.c          |  458
-rw-r--r--  arch/mips/kernel/smtc.c               |    6
-rw-r--r--  arch/mips/kernel/traps.c              |   16
18 files changed, 1046 insertions, 154 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 1a966183e35..0c6877ea900 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -55,9 +55,11 @@ obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_VR41XX) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o
obj-$(CONFIG_CPU_XLR) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_XLP) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP_UP) += smp-up.o
+obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o
obj-$(CONFIG_MIPS_MT) += mips-mt.o
obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
new file mode 100644
index 00000000000..e908e81330b
--- /dev/null
+++ b/arch/mips/kernel/bmips_vec.S
@@ -0,0 +1,255 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com)
+ *
+ * Reset/NMI/re-entry vectors for BMIPS processors
+ */
+
+#include <linux/init.h>
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheops.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/addrspace.h>
+#include <asm/hazards.h>
+#include <asm/bmips.h>
+
+ .macro BARRIER
+ .set mips32
+ _ssnop
+ _ssnop
+ _ssnop
+ .set mips0
+ .endm
+
+ __CPUINIT
+
+/***********************************************************************
+ * Alternate CPU1 startup vector for BMIPS4350
+ *
+ * On some systems the bootloader has already started CPU1 and configured
+ * it to resume execution at 0x8000_0200 (!BEV IV vector) when it is
+ * triggered by the SW1 interrupt. If that is the case we try to move
+ * it to a more convenient place: BMIPS_WARM_RESTART_VEC @ 0x8000_0380.
+ ***********************************************************************/
+
+LEAF(bmips_smp_movevec)
+ la k0, 1f
+ li k1, CKSEG1
+ or k0, k1
+ jr k0
+
+1:
+ /* clear IV, pending IPIs */
+ mtc0 zero, CP0_CAUSE
+
+ /* re-enable IRQs to wait for SW1 */
+ li k0, ST0_IE | ST0_BEV | STATUSF_IP1
+ mtc0 k0, CP0_STATUS
+
+ /* set up CPU1 CBR; move BASE to 0xa000_0000 */
+ li k0, 0xff400000
+ mtc0 k0, $22, 6
+ li k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_1
+ or k0, k1
+ li k1, 0xa0080000
+ sw k1, 0(k0)
+
+ /* wait here for SW1 interrupt from bmips_boot_secondary() */
+ wait
+
+ la k0, bmips_reset_nmi_vec
+ li k1, CKSEG1
+ or k0, k1
+ jr k0
+END(bmips_smp_movevec)
+
+/***********************************************************************
+ * Reset/NMI vector
+ * For BMIPS processors that can relocate their exception vectors, this
+ * entire function gets copied to 0x8000_0000.
+ ***********************************************************************/
+
+NESTED(bmips_reset_nmi_vec, PT_SIZE, sp)
+ .set push
+ .set noat
+ .align 4
+
+#ifdef CONFIG_SMP
+ /* if the NMI bit is clear, assume this is a CPU1 reset instead */
+ li k1, (1 << 19)
+ mfc0 k0, CP0_STATUS
+ and k0, k1
+ beqz k0, bmips_smp_entry
+
+#if defined(CONFIG_CPU_BMIPS5000)
+ /* if we're not on core 0, this must be the SMP boot signal */
+ li k1, (3 << 25)
+ mfc0 k0, $22
+ and k0, k1
+ bnez k0, bmips_smp_entry
+#endif
+#endif /* CONFIG_SMP */
+
+ /* nope, it's just a regular NMI */
+ SAVE_ALL
+ move a0, sp
+
+ /* clear EXL, ERL, BEV so that TLB refills still work */
+ mfc0 k0, CP0_STATUS
+ li k1, ST0_ERL | ST0_EXL | ST0_BEV | ST0_IE
+ or k0, k1
+ xor k0, k1
+ mtc0 k0, CP0_STATUS
+ BARRIER
+
+ /* jump to the NMI handler function */
+ la k0, nmi_handler
+ jr k0
+
+ RESTORE_ALL
+ .set mips3
+ eret
+
+/***********************************************************************
+ * CPU1 reset vector (used for the initial boot only)
+ * This is still part of bmips_reset_nmi_vec().
+ ***********************************************************************/
+
+#ifdef CONFIG_SMP
+
+bmips_smp_entry:
+
+ /* set up CP0 STATUS; enable FPU */
+ li k0, 0x30000000
+ mtc0 k0, CP0_STATUS
+ BARRIER
+
+ /* set local CP0 CONFIG to make kseg0 cacheable, write-back */
+ mfc0 k0, CP0_CONFIG
+ ori k0, 0x07
+ xori k0, 0x04
+ mtc0 k0, CP0_CONFIG
+
+#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
+ /* initialize CPU1's local I-cache */
+ li k0, 0x80000000
+ li k1, 0x80010000
+ mtc0 zero, $28
+ mtc0 zero, $28, 1
+ BARRIER
+
+1: cache Index_Store_Tag_I, 0(k0)
+ addiu k0, 16
+ bne k0, k1, 1b
+#elif defined(CONFIG_CPU_BMIPS5000)
+ /* set exception vector base */
+ la k0, ebase
+ lw k0, 0(k0)
+ mtc0 k0, $15, 1
+ BARRIER
+#endif
+
+ /* jump back to kseg0 in case we need to remap the kseg1 area */
+ la k0, 1f
+ jr k0
+1:
+ la k0, bmips_enable_xks01
+ jalr k0
+
+ /* use temporary stack to set up upper memory TLB */
+ li sp, BMIPS_WARM_RESTART_VEC
+ la k0, plat_wired_tlb_setup
+ jalr k0
+
+ /* switch to permanent stack and continue booting */
+
+ .global bmips_secondary_reentry
+bmips_secondary_reentry:
+ la k0, bmips_smp_boot_sp
+ lw sp, 0(k0)
+ la k0, bmips_smp_boot_gp
+ lw gp, 0(k0)
+ la k0, start_secondary
+ jr k0
+
+#endif /* CONFIG_SMP */
+
+ .align 4
+ .global bmips_reset_nmi_vec_end
+bmips_reset_nmi_vec_end:
+
+END(bmips_reset_nmi_vec)
+
+ .set pop
+ .previous
+
+/***********************************************************************
+ * CPU1 warm restart vector (used for second and subsequent boots).
+ * Also used for S2 standby recovery (PM).
+ * This entire function gets copied to (BMIPS_WARM_RESTART_VEC)
+ ***********************************************************************/
+
+LEAF(bmips_smp_int_vec)
+
+ .align 4
+ mfc0 k0, CP0_STATUS
+ ori k0, 0x01
+ xori k0, 0x01
+ mtc0 k0, CP0_STATUS
+ eret
+
+ .align 4
+ .global bmips_smp_int_vec_end
+bmips_smp_int_vec_end:
+
+END(bmips_smp_int_vec)
+
+/***********************************************************************
+ * XKS01 support
+ * Certain CPUs support extending kseg0 to 1024MB.
+ ***********************************************************************/
+
+ __CPUINIT
+
+LEAF(bmips_enable_xks01)
+
+#if defined(CONFIG_XKS01)
+
+#if defined(CONFIG_CPU_BMIPS4380)
+ mfc0 t0, $22, 3
+ li t1, 0x1ff0
+ li t2, (1 << 12) | (1 << 9)
+ or t0, t1
+ xor t0, t1
+ or t0, t2
+ mtc0 t0, $22, 3
+ BARRIER
+#elif defined(CONFIG_CPU_BMIPS5000)
+ mfc0 t0, $22, 5
+ li t1, 0x01ff
+ li t2, (1 << 8) | (1 << 5)
+ or t0, t1
+ xor t0, t1
+ or t0, t2
+ mtc0 t0, $22, 5
+ BARRIER
+#else
+
+#error Missing XKS01 setup
+
+#endif
+
+#endif /* defined(CONFIG_XKS01) */
+
+ jr ra
+
+END(bmips_enable_xks01)
+
+ .previous
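
A note on the recurring or/xor/or pattern in bmips_enable_xks01: the sequence "or t0, t1; xor t0, t1; or t0, t2" clears every bit in the t1 mask and then sets the bits in t2, since (reg | mask) ^ mask == reg & ~mask. A minimal standalone C check of that identity (the mask and set values mirror the BMIPS4380 path above; the register value is arbitrary):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t reg  = 0xdeadbeef;             /* arbitrary CP0 register value */
        uint32_t mask = 0x1ff0;                 /* bits to clear */
        uint32_t set  = (1 << 12) | (1 << 9);   /* bits to set afterwards */

        uint32_t via_or_xor = ((reg | mask) ^ mask) | set;  /* or/xor/or idiom */
        uint32_t via_andnot = (reg & ~mask) | set;          /* conventional form */

        assert(via_or_xor == via_andnot);
        printf("0x%08x\n", via_or_xor);
        return 0;
}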
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 32103cc2a25..4d735d0e58f 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/signal.h>
+#include <linux/module.h>
#include <asm/branch.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
@@ -17,28 +18,22 @@
#include <asm/ptrace.h>
#include <asm/uaccess.h>
-/*
- * Compute the return address and do emulate branch simulation, if required.
+/**
+ * __compute_return_epc_for_insn - Computes the return address and
+ * emulates the branch, if required.
+ *
+ * @regs: Pointer to pt_regs
+ * @insn: branch instruction to decode
+ * @returns: -EFAULT on error (SIGBUS is forced); on success, 0 or
+ * BRANCH_LIKELY_TAKEN as appropriate after evaluating
+ * the branch.
*/
-int __compute_return_epc(struct pt_regs *regs)
+int __compute_return_epc_for_insn(struct pt_regs *regs,
+ union mips_instruction insn)
{
- unsigned int __user *addr;
unsigned int bit, fcr31, dspcontrol;
- long epc;
- union mips_instruction insn;
-
- epc = regs->cp0_epc;
- if (epc & 3)
- goto unaligned;
-
- /*
- * Read the instruction
- */
- addr = (unsigned int __user *) epc;
- if (__get_user(insn.word, addr)) {
- force_sig(SIGSEGV, current);
- return -EFAULT;
- }
+ long epc = regs->cp0_epc;
+ int ret = 0;
switch (insn.i_format.opcode) {
/*
@@ -64,18 +59,22 @@ int __compute_return_epc(struct pt_regs *regs)
switch (insn.i_format.rt) {
case bltz_op:
case bltzl_op:
- if ((long)regs->regs[insn.i_format.rs] < 0)
+ if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == bltzl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
case bgez_op:
case bgezl_op:
- if ((long)regs->regs[insn.i_format.rs] >= 0)
+ if ((long)regs->regs[insn.i_format.rs] >= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == bgezl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -83,9 +82,11 @@ int __compute_return_epc(struct pt_regs *regs)
case bltzal_op:
case bltzall_op:
regs->regs[31] = epc + 8;
- if ((long)regs->regs[insn.i_format.rs] < 0)
+ if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == bltzall_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -93,12 +94,15 @@ int __compute_return_epc(struct pt_regs *regs)
case bgezal_op:
case bgezall_op:
regs->regs[31] = epc + 8;
- if ((long)regs->regs[insn.i_format.rs] >= 0)
+ if ((long)regs->regs[insn.i_format.rs] >= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == bgezall_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
+
case bposge32_op:
if (!cpu_has_dsp)
goto sigill;
@@ -133,9 +137,11 @@ int __compute_return_epc(struct pt_regs *regs)
case beq_op:
case beql_op:
if (regs->regs[insn.i_format.rs] ==
- regs->regs[insn.i_format.rt])
+ regs->regs[insn.i_format.rt]) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.opcode == beql_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -143,9 +149,11 @@ int __compute_return_epc(struct pt_regs *regs)
case bne_op:
case bnel_op:
if (regs->regs[insn.i_format.rs] !=
- regs->regs[insn.i_format.rt])
+ regs->regs[insn.i_format.rt]) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.opcode == bnel_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -153,9 +161,11 @@ int __compute_return_epc(struct pt_regs *regs)
case blez_op: /* not really i_format */
case blezl_op:
/* rt field assumed to be zero */
- if ((long)regs->regs[insn.i_format.rs] <= 0)
+ if ((long)regs->regs[insn.i_format.rs] <= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.opcode == blezl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -163,9 +173,11 @@ int __compute_return_epc(struct pt_regs *regs)
case bgtz_op:
case bgtzl_op:
/* rt field assumed to be zero */
- if ((long)regs->regs[insn.i_format.rs] > 0)
+ if ((long)regs->regs[insn.i_format.rs] > 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.opcode == bgtzl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -187,18 +199,22 @@ int __compute_return_epc(struct pt_regs *regs)
switch (insn.i_format.rt & 3) {
case 0: /* bc1f */
case 2: /* bc1fl */
- if (~fcr31 & (1 << bit))
+ if (~fcr31 & (1 << bit)) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if ((insn.i_format.rt & 3) == 2)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
case 1: /* bc1t */
case 3: /* bc1tl */
- if (fcr31 & (1 << bit))
+ if (fcr31 & (1 << bit)) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if ((insn.i_format.rt & 3) == 3)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -239,15 +255,39 @@ int __compute_return_epc(struct pt_regs *regs)
#endif
}
- return 0;
+ return ret;
-unaligned:
- printk("%s: unaligned epc - sending SIGBUS.\n", current->comm);
+sigill:
+ printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
force_sig(SIGBUS, current);
return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
-sigill:
- printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
+int __compute_return_epc(struct pt_regs *regs)
+{
+ unsigned int __user *addr;
+ long epc;
+ union mips_instruction insn;
+
+ epc = regs->cp0_epc;
+ if (epc & 3)
+ goto unaligned;
+
+ /*
+ * Read the instruction
+ */
+ addr = (unsigned int __user *) epc;
+ if (__get_user(insn.word, addr)) {
+ force_sig(SIGSEGV, current);
+ return -EFAULT;
+ }
+
+ return __compute_return_epc_for_insn(regs, insn);
+
+unaligned:
+ printk("%s: unaligned epc - sending SIGBUS.\n", current->comm);
force_sig(SIGBUS, current);
return -EFAULT;
+
}
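
For reference, every taken-branch leg above computes its target the same way: the 16-bit immediate is sign-extended, multiplied by four (instructions are word-aligned), and added to the address of the delay slot (epc + 4); the fall-through case is epc + 8, which steps over the delay slot. A standalone sketch of that arithmetic, with a made-up epc and instruction word:

#include <stdint.h>
#include <stdio.h>

/* Decode the simmediate field of an I-format branch and print both
 * possible successor addresses. */
static void branch_targets(uint32_t insn, long epc)
{
        int16_t simm = (int16_t)(insn & 0xffff);   /* sign-extended immediate */
        long taken     = epc + 4 + (long)simm * 4; /* same as simm << 2 */
        long not_taken = epc + 8;                  /* skip over the delay slot */

        printf("taken: 0x%lx  not taken: 0x%lx\n", taken, not_taken);
}

int main(void)
{
        /* 0x1000fffe = beq $0, $0, -2: branches back one instruction */
        branch_targets(0x1000fffe, 0x80001000);
        return 0;
}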
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index 36c3898b76d..69bbfae183b 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -145,7 +145,7 @@ void __cpuinit sb1480_clockevent_init(void)
bcm1480_unmask_irq(cpu, irq);
action->handler = sibyte_counter_handler;
- action->flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER;
+ action->flags = IRQF_PERCPU | IRQF_TIMER;
action->name = name;
action->dev_id = cd;
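
The IRQF_DISABLED removals in this and the following timer drivers are possible because, by this point, the flag was effectively a no-op: handlers always run with interrupts disabled. A hedged kernel-side sketch of a timer irqaction in the post-IRQF_DISABLED style (the handler, IRQ number, and name are hypothetical):

#include <linux/init.h>
#include <linux/interrupt.h>

#define MY_TIMER_IRQ 7  /* hypothetical IRQ number */

static irqreturn_t my_timer_interrupt(int irq, void *dev_id)
{
        /* acknowledge the timer, then kick the clockevent handler */
        return IRQ_HANDLED;
}

static struct irqaction my_timer_irqaction = {
        .handler = my_timer_interrupt,
        .flags   = IRQF_PERCPU | IRQF_TIMER,    /* no IRQF_DISABLED needed */
        .name    = "my-timer",
};

static void __init my_timer_init(void)
{
        setup_irq(MY_TIMER_IRQ, &my_timer_irqaction);
}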
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
index 939157e397b..ed648cb5a69 100644
--- a/arch/mips/kernel/cevt-ds1287.c
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -108,7 +108,7 @@ static irqreturn_t ds1287_interrupt(int irq, void *dev_id)
static struct irqaction ds1287_irqaction = {
.handler = ds1287_interrupt,
- .flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER,
+ .flags = IRQF_PERCPU | IRQF_TIMER,
.name = "ds1287",
};
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
index 339f3639b90..831b47585b7 100644
--- a/arch/mips/kernel/cevt-gt641xx.c
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -114,7 +114,7 @@ static irqreturn_t gt641xx_timer0_interrupt(int irq, void *dev_id)
static struct irqaction gt641xx_timer0_irqaction = {
.handler = gt641xx_timer0_interrupt,
- .flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER,
+ .flags = IRQF_PERCPU | IRQF_TIMER,
.name = "gt641xx_timer0",
};
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index e2d8e199be3..51095dd9599 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -84,7 +84,7 @@ out:
struct irqaction c0_compare_irqaction = {
.handler = c0_compare_interrupt,
- .flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER,
+ .flags = IRQF_PERCPU | IRQF_TIMER,
.name = "timer",
};
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
index 590c54f28a8..e73439fd685 100644
--- a/arch/mips/kernel/cevt-sb1250.c
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -144,7 +144,7 @@ void __cpuinit sb1250_clockevent_init(void)
sb1250_unmask_irq(cpu, irq);
action->handler = sibyte_counter_handler;
- action->flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER;
+ action->flags = IRQF_PERCPU | IRQF_TIMER;
action->name = name;
action->dev_id = cd;
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
index f0ab92a1b05..e5c30b1d086 100644
--- a/arch/mips/kernel/cevt-txx9.c
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -146,7 +146,7 @@ static irqreturn_t txx9tmr_interrupt(int irq, void *dev_id)
static struct irqaction txx9tmr_irq = {
.handler = txx9tmr_interrupt,
- .flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER,
+ .flags = IRQF_PERCPU | IRQF_TIMER,
.name = "txx9tmr",
.dev_id = &txx9_clock_event_device,
};
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index c7d3cf1ce46..0bab464b8e3 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -191,6 +191,8 @@ void __init check_wait(void)
case CPU_CAVIUM_OCTEON_PLUS:
case CPU_CAVIUM_OCTEON2:
case CPU_JZRISC:
+ case CPU_XLR:
+ case CPU_XLP:
cpu_wait = r4k_wait;
break;
@@ -1014,6 +1016,13 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
{
decode_configs(c);
+ if ((c->processor_id & 0xff00) == PRID_IMP_NETLOGIC_AU13XX) {
+ c->cputype = CPU_ALCHEMY;
+ __cpu_name[cpu] = "Au1300";
+ /* following stuff is not for Alchemy */
+ return;
+ }
+
c->options = (MIPS_CPU_TLB |
MIPS_CPU_4KEX |
MIPS_CPU_COUNTER |
@@ -1023,6 +1032,12 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
MIPS_CPU_LLSC);
switch (c->processor_id & 0xff00) {
+ case PRID_IMP_NETLOGIC_XLP8XX:
+ case PRID_IMP_NETLOGIC_XLP3XX:
+ c->cputype = CPU_XLP;
+ __cpu_name[cpu] = "Netlogic XLP";
+ break;
+
case PRID_IMP_NETLOGIC_XLR732:
case PRID_IMP_NETLOGIC_XLR716:
case PRID_IMP_NETLOGIC_XLR532:
@@ -1053,14 +1068,21 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
break;
default:
- printk(KERN_INFO "Unknown Netlogic chip id [%02x]!\n",
+ pr_info("Unknown Netlogic chip id [%02x]!\n",
c->processor_id);
c->cputype = CPU_XLR;
break;
}
- c->isa_level = MIPS_CPU_ISA_M64R1;
- c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1;
+ if (c->cputype == CPU_XLP) {
+ c->isa_level = MIPS_CPU_ISA_M64R2;
+ c->options |= (MIPS_CPU_FPU | MIPS_CPU_ULRI | MIPS_CPU_MCHECK);
+ /* This will be updated again after all threads are woken up */
+ c->tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
+ } else {
+ c->isa_level = MIPS_CPU_ISA_M64R1;
+ c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1;
+ }
}
#ifdef CONFIG_64BIT
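
The tlbsize computation above pulls the MMU Size field out of CP0 Config1 bits 30:25 (number of TLB entries minus one); the XLP path instead reads a 16-bit field from Config6, since the final size is only known once all threads are awake. A standalone illustration of the Config1 decode (the register value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t config1 = 0x7e000000;          /* hypothetical: MMU Size field = 63 */
        int tlbsize = ((config1 >> 25) & 0x3f) + 1;

        printf("TLB entries: %d\n", tlbsize);   /* prints 64 */
        return 0;
}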
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index 7047bff35ea..c5bc344fc74 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -19,7 +19,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
static struct irqaction irq0 = {
.handler = timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
+ .flags = IRQF_NOBALANCING | IRQF_TIMER,
.name = "timer"
};
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index ee28683fc2a..158467da9bc 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -25,10 +25,12 @@
#include <linux/kprobes.h>
#include <linux/preempt.h>
+#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/ptrace.h>
+#include <asm/branch.h>
#include <asm/break.h>
#include <asm/inst.h>
@@ -112,17 +114,49 @@ insn_ok:
return 0;
}
+/*
+ * insn_has_ll_or_sc() checks whether an instruction is an ll or sc
+ * instruction. Putting a breakpoint on top of an atomic ll/sc pair is
+ * a bad idea, so refuse kprobes insertion for such instructions. Not
+ * much can be done about a breakpoint in the middle of an ll/sc pair;
+ * it is up to the user to avoid those places.
+ */
+static int __kprobes insn_has_ll_or_sc(union mips_instruction insn)
+{
+ int ret = 0;
+
+ switch (insn.i_format.opcode) {
+ case ll_op:
+ case lld_op:
+ case sc_op:
+ case scd_op:
+ ret = 1;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
union mips_instruction insn;
union mips_instruction prev_insn;
int ret = 0;
- prev_insn = p->addr[-1];
insn = p->addr[0];
- if (insn_has_delayslot(insn) || insn_has_delayslot(prev_insn)) {
- pr_notice("Kprobes for branch and jump instructions are not supported\n");
+ if (insn_has_ll_or_sc(insn)) {
+ pr_notice("Kprobes for ll and sc instructions are not"
+ "supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if ((probe_kernel_read(&prev_insn, p->addr - 1,
+ sizeof(mips_instruction)) == 0) &&
+ insn_has_delayslot(prev_insn)) {
+ pr_notice("Kprobes for branch delayslot are not supported\n");
ret = -EINVAL;
goto out;
}
@@ -138,9 +172,20 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
* In the kprobe->ainsn.insn[] array we store the original
* instruction at index zero and a break trap instruction at
* index one.
+ *
+ * On MIPS, if the instruction at the probed address is a branch
+ * instruction, the instruction in its branch delay slot (BD) must be
+ * executed when the probe hits. Since MIPS has no hardware
+ * single-stepping, the BD instruction cannot be executed in-line; it
+ * is executed on the SSOL slot using a normal breakpoint instruction
+ * in the next slot. So, read the instruction and save it for later
+ * execution.
*/
+ if (insn_has_delayslot(insn))
+ memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t));
+ else
+ memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
- memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
p->ainsn.insn[1] = breakpoint2_insn;
p->opcode = *p->addr;
@@ -191,16 +236,96 @@ static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
kcb->kprobe_saved_epc = regs->cp0_epc;
}
-static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+/**
+ * evaluate_branch_instruction -
+ *
+ * Evaluate the branch instruction at the probed address during the
+ * probe hit. The result of the evaluation is the updated epc, and the
+ * instruction in the delay slot is single-stepped on the SSOL slot
+ * using a normal breakpoint.
+ *
+ * The result is also saved in the kprobe control block for later use,
+ * in case we need to execute the delay-slot instruction. That is not
+ * needed for a NOP in the delay slot or for a taken branch-likely, in
+ * which case the SKIP_DELAYSLOT flag is set in the kprobe control block.
+ */
+static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
{
+ union mips_instruction insn = p->opcode;
+ long epc;
+ int ret = 0;
+
+ epc = regs->cp0_epc;
+ if (epc & 3)
+ goto unaligned;
+
+ if (p->ainsn.insn->word == 0)
+ kcb->flags |= SKIP_DELAYSLOT;
+ else
+ kcb->flags &= ~SKIP_DELAYSLOT;
+
+ ret = __compute_return_epc_for_insn(regs, insn);
+ if (ret < 0)
+ return ret;
+
+ if (ret == BRANCH_LIKELY_TAKEN)
+ kcb->flags |= SKIP_DELAYSLOT;
+
+ kcb->target_epc = regs->cp0_epc;
+
+ return 0;
+
+unaligned:
+ pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm);
+ force_sig(SIGBUS, current);
+ return -EFAULT;
+
+}
+
+static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ int ret = 0;
+
regs->cp0_status &= ~ST0_IE;
/* single step inline if the instruction is a break */
if (p->opcode.word == breakpoint_insn.word ||
p->opcode.word == breakpoint2_insn.word)
regs->cp0_epc = (unsigned long)p->addr;
- else
- regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
+ else if (insn_has_delayslot(p->opcode)) {
+ ret = evaluate_branch_instruction(p, regs, kcb);
+ if (ret < 0) {
+ pr_notice("Kprobes: Error in evaluating branch\n");
+ return;
+ }
+ }
+ regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
+}
+
+/*
+ * Called after single-stepping. p->addr is the address of the
+ * instruction whose first byte has been replaced by the "break 0"
+ * instruction. To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction. The address of this
+ * copy is p->ainsn.insn.
+ *
+ * This function prepares to return from the post-single-step
+ * breakpoint trap. In the case of branch instructions, the saved
+ * target epc is restored.
+ */
+static void __kprobes resume_execution(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ if (insn_has_delayslot(p->opcode))
+ regs->cp0_epc = kcb->target_epc;
+ else {
+ unsigned long orig_epc = kcb->kprobe_saved_epc;
+ regs->cp0_epc = orig_epc + 4;
+ }
}
static int __kprobes kprobe_handler(struct pt_regs *regs)
@@ -239,8 +364,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
kprobes_inc_nmissed_count(p);
- prepare_singlestep(p, regs);
+ prepare_singlestep(p, regs, kcb);
kcb->kprobe_status = KPROBE_REENTER;
+ if (kcb->flags & SKIP_DELAYSLOT) {
+ resume_execution(p, regs, kcb);
+ restore_previous_kprobe(kcb);
+ preempt_enable_no_resched();
+ }
return 1;
} else {
if (addr->word != breakpoint_insn.word) {
@@ -284,8 +414,16 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
}
ss_probe:
- prepare_singlestep(p, regs);
- kcb->kprobe_status = KPROBE_HIT_SS;
+ prepare_singlestep(p, regs, kcb);
+ if (kcb->flags & SKIP_DELAYSLOT) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ if (p->post_handler)
+ p->post_handler(p, regs, 0);
+ resume_execution(p, regs, kcb);
+ preempt_enable_no_resched();
+ } else
+ kcb->kprobe_status = KPROBE_HIT_SS;
+
return 1;
no_kprobe:
@@ -294,25 +432,6 @@ no_kprobe:
}
-/*
- * Called after single-stepping. p->addr is the address of the
- * instruction whose first byte has been replaced by the "break 0"
- * instruction. To avoid the SMP problems that can occur when we
- * temporarily put back the original opcode to single-step, we
- * single-stepped a copy of the instruction. The address of this
- * copy is p->ainsn.insn.
- *
- * This function prepares to return from the post-single-step
- * breakpoint trap.
- */
-static void __kprobes resume_execution(struct kprobe *p,
- struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
-{
- unsigned long orig_epc = kcb->kprobe_saved_epc;
- regs->cp0_epc = orig_epc + 4;
-}
-
static inline int post_kprobe_handler(struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
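
insn_has_ll_or_sc() keys off the major opcode, the top six bits of the instruction word. A standalone decode using the classic MIPS opcode values (LL = 0x30, LLD = 0x34, SC = 0x38, SCD = 0x3c), with a hand-assembled example word:

#include <stdint.h>
#include <stdio.h>

static int is_ll_or_sc(uint32_t word)
{
        switch (word >> 26) {   /* major opcode field */
        case 0x30:              /* ll  */
        case 0x34:              /* lld */
        case 0x38:              /* sc  */
        case 0x3c:              /* scd */
                return 1;
        default:
                return 0;
        }
}

int main(void)
{
        printf("%d\n", is_ll_or_sc(0xc0850000));   /* ll a1, 0(a0) -> 1 */
        printf("%d\n", is_ll_or_sc(0x00000000));   /* nop -> 0 */
        return 0;
}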
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 315fc0b250f..e3b897acfbc 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -621,11 +621,6 @@ static int mipspmu_event_init(struct perf_event *event)
return -ENODEV;
if (!atomic_inc_not_zero(&active_events)) {
- if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
- atomic_dec(&active_events);
- return -EINVAL;
- }
-
mutex_lock(&pmu_reserve_mutex);
if (atomic_read(&active_events) == 0)
err = mipspmu_get_irq();
@@ -638,11 +633,7 @@ static int mipspmu_event_init(struct perf_event *event)
if (err)
return err;
- err = __hw_perf_event_init(event);
- if (err)
- hw_perf_event_destroy(event);
-
- return err;
+ return __hw_perf_event_init(event);
}
static struct pmu pmu = {
@@ -712,18 +703,6 @@ static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
}
-static int validate_event(struct cpu_hw_events *cpuc,
- struct perf_event *event)
-{
- struct hw_perf_event fake_hwc = event->hw;
-
- /* Allow mixed event group. So return 1 to pass validation. */
- if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
- return 1;
-
- return mipsxx_pmu_alloc_counter(cpuc, &fake_hwc) >= 0;
-}
-
static int validate_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
@@ -731,15 +710,15 @@ static int validate_group(struct perf_event *event)
memset(&fake_cpuc, 0, sizeof(fake_cpuc));
- if (!validate_event(&fake_cpuc, leader))
+ if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
return -EINVAL;
list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
- if (!validate_event(&fake_cpuc, sibling))
+ if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
return -EINVAL;
}
- if (!validate_event(&fake_cpuc, event))
+ if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
return -EINVAL;
return 0;
@@ -1279,13 +1258,14 @@ static int __hw_perf_event_init(struct perf_event *event)
}
err = 0;
- if (event->group_leader != event) {
+ if (event->group_leader != event)
err = validate_group(event);
- if (err)
- return -EINVAL;
- }
event->destroy = hw_perf_event_destroy;
+
+ if (err)
+ event->destroy(event);
+
return err;
}
@@ -1380,20 +1360,10 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
}
/* 24K */
-#define IS_UNSUPPORTED_24K_EVENT(r, b) \
- ((b) == 12 || (r) == 151 || (r) == 152 || (b) == 26 || \
- (b) == 27 || (r) == 28 || (r) == 158 || (b) == 31 || \
- (b) == 32 || (b) == 34 || (b) == 36 || (r) == 168 || \
- (r) == 172 || (b) == 47 || ((b) >= 56 && (b) <= 63) || \
- ((b) >= 68 && (b) <= 127))
#define IS_BOTH_COUNTERS_24K_EVENT(b) \
((b) == 0 || (b) == 1 || (b) == 11)
/* 34K */
-#define IS_UNSUPPORTED_34K_EVENT(r, b) \
- ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 36 || \
- (b) == 38 || (r) == 175 || ((b) >= 56 && (b) <= 63) || \
- ((b) >= 68 && (b) <= 127))
#define IS_BOTH_COUNTERS_34K_EVENT(b) \
((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
@@ -1406,20 +1376,10 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
#endif
/* 74K */
-#define IS_UNSUPPORTED_74K_EVENT(r, b) \
- ((r) == 5 || ((r) >= 135 && (r) <= 137) || \
- ((b) >= 10 && (b) <= 12) || (b) == 22 || (b) == 27 || \
- (b) == 33 || (b) == 34 || ((b) >= 47 && (b) <= 49) || \
- (r) == 178 || (b) == 55 || (b) == 57 || (b) == 60 || \
- (b) == 61 || (r) == 62 || (r) == 191 || \
- ((b) >= 64 && (b) <= 127))
#define IS_BOTH_COUNTERS_74K_EVENT(b) \
((b) == 0 || (b) == 1)
/* 1004K */
-#define IS_UNSUPPORTED_1004K_EVENT(r, b) \
- ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 38 || \
- (r) == 175 || (b) == 63 || ((b) >= 68 && (b) <= 127))
#define IS_BOTH_COUNTERS_1004K_EVENT(b) \
((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
@@ -1445,11 +1405,10 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
unsigned int raw_id = config & 0xff;
unsigned int base_id = raw_id & 0x7f;
+ raw_event.event_id = base_id;
+
switch (current_cpu_type()) {
case CPU_24K:
- if (IS_UNSUPPORTED_24K_EVENT(raw_id, base_id))
- return ERR_PTR(-EOPNOTSUPP);
- raw_event.event_id = base_id;
if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
else
@@ -1464,9 +1423,6 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
#endif
break;
case CPU_34K:
- if (IS_UNSUPPORTED_34K_EVENT(raw_id, base_id))
- return ERR_PTR(-EOPNOTSUPP);
- raw_event.event_id = base_id;
if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
else
@@ -1482,9 +1438,6 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
#endif
break;
case CPU_74K:
- if (IS_UNSUPPORTED_74K_EVENT(raw_id, base_id))
- return ERR_PTR(-EOPNOTSUPP);
- raw_event.event_id = base_id;
if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
else
@@ -1495,9 +1448,6 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
#endif
break;
case CPU_1004K:
- if (IS_UNSUPPORTED_1004K_EVENT(raw_id, base_id))
- return ERR_PTR(-EOPNOTSUPP);
- raw_event.event_id = base_id;
if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
else
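
The reworked validate_group() drops the mixed-group special case and simply test-allocates a counter for the leader, each sibling, and the new event against a zeroed fake cpu_hw_events, failing if any allocation fails. A minimal userspace model of that pattern, with a plain bitmask standing in for the counter state (MAX_COUNTERS and the group sizes are made up):

#include <stdio.h>

#define MAX_COUNTERS 4

/* Grab the first free counter, or fail; mirrors the alloc-or-reject test. */
static int alloc_counter(unsigned int *used)
{
        for (int i = 0; i < MAX_COUNTERS; i++) {
                if (!(*used & (1u << i))) {
                        *used |= 1u << i;
                        return i;
                }
        }
        return -1;
}

static int validate_group(int nr_events)
{
        unsigned int fake_used = 0;     /* fake cpuc, zeroed per validation */

        for (int i = 0; i < nr_events; i++)
                if (alloc_counter(&fake_used) < 0)
                        return -1;      /* -EINVAL in the kernel */
        return 0;
}

int main(void)
{
        printf("group of 4: %d, group of 5: %d\n",
               validate_group(4), validate_group(5));   /* 0, -1 */
        return 0;
}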
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 933166f44a6..a9d801dec6b 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -473,7 +473,6 @@ static const struct file_operations rtlx_fops = {
static struct irqaction rtlx_irq = {
.handler = rtlx_interrupt,
- .flags = IRQF_DISABLED,
.name = "RTLX",
};
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index b1cb8f87d7b..058e964e730 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -122,6 +122,9 @@ static void __init print_memory_map(void)
case BOOT_MEM_RAM:
printk(KERN_CONT "(usable)\n");
break;
+ case BOOT_MEM_INIT_RAM:
+ printk(KERN_CONT "(usable after init)\n");
+ break;
case BOOT_MEM_ROM_DATA:
printk(KERN_CONT "(ROM data)\n");
break;
@@ -362,15 +365,24 @@ static void __init bootmem_init(void)
for (i = 0; i < boot_mem_map.nr_map; i++) {
unsigned long start, end, size;
+ start = PFN_UP(boot_mem_map.map[i].addr);
+ end = PFN_DOWN(boot_mem_map.map[i].addr
+ + boot_mem_map.map[i].size);
+
/*
* Reserve usable memory.
*/
- if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
+ switch (boot_mem_map.map[i].type) {
+ case BOOT_MEM_RAM:
+ break;
+ case BOOT_MEM_INIT_RAM:
+ memory_present(0, start, end);
continue;
+ default:
+ /* Not usable memory */
+ continue;
+ }
- start = PFN_UP(boot_mem_map.map[i].addr);
- end = PFN_DOWN(boot_mem_map.map[i].addr
- + boot_mem_map.map[i].size);
/*
* We are rounding up the start address of usable memory
* and at the end of the usable range downwards.
@@ -456,11 +468,33 @@ early_param("mem", early_parse_mem);
static void __init arch_mem_init(char **cmdline_p)
{
+ phys_t init_mem, init_end, init_size;
+
extern void plat_mem_setup(void);
/* call board setup routine */
plat_mem_setup();
+ init_mem = PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT;
+ init_end = PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT;
+ init_size = init_end - init_mem;
+ if (init_size) {
+ /* Make sure it is in the boot_mem_map */
+ int i, found;
+ found = 0;
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ if (init_mem >= boot_mem_map.map[i].addr &&
+ init_mem < (boot_mem_map.map[i].addr +
+ boot_mem_map.map[i].size)) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ add_memory_region(init_mem, init_size,
+ BOOT_MEM_INIT_RAM);
+ }
+
pr_info("Determined physical RAM map:\n");
print_memory_map();
@@ -524,6 +558,7 @@ static void __init resource_init(void)
res = alloc_bootmem(sizeof(struct resource));
switch (boot_mem_map.map[i].type) {
case BOOT_MEM_RAM:
+ case BOOT_MEM_INIT_RAM:
case BOOT_MEM_ROM_DATA:
res->name = "System RAM";
break;
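
bootmem_init() rounds the start of each region up and the end down to whole page frames, so partial pages at either edge are never treated as usable. A standalone sketch of the PFN_UP/PFN_DOWN arithmetic, assuming 4 KB pages and a made-up region:

#include <stdio.h>

#define PAGE_SHIFT  12
#define PAGE_SIZE   (1UL << PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
        unsigned long addr = 0x100800, size = 0x3000;   /* unaligned region */
        unsigned long start = PFN_UP(addr);
        unsigned long end   = PFN_DOWN(addr + size);

        /* 0x100800..0x103800 yields usable PFNs [0x101, 0x103): two full pages */
        printf("start pfn: 0x%lx, end pfn: 0x%lx\n", start, end);
        return 0;
}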
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
new file mode 100644
index 00000000000..58fe71afd87
--- /dev/null
+++ b/arch/mips/kernel/smp-bmips.c
@@ -0,0 +1,458 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com)
+ *
+ * SMP support for BMIPS
+ */
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/reboot.h>
+#include <linux/io.h>
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <asm/time.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/bootinfo.h>
+#include <asm/pmon.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/mipsregs.h>
+#include <asm/bmips.h>
+#include <asm/traps.h>
+#include <asm/barrier.h>
+
+static int __maybe_unused max_cpus = 1;
+
+/* these may be configured by the platform code */
+int bmips_smp_enabled = 1;
+int bmips_cpu_offset;
+cpumask_t bmips_booted_mask;
+
+#ifdef CONFIG_SMP
+
+/* initial $sp, $gp - used by arch/mips/kernel/bmips_vec.S */
+unsigned long bmips_smp_boot_sp;
+unsigned long bmips_smp_boot_gp;
+
+static void bmips_send_ipi_single(int cpu, unsigned int action);
+static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id);
+
+/* SW interrupts 0,1 are used for interprocessor signaling */
+#define IPI0_IRQ (MIPS_CPU_IRQ_BASE + 0)
+#define IPI1_IRQ (MIPS_CPU_IRQ_BASE + 1)
+
+#define CPUNUM(cpu, shift) (((cpu) + bmips_cpu_offset) << (shift))
+#define ACTION_CLR_IPI(cpu, ipi) (0x2000 | CPUNUM(cpu, 9) | ((ipi) << 8))
+#define ACTION_SET_IPI(cpu, ipi) (0x3000 | CPUNUM(cpu, 9) | ((ipi) << 8))
+#define ACTION_BOOT_THREAD(cpu) (0x08 | CPUNUM(cpu, 0))
+
+static void __init bmips_smp_setup(void)
+{
+ int i;
+
+#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
+ /* arbitration priority */
+ clear_c0_brcm_cmt_ctrl(0x30);
+
+ /* NBK and weak order flags */
+ set_c0_brcm_config_0(0x30000);
+
+ /*
+ * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread
+ * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
+ * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
+ */
+ change_c0_brcm_cmt_intr(0xf8018000,
+ (0x02 << 27) | (0x03 << 15));
+
+ /* single core, 2 threads (2 pipelines) */
+ max_cpus = 2;
+#elif defined(CONFIG_CPU_BMIPS5000)
+ /* enable raceless SW interrupts */
+ set_c0_brcm_config(0x03 << 22);
+
+ /* route HW interrupt 0 to CPU0, HW interrupt 1 to CPU1 */
+ change_c0_brcm_mode(0x1f << 27, 0x02 << 27);
+
+ /* N cores, 2 threads per core */
+ max_cpus = (((read_c0_brcm_config() >> 6) & 0x03) + 1) << 1;
+
+ /* clear any pending SW interrupts */
+ for (i = 0; i < max_cpus; i++) {
+ write_c0_brcm_action(ACTION_CLR_IPI(i, 0));
+ write_c0_brcm_action(ACTION_CLR_IPI(i, 1));
+ }
+#endif
+
+ if (!bmips_smp_enabled)
+ max_cpus = 1;
+
+ /* this can be overridden by the BSP */
+ if (!board_ebase_setup)
+ board_ebase_setup = &bmips_ebase_setup;
+
+ for (i = 0; i < max_cpus; i++) {
+ __cpu_number_map[i] = 1;
+ __cpu_logical_map[i] = 1;
+ set_cpu_possible(i, 1);
+ set_cpu_present(i, 1);
+ }
+}
+
+/*
+ * IPI IRQ setup - runs on CPU0
+ */
+static void bmips_prepare_cpus(unsigned int max_cpus)
+{
+ if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
+ "smp_ipi0", NULL))
+ panic("Can't request IPI0 interrupt\n");
+ if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
+ "smp_ipi1", NULL))
+ panic("Can't request IPI1 interrupt\n");
+}
+
+/*
+ * Tell the hardware to boot CPUx - runs on CPU0
+ */
+static void bmips_boot_secondary(int cpu, struct task_struct *idle)
+{
+ bmips_smp_boot_sp = __KSTK_TOS(idle);
+ bmips_smp_boot_gp = (unsigned long)task_thread_info(idle);
+ mb();
+
+ /*
+ * Initial boot sequence for secondary CPU:
+ * bmips_reset_nmi_vec @ a000_0000 ->
+ * bmips_smp_entry ->
+ * plat_wired_tlb_setup (cached function call; optional) ->
+ * start_secondary (cached jump)
+ *
+ * Warm restart sequence:
+ * play_dead WAIT loop ->
+ * bmips_smp_int_vec @ BMIPS_WARM_RESTART_VEC ->
+ * eret to play_dead ->
+ * bmips_secondary_reentry ->
+ * start_secondary
+ */
+
+ pr_info("SMP: Booting CPU%d...\n", cpu);
+
+ if (cpumask_test_cpu(cpu, &bmips_booted_mask))
+ bmips_send_ipi_single(cpu, 0);
+ else {
+#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
+ set_c0_brcm_cmt_ctrl(0x01);
+#elif defined(CONFIG_CPU_BMIPS5000)
+ if (cpu & 0x01)
+ write_c0_brcm_action(ACTION_BOOT_THREAD(cpu));
+ else {
+ /*
+ * core N thread 0 was already booted; just
+ * pulse the NMI line
+ */
+ bmips_write_zscm_reg(0x210, 0xc0000000);
+ udelay(10);
+ bmips_write_zscm_reg(0x210, 0x00);
+ }
+#endif
+ cpumask_set_cpu(cpu, &bmips_booted_mask);
+ }
+}
+
+/*
+ * Early setup - runs on secondary CPU after cache probe
+ */
+static void bmips_init_secondary(void)
+{
+ /* move NMI vector to kseg0, in case XKS01 is enabled */
+
+#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
+ void __iomem *cbr = BMIPS_GET_CBR();
+ unsigned long old_vec;
+
+ old_vec = __raw_readl(cbr + BMIPS_RELO_VECTOR_CONTROL_1);
+ __raw_writel(old_vec & ~0x20000000, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
+
+ clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0);
+#elif defined(CONFIG_CPU_BMIPS5000)
+ write_c0_brcm_bootvec(read_c0_brcm_bootvec() &
+ (smp_processor_id() & 0x01 ? ~0x20000000 : ~0x2000));
+
+ write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
+#endif
+
+ /* make sure there won't be a timer interrupt for a little while */
+ write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
+
+ irq_enable_hazard();
+ set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE);
+ irq_enable_hazard();
+}
+
+/*
+ * Late setup - runs on secondary CPU before entering the idle loop
+ */
+static void bmips_smp_finish(void)
+{
+ pr_info("SMP: CPU%d is running\n", smp_processor_id());
+}
+
+/*
+ * Runs on CPU0 after all CPUs have been booted
+ */
+static void bmips_cpus_done(void)
+{
+}
+
+#if defined(CONFIG_CPU_BMIPS5000)
+
+/*
+ * BMIPS5000 raceless IPIs
+ *
+ * Each CPU has two inbound SW IRQs which are independent of all other CPUs.
+ * IPI0 is used for SMP_RESCHEDULE_YOURSELF
+ * IPI1 is used for SMP_CALL_FUNCTION
+ */
+
+static void bmips_send_ipi_single(int cpu, unsigned int action)
+{
+ write_c0_brcm_action(ACTION_SET_IPI(cpu, action == SMP_CALL_FUNCTION));
+}
+
+static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id)
+{
+ int action = irq - IPI0_IRQ;
+
+ write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), action));
+
+ if (action == 0)
+ scheduler_ipi();
+ else
+ smp_call_function_interrupt();
+
+ return IRQ_HANDLED;
+}
+
+#else
+
+/*
+ * BMIPS43xx racey IPIs
+ *
+ * We use one inbound SW IRQ for each CPU.
+ *
+ * A spinlock must be held in order to keep CPUx from accidentally clearing
+ * an incoming IPI when it writes CP0 CAUSE to raise an IPI on CPUy. The
+ * same spinlock is used to protect the action masks.
+ */
+
+static DEFINE_SPINLOCK(ipi_lock);
+static DEFINE_PER_CPU(int, ipi_action_mask);
+
+static void bmips_send_ipi_single(int cpu, unsigned int action)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipi_lock, flags);
+ set_c0_cause(cpu ? C_SW1 : C_SW0);
+ per_cpu(ipi_action_mask, cpu) |= action;
+ irq_enable_hazard();
+ spin_unlock_irqrestore(&ipi_lock, flags);
+}
+
+static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id)
+{
+ unsigned long flags;
+ int action, cpu = irq - IPI0_IRQ;
+
+ spin_lock_irqsave(&ipi_lock, flags);
+ action = __get_cpu_var(ipi_action_mask);
+ per_cpu(ipi_action_mask, cpu) = 0;
+ clear_c0_cause(cpu ? C_SW1 : C_SW0);
+ spin_unlock_irqrestore(&ipi_lock, flags);
+
+ if (action & SMP_RESCHEDULE_YOURSELF)
+ scheduler_ipi();
+ if (action & SMP_CALL_FUNCTION)
+ smp_call_function_interrupt();
+
+ return IRQ_HANDLED;
+}
+
+#endif /* BMIPS type */
+
+static void bmips_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu(i, mask)
+ bmips_send_ipi_single(i, action);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int bmips_cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ if (cpu == 0)
+ return -EBUSY;
+
+ pr_info("SMP: CPU%d is offline\n", cpu);
+
+ cpu_clear(cpu, cpu_online_map);
+ cpu_clear(cpu, cpu_callin_map);
+
+ local_flush_tlb_all();
+ local_flush_icache_range(0, ~0);
+
+ return 0;
+}
+
+static void bmips_cpu_die(unsigned int cpu)
+{
+}
+
+void __ref play_dead(void)
+{
+ idle_task_exit();
+
+ /* flush data cache */
+ _dma_cache_wback_inv(0, ~0);
+
+ /*
+ * Wakeup is on SW0 or SW1; disable everything else
+ * Use BEV !IV (BMIPS_WARM_RESTART_VEC) to avoid the regular Linux
+ * IRQ handlers; this clears ST0_IE and returns immediately.
+ */
+ clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1);
+ change_c0_status(IE_IRQ5 | IE_IRQ1 | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV,
+ IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV);
+ irq_disable_hazard();
+
+ /*
+ * wait for SW interrupt from bmips_boot_secondary(), then jump
+ * back to start_secondary()
+ */
+ __asm__ __volatile__(
+ " wait\n"
+ " j bmips_secondary_reentry\n"
+ : : : "memory");
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+struct plat_smp_ops bmips_smp_ops = {
+ .smp_setup = bmips_smp_setup,
+ .prepare_cpus = bmips_prepare_cpus,
+ .boot_secondary = bmips_boot_secondary,
+ .smp_finish = bmips_smp_finish,
+ .init_secondary = bmips_init_secondary,
+ .cpus_done = bmips_cpus_done,
+ .send_ipi_single = bmips_send_ipi_single,
+ .send_ipi_mask = bmips_send_ipi_mask,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = bmips_cpu_disable,
+ .cpu_die = bmips_cpu_die,
+#endif
+};
+
+#endif /* CONFIG_SMP */
+
+/***********************************************************************
+ * BMIPS vector relocation
+ * This is primarily used for SMP boot, but it is applicable to some
+ * UP BMIPS systems as well.
+ ***********************************************************************/
+
+static void __cpuinit bmips_wr_vec(unsigned long dst, char *start, char *end)
+{
+ memcpy((void *)dst, start, end - start);
+ dma_cache_wback((unsigned long)start, end - start);
+ local_flush_icache_range(dst, dst + (end - start));
+ instruction_hazard();
+}
+
+static inline void __cpuinit bmips_nmi_handler_setup(void)
+{
+ bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec,
+ &bmips_reset_nmi_vec_end);
+ bmips_wr_vec(BMIPS_WARM_RESTART_VEC, &bmips_smp_int_vec,
+ &bmips_smp_int_vec_end);
+}
+
+void __cpuinit bmips_ebase_setup(void)
+{
+ unsigned long new_ebase = ebase;
+ void __iomem __maybe_unused *cbr;
+
+ BUG_ON(ebase != CKSEG0);
+
+#if defined(CONFIG_CPU_BMIPS4350)
+ /*
+ * BMIPS4350 cannot relocate the normal vectors, but it
+ * can relocate the BEV=1 vectors. So CPU1 starts up at
+ * the relocated BEV=1, IV=0 general exception vector @
+ * 0xa000_0380.
+ *
+ * set_uncached_handler() is used here because:
+ * - CPU1 will run this from uncached space
+ * - None of the cacheflush functions are set up yet
+ */
+ set_uncached_handler(BMIPS_WARM_RESTART_VEC - CKSEG0,
+ &bmips_smp_int_vec, 0x80);
+ __sync();
+ return;
+#elif defined(CONFIG_CPU_BMIPS4380)
+ /*
+ * 0x8000_0000: reset/NMI (initially in kseg1)
+ * 0x8000_0400: normal vectors
+ */
+ new_ebase = 0x80000400;
+ cbr = BMIPS_GET_CBR();
+ __raw_writel(0x80080800, cbr + BMIPS_RELO_VECTOR_CONTROL_0);
+ __raw_writel(0xa0080800, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
+#elif defined(CONFIG_CPU_BMIPS5000)
+ /*
+ * 0x8000_0000: reset/NMI (initially in kseg1)
+ * 0x8000_1000: normal vectors
+ */
+ new_ebase = 0x80001000;
+ write_c0_brcm_bootvec(0xa0088008);
+ write_c0_ebase(new_ebase);
+ if (max_cpus > 2)
+ bmips_write_zscm_reg(0xa0, 0xa008a008);
+#else
+ return;
+#endif
+ board_nmi_handler_setup = &bmips_nmi_handler_setup;
+ ebase = new_ebase;
+}
+
+asmlinkage void __weak plat_wired_tlb_setup(void)
+{
+ /*
+ * Called when starting/restarting a secondary CPU.
+ * Kernel stacks and other important data might only be accessible
+ * once the wired entries are present.
+ */
+}
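
The ACTION_* macros near the top of this file pack a command code, a CPU/thread number, and an IPI index into a single write to the Broadcom action register. A standalone expansion of the encodings (layout as implied by the macros above; bmips_cpu_offset assumed to be 0):

#include <stdio.h>

#define CPUNUM(cpu, shift)        ((cpu) << (shift))    /* offset assumed 0 */
#define ACTION_CLR_IPI(cpu, ipi)  (0x2000 | CPUNUM(cpu, 9) | ((ipi) << 8))
#define ACTION_SET_IPI(cpu, ipi)  (0x3000 | CPUNUM(cpu, 9) | ((ipi) << 8))
#define ACTION_BOOT_THREAD(cpu)   (0x08 | CPUNUM(cpu, 0))

int main(void)
{
        printf("set IPI1 on cpu1:  0x%04x\n", ACTION_SET_IPI(1, 1));    /* 0x3300 */
        printf("clr IPI0 on cpu1:  0x%04x\n", ACTION_CLR_IPI(1, 0));    /* 0x2200 */
        printf("boot thread cpu1:  0x%04x\n", ACTION_BOOT_THREAD(1));   /* 0x0009 */
        return 0;
}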
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index f0895e70e28..0a42ff3ff6a 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -559,7 +559,7 @@ void smtc_prepare_cpus(int cpus)
pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL);
if (pipi == NULL)
- panic("kmalloc of IPI message buffers failed\n");
+ panic("kmalloc of IPI message buffers failed");
else
printk("IPI buffer pool of %d buffers\n", nipi);
for (i = 0; i < nipi; i++) {
@@ -813,7 +813,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
if (pipi == NULL) {
bust_spinlocks(1);
mips_mt_regdump(dvpe());
- panic("IPI Msg. Buffers Depleted\n");
+ panic("IPI Msg. Buffers Depleted");
}
pipi->type = type;
pipi->arg = (void *)action;
@@ -1130,7 +1130,7 @@ static void ipi_irq_dispatch(void)
static struct irqaction irq_ipi = {
.handler = ipi_interrupt,
- .flags = IRQF_DISABLED | IRQF_PERCPU,
+ .flags = IRQF_PERCPU,
.name = "SMTC_IPI"
};
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index bbddb86c1fa..cc4a3f120f5 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -91,6 +91,7 @@ int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
+void (*board_ebase_setup)(void);
static void show_raw_backtrace(unsigned long reg29)
@@ -400,7 +401,7 @@ void __noreturn die(const char *str, struct pt_regs *regs)
panic("Fatal exception in interrupt");
if (panic_on_oops) {
- printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
+ printk(KERN_EMERG "Fatal exception: panic in 5 seconds");
ssleep(5);
panic("Fatal exception");
}
@@ -1150,7 +1151,7 @@ asmlinkage void do_mt(struct pt_regs *regs)
asmlinkage void do_dsp(struct pt_regs *regs)
{
if (cpu_has_dsp)
- panic("Unexpected DSP exception\n");
+ panic("Unexpected DSP exception");
force_sig(SIGILL, current);
}
@@ -1339,9 +1340,18 @@ void ejtag_exception_handler(struct pt_regs *regs)
/*
* NMI exception handler.
+ * No lock; only written during early bootup by CPU 0.
*/
+static RAW_NOTIFIER_HEAD(nmi_chain);
+
+int register_nmi_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&nmi_chain, nb);
+}
+
void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
+ raw_notifier_call_chain(&nmi_chain, 0, regs);
bust_spinlocks(1);
printk("NMI taken!!!!\n");
die("NMI", regs);
@@ -1682,6 +1692,8 @@ void __init trap_init(void)
ebase += (read_c0_ebase() & 0x3ffff000);
}
+ if (board_ebase_setup)
+ board_ebase_setup();
per_cpu_trap_init();
/*
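
With the nmi_chain notifier added above, platform code can observe an NMI before the unconditional die(). A hedged sketch of a client (the callback, message, and init hook are hypothetical; register_nmi_notifier() is as defined in this patch, and its declaration is assumed to live in an arch header):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/ptrace.h>

extern int register_nmi_notifier(struct notifier_block *nb);  /* from traps.c above */

static int my_nmi_notify(struct notifier_block *nb, unsigned long action,
                         void *data)
{
        struct pt_regs *regs = data;    /* third argument of the call chain */

        pr_emerg("board NMI, epc=%08lx\n", regs->cp0_epc);
        return NOTIFY_OK;
}

static struct notifier_block my_nmi_nb = {
        .notifier_call = my_nmi_notify,
};

static int __init my_nmi_init(void)
{
        return register_nmi_notifier(&my_nmi_nb);
}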