author		Avik Sil <avik.sil@linaro.org>	2011-03-31 11:06:38 +0000
committer	Avik Sil <avik.sil@linaro.org>	2011-03-31 11:06:38 +0000
commit		ebb688e3183bd5891312bdb8f4e2f520d70b36b6 (patch)
tree		c30d1abefaccc8cd1baa4944aae3348668e13bde /arch/mips
parent		8061f3a885ec3538bf405ff3957c205b1ab2aae4 (diff)
parent		b2afcd30fff4c24290a63a2497de301864d9726d (diff)
Merge remote branch 'lttng/2.6.38-lttng-0.247'

Conflicts:
	arch/arm/kernel/traps.c
	arch/arm/mach-omap2/clock34xx.c
	arch/arm/mach-omap2/pm34xx.c
Diffstat (limited to 'arch/mips')
-rw-r--r--	arch/mips/Kconfig				15
-rw-r--r--	arch/mips/include/asm/barrier.h			6
-rw-r--r--	arch/mips/include/asm/mipsregs.h		1
-rw-r--r--	arch/mips/include/asm/octeon/trace-clock.h	43
-rw-r--r--	arch/mips/include/asm/thread_info.h		4
-rw-r--r--	arch/mips/include/asm/timex.h			97
-rw-r--r--	arch/mips/include/asm/trace-clock.h		77
-rw-r--r--	arch/mips/kernel/entry.S			2
-rw-r--r--	arch/mips/kernel/linux32.c			5
-rw-r--r--	arch/mips/kernel/process.c			9
-rw-r--r--	arch/mips/kernel/ptrace.c			9
-rw-r--r--	arch/mips/kernel/scall32-o32.S			2
-rw-r--r--	arch/mips/kernel/scall64-64.S			5
-rw-r--r--	arch/mips/kernel/scall64-n32.S			4
-rw-r--r--	arch/mips/kernel/scall64-o32.S			10
-rw-r--r--	arch/mips/kernel/smp.c				3
-rw-r--r--	arch/mips/kernel/syscall.c			66
-rw-r--r--	arch/mips/kernel/time.c				1
-rw-r--r--	arch/mips/kernel/traps.c			25
-rw-r--r--	arch/mips/kernel/unaligned.c			6
-rw-r--r--	arch/mips/mm/fault.c				7
21 files changed, 380 insertions, 17 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index d88983516e2..7a4d951ed97 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -18,6 +18,7 @@ config MIPS
select HAVE_KRETPROBES
select RTC_LIB if !MACH_LOONGSON
select GENERIC_ATOMIC64 if !64BIT
+ select HAVE_LTT_DUMP_TABLES
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
select HAVE_GENERIC_HARDIRQS
@@ -1967,6 +1968,20 @@ config CPU_R4000_WORKAROUNDS
config CPU_R4400_WORKAROUNDS
bool
+config HAVE_GET_CYCLES_32
+ def_bool y
+ depends on !CPU_R4400_WORKAROUNDS
+ depends on !CPU_CAVIUM_OCTEON
+ select HAVE_TRACE_CLOCK
+ select HAVE_TRACE_CLOCK_32_TO_64
+ select HAVE_UNSYNCHRONIZED_TSC
+
+config HAVE_GET_CYCLES
+ def_bool y
+ depends on CPU_CAVIUM_OCTEON
+ select HAVE_TRACE_CLOCK
+ select HAVE_UNSYNCHRONIZED_TSC
+
#
# - Highmem only makes sense for the 32-bit kernel.
# - The current highmem code will only work properly on physically indexed
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index c0884f02d3a..1419b787e1e 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -178,4 +178,10 @@
#define nudge_writes() mb()
#endif
+/*
+ * MIPS does not have any instruction to serialize instruction execution on the
+ * core.
+ */
+#define sync_core()
+
#endif /* __ASM_BARRIER_H */
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 4d987097538..44f631b39ff 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -438,6 +438,7 @@
*/
#define CAUSEB_EXCCODE 2
#define CAUSEF_EXCCODE (_ULCAST_(31) << 2)
+#define CAUSE_EXCCODE(cause) (((cause) & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE)
#define CAUSEB_IP 8
#define CAUSEF_IP (_ULCAST_(255) << 8)
#define CAUSEB_IP0 8
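The new CAUSE_EXCCODE() macro extracts the 5-bit ExcCode field (bits 6..2) of the CP0 Cause register; the trace hooks added later in this patch use it to tag trap and page-fault events. A standalone illustration with a made-up cause value (not from the patch):

	/* Standalone illustration of CAUSE_EXCCODE(); the cause value is made up. */
	#include <stdio.h>

	#define CAUSEB_EXCCODE	2
	#define CAUSEF_EXCCODE	(31UL << 2)	/* 5-bit field, bits 6..2 */
	#define CAUSE_EXCCODE(cause) (((cause) & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE)

	int main(void)
	{
		unsigned long cause = 0x20;	/* ExcCode = 8 (Sys, syscall exception) */

		printf("ExcCode = %lu\n", CAUSE_EXCCODE(cause));	/* prints 8 */
		return 0;
	}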
diff --git a/arch/mips/include/asm/octeon/trace-clock.h b/arch/mips/include/asm/octeon/trace-clock.h
new file mode 100644
index 00000000000..062662b732a
--- /dev/null
+++ b/arch/mips/include/asm/octeon/trace-clock.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2005,2008 Mathieu Desnoyers
+ *
+ * Trace clock MIPS Octeon definitions.
+ */
+
+#ifndef _ASM_MIPS_OCTEON_TRACE_CLOCK_H
+#define _ASM_MIPS_OCTEON_TRACE_CLOCK_H
+
+#include <asm/octeon/octeon.h>
+
+#define TC_HW_BITS 64
+
+static inline u32 trace_clock_read32(void)
+{
+ return (u32)read_c0_cvmcount(); /* only need the 32 LSB */
+}
+
+static inline u64 trace_clock_read64(void)
+{
+ return read_c0_cvmcount();
+}
+
+static inline u64 trace_clock_frequency(void)
+{
+ return octeon_get_clock_rate();
+}
+
+static inline u32 trace_clock_freq_scale(void)
+{
+ return 1;
+}
+
+static inline int get_trace_clock(void)
+{
+ return 0;
+}
+
+static inline void put_trace_clock(void)
+{
+ return;
+}
+#endif /* _ASM_MIPS_OCTEON_TRACE_CLOCK_H */
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index d309556cacf..eb7f7b99038 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -122,6 +122,7 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
#define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
#define TIF_LOAD_WATCH 25 /* If set, load watch registers */
+#define TIF_KERNEL_TRACE 30 /* kernel trace active */
#define TIF_SYSCALL_TRACE 31 /* syscall trace active */
#ifdef CONFIG_MIPS32_O32
@@ -131,6 +132,7 @@ register struct thread_info *__current_thread_info __asm__("$28");
#endif /* CONFIG_MIPS32_O32 */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_KERNEL_TRACE (1<<TIF_KERNEL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
@@ -151,7 +153,7 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define _TIF_WORK_MASK (0x0000ffef & \
~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
/* work to do on any return to u-space */
-#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
+#define _TIF_ALLWORK_MASK (0xc000ffff & ~_TIF_SECCOMP)
#endif /* __KERNEL__ */
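The _TIF_ALLWORK_MASK change above follows from the new flag's position: 0x8000ffff covers bit 31 (TIF_SYSCALL_TRACE) plus the low 16 bits, and including bit 30 (TIF_KERNEL_TRACE) yields 0xc000ffff. A quick sanity-check sketch (illustrative, not part of the patch):

	/* Sanity check of the mask arithmetic; illustrative only. */
	#include <assert.h>

	#define TIF_KERNEL_TRACE	30
	#define TIF_SYSCALL_TRACE	31

	int main(void)
	{
		unsigned int mask = 0x8000ffffU | (1U << TIF_KERNEL_TRACE);

		assert(mask == 0xc000ffffU);	/* the new _TIF_ALLWORK_MASK base */
		assert(mask & (1U << TIF_SYSCALL_TRACE));
		return 0;
	}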
diff --git a/arch/mips/include/asm/timex.h b/arch/mips/include/asm/timex.h
index 6529704aa73..6c150979fa2 100644
--- a/arch/mips/include/asm/timex.h
+++ b/arch/mips/include/asm/timex.h
@@ -20,6 +20,8 @@
*/
#define CLOCK_TICK_RATE 1193182
+extern unsigned int mips_hpt_frequency;
+
/*
* Standard way to access the cycle counter.
* Currently only used on SMP for scheduling.
@@ -29,14 +31,109 @@
* which isn't an evil thing.
*
* We know that all SMP capable CPUs have cycle counters.
+ *
+ * Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * HAVE_GET_CYCLES makes sure that this case is handled properly:
+ *
+ * Ralf Baechle <ralf@linux-mips.org>:
+ * This avoids executing an mfc0 c0_count instruction on processors which
+ * don't have one, but also on certain R4000 and R4400 versions where reading
+ * from the count register just at the very moment when its value equals
+ * c0_compare will result in the timer interrupt getting lost.
*/
+#ifdef CONFIG_HAVE_GET_CYCLES
+# ifdef CONFIG_CPU_CAVIUM_OCTEON
+typedef unsigned long cycles_t;
+
+static inline cycles_t get_cycles(void)
+{
+ return read_c0_cvmcount();
+}
+
+static inline void get_cycles_barrier(void)
+{
+}
+
+static inline cycles_t get_cycles_rate(void)
+{
+ return mips_hpt_frequency;
+}
+
+extern int test_tsc_synchronization(void);
+extern int _tsc_is_sync;
+static inline int tsc_is_sync(void)
+{
+ return _tsc_is_sync;
+}
+# else /* #ifdef CONFIG_CPU_CAVIUM_OCTEON */
+# error "64-bit get_cycles() supported only on Cavium Octeon MIPS architectures"
+# endif /* #else #ifdef CONFIG_CPU_CAVIUM_OCTEON */
+#elif defined(CONFIG_HAVE_GET_CYCLES_32)
typedef unsigned int cycles_t;
static inline cycles_t get_cycles(void)
{
+ return read_c0_count();
+}
+
+static inline void get_cycles_barrier(void)
+{
+}
+
+static inline cycles_t get_cycles_rate(void)
+{
+ return mips_hpt_frequency;
+}
+
+extern int test_tsc_synchronization(void);
+extern int _tsc_is_sync;
+static inline int tsc_is_sync(void)
+{
+ return _tsc_is_sync;
+}
+#else
+typedef unsigned int cycles_t;
+
+static inline cycles_t get_cycles(void)
+{
+ return 0;
+}
+static inline int test_tsc_synchronization(void)
+{
return 0;
}
+static inline int tsc_is_sync(void)
+{
+ return 0;
+}
+#endif
+
+#define DELAY_INTERRUPT 100
+/*
+ * Only updates the 32 LSBs; the high word (val2) is ignored.
+ */
+static inline void write_tsc(u32 val1, u32 val2)
+{
+ write_c0_count(val1);
+ /* Arrange for an interrupt in a short while */
+ write_c0_compare(read_c0_count() + DELAY_INTERRUPT);
+}
+
+/*
+ * Currently unused, should update internal tsc-related timekeeping sources.
+ */
+static inline void mark_tsc_unstable(char *reason)
+{
+}
+
+/*
+ * Currently simply uses the tsc_is_sync value.
+ */
+static inline int unsynchronized_tsc(void)
+{
+ return !tsc_is_sync();
+}
#endif /* __KERNEL__ */
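With get_cycles() and get_cycles_rate() both defined, callers can timestamp code and convert cycle deltas to wall-clock time. A minimal usage sketch assuming the definitions above (cycles_to_ns() and measure_ns() are hypothetical helpers, not part of the patch):

	/* Hypothetical usage sketch built on the timex.h definitions above. */
	static inline unsigned long long cycles_to_ns(cycles_t delta)
	{
		/* get_cycles_rate() is cycles per second (mips_hpt_frequency). */
		return (unsigned long long)delta * 1000000000ULL / get_cycles_rate();
	}

	static unsigned long long measure_ns(void (*fn)(void))
	{
		cycles_t start, end;

		start = get_cycles();
		fn();
		end = get_cycles();
		/* Unsigned subtraction handles a single 32-bit wrap correctly. */
		return cycles_to_ns(end - start);
	}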
diff --git a/arch/mips/include/asm/trace-clock.h b/arch/mips/include/asm/trace-clock.h
new file mode 100644
index 00000000000..9bbcf999bef
--- /dev/null
+++ b/arch/mips/include/asm/trace-clock.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2005,2008 Mathieu Desnoyers
+ *
+ * Trace clock MIPS definitions.
+ */
+
+#ifndef _ASM_MIPS_TRACE_CLOCK_H
+#define _ASM_MIPS_TRACE_CLOCK_H
+
+#include <linux/timex.h>
+#include <asm/processor.h>
+
+#define TRACE_CLOCK_MIN_PROBE_DURATION 200
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+# include <asm/octeon/trace-clock.h>
+#else /* !CONFIG_CPU_CAVIUM_OCTEON */
+/*
+ * Number of hardware clock bits. The higher-order bits are expected to be 0.
+ * If the hardware clock source has more than 32 bits, bits above the 32nd
+ * are truncated by a cast to a 32-bit unsigned integer. Range: 1 - 32.
+ * (Too few bits would be unrealistic anyway, since we depend on the timer to
+ * detect overflows.)
+ */
+#define TC_HW_BITS 32
+
+/* Expected maximum interrupt latency: 15 ms, doubled as a safety margin */
+#define TC_EXPECTED_INTERRUPT_LATENCY 30
+
+extern u64 trace_clock_read_synthetic_tsc(void);
+
+/*
+ * MIPS get_cycles() only returns a 32-bit TSC (see timex.h). The assumption
+ * there is that a reschedule happens every 8 seconds or so. Given that
+ * tracing needs to detect delays longer than 8 seconds, we need a full 64-bit
+ * TSC, which is provided by trace-clock-32-to-64.
+ */
+
+static inline u32 trace_clock_read32(void)
+{
+ return (u32)get_cycles(); /* only need the 32 LSB */
+}
+
+static inline u64 trace_clock_read64(void)
+{
+ return trace_clock_read_synthetic_tsc();
+}
+
+static inline u64 trace_clock_frequency(void)
+{
+ return get_cycles_rate();
+}
+
+static inline u32 trace_clock_freq_scale(void)
+{
+ return 1;
+}
+
+extern void get_synthetic_tsc(void);
+extern void put_synthetic_tsc(void);
+
+static inline int get_trace_clock(void)
+{
+ get_synthetic_tsc();
+ return 0;
+}
+
+static inline void put_trace_clock(void)
+{
+ put_synthetic_tsc();
+}
+#endif /* CONFIG_CPU_CAVIUM_OCTEON */
+
+static inline void set_trace_clock_is_sync(int state)
+{
+}
+#endif /* _ASM_MIPS_TRACE_CLOCK_H */
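trace_clock_read_synthetic_tsc() itself lives in the generic trace-clock-32-to-64 code; the underlying technique is to extend the 32-bit hardware count in software by detecting rollovers. A simplified, uniprocessor-only sketch of that idea (illustrative, not the patch's implementation; the real code must also handle concurrency, and relies on a periodic timer, bounded by TC_EXPECTED_INTERRUPT_LATENCY, to guarantee at least one read per wrap period):

	/* Illustrative 32-to-64 bit extension; not the patch's implementation. */
	static u64 last_synthetic_tsc;

	static u64 synthetic_tsc_read(void)
	{
		u32 hw = trace_clock_read32();
		u32 last_lo = (u32)last_synthetic_tsc;
		u64 hi = last_synthetic_tsc & ~0xffffffffULL;

		if (hw < last_lo)
			hi += 0x100000000ULL;	/* low word wrapped since last read */

		last_synthetic_tsc = hi | hw;
		return last_synthetic_tsc;
	}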
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index ffa331029e0..8c5410f97e7 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -167,7 +167,7 @@ work_notifysig: # deal with pending signals and
FEXPORT(syscall_exit_work_partial)
SAVE_STATIC
syscall_exit_work:
- li t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ li t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_KERNEL_TRACE
and t0, a2 # a2 is preloaded with TI_FLAGS
beqz t0, work_pending # trace bit set?
local_irq_enable # could let do_syscall_trace()
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 876a75cc376..76a82609626 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -34,6 +34,7 @@
#include <linux/vfs.h>
#include <linux/ipc.h>
#include <linux/slab.h>
+#include <trace/ipc.h>
#include <net/sock.h>
#include <net/scm.h>
@@ -44,6 +45,8 @@
#include <asm/mmu_context.h>
#include <asm/mman.h>
+DEFINE_TRACE(ipc_call);
+
/* Use this to get at 32-bit user passed pointers. */
/* A() macro should be used for places where you e.g.
have some internal variable u32 and just want to get
@@ -166,6 +169,8 @@ SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third,
version = call >> 16; /* hack for backward compatibility */
call &= 0xffff;
+ trace_ipc_call(call, first);
+
switch (call) {
case SEMOP:
/* struct sembuf is the same on 32 and 64bit :)) */
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index ae167df73dd..7d9bb1cdd7f 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -25,6 +25,7 @@
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
+#include <trace/sched.h>
#include <asm/asm.h>
#include <asm/bootinfo.h>
@@ -42,6 +43,8 @@
#include <asm/inst.h>
#include <asm/stacktrace.h>
+DEFINE_TRACE(sched_kthread_create);
+
/*
* The idle thread. There's no useful work to be done, so just try to conserve
* power and have a low exit latency (ie sit in a loop waiting for somebody to
@@ -234,6 +237,7 @@ static void __noreturn kernel_thread_helper(void *arg, int (*fn)(void *))
long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
struct pt_regs regs;
+ long pid;
memset(&regs, 0, sizeof(regs));
@@ -249,7 +253,10 @@ long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
#endif
/* Ok, create the new process.. */
- return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
+ pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED,
+ 0, &regs, 0, NULL, NULL);
+ trace_sched_kthread_create(fn, pid);
+ return pid;
}
/*
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index d21c388c011..79e1750cc7c 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -25,6 +25,7 @@
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
+#include <trace/syscall.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
@@ -39,6 +40,9 @@
#include <asm/bootinfo.h>
#include <asm/reg.h>
+DEFINE_TRACE(syscall_entry);
+DEFINE_TRACE(syscall_exit);
+
/*
* Called by kernel/ptrace.c when detaching..
*
@@ -535,6 +539,11 @@ static inline int audit_arch(void)
*/
asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
{
+ if (!entryexit)
+ trace_syscall_entry(regs, regs->regs[2]);
+ else
+ trace_syscall_exit(regs->regs[2]);
+
/* do the secure computing check first */
if (!entryexit)
secure_computing(regs->regs[2]);
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index fbaabad0e6e..1b90e8255da 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
stack_done:
lw t0, TI_FLAGS($28) # syscall tracing enabled?
- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_KERNEL_TRACE
and t0, t1
bnez t0, syscall_trace_entry # -> yes
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 3f417928320..c574a1a12f2 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
sd a3, PT_R26(sp) # save a3 for syscall restarting
- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_KERNEL_TRACE
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
bnez t0, syscall_trace_entry
@@ -126,7 +126,8 @@ illegal_syscall:
END(handle_sys64)
.align 3
-sys_call_table:
+ .type sys_call_table,@object
+EXPORT(sys_call_table)
PTR sys_read /* 5000 */
PTR sys_write
PTR sys_open
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index f08ece6d8ac..0d312c2d54d 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
sd a3, PT_R26(sp) # save a3 for syscall restarting
- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_KERNEL_TRACE
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
bnez t0, n32_syscall_trace_entry
@@ -121,6 +121,8 @@ not_n32_scall:
END(handle_sysn32)
+ .align 3
+ .type sysn32_call_table,@object
EXPORT(sysn32_call_table)
PTR sys_read /* 6000 */
PTR sys_write
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 78d768a3e19..635d0d84344 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -53,7 +53,7 @@ NESTED(handle_sys, PT_SIZE, sp)
sll a3, a3, 0
dsll t0, v0, 3 # offset into table
- ld t2, (sys_call_table - (__NR_O32_Linux * 8))(t0)
+ ld t2, (syso32_call_table - (__NR_O32_Linux * 8))(t0)
sd a3, PT_R26(sp) # save a3 for syscall restarting
@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
PTR 4b, bad_stack
.previous
- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_KERNEL_TRACE
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
bnez t0, trace_a_syscall
@@ -180,7 +180,7 @@ LEAF(sys32_syscall)
beqz t0, einval # do not recurse
dsll t1, t0, 3
beqz v0, einval
- ld t2, sys_call_table(t1) # syscall routine
+ ld t2, syso32_call_table(t1) # syscall routine
move a0, a1 # shift argument registers
move a1, a2
@@ -202,8 +202,8 @@ einval: li v0, -ENOSYS
END(sys32_syscall)
.align 3
- .type sys_call_table,@object
-sys_call_table:
+ .type syso32_call_table,@object
+EXPORT(syso32_call_table)
PTR sys32_syscall /* 4000 */
PTR sys_exit
PTR sys_fork
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 32a25610108..f8df840935d 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -164,6 +164,9 @@ void __init smp_cpus_done(unsigned int max_cpus)
{
mp_ops->cpus_done();
synchronise_count_master();
+#ifdef CONFIG_HAVE_UNSYNCHRONIZED_TSC
+ test_tsc_synchronization();
+#endif
}
/* called from main before smp_init() */
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 58beabf50b3..42db7a4eedc 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -31,6 +31,8 @@
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/elf.h>
+#include <linux/ipc.h>
+#include <linux/kallsyms.h>
#include <asm/asm.h>
#include <asm/branch.h>
@@ -463,3 +465,67 @@ int kernel_execve(const char *filename,
return -__v0;
}
+
+void ltt_dump_sys_call_table(void *call_data)
+{
+ int i;
+ char namebuf[KSYM_NAME_LEN];
+
+#ifdef CONFIG_32BIT
+ for (i = 0; i < __NR_O32_Linux_syscalls; i++) {
+ extern struct {
+ unsigned long ptr;
+ long j;
+ } sys_call_table[];
+
+ sprint_symbol(namebuf, sys_call_table[i].ptr);
+ __trace_mark(0, syscall_state, sys_call_table, call_data,
+ "id %d address %p symbol %s",
+ i + __NR_O32_Linux, (void *)sys_call_table[i].ptr,
+ namebuf);
+ }
+#endif
+#ifdef CONFIG_64BIT
+# ifdef CONFIG_MIPS32_O32
+ for (i = 0; i < __NR_O32_Linux_syscalls; i++) {
+ extern unsigned long syso32_call_table[];
+
+ sprint_symbol(namebuf, syso32_call_table[i]);
+ __trace_mark(0, syscall_state, sys_call_table, call_data,
+ "id %d address %p symbol %s",
+ i + __NR_O32_Linux, (void *)syso32_call_table[i],
+ namebuf);
+ }
+# endif
+
+ for (i = 0; i < __NR_64_Linux_syscalls; i++) {
+ extern unsigned long sys_call_table[];
+
+ sprint_symbol(namebuf, sys_call_table[i]);
+ __trace_mark(0, syscall_state, sys_call_table, call_data,
+ "id %d address %p symbol %s",
+ i + __NR_64_Linux, (void *)sys_call_table[i],
+ namebuf);
+ }
+
+# ifdef CONFIG_MIPS32_N32
+ for (i = 0; i < __NR_N32_Linux_syscalls; i++) {
+ extern unsigned long sysn32_call_table[];
+
+ sprint_symbol(namebuf, sysn32_call_table[i]);
+ __trace_mark(0, syscall_state, sys_call_table, call_data,
+ "id %d address %p symbol %s",
+ i + __NR_N32_Linux, (void *)sysn32_call_table[i],
+ namebuf);
+ }
+# endif
+#endif
+}
+EXPORT_SYMBOL_GPL(ltt_dump_sys_call_table);
+
+void ltt_dump_idt_table(void *call_data)
+{
+ /* No IDT information yet. */
+ return;
+}
+EXPORT_SYMBOL_GPL(ltt_dump_idt_table);
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index fb749740551..51561a75dcf 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -70,6 +70,7 @@ EXPORT_SYMBOL(perf_irq);
*/
unsigned int mips_hpt_frequency;
+EXPORT_SYMBOL(mips_hpt_frequency);
/*
* This function exists in order to cause an error due to a duplicate
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 71350f7f2d8..b6a12d70e8c 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -30,6 +30,7 @@
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
+#include <trace/trap.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
@@ -55,6 +56,12 @@
#include <asm/stacktrace.h>
#include <asm/uasm.h>
+/*
+ * Also used in unaligned.c and fault.c.
+ */
+DEFINE_TRACE(trap_entry);
+DEFINE_TRACE(trap_exit);
+
extern void check_wait(void);
extern asmlinkage void r4k_wait(void);
extern asmlinkage void rollback_handle_int(void);
@@ -321,7 +328,7 @@ static void __show_regs(const struct pt_regs *regs)
printk("Cause : %08x\n", cause);
- cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
+ cause = CAUSE_EXCCODE(cause);
if (1 <= cause && cause <= 5)
printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
@@ -698,6 +705,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
return;
die_if_kernel("FP exception in kernel code", regs);
+ trace_trap_entry(regs, CAUSE_EXCCODE(regs->cp0_cause));
if (fcr31 & FPU_CSR_UNI_X) {
int sig;
void __user *fault_addr = NULL;
@@ -730,7 +738,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
/* If something went wrong, signal */
process_fpemu_return(sig, fault_addr);
-
+ trace_trap_exit();
return;
} else if (fcr31 & FPU_CSR_INV_X)
info.si_code = FPE_FLTINV;
@@ -748,6 +756,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
info.si_errno = 0;
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
+ trace_trap_exit();
}
static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
@@ -979,6 +988,8 @@ asmlinkage void do_cpu(struct pt_regs *regs)
int status;
unsigned long __maybe_unused flags;
+ trace_trap_entry(regs, CAUSE_EXCCODE(regs->cp0_cause));
+
die_if_kernel("do_cpu invoked from kernel context!", regs);
cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
@@ -990,8 +1001,10 @@ asmlinkage void do_cpu(struct pt_regs *regs)
opcode = 0;
status = -1;
- if (unlikely(compute_return_epc(regs) < 0))
+ if (unlikely(compute_return_epc(regs) < 0)) {
+ trace_trap_exit();
return;
+ }
if (unlikely(get_user(opcode, epc) < 0))
status = SIGSEGV;
@@ -1009,7 +1022,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
regs->cp0_epc = old_epc; /* Undo skip-over. */
force_sig(status, current);
}
-
+ trace_trap_exit();
return;
case 1:
@@ -1029,11 +1042,12 @@ asmlinkage void do_cpu(struct pt_regs *regs)
if (!process_fpemu_return(sig, fault_addr))
mt_ase_fp_affinity();
}
-
+ trace_trap_exit();
return;
case 2:
raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
+ trace_trap_exit();
return;
case 3:
@@ -1041,6 +1055,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
}
force_sig(SIGILL, current);
+ trace_trap_exit();
}
asmlinkage void do_mdmx(struct pt_regs *regs)
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index cfea1adfa15..d3af94de240 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -79,6 +79,7 @@
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>
+#include <trace/trap.h>
#include <asm/asm.h>
#include <asm/branch.h>
@@ -518,6 +519,7 @@ asmlinkage void do_ade(struct pt_regs *regs)
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1, 0, regs, regs->cp0_badvaddr);
+ trace_trap_entry(regs, CAUSE_EXCCODE(regs->cp0_cause));
/*
* Did we catch a fault trying to load an instruction?
* Or are we running in MIPS16 mode?
@@ -543,6 +545,8 @@ asmlinkage void do_ade(struct pt_regs *regs)
emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
set_fs(seg);
+ trace_trap_exit();
+
return;
sigbus:
@@ -552,6 +556,8 @@ sigbus:
/*
* XXX On return from the signal handler we should advance the epc
*/
+
+ trace_trap_exit();
}
#ifdef CONFIG_DEBUG_FS
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 137ee76a004..1a5bd7b9018 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
+#include <trace/fault.h>
#include <asm/branch.h>
#include <asm/mmu_context.h>
@@ -28,6 +29,9 @@
#include <asm/highmem.h> /* For VMALLOC_END */
#include <linux/kdebug.h>
+DEFINE_TRACE(page_fault_entry);
+DEFINE_TRACE(page_fault_exit);
+
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
@@ -144,7 +148,10 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
+ trace_page_fault_entry(regs, CAUSE_EXCCODE(regs->cp0_cause), mm, vma,
+ address, write);
fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+ trace_page_fault_exit(fault);
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)