Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/linux32.c     5
-rw-r--r--  arch/mips/kernel/process.c     9
-rw-r--r--  arch/mips/kernel/ptrace.c      9
-rw-r--r--  arch/mips/kernel/syscall.c     1
-rw-r--r--  arch/mips/kernel/traps.c      25
-rw-r--r--  arch/mips/kernel/unaligned.c   6
6 files changed, 49 insertions, 6 deletions
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 876a75cc376..76a82609626 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -34,6 +34,7 @@
#include <linux/vfs.h>
#include <linux/ipc.h>
#include <linux/slab.h>
+#include <trace/ipc.h>
#include <net/sock.h>
#include <net/scm.h>
@@ -44,6 +45,8 @@
#include <asm/mmu_context.h>
#include <asm/mman.h>
+DEFINE_TRACE(ipc_call);
+
/* Use this to get at 32-bit user passed pointers. */
/* A() macro should be used for places where you e.g.
have some internal variable u32 and just want to get
@@ -166,6 +169,8 @@ SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third,
version = call >> 16; /* hack for backward compatibility */
call &= 0xffff;
+ trace_ipc_call(call, first);
+
switch (call) {
case SEMOP:
/* struct sembuf is the same on 32 and 64bit :)) */
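A note on the tracepoint pattern used throughout this patch: DEFINE_TRACE() in the .c file only instantiates the tracepoint; the matching DECLARE_TRACE() lives in the header being included. A minimal sketch of what <trace/ipc.h> could contain, with the prototype inferred from the trace_ipc_call(call, first) call site above (an assumption, not part of this patch):

/* Hypothetical sketch of trace/ipc.h; argument types are inferred, not confirmed. */
#include <linux/tracepoint.h>

DECLARE_TRACE(ipc_call,
	TP_PROTO(unsigned int call, int first),
	TP_ARGS(call, first));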
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index ae167df73dd..7d9bb1cdd7f 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -25,6 +25,7 @@
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
+#include <trace/sched.h>
#include <asm/asm.h>
#include <asm/bootinfo.h>
@@ -42,6 +43,8 @@
#include <asm/inst.h>
#include <asm/stacktrace.h>
+DEFINE_TRACE(sched_kthread_create);
+
/*
* The idle thread. There's no useful work to be done, so just try to conserve
* power and have a low exit latency (ie sit in a loop waiting for somebody to
@@ -234,6 +237,7 @@ static void __noreturn kernel_thread_helper(void *arg, int (*fn)(void *))
long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
struct pt_regs regs;
+ long pid;
memset(&regs, 0, sizeof(regs));
@@ -249,7 +253,10 @@ long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
#endif
/* Ok, create the new process.. */
- return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
+ pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED,
+ 0, &regs, 0, NULL, NULL);
+ trace_sched_kthread_create(fn, pid);
+ return pid;
}
/*
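For reference, once sched_kthread_create is defined, a probe can attach to it through the generated register_trace_sched_kthread_create() helper. The sketch below assumes the tracepoint API of 2.6.35+ kernels, where probe callbacks take a leading void *data argument; the probe prototype mirrors the trace_sched_kthread_create(fn, pid) call site and is otherwise hypothetical:

/* Hypothetical probe module sketch; prototype inferred from the call site. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <trace/sched.h>

static void probe_kthread_create(void *data, int (*fn)(void *), long pid)
{
	/* Runs each time kernel_thread() creates a kernel thread. */
	printk(KERN_INFO "kthread_create: fn=%p pid=%ld\n", fn, pid);
}

static int __init probe_init(void)
{
	return register_trace_sched_kthread_create(probe_kthread_create, NULL);
}

static void __exit probe_exit(void)
{
	unregister_trace_sched_kthread_create(probe_kthread_create, NULL);
	/* Make sure no probe call is still in flight before the module text goes away. */
	tracepoint_synchronize_unregister();
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");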
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index d21c388c011..79e1750cc7c 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -25,6 +25,7 @@
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
+#include <trace/syscall.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
@@ -39,6 +40,9 @@
#include <asm/bootinfo.h>
#include <asm/reg.h>
+DEFINE_TRACE(syscall_entry);
+DEFINE_TRACE(syscall_exit);
+
/*
* Called by kernel/ptrace.c when detaching..
*
@@ -535,6 +539,11 @@ static inline int audit_arch(void)
*/
asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
{
+ if (!entryexit)
+ trace_syscall_entry(regs, regs->regs[2]);
+ else
+ trace_syscall_exit(regs->regs[2]);
+
/* do the secure computing check first */
if (!entryexit)
secure_computing(regs->regs[2]);
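On MIPS, regs->regs[2] ($v0) holds the syscall number on entry and the return value on exit, which is why the same register feeds both tracepoints. A sketch of the declarations <trace/syscall.h> would need to provide, with prototypes inferred from the call sites above (hypothetical, not part of this patch):

/* Hypothetical sketch of trace/syscall.h; prototypes inferred from the call sites. */
#include <linux/tracepoint.h>

struct pt_regs;

DECLARE_TRACE(syscall_entry,
	TP_PROTO(struct pt_regs *regs, long id),
	TP_ARGS(regs, id));

DECLARE_TRACE(syscall_exit,
	TP_PROTO(long ret),
	TP_ARGS(ret));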
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 58beabf50b3..eeec9a1507f 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/elf.h>
+#include <linux/ipc.h>
#include <asm/asm.h>
#include <asm/branch.h>
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 71350f7f2d8..b6a12d70e8c 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -30,6 +30,7 @@
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
+#include <trace/trap.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
@@ -55,6 +56,12 @@
#include <asm/stacktrace.h>
#include <asm/uasm.h>
+/*
+ * Also used in unaligned.c and fault.c.
+ */
+DEFINE_TRACE(trap_entry);
+DEFINE_TRACE(trap_exit);
+
extern void check_wait(void);
extern asmlinkage void r4k_wait(void);
extern asmlinkage void rollback_handle_int(void);
@@ -321,7 +328,7 @@ static void __show_regs(const struct pt_regs *regs)
printk("Cause : %08x\n", cause);
- cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
+ cause = CAUSE_EXCCODE(cause);
if (1 <= cause && cause <= 5)
printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
@@ -698,6 +705,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
return;
die_if_kernel("FP exception in kernel code", regs);
+ trace_trap_entry(regs, CAUSE_EXCCODE(regs->cp0_cause));
if (fcr31 & FPU_CSR_UNI_X) {
int sig;
void __user *fault_addr = NULL;
@@ -730,7 +738,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
/* If something went wrong, signal */
process_fpemu_return(sig, fault_addr);
-
+ trace_trap_exit();
return;
} else if (fcr31 & FPU_CSR_INV_X)
info.si_code = FPE_FLTINV;
@@ -748,6 +756,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
info.si_errno = 0;
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
+ trace_trap_exit();
}
static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
@@ -979,6 +988,8 @@ asmlinkage void do_cpu(struct pt_regs *regs)
int status;
unsigned long __maybe_unused flags;
+ trace_trap_entry(regs, CAUSE_EXCCODE(regs->cp0_cause));
+
die_if_kernel("do_cpu invoked from kernel context!", regs);
cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
@@ -990,8 +1001,10 @@ asmlinkage void do_cpu(struct pt_regs *regs)
opcode = 0;
status = -1;
- if (unlikely(compute_return_epc(regs) < 0))
+ if (unlikely(compute_return_epc(regs) < 0)) {
+ trace_trap_exit();
return;
+ }
if (unlikely(get_user(opcode, epc) < 0))
status = SIGSEGV;
@@ -1009,7 +1022,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
regs->cp0_epc = old_epc; /* Undo skip-over. */
force_sig(status, current);
}
-
+ trace_trap_exit();
return;
case 1:
@@ -1029,11 +1042,12 @@ asmlinkage void do_cpu(struct pt_regs *regs)
if (!process_fpemu_return(sig, fault_addr))
mt_ase_fp_affinity();
}
-
+ trace_trap_exit();
return;
case 2:
raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
+ trace_trap_exit();
return;
case 3:
@@ -1041,6 +1055,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
}
force_sig(SIGILL, current);
+ trace_trap_exit();
}
asmlinkage void do_mdmx(struct pt_regs *regs)
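The __show_regs() hunk above swaps the open-coded field extraction for CAUSE_EXCCODE(), which this series presumably introduces elsewhere (most likely in asm/mipsregs.h). Judging purely from the expression it replaces, the macro would amount to:

/* Hypothetical definition, derived from the open-coded expression it replaces. */
#define CAUSE_EXCCODE(cause)	(((cause) & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE)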
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index cfea1adfa15..d3af94de240 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -79,6 +79,7 @@
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>
+#include <trace/trap.h>
#include <asm/asm.h>
#include <asm/branch.h>
@@ -518,6 +519,7 @@ asmlinkage void do_ade(struct pt_regs *regs)
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1, 0, regs, regs->cp0_badvaddr);
+ trace_trap_entry(regs, CAUSE_EXCCODE(regs->cp0_cause));
/*
* Did we catch a fault trying to load an instruction?
* Or are we running in MIPS16 mode?
@@ -543,6 +545,8 @@ asmlinkage void do_ade(struct pt_regs *regs)
emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
set_fs(seg);
+ trace_trap_exit();
+
return;
sigbus:
@@ -552,6 +556,8 @@ sigbus:
/*
* XXX On return from the signal handler we should advance the epc
*/
+
+ trace_trap_exit();
}
#ifdef CONFIG_DEBUG_FS
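The trap_entry/trap_exit pair used here is the one defined in traps.c above ("Also used in unaligned.c and fault.c"). A sketch of the declarations <trace/trap.h> would carry, inferred from the call sites; since trace_trap_exit() takes no arguments, a 2.6.35+ kernel would declare it with DECLARE_TRACE_NOARGS() (both the header layout and that choice are assumptions):

/* Hypothetical sketch of trace/trap.h; prototypes inferred from the call sites. */
#include <linux/tracepoint.h>

struct pt_regs;

DECLARE_TRACE(trap_entry,
	TP_PROTO(struct pt_regs *regs, long id),
	TP_ARGS(regs, id));

DECLARE_TRACE_NOARGS(trap_exit);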