diff options
-rw-r--r-- arch/x86/ia32/ipc32.c | 5
-rw-r--r-- arch/x86/kernel/apic/apic.c | 7
-rw-r--r-- arch/x86/kernel/cpu/common.c | 1
-rw-r--r-- arch/x86/kernel/cpu/mcheck/therm_throt.c | 3
-rw-r--r-- arch/x86/kernel/process.c | 9
-rw-r--r-- arch/x86/kernel/ptrace.c | 8
-rw-r--r-- arch/x86/kernel/traps.c | 44
-rw-r--r-- arch/x86/mm/fault.c | 10
-rw-r--r-- arch/x86/mm/tlb.c | 4
9 files changed, 82 insertions, 9 deletions
diff --git a/arch/x86/ia32/ipc32.c b/arch/x86/ia32/ipc32.c index 29cdcd02ead..accd6b42bd2 100644 --- a/arch/x86/ia32/ipc32.c +++ b/arch/x86/ia32/ipc32.c @@ -8,8 +8,11 @@ #include <linux/shm.h> #include <linux/ipc.h> #include <linux/compat.h> +#include <trace/ipc.h> #include <asm/sys_ia32.h> +DEFINE_TRACE(ipc_call); + asmlinkage long sys32_ipc(u32 call, int first, int second, int third, compat_uptr_t ptr, u32 fifth) { @@ -18,6 +21,8 @@ asmlinkage long sys32_ipc(u32 call, int first, int second, int third, version = call >> 16; /* hack for backward compatibility */ call &= 0xffff; + trace_ipc_call(call, first); + switch (call) { case SEMOP: /* struct sembuf is the same on 32 and 64bit :)) */ diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 76b96d74978..c604d23b4f3 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -33,6 +33,7 @@ #include <linux/dmi.h> #include <linux/smp.h> #include <linux/mm.h> +#include <trace/irq.h> #include <asm/perf_event.h> #include <asm/x86_init.h> @@ -868,7 +869,9 @@ void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs) */ exit_idle(); irq_enter(); + trace_irq_entry(LOCAL_TIMER_VECTOR, regs, NULL); local_apic_timer_interrupt(); + trace_irq_exit(IRQ_HANDLED); irq_exit(); set_irq_regs(old_regs); @@ -1788,6 +1791,7 @@ void smp_spurious_interrupt(struct pt_regs *regs) exit_idle(); irq_enter(); + trace_irq_entry(SPURIOUS_APIC_VECTOR, NULL, NULL); /* * Check if this really is a spurious interrupt and ACK it * if it is a vectored one. Just in case... 
@@ -1802,6 +1806,7 @@ void smp_spurious_interrupt(struct pt_regs *regs) /* see sw-dev-man vol 3, chapter 7.4.13.5 */ pr_info("spurious APIC interrupt on CPU#%d, " "should never happen.\n", smp_processor_id()); + trace_irq_exit(IRQ_HANDLED); irq_exit(); } @@ -1814,6 +1819,7 @@ void smp_error_interrupt(struct pt_regs *regs) exit_idle(); irq_enter(); + trace_irq_entry(ERROR_APIC_VECTOR, NULL, NULL); /* First tickle the hardware, only then report what went on. -- REW */ v = apic_read(APIC_ESR); apic_write(APIC_ESR, 0); @@ -1834,6 +1840,7 @@ void smp_error_interrupt(struct pt_regs *regs) */ pr_debug("APIC error on CPU%d: %02x(%02x)\n", smp_processor_id(), v , v1); + trace_irq_exit(IRQ_HANDLED); irq_exit(); } diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 1d59834396b..6052f6f65a6 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1069,6 +1069,7 @@ unsigned long kernel_eflags; * debugging, no special alignment required. */ DEFINE_PER_CPU(struct orig_ist, orig_ist); +EXPORT_PER_CPU_SYMBOL_GPL(orig_ist); #else /* CONFIG_X86_64 */ diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 6f8c5e9da97..c8a6411d8ba 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -23,6 +23,7 @@ #include <linux/init.h> #include <linux/smp.h> #include <linux/cpu.h> +#include <trace/irq.h> #include <asm/processor.h> #include <asm/system.h> @@ -402,8 +403,10 @@ asmlinkage void smp_thermal_interrupt(struct pt_regs *regs) { exit_idle(); irq_enter(); + trace_irq_entry(THERMAL_APIC_VECTOR, regs, NULL); inc_irq_stat(irq_thermal_count); smp_thermal_vector(); + trace_irq_exit(IRQ_HANDLED); irq_exit(); /* Ack only at the end to avoid potential reentry */ ack_APIC_irq(); diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index ff455419898..e0e4ffcad48 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -13,6 
+13,7 @@ #include <linux/dmi.h> #include <linux/utsname.h> #include <trace/events/power.h> +#include <trace/sched.h> #include <linux/hw_breakpoint.h> #include <asm/cpu.h> #include <asm/system.h> @@ -23,6 +24,8 @@ #include <asm/i387.h> #include <asm/debugreg.h> +DEFINE_TRACE(sched_kthread_create); + struct kmem_cache *task_xstate_cachep; EXPORT_SYMBOL_GPL(task_xstate_cachep); @@ -278,6 +281,7 @@ extern void kernel_thread_helper(void); int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) { struct pt_regs regs; + long pid; memset(®s, 0, sizeof(regs)); @@ -299,7 +303,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) regs.flags = X86_EFLAGS_IF | 0x2; /* Ok, create the new process.. */ - return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s, 0, NULL, NULL); + pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, + 0, ®s, 0, NULL, NULL); + trace_sched_kthread_create(fn, pid); + return pid; } EXPORT_SYMBOL(kernel_thread); diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 45892dc4b72..ee3024d4f61 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -21,6 +21,7 @@ #include <linux/signal.h> #include <linux/perf_event.h> #include <linux/hw_breakpoint.h> +#include <trace/syscall.h> #include <asm/uaccess.h> #include <asm/pgtable.h> @@ -152,6 +153,9 @@ static const int arg_offs_table[] = { X86_EFLAGS_DF | X86_EFLAGS_OF | \ X86_EFLAGS_RF | X86_EFLAGS_AC)) +DEFINE_TRACE(syscall_entry); +DEFINE_TRACE(syscall_exit); + /* * Determines whether a value may be installed in a segment register. 
*/ @@ -1361,6 +1365,8 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs) if (test_thread_flag(TIF_SINGLESTEP)) regs->flags |= X86_EFLAGS_TF; + trace_syscall_entry(regs, regs->orig_ax); + /* do the secure computing check first */ secure_computing(regs->orig_ax); @@ -1396,6 +1402,8 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs) { bool step; + trace_syscall_exit(regs->ax); + if (unlikely(current->audit_context)) audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index b9b67166f9d..4dfa969c88c 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -31,6 +31,7 @@ #include <linux/mm.h> #include <linux/smp.h> #include <linux/io.h> +#include <trace/trap.h> #ifdef CONFIG_EISA #include <linux/ioport.h> @@ -81,6 +82,12 @@ gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, }; DECLARE_BITMAP(used_vectors, NR_VECTORS); EXPORT_SYMBOL_GPL(used_vectors); +/* + * Also used in arch/x86/mm/fault.c. 
+ */ +DEFINE_TRACE(trap_entry); +DEFINE_TRACE(trap_exit); + static int ignore_nmis; int unknown_nmi_panic; @@ -122,6 +129,8 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, { struct task_struct *tsk = current; + trace_trap_entry(regs, trapnr); + #ifdef CONFIG_X86_32 if (regs->flags & X86_VM_MASK) { /* @@ -168,7 +177,7 @@ trap_signal: force_sig_info(signr, info, tsk); else force_sig(signr, tsk); - return; + goto end; kernel_trap: if (!fixup_exception(regs)) { @@ -176,15 +185,17 @@ kernel_trap: tsk->thread.trap_no = trapnr; die(str, regs, error_code); } - return; + goto end; #ifdef CONFIG_X86_32 vm86_trap: if (handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr)) goto trap_signal; - return; + goto end; #endif +end: + trace_trap_exit(); } #define DO_ERROR(trapnr, signr, str, name) \ @@ -285,7 +296,9 @@ do_general_protection(struct pt_regs *regs, long error_code) printk("\n"); } + trace_trap_entry(regs, 13); force_sig(SIGSEGV, tsk); + trace_trap_exit(); return; #ifdef CONFIG_X86_32 @@ -371,9 +384,11 @@ io_check_error(unsigned char reason, struct pt_regs *regs) static notrace __kprobes void unknown_nmi_error(unsigned char reason, struct pt_regs *regs) { + trace_trap_entry(regs, 2); + if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) - return; + goto end; #ifdef CONFIG_MCA /* * Might actually be able to figure out what the guilty party @@ -381,7 +396,7 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs) */ if (MCA_bus) { mca_handle_nmi(); - return; + goto end; } #endif pr_emerg("Uhhuh. 
NMI received for unknown reason %02x on CPU %d.\n", @@ -392,19 +407,23 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs) panic("NMI: Not continuing"); pr_emerg("Dazed and confused, but trying to continue\n"); +end: + trace_trap_exit(); } static notrace __kprobes void default_do_nmi(struct pt_regs *regs) { unsigned char reason = 0; + trace_trap_entry(regs, 2); + /* * CPU-specific NMI must be processed before non-CPU-specific * NMI, otherwise we may lose it, because the CPU-specific * NMI can not be detected/processed on other CPUs. */ if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP) - return; + goto end; /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */ raw_spin_lock(&nmi_reason_lock); @@ -423,11 +442,13 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs) reassert_nmi(); #endif raw_spin_unlock(&nmi_reason_lock); - return; + goto end; } raw_spin_unlock(&nmi_reason_lock); unknown_nmi_error(reason, regs); +end: + trace_trap_exit(); } dotraplinkage notrace __kprobes void @@ -570,8 +591,10 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) preempt_conditional_sti(regs); if (regs->flags & X86_VM_MASK) { + trace_trap_entry(regs, 1); handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1); + trace_trap_exit(); preempt_conditional_cli(regs); return; } @@ -589,8 +612,11 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) regs->flags &= ~X86_EFLAGS_TF; } si_code = get_si_code(tsk->thread.debugreg6); - if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) + if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) { + trace_trap_entry(regs, 1); send_sigtrap(tsk, regs, error_code, si_code); + trace_trap_exit(); + } preempt_conditional_cli(regs); return; @@ -701,11 +727,13 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code) dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *regs, long error_code) 
{ + trace_trap_entry(regs, 16); conditional_sti(regs); #if 0 /* No need to warn about this any longer. */ printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n"); #endif + trace_trap_exit(); } asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void) diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 20e3f8702d1..abeb09914d5 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -12,6 +12,7 @@ #include <linux/mmiotrace.h> /* kmmio_handler, ... */ #include <linux/perf_event.h> /* perf_sw_event */ #include <linux/hugetlb.h> /* hstate_index_to_shift */ +#include <trace/fault.h> #include <asm/traps.h> /* dotraplinkage, ... */ #include <asm/pgalloc.h> /* pgd_*(), ... */ @@ -35,6 +36,11 @@ enum x86_pf_error_code { PF_INSTR = 1 << 4, }; +DEFINE_TRACE(page_fault_entry); +DEFINE_TRACE(page_fault_exit); +DEFINE_TRACE(page_fault_nosem_entry); +DEFINE_TRACE(page_fault_nosem_exit); + /* * Returns 0 if mmiotrace is disabled, or if the fault is not * handled by mmiotrace: @@ -719,6 +725,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, if (is_errata100(regs, address)) return; + trace_page_fault_nosem_entry(regs, 14, address); if (unlikely(show_unhandled_signals)) show_signal_msg(regs, error_code, address, tsk); @@ -728,6 +735,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, tsk->thread.trap_no = 14; force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); + trace_page_fault_nosem_exit(); return; } @@ -1130,7 +1138,9 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault: */ + trace_page_fault_entry(regs, 14, mm, vma, address, write); fault = handle_mm_fault(mm, vma, address, flags); + trace_page_fault_exit(fault); if (unlikely(fault & VM_FAULT_ERROR)) { mm_fault_error(regs, error_code, address, fault); diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 6acc724d5d8..14b9317eccb 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -6,6 +6,7 @@ 
#include <linux/interrupt.h> #include <linux/module.h> #include <linux/cpu.h> +#include <trace/irq.h> #include <asm/tlbflush.h> #include <asm/mmu_context.h> @@ -141,6 +142,8 @@ void smp_invalidate_interrupt(struct pt_regs *regs) sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START; f = &flush_state[sender]; + trace_irq_entry(sender, regs, NULL); + if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask))) goto out; /* @@ -167,6 +170,7 @@ out: cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask)); smp_mb__after_clear_bit(); inc_irq_stat(irq_tlb_count); + trace_irq_exit(IRQ_HANDLED); } static void flush_tlb_others_ipi(const struct cpumask *cpumask, |