Diffstat (limited to 'kernel/trace/trace_functions.c')
-rw-r--r-- | kernel/trace/trace_functions.c | 84
1 file changed, 83 insertions, 1 deletion
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 3a5fa08cedb0..2dce3c7370d1 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -20,6 +20,7 @@ static struct trace_array *func_trace;
 
 static void start_function_trace(struct trace_array *tr)
 {
+	func_trace = tr;
 	tr->cpu = get_cpu();
 	tracing_reset_online_cpus(tr);
 	put_cpu();
@@ -36,7 +37,6 @@ static void stop_function_trace(struct trace_array *tr)
 
 static int function_trace_init(struct trace_array *tr)
 {
-	func_trace = tr;
 	start_function_trace(tr);
 	return 0;
 }
@@ -52,6 +52,64 @@ static void function_trace_start(struct trace_array *tr)
 }
 
 static void
+function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
+{
+	struct trace_array *tr = func_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu, resched;
+	int pc;
+
+	if (unlikely(!ftrace_function_enabled))
+		return;
+
+	pc = preempt_count();
+	resched = ftrace_preempt_disable();
+	local_save_flags(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1))
+		trace_function(tr, data, ip, parent_ip, flags, pc);
+
+	atomic_dec(&data->disabled);
+	ftrace_preempt_enable(resched);
+}
+
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+	struct trace_array *tr = func_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	if (unlikely(!ftrace_function_enabled))
+		return;
+
+	/*
+	 * Need to use raw, since this must be called before the
+	 * recursive protection is performed.
+	 */
+	local_irq_save(flags);
+	cpu = raw_smp_processor_id();
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		trace_function(tr, data, ip, parent_ip, flags, pc);
+	}
+
+	atomic_dec(&data->disabled);
+	local_irq_restore(flags);
+}
+
+static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
 {
 	struct trace_array *tr = func_trace;
@@ -90,6 +148,30 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
 	local_irq_restore(flags);
 }
 
+
+static struct ftrace_ops trace_ops __read_mostly =
+{
+	.func = function_trace_call,
+};
+
+void tracing_start_function_trace(void)
+{
+	ftrace_function_enabled = 0;
+
+	if (trace_flags & TRACE_ITER_PREEMPTONLY)
+		trace_ops.func = function_trace_call_preempt_only;
+	else
+		trace_ops.func = function_trace_call;
+
+	register_ftrace_function(&trace_ops);
+	ftrace_function_enabled = 1;
+}
+
+void tracing_stop_function_trace(void)
+{
+	ftrace_function_enabled = 0;
+	unregister_ftrace_function(&trace_ops);
+}
 
 static struct ftrace_ops trace_stack_ops __read_mostly =
 {
 	.func = function_stack_trace_call,
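
A note on the two callbacks added above: function_trace_call() disables interrupts outright, while function_trace_call_preempt_only() only disables preemption through the ftrace_preempt_disable()/ftrace_preempt_enable() pair, and is selected when the TRACE_ITER_PREEMPTONLY flag is set. Those helpers are not part of this diff; the sketch below shows the pattern they implement, assuming the kernel/trace/trace.h helpers of this era, and is illustrative rather than authoritative.

/*
 * Illustrative sketch, not from this diff: assumed to match the
 * helpers in kernel/trace/trace.h of the same tree. The idea is to
 * remember whether NEED_RESCHED was already set before preemption
 * was disabled, so that re-enabling preemption from inside the
 * tracer callback cannot recurse into schedule().
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();	/* _notrace: don't trace the tracer */
	return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		/* a reschedule was already pending; skip the resched check */
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

With this pattern the preempt-only callback stays safe even when the traced function is the scheduler itself: the enable path never triggers a reschedule that was already pending when the callback ran.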