author    Steven Rostedt <srostedt@redhat.com>    2009-01-15 20:40:23 -0500
committer Ingo Molnar <mingo@elte.hu>             2009-01-16 12:17:10 +0100
commit    bb3c3c95f330f7bf16e33b002e48882616089db1 (patch)
tree      e2f4045f002fdb96adc3a82cbf24436063c82ce8 /kernel/trace/trace_functions.c
parent    5361499101306cfb776c3cfa0f69d0479bc63868 (diff)
ftrace: move function tracer functions out of trace.c
Impact: clean up of trace.c

The function tracer functions were put in trace.c because they needed to share static variables that were in trace.c. Since then, those variables have become global for various reasons. This patch moves the function tracer functions into trace_functions.c where they belong.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
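For context, the functions being moved all follow the same ftrace pattern: a tracer fills in a struct ftrace_ops with a callback taking (ip, parent_ip), then turns tracing on and off with register_ftrace_function()/unregister_ftrace_function(). The minimal sketch below only illustrates that pattern; my_callback, my_ops, and the hit counter are made-up names for illustration, while the real callbacks and ops are in the diff that follows.

#include <linux/ftrace.h>

static unsigned long hits;	/* hypothetical counter, for illustration only */

/*
 * Once registered, this runs at (nearly) every traced function entry.
 * The signature matches the .func callbacks added by this patch.
 */
static void my_callback(unsigned long ip, unsigned long parent_ip)
{
	hits++;
}

static struct ftrace_ops my_ops __read_mostly =
{
	.func = my_callback,
};

/* Mirrors tracing_start_function_trace()/tracing_stop_function_trace() below. */
static void my_start(void)
{
	register_ftrace_function(&my_ops);
}

static void my_stop(void)
{
	unregister_ftrace_function(&my_ops);
}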
Diffstat (limited to 'kernel/trace/trace_functions.c')
-rw-r--r--  kernel/trace/trace_functions.c  84
1 file changed, 83 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 3a5fa08cedb..2dce3c7370d 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -20,6 +20,7 @@ static struct trace_array *func_trace;
static void start_function_trace(struct trace_array *tr)
{
+ func_trace = tr;
tr->cpu = get_cpu();
tracing_reset_online_cpus(tr);
put_cpu();
@@ -36,7 +37,6 @@ static void stop_function_trace(struct trace_array *tr)
static int function_trace_init(struct trace_array *tr)
{
- func_trace = tr;
start_function_trace(tr);
return 0;
}
@@ -52,6 +52,64 @@ static void function_trace_start(struct trace_array *tr)
}
static void
+function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
+{
+ struct trace_array *tr = func_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int cpu, resched;
+ int pc;
+
+ if (unlikely(!ftrace_function_enabled))
+ return;
+
+ pc = preempt_count();
+ resched = ftrace_preempt_disable();
+ local_save_flags(flags);
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+
+ if (likely(disabled == 1))
+ trace_function(tr, data, ip, parent_ip, flags, pc);
+
+ atomic_dec(&data->disabled);
+ ftrace_preempt_enable(resched);
+}
+
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+ struct trace_array *tr = func_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int cpu;
+ int pc;
+
+ if (unlikely(!ftrace_function_enabled))
+ return;
+
+ /*
+ * Need to use raw, since this must be called before the
+ * recursive protection is performed.
+ */
+ local_irq_save(flags);
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+
+ if (likely(disabled == 1)) {
+ pc = preempt_count();
+ trace_function(tr, data, ip, parent_ip, flags, pc);
+ }
+
+ atomic_dec(&data->disabled);
+ local_irq_restore(flags);
+}
+
+static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
struct trace_array *tr = func_trace;
@@ -90,6 +148,30 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
local_irq_restore(flags);
}
+
+static struct ftrace_ops trace_ops __read_mostly =
+{
+ .func = function_trace_call,
+};
+
+void tracing_start_function_trace(void)
+{
+ ftrace_function_enabled = 0;
+
+ if (trace_flags & TRACE_ITER_PREEMPTONLY)
+ trace_ops.func = function_trace_call_preempt_only;
+ else
+ trace_ops.func = function_trace_call;
+
+ register_ftrace_function(&trace_ops);
+ ftrace_function_enabled = 1;
+}
+
+void tracing_stop_function_trace(void)
+{
+ ftrace_function_enabled = 0;
+ unregister_ftrace_function(&trace_ops);
+}
static struct ftrace_ops trace_stack_ops __read_mostly =
{
.func = function_stack_trace_call,