Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/trace.c               11
-rw-r--r--  kernel/trace/trace_events.c         3
-rw-r--r--  kernel/trace/trace_sched_switch.c  10
3 files changed, 23 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a3f1bc5d2a0..2d1c28efc8b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -37,6 +37,7 @@
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/fs.h>
+#include <trace/stm.h>
#include "trace.h"
#include "trace_output.h"
@@ -911,7 +912,7 @@ void tracing_reset_current_online_cpus(void)
tracing_reset_online_cpus(&global_trace);
}
-#define SAVED_CMDLINES 128
+#define SAVED_CMDLINES 2048
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
@@ -1239,6 +1240,8 @@ trace_function(struct trace_array *tr,
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
+
+ stm_ftrace(ip, parent_ip);
}
void
@@ -1333,6 +1336,8 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
+ stm_stack_trace(trace.entries);
+
out:
/* Again, don't let gcc optimize things here */
barrier();
@@ -1503,6 +1508,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
ftrace_trace_stack(buffer, flags, 6, pc);
}
+ stm_trace_bprintk_buf(ip, fmt, trace_buf, sizeof(u32) * len);
+
out_unlock:
arch_spin_unlock(&trace_buf_lock);
local_irq_restore(flags);
@@ -1579,6 +1586,8 @@ int trace_array_vprintk(struct trace_array *tr,
ftrace_trace_stack(buffer, irq_flags, 6, pc);
}
+ stm_trace_printk_buf(ip, trace_buf, len);
+
out_unlock:
arch_spin_unlock(&trace_buf_lock);
raw_local_irq_restore(irq_flags);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index c212a7f934e..773cb84adc8 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -19,6 +19,7 @@
#include <linux/delay.h>
#include <asm/setup.h>
+#include <trace/stm.h>
#include "trace_output.h"
@@ -1703,6 +1704,8 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
+ stm_ftrace(ip, parent_ip);
+
out:
atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
preempt_enable_notrace();
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 7e62c0a1845..a136fd86533 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -11,6 +11,7 @@
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/events/sched.h>
+#include <trace/stm.h>
#include "trace.h"
@@ -47,6 +48,10 @@ tracing_sched_switch_trace(struct trace_array *tr,
if (!filter_check_discard(call, entry, buffer, event))
trace_buffer_unlock_commit(buffer, event, flags, pc);
+
+ stm_sched_switch(entry->prev_pid, entry->prev_prio, entry->prev_state,
+ entry->next_pid, entry->next_prio, entry->next_state,
+ entry->next_cpu);
}
static void
@@ -103,6 +108,11 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
+
+ stm_sched_wakeup(entry->prev_pid, entry->prev_prio, entry->prev_state,
+ entry->next_pid, entry->next_prio, entry->next_state,
+ entry->next_cpu);
+
ftrace_trace_stack(tr->buffer, flags, 6, pc);
ftrace_trace_userstack(tr->buffer, flags, pc);
}