Diffstat (limited to 'tools/perf/builtin-trace.c')
tools/perf/builtin-trace.c | 50 ++++++++++++++++++++++++++++++++++++++++++++------
1 file changed, 44 insertions(+), 6 deletions(-)
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 90289f31dd87..7081d7ea12e5 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -89,6 +89,8 @@ struct trace {
u64 base_time;
FILE *output;
unsigned long nr_events;
+ unsigned long nr_events_printed;
+ unsigned long max_events;
struct strlist *ev_qualifier;
struct {
size_t nr;
@@ -1664,6 +1666,8 @@ static int trace__printf_interrupted_entry(struct trace *trace)
printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
ttrace->entry_pending = false;
+ ++trace->nr_events_printed;
+
return printed;
}
@@ -1810,12 +1814,14 @@ static int trace__resolve_callchain(struct trace *trace, struct perf_evsel *evse
int max_stack = evsel->attr.sample_max_stack ?
evsel->attr.sample_max_stack :
trace->max_stack;
+ int err;
- if (machine__resolve(trace->host, &al, sample) < 0 ||
- thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack))
+ if (machine__resolve(trace->host, &al, sample) < 0)
return -1;
- return 0;
+ err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
+ addr_location__put(&al);
+ return err;
}
static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
@@ -1940,6 +1946,13 @@ errno_print: {
fputc('\n', trace->output);
+ /*
+ * For the sake of --max-events, we only count as an 'event' a non-filtered
+ * sys_enter + sys_exit pair and other tracepoint events.
+ */
+ if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
+ interrupted = true;
+
if (callchain_ret > 0)
trace__fprintf_callchain(trace, sample);
else if (callchain_ret < 0)
@@ -2072,14 +2085,25 @@ static void bpf_output__fprintf(struct trace *trace,
{
binary__fprintf(sample->raw_data, sample->raw_size, 8,
bpf_output__printer, NULL, trace->output);
+ ++trace->nr_events_printed;
}
static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
{
- struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
+ struct thread *thread;
int callchain_ret = 0;
+ /*
+ * Check if we called perf_evsel__disable(evsel) because, for instance,
+ * this event's max_events was hit; if so, this entry coming from the
+ * ring buffer should be discarded, since the maximum number of events
+ * has already been considered/printed.
+ */
+ if (evsel->disabled)
+ return 0;
+
+ thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
if (sample->callchain) {
callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
@@ -2127,6 +2151,12 @@ static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
event_format__fprintf(evsel->tp_format, sample->cpu,
sample->raw_data, sample->raw_size,
trace->output);
+ ++trace->nr_events_printed;
+
+ if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
+ perf_evsel__disable(evsel);
+ perf_evsel__close(evsel);
+ }
}
}
@@ -2137,8 +2167,8 @@ newline:
trace__fprintf_callchain(trace, sample);
else if (callchain_ret < 0)
pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
- thread__put(thread);
out:
+ thread__put(thread);
return 0;
}
@@ -2225,6 +2255,8 @@ static int trace__pgfault(struct trace *trace,
trace__fprintf_callchain(trace, sample);
else if (callchain_ret < 0)
pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
+
+ ++trace->nr_events_printed;
out:
err = 0;
out_put:
@@ -2402,6 +2434,9 @@ static void trace__handle_event(struct trace *trace, union perf_event *event, st
tracepoint_handler handler = evsel->handler;
handler(trace, evsel, event, sample);
}
+
+ if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
+ interrupted = true;
}
static int trace__add_syscall_newtp(struct trace *trace)
@@ -2706,7 +2741,7 @@ next_event:
int timeout = done ? 100 : -1;
if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
- if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)
+ if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
draining = true;
goto again;
@@ -3249,6 +3284,7 @@ int cmd_trace(int argc, const char **argv)
.trace_syscalls = false,
.kernel_syscallchains = false,
.max_stack = UINT_MAX,
+ .max_events = ULONG_MAX,
};
const char *output_name = NULL;
const struct option trace_options[] = {
@@ -3301,6 +3337,8 @@ int cmd_trace(int argc, const char **argv)
&record_parse_callchain_opt),
OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
"Show the kernel callchains on the syscall exit path"),
+ OPT_ULONG(0, "max-events", &trace.max_events,
+ "Set the maximum number of events to print, exit after that is reached. "),
OPT_UINTEGER(0, "min-stack", &trace.min_stack,
"Set the minimum stack depth when parsing the callchain, "
"anything below the specified depth will be ignored."),
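
For reference, here is a minimal standalone sketch (not part of the patch) of the --max-events accounting the diff introduces: each printed event bumps a counter, and once the optional limit is reached the processing loop is interrupted. All names below (struct sketch_trace, sketch_print_event) are hypothetical, invented for illustration.

/*
 * Minimal sketch of the --max-events logic: ULONG_MAX means "no limit",
 * matching the .max_events = ULONG_MAX default set in cmd_trace() above.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct sketch_trace {
	unsigned long nr_events_printed;
	unsigned long max_events;	/* ULONG_MAX means "no limit" */
};

/* Print one event and report whether the --max-events limit was hit. */
static bool sketch_print_event(struct sketch_trace *trace, int id)
{
	printf("event %d\n", id);

	/* Mirrors the patch: only enforce the limit when one was requested. */
	return ++trace->nr_events_printed == trace->max_events &&
	       trace->max_events != ULONG_MAX;
}

int main(void)
{
	struct sketch_trace trace = {
		.nr_events_printed = 0,
		.max_events	   = 3,	/* as if --max-events 3 were passed */
	};

	for (int id = 0; id < 10; id++) {
		if (sketch_print_event(&trace, id)) {
			puts("--max-events reached, stopping");
			break;	/* the real code sets 'interrupted = true' */
		}
	}
	return 0;
}

With the patch applied, the global limit is requested as, e.g., 'perf trace --max-events 3'; the diff also wires a per-evsel limit (evsel->max_events / evsel->nr_events_printed) that disables and closes the evsel once its own limit is hit.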