Diffstat (limited to 'tools/perf/builtin-stat.c')
-rw-r--r--  tools/perf/builtin-stat.c  | 78
1 file changed, 68 insertions(+), 10 deletions(-)
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 54a4c152edb3..3a022b3e5c02 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -168,6 +168,7 @@ static struct timespec ref_time;
static struct cpu_map *aggr_map;
static aggr_get_id_t aggr_get_id;
static bool append_file;
+static bool interval_count;
static const char *output_name;
static int output_fd;
static int print_free_counters_hint;
@@ -507,14 +508,13 @@ static int perf_stat_synthesize_config(bool is_pipe)
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
-static int __store_counter_ids(struct perf_evsel *counter,
- struct cpu_map *cpus,
- struct thread_map *threads)
+static int __store_counter_ids(struct perf_evsel *counter)
{
int cpu, thread;
- for (cpu = 0; cpu < cpus->nr; cpu++) {
- for (thread = 0; thread < threads->nr; thread++) {
+ for (cpu = 0; cpu < xyarray__max_x(counter->fd); cpu++) {
+ for (thread = 0; thread < xyarray__max_y(counter->fd);
+ thread++) {
int fd = FD(counter, cpu, thread);
if (perf_evlist__id_add_fd(evsel_list, counter,
@@ -534,7 +534,7 @@ static int store_counter_ids(struct perf_evsel *counter)
if (perf_evsel__alloc_id(counter, cpus->nr, threads->nr))
return -ENOMEM;
- return __store_counter_ids(counter, cpus, threads);
+ return __store_counter_ids(counter);
}
static bool perf_evsel__should_store_id(struct perf_evsel *counter)
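
The refactor above sizes both loops from the event's own fd table instead of taking the cpu and thread maps as arguments. A minimal standalone sketch of that iteration pattern, with a toy 2-D structure standing in for perf's xyarray (the type and helper below are illustrative, not the tools/perf API):

    /*
     * Toy stand-in for an xyarray: a fixed-size table indexed by
     * (cpu, thread).  Sizing the loops from the table itself, as the hunk
     * above does with xyarray__max_x()/xyarray__max_y(), means callers no
     * longer have to pass the cpu and thread maps separately.
     */
    #include <stdio.h>

    struct toy_xyarray {
            int max_x;              /* number of cpus    */
            int max_y;              /* number of threads */
            int fd[4][4];           /* fd[cpu][thread]   */
    };

    static void store_ids(const struct toy_xyarray *a)
    {
            for (int cpu = 0; cpu < a->max_x; cpu++)
                    for (int thread = 0; thread < a->max_y; thread++)
                            printf("cpu %d thread %d -> fd %d\n",
                                   cpu, thread, a->fd[cpu][thread]);
    }

    int main(void)
    {
            struct toy_xyarray a = { .max_x = 2, .max_y = 3 };

            for (int x = 0; x < a.max_x; x++)
                    for (int y = 0; y < a.max_y; y++)
                            a.fd[x][y] = 10 * x + y;
            store_ids(&a);
            return 0;
    }
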
@@ -571,6 +571,8 @@ static struct perf_evsel *perf_evsel__reset_weak_group(struct perf_evsel *evsel)
static int __run_perf_stat(int argc, const char **argv)
{
int interval = stat_config.interval;
+ int times = stat_config.times;
+ int timeout = stat_config.timeout;
char msg[BUFSIZ];
unsigned long long t0, t1;
struct perf_evsel *counter;
@@ -584,6 +586,9 @@ static int __run_perf_stat(int argc, const char **argv)
if (interval) {
ts.tv_sec = interval / USEC_PER_MSEC;
ts.tv_nsec = (interval % USEC_PER_MSEC) * NSEC_PER_MSEC;
+ } else if (timeout) {
+ ts.tv_sec = timeout / USEC_PER_MSEC;
+ ts.tv_nsec = (timeout % USEC_PER_MSEC) * NSEC_PER_MSEC;
} else {
ts.tv_sec = 1;
ts.tv_nsec = 0;
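
Both the interval and the new timeout value are given in milliseconds, and the hunk above turns them into a struct timespec for nanosleep() (USEC_PER_MSEC is 1000 and NSEC_PER_MSEC is 1000000, so the division yields whole seconds and the remainder becomes nanoseconds). A standalone sketch of the same conversion; ms_to_timespec() is an illustrative helper, not perf code:

    #include <stdio.h>
    #include <time.h>

    /* Convert a millisecond interval/timeout value to a struct timespec,
     * mirroring the arithmetic in the hunk above. */
    static struct timespec ms_to_timespec(unsigned int ms)
    {
            struct timespec ts;

            ts.tv_sec  = ms / 1000;                 /* whole seconds            */
            ts.tv_nsec = (ms % 1000) * 1000000L;    /* remainder in nanoseconds */
            return ts;
    }

    int main(void)
    {
            struct timespec ts = ms_to_timespec(2500);

            printf("%ld s + %ld ns\n", (long)ts.tv_sec, ts.tv_nsec); /* 2 s + 500000000 ns */
            return 0;
    }
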
@@ -632,7 +637,19 @@ try_again:
if (verbose > 0)
ui__warning("%s\n", msg);
goto try_again;
- }
+ } else if (target__has_per_thread(&target) &&
+ evsel_list->threads &&
+ evsel_list->threads->err_thread != -1) {
+ /*
+ * For global --per-thread case, skip current
+ * error thread.
+ */
+ if (!thread_map__remove(evsel_list->threads,
+ evsel_list->threads->err_thread)) {
+ evsel_list->threads->err_thread = -1;
+ goto try_again;
+ }
+ }
perf_evsel__open_strerror(counter, &target,
errno, msg, sizeof(msg));
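
The new else-if branch covers the global --per-thread case where opening a counter fails because one monitored thread has disappeared: that thread is removed from the map and the open is retried. A toy standalone sketch of the same skip-and-retry pattern; the structure, try_open() and remove_thread() below are stand-ins, not the perf thread_map API:

    #include <stdio.h>

    struct toy_thread_map { int nr; int pid[8]; int err_thread; };

    static int try_open(struct toy_thread_map *map)
    {
            for (int i = 0; i < map->nr; i++) {
                    if (map->pid[i] == 1234) {      /* pretend this pid raced and exited */
                            map->err_thread = i;
                            return -1;
                    }
            }
            return 0;
    }

    static void remove_thread(struct toy_thread_map *map, int idx)
    {
            for (int i = idx; i < map->nr - 1; i++)
                    map->pid[i] = map->pid[i + 1];
            map->nr--;
    }

    int main(void)
    {
            struct toy_thread_map map = { .nr = 3, .pid = { 100, 1234, 300 }, .err_thread = -1 };

    try_again:
            if (try_open(&map)) {
                    remove_thread(&map, map.err_thread);    /* skip the erroring thread */
                    map.err_thread = -1;
                    goto try_again;
            }
            printf("opened counters for %d threads\n", map.nr);
            return 0;
    }
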
@@ -696,10 +713,14 @@ try_again:
perf_evlist__start_workload(evsel_list);
enable_counters();
- if (interval) {
+ if (interval || timeout) {
while (!waitpid(child_pid, &status, WNOHANG)) {
nanosleep(&ts, NULL);
+ if (timeout)
+ break;
process_interval();
+ if (interval_count && !(--times))
+ break;
}
}
waitpid(child_pid, &status, 0);
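
For a forked workload the loop above polls the child with waitpid(WNOHANG): under -I it prints counts once per interval (bounded by --interval-count when set), while under --timeout it breaks out after a single sleep of the timeout length. A standalone sketch of that loop shape, with print_one_report() and poll_child() as illustrative stand-ins for the perf internals:

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <time.h>
    #include <unistd.h>

    static void print_one_report(void)          /* stand-in for process_interval() */
    {
            printf("counter snapshot\n");
    }

    static void poll_child(pid_t child, struct timespec *ts, bool timeout,
                           bool interval_count, int times)
    {
            int status;

            while (!waitpid(child, &status, WNOHANG)) {
                    nanosleep(ts, NULL);
                    if (timeout)                /* --timeout: one sleep, then stop */
                            break;
                    print_one_report();         /* -I: report for this interval    */
                    if (interval_count && !(--times))
                            break;              /* --interval-count limit reached  */
            }
            waitpid(child, &status, 0);         /* reap (or keep waiting for) the child */
    }

    int main(void)
    {
            struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
            pid_t pid = fork();

            if (pid == 0) {                     /* child: stand-in workload */
                    sleep(3);
                    return 0;
            }
            poll_child(pid, &ts, false, true, 2);   /* two 1 s reports, then stop */
            return 0;
    }
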
@@ -716,8 +737,13 @@ try_again:
enable_counters();
while (!done) {
nanosleep(&ts, NULL);
- if (interval)
+ if (timeout)
+ break;
+ if (interval) {
process_interval();
+ if (interval_count && !(--times))
+ break;
+ }
}
}
@@ -1891,6 +1917,10 @@ static const struct option stat_options[] = {
"command to run after to the measured command"),
OPT_UINTEGER('I', "interval-print", &stat_config.interval,
"print counts at regular interval in ms (>= 10)"),
+ OPT_INTEGER(0, "interval-count", &stat_config.times,
+ "print counts for fixed number of times"),
+ OPT_UINTEGER(0, "timeout", &stat_config.timeout,
+ "stop workload and print counts after a timeout period in ms (>= 10ms)"),
OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
"aggregate counts per processor socket", AGGR_SOCKET),
OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
@@ -2688,7 +2718,7 @@ int cmd_stat(int argc, const char **argv)
int status = -EINVAL, run_idx;
const char *mode;
FILE *output = stderr;
- unsigned int interval;
+ unsigned int interval, timeout;
const char * const stat_subcommands[] = { "record", "report" };
setlocale(LC_ALL, "");
@@ -2719,6 +2749,7 @@ int cmd_stat(int argc, const char **argv)
return __cmd_report(argc, argv);
interval = stat_config.interval;
+ timeout = stat_config.timeout;
/*
* For record command the -o is already taken care of.
@@ -2871,6 +2902,33 @@ int cmd_stat(int argc, const char **argv)
"Please proceed with caution.\n");
}
+ if (stat_config.times && interval)
+ interval_count = true;
+ else if (stat_config.times && !interval) {
+ pr_err("interval-count option should be used together with "
+ "interval-print.\n");
+ parse_options_usage(stat_usage, stat_options, "interval-count", 0);
+ parse_options_usage(stat_usage, stat_options, "I", 1);
+ goto out;
+ }
+
+ if (timeout && timeout < 100) {
+ if (timeout < 10) {
+ pr_err("timeout must be >= 10ms.\n");
+ parse_options_usage(stat_usage, stat_options, "timeout", 0);
+ goto out;
+ } else
+ pr_warning("timeout < 100ms. "
+ "The overhead percentage could be high in some cases. "
+ "Please proceed with caution.\n");
+ }
+ if (timeout && interval) {
+ pr_err("timeout option is not supported with interval-print.\n");
+ parse_options_usage(stat_usage, stat_options, "timeout", 0);
+ parse_options_usage(stat_usage, stat_options, "I", 1);
+ goto out;
+ }
+
if (perf_evlist__alloc_stats(evsel_list, interval))
goto out;
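
Taken together, the checks above require --interval-count to come with -I, reject --timeout values below 10 ms (warning below 100 ms), and refuse --timeout combined with -I, so typical invocations would look like 'perf stat -I 1000 --interval-count 5 -- <cmd>' or 'perf stat --timeout 5000 -- <cmd>'. A condensed standalone sketch of that validation; check_stat_opts() is an illustrative name, not perf code:

    #include <stdbool.h>
    #include <stdio.h>

    /* Returns 0 when the combination of options is usable, -1 otherwise;
     * *interval_count is set when a fixed number of -I reports was asked for. */
    static int check_stat_opts(unsigned int interval, unsigned int timeout,
                               int times, bool *interval_count)
    {
            *interval_count = false;

            if (times && !interval) {
                    fprintf(stderr, "interval-count must be used with interval-print (-I)\n");
                    return -1;
            }
            if (times)
                    *interval_count = true;

            if (timeout && interval) {
                    fprintf(stderr, "timeout is not supported with interval-print (-I)\n");
                    return -1;
            }
            if (timeout && timeout < 10) {
                    fprintf(stderr, "timeout must be >= 10ms\n");
                    return -1;
            }
            if (timeout && timeout < 100)
                    fprintf(stderr, "warning: timeout < 100ms may add noticeable overhead\n");

            return 0;
    }

    int main(void)
    {
            bool ic;

            return check_stat_opts(1000 /* -I */, 0 /* --timeout */,
                                   5 /* --interval-count */, &ic) ? 1 : 0;
    }
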