author     Ingo Molnar <mingo@kernel.org>    2019-10-15 07:19:55 +0200
committer  Ingo Molnar <mingo@kernel.org>    2019-10-15 07:19:55 +0200
commit     39b656ee9f2ce41eb969c86525f9a2a63fefac5b (patch)
tree       4d5013e83a8ae89f5b70ca176f6b2d8ae973c96a /tools/perf/util/mmap.c
parent     4f5cafb5cb8471e54afdc9054d973535614f7675 (diff)
parent     cebf7d51a6c3babc4d0589da7aec0de1af0a5691 (diff)
Merge tag 'perf-core-for-mingo-5.5-20191011' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core
Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

perf trace:

  Arnaldo Carvalho de Melo:

  - Reuse the strace-like syscall_arg_fmt->scnprintf() beautification
    routines (which convert integer arguments into strings, like open
    flags, etc.) for tracepoint arguments. For now the type based
    scnprintf routines (pid_t, umode_t, etc.) and the ones keyed on
    well-known argument names ("fd", etc.) get associated with
    tracepoint args of that type. A tracepoint-only arg, "msr", for
    msr:{write,read}_msr, is added as an initial step.

  - Introduce syscall_arg_fmt->strtoul() methods as the reverse
    operation of ->scnprintf(), i.e. to go from a string to an integer.

  - Implement --filter, just like in 'perf record', affecting the
    tracepoint events specified thus far in the command line, and use
    the ->strtoul() methods to map strings in the tables associated
    with the beautifiers to the integers the in-kernel tracepoint
    (and later eBPF) filters expect, e.g.:

      # perf trace --max-events 1 -e sched:*ipi --filter="cpu==1 || cpu==2"
           0.000 as/24630 sched:sched_wake_idle_without_ipi(cpu: 1)
      #

      # perf trace --max-events 1 --max-stack=32 -e msr:* --filter="msr==IA32_TSC_DEADLINE"
         207.000 cc1/19963 msr:write_msr(msr: IA32_TSC_DEADLINE, val: 5442316760822)
                   do_trace_write_msr ([kernel.kallsyms])
                   do_trace_write_msr ([kernel.kallsyms])
                   lapic_next_deadline ([kernel.kallsyms])
                   clockevents_program_event ([kernel.kallsyms])
                   hrtimer_interrupt ([kernel.kallsyms])
                   smp_apic_timer_interrupt ([kernel.kallsyms])
                   apic_timer_interrupt ([kernel.kallsyms])
                   [0x6ff66c] (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                   [0x7047c3] (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                   [0x707708] (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                   execute_one_pass (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                   [0x4f3d37] (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                   [0x4f3d49] (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                   execute_pass_list (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                   cgraph_node::expand (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                   [0x2625b4] (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                   symbol_table::finalize_compilation_unit (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                   [0x5ae8b9] (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                   toplev::main (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                   main (/usr/lib/gcc-cross/alpha-linux-gnu/8/cc1)
                   [0x26b6a] (/usr/lib/x86_64-linux-gnu/libc-2.29.so)
      #

      # perf trace --max-events 8 -e msr:* --filter="msr==IA32_SPEC_CTRL"
           0.000 :13281/13281 msr:write_msr(msr: IA32_SPEC_CTRL, val: 6)
           0.063 migration/3/25 msr:write_msr(msr: IA32_SPEC_CTRL)
           0.217 kworker/u16:1-/4826 msr:write_msr(msr: IA32_SPEC_CTRL)
           0.687 rcu_sched/11 msr:write_msr(msr: IA32_SPEC_CTRL)
           0.696 :13280/13280 msr:write_msr(msr: IA32_SPEC_CTRL, val: 6)
           0.305 :13281/13281 msr:write_msr(msr: IA32_SPEC_CTRL, val: 6)
           0.355 :13274/13274 msr:write_msr(msr: IA32_SPEC_CTRL, val: 6)
           2.743 kworker/u16:0-/6711 msr:write_msr(msr: IA32_SPEC_CTRL)
      #

      # perf trace --max-events 8 --cpu 1 -e msr:* --filter="msr!=IA32_SPEC_CTRL && msr!=IA32_TSC_DEADLINE && msr != FS_BASE"
           0.000 mtr-packet/30819 msr:write_msr(msr: 0x830, val: 68719479037)
           0.096 :0/0 msr:read_msr(msr: IA32_TSC_ADJUST)
         238.925 mtr-packet/30819 msr:write_msr(msr: 0x830, val: 8589936893)
         511.010 :0/0 msr:write_msr(msr: 0x830, val: 68719479037)
        1005.052 :0/0 msr:read_msr(msr: IA32_TSC_ADJUST)
        1235.131 CPU 0/KVM/3750 msr:write_msr(msr: 0x830, val: 4294969595)
        1235.195 CPU 0/KVM/3750 msr:read_msr(msr: IA32_SYSENTER_ESP, val: -2199023037952)
        1235.201 CPU 0/KVM/3750 msr:read_msr(msr: IA32_APICBASE, val: 4276096000)
      #

  - Default to not using libtraceevent and its plugins for beautifying
    tracepoint arguments, since now we're reusing the strace-like
    beautifiers. Use --libtraceevent_print (just --libtrace is
    unambiguous and can be used as a shorthand) to go back to those
    beautifiers. This will help in the transition, as can be seen in
    some of the sched tracepoints that still need some work in the
    libbeauty based mode:

      # trace --no-inherit -e msr:*,*sleep,sched:* sleep 1
           0.000 ( ): sched:sched_waking(comm: "trace", pid: 3319 (trace), prio: 120, success: 1)
           0.006 ( ): sched:sched_wakeup(comm: "trace", pid: 3319 (trace), prio: 120, success: 1)
           0.348 ( ): sched:sched_process_exec(filename: 140212596720100, pid: 3319 (sleep), old_pid: 3319 (sleep))
           0.490 ( ): msr:write_msr(msr: FS_BASE, val: 139631189321088)
           0.670 ( ): nanosleep(rqtp: 0x7ffc52c23bc0) ...
           0.674 ( ): sched:sched_stat_runtime(comm: "sleep", pid: 3319 (sleep), runtime: 659259, vruntime: 78942418342)
           0.675 ( ): sched:sched_switch(prev_comm: "sleep", prev_pid: 3319 (sleep), prev_prio: 120, prev_state: 1, next_comm: "swapper/0", next_prio: 120)
        1001.059 ( ): sched:sched_waking(comm: "sleep", pid: 3319 (sleep), prio: 120, success: 1)
        1001.098 ( ): sched:sched_wakeup(comm: "sleep", pid: 3319 (sleep), prio: 120, success: 1)
           0.670 (1000.504 ms): ... [continued]: nanosleep()) = 0
        1001.456 ( ): sched:sched_process_exit(comm: "sleep", pid: 3319 (sleep), prio: 120)

      # trace --libtrace --no-inherit -e msr:*,*sleep,sched:* sleep 1
           0.000 ( ): sched:sched_waking(comm=trace pid=3323 prio=120 target_cpu=000)
           0.007 ( ): sched:sched_wakeup(comm=trace pid=3323 prio=120 target_cpu=000)
           0.382 ( ): sched:sched_process_exec(filename=/usr/bin/sleep pid=3323 old_pid=3323)
           0.525 ( ): msr:write_msr(c0000100, value 7f5d508a0580)
           0.713 ( ): nanosleep(rqtp: 0x7fff487fb4a0) ...
           0.717 ( ): sched:sched_stat_runtime(comm=sleep pid=3323 runtime=617722 [ns] vruntime=78957731636 [ns])
           0.719 ( ): sched:sched_switch(prev_comm=sleep prev_pid=3323 prev_prio=120 prev_state=S ==> next_comm=swapper/0 next_pid=0 next_prio=120)
        1001.117 ( ): sched:sched_waking(comm=sleep pid=3323 prio=120 target_cpu=000)
        1001.157 ( ): sched:sched_wakeup(comm=sleep pid=3323 prio=120 target_cpu=000)
           0.713 (1000.522 ms): ... [continued]: nanosleep()) = 0
        1001.538 ( ): sched:sched_process_exit(comm=sleep pid=3323 prio=120)
      #

  - Make -v (verbose) mode be honoured for .perfconfig based
    trace.add_events, to help in diagnosing problems with building
    eBPF events (-e source.c).

  - When using eBPF syscall payload augmentation, do not show
    strace-like syscalls when all the user specified was some
    tracepoint event, bringing the behaviour in line with that of
    when not using eBPF augmentation.

Intel PT (exported-sql-viewer GUI):

  Adrian Hunter:

  - Add LookupModel, HBoxLayout, VBoxLayout and global time range
    calculations so as to add a time chart by CPU.

perf script:

  Andi Kleen:

  - Allow --time (to specify a time span of interest) with --reltime
    (timestamps relative to start).

perf diff:

  Jin Yao:

  - Report noise for cycles diff, i.e. a histogram + stddev.

perf annotate:

  Arnaldo Carvalho de Melo:

  - Initialize env->cpuid when running in live mode (perf top), as it
    is used in some of the per-arch annotation init routines.

samples bpf:

  Björn Töpel:

  - Fixup fallout of using tools/perf/perf-sys. from outside
    tools/perf.

Core:

  Ian Rogers:

  - Avoid 'sample_reg_masks' being const + weak, as this breaks with
    some compilers that constant-propagate from the weak symbol.

libperf:

  - First part of moving the perf_mmap class from tools/perf to
    libperf.

  - Propagate CFLAGS to libperf from the tools/perf Makefile.

Vendor events:

  John Garry:

  - Add an entry in MAINTAINERS with reviewers for the perf tool arm64
    pmu-events files.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
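Note on the ->strtoul() direction described above: a ->scnprintf() beautifier turns an integer argument into a symbolic string, and ->strtoul() does the reverse so that a filter such as "msr==IA32_TSC_DEADLINE" can be rewritten into the numeric comparison the in-kernel filter expects. The following is only a minimal sketch of that reverse lookup, with a hypothetical table and helper name (msr_names, msr_strtoul), not the actual perf trace code; the MSR numbers shown are the architectural ones.

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* Hypothetical value<->name table, in the spirit of the msr beautifier. */
    struct str_val {
            const char *name;
            uint64_t    val;
    };

    static const struct str_val msr_names[] = {
            { "IA32_TSC_DEADLINE", 0x6e0 },
            { "IA32_SPEC_CTRL",    0x048 },
            { "FS_BASE",           0xc0000100 },
    };

    /*
     * Reverse of a scnprintf()-style beautifier: map a symbolic name back
     * to the integer the in-kernel tracepoint filter compares against.
     * Returns true and fills *val on success, false if the name is unknown.
     */
    static bool msr_strtoul(const char *str, uint64_t *val)
    {
            for (size_t i = 0; i < sizeof(msr_names) / sizeof(msr_names[0]); i++) {
                    if (!strcmp(str, msr_names[i].name)) {
                            *val = msr_names[i].val;
                            return true;
                    }
            }
            return false;
    }

With such a helper, "msr==IA32_TSC_DEADLINE" in a --filter expression can be rewritten to "msr==0x6e0" before the filter string is handed to the kernel (or, later, to eBPF).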
Diffstat (limited to 'tools/perf/util/mmap.c')
-rw-r--r--   tools/perf/util/mmap.c   260
1 file changed, 15 insertions(+), 245 deletions(-)
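The shape of the diff below is mechanical: struct mmap in tools/perf embeds a libperf struct perf_mmap as ->core, the generic ring-buffer code moves behind that core, and the tool-side functions shrink to thin forwarders plus the perf-specific extras (AIO, compression, auxtrace). The sketch below shows only the pattern, with abbreviated field lists rather than the exact declarations from the headers:

    #include <stddef.h>

    /* libperf side (sketch): generic ring-buffer state and helpers. */
    struct perf_mmap {
            void *base;        /* the mmap'ed ring buffer */
            int   fd;
            int   cpu;
            /* ... mask, prev, start, end, overwrite, refcnt, event_copy ... */
    };

    size_t perf_mmap__mmap_len(struct perf_mmap *map);   /* declaration only */

    /* tools/perf side (sketch): wraps the libperf core, adds perf extras. */
    struct mmap {
            struct perf_mmap core;
            /* ... AIO buffers, compression buffer, auxtrace mmap ... */
    };

    /* Tool-level helpers become thin forwarders onto the embedded core: */
    size_t mmap__mmap_len(struct mmap *map)
    {
            return perf_mmap__mmap_len(&map->core);
    }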
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index a35dc57d5995..063d1b93c53d 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -13,6 +13,7 @@
#include <stdlib.h>
#include <string.h>
#include <unistd.h> // sysconf()
+#include <perf/mmap.h>
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
@@ -23,116 +24,9 @@
#include "../perf.h"
#include <internal/lib.h> /* page_size */
-size_t perf_mmap__mmap_len(struct mmap *map)
+size_t mmap__mmap_len(struct mmap *map)
{
- return map->core.mask + 1 + page_size;
-}
-
-/* When check_messup is true, 'end' must points to a good entry */
-static union perf_event *perf_mmap__read(struct mmap *map,
- u64 *startp, u64 end)
-{
- unsigned char *data = map->core.base + page_size;
- union perf_event *event = NULL;
- int diff = end - *startp;
-
- if (diff >= (int)sizeof(event->header)) {
- size_t size;
-
- event = (union perf_event *)&data[*startp & map->core.mask];
- size = event->header.size;
-
- if (size < sizeof(event->header) || diff < (int)size)
- return NULL;
-
- /*
- * Event straddles the mmap boundary -- header should always
- * be inside due to u64 alignment of output.
- */
- if ((*startp & map->core.mask) + size != ((*startp + size) & map->core.mask)) {
- unsigned int offset = *startp;
- unsigned int len = min(sizeof(*event), size), cpy;
- void *dst = map->core.event_copy;
-
- do {
- cpy = min(map->core.mask + 1 - (offset & map->core.mask), len);
- memcpy(dst, &data[offset & map->core.mask], cpy);
- offset += cpy;
- dst += cpy;
- len -= cpy;
- } while (len);
-
- event = (union perf_event *)map->core.event_copy;
- }
-
- *startp += size;
- }
-
- return event;
-}
-
-/*
- * Read event from ring buffer one by one.
- * Return one event for each call.
- *
- * Usage:
- * perf_mmap__read_init()
- * while(event = perf_mmap__read_event()) {
- * //process the event
- * perf_mmap__consume()
- * }
- * perf_mmap__read_done()
- */
-union perf_event *perf_mmap__read_event(struct mmap *map)
-{
- union perf_event *event;
-
- /*
- * Check if event was unmapped due to a POLLHUP/POLLERR.
- */
- if (!refcount_read(&map->core.refcnt))
- return NULL;
-
- /* non-overwirte doesn't pause the ringbuffer */
- if (!map->core.overwrite)
- map->core.end = perf_mmap__read_head(map);
-
- event = perf_mmap__read(map, &map->core.start, map->core.end);
-
- if (!map->core.overwrite)
- map->core.prev = map->core.start;
-
- return event;
-}
-
-static bool perf_mmap__empty(struct mmap *map)
-{
- return perf_mmap__read_head(map) == map->core.prev && !map->auxtrace_mmap.base;
-}
-
-void perf_mmap__get(struct mmap *map)
-{
- refcount_inc(&map->core.refcnt);
-}
-
-void perf_mmap__put(struct mmap *map)
-{
- BUG_ON(map->core.base && refcount_read(&map->core.refcnt) == 0);
-
- if (refcount_dec_and_test(&map->core.refcnt))
- perf_mmap__munmap(map);
-}
-
-void perf_mmap__consume(struct mmap *map)
-{
- if (!map->core.overwrite) {
- u64 old = map->core.prev;
-
- perf_mmap__write_tail(map, old);
- }
-
- if (refcount_read(&map->core.refcnt) == 1 && perf_mmap__empty(map))
- perf_mmap__put(map);
+ return perf_mmap__mmap_len(&map->core);
}
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
@@ -170,7 +64,7 @@ static int perf_mmap__aio_enabled(struct mmap *map)
#ifdef HAVE_LIBNUMA_SUPPORT
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
- map->aio.data[idx] = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
+ map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
if (map->aio.data[idx] == MAP_FAILED) {
map->aio.data[idx] = NULL;
@@ -183,7 +77,7 @@ static int perf_mmap__aio_alloc(struct mmap *map, int idx)
static void perf_mmap__aio_free(struct mmap *map, int idx)
{
if (map->aio.data[idx]) {
- munmap(map->aio.data[idx], perf_mmap__mmap_len(map));
+ munmap(map->aio.data[idx], mmap__mmap_len(map));
map->aio.data[idx] = NULL;
}
}
@@ -196,7 +90,7 @@ static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
data = map->aio.data[idx];
- mmap_len = perf_mmap__mmap_len(map);
+ mmap_len = mmap__mmap_len(map);
node_mask = 1UL << cpu__get_node(cpu);
if (mbind(data, mmap_len, MPOL_BIND, &node_mask, 1, 0)) {
pr_err("Failed to bind [%p-%p] AIO buffer to node %d: error %m\n",
@@ -210,7 +104,7 @@ static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
#else /* !HAVE_LIBNUMA_SUPPORT */
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
- map->aio.data[idx] = malloc(perf_mmap__mmap_len(map));
+ map->aio.data[idx] = malloc(mmap__mmap_len(map));
if (map->aio.data[idx] == NULL)
return -1;
@@ -311,19 +205,13 @@ static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
}
#endif
-void perf_mmap__munmap(struct mmap *map)
+void mmap__munmap(struct mmap *map)
{
perf_mmap__aio_munmap(map);
if (map->data != NULL) {
- munmap(map->data, perf_mmap__mmap_len(map));
+ munmap(map->data, mmap__mmap_len(map));
map->data = NULL;
}
- if (map->core.base != NULL) {
- munmap(map->core.base, perf_mmap__mmap_len(map));
- map->core.base = NULL;
- map->core.fd = -1;
- refcount_set(&map->core.refcnt, 0);
- }
auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
@@ -353,34 +241,13 @@ static void perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params
CPU_SET(map->core.cpu, &map->affinity_mask);
}
-int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
+int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
{
- /*
- * The last one will be done at perf_mmap__consume(), so that we
- * make sure we don't prevent tools from consuming every last event in
- * the ring buffer.
- *
- * I.e. we can get the POLLHUP meaning that the fd doesn't exist
- * anymore, but the last events for it are still in the ring buffer,
- * waiting to be consumed.
- *
- * Tools can chose to ignore this at their own discretion, but the
- * evlist layer can't just drop it when filtering events in
- * perf_evlist__filter_pollfd().
- */
- refcount_set(&map->core.refcnt, 2);
- map->core.prev = 0;
- map->core.mask = mp->mask;
- map->core.base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
- MAP_SHARED, fd, 0);
- if (map->core.base == MAP_FAILED) {
+ if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
pr_debug2("failed to mmap perf event ring buffer, error %d\n",
errno);
- map->core.base = NULL;
return -1;
}
- map->core.fd = fd;
- map->core.cpu = cpu;
perf_mmap__setup_affinity_mask(map, mp);
@@ -389,7 +256,7 @@ int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
map->comp_level = mp->comp_level;
if (map->comp_level && !perf_mmap__aio_enabled(map)) {
- map->data = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
+ map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
if (map->data == MAP_FAILED) {
pr_debug2("failed to mmap data buffer, error %d\n",
@@ -406,96 +273,16 @@ int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
return perf_mmap__aio_mmap(map, mp);
}
-static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
-{
- struct perf_event_header *pheader;
- u64 evt_head = *start;
- int size = mask + 1;
-
- pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
- pheader = (struct perf_event_header *)(buf + (*start & mask));
- while (true) {
- if (evt_head - *start >= (unsigned int)size) {
- pr_debug("Finished reading overwrite ring buffer: rewind\n");
- if (evt_head - *start > (unsigned int)size)
- evt_head -= pheader->size;
- *end = evt_head;
- return 0;
- }
-
- pheader = (struct perf_event_header *)(buf + (evt_head & mask));
-
- if (pheader->size == 0) {
- pr_debug("Finished reading overwrite ring buffer: get start\n");
- *end = evt_head;
- return 0;
- }
-
- evt_head += pheader->size;
- pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
- }
- WARN_ONCE(1, "Shouldn't get here\n");
- return -1;
-}
-
-/*
- * Report the start and end of the available data in ringbuffer
- */
-static int __perf_mmap__read_init(struct mmap *md)
-{
- u64 head = perf_mmap__read_head(md);
- u64 old = md->core.prev;
- unsigned char *data = md->core.base + page_size;
- unsigned long size;
-
- md->core.start = md->core.overwrite ? head : old;
- md->core.end = md->core.overwrite ? old : head;
-
- if ((md->core.end - md->core.start) < md->core.flush)
- return -EAGAIN;
-
- size = md->core.end - md->core.start;
- if (size > (unsigned long)(md->core.mask) + 1) {
- if (!md->core.overwrite) {
- WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
-
- md->core.prev = head;
- perf_mmap__consume(md);
- return -EAGAIN;
- }
-
- /*
- * Backward ring buffer is full. We still have a chance to read
- * most of data from it.
- */
- if (overwrite_rb_find_range(data, md->core.mask, &md->core.start, &md->core.end))
- return -EINVAL;
- }
-
- return 0;
-}
-
-int perf_mmap__read_init(struct mmap *map)
-{
- /*
- * Check if event was unmapped due to a POLLHUP/POLLERR.
- */
- if (!refcount_read(&map->core.refcnt))
- return -ENOENT;
-
- return __perf_mmap__read_init(map);
-}
-
int perf_mmap__push(struct mmap *md, void *to,
int push(struct mmap *map, void *to, void *buf, size_t size))
{
- u64 head = perf_mmap__read_head(md);
+ u64 head = perf_mmap__read_head(&md->core);
unsigned char *data = md->core.base + page_size;
unsigned long size;
void *buf;
int rc = 0;
- rc = perf_mmap__read_init(md);
+ rc = perf_mmap__read_init(&md->core);
if (rc < 0)
return (rc == -EAGAIN) ? 1 : -1;
@@ -522,24 +309,7 @@ int perf_mmap__push(struct mmap *md, void *to,
}
md->core.prev = head;
- perf_mmap__consume(md);
+ perf_mmap__consume(&md->core);
out:
return rc;
}
-
-/*
- * Mandatory for overwrite mode
- * The direction of overwrite mode is backward.
- * The last perf_mmap__read() will set tail to map->core.prev.
- * Need to correct the map->core.prev to head which is the end of next read.
- */
-void perf_mmap__read_done(struct mmap *map)
-{
- /*
- * Check if event was unmapped due to a POLLHUP/POLLERR.
- */
- if (!refcount_read(&map->core.refcnt))
- return;
-
- map->core.prev = perf_mmap__read_head(map);
-}
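For context on where the removed readers went: after this series the perf_mmap__read_init()/read_event()/consume()/read_done() entry points live in libperf and take the embedded struct perf_mmap, as the surviving perf_mmap__push() hunk above already shows for read_init() and consume(). The following is a minimal sketch of the usage pattern that the deleted "Usage:" comment documented, written against that assumption (header paths and the availability of all four calls in <perf/mmap.h> at this point in the series are assumptions, not code from this patch):

    #include <perf/mmap.h>     /* libperf perf_mmap__* readers (assumed)   */
    #include "util/mmap.h"     /* struct mmap with its embedded ->core     */
    #include "util/event.h"    /* union perf_event                         */

    /* Drain one ring buffer, one event per iteration, as the removed
     * comment described, but via the libperf entry points on ->core. */
    static void drain_mmap(struct mmap *map)
    {
            union perf_event *event;

            if (perf_mmap__read_init(&map->core) < 0)
                    return;

            while ((event = perf_mmap__read_event(&map->core)) != NULL) {
                    /* ... process the event here ... */
                    perf_mmap__consume(&map->core);
            }

            perf_mmap__read_done(&map->core);
    }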