author    Linus Torvalds <torvalds@linux-foundation.org>    2010-05-27 15:23:47 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2010-05-27 15:23:47 -0700
commit    c5617b200ac52e35f7e8cf05a17b0a2d50f6b3e9 (patch)
tree      40d5e99660c77c5791392d349a93113c044dbf14 /arch/sparc/kernel/perf_event.c
parent    cad719d86e9dbd06634eaba6401e022c8101d6b2 (diff)
parent    49c177461bfbedeccbab22bf3905db2f9da7f1c3 (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (61 commits)
  tracing: Add __used annotation to event variable
  perf, trace: Fix !x86 build bug
  perf report: Support multiple events on the TUI
  perf annotate: Fix up usage of the build id cache
  x86/mmiotrace: Remove redundant instruction prefix checks
  perf annotate: Add TUI interface
  perf tui: Remove annotate from popup menu after failure
  perf report: Don't start the TUI if -D is used
  perf: Fix getline undeclared
  perf: Optimize perf_tp_event_match()
  perf: Remove more code from the fastpath
  perf: Optimize the !vmalloc backed buffer
  perf: Optimize perf_output_copy()
  perf: Fix wakeup storm for RO mmap()s
  perf-record: Share per-cpu buffers
  perf-record: Remove -M
  perf: Ensure that IOC_OUTPUT isn't used to create multi-writer buffers
  perf, trace: Optimize tracepoints by using per-tracepoint-per-cpu hlist to track events
  perf, trace: Optimize tracepoints by removing IRQ-disable from perf/tracepoint interaction
  perf tui: Allow disabling the TUI on a per command basis in ~/.perfconfig
  ...
Diffstat (limited to 'arch/sparc/kernel/perf_event.c')
-rw-r--r--  arch/sparc/kernel/perf_event.c  108
1 file changed, 61 insertions(+), 47 deletions(-)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 34ce49f80eac..0ec92c8861dd 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -92,6 +92,8 @@ struct cpu_hw_events {
/* Enabled/disable state. */
int enabled;
+
+ unsigned int group_flag;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
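
The new group_flag field carries the per-CPU transaction state. The bit it tracks, PERF_EVENT_TXN_STARTED, is defined in include/linux/perf_event.h rather than in this file; a minimal sketch of the expected definition in this era of the tree (paraphrased, not part of this diff):

/* Sketch: the transaction-in-progress bit tested below; the real
 * definition lives in include/linux/perf_event.h, not in this diff. */
#define PERF_EVENT_TXN_STARTED 1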
@@ -981,53 +983,6 @@ static int collect_events(struct perf_event *group, int max_count,
return n;
}
-static void event_sched_in(struct perf_event *event)
-{
- event->state = PERF_EVENT_STATE_ACTIVE;
- event->oncpu = smp_processor_id();
- event->tstamp_running += event->ctx->time - event->tstamp_stopped;
- if (is_software_event(event))
- event->pmu->enable(event);
-}
-
-int hw_perf_group_sched_in(struct perf_event *group_leader,
- struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx)
-{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- struct perf_event *sub;
- int n0, n;
-
- if (!sparc_pmu)
- return 0;
-
- n0 = cpuc->n_events;
- n = collect_events(group_leader, perf_max_events - n0,
- &cpuc->event[n0], &cpuc->events[n0],
- &cpuc->current_idx[n0]);
- if (n < 0)
- return -EAGAIN;
- if (check_excludes(cpuc->event, n0, n))
- return -EINVAL;
- if (sparc_check_constraints(cpuc->event, cpuc->events, n + n0))
- return -EAGAIN;
- cpuc->n_events = n0 + n;
- cpuc->n_added += n;
-
- cpuctx->active_oncpu += n;
- n = 1;
- event_sched_in(group_leader);
- list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
- if (sub->state != PERF_EVENT_STATE_OFF) {
- event_sched_in(sub);
- n++;
- }
- }
- ctx->nr_active += n;
-
- return 1;
-}
-
static int sparc_pmu_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
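
The removed hw_perf_group_sched_in() above was sparc's copy of group-scheduling logic that every architecture had to duplicate. It is superseded by a generic transactional flow in kernel/perf_event.c built on the three new callbacks. A hedged sketch of that flow, paraphrased from the core of this era (the fallback for PMUs without transaction support and the unwinding of partially scheduled siblings are omitted for brevity):

static int group_sched_in(struct perf_event *group_event,
			  struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	struct perf_event *event;
	const struct pmu *pmu = group_event->pmu;

	pmu->start_txn(pmu);		/* defer schedulability checks */

	if (event_sched_in(group_event, cpuctx, ctx))
		goto fail;

	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		if (event_sched_in(event, cpuctx, ctx))
			goto fail;

	if (!pmu->commit_txn(pmu))	/* validate the group as a whole */
		return 0;
fail:
	/* unwind of partially scheduled siblings omitted for brevity */
	pmu->cancel_txn(pmu);
	return -EAGAIN;
}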
@@ -1045,11 +1000,20 @@ static int sparc_pmu_enable(struct perf_event *event)
cpuc->events[n0] = event->hw.event_base;
cpuc->current_idx[n0] = PIC_NO_INDEX;
+ /*
+ * If a group-event scheduling transaction was started,
+ * skip the schedulability test here; it will be performed
+ * at commit time (->commit_txn) for the group as a whole.
+ */
+ if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+ goto nocheck;
+
if (check_excludes(cpuc->event, n0, 1))
goto out;
if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
goto out;
+nocheck:
cpuc->n_events++;
cpuc->n_added++;
@@ -1129,11 +1093,61 @@ static int __hw_perf_event_init(struct perf_event *event)
return 0;
}
+/*
+ * Start a group-event scheduling transaction.
+ * Set the flag so that pmu::enable() skips the
+ * schedulability test; it will be performed at commit time.
+ */
+static void sparc_pmu_start_txn(const struct pmu *pmu)
+{
+ struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+ cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+}
+
+/*
+ * Cancel a group-event scheduling transaction.
+ * Clear the flag; pmu::enable() will again perform the
+ * schedulability test.
+ */
+static void sparc_pmu_cancel_txn(const struct pmu *pmu)
+{
+ struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+ cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+}
+
+/*
+ * Commit a group-event scheduling transaction.
+ * Perform the schedulability test for the group as a whole.
+ * Return 0 on success.
+ */
+static int sparc_pmu_commit_txn(const struct pmu *pmu)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int n;
+
+ if (!sparc_pmu)
+ return -EINVAL;
+
+ n = cpuc->n_events;
+ if (check_excludes(cpuc->event, 0, n))
+ return -EINVAL;
+ if (sparc_check_constraints(cpuc->event, cpuc->events, n))
+ return -EAGAIN;
+
+ return 0;
+}
+
static const struct pmu pmu = {
.enable = sparc_pmu_enable,
.disable = sparc_pmu_disable,
.read = sparc_pmu_read,
.unthrottle = sparc_pmu_unthrottle,
+ .start_txn = sparc_pmu_start_txn,
+ .cancel_txn = sparc_pmu_cancel_txn,
+ .commit_txn = sparc_pmu_commit_txn,
};
const struct pmu *hw_perf_event_init(struct perf_event *event)
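
To see the protocol end to end outside kernel context, here is a small self-contained C model (hypothetical names throughout; this is not kernel code) of the same start/commit/cancel pattern: additions skip per-event checks inside a transaction, mirroring the 'nocheck' path in sparc_pmu_enable(), validation happens once at commit, and a failed commit is followed by cancel plus rollback:

/* Standalone model of the transactional group-scheduling pattern.
 * Build with: cc txn_model.c -o txn_model */
#include <stdio.h>

#define TXN_STARTED 0x1
#define MAX_EVENTS  4	/* stand-in for hardware counter capacity */

struct cpu_events {
	int n_events;
	unsigned int group_flag;
};

static void start_txn(struct cpu_events *c)
{
	c->group_flag |= TXN_STARTED;
}

static void cancel_txn(struct cpu_events *c, int n0)
{
	c->group_flag &= ~TXN_STARTED;
	c->n_events = n0;	/* undo events added in the transaction */
}

static int commit_txn(struct cpu_events *c)
{
	c->group_flag &= ~TXN_STARTED;
	return c->n_events <= MAX_EVENTS ? 0 : -1;	/* whole-group test */
}

static void add_event(struct cpu_events *c)
{
	/* no per-event schedulability check while TXN_STARTED is set */
	c->n_events++;
}

int main(void)
{
	struct cpu_events c = { 0, 0 };
	int i, n0 = c.n_events;

	start_txn(&c);
	for (i = 0; i < 5; i++)		/* try to schedule a 5-event group */
		add_event(&c);
	if (commit_txn(&c))
		cancel_txn(&c, n0);	/* group does not fit: roll back */

	printf("scheduled %d events\n", c.n_events);
	return 0;
}

Note one simplification: in the sparc driver above, cancel_txn only clears the flag, because unwinding partially scheduled events is the generic core's job; the model folds the rollback into cancel purely to keep the example self-contained.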