author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2011-04-09 21:17:46 +0200 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2011-05-28 18:01:19 +0200 |
commit | dce5855bba5df9e87bb04584d505c1f1b103c652 (patch) | |
tree | 167312131a85a176ec71775fa81ddbf14a33dcb6 /kernel/events | |
parent | db24d33e08b88e990991760a44d72006a5dc6102 (diff) |
perf: Collect the schedule-in rules in one function
This logic was scattered about; refactor it into a single function.
No change in functionality.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110409192141.979862055@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/events')
-rw-r--r-- | kernel/events/core.c | 27 |
1 file changed, 15 insertions, 12 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 71c2d44ff95..802f3b24eee 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1476,6 +1476,18 @@ ctx_sched_in(struct perf_event_context *ctx,
 	     enum event_type_t event_type,
 	     struct task_struct *task);
 
+static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
+				struct perf_event_context *ctx,
+				struct task_struct *task)
+{
+	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
+	if (ctx)
+		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
+	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
+	if (ctx)
+		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+}
+
 /*
  * Cross CPU call to install and enable a performance event
  *
@@ -1523,12 +1535,7 @@ static int  __perf_install_in_context(void *info)
 	/*
 	 * Schedule everything back in
 	 */
-	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
-	if (task_ctx)
-		ctx_sched_in(task_ctx, cpuctx, EVENT_PINNED, task);
-	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
-	if (task_ctx)
-		ctx_sched_in(task_ctx, cpuctx, EVENT_FLEXIBLE, task);
+	perf_event_sched_in(cpuctx, task_ctx, task);
 
 	perf_pmu_enable(cpuctx->ctx.pmu);
 	perf_ctx_unlock(cpuctx, task_ctx);
@@ -2107,9 +2114,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 	 */
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
-	ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
-	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
-	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+	perf_event_sched_in(cpuctx, ctx, task);
 
 	cpuctx->task_ctx = ctx;
 
@@ -2347,9 +2352,7 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 	if (ctx)
 		rotate_ctx(ctx);
 
-	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, current);
-	if (ctx)
-		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, current);
+	perf_event_sched_in(cpuctx, ctx, current);
 
 done:
 	if (remove)
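For quick reference, the consolidated helper is shown below with a short descriptive comment added (condensed from the hunks above; the code itself is taken verbatim from the patch). It keeps the schedule-in order in one place: CPU-pinned events first, then task-pinned, then CPU-flexible, then task-flexible.

```c
/*
 * Helper introduced by this patch: schedule pinned events (CPU context
 * first, then the task context, if any) before flexible ones.
 */
static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
				struct perf_event_context *ctx,
				struct task_struct *task)
{
	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
}
```

Each of the three call sites (__perf_install_in_context(), perf_event_context_sched_in() and perf_rotate_context()) then reduces to a single perf_event_sched_in() call; the if (ctx) checks inside the helper cover the callers where the task context may be NULL.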