author      Chris Redpath <chris.redpath@arm.com>      2015-02-03 16:56:54 +0900
committer   Seung-Woo Kim <sw0312.kim@samsung.com>     2016-12-14 13:41:47 +0900
commit      519aaf480b302512edad375022be5d9050585ef1 (patch)
tree        e3050768468f083f555536b96f4d5f25b2a5f5f6 /kernel
parent      79febe68d970e2b88accf64933ddd2dd8f2284a4 (diff)
sched: HMP: Additional trace points for debugging HMP behaviour
1. Replace the magic numbers used for the migration trace with named
constants. The trace points still emit a number in the force=<n>
field (the constants behind each value are sketched after this list):
force=0 : wakeup migration
force=1 : forced migration
force=2 : offload migration
force=3 : idle pull migration
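
A minimal sketch of the assumed constant definitions, one per value
above. The real definitions are expected to live next to the
sched_hmp_migrate trace event in the trace header, which this diff does
not touch, so treat the exact spelling below as an assumption:

    /* Assumed definitions; the real ones ship with the
     * sched_hmp_migrate TRACE_EVENT, outside this diff. */
    #define HMP_MIGRATE_WAKEUP    0  /* force=0: wakeup migration */
    #define HMP_MIGRATE_FORCE     1  /* force=1: forced migration */
    #define HMP_MIGRATE_OFFLOAD   2  /* force=2: offload migration */
    #define HMP_MIGRATE_IDLE_PULL 3  /* force=3: idle pull migration */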
2. Add trace points to expose the offload decision-making.
Also trace rq->nr_running so that you can look
back and see what state the runqueue was in at
the time.
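
A hedged sketch of how one of the new events could be declared. The
real TRACE_EVENT definitions belong in the trace header (e.g.
include/trace/events/sched.h) and are not part of this diff; only the
call signature is taken from the code below, the field names are
illustrative:

    TRACE_EVENT(sched_hmp_offload_abort,

        /* matches trace_sched_hmp_offload_abort(cpu, data, "label") */
        TP_PROTO(int cpu, int data, char *label),

        TP_ARGS(cpu, data, label),

        TP_STRUCT__entry(
            __field(int, cpu)
            __field(int, data)
            __string(label, label)
        ),

        TP_fast_assign(
            __entry->cpu  = cpu;
            __entry->data = data;
            __assign_str(label, label);
        ),

        TP_printk("cpu=%d data=%d label=%s",
                  __entry->cpu, __entry->data, __get_str(label))
    );

    /* sched_hmp_offload_succeed and sched_rq_nr_running would follow the
     * same pattern; sched_rq_nr_running additionally records nr_iowait,
     * matching the trace_sched_rq_nr_running(cpu, nr_running, nr_iowait)
     * call site in the diff. */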
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Liviu Dudau <Liviu.Dudau@arm.com>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
[k.kozlowski: rebased on 4.1, no signed-off-by of previous committer]
Signed-off-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
Diffstat (limited to 'kernel')
-rw-r--r--      kernel/sched/fair.c     33
1 file changed, 23 insertions, 10 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b471ad275e90..2f9b0f6aef4e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2879,6 +2879,7 @@ static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
 	__update_tg_runnable_avg(&rq->avg, &rq->cfs);
 	trace_sched_rq_runnable_ratio(cpu_of(rq), rq->avg.load_avg_ratio);
 	trace_sched_rq_runnable_load(cpu_of(rq), rq->cfs.runnable_load_avg);
+	trace_sched_rq_nr_running(cpu_of(rq), rq->nr_running, rq->nr_iowait.counter);
 }
 #else /* CONFIG_FAIR_GROUP_SCHED */
 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
@@ -5520,25 +5521,37 @@ static inline unsigned int hmp_offload_down(int cpu, struct sched_entity *se)
 
 	/* Is there an idle CPU in the current domain */
 	min_usage = hmp_domain_min_load(hmp_cpu_domain(cpu), NULL);
-	if (min_usage == 0)
+	if (min_usage == 0) {
+		trace_sched_hmp_offload_abort(cpu, min_usage, "load");
 		return NR_CPUS;
+	}
 
 	/* Is the task alone on the cpu? */
-	if (cpu_rq(cpu)->cfs.h_nr_running < 2)
+	if (cpu_rq(cpu)->cfs.h_nr_running < 2) {
+		trace_sched_hmp_offload_abort(cpu,
+			cpu_rq(cpu)->cfs.h_nr_running, "nr_running");
 		return NR_CPUS;
+	}
 
 	/* Is the task actually starving? */
 	/* >=25% ratio running/runnable = starving */
-	if (hmp_task_starvation(se) > 768)
+	if (hmp_task_starvation(se) > 768) {
+		trace_sched_hmp_offload_abort(cpu, hmp_task_starvation(se),
+			"starvation");
 		return NR_CPUS;
+	}
 
 	/* Does the slower domain have any idle CPUs? */
 	min_usage = hmp_domain_min_load(hmp_slower_domain(cpu), &dest_cpu);
-	if (min_usage > 0)
+	if (min_usage > 0) {
+		trace_sched_hmp_offload_abort(cpu, min_usage, "slowdomain");
 		return NR_CPUS;
+	}
 
-	if (cpumask_test_cpu(dest_cpu, &hmp_slower_domain(cpu)->cpus))
+	if (cpumask_test_cpu(dest_cpu, &hmp_slower_domain(cpu)->cpus)) {
+		trace_sched_hmp_offload_succeed(cpu, dest_cpu);
 		return dest_cpu;
+	}
 	return NR_CPUS;
 }
@@ -5665,13 +5678,13 @@ unlock:
 #ifdef CONFIG_SCHED_HMP
 	if (hmp_up_migration(prev_cpu, &new_cpu, &p->se)) {
 		hmp_next_up_delay(&p->se, new_cpu);
-		trace_sched_hmp_migrate(p, new_cpu, 0);
+		trace_sched_hmp_migrate(p, new_cpu, HMP_MIGRATE_WAKEUP);
 		return new_cpu;
 	}
 	if (hmp_down_migration(prev_cpu, &p->se)) {
 		new_cpu = hmp_select_slower_cpu(p, prev_cpu);
 		hmp_next_down_delay(&p->se, new_cpu);
-		trace_sched_hmp_migrate(p, new_cpu, 0);
+		trace_sched_hmp_migrate(p, new_cpu, HMP_MIGRATE_WAKEUP);
 		return new_cpu;
 	}
 	/* Make sure that the task stays in its previous hmp domain */
@@ -8897,7 +8910,7 @@ static void hmp_force_up_migration(int this_cpu)
 			target->push_cpu = target_cpu;
 			target->migrate_task = p;
 			force = 1;
-			trace_sched_hmp_migrate(p, target->push_cpu, 1);
+			trace_sched_hmp_migrate(p, target->push_cpu, HMP_MIGRATE_FORCE);
 			hmp_next_up_delay(&p->se, target->push_cpu);
 		}
 	}
@@ -8913,7 +8926,7 @@ static void hmp_force_up_migration(int this_cpu)
 			target->active_balance = 1;
 			target->migrate_task = p;
 			force = 1;
-			trace_sched_hmp_migrate(p, target->push_cpu, 2);
+			trace_sched_hmp_migrate(p, target->push_cpu, HMP_MIGRATE_OFFLOAD);
 			hmp_next_down_delay(&p->se, target->push_cpu);
 		}
 	}
@@ -8992,7 +9005,7 @@ static unsigned int hmp_idle_pull(int this_cpu)
 			target->push_cpu = this_cpu;
 			target->migrate_task = p;
 			force = 1;
-			trace_sched_hmp_migrate(p, target->push_cpu, 3);
+			trace_sched_hmp_migrate(p, target->push_cpu, HMP_MIGRATE_IDLE_PULL);
 			hmp_next_up_delay(&p->se, target->push_cpu);
 		}
 		raw_spin_unlock_irqrestore(&target->lock, flags);
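
With CONFIG_SCHED_HMP enabled, these events should be reachable through
the usual ftrace interface: assuming they are declared under the sched
trace system, as the trace_sched_* prefixes suggest, enabling e.g.
events/sched/sched_hmp_migrate and events/sched/sched_hmp_offload_abort
under /sys/kernel/debug/tracing lets the force=<n> and abort-reason
fields be correlated with the per-cpu nr_running samples when
reconstructing a migration decision.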