author | Chris Redpath <chris.redpath@arm.com> | 2015-01-30 17:04:09 +0900
committer | Seung-Woo Kim <sw0312.kim@samsung.com> | 2016-12-14 13:41:37 +0900
commit | eccbc929a0b307fc77bcb81783a9bf8702505c43
tree | 1ff5be5a44c9f66f42c2df040cbbe8797728b25a /kernel/sched
parent | 25f916b55389554c394fd41ac30828bce03b6898
HMP: Select least-loaded CPU when performing HMP Migrations
The reference patch set always selects the first CPU in an HMP
domain as a migration target. In busy situations, this means that
the migrated thread cannot make immediate use of an idle CPU but
must share a busy one until the load balancer runs across the big
domain.
This patch uses the hmp_domain_min_load function introduced in the
global balancing patch to figure out which of the CPUs is the least
busy, and selects that one as the migration target - in both directions.
This essentially implements a task-spread strategy; it is intended
to maximise the performance of migrated threads, but is likely
to use more power than the packing strategy previously employed.
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
[k.kozlowski: rebased on 4.1, no signed-off-by of previous committer]
Signed-off-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
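The hmp_domain_min_load() helper used above is introduced by the earlier
global-balancing patch; this diff only adds a forward declaration for it.
As a rough illustration of the contract the new call sites rely on, the
following is a minimal user-space sketch, not the kernel implementation:
struct hmp_domain_model, the plain membership/load arrays, and the example
load values are all invented stand-ins for the kernel's cpumask and
load-tracking machinery.

#include <limits.h>
#include <stdio.h>

#define NR_CPUS 8

/* Illustrative stand-in for struct hmp_domain: membership and load per CPU. */
struct hmp_domain_model {
	unsigned int cpus[NR_CPUS];	/* 1 if the CPU belongs to this domain */
	unsigned int load[NR_CPUS];	/* per-CPU load ratio (arbitrary scale) */
};

/*
 * Contract the patch's callers assume: return the lowest load ratio in the
 * domain and write the corresponding CPU id through min_cpu.  If the domain
 * is empty, min_cpu is left untouched, so the caller's NR_CPUS sentinel
 * survives and triggers the cpumask_any_and() fallback.
 */
static unsigned int hmp_domain_min_load_model(const struct hmp_domain_model *hmpd,
					      int *min_cpu)
{
	unsigned int min_ratio = UINT_MAX;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (hmpd->cpus[cpu] && hmpd->load[cpu] < min_ratio) {
			min_ratio = hmpd->load[cpu];
			*min_cpu = cpu;
		}
	}
	return min_ratio;
}

int main(void)
{
	/* Big cluster: CPUs 4-7, with CPU 6 the least loaded. */
	struct hmp_domain_model big = {
		.cpus = { 0, 0, 0, 0, 1, 1, 1, 1 },
		.load = { 0, 0, 0, 0, 900, 750, 120, 640 },
	};
	int lowest_cpu = NR_CPUS;	/* sentinel, as in the patch */
	unsigned int ratio = hmp_domain_min_load_model(&big, &lowest_cpu);

	printf("least-loaded CPU: %d (ratio %u)\n", lowest_cpu, ratio);
	return 0;
}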
Diffstat (limited to 'kernel/sched')
-rw-r--r-- | kernel/sched/fair.c | 24
1 file changed, 22 insertions, 2 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c4086d79d00b..440b81ef6d82 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5094,6 +5094,8 @@ unsigned int hmp_next_down_threshold = 4096;
 
 static unsigned int hmp_up_migration(int cpu, struct sched_entity *se);
 static unsigned int hmp_down_migration(int cpu, struct sched_entity *se);
+static inline unsigned int hmp_domain_min_load(struct hmp_domain *hmpd,
+						int *min_cpu);
 
 /* Check if cpu is in fastest hmp_domain */
 static inline unsigned int hmp_cpu_is_fastest(int cpu)
@@ -5138,7 +5140,16 @@ static inline struct hmp_domain *hmp_faster_domain(int cpu)
 static inline unsigned int hmp_select_faster_cpu(struct task_struct *tsk,
 							int cpu)
 {
-	return cpumask_any_and(&hmp_faster_domain(cpu)->cpus,
+	int lowest_cpu=NR_CPUS;
+	__always_unused int lowest_ratio = hmp_domain_min_load(hmp_faster_domain(cpu), &lowest_cpu);
+	/*
+	 * If the lowest-loaded CPU in the domain is allowed by the task affinity
+	 * select that one, otherwise select one which is allowed
+	 */
+	if(lowest_cpu != NR_CPUS && cpumask_test_cpu(lowest_cpu,tsk_cpus_allowed(tsk)))
+		return lowest_cpu;
+	else
+		return cpumask_any_and(&hmp_faster_domain(cpu)->cpus,
 				tsk_cpus_allowed(tsk));
 }
 
@@ -5149,7 +5160,16 @@ static inline unsigned int hmp_select_faster_cpu(struct task_struct *tsk,
 static inline unsigned int hmp_select_slower_cpu(struct task_struct *tsk,
 							int cpu)
 {
-	return cpumask_any_and(&hmp_slower_domain(cpu)->cpus,
+	int lowest_cpu=NR_CPUS;
+	__always_unused int lowest_ratio = hmp_domain_min_load(hmp_slower_domain(cpu), &lowest_cpu);
+	/*
+	 * If the lowest-loaded CPU in the domain is allowed by the task affinity
+	 * select that one, otherwise select one which is allowed
+	 */
+	if(lowest_cpu != NR_CPUS && cpumask_test_cpu(lowest_cpu,tsk_cpus_allowed(tsk)))
+		return lowest_cpu;
+	else
+		return cpumask_any_and(&hmp_slower_domain(cpu)->cpus,
 				tsk_cpus_allowed(tsk));
 }
 
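The selection policy the two hunks add can be modelled in isolation.  The
sketch below is a user-space approximation, not the kernel code: the
*_model names are invented, a bitmask replaces struct cpumask, and
cpumask_test_cpu()/cpumask_any_and() are reduced to a bit test and a
linear scan.

#include <stdio.h>

#define NR_CPUS 8

/* Bit test standing in for cpumask_test_cpu() on tsk_cpus_allowed(). */
static int cpu_allowed_model(unsigned int allowed_mask, int cpu)
{
	return (allowed_mask >> cpu) & 1;
}

/* Linear scan standing in for cpumask_any_and(): any CPU that is both in
 * the domain and allowed by the task's affinity mask. */
static int any_allowed_in_domain_model(const unsigned int *domain_cpus,
				       unsigned int allowed_mask)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (domain_cpus[cpu] && cpu_allowed_model(allowed_mask, cpu))
			return cpu;
	return NR_CPUS;
}

/* Mirrors the patch: take the least-loaded CPU only when the NR_CPUS
 * sentinel was overwritten and the task's affinity permits that CPU;
 * otherwise fall back to any allowed CPU in the domain. */
static int select_migration_cpu_model(const unsigned int *domain_cpus,
				      int lowest_cpu,
				      unsigned int allowed_mask)
{
	if (lowest_cpu != NR_CPUS && cpu_allowed_model(allowed_mask, lowest_cpu))
		return lowest_cpu;
	return any_allowed_in_domain_model(domain_cpus, allowed_mask);
}

int main(void)
{
	unsigned int big_domain[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

	/* Least-loaded big CPU is 6, but affinity only allows CPUs 4-5:
	 * the fallback path picks CPU 4. */
	printf("%d\n", select_migration_cpu_model(big_domain, 6, 0x30));
	/* Affinity allows CPU 6: the least-loaded CPU is used directly. */
	printf("%d\n", select_migration_cpu_model(big_domain, 6, 0x70));
	return 0;
}

Falling back to cpumask_any_and() whenever affinity excludes the
least-loaded CPU preserves the old behaviour in that case, so the patch
can improve placement without ever violating the task's allowed mask.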