author    Chris Redpath <chris.redpath@arm.com>    2015-03-12 20:32:13 +0900
committer Seung-Woo Kim <sw0312.kim@samsung.com>   2016-12-14 13:42:03 +0900
commit    3884488a27857f148ccacfa364ecfb5338d0d9e1 (patch)
tree      e4705afb88ba0a70a3cc5f1bc58b585632ed7f9c /kernel
parent    b552aedc846e51f4904b121017b45b67ddd0d59a (diff)
hmp: Restrict ILB events if no CPU has > 1 task
Frequently in HMP, the big CPUs are only active with one task per CPU
and there may be idle CPUs in the big cluster. This patch avoids
triggering an idle balance in situations where none of the active CPUs
in the current HMP domain is running more than one task.

When packing is enabled, this restriction is only enforced outside the
smallest domain: there we idle balance whenever a CPU is over the
up_threshold, regardless of task counts, in case a task needs to be
moved.

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
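The heart of the change is a single predicate: scan the busy (non-idle)
CPUs of the current HMP domain and raise an idle-balance kick only if at
least one of them has more than one runnable task. Below is a minimal
userspace sketch of that predicate, with a plain bitmask standing in for
the kernel's struct cpumask and a hypothetical nr_running[] array
standing in for per-CPU runqueues:

#include <stdio.h>

#define NR_CPUS 4

/* Return 1 if any busy CPU in the domain has more than one runnable task. */
static int domain_needs_ilb(unsigned int busy_mask, const int nr_running[])
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                if (!(busy_mask & (1u << cpu)))
                        continue;       /* idle CPU: nothing to pull from it */
                if (nr_running[cpu] > 1)
                        return 1;       /* a task could move to an idle CPU */
        }
        return 0;
}

int main(void)
{
        int nr_running[NR_CPUS] = { 1, 1, 0, 0 };  /* one task per big CPU */
        unsigned int busy = 0x3;                   /* CPUs 0 and 1 are busy */

        /* No CPU has more than one task: an idle balance would be pointless. */
        printf("ilb needed: %d\n", domain_needs_ilb(busy, nr_running));

        nr_running[1] = 2;      /* CPU 1 becomes overloaded */
        printf("ilb needed: %d\n", domain_needs_ilb(busy, nr_running));
        return 0;
}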
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/fair.c  28
1 file changed, 22 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5a361465937f..5900db7e1fba 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8505,16 +8505,16 @@ static int nohz_test_cpu(int cpu)
  * Decide if the tasks on the busy CPUs in the
  * littlest domain would benefit from an idle balance
  */
-static int hmp_packing_ilb_needed(int cpu)
+static int hmp_packing_ilb_needed(int cpu, int ilb_needed)
 {
         struct hmp_domain *hmp;
-        /* always allow ilb on non-slowest domain */
+        /* allow previous decision on non-slowest domain */
         if (!hmp_cpu_is_slowest(cpu))
-                return 1;
+                return ilb_needed;
 
         /* if disabled, use normal ILB behaviour */
         if (!hmp_packing_enabled)
-                return 1;
+                return ilb_needed;
 
         hmp = hmp_cpu_domain(cpu);
         for_each_cpu_and(cpu, &hmp->cpus, nohz.idle_cpus_mask) {
@@ -8526,20 +8526,36 @@ static int hmp_packing_ilb_needed(int cpu)
 }
 #endif
 
+DEFINE_PER_CPU(cpumask_var_t, ilb_tmpmask);
+
 static inline int find_new_ilb(void)
 {
         int ilb = cpumask_first(nohz.idle_cpus_mask);
 #ifdef CONFIG_SCHED_HMP
         int call_cpu = smp_processor_id();
-        int ilb_needed = 1;
+        int ilb_needed = 0;
+        int cpu;
+        struct cpumask *tmp = per_cpu(ilb_tmpmask, smp_processor_id());
 
         /* restrict nohz balancing to occur in the same hmp domain */
         ilb = cpumask_first_and(nohz.idle_cpus_mask,
                         &((struct hmp_domain *)hmp_cpu_domain(call_cpu))->cpus);
+
+        /* check to see if it's necessary within this domain */
+        cpumask_andnot(tmp,
+                       &((struct hmp_domain *)hmp_cpu_domain(call_cpu))->cpus,
+                       nohz.idle_cpus_mask);
+        for_each_cpu(cpu, tmp) {
+                if (cpu_rq(cpu)->nr_running > 1) {
+                        ilb_needed = 1;
+                        break;
+                }
+        }
+
 #ifdef CONFIG_SCHED_HMP_LITTLE_PACKING
         if (ilb < nr_cpu_ids)
-                ilb_needed = hmp_packing_ilb_needed(ilb);
+                ilb_needed = hmp_packing_ilb_needed(ilb, ilb_needed);
 #endif
         if (ilb_needed && ilb < nr_cpu_ids && idle_cpu(ilb))
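Taken together, the patched find_new_ilb() now picks an idle CPU from the
caller's own HMP domain, builds the domain's busy set with cpumask_andnot()
into the per-CPU ilb_tmpmask scratch mask (kept per-CPU rather than on the
stack, in line with the kernel's avoidance of on-stack cpumasks on large
NR_CPUS builds), scans that set for a CPU with more than one task, and
finally lets the little-packing hook override the verdict on the slowest
domain. A hedged userspace sketch of that flow follows, with illustrative
names (pick_ilb, first_set_cpu) and bitmasks in place of cpumasks; the
packing override is reduced to a pair of flags:

#include <stdio.h>

#define NR_CPUS 8

/* Like cpumask_first(): index of the first set bit, or NR_CPUS if none. */
static int first_set_cpu(unsigned int mask)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (mask & (1u << cpu))
                        return cpu;
        return NR_CPUS;
}

/* Pick an idle-balance target, or NR_CPUS when no balance is worthwhile. */
static int pick_ilb(unsigned int domain, unsigned int idle,
                    const int nr_running[], int packing_active,
                    int over_threshold)
{
        /* restrict the candidate to idle CPUs inside the caller's domain */
        int ilb = first_set_cpu(domain & idle);

        /* busy CPUs of the domain, as cpumask_andnot() computes them */
        unsigned int busy = domain & ~idle;
        int needed = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if ((busy & (1u << cpu)) && nr_running[cpu] > 1)
                        needed = 1;

        /*
         * On the slowest domain with packing enabled, the real code hands
         * the decision to hmp_packing_ilb_needed(), which balances whenever
         * some CPU sits over the up_threshold; modelled here as two flags.
         */
        if (packing_active)
                needed = over_threshold;

        return (needed && ilb < NR_CPUS) ? ilb : NR_CPUS;
}

int main(void)
{
        int nr_running[NR_CPUS] = { 1, 2, 0, 0 };
        unsigned int domain = 0xf;      /* CPUs 0-3 form the big cluster */
        unsigned int idle = 0xc;        /* CPUs 2 and 3 are idle */

        /* CPU 1 has two tasks, so CPU 2 is a useful balance target. */
        printf("target: %d\n", pick_ilb(domain, idle, nr_running, 0, 0));

        nr_running[1] = 1;      /* one task everywhere: no target (NR_CPUS) */
        printf("target: %d\n", pick_ilb(domain, idle, nr_running, 0, 0));
        return 0;
}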