 kernel/sched/fair.c | 33 +++++++++++++++++----------------
 1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 23da6b6a961a..c4086d79d00b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5339,20 +5339,24 @@ static inline unsigned int hmp_domain_min_load(struct hmp_domain *hmpd,
 						int *min_cpu)
 {
 	int cpu;
-	int min_load = INT_MAX;
-	int min_cpu_temp = NR_CPUS;
+	int min_cpu_runnable_temp = NR_CPUS;
+	unsigned long min_runnable_load = INT_MAX;
+	unsigned long contrib;
 
 	for_each_cpu_mask(cpu, hmpd->cpus) {
-		if (cpu_rq(cpu)->cfs.tg_load_contrib < min_load) {
-			min_load = cpu_rq(cpu)->cfs.tg_load_contrib;
-			min_cpu_temp = cpu;
+		/* don't use the divisor in the loop, just at the end */
+		contrib = cpu_rq(cpu)->avg.runnable_avg_sum * scale_load_down(1024);
+		if (contrib < min_runnable_load) {
+			min_runnable_load = contrib;
+			min_cpu_runnable_temp = cpu;
 		}
 	}
 
 	if (min_cpu)
-		*min_cpu = min_cpu_temp;
+		*min_cpu = min_cpu_runnable_temp;
 
-	return min_load;
+	/* domain will often have at least one empty CPU */
+	return min_runnable_load ? min_runnable_load / (LOAD_AVG_MAX + 1) : 0;
 }
 
 /*
@@ -5380,22 +5384,18 @@ static inline unsigned int hmp_offload_down(int cpu, struct sched_entity *se)
 		return NR_CPUS;
 
 	/* Is the current domain fully loaded? */
-	/* load < ~94% */
+	/* load < ~50% */
 	min_usage = hmp_domain_min_load(hmp_cpu_domain(cpu), NULL);
-	if (min_usage < NICE_0_LOAD-64)
-		return NR_CPUS;
-
-	/* Is the cpu oversubscribed? */
-	/* load < ~194% */
-	if (cpu_rq(cpu)->cfs.tg_load_contrib < 2*NICE_0_LOAD-64)
+	if (min_usage < (NICE_0_LOAD>>1))
 		return NR_CPUS;
 
 	/* Is the task alone on the cpu? */
-	if (cpu_rq(cpu)->nr_running < 2)
+	if (cpu_rq(cpu)->cfs.nr_running < 2)
 		return NR_CPUS;
 
 	/* Is the task actually starving? */
-	if (hmp_task_starvation(se) > 768) /* <25% waiting */
+	/* >=25% ratio running/runnable = starving */
+	if (hmp_task_starvation(se) > 768)
 		return NR_CPUS;
 
 	/* Does the slower domain have spare cycles? */
@@ -5406,6 +5406,7 @@ static inline unsigned int hmp_offload_down(int cpu, struct sched_entity *se)
 
 	if (cpumask_test_cpu(dest_cpu, &hmp_slower_domain(cpu)->cpus))
 		return dest_cpu;
+
 	return NR_CPUS;
 }
 #endif /* CONFIG_SCHED_HMP */
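
The arithmetic above is easier to follow outside the scheduler. Below is a minimal, self-contained userspace sketch of the two decisions this patch touches: the scaled minimum-runnable-load scan (multiply by 1024 inside the loop, divide by LOAD_AVG_MAX + 1 once at the end) and the early-return chain that gates offloading a task to the slower domain. The fake_cpu struct, the CPU arrays, the names domain_min_load() and pick_offload_cpu(), and the final spare-cycle check on the slower domain are illustration-only assumptions (that last check is outside the hunks shown). Only the divisor and the thresholds (NICE_0_LOAD >> 1, 768 out of 1024) come from the diff; NICE_0_LOAD is taken at its classic 1024 and LOAD_AVG_MAX at the 47742 used in fair.c of that era. This is a sketch of the idea, not kernel code.

/*
 * Userspace sketch of the two decisions in this patch.  Field and constant
 * names mirror the diff; the fake_cpu harness and main() are illustration
 * only, not kernel code.  scale_load_down(1024) is written as plain 1024.
 */
#include <limits.h>
#include <stdio.h>

#define LOAD_AVG_MAX	47742		/* max runnable_avg_sum, as in fair.c */
#define NICE_0_LOAD	1024		/* load weight of a nice-0 task */

struct fake_cpu {
	unsigned long runnable_avg_sum;	/* 0..LOAD_AVG_MAX */
	unsigned int cfs_nr_running;	/* runnable CFS tasks on this cpu */
};

/*
 * Mirrors hmp_domain_min_load(): multiply by 1024 inside the loop, divide
 * by (LOAD_AVG_MAX + 1) only once at the end.
 */
static unsigned long domain_min_load(const struct fake_cpu *cpus, int n,
				     int *min_cpu)
{
	unsigned long min_runnable_load = ULONG_MAX;
	int min_cpu_tmp = -1;
	int cpu;

	for (cpu = 0; cpu < n; cpu++) {
		unsigned long contrib = cpus[cpu].runnable_avg_sum * 1024UL;

		if (contrib < min_runnable_load) {
			min_runnable_load = contrib;
			min_cpu_tmp = cpu;
		}
	}

	if (min_cpu)
		*min_cpu = min_cpu_tmp;

	/* a domain will often have at least one idle cpu; report it as 0 */
	return min_runnable_load ? min_runnable_load / (LOAD_AVG_MAX + 1) : 0;
}

/*
 * Mirrors the early-return chain of hmp_offload_down().  Returns the index
 * of a slow-domain cpu to offload to, or -1 to keep the task where it is.
 * starvation is the task's running/runnable ratio scaled to 0..1024.
 */
static int pick_offload_cpu(const struct fake_cpu *fast, int n_fast,
			    int this_cpu,
			    const struct fake_cpu *slow, int n_slow,
			    unsigned int starvation)
{
	unsigned long min_usage;
	int dest_cpu;

	/* Is the current (fast) domain fully loaded?  load < ~50% => no. */
	min_usage = domain_min_load(fast, n_fast, NULL);
	if (min_usage < (NICE_0_LOAD >> 1))
		return -1;

	/* Is the task alone on its cpu?  Then offloading gains nothing. */
	if (fast[this_cpu].cfs_nr_running < 2)
		return -1;

	/* Running more than 768/1024 (~75%) of its runnable time: not starving. */
	if (starvation > 768)
		return -1;

	/* Spare cycles in the slower domain?  (Assumed: same ~50% bar.) */
	min_usage = domain_min_load(slow, n_slow, &dest_cpu);
	if (min_usage > (NICE_0_LOAD >> 1))
		return -1;

	return dest_cpu;
}

int main(void)
{
	struct fake_cpu fast[2] = {
		{ .runnable_avg_sum = LOAD_AVG_MAX,           .cfs_nr_running = 3 },
		{ .runnable_avg_sum = (LOAD_AVG_MAX / 4) * 3, .cfs_nr_running = 2 },
	};
	struct fake_cpu slow[2] = {
		{ .runnable_avg_sum = 0,                .cfs_nr_running = 0 },
		{ .runnable_avg_sum = LOAD_AVG_MAX / 4, .cfs_nr_running = 1 },
	};

	/* task on fast cpu 0, running ~60% (614/1024) of its runnable time */
	printf("offload target: %d\n", pick_offload_cpu(fast, 2, 0, slow, 2, 614));
	return 0;
}

Built with a plain cc, the sketch prints cpu 0 of the slow domain as the target: the fast domain's least loaded cpu sits at roughly 75% load, the task shares its cpu with other runnable tasks and waits about 40% of its runnable time, and the slow domain has an idle cpu.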