author     Chris Redpath <chris.redpath@arm.com>    2015-02-03 17:34:03 +0900
committer  Seung-Woo Kim <sw0312.kim@samsung.com>   2016-12-14 13:41:48 +0900
commit     9b05ce6bf65f21ba34854b77833d9b9208b925f9
tree       ae44011a1be0504e9ba4836496fc8f3188507205
parent     da7debdb62bcd558948f54404c2d4530bc10335a
sched: HMP: fix potential logical errors
The previous API for hmp_up_migration reset the destination CPU every
time, regardless of whether a migration was desired. The code using it
assumed that the value would not be changed unless a migration was
required. In one rare circumstance, this could have led to a task
migrating to a little CPU at the wrong time. Fixing that led to a
slight logical tweak to make the surrounding APIs operate a bit more
obviously.

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Robin Randhawa <robin.randhawa@arm.com>
Signed-off-by: Liviu Dudau <Liviu.Dudau@arm.com>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
[k.kozlowski: rebased on 4.1, no signed-off-by of previous committer]
Signed-off-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
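For illustration only, a minimal user-space sketch of the hazard described above (the helper names and hard-coded CPU numbers are hypothetical stand-ins, not the kernel code itself): the old check overwrites the caller's destination CPU even when it reports that no migration is wanted, while the fixed variant writes it only when a migration is actually requested.

/*
 * check_up_migration_old() mirrors the previous hmp_up_migration()
 * behaviour: it clobbers *target_cpu on every call.
 * check_up_migration_new() mirrors the fixed behaviour: *target_cpu
 * is written only when a migration is requested.
 */
#include <stdio.h>

#define NR_CPUS 8

/* Old behaviour: always resets the destination, even when returning 0. */
static int check_up_migration_old(int *target_cpu, int migration_wanted)
{
	*target_cpu = NR_CPUS;		/* unconditional reset */
	if (!migration_wanted)
		return 0;
	*target_cpu = 4;		/* pretend CPU 4 is an idle big CPU */
	return 1;
}

/* New behaviour: leaves the caller's value alone unless migrating. */
static int check_up_migration_new(int *target_cpu, int migration_wanted)
{
	int temp_target_cpu;

	if (!migration_wanted)
		return 0;
	temp_target_cpu = 4;
	if (target_cpu)
		*target_cpu = temp_target_cpu;
	return 1;
}

int main(void)
{
	/* The caller seeds dest with a CPU it already chose and assumes
	 * the helper will not touch it unless a migration is required. */
	int dest = 2;

	if (!check_up_migration_old(&dest, 0))
		printf("old API: dest silently became %d (expected 2)\n", dest);

	dest = 2;
	if (!check_up_migration_new(&dest, 0))
		printf("new API: dest is still %d\n", dest);

	return 0;
}

On that reading, the rare misplacement comes from a caller consuming a destination value the helper had silently rewritten.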
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/fair.c | 126
1 file changed, 61 insertions(+), 65 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2f9b0f6aef4e..f4c732ae143f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5175,7 +5175,7 @@ unsigned int hmp_next_down_threshold = 4096;
static unsigned int hmp_up_migration(int cpu, int *target_cpu, struct sched_entity *se);
static unsigned int hmp_down_migration(int cpu, struct sched_entity *se);
static inline unsigned int hmp_domain_min_load(struct hmp_domain *hmpd,
- int *min_cpu);
+ int *min_cpu, struct cpumask *affinity);
/* Check if cpu is in fastest hmp_domain */
static inline unsigned int hmp_cpu_is_fastest(int cpu)
@@ -5215,22 +5215,23 @@ static inline struct hmp_domain *hmp_faster_domain(int cpu)
/*
* Selects a cpu in previous (faster) hmp_domain
- * Note that cpumask_any_and() returns the first cpu in the cpumask
*/
static inline unsigned int hmp_select_faster_cpu(struct task_struct *tsk,
int cpu)
{
int lowest_cpu=NR_CPUS;
- __always_unused int lowest_ratio = hmp_domain_min_load(hmp_faster_domain(cpu), &lowest_cpu);
- /*
- * If the lowest-loaded CPU in the domain is allowed by the task affinity
- * select that one, otherwise select one which is allowed
- */
- if(lowest_cpu != NR_CPUS && cpumask_test_cpu(lowest_cpu,tsk_cpus_allowed(tsk)))
- return lowest_cpu;
+ __always_unused int lowest_ratio;
+ struct hmp_domain *hmp;
+
+ if (hmp_cpu_is_fastest(cpu))
+ hmp = hmp_cpu_domain(cpu);
else
- return cpumask_any_and(&hmp_faster_domain(cpu)->cpus,
- tsk_cpus_allowed(tsk));
+ hmp = hmp_faster_domain(cpu);
+
+ lowest_ratio = hmp_domain_min_load(hmp, &lowest_cpu,
+ tsk_cpus_allowed(tsk));
+
+ return lowest_cpu;
}
/*
@@ -5249,16 +5250,10 @@ static inline unsigned int hmp_select_slower_cpu(struct task_struct *tsk,
else
hmp = hmp_slower_domain(cpu);
- lowest_ratio = hmp_domain_min_load(hmp, &lowest_cpu);
- /*
- * If the lowest-loaded CPU in the domain is allowed by the task affinity
- * select that one, otherwise select one which is allowed
- */
- if(lowest_cpu != NR_CPUS && cpumask_test_cpu(lowest_cpu,tsk_cpus_allowed(tsk)))
- return lowest_cpu;
- else
- return cpumask_any_and(&hmp_slower_domain(cpu)->cpus,
- tsk_cpus_allowed(tsk));
+ lowest_ratio = hmp_domain_min_load(hmp, &lowest_cpu,
+ tsk_cpus_allowed(tsk));
+
+ return lowest_cpu;
}
static inline void hmp_next_up_delay(struct sched_entity *se, int cpu)
@@ -5446,9 +5441,24 @@ static int hmp_attr_init(void)
}
late_initcall(hmp_attr_init);
#endif /* CONFIG_HMP_VARIABLE_SCALE */
-
+/*
+ * return the load of the lowest-loaded CPU in a given HMP domain
+ * min_cpu optionally points to an int to receive the CPU.
+ * affinity optionally points to a cpumask containing the
+ * CPUs to be considered. note:
+ * + min_cpu = NR_CPUS only if no CPUs are in the set of
+ * affinity && hmp_domain cpus
+ * + min_cpu will always otherwise equal one of the CPUs in
+ * the hmp domain
+ * + when more than one CPU has the same load, the one which
+ * is least-recently-disturbed by an HMP migration will be
+ * selected
+ * + if all CPUs are equally loaded or idle and the times are
+ * all the same, the first in the set will be used
+ * + if affinity is not set, cpu_online_mask is used
+ */
static inline unsigned int hmp_domain_min_load(struct hmp_domain *hmpd,
- int *min_cpu)
+ int *min_cpu, struct cpumask *affinity)
{
int cpu;
int min_cpu_runnable_temp = NR_CPUS;
@@ -5457,8 +5467,15 @@ static inline unsigned int hmp_domain_min_load(struct hmp_domain *hmpd,
unsigned long min_runnable_load = INT_MAX;
unsigned long contrib;
struct sched_avg *avg;
+ struct cpumask temp_cpumask;
+ /*
+ * only look at CPUs allowed if specified,
+ * otherwise look at all online CPUs in the
+ * right HMP domain
+ */
+ cpumask_and(&temp_cpumask, &hmpd->cpus, affinity ? affinity : cpu_online_mask);
- for_each_cpu(cpu, &hmpd->cpus) {
+ for_each_cpu(cpu, &temp_cpumask) {
avg = &cpu_rq(cpu)->avg;
/* used for both up and down migration */
curr_last_migration = avg->hmp_last_up_migration ?
@@ -5520,7 +5537,7 @@ static inline unsigned int hmp_offload_down(int cpu, struct sched_entity *se)
return NR_CPUS;
/* Is there an idle CPU in the current domain */
- min_usage = hmp_domain_min_load(hmp_cpu_domain(cpu), NULL);
+ min_usage = hmp_domain_min_load(hmp_cpu_domain(cpu), NULL, NULL);
if (min_usage == 0) {
trace_sched_hmp_offload_abort(cpu, min_usage, "load");
return NR_CPUS;
@@ -5542,17 +5559,14 @@ static inline unsigned int hmp_offload_down(int cpu, struct sched_entity *se)
}
/* Does the slower domain have any idle CPUs? */
- min_usage = hmp_domain_min_load(hmp_slower_domain(cpu), &dest_cpu);
- if (min_usage > 0) {
- trace_sched_hmp_offload_abort(cpu, min_usage, "slowdomain");
- return NR_CPUS;
- }
+ min_usage = hmp_domain_min_load(hmp_slower_domain(cpu), &dest_cpu,
+ tsk_cpus_allowed(task_of(se)));
- if (cpumask_test_cpu(dest_cpu, &hmp_slower_domain(cpu)->cpus)) {
+ if (min_usage == 0) {
trace_sched_hmp_offload_succeed(cpu, dest_cpu);
return dest_cpu;
- }
-
+ } else
+ trace_sched_hmp_offload_abort(cpu,min_usage,"slowdomain");
return NR_CPUS;
}
#endif /* CONFIG_SCHED_HMP */
@@ -5581,30 +5595,13 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
#ifdef CONFIG_SCHED_HMP
/* always put non-kernel forking tasks on a big domain */
if (p->mm && (sd_flag & SD_BALANCE_FORK)) {
- if(hmp_cpu_is_fastest(prev_cpu)) {
- struct hmp_domain *hmpdom = list_entry(&hmp_cpu_domain(prev_cpu)->hmp_domains, struct hmp_domain, hmp_domains);
- __always_unused int lowest_ratio = hmp_domain_min_load(hmpdom, &new_cpu);
- if (new_cpu != NR_CPUS &&
- cpumask_test_cpu(new_cpu,
- tsk_cpus_allowed(p))) {
- hmp_next_up_delay(&p->se, new_cpu);
- return new_cpu;
- } else {
- new_cpu = cpumask_any_and(
- &hmp_faster_domain(cpu)->cpus,
- tsk_cpus_allowed(p));
- if (new_cpu < nr_cpu_ids) {
- hmp_next_up_delay(&p->se, new_cpu);
- return new_cpu;
- }
- }
- } else {
- new_cpu = hmp_select_faster_cpu(p, prev_cpu);
- if (new_cpu != NR_CPUS) {
- hmp_next_up_delay(&p->se, new_cpu);
- return new_cpu;
- }
+ new_cpu = hmp_select_faster_cpu(p, prev_cpu);
+ if (new_cpu != NR_CPUS) {
+ hmp_next_up_delay(&p->se, new_cpu);
+ return new_cpu;
}
+ /* failed to perform HMP fork balance, use normal balance */
+ new_cpu = cpu;
}
#endif
@@ -5676,6 +5673,8 @@ unlock:
rcu_read_unlock();
#ifdef CONFIG_SCHED_HMP
+ prev_cpu = task_cpu(p);
+
if (hmp_up_migration(prev_cpu, &new_cpu, &p->se)) {
hmp_next_up_delay(&p->se, new_cpu);
trace_sched_hmp_migrate(p, new_cpu, HMP_MIGRATE_WAKEUP);
@@ -8573,11 +8572,9 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
static unsigned int hmp_up_migration(int cpu, int *target_cpu, struct sched_entity *se)
{
struct task_struct *p = task_of(se);
+ int temp_target_cpu;
u64 now;
- if (target_cpu)
- *target_cpu = NR_CPUS;
-
if (hmp_cpu_is_fastest(cpu))
return 0;
@@ -8600,13 +8597,12 @@ static unsigned int hmp_up_migration(int cpu, int *target_cpu, struct sched_enti
* idle CPU or 1023 for any partly-busy one.
* Be explicit about requirement for an idle CPU.
*/
- if (hmp_domain_min_load(hmp_faster_domain(cpu), target_cpu) != 0)
- return 0;
-
- if (cpumask_intersects(&hmp_faster_domain(cpu)->cpus,
- tsk_cpus_allowed(p)))
+ if (hmp_domain_min_load(hmp_faster_domain(cpu), &temp_target_cpu,
+ tsk_cpus_allowed(p)) == 0 && temp_target_cpu != NR_CPUS) {
+ if(target_cpu)
+ *target_cpu = temp_target_cpu;
return 1;
-
+ }
return 0;
}
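As a closing illustration of the affinity handling that hmp_domain_min_load() gains in this patch, here is a minimal user-space sketch; plain bitmasks stand in for struct cpumask, an affinity of 0 means "no affinity supplied", and the load/last-migration comparison is reduced to "first candidate wins", so only the set arithmetic matches the kernel code.

#include <stdio.h>

#define NR_CPUS 8

/*
 * Candidate set = domain CPUs & (task affinity if given, else online CPUs);
 * *min_cpu is left at NR_CPUS when that intersection is empty.
 */
static void domain_min_load_cpu(unsigned int domain_cpus,
				unsigned int affinity,
				unsigned int online_cpus,
				int *min_cpu)
{
	unsigned int candidates = domain_cpus &
				  (affinity ? affinity : online_cpus);
	int cpu;

	*min_cpu = NR_CPUS;			/* "no CPU in the set" marker */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (candidates & (1u << cpu)) {
			/* the real code picks the least-loaded,
			 * least-recently-migrated candidate instead */
			*min_cpu = cpu;
			break;
		}
	}
}

int main(void)
{
	int cpu;

	/* big domain = CPUs 4-7, task allowed on CPUs 0, 1 and 5 only */
	domain_min_load_cpu(0xf0, 0x23, 0xff, &cpu);
	printf("allowed big CPU: %d\n", cpu);		/* prints 5 */

	/* task pinned entirely outside the big domain: nothing to pick */
	domain_min_load_cpu(0xf0, 0x03, 0xff, &cpu);
	printf("no allowed big CPU: %d (== NR_CPUS)\n", cpu);

	return 0;
}

The same empty-intersection case is why the final hunk checks temp_target_cpu != NR_CPUS in addition to the returned load before committing to an up-migration.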