path: root/kernel
author		Chris Redpath <chris.redpath@arm.com>	2015-02-04 14:25:57 +0900
committer	Seung-Woo Kim <sw0312.kim@samsung.com>	2016-12-14 13:41:54 +0900
commit		79445b8c7409e89f166ac5e57765cba574087679 (patch)
tree		8f43a6c86d6e4e9dae2ecd422d7df5f5628c4039 /kernel
parent		79850af1eae6c30630e11b7a7e2ecaffd7519307 (diff)
sched: update runqueue clock before migrations away
If we migrate a sleeping task away from a CPU which has the tick stopped, then both the clock_task and decay_counter will be out of date for that CPU and we will not decay load correctly regardless of how often we update the blocked load.

This is only an issue for tasks which are not on a runqueue (because otherwise that CPU would be awake) and simultaneously the CPU the task previously ran on has had the tick stopped.

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
[k.kozlowski: rebased on 4.1, no signed-off-by of previous committer]
Signed-off-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
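The "will not decay load correctly" part can be seen from how per-entity load tracking decays a blocked contribution geometrically per elapsed period, where the elapsed-period count is taken from the runqueue clock: a stale clock makes that count too small, so too little load is decayed before the contribution is moved to the new CPU. Below is a rough standalone userspace sketch (not part of the patch): the decay factor y with y^32 = 0.5 matches the scheduler's per-entity load-tracking convention, but the load value and the period counts are invented for illustration.

#include <stdio.h>
#include <math.h>

int main(void)
{
	/* Per-entity load tracking halves a contribution every 32 periods. */
	double y = pow(0.5, 1.0 / 32.0);
	double load = 1024.0;			/* invented blocked load contribution */

	/* Elapsed periods as seen at migration time. */
	unsigned int periods_stale = 4;		/* tick stopped: clock barely advanced */
	unsigned int periods_actual = 200;	/* what an updated clock would report */

	printf("decay with stale clock:   %.1f\n", load * pow(y, periods_stale));
	printf("decay with updated clock: %.1f\n", load * pow(y, periods_actual));
	return 0;
}

With the updated clock almost the whole contribution has decayed away (around 13 out of 1024 remains), whereas the stale clock leaves most of it (around 939) in place, which is why the patch forces an update of the source runqueue's clock and blocked load before synchronizing the entity's decay.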
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	40
1 file changed, 40 insertions, 0 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 22598f9f9891..91aca005f9d2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5829,6 +5829,15 @@ unlock:
 	return new_cpu;
 }
 
+#ifdef CONFIG_NO_HZ_COMMON
+static int nohz_test_cpu(int cpu);
+#else
+static inline int nohz_test_cpu(int cpu)
+{
+	return 0;
+}
+#endif
+
 /*
  * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
  * cfs_rq_of(p) references at time of call are still valid and identify the
@@ -5848,6 +5857,25 @@ migrate_task_rq_fair(struct task_struct *p, int next_cpu)
 	 * be negative here since on-rq tasks have decay-count == 0.
 	 */
 	if (se->avg.decay_count) {
+		/*
+		 * If we migrate a sleeping task away from a CPU
+		 * which has the tick stopped, then both the clock_task
+		 * and decay_counter will be out of date for that CPU
+		 * and we will not decay load correctly.
+		 */
+		if (!se->on_rq && nohz_test_cpu(task_cpu(p))) {
+			struct rq *rq = cpu_rq(task_cpu(p));
+			unsigned long flags;
+			/*
+			 * Current CPU cannot be holding rq->lock in this
+			 * circumstance, but another might be. We must hold
+			 * rq->lock before we go poking around in its clocks
+			 */
+			raw_spin_lock_irqsave(&rq->lock, flags);
+			update_rq_clock(rq);
+			update_cfs_rq_blocked_load(cfs_rq, 0);
+			raw_spin_unlock_irqrestore(&rq->lock, flags);
+		}
 		se->avg.decay_count = -__synchronize_entity_decay(se);
 		atomic_long_add(se->avg.load_avg_contrib,
 				&cfs_rq->removed_load);
@@ -8324,6 +8352,18 @@ static struct {
 	unsigned long next_balance; /* in jiffy units */
 } nohz ____cacheline_aligned;
 
+/*
+ * nohz_test_cpu used when load tracking is enabled. FAIR_GROUP_SCHED
+ * dependency below may be removed when load tracking guards are
+ * removed.
+ */
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static int nohz_test_cpu(int cpu)
+{
+	return cpumask_test_cpu(cpu, nohz.idle_cpus_mask);
+}
+#endif
+
 #ifdef CONFIG_SCHED_HMP_LITTLE_PACKING
 /*
  * Decide if the tasks on the busy CPUs in the