summary | refs | log | tree | commit | diff
path: root/kernel
diff options
context:
space:
mode:
author    Chris Redpath <chris.redpath@arm.com>  2015-02-03 21:19:41 +0900
committer Seung-Woo Kim <sw0312.kim@samsung.com> 2016-12-14 13:41:49 +0900
commit    8209016c92325719af3aab2b8e7de3c568ecdfb9 (patch)
tree      ce8d901136f23d920df48af7760b732a29a03fa6 /kernel
parent    9b05ce6bf65f21ba34854b77833d9b9208b925f9 (diff)
hmp: Remove potential for task_struct access race
Accessing the task_struct can be racy in certain conditions, so we need to
only acquire the data when needed.

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Liviu Dudau <Liviu.Dudau@arm.com>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
[k.kozlowski: rebased on 4.1, no signed-off-by of previous committer]
Signed-off-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
Diffstat (limited to 'kernel')
-rw-r--r-- kernel/sched/fair.c | 6
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f4c732ae143f..10280438fcb1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8784,6 +8784,7 @@ static int hmp_active_task_migration_cpu_stop(void *data)
rcu_read_unlock();
double_unlock_balance(busiest_rq, target_rq);
out_unlock:
+ put_task_struct(p);
busiest_rq->active_balance = 0;
raw_spin_unlock_irq(&busiest_rq->lock);
return 0;
@@ -8857,6 +8858,7 @@ static int hmp_idle_pull_cpu_stop(void *data)
rcu_read_unlock();
double_unlock_balance(busiest_rq, target_rq);
out_unlock:
+ put_task_struct(p);
busiest_rq->active_balance = 0;
raw_spin_unlock_irq(&busiest_rq->lock);
return 0;
@@ -8902,6 +8904,7 @@ static void hmp_force_up_migration(int this_cpu)
p = task_of(curr);
if (hmp_up_migration(cpu, &target_cpu, curr)) {
if (!target->active_balance) {
+ get_task_struct(p);
target->active_balance = 1;
target->push_cpu = target_cpu;
target->migrate_task = p;
@@ -8917,8 +8920,10 @@ static void hmp_force_up_migration(int this_cpu)
* require extensive book keeping.
*/
curr = hmp_get_lightest_task(orig, 1);
+ p = task_of(curr);
target->push_cpu = hmp_offload_down(cpu, curr);
if (target->push_cpu < NR_CPUS) {
+ get_task_struct(p);
target->active_balance = 1;
target->migrate_task = p;
force = 1;
@@ -8997,6 +9002,7 @@ static unsigned int hmp_idle_pull(int this_cpu)
/* now we have a candidate */
raw_spin_lock_irqsave(&target->lock, flags);
if (!target->active_balance && task_rq(p) == target) {
+ get_task_struct(p);
target->active_balance = 1;
target->push_cpu = this_cpu;
target->migrate_task = p;