author		Chris Redpath <chris.redpath@arm.com>	2013-10-11 11:45:03 +0100
committer	Jon Medhurst <tixy@linaro.org>	2013-10-11 15:07:18 +0100
commit		cd5c2cc93d3dc581a19c62442f40895500d2a34c (patch)
tree		16361e52d356f85ba1b955439782961946c82325 /kernel/sched
parent		2e14ecb254a3eaa2993b5dd04014f41e1d6188ce (diff)
hmp: Remove potential for task_struct access race
Accessing the task_struct from the migration stop-work callbacks can be racy, so take a reference on the task when it is selected for migration and drop it once the stop-work has finished with it.

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Liviu Dudau <Liviu.Dudau@arm.com>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	6
1 file changed, 6 insertions(+), 0 deletions(-)
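The change applies the kernel's get_task_struct()/put_task_struct() reference-counting pattern: take a reference when the chosen task is stashed in rq->migrate_task for the stopper callback, and drop it once the callback has finished, so the task_struct cannot be freed while the stashed pointer is still in use. Below is a minimal user-space sketch of the same pattern, offered only as an illustration; struct task, get_task(), put_task(), select_for_migration() and migration_stop_work() are made-up stand-ins for the kernel helpers and are not part of this patch.

/*
 * Minimal user-space sketch of the get/put reference-counting
 * pattern this patch applies to task_struct.  Illustrative only:
 * struct task, get_task() and put_task() are stand-ins for the
 * kernel's task_struct, get_task_struct() and put_task_struct().
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct task {
	atomic_int usage;	/* reference count, starts at 1 */
	int pid;
};

static void get_task(struct task *p)
{
	atomic_fetch_add(&p->usage, 1);	/* pin the object */
}

static void put_task(struct task *p)
{
	/* fetch_sub returns the old value; 1 means this was the last reference */
	if (atomic_fetch_sub(&p->usage, 1) == 1)
		free(p);
}

static struct task *migrate_task;	/* pointer handed to the deferred work */

static void select_for_migration(struct task *p)
{
	get_task(p);		/* take a reference BEFORE stashing the pointer */
	migrate_task = p;
}

static void migration_stop_work(void)
{
	struct task *p = migrate_task;

	printf("migrating pid %d\n", p->pid);	/* safe: we still hold a reference */
	put_task(p);		/* balances the get in select_for_migration() */
}

int main(void)
{
	struct task *p = malloc(sizeof(*p));

	atomic_init(&p->usage, 1);	/* creator's reference */
	p->pid = 42;

	select_for_migration(p);	/* refcount: 2 */
	put_task(p);			/* creator drops its reference: 1 */
	migration_stop_work();		/* last put frees the object */
	return 0;
}

Whichever side drops the last reference performs the free, so the deferred work can safely dereference the stashed pointer even if the task would otherwise have gone away in the meantime.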
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fbe51262ac7..003de405acd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6709,6 +6709,7 @@ static int hmp_active_task_migration_cpu_stop(void *data)
 	rcu_read_unlock();
 	double_unlock_balance(busiest_rq, target_rq);
 out_unlock:
+	put_task_struct(p);
 	busiest_rq->active_balance = 0;
 	raw_spin_unlock_irq(&busiest_rq->lock);
 	return 0;
@@ -6782,6 +6783,7 @@ static int hmp_idle_pull_cpu_stop(void *data)
 	rcu_read_unlock();
 	double_unlock_balance(busiest_rq, target_rq);
 out_unlock:
+	put_task_struct(p);
 	busiest_rq->active_balance = 0;
 	raw_spin_unlock_irq(&busiest_rq->lock);
 	return 0;
@@ -6827,6 +6829,7 @@ static void hmp_force_up_migration(int this_cpu)
 		p = task_of(curr);
 		if (hmp_up_migration(cpu, &target_cpu, curr)) {
 			if (!target->active_balance) {
+				get_task_struct(p);
 				target->active_balance = 1;
 				target->push_cpu = target_cpu;
 				target->migrate_task = p;
@@ -6842,8 +6845,10 @@ static void hmp_force_up_migration(int this_cpu)
 			 * require extensive book keeping.
 			 */
 			curr = hmp_get_lightest_task(orig, 1);
+			p = task_of(curr);
 			target->push_cpu = hmp_offload_down(cpu, curr);
 			if (target->push_cpu < NR_CPUS) {
+				get_task_struct(p);
 				target->active_balance = 1;
 				target->migrate_task = p;
 				force = 1;
@@ -6922,6 +6927,7 @@ static unsigned int hmp_idle_pull(int this_cpu)
 	/* now we have a candidate */
 	raw_spin_lock_irqsave(&target->lock, flags);
 	if (!target->active_balance && task_rq(p) == target) {
+		get_task_struct(p);
 		target->active_balance = 1;
 		target->push_cpu = this_cpu;
 		target->migrate_task = p;