diff options
-rw-r--r-- | kernel/sched/core.c | 20 |
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9a0c1841993f..8e8134312ecc 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1212,10 +1212,10 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) p->sched_class->set_cpus_allowed(p, new_mask); - if (running) - p->sched_class->set_curr_task(rq); if (queued) enqueue_task(rq, p, ENQUEUE_RESTORE); + if (running) + p->sched_class->set_curr_task(rq); } /* @@ -3585,10 +3585,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio) p->prio = prio; + if (queued) + enqueue_task(rq, p, queue_flag); if (running) p->sched_class->set_curr_task(rq); - if (queued) - enqueue_task(rq, p, queue_flag); check_class_changed(rq, p, prev_class, oldprio); out_unlock: @@ -4147,8 +4147,6 @@ change: prev_class = p->sched_class; __setscheduler(rq, p, attr, pi); - if (running) - p->sched_class->set_curr_task(rq); if (queued) { int enqueue_flags = ENQUEUE_RESTORE; /* @@ -4160,6 +4158,8 @@ change: enqueue_task(rq, p, enqueue_flags); } + if (running) + p->sched_class->set_curr_task(rq); check_class_changed(rq, p, prev_class, oldprio); preempt_disable(); /* avoid rq from going away on us */ @@ -5317,10 +5317,10 @@ void sched_setnuma(struct task_struct *p, int nid) p->numa_preferred_nid = nid; - if (running) - p->sched_class->set_curr_task(rq); if (queued) enqueue_task(rq, p, ENQUEUE_RESTORE); + if (running) + p->sched_class->set_curr_task(rq); task_rq_unlock(rq, p, &flags); } #endif /* CONFIG_NUMA_BALANCING */ @@ -8207,10 +8207,10 @@ void sched_move_task(struct task_struct *tsk) sched_change_group(tsk, TASK_MOVE_GROUP); - if (unlikely(running)) - tsk->sched_class->set_curr_task(rq); if (queued) enqueue_task(rq, tsk, ENQUEUE_RESTORE); + if (unlikely(running)) + tsk->sched_class->set_curr_task(rq); task_rq_unlock(rq, tsk, &flags); }