author     Vincent Guittot <vincent.guittot@linaro.org>   2018-05-29 12:30:07 +0200
committer  Vincent Guittot <vincent.guittot@linaro.org>   2018-05-29 12:30:07 +0200
commit     aafabba821cf9c56b8bb36df2c1abd72a1557423 (patch)
tree       c6dd085b2b11251df6a54e9bd2e18ea242359af2
parent     b5f83d1da89b093a4d1e5a3cc895b4a1104dfe09 (diff)
sched/core: Fix incorrect utilization accounting when switching to fair class (sched-fix-util-accounting-switch-to-fair)
[ Upstream commit a399d233078edbba7cf7902a6d080100cdf75636 ]

When a task switches to fair scheduling class, the period between now and the last update of its utilization is accounted as running time whatever happened during this period. This incorrect accounting applies to the task and also to the task group branch.

When changing the property of a running task like its list of allowed CPUs or its scheduling class, we follow the sequence:

- dequeue task
- put task
- change the property
- set task as current task
- enqueue task

The end of the sequence doesn't follow the normal sequence (as per __schedule()) which is:

- enqueue a task
- then set the task as current task

This incorrect ordering is the root cause of incorrect utilization accounting. Update the sequence to follow the right one:

- dequeue task
- put task
- change the property
- enqueue task
- set task as current task

Change-Id: I91975778f6775061c53a039cfae8723ba02dbc0e
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Morten.Rasmussen@arm.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bsegall@google.com
Cc: dietmar.eggemann@arm.com
Cc: linaro-kernel@lists.linaro.org
Cc: pjt@google.com
Cc: yuyang.du@intel.com
Link: http://lkml.kernel.org/r/1473666472-13749-8-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
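The ordering change is easier to see in isolation. Below is a minimal, standalone C sketch of the corrected sequence; the struct and helper names here (struct task, dequeue_task, put_prev_task, change_property, enqueue_task, set_curr_task) are simplified stand-ins chosen for this illustration, not the kernel's actual data structures or API.

/* Standalone sketch (not kernel code): types and helpers below are
 * simplified stand-ins used only to illustrate the reordering. */
#include <stdbool.h>
#include <stdio.h>

struct task {
        const char *name;
        bool queued;   /* on a runqueue? */
        bool running;  /* currently the CPU's running task? */
};

static void dequeue_task(struct task *p)    { p->queued = false; printf("dequeue %s\n", p->name); }
static void put_prev_task(struct task *p)   { printf("put %s\n", p->name); }
static void change_property(struct task *p) { printf("change property of %s\n", p->name); }
static void enqueue_task(struct task *p)    { p->queued = true; printf("enqueue %s\n", p->name); }
static void set_curr_task(struct task *p)   { printf("set %s as current task\n", p->name); }

/*
 * Corrected sequence: after the property change, re-enqueue the task
 * before marking it current, matching the order used by __schedule().
 * The old sequence called set_curr_task() first and enqueue_task()
 * last, which led to the bogus utilization accounting described above.
 */
static void change_running_task_property(struct task *p)
{
        bool queued = p->queued;
        bool running = p->running;

        if (queued)
                dequeue_task(p);
        if (running)
                put_prev_task(p);

        change_property(p);

        if (queued)
                enqueue_task(p);        /* enqueue first ...            */
        if (running)
                set_curr_task(p);       /* ... then set as current task */
}

int main(void)
{
        struct task p = { .name = "demo", .queued = true, .running = true };

        change_running_task_property(&p);
        return 0;
}

Run as-is, the sketch only prints the steps in order; the point is that the enqueue happens before the task is declared current, so a later utilization update starts from a consistent state.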
-rw-r--r--   kernel/sched/core.c   20
1 file changed, 10 insertions, 10 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9a0c1841993f..8e8134312ecc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1212,10 +1212,10 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
p->sched_class->set_cpus_allowed(p, new_mask);
- if (running)
- p->sched_class->set_curr_task(rq);
if (queued)
enqueue_task(rq, p, ENQUEUE_RESTORE);
+ if (running)
+ p->sched_class->set_curr_task(rq);
}
/*
@@ -3585,10 +3585,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
p->prio = prio;
+ if (queued)
+ enqueue_task(rq, p, enqueue_flag);
if (running)
p->sched_class->set_curr_task(rq);
- if (queued)
- enqueue_task(rq, p, enqueue_flag);
check_class_changed(rq, p, prev_class, oldprio);
out_unlock:
@@ -4147,8 +4147,6 @@ change:
prev_class = p->sched_class;
__setscheduler(rq, p, attr, pi);
- if (running)
- p->sched_class->set_curr_task(rq);
if (queued) {
int enqueue_flags = ENQUEUE_RESTORE;
/*
@@ -4160,6 +4158,8 @@ change:
enqueue_task(rq, p, enqueue_flags);
}
+ if (running)
+ p->sched_class->set_curr_task(rq);
check_class_changed(rq, p, prev_class, oldprio);
preempt_disable(); /* avoid rq from going away on us */
@@ -5317,10 +5317,10 @@ void sched_setnuma(struct task_struct *p, int nid)
p->numa_preferred_nid = nid;
- if (running)
- p->sched_class->set_curr_task(rq);
if (queued)
enqueue_task(rq, p, ENQUEUE_RESTORE);
+ if (running)
+ p->sched_class->set_curr_task(rq);
task_rq_unlock(rq, p, &flags);
}
#endif /* CONFIG_NUMA_BALANCING */
@@ -8207,10 +8207,10 @@ void sched_move_task(struct task_struct *tsk)
sched_change_group(tsk, TASK_MOVE_GROUP);
- if (unlikely(running))
- tsk->sched_class->set_curr_task(rq);
if (queued)
enqueue_task(rq, tsk, ENQUEUE_RESTORE);
+ if (unlikely(running))
+ tsk->sched_class->set_curr_task(rq);
task_rq_unlock(rq, tsk, &flags);
}