author     Kirill Tkhai <tkhai@yandex.ru>    2014-04-10 17:38:36 +0400
committer  Ingo Molnar <mingo@kernel.org>    2014-04-17 13:39:51 +0200
commit     a1d9a3231eac4117cadaf4b6bba5b2902c15a33e (patch)
tree       ea81cd4c9553b0ae0adb4357b578ba3cefc2dccf /kernel
parent     60e69eed85bb7b5198ef70643b5895c26ad76ef7 (diff)
sched: Check for stop task appearance when balancing happens
We need to do it like we do for the other higher priority classes.

Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
Cc: Michael wang <wangyun@linux.vnet.ibm.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/336561397137116@web27h.yandex.ru
Signed-off-by: Ingo Molnar <mingo@kernel.org>
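All three hunks below add the same test: the per-CPU stop task is pending when rq->stop is set and that task is queued (rq->stop->on_rq). As a minimal stand-alone sketch of that condition, using simplified stand-in types rather than the kernel's own, it could be written as:

#include <stdbool.h>

/* Simplified stand-ins for the kernel's task_struct and rq. */
struct task { bool on_rq; };
struct rq   { struct task *stop; };

/*
 * The condition this patch adds in deadline.c, fair.c and rt.c: the stop
 * task exists and is currently queued, so it must win over whatever class
 * we are about to pick from.
 */
static inline bool stop_task_pending(const struct rq *rq)
{
	return rq->stop && rq->stop->on_rq;
}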
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched/deadline.c   11
-rw-r--r--   kernel/sched/fair.c        3
-rw-r--r--   kernel/sched/rt.c          7
3 files changed, 16 insertions, 5 deletions
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 27ef4092552..b08095786cb 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1021,8 +1021,17 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 	dl_rq = &rq->dl;
-	if (need_pull_dl_task(rq, prev))
+	if (need_pull_dl_task(rq, prev)) {
 		pull_dl_task(rq);
+		/*
+		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
+		 * means a stop task can slip in, in which case we need to
+		 * re-start task selection.
+		 */
+		if (rq->stop && rq->stop->on_rq)
+			return RETRY_TASK;
+	}
+
 	/*
 	 * When prev is DL, we may throttle it in put_prev_task().
 	 * So, we update time before we check for dl_nr_running.
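Returning RETRY_TASK asks the core scheduler to restart its walk over the scheduling classes from the top; both the deadline.c hunk above and the rt.c hunk below rely on this. A paraphrased, stand-alone model of the consumer loop (the real one is pick_next_task() in kernel/sched/core.c; names and types here are simplified stand-ins, not the kernel's):

#include <stddef.h>

struct task;
struct rq;

#define RETRY_TASK ((struct task *)-1L)

struct sched_class {
	const struct sched_class *next;	/* next lower-priority class */
	struct task *(*pick_next_task)(struct rq *rq, struct task *prev);
};

static struct task *
pick_next_task_model(const struct sched_class *highest, struct rq *rq,
		     struct task *prev)
{
	const struct sched_class *class;
	struct task *p;

again:
	for (class = highest; class; class = class->next) {
		p = class->pick_next_task(rq, prev);
		if (!p)
			continue;
		/*
		 * A higher-priority task (here, the stop task) slipped in
		 * while rq->lock was dropped: start over from the top.
		 */
		if (p == RETRY_TASK)
			goto again;
		return p;
	}
	return NULL;	/* in the kernel, the idle class always yields a task */
}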
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4f14a656a72..7570dd969c2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6728,7 +6728,8 @@ static int idle_balance(struct rq *this_rq)
 out:
 	/* Is there a task of a high priority class? */
 	if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
-	    (this_rq->dl.dl_nr_running ||
+	    ((this_rq->stop && this_rq->stop->on_rq) ||
+	     this_rq->dl.dl_nr_running ||
 	     (this_rq->rt.rt_nr_running && !rt_rq_throttled(&this_rq->rt))))
 		pulled_task = -1;
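Unlike the deadline and rt pickers, idle_balance() cannot return RETRY_TASK itself; in the kernel of this era it reports the situation as pulled_task = -1, and pick_next_task_fair() treats a negative idle_balance() result as RETRY_TASK. A compressed, stand-alone model of that propagation (illustrative stand-ins, not the kernel's exact code):

#include <stddef.h>

struct task;

#ifndef RETRY_TASK
#define RETRY_TASK ((struct task *)-1L)	/* as in the sketch above */
#endif

/*
 * Stand-in for idle_balance(): -1 when a task of a higher class (stop, dl,
 * or unthrottled rt) appeared while rq->lock was dropped, otherwise the
 * number of fair tasks pulled.
 */
static int idle_balance_model(int higher_class_pending, int pulled)
{
	return higher_class_pending ? -1 : pulled;
}

/* Stand-in for the tail of pick_next_task_fair()'s idle path. */
static struct task *fair_idle_path_model(int higher_class_pending, int pulled)
{
	int new_tasks = idle_balance_model(higher_class_pending, pulled);

	if (new_tasks < 0)
		return RETRY_TASK;	/* re-start class selection */

	return NULL;	/* nothing pulled: go idle (simplified) */
}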
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index d8cdf161855..bd2267ad404 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1362,10 +1362,11 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 		pull_rt_task(rq);
 		/*
 		 * pull_rt_task() can drop (and re-acquire) rq->lock; this
-		 * means a dl task can slip in, in which case we need to
-		 * re-start task selection.
+		 * means a dl or stop task can slip in, in which case we need
+		 * to re-start task selection.
 		 */
-		if (unlikely(rq->dl.dl_nr_running))
+		if (unlikely((rq->stop && rq->stop->on_rq) ||
+			     rq->dl.dl_nr_running))
 			return RETRY_TASK;
 	}