author     Paul Turner <pjt@google.com>    2011-07-21 09:43:27 -0700
committer  Ingo Molnar <mingo@elte.hu>     2011-08-14 12:01:13 +0200
commit     953bfcd10e6f3697233e8e5128c611d275da39c1
tree       a3ca8136bb9e992bb40945c5eee2a8dcc0fd0b57 /kernel/sched_fair.c
parent     5710f15b52664ae0bfa60a66d75464769d297b2b
sched: Implement hierarchical task accounting for SCHED_OTHER
Introduce hierarchical task accounting for the group scheduling case in CFS,
as well as promoting the responsibility for maintaining rq->nr_running to
the scheduling classes.

The primary motivation for this is that with scheduling classes supporting
bandwidth throttling it is possible for entities participating in throttled
sub-trees to not have root visible changes in rq->nr_running across activate
and de-activate operations.  This in turn leads to incorrect idle and
weight-per-task load balance decisions.

This also allows us to make a small fixlet to the fastpath in
pick_next_task() under group scheduling.

Note: this issue also exists with the existing sched_rt throttling mechanism.
This patch does not address that.

Signed-off-by: Paul Turner <pjt@google.com>
Reviewed-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110721184756.878333391@google.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c | 6
1 file changed, 6 insertions(+), 0 deletions(-)
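For illustration only, below is a minimal user-space sketch of the accounting
scheme this patch introduces: every group level keeps a hierarchical runnable
count, enqueueing a task walks up the parent chain, and ancestors that are
already enqueued still have their count bumped.  This is not the kernel code;
the names used here (struct group, queued, enqueue_task) are hypothetical
stand-ins for cfs_rq, se->on_rq / cfs_rq->load.weight, and enqueue_task_fair()
in the diff that follows.

/*
 * Hypothetical user-space model of hierarchical task accounting.
 * Each level keeps h_nr_running for its whole subtree; the walk
 * mirrors the two loops in enqueue_task_fair() below: the first
 * loop enqueues levels that were empty, the second only bumps the
 * count on ancestors that were already enqueued.
 */
#include <stdio.h>

struct group {
	struct group *parent;
	int h_nr_running;	/* runnable tasks in this subtree */
	int queued;		/* already enqueued in its parent? */
};

static void enqueue_task(struct group *g)
{
	/* Enqueue empty levels, counting the newly runnable task. */
	for (; g && !g->queued; g = g->parent) {
		g->queued = 1;
		g->h_nr_running++;
	}
	/* Already-queued ancestors still gain one runnable task. */
	for (; g; g = g->parent)
		g->h_nr_running++;
}

int main(void)
{
	struct group root = { .parent = NULL };
	struct group child = { .parent = &root };

	enqueue_task(&child);	/* first task wakes in the child group */
	enqueue_task(&child);	/* second task: only the counts move */
	printf("child=%d root=%d\n", child.h_nr_running, root.h_nr_running);
	return 0;
}

This prints "child=2 root=2".  In the real patch the analogue of the root
counter is rq->nr_running, updated via inc_nr_running()/dec_nr_running() at
the end of the enqueue and dequeue paths shown below.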
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f4b732a3552b..f86b0cb5eb29 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1310,16 +1310,19 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
break;
cfs_rq = cfs_rq_of(se);
enqueue_entity(cfs_rq, se, flags);
+ cfs_rq->h_nr_running++;
flags = ENQUEUE_WAKEUP;
}
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
+ cfs_rq->h_nr_running++;
update_cfs_load(cfs_rq, 0);
update_cfs_shares(cfs_rq);
}
+ inc_nr_running(rq);
hrtick_update(rq);
}
@@ -1339,6 +1342,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
dequeue_entity(cfs_rq, se, flags);
+ cfs_rq->h_nr_running--;
/* Don't dequeue parent if it has other entities besides us */
if (cfs_rq->load.weight) {
@@ -1358,11 +1362,13 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
+ cfs_rq->h_nr_running--;
update_cfs_load(cfs_rq, 0);
update_cfs_shares(cfs_rq);
}
+ dec_nr_running(rq);
hrtick_update(rq);
}