author    Linus Torvalds <torvalds@linux-foundation.org>    2012-12-14 07:20:43 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2012-12-14 07:20:43 -0800
commit    17bc14b767cf0692420c43dbe5310ae98a5a7836 (patch)
tree      9b0f339e5d9769a51acdeb7a246f6e7522efa966
parent    7313264b899bbf3988841296265a6e0e8a7b6521 (diff)
Revert "sched: Update_cfs_shares at period edge"
This reverts commit f269ae0469fc882332bdfb5db15d3c1315fe2a10.

It turns out it causes a very noticeable interactivity regression with
CONFIG_SCHED_AUTOGROUP (test-case: "make -j32" of the kernel in a terminal
window, while scrolling in a browser - the autogrouping means that the two
end up in separate cgroups, and the browser should be smooth as silk
despite the high load).

Says Paul Turner:

 "It seems that the update-throttling on the wake-side is reducing the
  interactive tasks' ability to preempt. While I suspect the right longer
  term answer here is force these updates only in the cross-cgroup case;
  this is less trivial. For this release I believe the right answer is
  either going to be a revert or restore the updates on the enqueue-side."

Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Bisected-by: Mike Galbraith <efault@gmx.de>
Acked-by: Paul Turner <pjt@google.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
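In effect, the revert takes update_cfs_shares() back out of the once-per-period
update_cfs_rq_blocked_load() path and calls it again directly on every enqueue
and dequeue. As a reading aid, here is a condensed sketch of the enqueue path
as it looks with this revert applied, pieced together from the hunks below;
most of the surrounding logic is omitted and the ordering comments are an
interpretation, not text from the commit:

/* enqueue_entity(), condensed: call ordering restored by the revert */
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	/* Update run-time statistics of the 'current'. */
	update_curr(cfs_rq);
	enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
	account_entity_enqueue(cfs_rq, se);	/* add the entity's weight first ...   */
	update_cfs_shares(cfs_rq);		/* ... so the shares update sees it     */

	if (flags & ENQUEUE_WAKEUP) {
		place_entity(cfs_rq, se, 0);
		/* ... */
	}
	/* ... */
}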
-rw-r--r--	kernel/sched/fair.c	18
1 file changed, 8 insertions, 10 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 59e072b2db9..756f9f9e854 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1265,7 +1265,6 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
}
__update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
- update_cfs_shares(cfs_rq);
}
static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
@@ -1475,8 +1474,9 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
- account_entity_enqueue(cfs_rq, se);
enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
+ account_entity_enqueue(cfs_rq, se);
+ update_cfs_shares(cfs_rq);
if (flags & ENQUEUE_WAKEUP) {
place_entity(cfs_rq, se, 0);
@@ -1549,6 +1549,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
+ dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
update_stats_dequeue(cfs_rq, se);
if (flags & DEQUEUE_SLEEP) {
@@ -1568,8 +1569,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
+ se->on_rq = 0;
account_entity_dequeue(cfs_rq, se);
- dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
/*
* Normalize the entity after updating the min_vruntime because the
@@ -1583,7 +1584,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
return_cfs_rq_runtime(cfs_rq);
update_min_vruntime(cfs_rq);
- se->on_rq = 0;
+ update_cfs_shares(cfs_rq);
}
/*
@@ -2595,8 +2596,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq_throttled(cfs_rq))
break;
+ update_cfs_shares(cfs_rq);
update_entity_load_avg(se, 1);
- update_cfs_rq_blocked_load(cfs_rq, 0);
}
if (!se) {
@@ -2656,8 +2657,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq_throttled(cfs_rq))
break;
+ update_cfs_shares(cfs_rq);
update_entity_load_avg(se, 1);
- update_cfs_rq_blocked_load(cfs_rq, 0);
}
if (!se) {
@@ -5837,11 +5838,8 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
se = tg->se[i];
/* Propagate contribution to hierarchy */
raw_spin_lock_irqsave(&rq->lock, flags);
- for_each_sched_entity(se) {
+ for_each_sched_entity(se)
update_cfs_shares(group_cfs_rq(se));
- /* update contribution to parent */
- update_entity_load_avg(se, 1);
- }
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
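For reference, the dequeue side after the revert, pieced together from the
three dequeue_entity() hunks above (again a condensed sketch, not the full
function): the load-average update moves to the top, se->on_rq is cleared
before the weight is removed, and the shares update comes last, mirroring
the enqueue path:

/* dequeue_entity(), condensed: call ordering restored by the revert */
static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	/* Update run-time statistics of the 'current'. */
	update_curr(cfs_rq);
	dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);

	update_stats_dequeue(cfs_rq, se);
	/* ... */

	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);
	se->on_rq = 0;
	account_entity_dequeue(cfs_rq, se);

	/* ... */
	return_cfs_rq_runtime(cfs_rq);

	update_min_vruntime(cfs_rq);
	update_cfs_shares(cfs_rq);
}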