aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDietmar Eggemann <dietmar.eggemann@arm.com>2015-01-20 15:43:04 +0000
committerRobin Randhawa <robin.randhawa@arm.com>2015-04-09 12:26:15 +0100
commit93859f6416c77c358d316e1b291524bd3ec0316d (patch)
tree43683884f3c1ee65666b0b477c4bc501ba2cef67
parentdd28dc5ee35875b167efb1ee8ece7318556e984b (diff)
downloadkernel-93859f6416c77c358d316e1b291524bd3ec0316d.tar.gz
sched: Tipping point from energy-aware to conventional load balancing
Energy-aware load balancing is based on cpu usage, so the upper bound of its operational range is a fully utilized cpu. Above this tipping point it makes more sense to use weighted_cpuload to preserve smp_nice. This patch implements the tipping-point detection in update_sg_lb_stats: if one cpu is over-utilized, the current energy-aware load balance operation falls back into the conventional weighted-load-based one. cc: Ingo Molnar <mingo@redhat.com> cc: Peter Zijlstra <peterz@infradead.org> Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
-rw-r--r--kernel/sched/fair.c4
1 file changed, 4 insertions, 0 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 09c188d..42cd7cb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6271,6 +6271,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
sgs->sum_weighted_load += weighted_cpuload(i);
if (idle_cpu(i))
sgs->idle_cpus++;
+
+ /* If cpu is over-utilized, bail out of ea */
+ if (env->use_ea && cpu_overutilized(i, env->sd))
+ env->use_ea = false;
}
/* Adjust by relative CPU capacity of the group */