author     Nicolas Pitre <nicolas.pitre@linaro.org>  2014-12-03 15:35:02 +0800
committer  Alex Shi <alex.shi@linaro.org>  2014-12-03 15:38:44 +0800
commit     201ba1d3817cd48c4e1b89df9af246de1cd1e7f7 (patch)
tree       1c224db3b7d01af9463b4be5fed09fca720bed09
parent     57b084d607a0a27c1dc08372b7da788c5711a6cb (diff)
sched/fair: Change "has_capacity" to "has_free_capacity"
The capacity of a CPU/group should be some intrinsic value that doesn't
change with task placement. It is like a container whose capacity is
stable regardless of the amount of liquid in it (its "utilization")...
unless the container itself is crushed, that is, but that's another
story. Therefore let's rename "has_capacity" to "has_free_capacity" in
order to better convey the intended meaning.

Alex removed the NUMA part of the changes.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: linaro-kernel@lists.linaro.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/n/tip-djzkk027jm0e8x8jxy70opzh@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(rework commit from 1b6a749)
Signed-off-by: Alex Shi <alex.shi@linaro.org>
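For illustration, here is a minimal standalone C sketch of the semantics behind the rename, distilled from the update_sg_lb_stats() hunk below. The struct and update_stats() helper are hypothetical simplifications, not the kernel's actual types: the point is that group_capacity is intrinsic, while the renamed flag reports whether any of that capacity is currently free.

	/* Standalone sketch (not kernel code): capacity is a fixed property
	 * of the group; "free" capacity depends on current task placement. */
	#include <stdio.h>

	struct sg_stats {
		unsigned int group_capacity;    /* task slots the group provides */
		unsigned int sum_nr_running;    /* tasks currently placed in it  */
		int group_has_free_capacity;    /* any capacity left over?       */
	};

	static void update_stats(struct sg_stats *sgs)
	{
		/* Mirrors the condition in update_sg_lb_stats() below: the
		 * flag is set only when intrinsic capacity exceeds load. */
		sgs->group_has_free_capacity =
			sgs->group_capacity > sgs->sum_nr_running;
	}

	int main(void)
	{
		struct sg_stats sgs = { .group_capacity = 4, .sum_nr_running = 3 };

		update_stats(&sgs);
		printf("has_free_capacity=%d\n", sgs.group_has_free_capacity); /* 1 */

		sgs.sum_nr_running = 4;         /* "fill" the container... */
		update_stats(&sgs);
		printf("has_free_capacity=%d\n", sgs.group_has_free_capacity); /* 0 */

		return 0;
	}

Note that nothing in update_stats() ever modifies group_capacity itself, which is exactly the distinction the rename is meant to make explicit.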
kernel/sched/fair.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1a5d140b751..15a9781aff9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4401,7 +4401,7 @@ struct sg_lb_stats {
unsigned int idle_cpus;
unsigned int group_weight;
int group_imb; /* Is there an imbalance in the group ? */
- int group_has_capacity; /* Is there extra capacity in the group? */
+ int group_has_free_capacity;
};
/*
@@ -4755,7 +4755,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
sgs->group_capacity = sg_capacity(env, group);
if (sgs->group_capacity > sgs->sum_nr_running)
- sgs->group_has_capacity = 1;
+ sgs->group_has_free_capacity = 1;
}
/**
@@ -4850,7 +4850,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
* with a large weight task outweighs the tasks on the system).
*/
if (prefer_sibling && sds->local &&
- sds->local_stat.group_has_capacity)
+ sds->local_stat.group_has_free_capacity)
sgs->group_capacity = min(sgs->group_capacity, 1U);
if (update_sd_pick_busiest(env, sds, sg, sgs)) {
@@ -5109,8 +5109,8 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
goto force_balance;
/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
- if (env->idle == CPU_NEWLY_IDLE && local->group_has_capacity &&
- !busiest->group_has_capacity)
+ if (env->idle == CPU_NEWLY_IDLE && local->group_has_free_capacity &&
+ !busiest->group_has_free_capacity)
goto force_balance;
/*