author | Vincent Guittot <vincent.guittot@linaro.org> | 2019-09-06 16:57:22 +0200
---|---|---
committer | Vincent Guittot <vincent.guittot@linaro.org> | 2019-09-06 16:57:22 +0200
commit | 4c2aaf2cfd8cb8b881e6150530e5f8409c0524d1 |
tree | 18298034b92db9af0c9a2e493b48087f06307540 |
parent | a2e62a48b8c80adc872a18936169c77c9192ac2d |
sched/fair: optimize wake_affine_weight (sched-rework-load_balance-v3)
wake_affine_weight() currently reads the CPU's load_avg in two different ways.
Consolidate the function so that load_avg is read and used only once.
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
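The same consolidation pattern is what the diff below applies to the accumulation loop in find_idlest_group(): the per-group tally used to sum two load signals for every CPU (a runnable load and a blocked-average load), and after the patch it sums a single cpu_load() reading. The following is a minimal standalone sketch of that before/after, with plain arrays standing in for the per-CPU runqueues; it illustrates only the accumulation change and is not kernel code:

```c
#include <stdio.h>

/*
 * Illustration only: plain arrays stand in for the per-CPU runqueues.
 * Before the patch, the group tally maintained two sums per CPU; after
 * it, a single cpu_load()-style reading is accumulated.
 */
int main(void)
{
	unsigned long cpu_runnable[] = { 300, 500, 200 };	/* ~cpu_load()        */
	unsigned long cpu_blocked[]  = { 120,  80,  40 };	/* ~cfs_rq_load_avg() */
	unsigned long runnable_load = 0, avg_load = 0, load = 0;
	int i;

	/* Old accumulation: two sums maintained in the same loop. */
	for (i = 0; i < 3; i++) {
		runnable_load += cpu_runnable[i];
		avg_load += cpu_blocked[i];
	}

	/* New accumulation: one load signal, read once per CPU. */
	for (i = 0; i < 3; i++)
		load += cpu_runnable[i];

	printf("old: runnable=%lu avg=%lu, new: load=%lu\n",
	       runnable_load, avg_load, load);
	return 0;
}
```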
-rw-r--r-- | kernel/sched/fair.c | 48 |
1 file changed, 9 insertions(+), 39 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 91946588dff5..cd99d177bd0e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5620,16 +5620,14 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 {
 	struct sched_group *idlest = NULL, *group = sd->groups;
 	struct sched_group *most_spare_sg = NULL;
-	unsigned long min_runnable_load = ULONG_MAX;
-	unsigned long this_runnable_load = ULONG_MAX;
-	unsigned long min_avg_load = ULONG_MAX, this_avg_load = ULONG_MAX;
+	unsigned long min_load = ULONG_MAX, this_load = ULONG_MAX;
 	unsigned long most_spare = 0, this_spare = 0;
 	int imbalance_scale = 100 + (sd->imbalance_pct-100)/2;
 	unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
 				(sd->imbalance_pct-100) / 100;
 
 	do {
-		unsigned long load, avg_load, runnable_load;
+		unsigned long load;
 		unsigned long spare_cap, max_spare_cap;
 		int local_group;
 		int i;
@@ -5646,15 +5644,11 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		 * Tally up the load of all CPUs in the group and find
 		 * the group containing the CPU with most spare capacity.
 		 */
-		avg_load = 0;
-		runnable_load = 0;
+		load = 0;
 		max_spare_cap = 0;
 
 		for_each_cpu(i, sched_group_span(group)) {
-			load = cpu_load(cpu_rq(i));
-			runnable_load += load;
-
-			avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
+			load += cpu_load(cpu_rq(i));
 
 			spare_cap = capacity_spare_without(i, p);
 
@@ -5663,31 +5657,15 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		}
 
 		/* Adjust by relative CPU capacity of the group */
-		avg_load = (avg_load * SCHED_CAPACITY_SCALE) /
-				group->sgc->capacity;
-		runnable_load = (runnable_load * SCHED_CAPACITY_SCALE) /
+		load = (load * SCHED_CAPACITY_SCALE) /
 				group->sgc->capacity;
 
 		if (local_group) {
-			this_runnable_load = runnable_load;
-			this_avg_load = avg_load;
+			this_load = load;
 			this_spare = max_spare_cap;
 		} else {
-			if (min_runnable_load > (runnable_load + imbalance)) {
-				/*
-				 * The runnable load is significantly smaller
-				 * so we can pick this new CPU:
-				 */
-				min_runnable_load = runnable_load;
-				min_avg_load = avg_load;
-				idlest = group;
-			} else if ((runnable_load < (min_runnable_load + imbalance)) &&
-				   (100*min_avg_load > imbalance_scale*avg_load)) {
-				/*
-				 * The runnable loads are close so take the
-				 * blocked load into account through avg_load:
-				 */
-				min_avg_load = avg_load;
+			if (load < min_load) {
+				min_load = load;
 				idlest = group;
 			}
 
@@ -5731,15 +5709,7 @@ skip_spare:
 	 * imbalance to the runnable load on the remote node and consider
 	 * staying local.
 	 */
-	if ((sd->flags & SD_NUMA) &&
-	    min_runnable_load + imbalance >= this_runnable_load)
-		return NULL;
-
-	if (min_runnable_load > (this_runnable_load + imbalance))
-		return NULL;
-
-	if ((this_runnable_load < (min_runnable_load + imbalance)) &&
-	     (100*this_avg_load < imbalance_scale*min_avg_load))
+	if (min_load + imbalance >= this_load)
 		return NULL;
 
 	return idlest;
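Condensed, the selection logic that remains after this patch sums one load signal per group, scales it by the group's capacity, and keeps the least-loaded remote group only if it beats the local group by more than the imbalance margin. The sketch below is a simplified, standalone illustration of that flow; struct cpu, struct group and find_idlest() are made-up stand-ins, and the spare-capacity and NUMA handling of the real find_idlest_group() are omitted:

```c
#include <stdio.h>
#include <limits.h>

/* Simplified stand-ins for the kernel structures (illustration only). */
struct cpu {
	unsigned long load;		/* analogous to cpu_load(cpu_rq(i)) */
};

struct group {
	struct cpu *cpus;
	int nr_cpus;
	unsigned long capacity;		/* analogous to group->sgc->capacity */
	int is_local;			/* 1 for the group containing the waking CPU */
};

#define SCHED_CAPACITY_SCALE	1024UL

/*
 * Sum one load signal per CPU and normalise by group capacity,
 * mirroring the accumulation loop in the patched find_idlest_group().
 */
static unsigned long group_load(const struct group *g)
{
	unsigned long load = 0;
	int i;

	for (i = 0; i < g->nr_cpus; i++)
		load += g->cpus[i].load;

	return (load * SCHED_CAPACITY_SCALE) / g->capacity;
}

/*
 * Pick the least-loaded remote group, but return NULL (stay local) when
 * the best remote group is not better than the local one by more than
 * the imbalance margin -- the single comparison left after the patch.
 */
static const struct group *find_idlest(const struct group *groups, int nr,
				       unsigned long imbalance)
{
	const struct group *idlest = NULL;
	unsigned long min_load = ULONG_MAX, this_load = ULONG_MAX;
	int i;

	for (i = 0; i < nr; i++) {
		unsigned long load = group_load(&groups[i]);

		if (groups[i].is_local) {
			this_load = load;
		} else if (load < min_load) {
			min_load = load;
			idlest = &groups[i];
		}
	}

	if (min_load + imbalance >= this_load)
		return NULL;

	return idlest;
}

int main(void)
{
	struct cpu local_cpus[2] = { { 900 }, { 800 } };
	struct cpu remote_cpus[2] = { { 100 }, { 200 } };
	struct group groups[2] = {
		{ local_cpus, 2, 2 * SCHED_CAPACITY_SCALE, 1 },
		{ remote_cpus, 2, 2 * SCHED_CAPACITY_SCALE, 0 },
	};
	const struct group *g = find_idlest(groups, 2, 128);

	printf("%s\n", g ? "picked remote group" : "staying local");
	return 0;
}
```

With the sample values, the remote group's scaled load (150) plus the margin (128) is still below the local group's load (850), so the remote group is returned; had the two been within the margin, the function would return NULL and the task would stay local, matching the single comparison that ends the patched function.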