author     Juri Lelli <juri.lelli@arm.com>    2015-06-25 14:37:27 +0100
committer  Juri Lelli <juri.lelli@arm.com>    2015-10-05 12:09:58 +0100
commit     d0cfe4a7fe1f81765dfa9d849851f7004050512b (patch)
tree       169d600a5e7cfa2d05aa13fd451c642b308a24f3
parent     5f6844c832410b8d4a3612b6a9a3df9e0cda8bd3 (diff)
download   linux-linaro-stable-d0cfe4a7fe1f81765dfa9d849851f7004050512b.tar.gz
FROMLIST: sched/fair: cpufreq_sched triggers for load balancing
As we don't trigger frequency changes from {en,de}queue_task_fair() during
load balancing, we need to do so explicitly on the load-balancing paths.
cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Juri Lelli <juri.lelli@arm.com>
(am from https://patchwork.kernel.org/patch/6737901)
Signed-off-by: Juri Lelli <juri.lelli@arm.com>
Change-Id: I43466dfc1b4d93998ada9038a9f9ed14892c2a84
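
The change repeats one pattern at every load-balancing site touched below:
read the CPU's current usage, scale it by the capacity margin, and hand the
result to the scheduler-driven cpufreq governor. As an illustrative sketch
only (not part of this patch), assuming the sched_energy_freq(),
get_cpu_usage() and cpufreq_sched_set_cap() helpers plus the capacity_margin
factor introduced earlier in this series, the step each hunk open-codes
looks roughly like the hypothetical helper here:

/*
 * Illustrative sketch only, not part of this patch: the per-CPU OPP
 * request that each hunk below open-codes, using helpers assumed from
 * the sched-freq series (sched_energy_freq(), get_cpu_usage(),
 * cpufreq_sched_set_cap()) and the capacity_margin scaling factor.
 */
static inline void lb_request_cpu_capacity(int cpu)
{
	unsigned long req_cap;

	if (!sched_energy_freq())
		return;

	/* current usage plus ~20% head room, in SCHED_CAPACITY_SCALE units */
	req_cap = get_cpu_usage(cpu) * capacity_margin >> SCHED_CAPACITY_SHIFT;
	cpufreq_sched_set_cap(cpu, req_cap);
}

The patch itself keeps these requests open-coded at each call site rather
than introducing such a helper.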
-rw-r--r--  kernel/sched/fair.c | 64
1 file changed, 62 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e975cf2e1a47..a4cdc1026795 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7321,6 +7321,20 @@ more_balance:
 	 * ld_moved - cumulative load moved across iterations
 	 */
 	cur_ld_moved = detach_tasks(&env);
+	/*
+	 * We want to potentially update env.src_cpu's OPP.
+	 *
+	 * Add a margin (same ~20% used for the tipping point)
+	 * to our request to provide some head room for the remaining
+	 * tasks.
+	 */
+	if (sched_energy_freq() && cur_ld_moved) {
+		unsigned long req_cap = get_cpu_usage(env.src_cpu);
+
+		req_cap = req_cap * capacity_margin
+				>> SCHED_CAPACITY_SHIFT;
+		cpufreq_sched_set_cap(env.src_cpu, req_cap);
+	}
 
 	/*
 	 * We've detached some tasks from busiest_rq. Every
@@ -7335,6 +7349,21 @@ more_balance:
 	if (cur_ld_moved) {
 		attach_tasks(&env);
 		ld_moved += cur_ld_moved;
+		/*
+		 * We want to potentially update env.dst_cpu's OPP.
+		 *
+		 * Add a margin (same ~20% used for the tipping point)
+		 * to our request to provide some head room if p's
+		 * utilization further increases.
+		 */
+		if (sched_energy_freq()) {
+			unsigned long req_cap =
+				get_cpu_usage(env.dst_cpu);
+
+			req_cap = req_cap * capacity_margin
+					>> SCHED_CAPACITY_SHIFT;
+			cpufreq_sched_set_cap(env.dst_cpu, req_cap);
+		}
 	}
 
 	local_irq_restore(flags);
@@ -7694,8 +7723,24 @@ static int active_load_balance_cpu_stop(void *data)
 		schedstat_inc(sd, alb_count);
 
 		p = detach_one_task(&env);
-		if (p)
+		if (p) {
 			schedstat_inc(sd, alb_pushed);
-		else
+			/*
+			 * We want to potentially update env.src_cpu's OPP.
+			 *
+			 * Add a margin (same ~20% used for the tipping point)
+			 * to our request to provide some head room for the
+			 * remaining task.
+			 */
+			if (sched_energy_freq()) {
+				unsigned long req_cap =
+					get_cpu_usage(env.src_cpu);
+
+				req_cap = req_cap * capacity_margin
+						>> SCHED_CAPACITY_SHIFT;
+				cpufreq_sched_set_cap(env.src_cpu, req_cap);
+			}
+		} else
 			schedstat_inc(sd, alb_failed);
 	}
@@ -7704,8 +7749,23 @@ out_unlock:
 	busiest_rq->active_balance = 0;
 	raw_spin_unlock(&busiest_rq->lock);
 
-	if (p)
+	if (p) {
 		attach_one_task(target_rq, p);
+		/*
+		 * We want to potentially update target_cpu's OPP.
+		 *
+		 * Add a margin (same ~20% used for the tipping point)
+		 * to our request to provide some head room if p's utilization
+		 * further increases.
+		 */
+		if (sched_energy_freq()) {
+			unsigned long req_cap = get_cpu_usage(target_cpu);
+
+			req_cap = req_cap * capacity_margin
+					>> SCHED_CAPACITY_SHIFT;
+			cpufreq_sched_set_cap(target_cpu, req_cap);
+		}
+	}
 
 	local_irq_enable();
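
For reference, the head-room arithmetic used at every site above can be
checked in isolation. The standalone worked example below assumes the
capacity_margin value of 1280 used elsewhere in this series and the core
scheduler's SCHED_CAPACITY_SHIFT of 10 (SCHED_CAPACITY_SCALE == 1024);
neither value appears in this patch itself.

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10	/* SCHED_CAPACITY_SCALE == 1024 */
#define CAPACITY_MARGIN		1280	/* assumed ~20% tipping-point margin */

int main(void)
{
	unsigned long usage = 600;	/* example per-CPU usage, 0..1024 */
	unsigned long req_cap = usage * CAPACITY_MARGIN >> SCHED_CAPACITY_SHIFT;

	/*
	 * 600 * 1280 >> 10 = 750: the request is ~25% above current usage,
	 * so usage sits at ~80% of the requested capacity (~20% head room).
	 */
	printf("req_cap = %lu\n", req_cap);
	return 0;
}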