commit    e482d95c1d1888f34cc3f7e6778806cfda6174ff (big-LITTLE-MP-14.08)
author    Chris Redpath <chris.redpath@arm.com>  2014-08-12 14:50:55 +0100
committer Jon Medhurst <tixy@linaro.org>         2014-08-12 17:46:58 +0100
tree      bd4d714e7d094ac80a5208f57919d6690cf8c69d
parent    f83262408293795e5186e9d1bf66d525b24fdb12

hmp: Restrict ILB events if no CPU has > 1 task
Frequently in HMP, the big CPUs are only active with one task per CPU and there may be idle CPUs in the big cluster.

This patch avoids triggering an idle balance in situations where none of the active CPUs in the current HMP domain have > 1 task running.

When packing is enabled, this behaviour is only enforced when we are not in the smallest domain - there we idle balance whenever a CPU is over the up_threshold, regardless of task counts, in case one needs to be moved.

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
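To make the decision concrete, here is a minimal userspace C sketch of the gating logic described above. This is not kernel code: NR_CPUS, domain, idle and nr_running are illustrative stand-ins for the kernel's cpumasks and per-runqueue nr_running, not actual kernel APIs.

/*
 * Minimal userspace sketch of the ILB gating logic -- NOT kernel code.
 * All names below are illustrative stand-ins for kernel state.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/* Would an idle balance in this domain find anything to pull? */
static bool ilb_needed(const bool domain[], const bool idle[],
                       const int nr_running[])
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                /* only busy CPUs inside the domain are candidates */
                if (domain[cpu] && !idle[cpu] && nr_running[cpu] > 1)
                        return true;    /* a task could be migrated */
        }
        return false;   /* every busy CPU runs at most one task */
}

int main(void)
{
        /* big cluster = CPUs 4-7; CPU 5 idle; one task per busy CPU */
        bool domain[NR_CPUS]     = { [4] = true, [5] = true, [6] = true, [7] = true };
        bool idle[NR_CPUS]       = { [5] = true };
        int  nr_running[NR_CPUS] = { [4] = 1, [6] = 1, [7] = 1 };

        printf("ilb needed: %d\n", ilb_needed(domain, idle, nr_running)); /* 0 */

        nr_running[6] = 2;      /* CPU 6 now has a surplus task */
        printf("ilb needed: %d\n", ilb_needed(domain, idle, nr_running)); /* 1 */
        return 0;
}

The diff below implements the same test in the kernel by computing the domain's busy CPUs with cpumask_andnot() (domain CPUs minus the idle mask) and scanning their runqueues.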
 kernel/sched/fair.c | 27 +++++++++++++++++++++++------
 1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 74a5adfefeb..fd57f0be5b4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6510,16 +6510,16 @@ static int nohz_test_cpu(int cpu)
  * Decide if the tasks on the busy CPUs in the
  * littlest domain would benefit from an idle balance
  */
-static int hmp_packing_ilb_needed(int cpu)
+static int hmp_packing_ilb_needed(int cpu, int ilb_needed)
 {
 	struct hmp_domain *hmp;
-	/* always allow ilb on non-slowest domain */
+	/* allow previous decision on non-slowest domain */
 	if (!hmp_cpu_is_slowest(cpu))
-		return 1;
+		return ilb_needed;
 
 	/* if disabled, use normal ILB behaviour */
 	if (!hmp_packing_enabled)
-		return 1;
+		return ilb_needed;
 
 	hmp = hmp_cpu_domain(cpu);
 	for_each_cpu_and(cpu, &hmp->cpus, nohz.idle_cpus_mask) {
@@ -6531,19 +6531,34 @@ static int hmp_packing_ilb_needed(int cpu)
 }
 #endif
 
+DEFINE_PER_CPU(cpumask_var_t, ilb_tmpmask);
+
 static inline int find_new_ilb(int call_cpu)
 {
 	int ilb = cpumask_first(nohz.idle_cpus_mask);
 #ifdef CONFIG_SCHED_HMP
-	int ilb_needed = 1;
+	int ilb_needed = 0;
+	int cpu;
+	struct cpumask* tmp = per_cpu(ilb_tmpmask, smp_processor_id());
 
 	/* restrict nohz balancing to occur in the same hmp domain */
 	ilb = cpumask_first_and(nohz.idle_cpus_mask,
 			&((struct hmp_domain *)hmp_cpu_domain(call_cpu))->cpus);
+	/* check to see if it's necessary within this domain */
+	cpumask_andnot(tmp,
+			&((struct hmp_domain *)hmp_cpu_domain(call_cpu))->cpus,
+			nohz.idle_cpus_mask);
+	for_each_cpu(cpu, tmp) {
+		if (cpu_rq(cpu)->nr_running > 1) {
+			ilb_needed = 1;
+			break;
+		}
+	}
+
 #ifdef CONFIG_SCHED_HMP_LITTLE_PACKING
 	if (ilb < nr_cpu_ids)
-		ilb_needed = hmp_packing_ilb_needed(ilb);
+		ilb_needed = hmp_packing_ilb_needed(ilb, ilb_needed);
 #endif
 
 	if (ilb_needed && ilb < nr_cpu_ids && idle_cpu(ilb))
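A side note on the new per-CPU temporary mask: DEFINE_PER_CPU(cpumask_var_t, ilb_tmpmask) only declares storage, and with CONFIG_CPUMASK_OFFSTACK=y each cpumask_var_t must additionally be allocated before first use (without OFFSTACK it is a plain embedded bitmap and needs no allocation). Any such allocation site lies outside the hunks shown here, so the early-init sketch below is an assumption about what that setup could look like, not code from this patch.

/*
 * Hypothetical allocation for the per-CPU temporary mask -- an
 * assumption, not part of the patch above. Only required when
 * CONFIG_CPUMASK_OFFSTACK=y; assumes the usual <linux/cpumask.h>
 * and <linux/init.h> context of kernel/sched/fair.c.
 */
static int __init ilb_tmpmask_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (!zalloc_cpumask_var(&per_cpu(ilb_tmpmask, cpu), GFP_KERNEL))
			return -ENOMEM;
	}
	return 0;
}
early_initcall(ilb_tmpmask_init);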