author    Chris Redpath <chris.redpath@arm.com>    2013-05-16 17:48:24 +0100
committer Jon Medhurst <tixy@linaro.org>           2013-07-17 11:12:26 +0100
commit    b64cc6f7e54b97536dbecc05d193b31b27feecf1 (patch)
tree      f1000fa95e5501404f6369343178d43d95434df3
parent    d2c920023cbc456414f8e07ff253a89be535b41b (diff)
sched: Ignore offline CPUs in HMP migration & load stats (master-task-placement-v2)

Previously, an offline CPU would always appear to have a zero load and this
would distort the offload functionality used for balancing big and little
domains. Maintain a mask of online CPUs in each domain and use this instead.

Change-Id: I639b564b2f40cb659af8ceb8bd37f84b8a1fe323
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
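The scheduler code that consumes these masks for load statistics is not part of
this diff. The stand-alone sketch below (plain user-space C, illustrative names
only, not kernel code) shows why averaging load over only the online CPUs of a
domain avoids the zero-load distortion described above: if the average were
taken over all possible CPUs, hotplugged-off CPUs would contribute phantom
zero loads and make the domain look less busy than it is.

/*
 * Stand-alone illustration (not kernel code) of the idea behind this
 * patch: each HMP domain keeps both the CPUs it could ever contain
 * (possible) and the subset that is currently online, and load
 * statistics are taken only over the online subset.
 */
#include <stdio.h>

struct demo_hmp_domain {
	unsigned long possible_cpus;	/* CPUs this domain may ever hold */
	unsigned long online_cpus;	/* subset that is currently online */
};

/* Hypothetical per-CPU load values, indexed by CPU number. */
static unsigned int cpu_load[8] = { 600, 550, 0, 0, 900, 0, 0, 0 };

/* Average load taken over the online CPUs of a domain only. */
static unsigned int domain_avg_load(const struct demo_hmp_domain *d)
{
	unsigned int sum = 0, nr = 0;
	int cpu;

	for (cpu = 0; cpu < 8; cpu++) {
		if (d->online_cpus & (1UL << cpu)) {
			sum += cpu_load[cpu];
			nr++;
		}
	}
	return nr ? sum / nr : 0;
}

int main(void)
{
	/* Big domain: CPUs 0-3 possible, but CPUs 2-3 hotplugged off. */
	struct demo_hmp_domain big = {
		.possible_cpus = 0x0f,
		.online_cpus   = 0x03,
	};

	/* Averaging over possible CPUs would dilute this with zeros. */
	printf("avg load over online CPUs: %u\n", domain_avg_load(&big));
	return 0;
}

The patch below follows the same split: hmp_domain gains a possible_cpus mask,
while the existing cpus mask is kept in sync with CPU hotplug via the
rq_online_fair()/rq_offline_fair() callbacks.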
-rw-r--r--  arch/arm/kernel/topology.c   6
-rw-r--r--  include/linux/sched.h        1
-rw-r--r--  kernel/sched/fair.c         39
3 files changed, 42 insertions(+), 4 deletions(-)
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index f2ca9e03080..9047dd1c5a1 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -383,12 +383,14 @@ void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
if(!cpumask_empty(&hmp_slow_cpu_mask)) {
domain = (struct hmp_domain *)
kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
- cpumask_copy(&domain->cpus, &hmp_slow_cpu_mask);
+ cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
+ cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
list_add(&domain->hmp_domains, hmp_domains_list);
}
domain = (struct hmp_domain *)
kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
- cpumask_copy(&domain->cpus, &hmp_fast_cpu_mask);
+ cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
+ cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
list_add(&domain->hmp_domains, hmp_domains_list);
}
#endif /* CONFIG_SCHED_HMP */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5e903596e48..0e2a546cdad 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -888,6 +888,7 @@ bool cpus_share_cache(int this_cpu, int that_cpu);
#ifdef CONFIG_SCHED_HMP
struct hmp_domain {
struct cpumask cpus;
+ struct cpumask possible_cpus;
struct list_head hmp_domains;
};
#endif /* CONFIG_SCHED_HMP */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3866dcc9972..10e7dbbbf83 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3381,10 +3381,10 @@ static int __init hmp_cpu_mask_setup(void)
dc = 0;
list_for_each(pos, &hmp_domains) {
domain = list_entry(pos, struct hmp_domain, hmp_domains);
- cpulist_scnprintf(buf, 64, &domain->cpus);
+ cpulist_scnprintf(buf, 64, &domain->possible_cpus);
pr_debug(" HMP domain %d: %s\n", dc, buf);
- for_each_cpu_mask(cpu, domain->cpus) {
+ for_each_cpu_mask(cpu, domain->possible_cpus) {
per_cpu(hmp_cpu_domain, cpu) = domain;
}
dc++;
@@ -3393,6 +3393,35 @@ static int __init hmp_cpu_mask_setup(void)
return 1;
}
+static struct hmp_domain *hmp_get_hmp_domain_for_cpu(int cpu)
+{
+ struct hmp_domain *domain;
+ struct list_head *pos;
+
+ list_for_each(pos, &hmp_domains) {
+ domain = list_entry(pos, struct hmp_domain, hmp_domains);
+ if(cpumask_test_cpu(cpu, &domain->possible_cpus))
+ return domain;
+ }
+ return NULL;
+}
+
+static void hmp_online_cpu(int cpu)
+{
+ struct hmp_domain *domain = hmp_get_hmp_domain_for_cpu(cpu);
+
+ if(domain)
+ cpumask_set_cpu(cpu, &domain->cpus);
+}
+
+static void hmp_offline_cpu(int cpu)
+{
+ struct hmp_domain *domain = hmp_get_hmp_domain_for_cpu(cpu);
+
+ if(domain)
+ cpumask_clear_cpu(cpu, &domain->cpus);
+}
+
/*
* Migration thresholds should be in the range [0..1023]
* hmp_up_threshold: min. load required for migrating tasks to a faster cpu
@@ -6190,11 +6219,17 @@ void trigger_load_balance(struct rq *rq, int cpu)
static void rq_online_fair(struct rq *rq)
{
+#ifdef CONFIG_SCHED_HMP
+ hmp_online_cpu(rq->cpu);
+#endif
update_sysctl();
}
static void rq_offline_fair(struct rq *rq)
{
+#ifdef CONFIG_SCHED_HMP
+ hmp_offline_cpu(rq->cpu);
+#endif
update_sysctl();
/* Ensure any throttled groups are reachable by pick_next_task */