author     Dietmar Eggemann <dietmar.eggemann@arm.com>   2013-06-21 17:50:08 +0100
committer  Jon Medhurst <tixy@linaro.org>                2013-07-17 11:32:41 +0100
commit     4ab2679351e9566a6b0822f2d841a902758ba066 (patch)
tree       9faea058d1beee4871c6115046bbedb21d93d659
parent     6eada0087366d8aec6bc38348a68f721f538cc5c (diff)
HMP: experimental: Force all rt tasks to start on little domain.
This patch restricts the allowed cpu mask of rt tasks that are initially started with a full cpu mask to the little domain. An rt task is marked as real time in __setscheduler(), which is eventually called for all rt tasks (kernel and user land); it is in this function that the allowed cpu mask is restricted to the little domain. This also prevents an rt task from later being pushed to the big domain, because find_lowest_rq() only considers a task's allowed cpu mask when choosing the new cpu the task will run on.

Current kludges of the patch:

* Since we do not have an API to get the cpu mask of the A7 cluster, hmp_slow_cpu_mask is made global in arch/arm/kernel/topology.c for now.

* watchdog_enable() calls sched_setscheduler() before kthread_bind() for the cpu-specific watchdog kernel threads. The order of these two calls has to be swapped to make this patch work.

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
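A minimal user-space check of the effect (a sketch, not part of this patch): switch the calling task to an rt policy and read back its affinity, which on a kernel built with CONFIG_SCHED_HMP and this change applied should cover only the little cpus. The policy change needs CAP_SYS_NICE (typically root).

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 1 };
	cpu_set_t mask;
	int cpu;

	/* Becoming SCHED_FIFO ends up in __setscheduler() in the kernel. */
	if (sched_setscheduler(0, SCHED_FIFO, &sp) != 0)
		perror("sched_setscheduler");

	/* With the patch, the reported mask should be the little cluster only. */
	if (sched_getaffinity(0, sizeof(mask), &mask) != 0)
		perror("sched_getaffinity");

	printf("allowed cpus:");
	for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
		if (CPU_ISSET(cpu, &mask))
			printf(" %d", cpu);
	printf("\n");

	return 0;
}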
-rw-r--r--  arch/arm/kernel/topology.c | 3 ++-
-rw-r--r--  kernel/sched/core.c        | 9 ++++++++-
2 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 9047dd1c5a1..4459c0b4e91 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -367,10 +367,11 @@ void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
cpumask_clear(slow);
}
+struct cpumask hmp_slow_cpu_mask;
+
void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
{
struct cpumask hmp_fast_cpu_mask;
- struct cpumask hmp_slow_cpu_mask;
struct hmp_domain *domain;
arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4c53da3781e..50d9e9849ce 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3827,6 +3827,8 @@ static struct task_struct *find_process_by_pid(pid_t pid)
return pid ? find_task_by_vpid(pid) : current;
}
+extern struct cpumask hmp_slow_cpu_mask;
+
/* Actually do priority change: must hold rq lock. */
static void
__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
@@ -3836,8 +3838,13 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
p->normal_prio = normal_prio(p);
/* we are holding p->pi_lock already */
p->prio = rt_mutex_getprio(p);
- if (rt_prio(p->prio))
+ if (rt_prio(p->prio)) {
p->sched_class = &rt_sched_class;
+#ifdef CONFIG_SCHED_HMP
+ if (cpumask_equal(&p->cpus_allowed, cpu_all_mask))
+ do_set_cpus_allowed(p, &hmp_slow_cpu_mask);
+#endif
+ }
else
p->sched_class = &fair_sched_class;
set_load_weight(p);