author		Chris Redpath <chris.redpath@arm.com>	2014-08-12 14:50:54 +0100
committer	Jon Medhurst <tixy@linaro.org>		2014-08-12 17:46:57 +0100
commit		f83262408293795e5186e9d1bf66d525b24fdb12 (patch)
tree		2a4cb0856459aad5c54c29e78ae7decb8801a75e
parent		65abdc9b50378783981ed2f3453a0aae090404e4 (diff)
HMP: Do not fork-boost tasks coming from PIDs <= 2
System services are generally started by init, whilst kernel threads are started by kthreadd. We do not want to give those tasks a head start, as this costs power for very little benefit. We do however wish to do that for tasks which the user launches.

Further, some tasks allocate per-cpu timers directly after launch, which can lead to those tasks always being scheduled on a big CPU when there is no computational need to do so. Not promoting services to big CPUs on launch will prevent that, unless a service allocates its per-cpu resources after a period of intense computation, which is not a common pattern.

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
 include/linux/sched.h | 8 ++++++++
 kernel/sched/core.c   | 6 +++---
 kernel/sched/fair.c   | 2 +-
 3 files changed, 12 insertions(+), 4 deletions(-)
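For illustration only, a minimal user-space model of the boost rule described above (a hypothetical standalone sketch, not part of the patch; the actual kernel predicate, added below as hmp_task_should_forkboost(), operates on struct task_struct):

	#include <stdbool.h>
	#include <sys/types.h>

	/* Children of init (PID 1) and kthreadd (PID 2) are treated as
	 * services or kernel threads and get no fork boost; anything
	 * forked by a later PID is assumed to be user-launched. */
	static bool should_forkboost(pid_t parent_pid)
	{
		return parent_pid > 2;
	}

So should_forkboost(1) and should_forkboost(2) are false (services and kernel threads), while should_forkboost(1234) is true (e.g. a task forked from a shell).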
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0e2a546cdade..b36dd2de437d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -946,6 +946,14 @@ struct sched_avg {
 	u32 usage_avg_sum;
 };
 
+#ifdef CONFIG_SCHED_HMP
+/*
+ * We want to avoid boosting any processes forked from init (PID 1)
+ * and kthreadd (assumed to be PID 2).
+ */
+#define hmp_task_should_forkboost(task) ((task->parent && task->parent->pid > 2))
+#endif
+
 #ifdef CONFIG_SCHEDSTATS
 struct sched_statistics {
 	u64 wait_start;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5f242330ef85..65aaa1c78ca1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1629,9 +1629,9 @@ static void __sched_fork(struct task_struct *p)
 #ifdef CONFIG_SCHED_HMP
 	/* keep LOAD_AVG_MAX in sync with fair.c if load avg series is changed */
 #define LOAD_AVG_MAX 47742
-	if (p->mm) {
-		p->se.avg.hmp_last_up_migration = 0;
-		p->se.avg.hmp_last_down_migration = 0;
+	p->se.avg.hmp_last_up_migration = 0;
+	p->se.avg.hmp_last_down_migration = 0;
+	if (hmp_task_should_forkboost(p)) {
 		p->se.avg.load_avg_ratio = 1023;
 		p->se.avg.load_avg_contrib =
 			(1023 * scale_load_down(p->se.load.weight));
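The values seeded here make a boosted task look fully busy from its first scheduling decision: load_avg_ratio is on a [0..1023] scale, so 1023 is the maximum. A rough sketch of the seed arithmetic, assuming the NICE_0 load weight of 1024 used by the load-tracking code (an illustrative helper, not part of the patch):

	/* Hypothetical helper showing the fork-boost seed arithmetic. */
	static unsigned long forkboost_seed_contrib(unsigned long weight)
	{
		/* 1023 is the maximum of the load_avg_ratio scale, i.e.
		 * ~100% busy. For a NICE_0 task, weight is 1024 after
		 * scale_load_down(), giving 1023 * 1024 = 1047552. */
		return 1023UL * weight;
	}

Starting at the maximum ratio means the HMP up-migration threshold is already exceeded when the task first runs, which is what steers user-launched tasks onto a big CPU at fork.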
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 161da1ab3995..74a5adfefeb7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4358,7 +4358,7 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 #ifdef CONFIG_SCHED_HMP
 	/* always put non-kernel forking tasks on a big domain */
-	if (p->mm && (sd_flag & SD_BALANCE_FORK)) {
+	if (unlikely(sd_flag & SD_BALANCE_FORK) && hmp_task_should_forkboost(p)) {
 		new_cpu = hmp_select_faster_cpu(p, prev_cpu);
 		if (new_cpu != NR_CPUS) {
 			hmp_next_up_delay(&p->se, new_cpu);
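With the predicate in place, this fork-balance path boosts only when both conditions hold: the placement is for a fork (SD_BALANCE_FORK) and the parent PID is above 2. A quick user-space way to see which side of the check a process would land on (a standalone demo, unrelated to kernel internals):

	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t parent = getppid();
		/* A process forked from a shell reports a parent PID > 2 and
		 * would qualify for the fork boost; direct children of init
		 * or kthreadd report 1 or 2 and would not. */
		printf("parent pid %d -> %s\n", (int)parent,
		       parent > 2 ? "would be boosted" : "would not be boosted");
		return 0;
	}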