Author:    Chris Redpath <chris.redpath@arm.com>    2013-06-17 16:08:40 +0100
Committer: Jon Medhurst <tixy@linaro.org>           2013-07-17 11:32:30 +0100
Commit:    954978dd2cff81cc15745b9e581a1709e238f8ef
Tree:      5b297723fc2cce8e5293b1b559bbe2952d2654ac /kernel/sched/core.c
Parent:    3f3b210703f80fe60dbfa13c25b30d4effbf9f4b
HMP: Force new non-kernel tasks onto big CPUs until load stabilises
Initialise the load stats for new tasks so that they do not see the
instability of early task life, which makes it hard to decide which CPU
is appropriate.

Also, change the fork balance algorithm so that the least loaded of the
CPUs in the big cluster is chosen regardless of the bigness of the
parent task. This is intended to help performance for applications
which use many short-lived tasks. Although best practice is usually to
use a thread pool, apps which do not do this should not be subject to
the randomness of the early stats.

We should ignore real-time threads when forking onto big CPUs, but it
is not possible to tell whether a new thread is real-time at the fork
stage. Instead, we prevent kernel threads from getting the initial
boost - when they later become real-time they will only be placed on a
big CPU if their compute requirements demand it.

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
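Note: only the stats-initialisation half of the change is visible in the
hunk below; the fork-balance half lives in kernel/sched/fair.c and is not
part of this diff. Purely as an illustration of the policy described
above, a minimal sketch might look like the following, where
hmp_fork_balance_sketch(), hmp_faster_domain() and hmp_cpu_load() are
hypothetical stand-ins rather than the real HMP helpers:

/*
 * Illustrative sketch only - not the code in this patch. The helpers
 * hmp_faster_domain() and hmp_cpu_load() are assumed names; the real
 * fork-balance change is made in kernel/sched/fair.c.
 */
static int hmp_fork_balance_sketch(struct task_struct *p, int prev_cpu)
{
	unsigned long lowest_load = ULONG_MAX;
	int cpu, lowest_cpu = prev_cpu;
	struct cpumask allowed;

	/* Kernel threads (no mm) do not get the initial big-CPU boost. */
	if (!p->mm)
		return prev_cpu;

	/* Consider only big-cluster CPUs the new task may run on. */
	cpumask_and(&allowed, hmp_faster_domain(prev_cpu), tsk_cpus_allowed(p));

	/*
	 * Pick the least loaded big CPU, regardless of how big the
	 * parent task is.
	 */
	for_each_cpu(cpu, &allowed) {
		unsigned long load = hmp_cpu_load(cpu);

		if (load < lowest_load) {
			lowest_load = load;
			lowest_cpu = cpu;
		}
	}

	return lowest_cpu;
}

The hunk in this diff handles the other half: a forked user task's load
statistics are seeded at their maximum values, so the task looks fully
loaded until its own history accumulates.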
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c | 14
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e45295dc33a..4c53da3781e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1618,8 +1618,18 @@ static void __sched_fork(struct task_struct *p)
p->se.avg.runnable_avg_period = 0;
p->se.avg.runnable_avg_sum = 0;
#ifdef CONFIG_SCHED_HMP
- p->se.avg.hmp_last_up_migration = 0;
- p->se.avg.hmp_last_down_migration = 0;
+ /* keep LOAD_AVG_MAX in sync with fair.c if load avg series is changed */
+#define LOAD_AVG_MAX 47742
+ if (p->mm) {
+ p->se.avg.hmp_last_up_migration = 0;
+ p->se.avg.hmp_last_down_migration = 0;
+ p->se.avg.load_avg_ratio = 1023;
+ p->se.avg.load_avg_contrib =
+ (1023 * scale_load_down(p->se.load.weight));
+ p->se.avg.runnable_avg_period = LOAD_AVG_MAX;
+ p->se.avg.runnable_avg_sum = LOAD_AVG_MAX;
+ p->se.avg.usage_avg_sum = LOAD_AVG_MAX;
+ }
#endif
#endif
#ifdef CONFIG_SCHEDSTATS