 include/linux/sched.h | 8 ++++++++
 kernel/sched/core.c   | 6 +++---
 kernel/sched/fair.c   | 2 +-
 3 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0e2a546cdad..b36dd2de437 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -946,6 +946,14 @@ struct sched_avg {
u32 usage_avg_sum;
};
+#ifdef CONFIG_SCHED_HMP
+/*
+ * We want to avoid boosting any processes forked from init (PID 1)
+ * and kthreadd (assumed to be PID 2).
+ */
+#define hmp_task_should_forkboost(task) ((task->parent && task->parent->pid > 2))
+#endif
+
#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
u64 wait_start;
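
The macro added above gates the fork boost on the parent's PID: children of init (PID 1) and of kthreadd (assumed to be PID 2, i.e. kernel threads) are excluded, and everything else qualifies. Below is a minimal user-space sketch of that predicate, using a hypothetical stub_task struct and example PIDs purely for illustration, not kernel code:

/* Stand-alone sketch: models how the forkboost predicate classifies a
 * freshly forked task by its parent's PID.  stub_task and the PIDs in
 * main() are illustrative assumptions. */
#include <stdio.h>
#include <stdbool.h>

struct stub_task {
        int pid;
        struct stub_task *parent;
};

/* Mirrors hmp_task_should_forkboost(): boost only when the parent exists
 * and is neither init (PID 1) nor kthreadd (assumed PID 2). */
static bool should_forkboost(const struct stub_task *t)
{
        return t->parent && t->parent->pid > 2;
}

int main(void)
{
        struct stub_task init     = { .pid = 1,    .parent = NULL };
        struct stub_task kthreadd = { .pid = 2,    .parent = NULL };
        struct stub_task shell    = { .pid = 300,  .parent = &init };
        struct stub_task kworker  = { .pid = 45,   .parent = &kthreadd };
        struct stub_task app      = { .pid = 1234, .parent = &shell };

        printf("child of init     -> boost? %d\n", should_forkboost(&shell));   /* 0 */
        printf("child of kthreadd -> boost? %d\n", should_forkboost(&kworker)); /* 0 */
        printf("child of shell    -> boost? %d\n", should_forkboost(&app));     /* 1 */
        return 0;
}

Note that the macro does not parenthesise its task argument; that is harmless for the plain identifiers it is used with in this series, but worth keeping in mind if it ever gains more complex callers.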
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5f242330ef8..65aaa1c78ca 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1629,9 +1629,9 @@ static void __sched_fork(struct task_struct *p)
#ifdef CONFIG_SCHED_HMP
/* keep LOAD_AVG_MAX in sync with fair.c if load avg series is changed */
#define LOAD_AVG_MAX 47742
- if (p->mm) {
- p->se.avg.hmp_last_up_migration = 0;
- p->se.avg.hmp_last_down_migration = 0;
+ p->se.avg.hmp_last_up_migration = 0;
+ p->se.avg.hmp_last_down_migration = 0;
+ if (hmp_task_should_forkboost(p)) {
p->se.avg.load_avg_ratio = 1023;
p->se.avg.load_avg_contrib =
(1023 * scale_load_down(p->se.load.weight));
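
The core.c hunk does two things: the hmp_last_up_migration/hmp_last_down_migration timestamps are now cleared for every new task rather than only for tasks with an mm, and the boost itself (load_avg_ratio = 1023, effectively "fully busy" on the 0..1023 ratio scale the HMP code uses, plus a matching load_avg_contrib) is applied only when hmp_task_should_forkboost() accepts the task. A rough stand-alone sketch of the boosted values, assuming a nice-0 weight of 1024 as a stand-in for scale_load_down(p->se.load.weight):

/* Stand-alone sketch (not kernel code): the per-entity load values a
 * boost-eligible child starts with.  The weight of 1024 is an assumed
 * nice-0 value, not taken from p->se.load. */
#include <stdio.h>

int main(void)
{
        unsigned long weight          = 1024;  /* assumed scale_load_down() result */
        unsigned long load_avg_ratio  = 1023;  /* treat the child as fully busy */
        unsigned long load_avg_contrib = load_avg_ratio * weight;

        /* A fully-busy entity looks maximally loaded to the HMP up-migration
         * checks, so the new task is a candidate for a big CPU right away. */
        printf("load_avg_ratio   = %lu\n", load_avg_ratio);    /* 1023 */
        printf("load_avg_contrib = %lu\n", load_avg_contrib);  /* 1047552 */
        return 0;
}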
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 161da1ab399..74a5adfefeb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4358,7 +4358,7 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
#ifdef CONFIG_SCHED_HMP
/* always put non-kernel forking tasks on a big domain */
- if (p->mm && (sd_flag & SD_BALANCE_FORK)) {
+ if (unlikely(sd_flag & SD_BALANCE_FORK) && hmp_task_should_forkboost(p)) {
new_cpu = hmp_select_faster_cpu(p, prev_cpu);
if (new_cpu != NR_CPUS) {
hmp_next_up_delay(&p->se, new_cpu);
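
The fair.c hunk keys fork-time placement off the same predicate: on an SD_BALANCE_FORK wakeup, a boost-eligible task is handed to hmp_select_faster_cpu(), with NR_CPUS serving as the "no suitable CPU" sentinel, and the cheap flag test is placed first so the parent-PID check only runs when fork balancing is actually in progress. The unlikely() wrapper is purely a branch-prediction hint; for reference, it is built on GCC's __builtin_expect, roughly:

/* For reference (include/linux/compiler.h); these annotations are
 * compiler hints, not functional changes. */
#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)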