Diffstat (limited to 'kernel/sched/fair.c')
 kernel/sched/fair.c | 31 ++++++++++++++-----------------
 1 file changed, 14 insertions(+), 17 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1957f2589d9a..128d5723ae4d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3694,31 +3694,30 @@ static inline struct hmp_domain *hmp_faster_domain(int cpu);
/* must hold runqueue lock for queue se is currently on */
static struct sched_entity *hmp_get_heaviest_task(
- struct sched_entity *se, int target_cpu)
+ struct sched_entity *se, int migrate_up)
{
int num_tasks = hmp_max_tasks;
struct sched_entity *max_se = se;
unsigned long int max_ratio = se->avg.load_avg_ratio;
const struct cpumask *hmp_target_mask = NULL;
- struct hmp_domain *hmp;
- if (hmp_cpu_is_fastest(cpu_of(se->cfs_rq->rq)))
- return max_se;
+ if (migrate_up) {
+ struct hmp_domain *hmp;
+ if (hmp_cpu_is_fastest(cpu_of(se->cfs_rq->rq)))
+ return max_se;
- hmp = hmp_faster_domain(cpu_of(se->cfs_rq->rq));
- hmp_target_mask = &hmp->cpus;
- if (target_cpu >= 0) {
- BUG_ON(!cpumask_test_cpu(target_cpu, hmp_target_mask));
- hmp_target_mask = cpumask_of(target_cpu);
+ hmp = hmp_faster_domain(cpu_of(se->cfs_rq->rq));
+ hmp_target_mask = &hmp->cpus;
}
/* The currently running task is not on the runqueue */
se = __pick_first_entity(cfs_rq_of(se));
while (num_tasks && se) {
if (entity_is_task(se) &&
- se->avg.load_avg_ratio > max_ratio &&
- cpumask_intersects(hmp_target_mask,
- tsk_cpus_allowed(task_of(se)))) {
+ (se->avg.load_avg_ratio > max_ratio &&
+ hmp_target_mask &&
+ cpumask_intersects(hmp_target_mask,
+ tsk_cpus_allowed(task_of(se))))) {
max_se = se;
max_ratio = se->avg.load_avg_ratio;
}
@@ -7127,7 +7126,7 @@ static void hmp_force_up_migration(int this_cpu)
}
}
orig = curr;
- curr = hmp_get_heaviest_task(curr, -1);
+ curr = hmp_get_heaviest_task(curr, 1);
p = task_of(curr);
if (hmp_up_migration(cpu, &target_cpu, curr)) {
cpu_rq(target_cpu)->wake_for_idle_pull = 1;
@@ -7224,14 +7223,12 @@ static unsigned int hmp_idle_pull(int this_cpu)
}
}
orig = curr;
- curr = hmp_get_heaviest_task(curr, this_cpu);
+ curr = hmp_get_heaviest_task(curr, 1);
/* check if heaviest eligible task on this
* CPU is heavier than previous task
*/
if (hmp_task_eligible_for_up_migration(curr) &&
- curr->avg.load_avg_ratio > ratio &&
- cpumask_test_cpu(this_cpu,
- tsk_cpus_allowed(task_of(curr)))) {
+ curr->avg.load_avg_ratio > ratio) {
p = task_of(curr);
target = rq;
ratio = curr->avg.load_avg_ratio;