 kernel/sched/fair.c  | 30 +++++++++++++++++++++++-------
 kernel/sched/sched.h |  1 +
 2 files changed, 24 insertions(+), 7 deletions(-)
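This patch tracks, per runqueue, whether the currently running task is a "misfit" (too big for its CPU's capacity) and teaches the load-balancer group classification about it. It leans on two helpers that are not part of this diff, task_fits_capacity() and cpu_overutilized(). A minimal sketch of how such helpers are commonly defined in EAS-enabled kernels, assuming capacity_of(), cpu_util() and task_util() from kernel/sched/fair.c and roughly 20% of headroom; the margin value and exact form are assumptions, not taken from this patch:

static unsigned long capacity_margin = 1280;	/* assumed value: ~20% headroom over 1024 */

static inline bool task_fits_capacity(struct task_struct *p, int cpu)
{
	/* The task fits if its utilization, scaled by the margin, stays below the CPU's capacity. */
	return capacity_of(cpu) * 1024 > task_util(p) * capacity_margin;
}

static inline bool cpu_overutilized(int cpu)
{
	/* The CPU is overutilized once its total utilization eats into that margin. */
	return capacity_of(cpu) * 1024 < cpu_util(cpu) * capacity_margin;
}
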
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index efed617ef158..399c6ae64c16 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5508,6 +5508,8 @@ again:
if (hrtick_enabled(rq))
hrtick_start_fair(rq, p);
+ rq->misfit_task = !task_fits_capacity(p, rq->cpu);
+
return p;
simple:
cfs_rq = &rq->cfs;
@@ -5529,9 +5531,13 @@ simple:
if (hrtick_enabled(rq))
hrtick_start_fair(rq, p);
+ rq->misfit_task = !task_fits_capacity(p, rq->cpu);
+
return p;
idle:
+ rq->misfit_task = 0;
+
new_tasks = idle_balance(rq);
/*
* Because idle_balance() releases (and re-acquires) rq->lock, it is
@@ -5736,6 +5742,13 @@ static unsigned long __read_mostly max_load_balance_interval = HZ/10;
enum fbq_type { regular, remote, all };
+enum group_type {
+ group_other = 0,
+ group_misfit_task,
+ group_imbalanced,
+ group_overloaded,
+};
+
#define LBF_ALL_PINNED 0x01
#define LBF_NEED_BREAK 0x02
#define LBF_DST_PINNED 0x04
@@ -6258,12 +6271,6 @@ static unsigned long task_h_load(struct task_struct *p)
/********** Helpers for find_busiest_group ************************/
-enum group_type {
- group_other = 0,
- group_imbalanced,
- group_overloaded,
-};
-
/*
* sg_lb_stats - stats of a sched_group required for load_balancing
*/
@@ -6279,6 +6286,7 @@ struct sg_lb_stats {
unsigned int group_weight;
enum group_type group_type;
int group_no_capacity;
+ int group_misfit_task; /* A cpu has a task too big for its capacity */
#ifdef CONFIG_NUMA_BALANCING
unsigned int nr_numa_running;
unsigned int nr_preferred_running;
@@ -6565,6 +6573,9 @@ static enum group_type group_classify(struct lb_env *env,
if (sg_imbalanced(group))
return group_imbalanced;
+ if (sgs->group_misfit_task)
+ return group_misfit_task;
+
return group_other;
}
@@ -6612,8 +6623,11 @@ static inline void update_sg_lb_stats(struct lb_env *env,
if (idle_cpu(i))
sgs->idle_cpus++;
- if (cpu_overutilized(i))
+ if (cpu_overutilized(i)) {
*overutilized = true;
+ if (!sgs->group_misfit_task && rq->misfit_task)
+ sgs->group_misfit_task = capacity_of(i);
+ }
}
/* Adjust by relative CPU capacity of the group */
@@ -8234,6 +8248,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
if (!rq->rd->overutilized && cpu_overutilized(task_cpu(curr)))
rq->rd->overutilized = true;
+ rq->misfit_task = !task_fits_capacity(curr, rq->cpu);
+
/*
* To make free room for a task that is building up its "real"
* utilization and to harm its performance the least, request a
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a26aa7fd6241..0b5d64781e37 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -548,6 +548,7 @@ struct rq {
#define CPU_LOAD_IDX_MAX 5
unsigned long cpu_load[CPU_LOAD_IDX_MAX];
unsigned long last_load_update_tick;
+ unsigned int misfit_task;
#ifdef CONFIG_NO_HZ_COMMON
u64 nohz_stamp;
unsigned long nohz_flags;
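
Note that sg_lb_stats::group_misfit_task is not kept as a plain flag: update_sg_lb_stats() stores capacity_of(i), the capacity of the CPU carrying the misfit task. One plausible consumer of that value, sketched here with a hypothetical helper name (nothing below is part of this diff), is to raise the computed imbalance so that at least that much load may be pulled, letting the misfit task escape to a higher-capacity CPU:

/* Hypothetical illustration only; adjust_imbalance_for_misfit() is not defined by this patch. */
static void adjust_imbalance_for_misfit(struct lb_env *env,
					struct sg_lb_stats *busiest)
{
	/*
	 * busiest->group_misfit_task holds capacity_of() of the CPU running
	 * the misfit task, so lifting the imbalance to at least that value
	 * allows the whole task to be migrated to a bigger CPU.
	 */
	if (busiest->group_type == group_misfit_task)
		env->imbalance = max_t(long, env->imbalance,
				       busiest->group_misfit_task);
}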