Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/debug.c	6
-rw-r--r--	kernel/sched/fair.c	8
2 files changed, 13 insertions, 1 deletion
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index fbd8caa83ef..6b7b86cfaba 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -569,6 +569,12 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
"nr_involuntary_switches", (long long)p->nivcsw);
P(se.load.weight);
+#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+ P(se.avg.runnable_avg_sum);
+ P(se.avg.runnable_avg_period);
+ P(se.avg.load_avg_contrib);
+ P(se.avg.decay_count);
+#endif
P(policy);
P(prio);
#undef PN
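
For context, the P() macro used above is debug.c's stringizing print helper: one call emits both a field's name and its value into the /proc/<pid>/sched output, which is how the four new sched_avg fields become visible. A minimal standalone sketch of that pattern (the demo struct, format string, and values are illustrative assumptions, not the kernel's definitions):

#include <stdio.h>

/* Illustrative stand-in for the fields the hunk above exposes; the
 * real ones live in struct sched_entity's embedded struct sched_avg. */
struct sched_avg_demo {
	unsigned int runnable_avg_sum;
	unsigned int runnable_avg_period;
};

/* Like debug.c's P(): #F stringizes the member name, so one macro
 * call prints both the field's name and its current value. */
#define P(obj, F) printf("%-35s: %u\n", #F, (obj)->F)

int main(void)
{
	struct sched_avg_demo avg = {
		.runnable_avg_sum	= 512,
		.runnable_avg_period	= 1024,
	};

	P(&avg, runnable_avg_sum);
	P(&avg, runnable_avg_period);
	return 0;
}
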
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3e242209bc3..7500a68f831 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1714,7 +1714,13 @@ static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
 		}
 		wakeup = 0;
 	} else {
-		__synchronize_entity_decay(se);
+		/*
+		 * Task re-woke on same cpu (or else migrate_task_rq_fair()
+		 * would have made count negative); we must be careful to avoid
+		 * double-accounting blocked time after synchronizing decays.
+		 */
+		se->avg.last_runnable_update += __synchronize_entity_decay(se)
+							<< 20;
 	}
 
 	/* migrated tasks did not contribute to our blocked load */
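
On the new lines above: each runnable-average period in this code is 1024us, i.e. 2^20 ns, so shifting the period count returned by __synchronize_entity_decay() left by 20 converts the already-decayed periods back into nanoseconds and advances last_runnable_update past them, which is what keeps the blocked interval from being charged a second time. A minimal standalone sketch of that bookkeeping (the function name and example values are illustrative assumptions, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* One load-tracking period is 1024us = 1024 * 1024 ns = 1 << 20 ns,
 * which is why the patch shifts the decay count left by 20. */
#define PERIOD_SHIFT 20

/* Hypothetical model: advance the entity's update stamp past the
 * blocked interval that __synchronize_entity_decay() already decayed,
 * so the next runnable-average update cannot account it again. */
static uint64_t skip_decayed_time(uint64_t last_runnable_update,
				  uint64_t decays)
{
	return last_runnable_update + (decays << PERIOD_SHIFT);
}

int main(void)
{
	uint64_t stamp  = 5000000;	/* ns; arbitrary example stamp */
	uint64_t decays = 3;		/* three ~1ms periods decayed  */

	printf("stamp advances by %llu ns to %llu\n",
	       (unsigned long long)(decays << PERIOD_SHIFT),
	       (unsigned long long)skip_decayed_time(stamp, decays));
	return 0;
}
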