Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	76
1 file changed, 30 insertions(+), 46 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index b18f231a487..38933cafea8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -52,7 +52,6 @@
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
-#include <linux/cpu_acct.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
@@ -217,15 +216,15 @@ static inline struct task_group *task_group(struct task_struct *p)
}
/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
-static inline void set_task_cfs_rq(struct task_struct *p)
+static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu)
{
- p->se.cfs_rq = task_group(p)->cfs_rq[task_cpu(p)];
- p->se.parent = task_group(p)->se[task_cpu(p)];
+ p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
+ p->se.parent = task_group(p)->se[cpu];
}
#else
-static inline void set_task_cfs_rq(struct task_struct *p) { }
+static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu) { }
#endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -456,18 +455,18 @@ static void update_rq_clock(struct rq *rq)
*/
enum {
SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
- SCHED_FEAT_START_DEBIT = 2,
- SCHED_FEAT_TREE_AVG = 4,
- SCHED_FEAT_APPROX_AVG = 8,
- SCHED_FEAT_WAKEUP_PREEMPT = 16,
+ SCHED_FEAT_WAKEUP_PREEMPT = 2,
+ SCHED_FEAT_START_DEBIT = 4,
+ SCHED_FEAT_TREE_AVG = 8,
+ SCHED_FEAT_APPROX_AVG = 16,
};
const_debug unsigned int sysctl_sched_features =
SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 |
+ SCHED_FEAT_WAKEUP_PREEMPT * 1 |
SCHED_FEAT_START_DEBIT * 1 |
SCHED_FEAT_TREE_AVG * 0 |
- SCHED_FEAT_APPROX_AVG * 0 |
- SCHED_FEAT_WAKEUP_PREEMPT * 1;
+ SCHED_FEAT_APPROX_AVG * 0;
#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
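
The hunk above keeps every scheduler feature on its own power-of-two bit, so the default mask can be composed by multiplying each bit by 0 or 1 and a single feature tested with sched_feat(). A minimal stand-alone sketch of the same pattern, purely for illustration (the FEAT_* names and demo_feat() are invented here, not part of the patch):

#include <stdio.h>

enum {
	FEAT_NEW_FAIR_SLEEPERS	= 1,
	FEAT_WAKEUP_PREEMPT	= 2,
	FEAT_START_DEBIT	= 4,
	FEAT_TREE_AVG		= 8,
	FEAT_APPROX_AVG		= 16,
};

/* Default mask: multiplying by 0 or 1 keeps the on/off choice visible per bit. */
static const unsigned int demo_features =
		FEAT_NEW_FAIR_SLEEPERS	* 1 |
		FEAT_WAKEUP_PREEMPT	* 1 |
		FEAT_START_DEBIT	* 1 |
		FEAT_TREE_AVG		* 0 |
		FEAT_APPROX_AVG		* 0;

#define demo_feat(x) (demo_features & FEAT_##x)

int main(void)
{
	/* Prints "1 0": WAKEUP_PREEMPT is enabled by default, TREE_AVG is not. */
	printf("%d %d\n", !!demo_feat(WAKEUP_PREEMPT), !!demo_feat(TREE_AVG));
	return 0;
}
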
@@ -1023,10 +1022,16 @@ unsigned long weighted_cpuload(const int cpu)
static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
+ set_task_cfs_rq(p, cpu);
#ifdef CONFIG_SMP
+ /*
+ * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+ * successfully executed on another CPU. We must ensure that updates of
+ * per-task data have been completed by this moment.
+ */
+ smp_wmb();
task_thread_info(p)->cpu = cpu;
#endif
- set_task_cfs_rq(p);
}
#ifdef CONFIG_SMP
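
The smp_wmb() comment in the hunk above is about publication ordering: the per-task group pointers written by set_task_cfs_rq() must be globally visible before the new ->cpu value, because another CPU that observes the new cpu may immediately lock the runqueue and use that data. Below is a user-space analogy of the same pattern, using C11 release/acquire atomics purely for illustration; in the kernel the read side is ordered by the locking in task_rq_lock(), not by an explicit acquire load as shown here.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int per_task_data;		/* stands in for p->se.cfs_rq / p->se.parent */
static atomic_uint task_cpu_nr;		/* stands in for task_thread_info(p)->cpu */

static void *writer(void *arg)
{
	per_task_data = 42;		/* the set_task_cfs_rq(p, cpu) step */
	/*
	 * Release store: the data write above cannot be reordered past this
	 * publish, which is the role smp_wmb() plays before the plain store
	 * of ->cpu in __set_task_cpu().
	 */
	atomic_store_explicit(&task_cpu_nr, 1, memory_order_release);
	return NULL;
}

static void *reader(void *arg)
{
	/* Acquire load: if the new cpu value is visible, so is the data. */
	if (atomic_load_explicit(&task_cpu_nr, memory_order_acquire) == 1)
		printf("data = %d\n", per_task_data);	/* always prints 42 */
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}
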
@@ -3338,13 +3343,9 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
cputime64_t tmp;
- struct rq *rq = this_rq();
p->utime = cputime_add(p->utime, cputime);
- if (p != rq->idle)
- cpuacct_charge(p, cputime);
-
/* Add user time to cpustat. */
tmp = cputime_to_cputime64(cputime);
if (TASK_NICE(p) > 0)
@@ -3395,10 +3396,8 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
struct rq *rq = this_rq();
cputime64_t tmp;
- if (p->flags & PF_VCPU) {
- account_guest_time(p, cputime);
- return;
- }
+ if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0))
+ return account_guest_time(p, cputime);
p->stime = cputime_add(p->stime, cputime);
@@ -3408,10 +3407,9 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
cpustat->irq = cputime64_add(cpustat->irq, tmp);
else if (softirq_count())
cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
- else if (p != rq->idle) {
+ else if (p != rq->idle)
cpustat->system = cputime64_add(cpustat->system, tmp);
- cpuacct_charge(p, cputime);
- } else if (atomic_read(&rq->nr_iowait) > 0)
+ else if (atomic_read(&rq->nr_iowait) > 0)
cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
else
cpustat->idle = cputime64_add(cpustat->idle, tmp);
@@ -3447,10 +3445,8 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
else
cpustat->idle = cputime64_add(cpustat->idle, tmp);
- } else {
+ } else
cpustat->steal = cputime64_add(cpustat->steal, tmp);
- cpuacct_charge(p, -tmp);
- }
}
/*
@@ -5286,23 +5282,9 @@ static void migrate_live_tasks(int src_cpu)
}
/*
- * activate_idle_task - move idle task to the _front_ of runqueue.
- */
-static void activate_idle_task(struct task_struct *p, struct rq *rq)
-{
- update_rq_clock(rq);
-
- if (p->state == TASK_UNINTERRUPTIBLE)
- rq->nr_uninterruptible--;
-
- enqueue_task(rq, p, 0);
- inc_nr_running(p, rq);
-}
-
-/*
* Schedules idle task to be the next runnable task on current CPU.
- * It does so by boosting its priority to highest possible and adding it to
- * the _front_ of the runqueue. Used by CPU offline code.
+ * It does so by boosting its priority to highest possible.
+ * Used by CPU offline code.
*/
void sched_idle_next(void)
{
@@ -5322,8 +5304,8 @@ void sched_idle_next(void)
__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
- /* Add idle task to the _front_ of its priority queue: */
- activate_idle_task(p, rq);
+ update_rq_clock(rq);
+ activate_task(rq, p, 0);
spin_unlock_irqrestore(&rq->lock, flags);
}
@@ -7097,8 +7079,10 @@ void sched_move_task(struct task_struct *tsk)
rq = task_rq_lock(tsk, &flags);
- if (tsk->sched_class != &fair_sched_class)
+ if (tsk->sched_class != &fair_sched_class) {
+ set_task_cfs_rq(tsk, task_cpu(tsk));
goto done;
+ }
update_rq_clock(rq);
@@ -7111,7 +7095,7 @@ void sched_move_task(struct task_struct *tsk)
tsk->sched_class->put_prev_task(rq, tsk);
}
- set_task_cfs_rq(tsk);
+ set_task_cfs_rq(tsk, task_cpu(tsk));
if (on_rq) {
if (unlikely(running))