author     Chris Redpath <chris.redpath@arm.com>   2013-07-22 15:56:28 +0100
committer  Jon Medhurst <tixy@linaro.org>          2013-09-05 18:09:17 +0100
commit     0d520ee8d4e910d1400e1b21608aff3bbce7ad6f
tree       e6b48529862e5b63e9920a538676e97f4a83ade1 /kernel/sched/fair.c
parent     1325a370daa4878e3153e877a68d29a0ab308d3b
HMP: Access runqueue task clocks directly.
Avoids accesses through cfs_rq going bad when the cpu_rq doesn't have
a cfs member.

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Liviu Dudau <liviu.dudau@arm.com>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
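For context, a minimal sketch of the access pattern this patch replaces and the one it adopts. The two wrapper functions are hypothetical, added purely to illustrate; the expressions inside them are taken verbatim from the diff below.

/*
 * Old pattern: derive the task clock through the CPU's cfs_rq. This
 * dereference can go bad when the cpu_rq doesn't have a usable cfs
 * member.
 */
static inline u64 hmp_clock_via_cfs_rq(int cpu)
{
	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;

	return cfs_rq_clock_task(cfs_rq);
}

/*
 * New pattern: read clock_task directly off the runqueue of the first
 * online CPU, bypassing cfs_rq entirely. The in-tree comments label
 * this a hack: every caller now samples the same CPU's clock.
 */
static inline u64 hmp_clock_direct(void)
{
	return cpu_rq(cpumask_first(cpu_online_mask))->clock_task;
}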
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--   kernel/sched/fair.c   16
1 file changed, 8 insertions, 8 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index afd76bf9433..bfd27e89399 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3769,8 +3769,8 @@ static inline unsigned int hmp_select_slower_cpu(struct task_struct *tsk, int cpu)
static inline void hmp_next_up_delay(struct sched_entity *se, int cpu)
{
- struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
- u64 now = cfs_rq_clock_task(cfs_rq);
+ /* hack - always use clock from first online CPU */
+ u64 now = cpu_rq(cpumask_first(cpu_online_mask))->clock_task;
se->avg.hmp_last_up_migration = now;
se->avg.hmp_last_down_migration = 0;
cpu_rq(cpu)->avg.hmp_last_up_migration = now;
@@ -3779,8 +3779,8 @@ static inline void hmp_next_up_delay(struct sched_entity *se, int cpu)
static inline void hmp_next_down_delay(struct sched_entity *se, int cpu)
{
- struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
- u64 now = cfs_rq_clock_task(cfs_rq);
+ /* hack - always use clock from first online CPU */
+ u64 now = cpu_rq(cpumask_first(cpu_online_mask))->clock_task;
se->avg.hmp_last_down_migration = now;
se->avg.hmp_last_up_migration = 0;
cpu_rq(cpu)->avg.hmp_last_down_migration = now;
@@ -6481,7 +6481,6 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
static unsigned int hmp_up_migration(int cpu, int *target_cpu, struct sched_entity *se)
{
struct task_struct *p = task_of(se);
- struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
u64 now;
if (target_cpu)
@@ -6499,7 +6498,8 @@ static unsigned int hmp_up_migration(int cpu, int *target_cpu, struct sched_entity *se)
return 0;
/* Let the task load settle before doing another up migration */
- now = cfs_rq_clock_task(cfs_rq);
+ /* hack - always use clock from first online CPU */
+ now = cpu_rq(cpumask_first(cpu_online_mask))->clock_task;
if (((now - se->avg.hmp_last_up_migration) >> 10)
< hmp_next_up_threshold)
return 0;
@@ -6522,7 +6522,6 @@ static unsigned int hmp_up_migration(int cpu, int *target_cpu, struct sched_entity *se)
static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
{
struct task_struct *p = task_of(se);
- struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
u64 now;
if (hmp_cpu_is_slowest(cpu))
@@ -6538,7 +6537,8 @@ static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
#endif
/* Let the task load settle before doing another down migration */
- now = cfs_rq_clock_task(cfs_rq);
+ /* hack - always use clock from first online CPU */
+ now = cpu_rq(cpumask_first(cpu_online_mask))->clock_task;
if (((now - se->avg.hmp_last_down_migration) >> 10)
< hmp_next_down_threshold)
return 0;
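All four updated call sites feed the sampled clock into the same settle check. A hedged sketch of that check, using names from the diff (hmp_task_settled is a hypothetical helper; rq->clock_task counts nanoseconds, so the right shift by 10 divides by 1024 to give a rough microsecond value for comparison against the threshold):

/*
 * Hypothetical helper mirroring the check in hmp_up_migration():
 * only allow another up migration once the task load has settled.
 */
static inline int hmp_task_settled(struct sched_entity *se, u64 now)
{
	/* ns -> ~us: shifting right by 10 divides by 1024 */
	return ((now - se->avg.hmp_last_up_migration) >> 10)
		>= hmp_next_up_threshold;
}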