summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author: Chris Redpath <chris.redpath@arm.com> 2013-08-19 15:06:22 +0100
committer: Jon Medhurst <tixy@linaro.org> 2013-08-20 17:19:08 +0100
commit: c962f2c4976d2a2a792306f7677c856d701bf6a1 (patch)
tree: 908abf67a2aad340da0abadb27127b8aa814c041
parent: ad710693d3bef2cd1d985b5e61d3f19ba8151146 (diff)
HMP: Access runqueue task clocks directly.
Avoids accesses through cfs_rq going bad when the cpu_rq doesn't have a cfs member.

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Liviu Dudau <liviu.dudau@arm.com>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
-rw-r--r--kernel/sched/fair.c16
1 file changed, 8 insertions, 8 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f481a9459219..b0d88752583b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3816,8 +3816,8 @@ static inline unsigned int hmp_select_slower_cpu(struct task_struct *tsk,
static inline void hmp_next_up_delay(struct sched_entity *se, int cpu)
{
- struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
- u64 now = cfs_rq_clock_task(cfs_rq);
+ /* hack - always use clock from first online CPU */
+ u64 now = rq_clock_task(cpu_rq(cpumask_first(cpu_online_mask)));
se->avg.hmp_last_up_migration = now;
se->avg.hmp_last_down_migration = 0;
cpu_rq(cpu)->avg.hmp_last_up_migration = now;
@@ -3826,8 +3826,8 @@ static inline void hmp_next_up_delay(struct sched_entity *se, int cpu)
static inline void hmp_next_down_delay(struct sched_entity *se, int cpu)
{
- struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
- u64 now = cfs_rq_clock_task(cfs_rq);
+ /* hack - always use clock from first online CPU */
+ u64 now = rq_clock_task(cpu_rq(cpumask_first(cpu_online_mask)));
se->avg.hmp_last_down_migration = now;
se->avg.hmp_last_up_migration = 0;
cpu_rq(cpu)->avg.hmp_last_down_migration = now;
@@ -6523,7 +6523,6 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
static unsigned int hmp_up_migration(int cpu, int *target_cpu, struct sched_entity *se)
{
struct task_struct *p = task_of(se);
- struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
u64 now;
if (target_cpu)
@@ -6541,7 +6540,8 @@ static unsigned int hmp_up_migration(int cpu, int *target_cpu, struct sched_enti
return 0;
/* Let the task load settle before doing another up migration */
- now = cfs_rq_clock_task(cfs_rq);
+ /* hack - always use clock from first online CPU */
+ now = rq_clock_task(cpu_rq(cpumask_first(cpu_online_mask)));
if (((now - se->avg.hmp_last_up_migration) >> 10)
< hmp_next_up_threshold)
return 0;
@@ -6564,7 +6564,6 @@ static unsigned int hmp_up_migration(int cpu, int *target_cpu, struct sched_enti
static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
{
struct task_struct *p = task_of(se);
- struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
u64 now;
if (hmp_cpu_is_slowest(cpu))
@@ -6580,7 +6579,8 @@ static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
#endif
/* Let the task load settle before doing another down migration */
- now = cfs_rq_clock_task(cfs_rq);
+ /* hack - always use clock from first online CPU */
+ now = rq_clock_task(cpu_rq(cpumask_first(cpu_online_mask)));
if (((now - se->avg.hmp_last_down_migration) >> 10)
< hmp_next_down_threshold)
return 0;