author    Chris Redpath <chris.redpath@arm.com>  2013-10-11 11:44:59 +0100
committer Jon Medhurst <tixy@linaro.org>  2013-10-11 15:07:17 +0100
commit    7b8e0b3f2af55b1ffb5c10be1daa59d8dc21d140 (patch)
tree      ad4fa53146152fb3c9fc780033602d1803ad30ff
parent    d73babce9a77f8143136fe0d7b6c1ae44b5652dc (diff)
sched: HMP: Additional trace points for debugging HMP behaviour
1. Replace magic numbers in code for migration trace. Trace points
   still emit a number as the force=<n> field:
     force=0 : wakeup migration
     force=1 : forced migration
     force=2 : offload migration
     force=3 : idle pull migration

2. Add trace to expose offload decision-making. Also add tracing of
   rq->nr_running so that you can look back to see what state the RQ
   was in at the time.

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Liviu Dudau <Liviu.Dudau@arm.com>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
-rw-r--r--  include/trace/events/sched.h   72
-rw-r--r--  kernel/sched/fair.c            33
2 files changed, 95 insertions(+), 10 deletions(-)
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 203e8e9933b..66dc53bca19 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -530,6 +530,29 @@ TRACE_EVENT(sched_rq_runnable_load,
__entry->load)
);
+TRACE_EVENT(sched_rq_nr_running,
+
+ TP_PROTO(int cpu, unsigned int nr_running, int nr_iowait),
+
+ TP_ARGS(cpu, nr_running, nr_iowait),
+
+ TP_STRUCT__entry(
+ __field(int, cpu)
+ __field(unsigned int, nr_running)
+ __field(int, nr_iowait)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->nr_running = nr_running;
+ __entry->nr_iowait = nr_iowait;
+ ),
+
+ TP_printk("cpu=%d nr_running=%u nr_iowait=%d",
+ __entry->cpu,
+ __entry->nr_running, __entry->nr_iowait)
+);
+
/*
* Tracepoint for showing tracked task cpu usage ratio [0..1023].
*/
@@ -559,6 +582,10 @@ TRACE_EVENT(sched_task_usage_ratio,
/*
* Tracepoint for HMP (CONFIG_SCHED_HMP) task migrations.
*/
+#define HMP_MIGRATE_WAKEUP 0
+#define HMP_MIGRATE_FORCE 1
+#define HMP_MIGRATE_OFFLOAD 2
+#define HMP_MIGRATE_IDLE_PULL 3
TRACE_EVENT(sched_hmp_migrate,
TP_PROTO(struct task_struct *tsk, int dest, int force),
@@ -583,6 +610,51 @@ TRACE_EVENT(sched_hmp_migrate,
__entry->comm, __entry->pid,
__entry->dest, __entry->force)
);
+
+TRACE_EVENT(sched_hmp_offload_abort,
+
+ TP_PROTO(int cpu, int data, char *label),
+
+ TP_ARGS(cpu, data, label),
+
+ TP_STRUCT__entry(
+ __array(char, label, 64)
+ __field(int, cpu)
+ __field(int, data)
+ ),
+
+ TP_fast_assign(
+ strlcpy(__entry->label, label, 64);
+ __entry->cpu = cpu;
+ __entry->data = data;
+ ),
+
+ TP_printk("cpu=%d data=%d label=%63s",
+ __entry->cpu, __entry->data,
+ __entry->label)
+);
+
+TRACE_EVENT(sched_hmp_offload_succeed,
+
+ TP_PROTO(int cpu, int dest_cpu),
+
+ TP_ARGS(cpu, dest_cpu),
+
+ TP_STRUCT__entry(
+ __field(int, cpu)
+ __field(int, dest_cpu)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->dest_cpu = dest_cpu;
+ ),
+
+ TP_printk("cpu=%d dest=%d",
+ __entry->cpu,
+ __entry->dest_cpu)
+);
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 62a93c62a84..78c9307b6ad 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1688,6 +1688,7 @@ static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
__update_tg_runnable_avg(&rq->avg, &rq->cfs);
trace_sched_rq_runnable_ratio(cpu_of(rq), rq->avg.load_avg_ratio);
trace_sched_rq_runnable_load(cpu_of(rq), rq->cfs.runnable_load_avg);
+ trace_sched_rq_nr_running(cpu_of(rq), rq->nr_running, rq->nr_iowait.counter);
}
/* Add the load generated by se into cfs_rq's child load-average */
@@ -4023,25 +4024,37 @@ static inline unsigned int hmp_offload_down(int cpu, struct sched_entity *se)
/* Is there an idle CPU in the current domain */
min_usage = hmp_domain_min_load(hmp_cpu_domain(cpu), NULL);
- if (min_usage == 0)
+ if (min_usage == 0) {
+ trace_sched_hmp_offload_abort(cpu, min_usage, "load");
return NR_CPUS;
+ }
/* Is the task alone on the cpu? */
- if (cpu_rq(cpu)->cfs.h_nr_running < 2)
+ if (cpu_rq(cpu)->cfs.h_nr_running < 2) {
+ trace_sched_hmp_offload_abort(cpu,
+ cpu_rq(cpu)->cfs.h_nr_running, "nr_running");
return NR_CPUS;
+ }
/* Is the task actually starving? */
/* >=25% ratio running/runnable = starving */
- if (hmp_task_starvation(se) > 768)
+ if (hmp_task_starvation(se) > 768) {
+ trace_sched_hmp_offload_abort(cpu, hmp_task_starvation(se),
+ "starvation");
return NR_CPUS;
+ }
/* Does the slower domain have any idle CPUs? */
min_usage = hmp_domain_min_load(hmp_slower_domain(cpu), &dest_cpu);
- if (min_usage > 0)
+ if (min_usage > 0) {
+ trace_sched_hmp_offload_abort(cpu, min_usage, "slowdomain");
return NR_CPUS;
+ }
- if (cpumask_test_cpu(dest_cpu, &hmp_slower_domain(cpu)->cpus))
+ if (cpumask_test_cpu(dest_cpu, &hmp_slower_domain(cpu)->cpus)) {
+ trace_sched_hmp_offload_succeed(cpu, dest_cpu);
return dest_cpu;
+ }
return NR_CPUS;
}
@@ -4178,13 +4191,13 @@ unlock:
#ifdef CONFIG_SCHED_HMP
if (hmp_up_migration(prev_cpu, &new_cpu, &p->se)) {
hmp_next_up_delay(&p->se, new_cpu);
- trace_sched_hmp_migrate(p, new_cpu, 0);
+ trace_sched_hmp_migrate(p, new_cpu, HMP_MIGRATE_WAKEUP);
return new_cpu;
}
if (hmp_down_migration(prev_cpu, &p->se)) {
new_cpu = hmp_select_slower_cpu(p, prev_cpu);
hmp_next_down_delay(&p->se, new_cpu);
- trace_sched_hmp_migrate(p, new_cpu, 0);
+ trace_sched_hmp_migrate(p, new_cpu, HMP_MIGRATE_WAKEUP);
return new_cpu;
}
/* Make sure that the task stays in its previous hmp domain */
@@ -6822,7 +6835,7 @@ static void hmp_force_up_migration(int this_cpu)
target->push_cpu = target_cpu;
target->migrate_task = p;
force = 1;
- trace_sched_hmp_migrate(p, target->push_cpu, 1);
+ trace_sched_hmp_migrate(p, target->push_cpu, HMP_MIGRATE_FORCE);
hmp_next_up_delay(&p->se, target->push_cpu);
}
}
@@ -6838,7 +6851,7 @@ static void hmp_force_up_migration(int this_cpu)
target->active_balance = 1;
target->migrate_task = p;
force = 1;
- trace_sched_hmp_migrate(p, target->push_cpu, 2);
+ trace_sched_hmp_migrate(p, target->push_cpu, HMP_MIGRATE_OFFLOAD);
hmp_next_down_delay(&p->se, target->push_cpu);
}
}
@@ -6917,7 +6930,7 @@ static unsigned int hmp_idle_pull(int this_cpu)
target->push_cpu = this_cpu;
target->migrate_task = p;
force = 1;
- trace_sched_hmp_migrate(p, target->push_cpu, 3);
+ trace_sched_hmp_migrate(p, target->push_cpu, HMP_MIGRATE_IDLE_PULL);
hmp_next_up_delay(&p->se, target->push_cpu);
}
raw_spin_unlock_irqrestore(&target->lock, flags);