From 6d869c5b8d4231b630145f77c0410fcfc9a9b749 Mon Sep 17 00:00:00 2001
From: Dietmar Eggemann
Date: Fri, 17 Mar 2017 21:23:35 +0000
Subject: sched/events: Introduce task_group load tracking trace event

The trace event key load is mapped to:

 (1) load : cfs_rq->tg->load_avg

The cfs_rq owned by the task_group is used as the only parameter for the
trace event because it has a reference to the task_group and the cpu.
Using the task_group as a parameter instead would require the cpu as a
second parameter. A task_group is global and not per-cpu data. The cpu
key only tells on which cpu the value was gathered.

The following list shows examples of the key=value pairs for:

 (1) a task group:

     cpu=1 path=/tg1/tg11/tg111 load=517

 (2) an autogroup:

     cpu=1 path=/autogroup-10 load=1050

We don't maintain a load signal for a root task group.

The trace event is only defined if cfs group scheduling support
(CONFIG_FAIR_GROUP_SCHED) is enabled.

Signed-off-by: Dietmar Eggemann
Cc: Peter Zijlstra
Cc: Ingo Molnar
Cc: Steven Rostedt
Signed-off-by: Dietmar Eggemann
---
 include/trace/events/sched.h | 29 +++++++++++++++++++++++++++++
 kernel/sched/fair.c          |  2 ++
 2 files changed, 31 insertions(+)

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 9342a91f3e65..ee18c47fb7ac 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -684,6 +684,35 @@ TRACE_EVENT(sched_load_se,
 		  __entry->cpu, __get_str(path), __entry->comm, __entry->pid,
 		  __entry->load, __entry->rbl_load, __entry->util)
 );
+
+/*
+ * Tracepoint for task_group load tracking:
+ */
+#ifdef CONFIG_FAIR_GROUP_SCHED
+TRACE_EVENT(sched_load_tg,
+
+	TP_PROTO(struct cfs_rq *cfs_rq),
+
+	TP_ARGS(cfs_rq),
+
+	TP_STRUCT__entry(
+		__field(	int,	cpu				)
+		__dynamic_array(char,	path,
+				__trace_sched_path(cfs_rq, NULL, 0)	)
+		__field(	long,	load				)
+	),
+
+	TP_fast_assign(
+		__entry->cpu	= cfs_rq->rq->cpu;
+		__trace_sched_path(cfs_rq, __get_dynamic_array(path),
+				   __get_dynamic_array_len(path));
+		__entry->load	= atomic_long_read(&cfs_rq->tg->load_avg);
+	),
+
+	TP_printk("cpu=%d path=%s load=%ld", __entry->cpu, __get_str(path),
+		  __entry->load)
+);
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 #endif /* CONFIG_SMP */
 
 #endif /* _TRACE_SCHED_H */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e17aa1efeb0a..7ef6bf8e3c98 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3359,6 +3359,8 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
 	if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
 		atomic_long_add(delta, &cfs_rq->tg->load_avg);
 		cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
+
+		trace_sched_load_tg(cfs_rq);
 	}
 }
 
--
cgit v1.2.3
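
Usage note: besides enabling events/sched/sched_load_tg through tracefs
(which emits key=value lines like the examples in the commit message),
in-tree code can attach a probe to the new tracepoint. The following is
a minimal sketch, not part of the patch, assuming CONFIG_SMP=y and
CONFIG_FAIR_GROUP_SCHED=y and a translation unit that can see the
scheduler-internal struct cfs_rq (e.g. under kernel/sched/); the probe
and counter names are illustrative only.

#include <linux/atomic.h>
#include <linux/init.h>
#include <trace/events/sched.h>

/* Illustrative counter: number of task_group load-signal updates seen. */
static atomic_long_t tg_load_updates = ATOMIC_LONG_INIT(0);

/*
 * Probe matching TP_PROTO(struct cfs_rq *cfs_rq); the leading void *
 * argument is the private data pointer passed at registration time.
 */
static void probe_sched_load_tg(void *data, struct cfs_rq *cfs_rq)
{
	atomic_long_inc(&tg_load_updates);
}

static int __init tg_load_probe_init(void)
{
	/* register_trace_<event>() is generated by the TRACE_EVENT() macro. */
	return register_trace_sched_load_tg(probe_sched_load_tg, NULL);
}
late_initcall(tg_load_probe_init);

The probe fires wherever trace_sched_load_tg() is called, i.e. whenever
update_tg_load_avg() actually propagates a delta into tg->load_avg, so
the counter approximates how often the group's global load signal
changes rather than how often the function runs.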