From 6f8b7ac2224b4701114295f572a15ac03cbfb560 Mon Sep 17 00:00:00 2001
From: Ke Wang
Date: Wed, 18 Oct 2017 15:49:33 +0800
Subject: trace: sched: Fix util_avg_walt in sched_load_avg_cpu trace

cumulative_runnable_avg was introduced in commit ee4cebd75ed7 ("sched:
EAS/WALT: use cr_avg instead of prev_runnable_sum") to replace
prev_runnable_sum in cpu_util() for task placement.

Fix util_avg_walt in the sched_load_avg_cpu trace event, which still
used prev_runnable_sum while cpu_util() now uses cumulative_runnable_avg.
Moreover, avoid a potential overflow: cumulative_runnable_avg is a u64,
so shift walt_ravg_window right by SCHED_LOAD_SHIFT instead of shifting
the sum left.

Change-Id: I1220477bf2ff32a6e34a34b6280b15a8178203a8
Signed-off-by: Ke Wang
---
 include/trace/events/sched.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'include/trace')

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index d4173039c599..bf96bf05be82 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -724,9 +724,9 @@ TRACE_EVENT(sched_load_avg_cpu,
 		__entry->util_avg_pelt	= cfs_rq->avg.util_avg;
 		__entry->util_avg_walt	= 0;
 #ifdef CONFIG_SCHED_WALT
-		__entry->util_avg_walt	=
-			cpu_rq(cpu)->prev_runnable_sum << SCHED_LOAD_SHIFT;
-		do_div(__entry->util_avg_walt, walt_ravg_window);
+		__entry->util_avg_walt	=
+			div64_u64(cpu_rq(cpu)->cumulative_runnable_avg,
+				  walt_ravg_window >> SCHED_LOAD_SHIFT);
 		if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
 			__entry->util_avg		= __entry->util_avg_walt;
 #endif
--
cgit v1.2.3
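
For reference, below is a minimal standalone sketch (not part of the patch) of
why the rescaling avoids the overflow: shifting the u64 sum left by
SCHED_LOAD_SHIFT can wrap, while shifting the window right keeps the dividend
unchanged. The values used for walt_ravg_window (20 ms in nanoseconds) and
SCHED_LOAD_SHIFT (10) are assumptions for illustration only, not taken from the
patch.

/*
 * Standalone illustration (assumed values, not from the patch):
 * SCHED_LOAD_SHIFT = 10 and walt_ravg_window = 20 ms in ns.
 *
 * Old: (sum << SCHED_LOAD_SHIFT) / walt_ravg_window
 *      -- the left shift can wrap a u64 once sum exceeds ~2^54.
 * New: sum / (walt_ravg_window >> SCHED_LOAD_SHIFT)
 *      -- the constant window is shifted instead, so the dividend
 *         never grows and cannot overflow.
 */
#include <stdint.h>
#include <stdio.h>

#define SCHED_LOAD_SHIFT 10                            /* assumed value */
static const uint64_t walt_ravg_window = 20000000ULL;  /* assumed: 20 ms in ns */

static uint64_t util_old(uint64_t sum)
{
	return (sum << SCHED_LOAD_SHIFT) / walt_ravg_window;  /* may wrap */
}

static uint64_t util_new(uint64_t sum)
{
	return sum / (walt_ravg_window >> SCHED_LOAD_SHIFT);  /* sum unshifted */
}

int main(void)
{
	uint64_t sum = 1ULL << 60;  /* a large cumulative runnable sum */

	printf("old: %llu (wrapped to zero)\n", (unsigned long long)util_old(sum));
	printf("new: %llu\n", (unsigned long long)util_new(sum));
	return 0;
}

With the large example value, the old expression wraps modulo 2^64 and reports
a bogus near-zero utilization, while the new expression keeps the full
magnitude of the sum and only divides it down.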