Diffstat (limited to 'include/trace/events')
 include/trace/events/arm-ipi.h           | 100
 include/trace/events/power_cpu_migrate.h |  67
 include/trace/events/sched.h             | 274
 include/trace/events/smp.h               |  90
 4 files changed, 531 insertions(+), 0 deletions(-)
diff --git a/include/trace/events/arm-ipi.h b/include/trace/events/arm-ipi.h
new file mode 100644
index 000000000000..5d3bd21827be
--- /dev/null
+++ b/include/trace/events/arm-ipi.h
@@ -0,0 +1,100 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM arm-ipi
+
+#if !defined(_TRACE_ARM_IPI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ARM_IPI_H
+
+#include <linux/tracepoint.h>
+
+#define show_arm_ipi_name(val) \
+ __print_symbolic(val, \
+ { 0, "IPI_WAKEUP" }, \
+ { 1, "IPI_TIMER" }, \
+ { 2, "IPI_RESCHEDULE" }, \
+ { 3, "IPI_CALL_FUNC" }, \
+ { 4, "IPI_CALL_FUNC_SINGLE" }, \
+ { 5, "IPI_CPU_STOP" }, \
+ { 6, "IPI_COMPLETION" }, \
+ { 7, "IPI_CPU_BACKTRACE" })
+
+DECLARE_EVENT_CLASS(arm_ipi,
+
+ TP_PROTO(unsigned int ipi_nr),
+
+ TP_ARGS(ipi_nr),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ipi )
+ ),
+
+ TP_fast_assign(
+ __entry->ipi = ipi_nr;
+ ),
+
+ TP_printk("ipi=%u [action=%s]", __entry->ipi,
+ show_arm_ipi_name(__entry->ipi))
+);
+
+/**
+ * arm_ipi_entry - called in the arm-generic ipi handler immediately before
+ *		   entering the ipi-type handler
+ * @ipi_nr: ipi number
+ *
+ * When used in combination with the arm_ipi_exit tracepoint
+ * we can determine the ipi handler runtime.
+ */
+DEFINE_EVENT(arm_ipi, arm_ipi_entry,
+
+ TP_PROTO(unsigned int ipi_nr),
+
+ TP_ARGS(ipi_nr)
+);
+
+/**
+ * arm_ipi_exit - called in the arm-generic ipi handler immediately
+ * after the ipi-type handler returns
+ * @ipi_nr: ipi number
+ *
+ * When used in combination with the arm_ipi_entry tracepoint
+ * we can determine the ipi handler runtime.
+ */
+DEFINE_EVENT(arm_ipi, arm_ipi_exit,
+
+ TP_PROTO(unsigned int ipi_nr),
+
+ TP_ARGS(ipi_nr)
+);
+
+/**
+ * arm_ipi_send - called as the ipi target mask is built, immediately
+ * before the register is written
+ * @ipi_nr: ipi number
+ * @dest: cpu to send to
+ *
+ * When used in combination with the arm_ipi_entry tracepoint
+ * we can determine the ipi raise-to-run latency.
+ */
+TRACE_EVENT(arm_ipi_send,
+
+ TP_PROTO(unsigned int ipi_nr, int dest),
+
+ TP_ARGS(ipi_nr, dest),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ipi )
+ __field( int , dest )
+ ),
+
+ TP_fast_assign(
+ __entry->ipi = ipi_nr;
+ __entry->dest = dest;
+ ),
+
+ TP_printk("dest=%d ipi=%u [action=%s]", __entry->dest,
+ __entry->ipi, show_arm_ipi_name(__entry->ipi))
+);
+
+#endif /* _TRACE_ARM_IPI_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
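
For reference, a minimal sketch of how the hooks generated above would typically be wired into the ARM IPI path. The call sites (for example arch/arm/kernel/smp.c) are outside this diff, and the example_* helpers are hypothetical; only the trace_arm_ipi_* calls and their signatures come from the header.

	/*
	 * Illustrative only -- assumed call sites, not part of this patch.
	 */
	#include <linux/cpumask.h>

	#define CREATE_TRACE_POINTS
	#include <trace/events/arm-ipi.h>

	static void example_smp_cross_call(const struct cpumask *mask,
					   unsigned int irqnr)
	{
		int cpu;

		/* one send event per destination cpu, before the SGI is raised */
		for_each_cpu(cpu, mask)
			trace_arm_ipi_send(irqnr, cpu);

		/* the arch-specific cross-call register write would follow here */
	}

	static void example_handle_IPI(int ipinr)
	{
		trace_arm_ipi_entry(ipinr);
		/* dispatch to the ipi-type handler (scheduler_ipi(), timer tick, ...) */
		trace_arm_ipi_exit(ipinr);
	}
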
diff --git a/include/trace/events/power_cpu_migrate.h b/include/trace/events/power_cpu_migrate.h
new file mode 100644
index 000000000000..f76dd4de625e
--- /dev/null
+++ b/include/trace/events/power_cpu_migrate.h
@@ -0,0 +1,67 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM power
+
+#if !defined(_TRACE_POWER_CPU_MIGRATE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_POWER_CPU_MIGRATE_H
+
+#include <linux/tracepoint.h>
+
+#define __cpu_migrate_proto \
+ TP_PROTO(u64 timestamp, \
+ u32 cpu_hwid)
+#define __cpu_migrate_args \
+ TP_ARGS(timestamp, \
+ cpu_hwid)
+
+DECLARE_EVENT_CLASS(cpu_migrate,
+
+ __cpu_migrate_proto,
+ __cpu_migrate_args,
+
+ TP_STRUCT__entry(
+ __field(u64, timestamp )
+ __field(u32, cpu_hwid )
+ ),
+
+ TP_fast_assign(
+ __entry->timestamp = timestamp;
+ __entry->cpu_hwid = cpu_hwid;
+ ),
+
+ TP_printk("timestamp=%llu cpu_hwid=0x%08lX",
+ (unsigned long long)__entry->timestamp,
+ (unsigned long)__entry->cpu_hwid
+ )
+);
+
+#define __define_cpu_migrate_event(name) \
+ DEFINE_EVENT(cpu_migrate, cpu_migrate_##name, \
+ __cpu_migrate_proto, \
+ __cpu_migrate_args \
+ )
+
+__define_cpu_migrate_event(begin);
+__define_cpu_migrate_event(finish);
+__define_cpu_migrate_event(current);
+
+#undef __define_cpu_migrate_event
+#undef __cpu_migrate_proto
+#undef __cpu_migrate_args
+
+/* This file can get included multiple times; see TRACE_HEADER_MULTI_READ at the top. */
+#ifndef _PWR_CPU_MIGRATE_EVENT_AVOID_DOUBLE_DEFINING
+#define _PWR_CPU_MIGRATE_EVENT_AVOID_DOUBLE_DEFINING
+
+/*
+ * Set from_phys_cpu and to_phys_cpu to CPU_MIGRATE_ALL_CPUS to indicate
+ * a whole-cluster migration.
+ */
+#define CPU_MIGRATE_ALL_CPUS 0x80000000U
+#endif
+
+#endif /* _TRACE_POWER_CPU_MIGRATE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE power_cpu_migrate
+#include <trace/define_trace.h>
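
For reference, a hedged sketch of a possible caller: a cluster-switch path emitting a begin/finish pair around a migration. The example_* helper and the ktime-based timestamp source are assumptions; only trace_cpu_migrate_begin()/trace_cpu_migrate_finish() and CPU_MIGRATE_ALL_CPUS come from the header above.

	/*
	 * Illustrative only -- a hypothetical switcher call site, not part of
	 * this patch.
	 */
	#include <linux/ktime.h>

	#define CREATE_TRACE_POINTS
	#include <trace/events/power_cpu_migrate.h>

	static void example_cluster_switch(u32 from_hwid, u32 to_hwid)
	{
		trace_cpu_migrate_begin(ktime_to_ns(ktime_get()), from_hwid);

		/* ... move execution to the other cluster here ... */

		trace_cpu_migrate_finish(ktime_to_ns(ktime_get()), to_hwid);
	}

	/*
	 * Passing CPU_MIGRATE_ALL_CPUS as the hwid marks a whole-cluster
	 * migration rather than a single-cpu one.
	 */
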
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index e5586caff67a..2afcb71857fd 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -430,6 +430,280 @@ TRACE_EVENT(sched_pi_setprio,
__entry->oldprio, __entry->newprio)
);
+/*
+ * Tracepoint for showing tracked load contribution.
+ */
+TRACE_EVENT(sched_task_load_contrib,
+
+ TP_PROTO(struct task_struct *tsk, unsigned long load_contrib),
+
+ TP_ARGS(tsk, load_contrib),
+
+ TP_STRUCT__entry(
+ __array(char, comm, TASK_COMM_LEN)
+ __field(pid_t, pid)
+ __field(unsigned long, load_contrib)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->load_contrib = load_contrib;
+ ),
+
+ TP_printk("comm=%s pid=%d load_contrib=%lu",
+ __entry->comm, __entry->pid,
+ __entry->load_contrib)
+);
+
+/*
+ * Tracepoint for showing tracked task runnable ratio [0..1023].
+ */
+TRACE_EVENT(sched_task_runnable_ratio,
+
+ TP_PROTO(struct task_struct *tsk, unsigned long ratio),
+
+ TP_ARGS(tsk, ratio),
+
+ TP_STRUCT__entry(
+ __array(char, comm, TASK_COMM_LEN)
+ __field(pid_t, pid)
+ __field(unsigned long, ratio)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->ratio = ratio;
+ ),
+
+ TP_printk("comm=%s pid=%d ratio=%lu",
+ __entry->comm, __entry->pid,
+ __entry->ratio)
+);
+
+/*
+ * Tracepoint for showing tracked rq runnable ratio [0..1023].
+ */
+TRACE_EVENT(sched_rq_runnable_ratio,
+
+ TP_PROTO(int cpu, unsigned long ratio),
+
+ TP_ARGS(cpu, ratio),
+
+ TP_STRUCT__entry(
+ __field(int, cpu)
+ __field(unsigned long, ratio)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->ratio = ratio;
+ ),
+
+ TP_printk("cpu=%d ratio=%lu",
+ __entry->cpu,
+ __entry->ratio)
+);
+
+/*
+ * Tracepoint for showing tracked rq runnable load.
+ */
+TRACE_EVENT(sched_rq_runnable_load,
+
+ TP_PROTO(int cpu, u64 load),
+
+ TP_ARGS(cpu, load),
+
+ TP_STRUCT__entry(
+ __field(int, cpu)
+ __field(u64, load)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->load = load;
+ ),
+
+ TP_printk("cpu=%d load=%llu",
+ __entry->cpu,
+ __entry->load)
+);
+
+TRACE_EVENT(sched_rq_nr_running,
+
+ TP_PROTO(int cpu, unsigned int nr_running, int nr_iowait),
+
+ TP_ARGS(cpu, nr_running, nr_iowait),
+
+ TP_STRUCT__entry(
+ __field(int, cpu)
+ __field(unsigned int, nr_running)
+ __field(int, nr_iowait)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->nr_running = nr_running;
+ __entry->nr_iowait = nr_iowait;
+ ),
+
+ TP_printk("cpu=%d nr_running=%u nr_iowait=%d",
+ __entry->cpu,
+ __entry->nr_running, __entry->nr_iowait)
+);
+
+/*
+ * Tracepoint for showing tracked task cpu usage ratio [0..1023].
+ */
+TRACE_EVENT(sched_task_usage_ratio,
+
+ TP_PROTO(struct task_struct *tsk, unsigned long ratio),
+
+ TP_ARGS(tsk, ratio),
+
+ TP_STRUCT__entry(
+ __array(char, comm, TASK_COMM_LEN)
+ __field(pid_t, pid)
+ __field(unsigned long, ratio)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->ratio = ratio;
+ ),
+
+ TP_printk("comm=%s pid=%d ratio=%lu",
+ __entry->comm, __entry->pid,
+ __entry->ratio)
+);
+
+/*
+ * Tracepoint for HMP (CONFIG_SCHED_HMP) task migrations,
+ * marking the forced transition of runnable or running tasks.
+ */
+TRACE_EVENT(sched_hmp_migrate_force_running,
+
+ TP_PROTO(struct task_struct *tsk, int running),
+
+ TP_ARGS(tsk, running),
+
+ TP_STRUCT__entry(
+ __array(char, comm, TASK_COMM_LEN)
+ __field(int, running)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->running = running;
+ ),
+
+ TP_printk("running=%d comm=%s",
+ __entry->running, __entry->comm)
+);
+
+/*
+ * Tracepoint for HMP (CONFIG_SCHED_HMP) task migrations,
+ * marking the forced transition of runnable or running
+ * tasks when a task is about to go idle.
+ */
+TRACE_EVENT(sched_hmp_migrate_idle_running,
+
+ TP_PROTO(struct task_struct *tsk, int running),
+
+ TP_ARGS(tsk, running),
+
+ TP_STRUCT__entry(
+ __array(char, comm, TASK_COMM_LEN)
+ __field(int, running)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->running = running;
+ ),
+
+ TP_printk("running=%d comm=%s",
+ __entry->running, __entry->comm)
+);
+
+/*
+ * Tracepoint for HMP (CONFIG_SCHED_HMP) task migrations.
+ */
+#define HMP_MIGRATE_WAKEUP 0
+#define HMP_MIGRATE_FORCE 1
+#define HMP_MIGRATE_OFFLOAD 2
+#define HMP_MIGRATE_IDLE_PULL 3
+TRACE_EVENT(sched_hmp_migrate,
+
+ TP_PROTO(struct task_struct *tsk, int dest, int force),
+
+ TP_ARGS(tsk, dest, force),
+
+ TP_STRUCT__entry(
+ __array(char, comm, TASK_COMM_LEN)
+ __field(pid_t, pid)
+ __field(int, dest)
+ __field(int, force)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->dest = dest;
+ __entry->force = force;
+ ),
+
+ TP_printk("comm=%s pid=%d dest=%d force=%d",
+ __entry->comm, __entry->pid,
+ __entry->dest, __entry->force)
+);
+
+TRACE_EVENT(sched_hmp_offload_abort,
+
+ TP_PROTO(int cpu, int data, char *label),
+
+ TP_ARGS(cpu, data, label),
+
+ TP_STRUCT__entry(
+ __array(char, label, 64)
+ __field(int, cpu)
+ __field(int, data)
+ ),
+
+ TP_fast_assign(
+ strlcpy(__entry->label, label, 64);
+ __entry->cpu = cpu;
+ __entry->data = data;
+ ),
+
+ TP_printk("cpu=%d data=%d label=%63s",
+ __entry->cpu, __entry->data,
+ __entry->label)
+);
+
+TRACE_EVENT(sched_hmp_offload_succeed,
+
+ TP_PROTO(int cpu, int dest_cpu),
+
+ TP_ARGS(cpu, dest_cpu),
+
+ TP_STRUCT__entry(
+ __field(int, cpu)
+ __field(int, dest_cpu)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->dest_cpu = dest_cpu;
+ ),
+
+ TP_printk("cpu=%d dest=%d",
+ __entry->cpu,
+ __entry->dest_cpu)
+);
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
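
For reference, a hedged sketch of how the HMP migration tracepoint added above would be emitted from CONFIG_SCHED_HMP scheduler code. The example_* helper and its arguments are hypothetical; the per-entity load tracepoints (sched_task_load_contrib, sched_rq_runnable_load, ...) are called the same way from the load-tracking update paths. In the scheduler proper the tracepoints are instantiated by kernel/sched/core.c, so callers only include the header.

	/*
	 * Illustrative only -- a hypothetical CONFIG_SCHED_HMP call site.
	 */
	#include <linux/sched.h>
	#include <trace/events/sched.h>

	static void example_report_hmp_migration(struct task_struct *p,
						 int dest_cpu, bool forced)
	{
		/* reason codes defined above: WAKEUP, FORCE, OFFLOAD, IDLE_PULL */
		int reason = forced ? HMP_MIGRATE_FORCE : HMP_MIGRATE_WAKEUP;

		trace_sched_hmp_migrate(p, dest_cpu, reason);
	}
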
diff --git a/include/trace/events/smp.h b/include/trace/events/smp.h
new file mode 100644
index 000000000000..da0baf27a39a
--- /dev/null
+++ b/include/trace/events/smp.h
@@ -0,0 +1,90 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM smp
+
+#if !defined(_TRACE_SMP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SMP_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(smp_call_class,
+
+ TP_PROTO(void *fnc),
+
+ TP_ARGS(fnc),
+
+ TP_STRUCT__entry(
+ __field( void *, func )
+ ),
+
+ TP_fast_assign(
+ __entry->func = fnc;
+ ),
+
+ TP_printk("func=%pf", __entry->func)
+);
+
+/**
+ * smp_call_func_entry - called in the generic smp-cross-call-handler
+ * immediately before calling the destination
+ * function
+ * @func: function pointer
+ *
+ * When used in combination with the smp_call_func_exit tracepoint
+ * we can determine the cross-call runtime.
+ */
+DEFINE_EVENT(smp_call_class, smp_call_func_entry,
+
+ TP_PROTO(void *fnc),
+
+ TP_ARGS(fnc)
+);
+
+/**
+ * smp_call_func_exit - called in the generic smp-cross-call-handler
+ * immediately after the destination function
+ * returns
+ * @func: function pointer
+ *
+ * When used in combination with the smp_call_func_entry tracepoint
+ * we can determine the cross-call runtime.
+ */
+DEFINE_EVENT(smp_call_class, smp_call_func_exit,
+
+ TP_PROTO(void *fnc),
+
+ TP_ARGS(fnc)
+);
+
+/**
+ * smp_call_func_send - called as destination function is set
+ * in the per-cpu storage
+ * @func: function pointer
+ * @dest: cpu to send to
+ *
+ * When used in combination with the smp_call_func_entry tracepoint
+ * we can determine the call-to-run latency.
+ */
+TRACE_EVENT(smp_call_func_send,
+
+ TP_PROTO(void *func, int dest),
+
+ TP_ARGS(func, dest),
+
+ TP_STRUCT__entry(
+ __field( void * , func )
+ __field( int , dest )
+ ),
+
+ TP_fast_assign(
+ __entry->func = func;
+ __entry->dest = dest;
+ ),
+
+ TP_printk("dest=%d func=%pf", __entry->dest,
+ __entry->func)
+);
+
+#endif /* _TRACE_SMP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
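
For reference, a minimal sketch of where the generic cross-call code (kernel/smp.c, not shown in this diff) would place these hooks. The example_* stand-ins are hypothetical; only the trace_smp_call_func_* calls come from the header above. Pairing the send timestamp with the matching entry event on the destination cpu gives the call-to-run latency, and entry/exit pairs give the handler runtime.

	/*
	 * Illustrative only -- simplified stand-ins for the generic
	 * cross-call paths, not part of this patch.
	 */
	#define CREATE_TRACE_POINTS
	#include <trace/events/smp.h>

	static void example_queue_call(void *func, int cpu)
	{
		/* emitted as the destination function is queued for @cpu */
		trace_smp_call_func_send(func, cpu);
		/* the IPI that kicks @cpu would be sent here */
	}

	static void example_run_call(void (*func)(void *info), void *info)
	{
		trace_smp_call_func_entry((void *)func);
		func(info);			/* the destination function itself */
		trace_smp_call_func_exit((void *)func);
	}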