-rw-r--r--  Documentation/ftrace.txt           |  6
-rw-r--r--  arch/Kconfig                       |  1
-rw-r--r--  include/linux/ftrace.h             | 58
-rw-r--r--  include/linux/kernel.h             | 58
-rw-r--r--  kernel/trace/Kconfig               | 29
-rw-r--r--  kernel/trace/blktrace.c            |  1
-rw-r--r--  kernel/trace/ring_buffer.c         |  6
-rw-r--r--  kernel/trace/trace.c               | 91
-rw-r--r--  kernel/trace/trace.h               |  5
-rw-r--r--  kernel/trace/trace_branch.c        |  1
-rw-r--r--  kernel/trace/trace_irqsoff.c       |  8
-rw-r--r--  kernel/trace/trace_output.c        | 32
-rw-r--r--  kernel/trace/trace_output.h        |  1
-rw-r--r--  kernel/trace/trace_sched_wakeup.c  |  8
14 files changed, 133 insertions(+), 172 deletions(-)
diff --git a/Documentation/ftrace.txt b/Documentation/ftrace.txt
index 2041ee951c1..22614bef635 100644
--- a/Documentation/ftrace.txt
+++ b/Documentation/ftrace.txt
@@ -1466,11 +1466,11 @@ want, depending on your needs.
You can put some comments on specific functions by using
-ftrace_printk() For example, if you want to put a comment inside
+trace_printk() For example, if you want to put a comment inside
the __might_sleep() function, you just have to include
-<linux/ftrace.h> and call ftrace_printk() inside __might_sleep()
+<linux/ftrace.h> and call trace_printk() inside __might_sleep()
-ftrace_printk("I'm a comment!\n")
+trace_printk("I'm a comment!\n")
will produce:
diff --git a/arch/Kconfig b/arch/Kconfig
index 550dab22daa..a092dc77c24 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -6,6 +6,7 @@ config OPROFILE
tristate "OProfile system profiling (EXPERIMENTAL)"
depends on PROFILING
depends on HAVE_OPROFILE
+ depends on TRACING_SUPPORT
select TRACING
select RING_BUFFER
help
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 6ea62acbe4b..498769425eb 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -319,62 +319,6 @@ static inline void __ftrace_enabled_restore(int enabled)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif
-#ifdef CONFIG_TRACING
-extern int ftrace_dump_on_oops;
-
-extern void tracing_start(void);
-extern void tracing_stop(void);
-extern void ftrace_off_permanent(void);
-
-extern void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
-
-/**
- * ftrace_printk - printf formatting in the ftrace buffer
- * @fmt: the printf format for printing
- *
- * Note: __ftrace_printk is an internal function for ftrace_printk and
- * the @ip is passed in via the ftrace_printk macro.
- *
- * This function allows a kernel developer to debug fast path sections
- * that printk is not appropriate for. By scattering in various
- * printk like tracing in the code, a developer can quickly see
- * where problems are occurring.
- *
- * This is intended as a debugging tool for the developer only.
- * Please refrain from leaving ftrace_printks scattered around in
- * your code.
- */
-# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt)
-extern int
-__ftrace_printk(unsigned long ip, const char *fmt, ...)
- __attribute__ ((format (printf, 2, 3)));
-# define ftrace_vprintk(fmt, ap) __ftrace_printk(_THIS_IP_, fmt, ap)
-extern int
-__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
-extern void ftrace_dump(void);
-#else
-static inline void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
-static inline int
-ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
-
-static inline void tracing_start(void) { }
-static inline void tracing_stop(void) { }
-static inline void ftrace_off_permanent(void) { }
-static inline int
-ftrace_printk(const char *fmt, ...)
-{
- return 0;
-}
-static inline int
-ftrace_vprintk(const char *fmt, va_list ap)
-{
- return 0;
-}
-static inline void ftrace_dump(void) { }
-#endif
-
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
extern void ftrace_init_module(struct module *mod,
@@ -543,6 +487,8 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
return tsk->trace & TSK_TRACE_FL_GRAPH;
}
+extern int ftrace_dump_on_oops;
+
#endif /* CONFIG_TRACING */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 7fa371898e3..08bf5da8667 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -368,6 +368,64 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
#endif
/*
+ * General tracing related utility functions - trace_printk(),
+ * tracing_start()/tracing_stop:
+ */
+#ifdef CONFIG_TRACING
+extern void tracing_start(void);
+extern void tracing_stop(void);
+extern void ftrace_off_permanent(void);
+
+extern void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
+
+/**
+ * trace_printk - printf formatting in the ftrace buffer
+ * @fmt: the printf format for printing
+ *
+ * Note: __trace_printk is an internal function for trace_printk and
+ * the @ip is passed in via the trace_printk macro.
+ *
+ * This function allows a kernel developer to debug fast path sections
+ * that printk is not appropriate for. By scattering in various
+ * printk like tracing in the code, a developer can quickly see
+ * where problems are occurring.
+ *
+ * This is intended as a debugging tool for the developer only.
+ * Please refrain from leaving trace_printks scattered around in
+ * your code.
+ */
+# define trace_printk(fmt...) __trace_printk(_THIS_IP_, fmt)
+extern int
+__trace_printk(unsigned long ip, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+# define ftrace_vprintk(fmt, ap) __trace_printk(_THIS_IP_, fmt, ap)
+extern int
+__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
+extern void ftrace_dump(void);
+#else
+static inline void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
+static inline int
+trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
+
+static inline void tracing_start(void) { }
+static inline void tracing_stop(void) { }
+static inline void ftrace_off_permanent(void) { }
+static inline int
+trace_printk(const char *fmt, ...)
+{
+ return 0;
+}
+static inline int
+ftrace_vprintk(const char *fmt, va_list ap)
+{
+ return 0;
+}
+static inline void ftrace_dump(void) { }
+#endif
+
+/*
* Display an IP address in readable format.
*/
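As a rough, non-authoritative sketch of how the trace_printk() machinery moved into <linux/kernel.h> above is meant to be used (the caller and its variable below are hypothetical, not part of this patch):

	#include <linux/kernel.h>

	static void my_fast_path(int x)		/* hypothetical caller */
	{
		/*
		 * With CONFIG_TRACING=y this expands to
		 * __trace_printk(_THIS_IP_, "x=%d\n", x) and records the
		 * message in the ftrace ring buffer; with CONFIG_TRACING
		 * unset it hits the static inline stub above and returns 0.
		 */
		trace_printk("x=%d\n", x);
	}
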
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 999c6a2485d..5d733da5345 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -53,12 +53,22 @@ config TRACING
select TRACEPOINTS
select NOP_TRACER
+#
+# Minimum requirements an architecture has to meet for us to
+# be able to offer generic tracing facilities:
+#
+config TRACING_SUPPORT
+ bool
+ depends on TRACE_IRQFLAGS_SUPPORT
+ depends on STACKTRACE_SUPPORT
+
+if TRACING_SUPPORT
+
menu "Tracers"
config FUNCTION_TRACER
bool "Kernel Function Tracer"
depends on HAVE_FUNCTION_TRACER
- depends on DEBUG_KERNEL
select FRAME_POINTER
select KALLSYMS
select TRACING
@@ -91,7 +101,6 @@ config IRQSOFF_TRACER
default n
depends on TRACE_IRQFLAGS_SUPPORT
depends on GENERIC_TIME
- depends on DEBUG_KERNEL
select TRACE_IRQFLAGS
select TRACING
select TRACER_MAX_TRACE
@@ -114,7 +123,6 @@ config PREEMPT_TRACER
default n
depends on GENERIC_TIME
depends on PREEMPT
- depends on DEBUG_KERNEL
select TRACING
select TRACER_MAX_TRACE
help
@@ -142,7 +150,6 @@ config SYSPROF_TRACER
config SCHED_TRACER
bool "Scheduling Latency Tracer"
- depends on DEBUG_KERNEL
select TRACING
select CONTEXT_SWITCH_TRACER
select TRACER_MAX_TRACE
@@ -152,7 +159,6 @@ config SCHED_TRACER
config CONTEXT_SWITCH_TRACER
bool "Trace process context switches"
- depends on DEBUG_KERNEL
select TRACING
select MARKERS
help
@@ -161,7 +167,6 @@ config CONTEXT_SWITCH_TRACER
config EVENT_TRACER
bool "Trace various events in the kernel"
- depends on DEBUG_KERNEL
select TRACING
help
This tracer hooks to various trace points in the kernel
@@ -170,7 +175,6 @@ config EVENT_TRACER
config BOOT_TRACER
bool "Trace boot initcalls"
- depends on DEBUG_KERNEL
select TRACING
select CONTEXT_SWITCH_TRACER
help
@@ -188,7 +192,6 @@ config BOOT_TRACER
config TRACE_BRANCH_PROFILING
bool "Trace likely/unlikely profiler"
- depends on DEBUG_KERNEL
select TRACING
help
This tracer profiles all the the likely and unlikely macros
@@ -241,7 +244,6 @@ config BRANCH_TRACER
config POWER_TRACER
bool "Trace power consumption behavior"
- depends on DEBUG_KERNEL
depends on X86
select TRACING
help
@@ -253,7 +255,6 @@ config POWER_TRACER
config STACK_TRACER
bool "Trace max stack"
depends on HAVE_FUNCTION_TRACER
- depends on DEBUG_KERNEL
select FUNCTION_TRACER
select STACKTRACE
select KALLSYMS
@@ -343,7 +344,6 @@ config DYNAMIC_FTRACE
bool "enable/disable ftrace tracepoints dynamically"
depends on FUNCTION_TRACER
depends on HAVE_DYNAMIC_FTRACE
- depends on DEBUG_KERNEL
default y
help
This option will modify all the calls to ftrace dynamically
@@ -369,7 +369,7 @@ config FTRACE_SELFTEST
config FTRACE_STARTUP_TEST
bool "Perform a startup test on ftrace"
- depends on TRACING && DEBUG_KERNEL
+ depends on TRACING
select FTRACE_SELFTEST
help
This option performs a series of startup tests on ftrace. On bootup
@@ -379,7 +379,7 @@ config FTRACE_STARTUP_TEST
config MMIOTRACE
bool "Memory mapped IO tracing"
- depends on HAVE_MMIOTRACE_SUPPORT && DEBUG_KERNEL && PCI
+ depends on HAVE_MMIOTRACE_SUPPORT && PCI
select TRACING
help
Mmiotrace traces Memory Mapped I/O access and is meant for
@@ -401,3 +401,6 @@ config MMIOTRACE_TEST
Say N, unless you absolutely know what you are doing.
endmenu
+
+endif # TRACING_SUPPORT
+
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index e82cb9e930c..e39679a72a3 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1231,7 +1231,6 @@ static struct tracer blk_tracer __read_mostly = {
static struct trace_event trace_blk_event = {
.type = TRACE_BLK,
.trace = blk_trace_event_print,
- .latency_trace = blk_trace_event_print,
.binary = blk_trace_event_print_binary,
};
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f2a163db52f..f7473645b9c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2461,6 +2461,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
unsigned long flags;
unsigned int commit;
unsigned int read;
+ u64 save_timestamp;
int ret = -1;
/*
@@ -2515,6 +2516,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
if (len < size)
goto out;
+ /* save the current timestamp, since the user will need it */
+ save_timestamp = cpu_buffer->read_stamp;
+
/* Need to copy one event at a time */
do {
memcpy(bpage->data + pos, rpage->data + rpos, size);
@@ -2531,7 +2535,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
/* update bpage */
local_set(&bpage->commit, pos);
- bpage->time_stamp = rpage->time_stamp;
+ bpage->time_stamp = save_timestamp;
/* we copied everything to the beginning */
read = 0;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c8abbb0c839..c0e9c126339 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -48,7 +48,7 @@ unsigned long __read_mostly tracing_thresh;
* We need to change this state when a selftest is running.
* A selftest will lurk into the ring-buffer to count the
* entries inserted during the selftest although some concurrent
- * insertions into the ring-buffer such as ftrace_printk could occurred
+ * insertions into the ring-buffer such as trace_printk could occurred
* at the same time, giving false positive or negative results.
*/
static bool __read_mostly tracing_selftest_running;
@@ -291,7 +291,7 @@ static const char *trace_options[] = {
"block",
"stacktrace",
"sched-tree",
- "ftrace_printk",
+ "trace_printk",
"ftrace_preempt",
"branch",
"annotate",
@@ -299,6 +299,7 @@ static const char *trace_options[] = {
"sym-userobj",
"printk-msg-only",
"context-info",
+ "latency-format",
NULL
};
@@ -346,6 +347,9 @@ ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
int len;
int ret;
+ if (!cnt)
+ return 0;
+
if (s->len <= s->readpos)
return -EBUSY;
@@ -353,10 +357,12 @@ ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
if (cnt > len)
cnt = len;
ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
- if (ret)
+ if (ret == cnt)
return -EFAULT;
- s->readpos += len;
+ cnt -= ret;
+
+ s->readpos += cnt;
return cnt;
}
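A hedged sketch of the copy_to_user() accounting pattern introduced in the hunk above, relying only on the fact that copy_to_user() returns the number of bytes it could not copy; the helper name and buffers are hypothetical, and the same idea appears again in the tracing_buffers_read() hunk further down:

	#include <linux/uaccess.h>

	/* Hypothetical helper, not part of this patch. */
	static ssize_t copy_out(char __user *ubuf, const char *kbuf,
				size_t cnt, loff_t *ppos)
	{
		unsigned long ret;

		/* copy_to_user() returns the number of bytes NOT copied */
		ret = copy_to_user(ubuf, kbuf, cnt);
		if (ret == cnt)		/* nothing was copied at all */
			return -EFAULT;
		cnt -= ret;		/* account only for what was copied */
		*ppos += cnt;
		return cnt;
	}
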
@@ -375,7 +381,7 @@ ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
if (!ret)
return -EFAULT;
- s->readpos += len;
+ s->readpos += cnt;
return cnt;
}
@@ -1462,33 +1468,6 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
}
-static enum print_line_t print_lat_fmt(struct trace_iterator *iter)
-{
- struct trace_seq *s = &iter->seq;
- unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
- struct trace_event *event;
- struct trace_entry *entry = iter->ent;
-
- test_cpu_buff_start(iter);
-
- event = ftrace_find_event(entry->type);
-
- if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
- if (!trace_print_lat_context(iter))
- goto partial;
- }
-
- if (event)
- return event->latency_trace(iter, sym_flags);
-
- if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
- goto partial;
-
- return TRACE_TYPE_HANDLED;
-partial:
- return TRACE_TYPE_PARTIAL_LINE;
-}
-
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
@@ -1503,8 +1482,13 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
event = ftrace_find_event(entry->type);
if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
- if (!trace_print_context(iter))
- goto partial;
+ if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
+ if (!trace_print_lat_context(iter))
+ goto partial;
+ } else {
+ if (!trace_print_context(iter))
+ goto partial;
+ }
}
if (event)
@@ -1646,9 +1630,6 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
if (trace_flags & TRACE_ITER_RAW)
return print_raw_fmt(iter);
- if (iter->iter_flags & TRACE_FILE_LAT_FMT)
- return print_lat_fmt(iter);
-
return print_trace_fmt(iter);
}
@@ -1824,26 +1805,12 @@ static int tracing_open(struct inode *inode, struct file *file)
iter = __tracing_open(inode, file);
if (IS_ERR(iter))
ret = PTR_ERR(iter);
-
- return ret;
-}
-
-static int tracing_lt_open(struct inode *inode, struct file *file)
-{
- struct trace_iterator *iter;
- int ret = 0;
-
- iter = __tracing_open(inode, file);
-
- if (IS_ERR(iter))
- ret = PTR_ERR(iter);
- else
+ else if (trace_flags & TRACE_ITER_LATENCY_FMT)
iter->iter_flags |= TRACE_FILE_LAT_FMT;
return ret;
}
-
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
@@ -1922,13 +1889,6 @@ static struct file_operations tracing_fops = {
.release = tracing_release,
};
-static struct file_operations tracing_lt_fops = {
- .open = tracing_lt_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = tracing_release,
-};
-
static struct file_operations show_traces_fops = {
.open = show_traces_open,
.read = seq_read,
@@ -3049,6 +3009,9 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
ssize_t ret;
size_t size;
+ if (!count)
+ return 0;
+
/* Do we have previous read data to read? */
if (info->read < PAGE_SIZE)
goto read;
@@ -3073,8 +3036,10 @@ read:
size = count;
ret = copy_to_user(ubuf, info->spare + info->read, size);
- if (ret)
+ if (ret == size)
return -EFAULT;
+ size -= ret;
+
*ppos += size;
info->read += size;
@@ -3803,7 +3768,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
}
EXPORT_SYMBOL_GPL(trace_vprintk);
-int __ftrace_printk(unsigned long ip, const char *fmt, ...)
+int __trace_printk(unsigned long ip, const char *fmt, ...)
{
int ret;
va_list ap;
@@ -3816,7 +3781,7 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...)
va_end(ap);
return ret;
}
-EXPORT_SYMBOL_GPL(__ftrace_printk);
+EXPORT_SYMBOL_GPL(__trace_printk);
int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
{
@@ -3918,8 +3883,10 @@ void ftrace_dump(void)
printk(KERN_TRACE "Dumping ftrace buffer:\n");
+ /* Simulate the iterator */
iter.tr = &global_trace;
iter.trace = current_trace;
+ iter.cpu_file = TRACE_PIPE_ALL_CPU;
/*
* We need to stop all tracing on all CPUS to read the
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 561bb5c5d98..8beff03fda6 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -115,7 +115,7 @@ struct userstack_entry {
};
/*
- * ftrace_printk entry:
+ * trace_printk entry:
*/
struct print_entry {
struct trace_entry ent;
@@ -651,7 +651,8 @@ enum trace_iterator_flags {
TRACE_ITER_USERSTACKTRACE = 0x4000,
TRACE_ITER_SYM_USEROBJ = 0x8000,
TRACE_ITER_PRINTK_MSGONLY = 0x10000,
- TRACE_ITER_CONTEXT_INFO = 0x20000 /* Print pid/cpu/time */
+ TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */
+ TRACE_ITER_LATENCY_FMT = 0x40000,
};
/*
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index c2e68d440c4..aaa0755268b 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -159,7 +159,6 @@ static enum print_line_t trace_branch_print(struct trace_iterator *iter,
static struct trace_event trace_branch_event = {
.type = TRACE_BRANCH,
.trace = trace_branch_print,
- .latency_trace = trace_branch_print,
};
static struct tracer branch_trace __read_mostly =
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 9e5ebd84415..b923d13e2fa 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -32,6 +32,8 @@ enum {
static int trace_type __read_mostly;
+static int save_lat_flag;
+
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
@@ -370,6 +372,9 @@ static void stop_irqsoff_tracer(struct trace_array *tr)
static void __irqsoff_tracer_init(struct trace_array *tr)
{
+ save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+ trace_flags |= TRACE_ITER_LATENCY_FMT;
+
tracing_max_latency = 0;
irqsoff_trace = tr;
/* make sure that the tracer is visible */
@@ -380,6 +385,9 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
static void irqsoff_tracer_reset(struct trace_array *tr)
{
stop_irqsoff_tracer(tr);
+
+ if (!save_lat_flag)
+ trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}
static void irqsoff_tracer_start(struct trace_array *tr)
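Both latency tracers (irqsoff above and sched_wakeup below) now force the latency output format for themselves and restore the user's setting when they are torn down. A minimal standalone sketch of that pattern, using the same names as the hunks (the two wrapper functions are hypothetical):

	static int save_lat_flag;

	static void latency_tracer_init(void)	/* hypothetical wrapper */
	{
		/* remember whether the user already had latency-format on */
		save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
		trace_flags |= TRACE_ITER_LATENCY_FMT;
	}

	static void latency_tracer_reset(void)	/* hypothetical wrapper */
	{
		/* clear the bit only if we were the ones who set it */
		if (!save_lat_flag)
			trace_flags &= ~TRACE_ITER_LATENCY_FMT;
	}
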
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 9fc815031b0..306fef84c50 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -437,8 +437,6 @@ int register_ftrace_event(struct trace_event *event)
if (event->trace == NULL)
event->trace = trace_nop_print;
- if (event->latency_trace == NULL)
- event->latency_trace = trace_nop_print;
if (event->raw == NULL)
event->raw = trace_nop_print;
if (event->hex == NULL)
@@ -480,29 +478,6 @@ enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags)
}
/* TRACE_FN */
-static enum print_line_t trace_fn_latency(struct trace_iterator *iter,
- int flags)
-{
- struct ftrace_entry *field;
- struct trace_seq *s = &iter->seq;
-
- trace_assign_type(field, iter->ent);
-
- if (!seq_print_ip_sym(s, field->ip, flags))
- goto partial;
- if (!trace_seq_puts(s, " ("))
- goto partial;
- if (!seq_print_ip_sym(s, field->parent_ip, flags))
- goto partial;
- if (!trace_seq_puts(s, ")\n"))
- goto partial;
-
- return TRACE_TYPE_HANDLED;
-
- partial:
- return TRACE_TYPE_PARTIAL_LINE;
-}
-
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
{
struct ftrace_entry *field;
@@ -573,7 +548,6 @@ static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
static struct trace_event trace_fn_event = {
.type = TRACE_FN,
.trace = trace_fn_trace,
- .latency_trace = trace_fn_latency,
.raw = trace_fn_raw,
.hex = trace_fn_hex,
.binary = trace_fn_bin,
@@ -705,7 +679,6 @@ static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
static struct trace_event trace_ctx_event = {
.type = TRACE_CTX,
.trace = trace_ctx_print,
- .latency_trace = trace_ctx_print,
.raw = trace_ctx_raw,
.hex = trace_ctx_hex,
.binary = trace_ctxwake_bin,
@@ -714,7 +687,6 @@ static struct trace_event trace_ctx_event = {
static struct trace_event trace_wake_event = {
.type = TRACE_WAKE,
.trace = trace_wake_print,
- .latency_trace = trace_wake_print,
.raw = trace_wake_raw,
.hex = trace_wake_hex,
.binary = trace_ctxwake_bin,
@@ -770,7 +742,6 @@ static enum print_line_t trace_special_bin(struct trace_iterator *iter,
static struct trace_event trace_special_event = {
.type = TRACE_SPECIAL,
.trace = trace_special_print,
- .latency_trace = trace_special_print,
.raw = trace_special_print,
.hex = trace_special_hex,
.binary = trace_special_bin,
@@ -808,7 +779,6 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
static struct trace_event trace_stack_event = {
.type = TRACE_STACK,
.trace = trace_stack_print,
- .latency_trace = trace_stack_print,
.raw = trace_special_print,
.hex = trace_special_hex,
.binary = trace_special_bin,
@@ -838,7 +808,6 @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
static struct trace_event trace_user_stack_event = {
.type = TRACE_USER_STACK,
.trace = trace_user_stack_print,
- .latency_trace = trace_user_stack_print,
.raw = trace_special_print,
.hex = trace_special_hex,
.binary = trace_special_bin,
@@ -883,7 +852,6 @@ static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
static struct trace_event trace_print_event = {
.type = TRACE_PRINT,
.trace = trace_print_print,
- .latency_trace = trace_print_print,
.raw = trace_print_raw,
};
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index 551a25a7221..8a34d688ed6 100644
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -10,7 +10,6 @@ struct trace_event {
struct hlist_node node;
int type;
trace_print_func trace;
- trace_print_func latency_trace;
trace_print_func raw;
trace_print_func hex;
trace_print_func binary;
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index db55f7aaa64..3c5ad6b2ec8 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -32,6 +32,8 @@ static raw_spinlock_t wakeup_lock =
static void __wakeup_reset(struct trace_array *tr);
+static int save_lat_flag;
+
#ifdef CONFIG_FUNCTION_TRACER
/*
* irqsoff uses its own tracer function to keep the overhead down:
@@ -324,6 +326,9 @@ static void stop_wakeup_tracer(struct trace_array *tr)
static int __wakeup_tracer_init(struct trace_array *tr)
{
+ save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+ trace_flags |= TRACE_ITER_LATENCY_FMT;
+
tracing_max_latency = 0;
wakeup_trace = tr;
start_wakeup_tracer(tr);
@@ -347,6 +352,9 @@ static void wakeup_tracer_reset(struct trace_array *tr)
stop_wakeup_tracer(tr);
/* make sure we put back any tasks we are tracing */
wakeup_reset(tr);
+
+ if (!save_lat_flag)
+ trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}
static void wakeup_tracer_start(struct trace_array *tr)