path: root/kernel/trace/trace_functions.c
author    Steven Rostedt <srostedt@redhat.com>    2009-01-15 22:21:43 -0500
committer Ingo Molnar <mingo@elte.hu>             2009-01-16 12:17:46 +0100
commit    3eb36aa05329a47cbe201c151fd0024a4a3649cd (patch)
tree      305b4805a32b47387dc6f76161c20c9e72280e94 /kernel/trace/trace_functions.c
parent    5e4abc9839191e213965e0f1dbf36e2e44356c3a (diff)
ftrace: combine stack trace in function call
Impact: less likely to interleave function and stack traces

This patch replaces the separate stack trace on function entry with a
single record of the function and its stack trace together. The tracer
now switches between function-only recording and combined function and
stack recording. Also includes some whitespace fixups.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
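For context, here is a sketch of how the hot path of function_stack_trace_call() reads once this patch is applied. Only the trace_function() line is visible in the hunk below; the locals, the irq/disable bookkeeping, and the __trace_stack() call are reconstructed from the hunk's surrounding context and comment, so treat this as an approximation rather than a verbatim copy of the tree:

	static void
	function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
	{
		struct trace_array *tr = func_trace;
		struct trace_array_cpu *data;
		unsigned long flags;
		long disabled;
		int cpu;
		int pc;

		if (unlikely(!ftrace_function_enabled))
			return;

		/* keep other events off this CPU's buffer while we record */
		local_irq_save(flags);
		cpu = raw_smp_processor_id();
		data = tr->data[cpu];
		disabled = atomic_inc_return(&data->disabled);

		if (likely(disabled == 1)) {
			pc = preempt_count();
			/*
			 * Record the function entry and its stack back to back,
			 * inside the same disabled window, so the two events land
			 * next to each other in the ring buffer.
			 */
			trace_function(tr, data, ip, parent_ip, flags, pc);
			/* skip the 5 tracing frames listed in the comment in the hunk */
			__trace_stack(tr, data, flags, 5, pc);
		}

		atomic_dec(&data->disabled);
		local_irq_restore(flags);
	}

Previously the stack trace came from a separate callback (trace_stack_ops registered alongside trace_ops), so a function record and its stack record could have other events interleaved between them; recording both under one atomic_inc_return(&data->disabled) window makes that much less likely.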
Diffstat (limited to 'kernel/trace/trace_functions.c')
-rw-r--r--   kernel/trace/trace_functions.c   61
1 file changed, 36 insertions(+), 25 deletions(-)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 2dce3c7370d..61d0b73dabf 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -133,6 +133,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
if (likely(disabled == 1)) {
pc = preempt_count();
+ trace_function(tr, data, ip, parent_ip, flags, pc);
/*
* skip over 5 funcs:
* __ftrace_trace_stack,
@@ -154,24 +155,6 @@ static struct ftrace_ops trace_ops __read_mostly =
.func = function_trace_call,
};
-void tracing_start_function_trace(void)
-{
- ftrace_function_enabled = 0;
-
- if (trace_flags & TRACE_ITER_PREEMPTONLY)
- trace_ops.func = function_trace_call_preempt_only;
- else
- trace_ops.func = function_trace_call;
-
- register_ftrace_function(&trace_ops);
- ftrace_function_enabled = 1;
-}
-
-void tracing_stop_function_trace(void)
-{
- ftrace_function_enabled = 0;
- unregister_ftrace_function(&trace_ops);
-}
static struct ftrace_ops trace_stack_ops __read_mostly =
{
.func = function_stack_trace_call,
@@ -194,6 +177,31 @@ static struct tracer_flags func_flags = {
.opts = func_opts
};
+void tracing_start_function_trace(void)
+{
+ ftrace_function_enabled = 0;
+
+ if (trace_flags & TRACE_ITER_PREEMPTONLY)
+ trace_ops.func = function_trace_call_preempt_only;
+ else
+ trace_ops.func = function_trace_call;
+
+ if (func_flags.val & TRACE_FUNC_OPT_STACK)
+ register_ftrace_function(&trace_stack_ops);
+ else
+ register_ftrace_function(&trace_ops);
+
+ ftrace_function_enabled = 1;
+}
+
+void tracing_stop_function_trace(void)
+{
+ ftrace_function_enabled = 0;
+ /* OK if they are not registered */
+ unregister_ftrace_function(&trace_stack_ops);
+ unregister_ftrace_function(&trace_ops);
+}
+
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
if (bit == TRACE_FUNC_OPT_STACK) {
@@ -201,10 +209,13 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
return 0;
- if (set)
+ if (set) {
+ unregister_ftrace_function(&trace_ops);
register_ftrace_function(&trace_stack_ops);
- else
+ } else {
unregister_ftrace_function(&trace_stack_ops);
+ register_ftrace_function(&trace_ops);
+ }
return 0;
}
@@ -214,14 +225,14 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
static struct tracer function_trace __read_mostly =
{
- .name = "function",
- .init = function_trace_init,
- .reset = function_trace_reset,
- .start = function_trace_start,
+ .name = "function",
+ .init = function_trace_init,
+ .reset = function_trace_reset,
+ .start = function_trace_start,
.flags = &func_flags,
.set_flag = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
- .selftest = trace_selftest_startup_function,
+ .selftest = trace_selftest_startup_function,
#endif
};
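For reference, a reconstruction of the full func_set_flag() after this patch, pieced together from the hunks above; the final -EINVAL return for unrecognized bits is not visible in the diff and is assumed here:

	static int func_set_flag(u32 old_flags, u32 bit, int set)
	{
		if (bit == TRACE_FUNC_OPT_STACK) {
			/* nothing to do if the stack option already matches the request */
			if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
				return 0;

			if (set) {
				/* swap the plain ops for the function+stack ops */
				unregister_ftrace_function(&trace_ops);
				register_ftrace_function(&trace_stack_ops);
			} else {
				/* and back again when the option is cleared */
				unregister_ftrace_function(&trace_stack_ops);
				register_ftrace_function(&trace_ops);
			}

			return 0;
		}

		return -EINVAL;	/* assumed: unknown flag bits are rejected */
	}

The net effect is that exactly one ftrace_ops is registered at a time: trace_stack_ops when TRACE_FUNC_OPT_STACK is set, trace_ops otherwise. That is also why tracing_start_function_trace() now checks func_flags.val before registering, and why tracing_stop_function_trace() unregisters both; unregistering an ops that was never registered is harmless, as the added comment notes.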