author     Steven Rostedt <srostedt@redhat.com>    2012-06-05 20:00:11 -0400
committer  Steven Rostedt <rostedt@goodmis.org>    2012-07-19 13:20:37 -0400
commit     4de72395ff4cf48e23b61986dbc90b99a7c4ed97 (patch)
tree       f2734ebdb6c7601c39fe4952f54d97f4aec34741 /arch/x86/kernel/entry_32.S
parent     08f6fba503111e0336f2b4d6915a4a18f9b60e51 (diff)
ftrace/x86: Add save_regs for i386 function calls
Add saving full regs for function tracing on i386. The saving of regs was
influenced by patches sent out by Masami Hiramatsu.

Link: http://lkml.kernel.org/r/20120711195745.379060003@goodmis.org
Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'arch/x86/kernel/entry_32.S')
-rw-r--r--  arch/x86/kernel/entry_32.S  68
1 file changed, 68 insertions, 0 deletions
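To make the new calling convention concrete, here is a minimal, hypothetical sketch (not part of this patch) of a kernel module whose ftrace callback asks for the saved registers. The four-argument callback signature, FTRACE_OPS_FL_SAVE_REGS, and register_ftrace_function() are the ftrace API this series targets; the names my_regs_callback, my_ops, and my_hits are made up for illustration.

/*
 * Hypothetical example: a callback registered with FTRACE_OPS_FL_SAVE_REGS
 * receives the pt_regs frame that ftrace_regs_caller builds below.
 */
#include <linux/atomic.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/ptrace.h>

static atomic_long_t my_hits;

static void my_regs_callback(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_ops *op, struct pt_regs *regs)
{
	/*
	 * ip: traced function (1st parameter, taken from regs->ip),
	 * parent_ip: call site (2nd parameter, loaded from 0x4(%ebp)),
	 * op: the ftrace_ops (3rd parameter, function_trace_op),
	 * regs: the full saved register frame (4th parameter).
	 */
	if (regs)
		atomic_long_inc(&my_hits);
}

static struct ftrace_ops my_ops = {
	.func  = my_regs_callback,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};

static int __init my_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

Without FTRACE_OPS_FL_SAVE_REGS the regs argument may be NULL; setting the flag is what routes calls through the new ftrace_regs_caller rather than ftrace_caller.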
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 5da11d1b85a..46caa5649a5 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1123,6 +1123,7 @@ ftrace_call:
popl %edx
popl %ecx
popl %eax
+ftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
@@ -1134,6 +1135,73 @@ ftrace_stub:
ret
END(ftrace_caller)
+ENTRY(ftrace_regs_caller)
+ pushf /* push flags before compare (in cs location) */
+ cmpl $0, function_trace_stop
+ jne ftrace_restore_flags
+
+ /*
+ * i386 does not save SS and ESP when coming from kernel.
+ * Instead, to get sp, &regs->sp is used (see ptrace.h).
+ * Unfortunately, that means eflags must be at the same location
+ * as the current return ip is. We move the return ip into the
+ * ip location, and move flags into the return ip location.
+ */
+ pushl 4(%esp) /* save return ip into ip slot */
+ subl $MCOUNT_INSN_SIZE, (%esp) /* Adjust ip */
+
+ pushl $0 /* Load 0 into orig_ax */
+ pushl %gs
+ pushl %fs
+ pushl %es
+ pushl %ds
+ pushl %eax
+ pushl %ebp
+ pushl %edi
+ pushl %esi
+ pushl %edx
+ pushl %ecx
+ pushl %ebx
+
+ movl 13*4(%esp), %eax /* Get the saved flags */
+ movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */
+ /* clobbering return ip */
+ movl $__KERNEL_CS,13*4(%esp)
+
+ movl 12*4(%esp), %eax /* Load ip (1st parameter) */
+ movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
+ lea (%esp), %ecx
+ pushl %ecx /* Save pt_regs as 4th parameter */
+ leal function_trace_op, %ecx /* Save ftrace_ops in 3rd parameter */
+
+GLOBAL(ftrace_regs_call)
+ call ftrace_stub
+
+ addl $4, %esp /* Skip pt_regs */
+ movl 14*4(%esp), %eax /* Move flags back into cs */
+ movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */
+ movl 12*4(%esp), %eax /* Get return ip from regs->ip */
+ addl $MCOUNT_INSN_SIZE, %eax
+ movl %eax, 14*4(%esp) /* Put return ip back for ret */
+
+ popl %ebx
+ popl %ecx
+ popl %edx
+ popl %esi
+ popl %edi
+ popl %ebp
+ popl %eax
+ popl %ds
+ popl %es
+ popl %fs
+ popl %gs
+ addl $8, %esp /* Skip orig_ax and ip */
+ popf /* Pop flags at end (no addl to corrupt flags) */
+ jmp ftrace_ret
+
+ftrace_restore_flags:
+ popf
+ jmp ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)