Diffstat (limited to 'arch/x86/kernel/traps.c')
-rw-r--r--  arch/x86/kernel/traps.c | 72
1 file changed, 66 insertions(+), 6 deletions(-)
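
The diff below hooks LTTng trap_entry/trap_exit tracepoints into the x86 trap, NMI and debug paths, and adds ltt_dump_sys_call_table()/ltt_dump_idt_table() helpers that emit the syscall and IDT tables through markers. The tracepoints themselves are expected to come from the newly included <trace/trap.h>; for reference, a minimal sketch of what such a header could look like, with the prototypes inferred from the call sites in this patch rather than taken from the LTTng tree:

#ifndef _TRACE_TRAP_H
#define _TRACE_TRAP_H

#include <linux/tracepoint.h>
#include <asm/ptrace.h>

/* Entry into a trap/fault handler: interrupted registers and vector number. */
DECLARE_TRACE(trap_entry,
	TP_PROTO(struct pt_regs *regs, long id),
	TP_ARGS(regs, id));

/* Exit from the handler; the call sites pass no arguments. */
DECLARE_TRACE_NOARGS(trap_exit);

#endif /* _TRACE_TRAP_H */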
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index b9b67166f9d..bc618945fb1 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -31,6 +31,7 @@
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>
+#include <trace/trap.h>
#ifdef CONFIG_EISA
#include <linux/ioport.h>
@@ -52,6 +53,7 @@
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/traps.h>
+#include <asm/unistd.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/mce.h>
@@ -76,11 +78,21 @@ char ignore_fpu_irq;
* F0 0F bug workaround.
*/
gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
+
+extern unsigned long sys_call_table[];
+extern unsigned long syscall_table_size;
+
#endif
DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);
+/*
+ * Also used in arch/x86/mm/fault.c.
+ */
+DEFINE_TRACE(trap_entry);
+DEFINE_TRACE(trap_exit);
+
static int ignore_nmis;
int unknown_nmi_panic;
@@ -122,6 +134,8 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
{
struct task_struct *tsk = current;
+ trace_trap_entry(regs, trapnr);
+
#ifdef CONFIG_X86_32
if (regs->flags & X86_VM_MASK) {
/*
@@ -168,7 +182,7 @@ trap_signal:
force_sig_info(signr, info, tsk);
else
force_sig(signr, tsk);
- return;
+ goto end;
kernel_trap:
if (!fixup_exception(regs)) {
@@ -176,15 +190,17 @@ kernel_trap:
tsk->thread.trap_no = trapnr;
die(str, regs, error_code);
}
- return;
+ goto end;
#ifdef CONFIG_X86_32
vm86_trap:
if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
error_code, trapnr))
goto trap_signal;
- return;
+ goto end;
#endif
+end:
+ trace_trap_exit();
}
#define DO_ERROR(trapnr, signr, str, name) \
@@ -285,7 +301,9 @@ do_general_protection(struct pt_regs *regs, long error_code)
printk("\n");
}
+ trace_trap_entry(regs, 13);
force_sig(SIGSEGV, tsk);
+ trace_trap_exit();
return;
#ifdef CONFIG_X86_32
@@ -398,13 +416,15 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
unsigned char reason = 0;
+ trace_trap_entry(regs, 2);
+
/*
* CPU-specific NMI must be processed before non-CPU-specific
* NMI, otherwise we may lose it, because the CPU-specific
* NMI can not be detected/processed on other CPUs.
*/
if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP)
- return;
+ goto end;
/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
raw_spin_lock(&nmi_reason_lock);
@@ -423,11 +443,13 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
reassert_nmi();
#endif
raw_spin_unlock(&nmi_reason_lock);
- return;
+ goto end;
}
raw_spin_unlock(&nmi_reason_lock);
unknown_nmi_error(reason, regs);
+end:
+ trace_trap_exit();
}
dotraplinkage notrace __kprobes void
@@ -570,8 +592,10 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
preempt_conditional_sti(regs);
if (regs->flags & X86_VM_MASK) {
+ trace_trap_entry(regs, 1);
handle_vm86_trap((struct kernel_vm86_regs *) regs,
error_code, 1);
+ trace_trap_exit();
preempt_conditional_cli(regs);
return;
}
@@ -589,13 +613,32 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
regs->flags &= ~X86_EFLAGS_TF;
}
si_code = get_si_code(tsk->thread.debugreg6);
- if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
+ if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) {
+ trace_trap_entry(regs, 1);
send_sigtrap(tsk, regs, error_code, si_code);
+ trace_trap_exit();
+ }
preempt_conditional_cli(regs);
return;
}
+#ifdef CONFIG_X86_32
+void ltt_dump_sys_call_table(void *call_data)
+{
+ int i;
+ char namebuf[KSYM_NAME_LEN];
+
+ for (i = 0; i < NR_syscalls; i++) {
+ sprint_symbol(namebuf, sys_call_table[i]);
+ __trace_mark(0, syscall_state, sys_call_table, call_data,
+ "id %d address %p symbol %s",
+ i, (void*)sys_call_table[i], namebuf);
+ }
+}
+EXPORT_SYMBOL_GPL(ltt_dump_sys_call_table);
+#endif
+
/*
* Note that we play around with the 'TS' bit in an attempt to get
* the correct behaviour even in the presence of the asynchronous
@@ -701,11 +744,13 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
+ trace_trap_entry(regs, 16);
conditional_sti(regs);
#if 0
/* No need to warn about this any longer. */
printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
+ trace_trap_exit();
}
asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
@@ -738,6 +783,21 @@ void __math_state_restore(void)
tsk->fpu_counter++;
}
+void ltt_dump_idt_table(void *call_data)
+{
+ int i;
+ char namebuf[KSYM_NAME_LEN];
+
+ for (i = 0; i < IDT_ENTRIES; i++) {
+ unsigned long address = gate_offset(idt_table[i]);
+ sprint_symbol(namebuf, address);
+ __trace_mark(0, irq_state, idt_table, call_data,
+ "irq %d address %p symbol %s",
+ i, (void *)address, namebuf);
+ }
+}
+EXPORT_SYMBOL_GPL(ltt_dump_idt_table);
+
/*
* 'math_state_restore()' saves the current math information in the
* old math state array, and gets the new ones from the current task
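
Because the patch defines the tracepoints with the in-kernel DEFINE_TRACE machinery, probes can attach to them like any other tracepoint. A minimal sketch of an out-of-tree probe module follows, assuming the data-passing tracepoint API of this kernel generation (probes receive a private data pointer as their first argument); the function and module names here are hypothetical, not part of the patch:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/tracepoint.h>
#include <trace/trap.h>

/* Runs on every trap_entry hit; 'id' is the vector number passed above. */
static void probe_trap_entry(void *data, struct pt_regs *regs, long id)
{
	pr_debug("trap %ld at ip %#lx\n", id, regs->ip);
}

static void probe_trap_exit(void *data)
{
	/* nothing to do in this sketch */
}

static int __init trap_probe_init(void)
{
	int ret;

	ret = register_trace_trap_entry(probe_trap_entry, NULL);
	if (ret)
		return ret;

	ret = register_trace_trap_exit(probe_trap_exit, NULL);
	if (ret)
		unregister_trace_trap_entry(probe_trap_entry, NULL);

	return ret;
}

static void __exit trap_probe_cleanup(void)
{
	unregister_trace_trap_exit(probe_trap_exit, NULL);
	unregister_trace_trap_entry(probe_trap_entry, NULL);
	/* wait for in-flight probe calls to finish before unloading */
	tracepoint_synchronize_unregister();
}

module_init(trap_probe_init);
module_exit(trap_probe_cleanup);
MODULE_LICENSE("GPL");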