Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/entry-common.S | 61
-rw-r--r--  arch/arm/kernel/signal.c       |  6
2 files changed, 51 insertions(+), 16 deletions(-)
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 92828a1dec80..dd3721d1185e 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -24,35 +24,55 @@
.align 5
+#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
/*
- * This is the fast syscall return path. We do as little as
- * possible here, and this includes saving r0 back into the SVC
- * stack.
+ * This is the fast syscall return path. We do as little as possible here,
+ * such as avoiding writing r0 to the stack. We only use this path if we
+ * have tracing and context tracking disabled - the overheads from those
+ * features make this path too inefficient.
*/
ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
- disable_irq @ disable interrupts
+ disable_irq_notrace @ disable interrupts
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
- tst r1, #_TIF_SYSCALL_WORK
- bne __sys_trace_return
- tst r1, #_TIF_WORK_MASK
+ tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
bne fast_work_pending
- asm_trace_hardirqs_on
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
- ct_user_enter
restore_user_regs fast = 1, offset = S_OFF
UNWIND(.fnend )
+ENDPROC(ret_fast_syscall)
-/*
- * Ok, we need to do extra processing, enter the slow path.
- */
+ /* Ok, we need to do extra processing, enter the slow path. */
fast_work_pending:
str r0, [sp, #S_R0+S_OFF]! @ returned r0
-work_pending:
+ /* fall through to work_pending */
+#else
+/*
+ * The "replacement" ret_fast_syscall for when tracing or context tracking
+ * is enabled. As we will need to call out to some C functions, we save
+ * r0 first to avoid needing to save registers around each C function call.
+ */
+ret_fast_syscall:
+ UNWIND(.fnstart )
+ UNWIND(.cantunwind )
+ str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
+ disable_irq_notrace @ disable interrupts
+ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
+ tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+ beq no_work_pending
+ UNWIND(.fnend )
+ENDPROC(ret_fast_syscall)
+
+ /* Slower path - fall through to work_pending */
+#endif
+
+ tst r1, #_TIF_SYSCALL_WORK
+ bne __sys_trace_return_nosave
+slow_work_pending:
mov r0, sp @ 'regs'
mov r2, why @ 'syscall'
bl do_work_pending
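The main micro-optimisation in the hunk above is folding the two separate tst/bne pairs into a single tst against the OR of both masks, so the common no-work case takes one branch instead of two. A minimal C sketch of that equivalence follows; the flag values are hypothetical stand-ins, not the kernel's real bit assignments.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for the kernel's thread_info flag masks. */
#define TIF_SYSCALL_WORK 0x00000f00u
#define TIF_WORK_MASK    0x000000ffu

/* Old shape: two tests, two conditional branches. */
static bool needs_slow_path_old(uint32_t flags)
{
	if (flags & TIF_SYSCALL_WORK)
		return true;
	return (flags & TIF_WORK_MASK) != 0;
}

/* New shape: one combined test, one branch, mirroring
 * "tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK". */
static bool needs_slow_path_new(uint32_t flags)
{
	return (flags & (TIF_SYSCALL_WORK | TIF_WORK_MASK)) != 0;
}

int main(void)
{
	/* The two forms agree for every flag combination. */
	for (uint32_t f = 0; f <= 0xfffu; f++)
		assert(needs_slow_path_old(f) == needs_slow_path_new(f));
	return 0;
}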
@@ -64,16 +84,19 @@ work_pending:
/*
* "slow" syscall return path. "why" tells us if this was a real syscall.
+ * IRQs may be enabled here, so always disable them. Note that we use the
+ * "notrace" version to avoid calling into the tracing code unnecessarily.
+ * do_work_pending() will update this state if necessary.
*/
ENTRY(ret_to_user)
ret_slow_syscall:
- disable_irq @ disable interrupts
+ disable_irq_notrace @ disable interrupts
ENTRY(ret_to_user_from_irq)
ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK
- bne work_pending
+ bne slow_work_pending
no_work_pending:
- asm_trace_hardirqs_on
+ asm_trace_hardirqs_on save = 0
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
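Both return paths now disable interrupts with the notrace variant, which masks IRQs without updating the irq-tracing state; the tracer's shadow of the IRQ flag therefore goes stale until something calls trace_hardirqs_off(), which is exactly what the signal.c hunk below adds at the top of do_work_pending(). A small userspace model of that stale-then-reconciled state; the booleans are illustrative stand-ins for the CPSR I-bit and the tracer's shadow copy, not kernel data structures.

#include <stdbool.h>
#include <stdio.h>

/* Two pieces of state that must eventually agree: the hardware
 * IRQ mask and the tracing code's shadow of it. Illustrative only. */
static bool hw_irqs_on = true;
static bool traced_irqs_on = true;

/* disable_irq analogue: mask IRQs and update the tracer. */
static void disable_irq_model(void)
{
	hw_irqs_on = false;
	traced_irqs_on = false;
}

/* disable_irq_notrace analogue: mask IRQs only; the tracer's view
 * is stale until trace_hardirqs_off() reconciles it. */
static void disable_irq_notrace_model(void)
{
	hw_irqs_on = false;
}

/* trace_hardirqs_off() analogue, as called by do_work_pending(). */
static void trace_hardirqs_off_model(void)
{
	traced_irqs_on = false;
}

int main(void)
{
	disable_irq_notrace_model();
	printf("after notrace disable: hw=%d traced=%d (stale)\n",
	       hw_irqs_on, traced_irqs_on);
	trace_hardirqs_off_model();
	printf("after reconciliation:  hw=%d traced=%d\n",
	       hw_irqs_on, traced_irqs_on);

	/* The traced variant keeps both in sync in one step. */
	hw_irqs_on = traced_irqs_on = true;
	disable_irq_model();
	printf("traced disable:        hw=%d traced=%d\n",
	       hw_irqs_on, traced_irqs_on);
	return 0;
}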
@@ -251,6 +274,12 @@ __sys_trace_return:
bl syscall_trace_exit
b ret_slow_syscall
+__sys_trace_return_nosave:
+ asm_trace_hardirqs_off save=0
+ mov r0, sp
+ bl syscall_trace_exit
+ b ret_slow_syscall
+
.align 5
#ifdef CONFIG_ALIGNMENT_TRAP
.type __cr_alignment, #object
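Taken together, the entry-common.S hunks leave the return path with the branch structure sketched below in C. The function names stand in for the assembly labels of the same names, and the flag values are again hypothetical; this is an illustration of the control flow, not a literal translation.

#include <stdint.h>
#include <stdio.h>

#define TIF_SYSCALL_WORK 0x00000f00u /* hypothetical values */
#define TIF_WORK_MASK    0x000000ffu

/* printf stubs standing in for the assembly labels. */
static void no_work_pending(void)         { puts("no_work_pending"); }
static void sys_trace_return_nosave(void) { puts("__sys_trace_return_nosave"); }
static void slow_work_pending(void)       { puts("slow_work_pending"); }

/* Branch structure of the patched return path; interrupts have
 * already been masked with disable_irq_notrace at this point. */
static void syscall_return_sketch(uint32_t flags)
{
	if (!(flags & (TIF_SYSCALL_WORK | TIF_WORK_MASK))) {
		no_work_pending();         /* straight back to user space */
		return;
	}
	if (flags & TIF_SYSCALL_WORK) {
		sys_trace_return_nosave(); /* r0 is already on the stack */
		return;
	}
	slow_work_pending();               /* do_work_pending() and friends */
}

int main(void)
{
	syscall_return_sketch(0);
	syscall_return_sketch(0x001); /* work bit set */
	syscall_return_sketch(0x100); /* syscall-trace bit set */
	return 0;
}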
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 423663e23791..b6cda06b455f 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -562,6 +562,12 @@ static int do_signal(struct pt_regs *regs, int syscall)
asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
+ /*
+ * The assembly code enters us with IRQs off, but it hasn't
+ * informed the tracing code of that for efficiency reasons.
+ * Update the trace code with the current status.
+ */
+ trace_hardirqs_off();
do {
if (likely(thread_flags & _TIF_NEED_RESCHED)) {
schedule();
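The hunk above ends at the top of do_work_pending()'s work loop. Its overall shape, reconcile the irq-trace state once on entry and then service work items until the flags are clear, can be modelled in userspace as below. The stubs and bit values are invented for illustration; the real function also re-disables interrupts and re-reads the flags each pass.

#include <stdint.h>
#include <stdio.h>

#define TIF_NEED_RESCHED 0x1u /* hypothetical bit positions */
#define TIF_SIGPENDING   0x2u
#define TIF_WORK_MASK    (TIF_NEED_RESCHED | TIF_SIGPENDING)

static uint32_t thread_flags = TIF_NEED_RESCHED | TIF_SIGPENDING;

static void schedule_stub(void)
{
	puts("schedule()");
	thread_flags &= ~TIF_NEED_RESCHED;
}

static void do_signal_stub(void)
{
	puts("do_signal()");
	thread_flags &= ~TIF_SIGPENDING;
}

/* Shape of do_work_pending() after this patch: reconcile the
 * irq-trace state once on entry, then loop, re-reading the flags,
 * until no work bits remain. */
static void do_work_pending_sketch(void)
{
	puts("trace_hardirqs_off()"); /* the call this patch adds */
	uint32_t flags;
	while ((flags = thread_flags & TIF_WORK_MASK) != 0) {
		if (flags & TIF_NEED_RESCHED)
			schedule_stub();
		else
			do_signal_stub();
	}
	puts("return to user");
}

int main(void)
{
	do_work_pending_sketch();
	return 0;
}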