author    Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>  2023-01-23 19:40:26 +0100
committer Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>  2023-01-23 19:40:26 +0100
commit    f299752e58d14ca2886b72a9cde610978dd48ca6
tree      bfdbc4c6064f757a3c15d6610d2dc0eecc0ae236
parent    254b80a9fce6f32edbf25d95577c8e18a012a7bf
Revert "arm: Add support for lazy preemption"
This reverts commit 8386f940ea9a9a6a1c690ae7e1d038c3781b1403.

A new version of this patch is coming.

Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
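For context: the reverted patch gave arm a second, "lazy" preemption level, tracked by a preempt_lazy_count field next to preempt_count in struct thread_info and signalled by TIF_NEED_RESCHED_LAZY. The IRQ-return decision that the removed entry-armv.S assembly encoded, paraphrased as a C sketch (the field and flag names come from the hunks below; the helper itself is illustrative, not kernel source):

/* Sketch only: C paraphrase of the lazy-preemption check implemented by
 * the removed __irq_svc assembly.  Assumes the definitions this revert
 * deletes (preempt_lazy_count, _TIF_NEED_RESCHED_LAZY). */
static void irq_svc_preempt_check_lazy(struct thread_info *ti)
{
	if (ti->preempt_count != 0)
		return;				/* preemption disabled: just return */
	if (ti->flags & _TIF_NEED_RESCHED)
		svc_preempt();			/* immediate reschedule requested */
	if (ti->preempt_lazy_count == 0 &&
	    (ti->flags & _TIF_NEED_RESCHED_LAZY))
		svc_preempt();			/* only a lazy reschedule requested */
}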
-rw-r--r--  arch/arm/Kconfig                    |  1 -
-rw-r--r--  arch/arm/include/asm/thread_info.h  |  6 +-----
-rw-r--r--  arch/arm/kernel/asm-offsets.c       |  1 -
-rw-r--r--  arch/arm/kernel/entry-armv.S        | 19 +++----------------
-rw-r--r--  arch/arm/kernel/signal.c            |  3 +--
5 files changed, 5 insertions(+), 25 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4ce79eb994a6..28298d43b4c5 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -116,7 +116,6 @@ config ARM
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_POSIX_CPU_TIMERS_TASK_WORK if !KVM
- select HAVE_PREEMPT_LAZY
select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RSEQ
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index b02e555e5aa3..7f092cb55a41 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -62,7 +62,6 @@ struct cpu_context_save {
struct thread_info {
unsigned long flags; /* low level flags */
int preempt_count; /* 0 => preemptable, <0 => bug */
- int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
__u32 cpu; /* cpu */
__u32 cpu_domain; /* cpu domain */
struct cpu_context_save cpu_context; /* cpu context */
@@ -130,7 +129,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_UPROBE 3 /* breakpointed or singlestepping */
#define TIF_NOTIFY_SIGNAL 4 /* signal notifications exist */
-#define TIF_NEED_RESCHED_LAZY 9
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
@@ -150,7 +148,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
-#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
/* Checks for any syscall work in entry-common.S */
@@ -160,8 +157,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
/*
* Change these and you break ASM code in entry-common.S
*/
-#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \
- _TIF_SIGPENDING | \
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_UPROBE | \
_TIF_NOTIFY_SIGNAL)
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index c3bdec7d2df9..2c8d76fd7c66 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -43,7 +43,6 @@ int main(void)
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
- DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_CPU_DOMAIN, offsetof(struct thread_info, cpu_domain));
DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context));
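Dropping TI_PREEMPT_LAZY here follows directly from deleting the struct field: asm-offsets.c exists to export C struct offsets to assembly, and a DEFINE() on a removed member would no longer compile. The macro behind it, simplified from include/linux/kbuild.h:

/* asm-offsets.c is compiled to assembly only; the build scripts scan the
 * output for "->SYMBOL value" markers and emit generated/asm-offsets.h,
 * which entry-armv.S includes to get TI_FLAGS, TI_PREEMPT and friends. */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))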
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index cfb4660e9fea..c39303e5c234 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -222,18 +222,11 @@ __irq_svc:
#ifdef CONFIG_PREEMPTION
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
- teq r8, #0 @ if preempt count != 0
- bne 1f @ return from exeption
ldr r0, [tsk, #TI_FLAGS] @ get flags
- tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set
- blne svc_preempt @ preempt!
-
- ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
- teq r8, #0 @ if preempt lazy count != 0
+ teq r8, #0 @ if preempt count != 0
movne r0, #0 @ force flags to 0
- tst r0, #_TIF_NEED_RESCHED_LAZY
+ tst r0, #_TIF_NEED_RESCHED
blne svc_preempt
-1:
#endif
svc_exit r5, irq = 1 @ return from exception
@@ -248,14 +241,8 @@ svc_preempt:
1: bl preempt_schedule_irq @ irq en/disable is done inside
ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
tst r0, #_TIF_NEED_RESCHED
- bne 1b
- tst r0, #_TIF_NEED_RESCHED_LAZY
reteq r8 @ go again
- ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
- teq r0, #0 @ if preempt lazy count != 0
- beq 1b
- ret r8 @ go again
-
+ b 1b
#endif
__und_fault:
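With the lazy branch gone, the restored check is again one counter and one flag, and svc_preempt simply loops on preempt_schedule_irq() until NEED_RESCHED clears. A C paraphrase of the restored path (illustrative only; the in-tree code is the assembly above):

/* Sketch only: C paraphrase of the restored __irq_svc/svc_preempt pair. */
static void svc_preempt(void)
{
	do {
		preempt_schedule_irq();		/* irq en/disable is done inside */
	} while (current_thread_info()->flags & _TIF_NEED_RESCHED);
}

static void irq_svc_preempt_check(struct thread_info *ti)
{
	/* "movne r0, #0" in the assembly masks the flag test whenever the
	 * preempt count is non-zero; this is the same condition in C. */
	if (ti->preempt_count == 0 && (ti->flags & _TIF_NEED_RESCHED))
		svc_preempt();
}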
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index b50a3248e79f..e07f359254c3 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -607,8 +607,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
*/
trace_hardirqs_off();
do {
- if (likely(thread_flags & (_TIF_NEED_RESCHED |
- _TIF_NEED_RESCHED_LAZY))) {
+ if (likely(thread_flags & _TIF_NEED_RESCHED)) {
schedule();
} else {
if (unlikely(!user_mode(regs)))