Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig                    |  8
-rw-r--r--  arch/arm64/include/asm/alternative.h  | 71
-rw-r--r--  arch/arm64/include/asm/assembler.h    |  9
-rw-r--r--  arch/arm64/include/asm/barrier.h      |  3
-rw-r--r--  arch/arm64/xen/hypercall.S            | 19
5 files changed, 66 insertions, 44 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1f282b96be05..6a22ef746994 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -718,14 +718,6 @@ config SETEND_EMULATION
If unsure, say Y
endif
-config ARM64_SW_TTBR0_PAN
- bool "Emulate Priviledged Access Never using TTBR0_EL1 switching"
- help
- Enabling this option prevents the kernel from accessing
- user-space memory directly by pointing TTBR0_EL1 to a reserved
- zeroed area and reserved ASID. The user access routines
- restore the valid TTBR0_EL1 temporarily.
-
menu "ARMv8.1 architectural features"
config ARM64_PAN
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index b9d9791630a1..0417f95fddeb 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -1,6 +1,8 @@
#ifndef __ASM_ALTERNATIVE_H
#define __ASM_ALTERNATIVE_H
+#include <asm/insn.h>
+
#ifndef __ASSEMBLY__
#include <linux/init.h>
@@ -86,34 +88,55 @@ void apply_alternatives(void *start, size_t length);
.endm
/*
- * Begin an alternative code sequence.
+ * Alternative sequences
+ *
+ * The code for the case where the capability is not present will be
+ * assembled and linked as normal. There are no restrictions on this
+ * code.
+ *
+ * The code for the case where the capability is present will be
+ * assembled into a special section to be used for dynamic patching.
+ * Code for that case must:
+ *
+ * 1. Be exactly the same length (in bytes) as the default code
+ * sequence.
*
- * The code that follows this macro will be assembled and linked as
- * normal. There are no restrictions on this code.
+ * 2. Not contain a branch target that is used outside of the
+ * alternative sequence it is defined in (branches into an
+ * alternative sequence are not fixed up).
+ */
+
+/*
+ * Begin an alternative code sequence.
*/
.macro alternative_if_not cap
+ .set .Lasm_alt_mode, 0
.pushsection .altinstructions, "a"
altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
.popsection
661:
.endm
+.macro alternative_if cap
+ .set .Lasm_alt_mode, 1
+ .pushsection .altinstructions, "a"
+ altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f
+ .popsection
+ .pushsection .altinstr_replacement, "ax"
+ .align 2 /* So GAS knows label 661 is suitably aligned */
+661:
+.endm
+
/*
- * Provide the alternative code sequence.
- *
- * The code that follows this macro is assembled into a special
- * section to be used for dynamic patching. Code that follows this
- * macro must:
- *
- * 1. Be exactly the same length (in bytes) as the default code
- * sequence.
- *
- * 2. Not contain a branch target that is used outside of the
- * alternative sequence it is defined in (branches into an
- * alternative sequence are not fixed up).
+ * Provide the other half of the alternative code sequence.
*/
.macro alternative_else
-662: .pushsection .altinstr_replacement, "ax"
+662:
+ .if .Lasm_alt_mode==0
+ .pushsection .altinstr_replacement, "ax"
+ .else
+ .popsection
+ .endif
663:
.endm
@@ -121,11 +144,25 @@ void apply_alternatives(void *start, size_t length);
* Complete an alternative code sequence.
*/
.macro alternative_endif
-664: .popsection
+664:
+ .if .Lasm_alt_mode==0
+ .popsection
+ .endif
.org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
.endm
+/*
+ * Provides a trivial alternative or default sequence consisting solely
+ * of NOPs. The number of NOPs is chosen automatically to match the
+ * previous case.
+ */
+.macro alternative_else_nop_endif
+alternative_else
+ nops (662b-661b) / AARCH64_INSN_SIZE
+alternative_endif
+.endm
+
#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \
alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
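
For reference, the assembly-side macros end up being used like this (an illustrative sketch, not part of this patch: ARM64_HAS_CAP_FOO is a made-up capability name and the two MRS instructions are placeholders). Both sequences are exactly one instruction long and neither is branched into from outside, per the rules documented above:

alternative_if_not ARM64_HAS_CAP_FOO
	mrs	x0, ttbr0_el1		// default code, runs until patched
alternative_else
	mrs	x0, ttbr1_el1		// patched in when the capability is detected
alternative_endif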
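The new alternative_if/alternative_else_nop_endif pair avoids hand-counting NOPs when one side of an alternative is empty. A minimal sketch, assuming the existing ARM64_HAS_PAN capability and the single-instruction SET_PSTATE_PAN helper from <asm/sysreg.h>:

alternative_if ARM64_HAS_PAN
	SET_PSTATE_PAN(1)		// one instruction, live only with PAN
alternative_else_nop_endif		// default side auto-filled with one NOP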
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index d8732b773bd1..211003d861a1 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -109,6 +109,15 @@
dmb \opt
.endm
+/*
+ * NOP sequence
+ */
+ .macro nops, num
+ .rept \num
+ nop
+ .endr
+ .endm
+
#define USER(l, x...) \
9999: x; \
.section __ex_table,"a"; \
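
The nops macro is a thin .rept wrapper, so its argument must be an assemble-time constant; label-difference expressions such as (662b-661b) / AARCH64_INSN_SIZE also work, which is what alternative_else_nop_endif above relies on. A hypothetical padding use, not taken from this patch:

	ldr	x0, [x1]
	add	x0, x0, #4
	nops	2			// pad the sequence out to four instructions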
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 2666c9b84ca0..3577413c35f7 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -20,6 +20,9 @@
#ifndef __ASSEMBLY__
+#define __nops(n) ".rept " #n "\nnop\n.endr\n"
+#define nops(n) asm volatile(__nops(n))
+
#define sev() asm volatile("sev" : : : "memory")
#define wfe() asm volatile("wfe" : : : "memory")
#define wfi() asm volatile("wfi" : : : "memory")
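
On the C side, __nops(n) expands to a ".rept n; nop; .endr" string that can be spliced into a larger inline-asm body (n is stringified, so it must be a plain constant), while nops(n) emits the sequence on its own. A minimal sketch; tiny_delay() is a hypothetical helper, not a kernel API:

static inline void tiny_delay(void)
{
	nops(4);			/* four architectural NOPs */
}

/* or pasted into a bigger asm body */
asm volatile("sevl\n" __nops(2) "wfe" : : : "memory");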
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
index 6d6e4af1a4bf..8bbe9401f4f0 100644
--- a/arch/arm64/xen/hypercall.S
+++ b/arch/arm64/xen/hypercall.S
@@ -49,7 +49,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/uaccess.h>
#include <xen/interface/xen.h>
@@ -90,24 +89,6 @@ ENTRY(privcmd_call)
mov x2, x3
mov x3, x4
mov x4, x5
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
- /*
- * Privcmd calls are issued by the userspace. The kernel needs to
- * enable access to TTBR0_EL1 as the hypervisor would issue stage 1
- * translations to user memory via AT instructions. Since AT
- * instructions are not affected by the PAN bit (ARMv8.1), we only
- * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
- * is enabled (it implies that hardware UAO and PAN disabled).
- */
- uaccess_enable_not_uao x6, x7
-#endif
hvc XEN_IMM
-
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
- /*
- * Disable userspace access from kernel once the hyp call completed.
- */
- uaccess_disable_not_uao x6
-#endif
ret
ENDPROC(privcmd_call);