Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig | 28
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts | 4
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts | 4
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h | 2
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts | 2
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-cp11x.dtsi | 6
-rw-r--r--  arch/arm64/include/asm/alternative-macros.h | 8
-rw-r--r--  arch/arm64/include/asm/checksum.h | 2
-rw-r--r--  arch/arm64/include/asm/cpucaps.h | 3
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 1
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h | 8
-rw-r--r--  arch/arm64/include/asm/kvm_hyp.h | 8
-rw-r--r--  arch/arm64/include/asm/memory.h | 5
-rw-r--r--  arch/arm64/include/asm/mmu_context.h | 17
-rw-r--r--  arch/arm64/include/asm/pgtable-prot.h | 1
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 3
-rw-r--r--  arch/arm64/include/asm/processor.h | 2
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 20
-rw-r--r--  arch/arm64/include/asm/thread_info.h | 2
-rw-r--r--  arch/arm64/include/asm/word-at-a-time.h | 10
-rw-r--r--  arch/arm64/kernel/cpu_errata.c | 8
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 6
-rw-r--r--  arch/arm64/kernel/cpuinfo.c | 2
-rw-r--r--  arch/arm64/kernel/crash_dump.c | 2
-rw-r--r--  arch/arm64/kernel/entry.S | 10
-rw-r--r--  arch/arm64/kernel/head.S | 8
-rw-r--r--  arch/arm64/kernel/idreg-override.c | 43
-rw-r--r--  arch/arm64/kernel/image-vars.h | 3
-rw-r--r--  arch/arm64/kernel/perf_event.c | 2
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c | 6
-rw-r--r--  arch/arm64/kernel/process.c | 2
-rw-r--r--  arch/arm64/kernel/sleep.S | 2
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 9
-rw-r--r--  arch/arm64/kvm/arm.c | 7
-rw-r--r--  arch/arm64/kvm/debug.c | 2
-rw-r--r--  arch/arm64/kvm/hyp/entry.S | 8
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/switch.h | 9
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/debug-sr.c | 12
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/host.S | 15
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/hyp-main.c | 12
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/switch.c | 14
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/tlb.c | 3
-rw-r--r--  arch/arm64/kvm/hyp/pgtable.c | 1
-rw-r--r--  arch/arm64/kvm/hyp/vgic-v3-sr.c | 49
-rw-r--r--  arch/arm64/kvm/hyp/vhe/tlb.c | 3
-rw-r--r--  arch/arm64/kvm/mmu.c | 3
-rw-r--r--  arch/arm64/kvm/perf.c | 10
-rw-r--r--  arch/arm64/kvm/pmu-emul.c | 10
-rw-r--r--  arch/arm64/kvm/reset.c | 22
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v3.c | 12
-rw-r--r--  arch/arm64/mm/init.c | 29
-rw-r--r--  arch/arm64/mm/mmu.c | 26
60 files changed, 338 insertions, 163 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1f212b47a48a..dfdc3e0af5e1 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -810,6 +810,16 @@ config QCOM_FALKOR_ERRATUM_E1041
If unsure, say Y.
+config NVIDIA_CARMEL_CNP_ERRATUM
+ bool "NVIDIA Carmel CNP: CNP on Carmel semantically different than ARM cores"
+ default y
+ help
+ If CNP is enabled on Carmel cores, non-sharable TLBIs on a core will not
+ invalidate shared TLB entries installed by a different core, as it would
+ on standard ARM cores.
+
+ If unsure, say Y.
+
config SOCIONEXT_SYNQUACER_PREITS
bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
default y
@@ -1055,8 +1065,6 @@ config HW_PERF_EVENTS
config SYS_SUPPORTS_HUGETLBFS
def_bool y
-config ARCH_WANT_HUGE_PMD_SHARE
-
config ARCH_HAS_CACHE_LINE_SIZE
def_bool y
@@ -1157,8 +1165,8 @@ config XEN
config FORCE_MAX_ZONEORDER
int
- default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
- default "12" if (ARM64_16K_PAGES && TRANSPARENT_HUGEPAGE)
+ default "14" if ARM64_64K_PAGES
+ default "12" if ARM64_16K_PAGES
default "11"
help
The kernel memory allocator divides physically contiguous memory
@@ -1398,10 +1406,13 @@ config ARM64_PAN
config AS_HAS_LDAPR
def_bool $(as-instr,.arch_extension rcpc)
+config AS_HAS_LSE_ATOMICS
+ def_bool $(as-instr,.arch_extension lse)
+
config ARM64_LSE_ATOMICS
bool
default ARM64_USE_LSE_ATOMICS
- depends on $(as-instr,.arch_extension lse)
+ depends on AS_HAS_LSE_ATOMICS
config ARM64_USE_LSE_ATOMICS
bool "Atomic instructions"
@@ -1658,6 +1669,7 @@ config ARM64_MTE
default y
depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI
depends on AS_HAS_ARMV8_5
+ depends on AS_HAS_LSE_ATOMICS
# Required for tag checking in the uaccess routines
depends on ARM64_PAN
select ARCH_USES_HIGH_VMA_FLAGS
@@ -1855,12 +1867,6 @@ config CMDLINE_FROM_BOOTLOADER
the boot loader doesn't provide any, the default kernel command
string provided in CMDLINE will be used.
-config CMDLINE_EXTEND
- bool "Extend bootloader kernel arguments"
- help
- The command-line arguments provided by the boot loader will be
- appended to the default kernel command string.
-
config CMDLINE_FORCE
bool "Always use the default kernel command string"
help
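
With the TRANSPARENT_HUGEPAGE condition dropped above, the larger FORCE_MAX_ZONEORDER values now apply whenever 64K or 16K pages are selected. A quick arithmetic check of why those orders are needed (illustration only, not part of the patch, assuming the usual rule that the largest buddy allocation is 2^(MAX_ORDER - 1) pages):

#include <stdio.h>

int main(void)
{
	/* { PAGE_SHIFT, FORCE_MAX_ZONEORDER } per the Kconfig hunk above */
	struct { int shift, order; } cfg[] = { { 16, 14 }, { 14, 12 }, { 12, 11 } };

	for (int i = 0; i < 3; i++) {
		unsigned long max_alloc = (1UL << (cfg[i].order - 1)) << cfg[i].shift;
		/* PMD-level block: (PAGE_SIZE / 8) entries of PAGE_SIZE each */
		unsigned long pmd_block = (1UL << (cfg[i].shift - 3)) << cfg[i].shift;
		printf("%2dK pages: max alloc %4lu MiB, PMD block %4lu MiB\n",
		       1 << (cfg[i].shift - 10), max_alloc >> 20, pmd_block >> 20);
	}
	return 0;	/* 64K/16K need orders 14/12 so PMD-sized hugetlb pages still fit */
}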
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
index 437ffe3628a5..e79ce49e7e6a 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
@@ -19,3 +19,7 @@
};
};
};
+
+&mmc0 {
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 push-push switch */
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
index 3402cec87035..df62044ff7a7 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
@@ -34,7 +34,7 @@
vmmc-supply = <&reg_dcdc1>;
disable-wp;
bus-width = <4>;
- cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 push-pull switch */
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
index 4f4755152fce..b5808047d6e4 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
@@ -289,10 +289,6 @@
vcc-pm-supply = <&reg_aldo1>;
};
-&rtc {
- clocks = <&ext_osc32k>;
-};
-
&spdif {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
index 49e979794094..af8b7d0ef750 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
@@ -995,9 +995,9 @@
compatible = "allwinner,sun8i-a23-rsb";
reg = <0x07083000 0x400>;
interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&r_ccu 13>;
+ clocks = <&r_ccu CLK_R_APB2_RSB>;
clock-frequency = <3000000>;
- resets = <&r_ccu 7>;
+ resets = <&r_ccu RST_R_APB2_RSB>;
pinctrl-names = "default";
pinctrl-0 = <&r_rsb_pins>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
index 7de6b376d792..9058cfa4980f 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
@@ -198,6 +198,7 @@
ranges = <0x0 0x00 0x1700000 0x100000>;
reg = <0x00 0x1700000 0x0 0x100000>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
sec_jr0: jr@10000 {
compatible = "fsl,sec-v5.4-job-ring",
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 5a8a1dc4262d..28c51e521cb2 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -348,6 +348,7 @@
ranges = <0x0 0x00 0x1700000 0x100000>;
reg = <0x00 0x1700000 0x0 0x100000>;
interrupts = <0 75 0x4>;
+ dma-coherent;
sec_jr0: jr@10000 {
compatible = "fsl,sec-v5.4-job-ring",
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
index 1d6dfd189c7f..39458305e333 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -354,6 +354,7 @@
ranges = <0x0 0x00 0x1700000 0x100000>;
reg = <0x00 0x1700000 0x0 0x100000>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
sec_jr0: jr@10000 {
compatible = "fsl,sec-v5.4-job-ring",
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
index 5ccc4cc91959..a003e6af3353 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
+++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
@@ -124,7 +124,7 @@
#define MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x0A4 0x30C 0x000 0x0 0x0
#define MX8MM_IOMUXC_SD1_CMD_GPIO2_IO1 0x0A4 0x30C 0x000 0x5 0x0
#define MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x0A8 0x310 0x000 0x0 0x0
-#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x31 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x310 0x000 0x5 0x0
#define MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x0AC 0x314 0x000 0x0 0x0
#define MX8MM_IOMUXC_SD1_DATA1_GPIO2_IO3 0x0AC 0x314 0x000 0x5 0x0
#define MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x0B0 0x318 0x000 0x0 0x0
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts b/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts
index 0e1a6d953389..122c95ddad30 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts
@@ -35,7 +35,7 @@
&i2c2 {
clock-frequency = <400000>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c2>;
pinctrl-1 = <&pinctrl_i2c2_gpio>;
sda-gpios = <&gpio5 17 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
index 44a8c2337cee..f3965ec5b31d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
@@ -67,7 +67,7 @@
&i2c1 {
clock-frequency = <400000>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c1>;
pinctrl-1 = <&pinctrl_i2c1_gpio>;
sda-gpios = <&gpio5 15 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
index b94b02080a34..68e8fa172974 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
+++ b/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
@@ -130,7 +130,7 @@
#define MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0x0A4 0x30C 0x000 0x0 0x0
#define MX8MQ_IOMUXC_SD1_CMD_GPIO2_IO1 0x0A4 0x30C 0x000 0x5 0x0
#define MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x0A8 0x310 0x000 0x0 0x0
-#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x31 0x000 0x5 0x0
+#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x310 0x000 0x5 0x0
#define MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x0AC 0x314 0x000 0x0 0x0
#define MX8MQ_IOMUXC_SD1_DATA1_GPIO2_IO3 0x0AC 0x314 0x000 0x5 0x0
#define MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x0B0 0x318 0x000 0x0 0x0
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
index d239ab70ed99..53e817c5f6f3 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Device Tree file for CZ.NIC Turris Mox Board
- * 2019 by Marek Behun <marek.behun@nic.cz>
+ * 2019 by Marek Behún <kabel@kernel.org>
*/
/dts-v1/;
diff --git a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
index 64179a372ecf..c6f5df2deccf 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
@@ -310,9 +310,11 @@
};
CP11X_LABEL(sata0): sata@540000 {
- compatible = "marvell,armada-8k-ahci";
+ compatible = "marvell,armada-8k-ahci",
+ "generic-ahci";
reg = <0x540000 0x30000>;
dma-coherent;
+ interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&CP11X_LABEL(clk) 1 15>,
<&CP11X_LABEL(clk) 1 16>;
#address-cells = <1>;
@@ -320,12 +322,10 @@
status = "disabled";
sata-port@0 {
- interrupts = <109 IRQ_TYPE_LEVEL_HIGH>;
reg = <0>;
};
sata-port@1 {
- interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
reg = <1>;
};
};
diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h
index 5df500dcc627..8a078fc662ac 100644
--- a/arch/arm64/include/asm/alternative-macros.h
+++ b/arch/arm64/include/asm/alternative-macros.h
@@ -97,9 +97,9 @@
.popsection
.subsection 1
663: \insn2
-664: .previous
- .org . - (664b-663b) + (662b-661b)
+664: .org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
+ .previous
.endif
.endm
@@ -169,11 +169,11 @@
*/
.macro alternative_endif
664:
+ .org . - (664b-663b) + (662b-661b)
+ .org . - (662b-661b) + (664b-663b)
.if .Lasm_alt_mode==0
.previous
.endif
- .org . - (664b-663b) + (662b-661b)
- .org . - (662b-661b) + (664b-663b)
.endm
/*
diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h
index 93a161b3bf3f..dc52b733675d 100644
--- a/arch/arm64/include/asm/checksum.h
+++ b/arch/arm64/include/asm/checksum.h
@@ -37,7 +37,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
} while (--n > 0);
sum += ((sum >> 32) | (sum << 32));
- return csum_fold((__force u32)(sum >> 32));
+ return csum_fold((__force __wsum)(sum >> 32));
}
#define ip_fast_csum ip_fast_csum
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index b77d997b173b..c40f2490cd7b 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -66,7 +66,8 @@
#define ARM64_WORKAROUND_1508412 58
#define ARM64_HAS_LDAPR 59
#define ARM64_KVM_PROTECTED_MODE 60
+#define ARM64_WORKAROUND_NVIDIA_CARMEL_CNP 61
-#define ARM64_NCAPS 61
+#define ARM64_NCAPS 62
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 4e90c2debf70..94d4025acc0b 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -278,6 +278,7 @@
#define CPTR_EL2_DEFAULT CPTR_EL2_RES1
/* Hyp Debug Configuration Register bits */
+#define MDCR_EL2_TTRF (1 << 19)
#define MDCR_EL2_TPMS (1 << 14)
#define MDCR_EL2_E2PB_MASK (UL(0x3))
#define MDCR_EL2_E2PB_SHIFT (UL(12))
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 22d933e9b59e..a7ab84f781f7 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -47,10 +47,10 @@
#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context 2
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa 3
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid 4
-#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid 5
+#define __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context 5
#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff 6
#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs 7
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2 8
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config 8
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr 9
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr 10
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs 11
@@ -183,16 +183,16 @@ DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
extern void __kvm_flush_vm_context(void);
+extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);
extern void __kvm_timer_set_cntvoff(u64 cntvoff);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
-extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern u64 __vgic_v3_get_gic_config(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index c0450828378b..32ae676236b6 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -83,6 +83,11 @@ void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
void __debug_switch_to_host(struct kvm_vcpu *vcpu);
+#ifdef __KVM_NVHE_HYPERVISOR__
+void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
+void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
+#endif
+
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
@@ -97,7 +102,8 @@ bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);
void __noreturn hyp_panic(void);
#ifdef __KVM_NVHE_HYPERVISOR__
-void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
+void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
+ u64 elr, u64 par);
#endif
#endif /* __ARM64_KVM_HYP_H__ */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index c759faf7a1ff..0aabc3be9a75 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -328,6 +328,11 @@ static inline void *phys_to_virt(phys_addr_t x)
#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET)
#if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
+#define page_to_virt(x) ({ \
+ __typeof__(x) __page = x; \
+ void *__addr = __va(page_to_phys(__page)); \
+ (void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
+})
#define virt_to_page(x) pfn_to_page(virt_to_pfn(x))
#else
#define page_to_virt(x) ({ \
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 70ce8c1d2b07..bd02e99b1a4c 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -63,23 +63,6 @@ static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
extern u64 idmap_t0sz;
extern u64 idmap_ptrs_per_pgd;
-static inline bool __cpu_uses_extended_idmap(void)
-{
- if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52))
- return false;
-
- return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
-}
-
-/*
- * True if the extended ID map requires an extra level of translation table
- * to be configured.
- */
-static inline bool __cpu_uses_extended_idmap_level(void)
-{
- return ARM64_HW_PGTABLE_LEVELS(64 - idmap_t0sz) > CONFIG_PGTABLE_LEVELS;
-}
-
/*
* Ensure TCR.T0SZ is set to the provided value.
*/
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 046be789fbb4..9a65fb528110 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -66,7 +66,6 @@ extern bool arm64_use_ng_mappings;
#define _PAGE_DEFAULT (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
#define PAGE_KERNEL __pgprot(PROT_NORMAL)
-#define PAGE_KERNEL_TAGGED __pgprot(PROT_NORMAL_TAGGED)
#define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
#define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
#define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index e17b96d0e4b5..47027796c2f9 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -486,6 +486,9 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
+#define pgprot_tagged(prot) \
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
+#define pgprot_mhp pgprot_tagged
/*
* DMA allocations for non-coherent devices use what the Arm architecture calls
* "Normal non-cacheable" memory, which permits speculation, unaligned accesses
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index ca2cd75d3286..efc10e9041a0 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -251,6 +251,8 @@ unsigned long get_wchan(struct task_struct *p);
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
struct task_struct *next);
+asmlinkage void arm64_preempt_schedule_irq(void);
+
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index dfd4edbfe360..d4a5fca984c3 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -796,6 +796,11 @@
#define ID_AA64MMFR0_PARANGE_48 0x5
#define ID_AA64MMFR0_PARANGE_52 0x6
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT 0x0
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE 0x1
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN 0x2
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX 0x7
+
#ifdef CONFIG_ARM64_PA_BITS_52
#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52
#else
@@ -961,14 +966,17 @@
#define ID_PFR1_PROGMOD_SHIFT 0
#if defined(CONFIG_ARM64_4K_PAGES)
-#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN4_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN4_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX 0x7
#elif defined(CONFIG_ARM64_16K_PAGES)
-#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN16_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN16_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX 0xF
#elif defined(CONFIG_ARM64_64K_PAGES)
-#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN64_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN64_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX 0x7
#endif
#define MVFR2_FPMISC_SHIFT 4
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 9f4e3b266f21..6623c99f0984 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -55,6 +55,8 @@ void arch_setup_new_exec(void);
#define arch_setup_new_exec arch_setup_new_exec
void arch_release_task_struct(struct task_struct *tsk);
+int arch_dup_task_struct(struct task_struct *dst,
+ struct task_struct *src);
#endif
diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
index 3333950b5909..ea487218db79 100644
--- a/arch/arm64/include/asm/word-at-a-time.h
+++ b/arch/arm64/include/asm/word-at-a-time.h
@@ -53,7 +53,7 @@ static inline unsigned long find_zero(unsigned long mask)
*/
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
- unsigned long ret, offset;
+ unsigned long ret, tmp;
/* Load word from unaligned pointer addr */
asm(
@@ -61,9 +61,9 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
"2:\n"
" .pushsection .fixup,\"ax\"\n"
" .align 2\n"
- "3: and %1, %2, #0x7\n"
- " bic %2, %2, #0x7\n"
- " ldr %0, [%2]\n"
+ "3: bic %1, %2, #0x7\n"
+ " ldr %0, [%1]\n"
+ " and %1, %2, #0x7\n"
" lsl %1, %1, #0x3\n"
#ifndef __AARCH64EB__
" lsr %0, %0, %1\n"
@@ -73,7 +73,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
" b 2b\n"
" .popsection\n"
_ASM_EXTABLE(1b, 3b)
- : "=&r" (ret), "=&r" (offset)
+ : "=&r" (ret), "=&r" (tmp)
: "r" (addr), "Q" (*(unsigned long *)addr));
return ret;
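
The reworked fixup above no longer writes to the register holding addr (an input operand); it uses the spare temporary instead. For reference, a rough C model of what the fallback path computes on the little-endian side (illustration only; the authoritative version is the inline asm above):

#include <stdio.h>

/* Rough little-endian model of the fixup: reload from the aligned-down
 * address and shift away the bytes below addr, so bytes past the faulting
 * boundary read as zero. */
static unsigned long zeropad_fallback(unsigned long addr)
{
	unsigned long aligned = addr & ~0x7UL;			/* bic %1, %2, #0x7 */
	unsigned long word = *(const unsigned long *)aligned;	/* ldr %0, [%1]    */
	unsigned long offset = addr & 0x7UL;			/* and %1, %2, #0x7 */
	return word >> (offset * 8);				/* lsl #3 then lsr  */
}

int main(void)
{
	unsigned long word = 0x8877665544332211UL;	/* bytes at the aligned address */
	/* Pretend addr points 3 bytes into that word: caller sees 0x44, 0x55, ... */
	printf("%#lx\n", zeropad_fallback((unsigned long)&word + 3));
	return 0;
}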
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 506a1cd37973..e2c20c036442 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -526,6 +526,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
1, 0),
},
#endif
+#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
+ {
+ /* NVIDIA Carmel */
+ .desc = "NVIDIA Carmel CNP erratum",
+ .capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
+ ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+ },
+#endif
{
}
};
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 066030717a4c..e5281e1c8f1d 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -383,7 +383,6 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
* of support.
*/
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
ARM64_FTR_END,
};
@@ -1321,7 +1320,10 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
* may share TLB entries with a CPU stuck in the crashed
* kernel.
*/
- if (is_kdump_kernel())
+ if (is_kdump_kernel())
+ return false;
+
+ if (cpus_have_const_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
return false;
return has_cpuid_feature(entry, scope);
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 77605aec25fe..51fcf99d5351 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -353,7 +353,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
* with the CLIDR_EL1 fields to avoid triggering false warnings
* when there is a mismatch across the CPUs. Keep track of the
* effective value of the CTR_EL0 in our internal records for
- * acurate sanity check and feature enablement.
+ * accurate sanity check and feature enablement.
*/
info->reg_ctr = read_cpuid_effective_cachetype();
info->reg_dczid = read_cpuid(DCZID_EL0);
diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c
index e6e284265f19..58303a9ec32c 100644
--- a/arch/arm64/kernel/crash_dump.c
+++ b/arch/arm64/kernel/crash_dump.c
@@ -64,5 +64,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
+ *ppos += count;
+
return count;
}
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a31a0a713c85..6acfc5e6b5e0 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -148,16 +148,18 @@ alternative_cb_end
.endm
/* Check for MTE asynchronous tag check faults */
- .macro check_mte_async_tcf, flgs, tmp
+ .macro check_mte_async_tcf, tmp, ti_flags
#ifdef CONFIG_ARM64_MTE
+ .arch_extension lse
alternative_if_not ARM64_MTE
b 1f
alternative_else_nop_endif
mrs_s \tmp, SYS_TFSRE0_EL1
tbz \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
- orr \flgs, \flgs, #_TIF_MTE_ASYNC_FAULT
- str \flgs, [tsk, #TSK_TI_FLAGS]
+ mov \tmp, #_TIF_MTE_ASYNC_FAULT
+ add \ti_flags, tsk, #TSK_TI_FLAGS
+ stset \tmp, [\ti_flags]
msr_s SYS_TFSRE0_EL1, xzr
1:
#endif
@@ -244,7 +246,7 @@ alternative_else_nop_endif
disable_step_tsk x19, x20
/* Check for asynchronous tag check faults in user space */
- check_mte_async_tcf x19, x22
+ check_mte_async_tcf x22, x23
apply_ssbd 1, x22, x23
ptrauth_keys_install_kernel tsk, x20, x22, x23
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 66b0e0b66e31..840bda1869e9 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -319,7 +319,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
*/
adrp x5, __idmap_text_end
clz x5, x5
- cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough?
+ cmp x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
b.ge 1f // .. then skip VA range extension
adr_l x6, idmap_t0sz
@@ -655,8 +655,10 @@ SYM_FUNC_END(__secondary_too_slow)
SYM_FUNC_START(__enable_mmu)
mrs x2, ID_AA64MMFR0_EL1
ubfx x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
- cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
- b.ne __no_granule_support
+ cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
+ b.lt __no_granule_support
+ cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
+ b.gt __no_granule_support
update_early_cpu_boot_status 0, x2, x3
adrp x2, idmap_pg_dir
phys_to_ttbr x1, x1
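
The __enable_mmu change turns the single equality test into a [MIN, MAX] window, matching the new *_SUPPORTED_MIN/_MAX definitions in the sysreg.h hunk above: the TGRAN4/TGRAN64 fields treat 0xF (-1 when read as signed) as "not supported", while TGRAN16 uses 0x0, so future positive values keep booting. A minimal C sketch of the same check (not part of the patch; the window values below are taken from the sysreg.h hunk):

#include <stdbool.h>

static bool tgran_in_window(unsigned int tgran, unsigned int min, unsigned int max)
{
	return tgran >= min && tgran <= max;
}

/* 4K:  tgran_in_window(v, 0x0, 0x7) - 0x8..0xF (signed negative) rejected */
/* 16K: tgran_in_window(v, 0x1, 0xF) - only 0x0 ("not supported") rejected */
/* 64K: tgran_in_window(v, 0x0, 0x7) - 0x8..0xF rejected */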
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index dffb16682330..83f1c4b92095 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -163,33 +163,36 @@ static __init void __parse_cmdline(const char *cmdline, bool parse_aliases)
} while (1);
}
-static __init void parse_cmdline(void)
+static __init const u8 *get_bootargs_cmdline(void)
{
- if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
- const u8 *prop;
- void *fdt;
- int node;
+ const u8 *prop;
+ void *fdt;
+ int node;
- fdt = get_early_fdt_ptr();
- if (!fdt)
- goto out;
+ fdt = get_early_fdt_ptr();
+ if (!fdt)
+ return NULL;
- node = fdt_path_offset(fdt, "/chosen");
- if (node < 0)
- goto out;
+ node = fdt_path_offset(fdt, "/chosen");
+ if (node < 0)
+ return NULL;
- prop = fdt_getprop(fdt, node, "bootargs", NULL);
- if (!prop)
- goto out;
+ prop = fdt_getprop(fdt, node, "bootargs", NULL);
+ if (!prop)
+ return NULL;
- __parse_cmdline(prop, true);
+ return strlen(prop) ? prop : NULL;
+}
- if (!IS_ENABLED(CONFIG_CMDLINE_EXTEND))
- return;
- }
+static __init void parse_cmdline(void)
+{
+ const u8 *prop = get_bootargs_cmdline();
-out:
- __parse_cmdline(CONFIG_CMDLINE, true);
+ if (IS_ENABLED(CONFIG_CMDLINE_FORCE) || !prop)
+ __parse_cmdline(CONFIG_CMDLINE, true);
+
+ if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && prop)
+ __parse_cmdline(prop, true);
}
/* Keep checkers quiet */
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 23f1a557bd9f..5aa9ed1e9ec6 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -101,6 +101,9 @@ KVM_NVHE_ALIAS(__stop___kvm_ex_table);
/* Array containing bases of nVHE per-CPU memory regions. */
KVM_NVHE_ALIAS(kvm_arm_hyp_percpu_base);
+/* PMU available static key */
+KVM_NVHE_ALIAS(kvm_arm_pmu_available);
+
#endif /* CONFIG_KVM */
#endif /* __ARM64_KERNEL_IMAGE_VARS_H */
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 7d2318f80955..4658fcf88c2b 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -460,7 +460,7 @@ static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}
-static inline u32 armv8pmu_read_evcntr(int idx)
+static inline u64 armv8pmu_read_evcntr(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 66aac2881ba8..85645b2b0c7a 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -267,10 +267,12 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
if (!instruction_pointer(regs))
BUG();
- if (kcb->kprobe_status == KPROBE_REENTER)
+ if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
- else
+ } else {
+ kprobes_restore_local_irqflag(kcb, regs);
reset_current_kprobe();
+ }
break;
case KPROBE_HIT_ACTIVE:
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 325c83b1a24d..6e60aa3b5ea9 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -57,6 +57,8 @@
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
+#include <asm/switch_to.h>
+#include <asm/system_misc.h>
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 5bfd9b87f85d..4ea9392f86e0 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -134,7 +134,7 @@ SYM_FUNC_START(_cpu_resume)
*/
bl cpu_do_resume
-#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
+#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
mov x0, sp
bl kasan_unpoison_task_stack_below
#endif
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index ad20981dfda4..d55bdfb7789c 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -194,8 +194,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
#ifdef CONFIG_STACKTRACE
-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
- struct task_struct *task, struct pt_regs *regs)
+noinline void arch_stack_walk(stack_trace_consume_fn consume_entry,
+ void *cookie, struct task_struct *task,
+ struct pt_regs *regs)
{
struct stackframe frame;
@@ -203,8 +204,8 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
start_backtrace(&frame, regs->regs[29], regs->pc);
else if (task == current)
start_backtrace(&frame,
- (unsigned long)__builtin_frame_address(0),
- (unsigned long)arch_stack_walk);
+ (unsigned long)__builtin_frame_address(1),
+ (unsigned long)__builtin_return_address(0));
else
start_backtrace(&frame, thread_saved_fp(task),
thread_saved_pc(task));
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index fc4c95dd2d26..7f06ba76698d 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -385,11 +385,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
/*
+ * We guarantee that both TLBs and I-cache are private to each
+ * vcpu. If detecting that a vcpu from the same VM has
+ * previously run on the same physical CPU, call into the
+ * hypervisor code to nuke the relevant contexts.
+ *
* We might get preempted before the vCPU actually runs, but
* over-invalidation doesn't affect correctness.
*/
if (*last_ran != vcpu->vcpu_id) {
- kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
+ kvm_call_hyp(__kvm_flush_cpu_context, mmu);
*last_ran = vcpu->vcpu_id;
}
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index 7a7e425616b5..dbc890511631 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -89,6 +89,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
* - Debug ROM Address (MDCR_EL2_TDRA)
* - OS related registers (MDCR_EL2_TDOSA)
* - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
+ * - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
*
* Additionally, KVM only traps guest accesses to the debug registers if
* the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
@@ -112,6 +113,7 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
MDCR_EL2_TPMS |
+ MDCR_EL2_TTRF |
MDCR_EL2_TPMCR |
MDCR_EL2_TDRA |
MDCR_EL2_TDOSA);
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index b0afad7a99c6..e831d3dfd50d 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -85,8 +85,10 @@ SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
// If the hyp context is loaded, go straight to hyp_panic
get_loaded_vcpu x0, x1
- cbz x0, hyp_panic
+ cbnz x0, 1f
+ b hyp_panic
+1:
// The hyp context is saved so make sure it is restored to allow
// hyp_panic to run at hyp and, subsequently, panic to run in the host.
// This makes use of __guest_exit to avoid duplication but sets the
@@ -94,7 +96,7 @@ SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
// current state is saved to the guest context but it will only be
// accurate if the guest had been completely restored.
adr_this_cpu x0, kvm_hyp_ctxt, x1
- adr x1, hyp_panic
+ adr_l x1, hyp_panic
str x1, [x0, #CPU_XREG_OFFSET(30)]
get_vcpu_ptr x1, x0
@@ -146,7 +148,7 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
// Now restore the hyp regs
restore_callee_saved_regs x2
- set_loaded_vcpu xzr, x1, x2
+ set_loaded_vcpu xzr, x2, x3
alternative_if ARM64_HAS_RAS_EXTN
// If we have the RAS extensions we can consume a pending error
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 54f4860cd87c..6c1f51f25eb3 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -90,15 +90,18 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
* counter, which could make a PMXEVCNTR_EL0 access UNDEF at
* EL1 instead of being trapped to EL2.
*/
- write_sysreg(0, pmselr_el0);
- write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
+ if (kvm_arm_support_pmu_v3()) {
+ write_sysreg(0, pmselr_el0);
+ write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
+ }
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}
static inline void __deactivate_traps_common(void)
{
write_sysreg(0, hstr_el2);
- write_sysreg(0, pmuserenr_el0);
+ if (kvm_arm_support_pmu_v3())
+ write_sysreg(0, pmuserenr_el0);
}
static inline void ___activate_traps(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
index 91a711aa8382..f401724f12ef 100644
--- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
@@ -58,16 +58,24 @@ static void __debug_restore_spe(u64 pmscr_el1)
write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
}
-void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
+void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
/* Disable and flush SPE data generation */
__debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1);
+}
+
+void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
+{
__debug_switch_to_guest_common(vcpu);
}
-void __debug_switch_to_host(struct kvm_vcpu *vcpu)
+void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
__debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
+}
+
+void __debug_switch_to_host(struct kvm_vcpu *vcpu)
+{
__debug_switch_to_host_common(vcpu);
}
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index 6585a7cbbc56..5d94584840cc 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -71,7 +71,8 @@ SYM_FUNC_START(__host_enter)
SYM_FUNC_END(__host_enter)
/*
- * void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
+ * void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
+ * u64 elr, u64 par);
*/
SYM_FUNC_START(__hyp_do_panic)
/* Prepare and exit to the host's panic function. */
@@ -82,9 +83,11 @@ SYM_FUNC_START(__hyp_do_panic)
hyp_kimg_va lr, x6
msr elr_el2, lr
- /* Set the panic format string. Use the, now free, LR as scratch. */
- ldr lr, =__hyp_panic_string
- hyp_kimg_va lr, x6
+ mov x29, x0
+
+ /* Load the format string into x0 and arguments into x1-7 */
+ ldr x0, =__hyp_panic_string
+ hyp_kimg_va x0, x6
/* Load the format arguments into x1-7. */
mov x6, x3
@@ -94,9 +97,7 @@ SYM_FUNC_START(__hyp_do_panic)
mrs x5, hpfar_el2
/* Enter the host, conditionally restoring the host context. */
- cmp x0, xzr
- mov x0, lr
- b.eq __host_enter_without_restoring
+ cbz x29, __host_enter_without_restoring
b __host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index f012f8665ecc..936328207bde 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -46,11 +46,11 @@ static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
}
-static void handle___kvm_tlb_flush_local_vmid(struct kvm_cpu_context *host_ctxt)
+static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
- __kvm_tlb_flush_local_vmid(kern_hyp_va(mmu));
+ __kvm_flush_cpu_context(kern_hyp_va(mmu));
}
static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)
@@ -67,9 +67,9 @@ static void handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)
write_sysreg_el2(tmp, SYS_SCTLR);
}
-static void handle___vgic_v3_get_ich_vtr_el2(struct kvm_cpu_context *host_ctxt)
+static void handle___vgic_v3_get_gic_config(struct kvm_cpu_context *host_ctxt)
{
- cpu_reg(host_ctxt, 1) = __vgic_v3_get_ich_vtr_el2();
+ cpu_reg(host_ctxt, 1) = __vgic_v3_get_gic_config();
}
static void handle___vgic_v3_read_vmcr(struct kvm_cpu_context *host_ctxt)
@@ -115,10 +115,10 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_flush_vm_context),
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
HANDLE_FUNC(__kvm_tlb_flush_vmid),
- HANDLE_FUNC(__kvm_tlb_flush_local_vmid),
+ HANDLE_FUNC(__kvm_flush_cpu_context),
HANDLE_FUNC(__kvm_timer_set_cntvoff),
HANDLE_FUNC(__kvm_enable_ssbs),
- HANDLE_FUNC(__vgic_v3_get_ich_vtr_el2),
+ HANDLE_FUNC(__vgic_v3_get_gic_config),
HANDLE_FUNC(__vgic_v3_read_vmcr),
HANDLE_FUNC(__vgic_v3_write_vmcr),
HANDLE_FUNC(__vgic_v3_init_lrs),
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index f3d0e9eca56c..68ab6b4d5141 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -192,6 +192,14 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);
__sysreg_save_state_nvhe(host_ctxt);
+ /*
+ * We must flush and disable the SPE buffer for nVHE, as
+ * the translation regime(EL1&0) is going to be loaded with
+ * that of the guest. And we must do this before we change the
+ * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
+ * before we load guest Stage1.
+ */
+ __debug_save_host_buffers_nvhe(vcpu);
__adjust_pc(vcpu);
@@ -234,11 +242,12 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
__fpsimd_save_fpexc32(vcpu);
+ __debug_switch_to_host(vcpu);
/*
* This must come after restoring the host sysregs, since a non-VHE
* system may enable SPE here and make use of the TTBRs.
*/
- __debug_switch_to_host(vcpu);
+ __debug_restore_host_buffers_nvhe(vcpu);
if (pmu_switch_needed)
__pmu_switch_to_host(host_ctxt);
@@ -257,7 +266,6 @@ void __noreturn hyp_panic(void)
u64 spsr = read_sysreg_el2(SYS_SPSR);
u64 elr = read_sysreg_el2(SYS_ELR);
u64 par = read_sysreg_par();
- bool restore_host = true;
struct kvm_cpu_context *host_ctxt;
struct kvm_vcpu *vcpu;
@@ -271,7 +279,7 @@ void __noreturn hyp_panic(void)
__sysreg_restore_state_nvhe(host_ctxt);
}
- __hyp_do_panic(restore_host, spsr, elr, par);
+ __hyp_do_panic(host_ctxt, spsr, elr, par);
unreachable();
}
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index fbde89a2c6e8..229b06748c20 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -123,7 +123,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
__tlb_switch_to_host(&cxt);
}
-void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
+void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
struct tlb_inv_context cxt;
@@ -131,6 +131,7 @@ void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
__tlb_switch_to_guest(mmu, &cxt);
__tlbi(vmalle1);
+ asm volatile("ic iallu");
dsb(nsh);
isb();
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 4d177ce1d536..926fc07074f5 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -223,6 +223,7 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
goto out;
if (!table) {
+ data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
data->addr += kvm_granule_size(level);
goto out;
}
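
The added ALIGN_DOWN keeps the walk address on granule boundaries when stepping over a non-table entry; otherwise an initially misaligned address carries its offset into every subsequent step at that level. Arithmetic illustration only (made-up values, 2 MiB granule):

#include <stdio.h>

#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	unsigned long granule = 0x200000;	/* e.g. a 2 MiB level-2 entry */
	unsigned long addr = 0x80001000;	/* misaligned walk address */

	unsigned long old_next = addr + granule;			/* 0x80201000 */
	unsigned long new_next = ALIGN_DOWN(addr, granule) + granule;	/* 0x80200000 */

	printf("old step: %#lx, fixed step: %#lx\n", old_next, new_next);
	return 0;
}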
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 80406f463c28..39f8f7f9227c 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -405,9 +405,54 @@ void __vgic_v3_init_lrs(void)
__gic_v3_set_lr(0, i);
}
-u64 __vgic_v3_get_ich_vtr_el2(void)
+/*
+ * Return the GIC CPU configuration:
+ * - [31:0] ICH_VTR_EL2
+ * - [62:32] RES0
+ * - [63] MMIO (GICv2) capable
+ */
+u64 __vgic_v3_get_gic_config(void)
{
- return read_gicreg(ICH_VTR_EL2);
+ u64 val, sre = read_gicreg(ICC_SRE_EL1);
+ unsigned long flags = 0;
+
+ /*
+ * To check whether we have a MMIO-based (GICv2 compatible)
+ * CPU interface, we need to disable the system register
+ * view. To do that safely, we have to prevent any interrupt
+ * from firing (which would be deadly).
+ *
+ * Note that this only makes sense on VHE, as interrupts are
+ * already masked for nVHE as part of the exception entry to
+ * EL2.
+ */
+ if (has_vhe())
+ flags = local_daif_save();
+
+ /*
+ * Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates
+ * that to be able to set ICC_SRE_EL1.SRE to 0, all the
+ * interrupt overrides must be set. You've got to love this.
+ */
+ sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
+ isb();
+ write_gicreg(0, ICC_SRE_EL1);
+ isb();
+
+ val = read_gicreg(ICC_SRE_EL1);
+
+ write_gicreg(sre, ICC_SRE_EL1);
+ isb();
+ sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
+ isb();
+
+ if (has_vhe())
+ local_daif_restore(flags);
+
+ val = (val & ICC_SRE_EL1_SRE) ? 0 : (1ULL << 63);
+ val |= read_gicreg(ICH_VTR_EL2);
+
+ return val;
}
u64 __vgic_v3_read_vmcr(void)
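
The packed return value documented above is consumed in the vgic-v3.c hunk further down (has_v2 from bit 63, ICH_VTR_EL2 from the low 32 bits). A minimal decoding sketch of that layout (illustration only, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

struct gic_config {
	bool has_gicv2_mmio;	/* bit 63 set: MMIO (GICv2) capable CPU interface */
	uint32_t ich_vtr_el2;	/* bits [31:0]: raw ICH_VTR_EL2 value */
};

static struct gic_config decode_gic_config(uint64_t val)
{
	struct gic_config cfg = {
		.has_gicv2_mmio	= val >> 63,
		.ich_vtr_el2	= (uint32_t)val,
	};
	return cfg;
}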
diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index fd7895945bbc..66f17349f0c3 100644
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -127,7 +127,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
__tlb_switch_to_host(&cxt);
}
-void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
+void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
struct tlb_inv_context cxt;
@@ -135,6 +135,7 @@ void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
__tlb_switch_to_guest(mmu, &cxt);
__tlbi(vmalle1);
+ asm volatile("ic iallu");
dsb(nsh);
isb();
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 77cb2d28f2a4..8711894db8c2 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1312,8 +1312,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
* Prevent userspace from creating a memory region outside of the IPA
* space addressable by the KVM guest IPA space.
*/
- if (memslot->base_gfn + memslot->npages >=
- (kvm_phys_size(kvm) >> PAGE_SHIFT))
+ if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
return -EFAULT;
mmap_read_lock(current->mm);
diff --git a/arch/arm64/kvm/perf.c b/arch/arm64/kvm/perf.c
index d45b8b9a4415..739164324afe 100644
--- a/arch/arm64/kvm/perf.c
+++ b/arch/arm64/kvm/perf.c
@@ -11,6 +11,8 @@
#include <asm/kvm_emulate.h>
+DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
+
static int kvm_is_in_guest(void)
{
return kvm_get_running_vcpu() != NULL;
@@ -48,6 +50,14 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
int kvm_perf_init(void)
{
+ /*
+ * Check if HW_PERF_EVENTS are supported by checking the number of
+ * hardware performance counters. This could ensure the presence of
+ * a physical PMU and CONFIG_PERF_EVENT is selected.
+ */
+ if (IS_ENABLED(CONFIG_ARM_PMU) && perf_num_counters() > 0)
+ static_branch_enable(&kvm_arm_pmu_available);
+
return perf_register_guest_info_callbacks(&kvm_guest_cbs);
}
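
The perf_num_counters() probe moves here from kvm_arm_support_pmu_v3() (removed in the pmu-emul.c hunk below) and latches the result in the kvm_arm_pmu_available static key, which the image-vars.h hunk also exposes to the nVHE hyp object. A sketch of the helper shape this implies, hedged since the actual definition lives in a header outside this diff:

/* Hypothetical shape of the helper once it tests the static key
 * (assumed; the real definition is not shown in this diff): */
DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static __always_inline bool kvm_arm_support_pmu_v3(void)
{
	return static_branch_likely(&kvm_arm_pmu_available);
}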
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index e9ec08b0b070..e32c6e139a09 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -823,16 +823,6 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
return val & mask;
}
-bool kvm_arm_support_pmu_v3(void)
-{
- /*
- * Check if HW_PERF_EVENTS are supported by checking the number of
- * hardware performance counters. This could ensure the presence of
- * a physical PMU and CONFIG_PERF_EVENT is selected.
- */
- return (perf_num_counters() > 0);
-}
-
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
if (!kvm_vcpu_has_pmu(vcpu))
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 47f3f035f3ea..bd354cd45d28 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -311,23 +311,24 @@ int kvm_set_ipa_limit(void)
}
switch (cpuid_feature_extract_unsigned_field(mmfr0, tgran_2)) {
- default:
- case 1:
+ case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
return -EINVAL;
- case 0:
+ case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
break;
- case 2:
+ case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
break;
+ default:
+ kvm_err("Unsupported value for TGRAN_2, giving up\n");
+ return -EINVAL;
}
kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
- WARN(kvm_ipa_limit < KVM_PHYS_SHIFT,
- "KVM IPA Size Limit (%d bits) is smaller than default size\n",
- kvm_ipa_limit);
- kvm_info("IPA Size Limit: %d bits\n", kvm_ipa_limit);
+ kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
+ ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
+ " (Reduced IPA size, limited VM/VMM compatibility)" : ""));
return 0;
}
@@ -356,6 +357,11 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
return -EINVAL;
} else {
phys_shift = KVM_PHYS_SHIFT;
+ if (phys_shift > kvm_ipa_limit) {
+ pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
+ current->comm);
+ return -EINVAL;
+ }
}
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 52915b342351..6f530925a231 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -574,9 +574,13 @@ early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
*/
int vgic_v3_probe(const struct gic_kvm_info *info)
{
- u32 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_ich_vtr_el2);
+ u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
+ bool has_v2;
int ret;
+ has_v2 = ich_vtr_el2 >> 63;
+ ich_vtr_el2 = (u32)ich_vtr_el2;
+
/*
* The ListRegs field is 5 bits, but there is an architectural
* maximum of 16 list registers. Just ignore bit 4...
@@ -594,13 +598,15 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
gicv4_enable ? "en" : "dis");
}
+ kvm_vgic_global_state.vcpu_base = 0;
+
if (!info->vcpu.start) {
kvm_info("GICv3: no GICV resource entry\n");
- kvm_vgic_global_state.vcpu_base = 0;
+ } else if (!has_v2) {
+ pr_warn(FW_BUG "CPU interface incapable of MMIO access\n");
} else if (!PAGE_ALIGNED(info->vcpu.start)) {
pr_warn("GICV physical address 0x%llx not page aligned\n",
(unsigned long long)info->vcpu.start);
- kvm_vgic_global_state.vcpu_base = 0;
} else {
kvm_vgic_global_state.vcpu_base = info->vcpu.start;
kvm_vgic_global_state.can_emulate_gicv2 = true;
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 0ace5e68efba..3685e12aba9b 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -219,17 +219,40 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
int pfn_valid(unsigned long pfn)
{
- phys_addr_t addr = pfn << PAGE_SHIFT;
+ phys_addr_t addr = PFN_PHYS(pfn);
- if ((addr >> PAGE_SHIFT) != pfn)
+ /*
+ * Ensure the upper PAGE_SHIFT bits are clear in the
+ * pfn. Else it might lead to false positives when
+ * some of the upper bits are set, but the lower bits
+ * match a valid pfn.
+ */
+ if (PHYS_PFN(addr) != pfn)
return 0;
#ifdef CONFIG_SPARSEMEM
+{
+ struct mem_section *ms;
+
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
- if (!valid_section(__pfn_to_section(pfn)))
+ ms = __pfn_to_section(pfn);
+ if (!valid_section(ms))
return 0;
+
+ /*
+ * ZONE_DEVICE memory does not have the memblock entries.
+ * memblock_is_map_memory() check for ZONE_DEVICE based
+ * addresses will always fail. Even the normal hotplugged
+ * memory will never have MEMBLOCK_NOMAP flag set in their
+ * memblock entries. Skip memblock search for all non early
+ * memory sections covering all of hotplug memory including
+ * both normal and ZONE_DEVICE based.
+ */
+ if (!early_section(ms))
+ return pfn_section_valid(ms, pfn);
+}
#endif
return memblock_is_map_memory(addr);
}
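
The PHYS_PFN(addr) != pfn comparison above catches pfns whose upper PAGE_SHIFT bits are set: shifting such a pfn left drops those bits within the phys_addr_t width, so shifting back yields a different value. A tiny numeric illustration (not part of the patch; assumes 64-bit addresses and 4K pages):

#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 12;			/* 4K pages, for illustration */
	unsigned long pfn = (1UL << 52) | 0x1234;	/* bogus pfn with a high bit set */

	unsigned long addr = pfn << page_shift;		/* high bit shifted out of 64 bits */
	printf("round-trip pfn: %#lx (original %#lx)\n", addr >> page_shift, pfn);
	return 0;	/* round-trip gives 0x1234, so the bogus pfn is rejected */
}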
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 3802cfbdd20d..5d9550fdb9cf 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -40,7 +40,7 @@
#define NO_BLOCK_MAPPINGS BIT(0)
#define NO_CONT_MAPPINGS BIT(1)
-u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
+u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
u64 __section(".mmuoff.data.write") vabits_actual;
@@ -512,7 +512,8 @@ static void __init map_mem(pgd_t *pgdp)
* if MTE is present. Otherwise, it has the same attributes as
* PAGE_KERNEL.
*/
- __map_memblock(pgdp, start, end, PAGE_KERNEL_TAGGED, flags);
+ __map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
+ flags);
}
/*
@@ -1447,6 +1448,22 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
struct range arch_get_mappable_range(void)
{
struct range mhp_range;
+ u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
+ u64 end_linear_pa = __pa(PAGE_END - 1);
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ /*
+ * Check for a wrap, it is possible because of randomized linear
+ * mapping the start physical address is actually bigger than
+ * the end physical address. In this case set start to zero
+ * because [0, end_linear_pa] range must still be able to cover
+ * all addressable physical addresses.
+ */
+ if (start_linear_pa > end_linear_pa)
+ start_linear_pa = 0;
+ }
+
+ WARN_ON(start_linear_pa > end_linear_pa);
/*
* Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
@@ -1454,8 +1471,9 @@ struct range arch_get_mappable_range(void)
* range which can be mapped inside this linear mapping range, must
* also be derived from its end points.
*/
- mhp_range.start = __pa(_PAGE_OFFSET(vabits_actual));
- mhp_range.end = __pa(PAGE_END - 1);
+ mhp_range.start = start_linear_pa;
+ mhp_range.end = end_linear_pa;
+
return mhp_range;
}