author:    Mark Rutland <mark.rutland@arm.com>  2022-05-16 17:07:35 +0100
committer: Will Deacon <will@kernel.org>        2022-05-17 14:25:35 +0100
commit:    eb3d8ea3e1f03f4b0b72d8f5ed9eb7c3165862e8
tree:      3ab5a2631364c0da3c3757414a4f0d01179c9fb4
parent:    19bef63f951e47dd4ba54810e6f7c7ff9344a3ef
arm64: kexec: load from kimage prior to clobbering
In arm64_relocate_new_kernel() we load some fields out of the kimage
structure after relocation has occurred. As the kimage structure isn't
allocated to be relocation-safe, it may be clobbered during relocation,
and we may load junk values out of the structure.

Due to this, kexec may fail when the kimage allocation happens to fall
within a PA range that an object will be relocated to. This has been
observed to occur for regular kexec on a QEMU TCG 'virt' machine with
2GiB of RAM, where the PA range of the new kernel image overlaps the
kimage structure.

Avoid this by ensuring we load all values from the kimage structure
prior to relocation.

I've tested this atop v5.16 and v5.18-rc6.

Fixes: 878fdbd70486 ("arm64: kexec: pass kimage as the only argument to relocation function")
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Will Deacon <will@kernel.org>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Link: https://lore.kernel.org/r/20220516160735.731404-1-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
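To make the hazard concrete, here is a minimal user-space C sketch of the same bug pattern. The struct, field names, and copy are illustrative stand-ins for the kimage structure and the relocation loop, not kernel code:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for struct kimage: a control block that may
 * itself lie inside the memory range being rewritten. */
struct control {
	uintptr_t start;    /* entry point to jump to after the copy */
	uintptr_t dtb_mem;  /* device tree address */
};

/* BROKEN: reads ctl->start after the copy may have clobbered *ctl. */
uintptr_t relocate_then_read(struct control *ctl, void *dst,
			     const void *src, size_t n)
{
	memcpy(dst, src, n);  /* if ctl lies in [dst, dst+n), *ctl is now junk */
	return ctl->start;    /* may load a clobbered value */
}

/* FIXED (the approach this patch takes): snapshot every needed field
 * into locals (registers, in the assembly) before any copying. */
uintptr_t read_then_relocate(struct control *ctl, void *dst,
			     const void *src, size_t n)
{
	uintptr_t start = ctl->start;  /* loaded while *ctl is still intact */

	memcpy(dst, src, n);
	return start;                  /* safe wherever ctl happens to live */
}

The diff below is the assembly analogue of the second function: x26-x28 are loaded from the kimage before the copy loop runs, and only those registers are consulted afterwards.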
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/kernel/relocate_kernel.S | 22 +++++++++++++++-------
1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index f0a3df9e18a3..413f899e4ac6 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -37,6 +37,15 @@
* safe memory that has been set up to be preserved during the copy operation.
*/
SYM_CODE_START(arm64_relocate_new_kernel)
+ /*
+ * The kimage structure isn't allocated specially and may be clobbered
+ * during relocation. We must load any values we need from it prior to
+ * any relocation occurring.
+ */
+ ldr x28, [x0, #KIMAGE_START]
+ ldr x27, [x0, #KIMAGE_ARCH_EL2_VECTORS]
+ ldr x26, [x0, #KIMAGE_ARCH_DTB_MEM]
+
/* Setup the list loop variables. */
ldr x18, [x0, #KIMAGE_ARCH_ZERO_PAGE] /* x18 = zero page for BBM */
ldr x17, [x0, #KIMAGE_ARCH_TTBR1] /* x17 = linear map copy */
@@ -72,21 +81,20 @@ SYM_CODE_START(arm64_relocate_new_kernel)
ic iallu
dsb nsh
isb
- ldr x4, [x0, #KIMAGE_START] /* relocation start */
- ldr x1, [x0, #KIMAGE_ARCH_EL2_VECTORS] /* relocation start */
- ldr x0, [x0, #KIMAGE_ARCH_DTB_MEM] /* dtb address */
turn_off_mmu x12, x13
/* Start new image. */
- cbz x1, .Lel1
- mov x1, x4 /* relocation start */
- mov x2, x0 /* dtb address */
+ cbz x27, .Lel1
+ mov x1, x28 /* kernel entry point */
+ mov x2, x26 /* dtb address */
mov x3, xzr
mov x4, xzr
mov x0, #HVC_SOFT_RESTART
hvc #0 /* Jumps from el2 */
.Lel1:
+ mov x0, x26 /* dtb address */
+ mov x1, xzr
mov x2, xzr
mov x3, xzr
- br x4 /* Jumps from el1 */
+ br x28 /* Jumps from el1 */
SYM_CODE_END(arm64_relocate_new_kernel)
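The #KIMAGE_* immediates in the ldr instructions above are byte offsets into struct kimage. They are not hand-coded; the kernel derives them from the C structure via its asm-offsets mechanism (for arm64, in arch/arm64/kernel/asm-offsets.c), so the assembly cannot silently drift from the C layout. A simplified sketch of that technique, with the struct layout abbreviated and field names assumed from the offsets used in this patch:

#include <stddef.h>

/* Simplified stand-in for struct kimage; the real layout lives in the
 * kernel headers, and only the fields referenced here are shown. */
struct kimage_sketch {
	unsigned long head;
	unsigned long start;
	struct {
		unsigned long dtb_mem;
		unsigned long el2_vectors;
	} arch;
};

/* The asm-offsets trick: emit each offset as a marker in the compiler's
 * assembly output, which the build then turns into a generated header
 * the assembler can include. */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0\"" : : "i" (val))

void kimage_offsets(void)
{
	DEFINE(KIMAGE_START, offsetof(struct kimage_sketch, start));
	DEFINE(KIMAGE_ARCH_DTB_MEM,
	       offsetof(struct kimage_sketch, arch.dtb_mem));
	DEFINE(KIMAGE_ARCH_EL2_VECTORS,
	       offsetof(struct kimage_sketch, arch.el2_vectors));
}

Roughly, the build compiles this file to assembly rather than an object, greps the "->KIMAGE_*" markers out of the listing, and rewrites them as #define lines in a generated asm-offsets.h that the .S file can use as plain immediates.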