author:    Marc Zyngier <marc.zyngier@arm.com>  2013-07-09 10:45:49 +0100
committer: Christoffer Dall <christoffer.dall@linaro.org>  2014-10-02 22:03:14 +0200
commit:    b1e0dfd634313a9c73e69440233859c834c9c279
tree:      e2341e9a9b8b328d8618df7777c7c524453cd581
parent:    611c5bb5dd4db9b002a864d380d149c2222d9616
arm64: KVM: vgic: add GICv3 world switch
Introduce the GICv3 world switch code used to save/restore the GICv3 context.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
(cherry picked from commit 754d37726010d872f1f714a8ce8920acdfa4978c)
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/vgic-v3-switch.S | 256
1 file changed, 256 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kvm/vgic-v3-switch.S b/arch/arm64/kvm/vgic-v3-switch.S
index 9fbf27350c84..21e68f606a8f 100644
--- a/arch/arm64/kvm/vgic-v3-switch.S
+++ b/arch/arm64/kvm/vgic-v3-switch.S
@@ -18,9 +18,265 @@
#include <linux/linkage.h>
#include <linux/irqchip/arm-gic-v3.h>
+#include <asm/assembler.h>
+#include <asm/memory.h>
+#include <asm/asm-offsets.h>
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+
.text
.pushsection .hyp.text, "ax"
+/*
+ * We store LRs in reverse order to let the CPU deal with streaming
+ * access. Use this macro to make it look saner...
+ */
+#define LR_OFFSET(n) (VGIC_V3_CPU_LR + (15 - (n)) * 8)
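+// With this layout, LR_OFFSET(15) is the lowest address (VGIC_V3_CPU_LR)
+// and LR_OFFSET(0) the highest (VGIC_V3_CPU_LR + 120).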
+
+/*
+ * Save the VGIC CPU state into memory
+ * x0: Register pointing to VCPU struct
+ * Do not corrupt x1!!!
+ */
+.macro save_vgic_v3_state
+ // Compute the address of struct vgic_cpu
+ add x3, x0, #VCPU_VGIC_CPU
+
+ // Make sure stores to the GIC via the memory mapped interface
+ // are now visible to the system register interface
+ dsb st
+
+ // Save all interesting registers
+ mrs x4, ICH_HCR_EL2
+ mrs x5, ICH_VMCR_EL2
+ mrs x6, ICH_MISR_EL2
+ mrs x7, ICH_EISR_EL2
+ mrs x8, ICH_ELSR_EL2
+
+ str w4, [x3, #VGIC_V3_CPU_HCR]
+ str w5, [x3, #VGIC_V3_CPU_VMCR]
+ str w6, [x3, #VGIC_V3_CPU_MISR]
+ str w7, [x3, #VGIC_V3_CPU_EISR]
+ str w8, [x3, #VGIC_V3_CPU_ELRSR]
+
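+	// Clear ICH_HCR_EL2, disabling the virtual CPU interface
+	// (and its maintenance interrupt) while the host runs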
+ msr ICH_HCR_EL2, xzr
+
+ mrs x21, ICH_VTR_EL2
+ mvn w22, w21
+ ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4
+
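+	// ListRegs (ICH_VTR_EL2[4:0]) is the number of implemented List
+	// Registers minus one. Each mrs below is 4 bytes, so the computed
+	// branch skips the accesses to the unimplemented LRs.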
+ adr x24, 1f
+ add x24, x24, x23
+ br x24
+
+1:
+ mrs x20, ICH_LR15_EL2
+ mrs x19, ICH_LR14_EL2
+ mrs x18, ICH_LR13_EL2
+ mrs x17, ICH_LR12_EL2
+ mrs x16, ICH_LR11_EL2
+ mrs x15, ICH_LR10_EL2
+ mrs x14, ICH_LR9_EL2
+ mrs x13, ICH_LR8_EL2
+ mrs x12, ICH_LR7_EL2
+ mrs x11, ICH_LR6_EL2
+ mrs x10, ICH_LR5_EL2
+ mrs x9, ICH_LR4_EL2
+ mrs x8, ICH_LR3_EL2
+ mrs x7, ICH_LR2_EL2
+ mrs x6, ICH_LR1_EL2
+ mrs x5, ICH_LR0_EL2
+
+ adr x24, 1f
+ add x24, x24, x23
+ br x24
+
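+	// Store only the implemented LRs, skipped in the same fashion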
+1:
+ str x20, [x3, #LR_OFFSET(15)]
+ str x19, [x3, #LR_OFFSET(14)]
+ str x18, [x3, #LR_OFFSET(13)]
+ str x17, [x3, #LR_OFFSET(12)]
+ str x16, [x3, #LR_OFFSET(11)]
+ str x15, [x3, #LR_OFFSET(10)]
+ str x14, [x3, #LR_OFFSET(9)]
+ str x13, [x3, #LR_OFFSET(8)]
+ str x12, [x3, #LR_OFFSET(7)]
+ str x11, [x3, #LR_OFFSET(6)]
+ str x10, [x3, #LR_OFFSET(5)]
+ str x9, [x3, #LR_OFFSET(4)]
+ str x8, [x3, #LR_OFFSET(3)]
+ str x7, [x3, #LR_OFFSET(2)]
+ str x6, [x3, #LR_OFFSET(1)]
+ str x5, [x3, #LR_OFFSET(0)]
+
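+	// PRIbits (ICH_VTR_EL2[31:29]) is the number of priority bits
+	// minus one: 7 priority bits imply four active priority registers
+	// per group, 6 bits two of them, and 5 bits a single one.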
+ tbnz w21, #29, 6f // 6 bits
+ tbz w21, #30, 5f // 5 bits
+ // 7 bits
+ mrs x20, ICH_AP0R3_EL2
+ str w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
+ mrs x19, ICH_AP0R2_EL2
+ str w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
+6: mrs x18, ICH_AP0R1_EL2
+ str w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
+5: mrs x17, ICH_AP0R0_EL2
+ str w17, [x3, #VGIC_V3_CPU_AP0R]
+
+ tbnz w21, #29, 6f // 6 bits
+ tbz w21, #30, 5f // 5 bits
+ // 7 bits
+ mrs x20, ICH_AP1R3_EL2
+ str w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
+ mrs x19, ICH_AP1R2_EL2
+ str w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
+6: mrs x18, ICH_AP1R1_EL2
+ str w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
+5: mrs x17, ICH_AP1R0_EL2
+ str w17, [x3, #VGIC_V3_CPU_AP1R]
+
+ // Restore SRE_EL1 access and re-enable SRE at EL1.
+ mrs x5, ICC_SRE_EL2
+ orr x5, x5, #ICC_SRE_EL2_ENABLE
+ msr ICC_SRE_EL2, x5
+ isb
+ mov x5, #1
+ msr ICC_SRE_EL1, x5
+.endm
+
+/*
+ * Restore the VGIC CPU state from memory
+ * x0: Register pointing to VCPU struct
+ */
+.macro restore_vgic_v3_state
+ // Disable SRE_EL1 access. Necessary, otherwise
+ // ICH_VMCR_EL2.VFIQEn becomes one, and FIQ happens...
+ msr ICC_SRE_EL1, xzr
+ isb
+
+ // Compute the address of struct vgic_cpu
+ add x3, x0, #VCPU_VGIC_CPU
+
+ // Restore all interesting registers
+ ldr w4, [x3, #VGIC_V3_CPU_HCR]
+ ldr w5, [x3, #VGIC_V3_CPU_VMCR]
+
+ msr ICH_HCR_EL2, x4
+ msr ICH_VMCR_EL2, x5
+
+ mrs x21, ICH_VTR_EL2
+
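+	// As on the save path, PRIbits determines how many active
+	// priority registers must be restored for each group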
+ tbnz w21, #29, 6f // 6 bits
+ tbz w21, #30, 5f // 5 bits
+ // 7 bits
+ ldr w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
+ msr ICH_AP1R3_EL2, x20
+ ldr w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
+ msr ICH_AP1R2_EL2, x19
+6: ldr w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
+ msr ICH_AP1R1_EL2, x18
+5: ldr w17, [x3, #VGIC_V3_CPU_AP1R]
+ msr ICH_AP1R0_EL2, x17
+
+ tbnz w21, #29, 6f // 6 bits
+ tbz w21, #30, 5f // 5 bits
+ // 7 bits
+ ldr w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
+ msr ICH_AP0R3_EL2, x20
+ ldr w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
+ msr ICH_AP0R2_EL2, x19
+6: ldr w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
+ msr ICH_AP0R1_EL2, x18
+5: ldr w17, [x3, #VGIC_V3_CPU_AP0R]
+ msr ICH_AP0R0_EL2, x17
+
+	mvn	w22, w21
+ ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4
+
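+	// Same computed branch as on the save path: skip the loads
+	// for the unimplemented List Registers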
+ adr x24, 1f
+ add x24, x24, x23
+ br x24
+
+1:
+ ldr x20, [x3, #LR_OFFSET(15)]
+ ldr x19, [x3, #LR_OFFSET(14)]
+ ldr x18, [x3, #LR_OFFSET(13)]
+ ldr x17, [x3, #LR_OFFSET(12)]
+ ldr x16, [x3, #LR_OFFSET(11)]
+ ldr x15, [x3, #LR_OFFSET(10)]
+ ldr x14, [x3, #LR_OFFSET(9)]
+ ldr x13, [x3, #LR_OFFSET(8)]
+ ldr x12, [x3, #LR_OFFSET(7)]
+ ldr x11, [x3, #LR_OFFSET(6)]
+ ldr x10, [x3, #LR_OFFSET(5)]
+ ldr x9, [x3, #LR_OFFSET(4)]
+ ldr x8, [x3, #LR_OFFSET(3)]
+ ldr x7, [x3, #LR_OFFSET(2)]
+ ldr x6, [x3, #LR_OFFSET(1)]
+ ldr x5, [x3, #LR_OFFSET(0)]
+
+ adr x24, 1f
+ add x24, x24, x23
+ br x24
+
+1:
+ msr ICH_LR15_EL2, x20
+ msr ICH_LR14_EL2, x19
+ msr ICH_LR13_EL2, x18
+ msr ICH_LR12_EL2, x17
+ msr ICH_LR11_EL2, x16
+ msr ICH_LR10_EL2, x15
+ msr ICH_LR9_EL2, x14
+ msr ICH_LR8_EL2, x13
+ msr ICH_LR7_EL2, x12
+ msr ICH_LR6_EL2, x11
+ msr ICH_LR5_EL2, x10
+ msr ICH_LR4_EL2, x9
+ msr ICH_LR3_EL2, x8
+ msr ICH_LR2_EL2, x7
+ msr ICH_LR1_EL2, x6
+ msr ICH_LR0_EL2, x5
+
+	// Ensure that the above will have reached the
+	// (re)distributors. This ensures the guest will read
+	// the correct values from the memory-mapped interface.
+ isb
+ dsb sy
+
+ // Prevent the guest from touching the GIC system registers
+ mrs x5, ICC_SRE_EL2
+ and x5, x5, #~ICC_SRE_EL2_ENABLE
+ msr ICC_SRE_EL2, x5
+.endm
+
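+/*
+ * Callable wrappers around the save/restore macros above, used by
+ * the EL2 world-switch code.
+ */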
+ENTRY(__save_vgic_v3_state)
+ save_vgic_v3_state
+ ret
+ENDPROC(__save_vgic_v3_state)
+
+ENTRY(__restore_vgic_v3_state)
+ restore_vgic_v3_state
+ ret
+ENDPROC(__restore_vgic_v3_state)
+
ENTRY(__vgic_v3_get_ich_vtr_el2)
mrs x0, ICH_VTR_EL2
ret