author     Mark Brown <broonie@kernel.org>  2014-10-09 17:41:56 +0100
committer  Mark Brown <broonie@kernel.org>  2014-10-09 17:41:56 +0100
commit     33e58167fbe81bee0b59710e4d4cb3381ca68adf (patch)
tree       bba841cdc7496bc43553dbadc71db2808ffac20c /arch/arm64
parent     e71695de69fd1a94493506173349edb03a66bd13 (diff)
parent     05fc265939403d2a0789cd071de9f26152842309 (diff)
Merge remote-tracking branch 'lsk/v3.14/topic/kvm' into linux-linaro-lsk-v3.14
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/include/asm/barrier.h        |  13
-rw-r--r--  arch/arm64/include/asm/cputype.h        |   1
-rw-r--r--  arch/arm64/include/asm/debug-monitors.h |  19
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h        |  36
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h        |  56
-rw-r--r--  arch/arm64/include/asm/kvm_coproc.h     |   3
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h    |  27
-rw-r--r--  arch/arm64/include/asm/kvm_host.h       |  66
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h        |  55
-rw-r--r--  arch/arm64/include/asm/kvm_psci.h       |   6
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h  |   5
-rw-r--r--  arch/arm64/include/asm/virt.h           |   4
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h       |  15
-rw-r--r--  arch/arm64/kernel/asm-offsets.c         |  26
-rw-r--r--  arch/arm64/kernel/debug-monitors.c      |   9
-rw-r--r--  arch/arm64/kvm/Makefile                 |   4
-rw-r--r--  arch/arm64/kvm/guest.c                  |  70
-rw-r--r--  arch/arm64/kvm/handle_exit.c            |  14
-rw-r--r--  arch/arm64/kvm/hyp-init.S               |   6
-rw-r--r--  arch/arm64/kvm/hyp.S                    | 612
-rw-r--r--  arch/arm64/kvm/sys_regs.c               | 637
-rw-r--r--  arch/arm64/kvm/sys_regs.h               |   2
-rw-r--r--  arch/arm64/kvm/sys_regs_generic_v8.c    |   2
-rw-r--r--  arch/arm64/kvm/vgic-v2-switch.S         | 133
-rw-r--r--  arch/arm64/kvm/vgic-v3-switch.S         | 267
-rw-r--r--  arch/arm64/mm/proc.S                    |   8
26 files changed, 1809 insertions(+), 287 deletions(-)
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 5c436e3457dd..71a42d6599fb 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -25,11 +25,12 @@
#define wfi() asm volatile("wfi" : : : "memory")
#define isb() asm volatile("isb" : : : "memory")
-#define dsb(opt) asm volatile("dsb sy" : : : "memory")
+#define dmb(opt) asm volatile("dmb " #opt : : : "memory")
+#define dsb(opt) asm volatile("dsb " #opt : : : "memory")
#define mb() dsb(sy)
-#define rmb() asm volatile("dsb ld" : : : "memory")
-#define wmb() asm volatile("dsb st" : : : "memory")
+#define rmb() dsb(ld)
+#define wmb() dsb(st)
#ifndef CONFIG_SMP
#define smp_mb() barrier()
@@ -53,9 +54,9 @@ do { \
#else
-#define smp_mb() asm volatile("dmb ish" : : : "memory")
-#define smp_rmb() asm volatile("dmb ishld" : : : "memory")
-#define smp_wmb() asm volatile("dmb ishst" : : : "memory")
+#define smp_mb() dmb(ish)
+#define smp_rmb() dmb(ishld)
+#define smp_wmb() dmb(ishst)
#define smp_store_release(p, v) \
do { \
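
A note on the macro rewrite above: it relies on preprocessor stringification, where #opt pastes the macro argument into the instruction string. A minimal sketch of the resulting expansion (plain C with GCC inline asm, compilable on arm64):

/*
 * dsb(ishst) expands to asm volatile("dsb " "ishst" : : : "memory"),
 * i.e. the single instruction "dsb ishst".
 */
#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")

static inline void publish_flag(volatile int *flag)
{
	dmb(ishst);	/* order earlier stores before the flag write */
	*flag = 1;
}
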
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index c404fb0df3a6..27f54a7cc81b 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -41,6 +41,7 @@
#define ARM_CPU_PART_AEM_V8 0xD0F0
#define ARM_CPU_PART_FOUNDATION 0xD000
+#define ARM_CPU_PART_CORTEX_A53 0xD030
#define ARM_CPU_PART_CORTEX_A57 0xD070
#define APM_CPU_PART_POTENZA 0x0000
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 6e9b5b36921c..7fb343779498 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -18,6 +18,15 @@
#ifdef __KERNEL__
+/* Low-level stepping controls. */
+#define DBG_MDSCR_SS (1 << 0)
+#define DBG_SPSR_SS (1 << 21)
+
+/* MDSCR_EL1 enabling bits */
+#define DBG_MDSCR_KDE (1 << 13)
+#define DBG_MDSCR_MDE (1 << 15)
+#define DBG_MDSCR_MASK ~(DBG_MDSCR_KDE | DBG_MDSCR_MDE)
+
#define DBG_ESR_EVT(x) (((x) >> 27) & 0x7)
/* AArch64 */
@@ -73,11 +82,6 @@
#define CACHE_FLUSH_IS_SAFE 1
-enum debug_el {
- DBG_ACTIVE_EL0 = 0,
- DBG_ACTIVE_EL1,
-};
-
/* AArch32 */
#define DBG_ESR_EVT_BKPT 0x4
#define DBG_ESR_EVT_VECC 0x5
@@ -115,6 +119,11 @@ void unregister_break_hook(struct break_hook *hook);
u8 debug_monitors_arch(void);
+enum debug_el {
+ DBG_ACTIVE_EL0 = 0,
+ DBG_ACTIVE_EL1,
+};
+
void enable_debug_monitors(enum debug_el el);
void disable_debug_monitors(enum debug_el el);
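
The stepping and enable bits above move into this header (from debug-monitors.c, see the hunk further down) so that KVM can share them. A usage sketch in kernel-style C with arm64 inline asm: DBG_MDSCR_MASK clears both enable bits while preserving the rest of MDSCR_EL1.

static inline void mdscr_monitor_enable_sketch(void)
{
	unsigned long mdscr;

	asm volatile("mrs %0, mdscr_el1" : "=r" (mdscr));
	mdscr &= DBG_MDSCR_MASK;	/* clear KDE and MDE */
	mdscr |= DBG_MDSCR_MDE;		/* monitor debug enable only */
	asm volatile("msr mdscr_el1, %0" : : "r" (mdscr));
}
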
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 0eb398655378..7fd3e27e3ccc 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -62,6 +62,7 @@
* RW: 64bit by default, can be overridden for 32bit VMs
* TAC: Trap ACTLR
* TSC: Trap SMC
+ * TVM: Trap VM ops (until M+C set in SCTLR_EL1)
* TSW: Trap cache operations by set/way
* TWE: Trap WFE
* TWI: Trap WFI
@@ -74,10 +75,11 @@
* SWIO: Turn set/way invalidates into set/way clean+invalidate
*/
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
- HCR_BSU_IS | HCR_FB | HCR_TAC | \
- HCR_AMO | HCR_IMO | HCR_FMO | \
- HCR_SWIO | HCR_TIDCP | HCR_RW)
+ HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
+ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
+#define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO)
+
/* Hyp System Control Register (SCTLR_EL2) bits */
#define SCTLR_EL2_EE (1 << 25)
@@ -106,7 +108,6 @@
/* VTCR_EL2 Registers bits */
#define VTCR_EL2_PS_MASK (7 << 16)
-#define VTCR_EL2_PS_40B (2 << 16)
#define VTCR_EL2_TG0_MASK (1 << 14)
#define VTCR_EL2_TG0_4K (0 << 14)
#define VTCR_EL2_TG0_64K (1 << 14)
@@ -121,6 +122,17 @@
#define VTCR_EL2_T0SZ_MASK 0x3f
#define VTCR_EL2_T0SZ_40B 24
+/*
+ * We configure the Stage-2 page tables to always restrict the IPA space to be
+ * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
+ * not known to exist and will break with this configuration.
+ *
+ * Note that when using 4K pages, we concatenate two first level page tables
+ * together.
+ *
+ * The magic numbers used for VTTBR_X in this patch can be found in Tables
+ * D4-23 and D4-25 in ARM DDI 0487A.b.
+ */
#ifdef CONFIG_ARM64_64K_PAGES
/*
* Stage2 translation configuration:
@@ -129,10 +141,9 @@
* 64kB pages (TG0 = 1)
* 2 level page tables (SL = 1)
*/
-#define VTCR_EL2_FLAGS (VTCR_EL2_PS_40B | VTCR_EL2_TG0_64K | \
- VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
- VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \
- VTCR_EL2_T0SZ_40B)
+#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \
+ VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
+ VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
#define VTTBR_X (38 - VTCR_EL2_T0SZ_40B)
#else
/*
@@ -142,15 +153,14 @@
* 4kB pages (TG0 = 0)
* 3 level page tables (SL = 1)
*/
-#define VTCR_EL2_FLAGS (VTCR_EL2_PS_40B | VTCR_EL2_TG0_4K | \
- VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
- VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \
- VTCR_EL2_T0SZ_40B)
+#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \
+ VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
+ VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
#define VTTBR_X (37 - VTCR_EL2_T0SZ_40B)
#endif
#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK (((1LLU << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
#define VTTBR_VMID_SHIFT (48LLU)
#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
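
To make the widened mask concrete, here is the 4K-page arithmetic as a stand-alone userspace check (not kernel code): T0SZ = 24 gives VTTBR_X = 37 - 24 = 13, and with PHYS_MASK_SHIFT = 48 the mask now covers base-address bits [46:12] instead of the previous [38:12].

#include <stdio.h>

int main(void)
{
	unsigned long long x = 37 - 24;				/* VTTBR_X = 13 */
	unsigned long long mask = ((1ULL << (48 - x)) - 1) << (x - 1);

	printf("VTTBR_BADDR_MASK = %#llx\n", mask);	/* 0x7ffffffff000 */
	return 0;
}
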
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index b25763bc0ec4..483842180f8f 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -18,6 +18,8 @@
#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__
+#include <asm/virt.h>
+
/*
* 0 is reserved as an invalid value.
* Order *must* be kept in sync with the hyp switch code.
@@ -43,14 +45,25 @@
#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */
#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
#define PAR_EL1 21 /* Physical Address Register */
+#define MDSCR_EL1 22 /* Monitor Debug System Control Register */
+#define DBGBCR0_EL1 23 /* Debug Breakpoint Control Registers (0-15) */
+#define DBGBCR15_EL1 38
+#define DBGBVR0_EL1 39 /* Debug Breakpoint Value Registers (0-15) */
+#define DBGBVR15_EL1 54
+#define DBGWCR0_EL1 55 /* Debug Watchpoint Control Registers (0-15) */
+#define DBGWCR15_EL1 70
+#define DBGWVR0_EL1 71 /* Debug Watchpoint Value Registers (0-15) */
+#define DBGWVR15_EL1 86
+#define MDCCINT_EL1 87 /* Monitor Debug Comms Channel Interrupt Enable Reg */
+
/* 32bit specific registers. Keep them at the end of the range */
-#define DACR32_EL2 22 /* Domain Access Control Register */
-#define IFSR32_EL2 23 /* Instruction Fault Status Register */
-#define FPEXC32_EL2 24 /* Floating-Point Exception Control Register */
-#define DBGVCR32_EL2 25 /* Debug Vector Catch Register */
-#define TEECR32_EL1 26 /* ThumbEE Configuration Register */
-#define TEEHBR32_EL1 27 /* ThumbEE Handler Base Register */
-#define NR_SYS_REGS 28
+#define DACR32_EL2 88 /* Domain Access Control Register */
+#define IFSR32_EL2 89 /* Instruction Fault Status Register */
+#define FPEXC32_EL2 90 /* Floating-Point Exception Control Register */
+#define DBGVCR32_EL2 91 /* Debug Vector Catch Register */
+#define TEECR32_EL1 92 /* ThumbEE Configuration Register */
+#define TEEHBR32_EL1 93 /* ThumbEE Handler Base Register */
+#define NR_SYS_REGS 94
/* 32bit mapping */
#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
@@ -79,13 +92,26 @@
#define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */
#define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
#define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */
-#define c10_AMAIR (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
+#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
+#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
-#define NR_CP15_REGS (NR_SYS_REGS * 2)
+
+#define cp14_DBGDSCRext (MDSCR_EL1 * 2)
+#define cp14_DBGBCR0 (DBGBCR0_EL1 * 2)
+#define cp14_DBGBVR0 (DBGBVR0_EL1 * 2)
+#define cp14_DBGBXVR0 (cp14_DBGBVR0 + 1)
+#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2)
+#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2)
+#define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
+
+#define NR_COPRO_REGS (NR_SYS_REGS * 2)
#define ARM_EXCEPTION_IRQ 0
#define ARM_EXCEPTION_TRAP 1
+#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
+#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
+
#ifndef __ASSEMBLY__
struct kvm;
struct kvm_vcpu;
@@ -95,13 +121,21 @@ extern char __kvm_hyp_init_end[];
extern char __kvm_hyp_vector[];
-extern char __kvm_hyp_code_start[];
-extern char __kvm_hyp_code_end[];
+#define __kvm_hyp_code_start __hyp_text_start
+#define __kvm_hyp_code_end __hyp_text_end
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+
+extern u64 __vgic_v3_get_ich_vtr_el2(void);
+
+extern char __save_vgic_v2_state[];
+extern char __restore_vgic_v2_state[];
+extern char __save_vgic_v3_state[];
+extern char __restore_vgic_v3_state[];
+
#endif
#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_coproc.h b/arch/arm64/include/asm/kvm_coproc.h
index 9a59301cd014..0b52377a6c11 100644
--- a/arch/arm64/include/asm/kvm_coproc.h
+++ b/arch/arm64/include/asm/kvm_coproc.h
@@ -39,7 +39,8 @@ void kvm_register_target_sys_reg_table(unsigned int target,
struct kvm_sys_reg_target_table *table);
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index dd8ecfc3f995..5674a55b5518 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -174,6 +174,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
+ return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
+{
return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
}
@@ -213,6 +218,17 @@ static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
default:
return be64_to_cpu(data);
}
+ } else {
+ switch (len) {
+ case 1:
+ return data & 0xff;
+ case 2:
+ return le16_to_cpu(data & 0xffff);
+ case 4:
+ return le32_to_cpu(data & 0xffffffff);
+ default:
+ return le64_to_cpu(data);
+ }
}
return data; /* Leave LE untouched */
@@ -233,6 +249,17 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
default:
return cpu_to_be64(data);
}
+ } else {
+ switch (len) {
+ case 1:
+ return data & 0xff;
+ case 2:
+ return cpu_to_le16(data & 0xffff);
+ case 4:
+ return cpu_to_le32(data & 0xffffffff);
+ default:
+ return cpu_to_le64(data);
+ }
}
return data; /* Leave LE untouched */
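
The else branches added above give little-endian guests the mirror-image treatment of the big-endian case. A userspace analogue of the 2-byte path, assuming glibc's <endian.h>:

#include <endian.h>
#include <stdint.h>

static uint64_t mmio_guest_to_host16(uint64_t data, int guest_is_be)
{
	/* BE guest: interpret as big-endian; LE guest: little-endian */
	return guest_is_be ? be16toh(data & 0xffff)
			   : le16toh(data & 0xffff);
}
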
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 0a1d69751562..bcde41905746 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -22,6 +22,8 @@
#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__
+#include <linux/types.h>
+#include <linux/kvm_types.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
@@ -39,10 +41,9 @@
#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
-#define KVM_VCPU_MAX_FEATURES 2
+#define KVM_VCPU_MAX_FEATURES 3
-struct kvm_vcpu;
-int kvm_target_cpu(void);
+int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
int kvm_arch_dev_ioctl_check_extension(long ext);
@@ -86,7 +87,7 @@ struct kvm_cpu_context {
struct kvm_regs gp_regs;
union {
u64 sys_regs[NR_SYS_REGS];
- u32 cp15[NR_CP15_REGS];
+ u32 copro[NR_COPRO_REGS];
};
};
@@ -101,6 +102,9 @@ struct kvm_vcpu_arch {
/* Exception Information */
struct kvm_vcpu_fault_info fault;
+ /* Debug state */
+ u64 debug_flags;
+
/* Pointer to host CPU context */
kvm_cpu_context_t *host_cpu_context;
@@ -138,7 +142,20 @@ struct kvm_vcpu_arch {
#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
#define vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)])
-#define vcpu_cp15(v,r) ((v)->arch.ctxt.cp15[(r)])
+/*
+ * CP14 and CP15 live in the same array, as they are backed by the
+ * same system registers.
+ */
+#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)])
+#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)])
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r))
+#define vcpu_cp15_64_low(v,r) vcpu_cp15((v),(r) + 1)
+#else
+#define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r) + 1)
+#define vcpu_cp15_64_low(v,r) vcpu_cp15((v),(r))
+#endif
struct kvm_vm_stat {
u32 remote_tlb_flush;
@@ -148,18 +165,15 @@ struct kvm_vcpu_stat {
u32 halt_wakeup;
};
-struct kvm_vcpu_init;
int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_init *init);
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
-struct kvm_one_reg;
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
#define KVM_ARCH_WANT_MMU_NOTIFIER
-struct kvm;
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
@@ -177,7 +191,7 @@ static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
}
struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
-struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
+struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
u64 kvm_call_hyp(void *hypfn, ...);
@@ -200,4 +214,38 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
hyp_stack_ptr, vector_ptr);
}
+struct vgic_sr_vectors {
+ void *save_vgic;
+ void *restore_vgic;
+};
+
+static inline void vgic_arch_setup(const struct vgic_params *vgic)
+{
+ extern struct vgic_sr_vectors __vgic_sr_vectors;
+
+ switch(vgic->type)
+ {
+ case VGIC_V2:
+ __vgic_sr_vectors.save_vgic = __save_vgic_v2_state;
+ __vgic_sr_vectors.restore_vgic = __restore_vgic_v2_state;
+ break;
+
+#ifdef CONFIG_ARM_GIC_V3
+ case VGIC_V3:
+ __vgic_sr_vectors.save_vgic = __save_vgic_v3_state;
+ __vgic_sr_vectors.restore_vgic = __restore_vgic_v3_state;
+ break;
+#endif
+
+ default:
+ BUG();
+ }
+}
+
+static inline void kvm_arch_hardware_disable(void) {}
+static inline void kvm_arch_hardware_unsetup(void) {}
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+
#endif /* __ARM64_KVM_HOST_H__ */
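
A minimal illustration of the sys_regs/copro aliasing introduced above: each 64-bit sys_regs[] slot doubles as two 32-bit copro[] words, and which half is the "high" word depends on host endianness, hence the vcpu_cp15_64_high/low macros. Userspace sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	union { uint64_t sys_reg; uint32_t copro[2]; } slot;

	slot.sys_reg = 0x1122334455667788ULL;
	/* little-endian host: copro[0]=0x55667788, copro[1]=0x11223344 */
	printf("%#x %#x\n", slot.copro[0], slot.copro[1]);
	return 0;
}
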
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 7f1f9408ff66..a030d163840b 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -59,10 +59,9 @@
#define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
/*
- * Align KVM with the kernel's view of physical memory. Should be
- * 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration.
+ * We currently only support a 40bit IPA.
*/
-#define KVM_PHYS_SHIFT PHYS_MASK_SHIFT
+#define KVM_PHYS_SHIFT (40)
#define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL)
@@ -93,20 +92,6 @@ void kvm_clear_hyp_idmap(void);
#define kvm_set_pte(ptep, pte) set_pte(ptep, pte)
#define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd)
-static inline bool kvm_is_write_fault(unsigned long esr)
-{
- unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT;
-
- if (esr_ec == ESR_EL2_EC_IABT)
- return false;
-
- if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR))
- return false;
-
- return true;
-}
-
-static inline void kvm_clean_dcache_area(void *addr, size_t size) {}
static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
@@ -122,11 +107,40 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
pmd_val(*pmd) |= PMD_S2_RDWR;
}
+#define kvm_pgd_addr_end(addr, end) pgd_addr_end(addr, end)
+#define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end)
+#define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end)
+
+static inline bool kvm_page_empty(void *ptr)
+{
+ struct page *ptr_page = virt_to_page(ptr);
+ return page_count(ptr_page) == 1;
+}
+
+#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
+#ifndef CONFIG_ARM64_64K_PAGES
+#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
+#else
+#define kvm_pmd_table_empty(pmdp) (0)
+#endif
+#define kvm_pud_table_empty(pudp) (0)
+
+
struct kvm;
-static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
- unsigned long size)
+#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
+
+static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
+{
+ return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
+}
+
+static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
+ unsigned long size)
{
+ if (!vcpu_has_cache_enabled(vcpu))
+ kvm_flush_dcache_to_poc((void *)hva, size);
+
if (!icache_is_aliasing()) { /* PIPT */
flush_icache_range(hva, hva + size);
} else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */
@@ -135,8 +149,9 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
}
}
-#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
#define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x))
+void stage2_flush_vm(struct kvm *kvm);
+
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
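
The 0b101 test in vcpu_has_cache_enabled() checks SCTLR_EL1.M (bit 0, stage-1 MMU enable) and SCTLR_EL1.C (bit 2, data cache enable) in one comparison. Spelled out:

#define SCTLR_EL1_M	(1UL << 0)	/* MMU enable */
#define SCTLR_EL1_C	(1UL << 2)	/* data/unified cache enable */

static inline int mmu_and_dcache_on(unsigned long sctlr)
{
	return (sctlr & (SCTLR_EL1_M | SCTLR_EL1_C))
		    == (SCTLR_EL1_M | SCTLR_EL1_C);
}
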
diff --git a/arch/arm64/include/asm/kvm_psci.h b/arch/arm64/include/asm/kvm_psci.h
index e301a4816355..bc39e557c56c 100644
--- a/arch/arm64/include/asm/kvm_psci.h
+++ b/arch/arm64/include/asm/kvm_psci.h
@@ -18,6 +18,10 @@
#ifndef __ARM64_KVM_PSCI_H__
#define __ARM64_KVM_PSCI_H__
-bool kvm_psci_call(struct kvm_vcpu *vcpu);
+#define KVM_ARM_PSCI_0_1 1
+#define KVM_ARM_PSCI_0_2 2
+
+int kvm_psci_version(struct kvm_vcpu *vcpu);
+int kvm_psci_call(struct kvm_vcpu *vcpu);
#endif /* __ARM64_KVM_PSCI_H__ */
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index b1d2e26c3c88..f7af66b54cb2 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -100,9 +100,9 @@
#define PTE_HYP PTE_USER
/*
- * 40-bit physical address supported.
+ * Highest possible physical address supported.
*/
-#define PHYS_MASK_SHIFT (40)
+#define PHYS_MASK_SHIFT (48)
#define PHYS_MASK ((UL(1) << PHYS_MASK_SHIFT) - 1)
/*
@@ -122,7 +122,6 @@
#define TCR_SHARED ((UL(3) << 12) | (UL(3) << 28))
#define TCR_TG0_64K (UL(1) << 14)
#define TCR_TG1_64K (UL(1) << 30)
-#define TCR_IPS_40BIT (UL(2) << 32)
#define TCR_ASID16 (UL(1) << 36)
#define TCR_TBI0 (UL(1) << 37)
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 130e2be952cf..290fb6690b33 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -63,6 +63,10 @@ static inline bool is_hyp_mode_mismatched(void)
return __boot_cpu_mode[0] != __boot_cpu_mode[1];
}
+/* The section containing the hypervisor text */
+extern char __hyp_text_start[];
+extern char __hyp_text_end[];
+
#endif /* __ASSEMBLY__ */
#endif /* ! __ASM__VIRT_H */
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index eaf54a30bedc..8e38878c87c6 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -31,11 +31,13 @@
#define KVM_NR_SPSR 5
#ifndef __ASSEMBLY__
+#include <linux/psci.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_READONLY_MEM
#define KVM_REG_SIZE(id) \
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
@@ -56,8 +58,9 @@ struct kvm_regs {
#define KVM_ARM_TARGET_FOUNDATION_V8 1
#define KVM_ARM_TARGET_CORTEX_A57 2
#define KVM_ARM_TARGET_XGENE_POTENZA 3
+#define KVM_ARM_TARGET_CORTEX_A53 4
-#define KVM_ARM_NUM_TARGETS 4
+#define KVM_ARM_NUM_TARGETS 5
/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
#define KVM_ARM_DEVICE_TYPE_SHIFT 0
@@ -77,6 +80,7 @@ struct kvm_regs {
#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
+#define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */
struct kvm_vcpu_init {
__u32 target;
@@ -156,6 +160,7 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
+#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
@@ -186,10 +191,10 @@ struct kvm_arch_memory_slot {
#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2)
#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3)
-#define KVM_PSCI_RET_SUCCESS 0
-#define KVM_PSCI_RET_NI ((unsigned long)-1)
-#define KVM_PSCI_RET_INVAL ((unsigned long)-2)
-#define KVM_PSCI_RET_DENIED ((unsigned long)-3)
+#define KVM_PSCI_RET_SUCCESS PSCI_RET_SUCCESS
+#define KVM_PSCI_RET_NI PSCI_RET_NOT_SUPPORTED
+#define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS
+#define KVM_PSCI_RET_DENIED PSCI_RET_DENIED
#endif
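
A userspace sketch of opting a vcpu into PSCI v0.2 with the new feature bit (vcpu_fd is assumed to be an open vcpu file descriptor, and the target would normally come from KVM_ARM_PREFERRED_TARGET):

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int vcpu_init_psci_0_2(int vcpu_fd, __u32 target)
{
	struct kvm_vcpu_init init;

	memset(&init, 0, sizeof(init));
	init.target = target;
	init.features[0] = 1 << KVM_ARM_VCPU_PSCI_0_2;
	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}
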
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 646f888387cd..9a9fce090d58 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -120,6 +120,7 @@ int main(void)
DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2));
DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2));
DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
+ DEFINE(VCPU_DEBUG_FLAGS, offsetof(struct kvm_vcpu, arch.debug_flags));
DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
@@ -129,13 +130,24 @@ int main(void)
DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
- DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr));
- DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr));
- DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr));
- DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr));
- DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr));
- DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr));
- DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr));
+ DEFINE(VGIC_SAVE_FN, offsetof(struct vgic_sr_vectors, save_vgic));
+ DEFINE(VGIC_RESTORE_FN, offsetof(struct vgic_sr_vectors, restore_vgic));
+ DEFINE(VGIC_SR_VECTOR_SZ, sizeof(struct vgic_sr_vectors));
+ DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
+ DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
+ DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
+ DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
+ DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
+ DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
+ DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
+ DEFINE(VGIC_V3_CPU_HCR, offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
+ DEFINE(VGIC_V3_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
+ DEFINE(VGIC_V3_CPU_MISR, offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
+ DEFINE(VGIC_V3_CPU_EISR, offsetof(struct vgic_cpu, vgic_v3.vgic_eisr));
+ DEFINE(VGIC_V3_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v3.vgic_elrsr));
+ DEFINE(VGIC_V3_CPU_AP0R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap0r));
+ DEFINE(VGIC_V3_CPU_AP1R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap1r));
+ DEFINE(VGIC_V3_CPU_LR, offsetof(struct vgic_cpu, vgic_v3.vgic_lr));
DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
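
For context, these DEFINE() entries reach the hyp assembly through the usual asm-offsets machinery (simplified here from include/linux/kbuild.h): each one emits a "->SYM value" marker that the build rewrites into a #define in asm-offsets.h.

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

/* ...which lets hyp.S write, for instance:
 *	ldr	x3, [x0, #VCPU_DEBUG_FLAGS]	// vcpu->arch.debug_flags
 */
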
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index bb9a0e684e12..6c6a2e56a8e3 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -30,15 +30,6 @@
#include <asm/cputype.h>
#include <asm/system_misc.h>
-/* Low-level stepping controls. */
-#define DBG_MDSCR_SS (1 << 0)
-#define DBG_SPSR_SS (1 << 21)
-
-/* MDSCR_EL1 enabling bits */
-#define DBG_MDSCR_KDE (1 << 13)
-#define DBG_MDSCR_MDE (1 << 15)
-#define DBG_MDSCR_MASK ~(DBG_MDSCR_KDE | DBG_MDSCR_MDE)
-
/* Determine debug architecture. */
u8 debug_monitors_arch(void)
{
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 72a9fd583ad3..32a096174b94 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -20,4 +20,8 @@ kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o
kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 08745578d54d..76794692c20b 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -136,13 +136,67 @@ static unsigned long num_core_regs(void)
}
/**
+ * ARM64 versions of the TIMER registers, always available on arm64
+ */
+
+#define NUM_TIMER_REGS 3
+
+static bool is_timer_reg(u64 index)
+{
+ switch (index) {
+ case KVM_REG_ARM_TIMER_CTL:
+ case KVM_REG_ARM_TIMER_CNT:
+ case KVM_REG_ARM_TIMER_CVAL:
+ return true;
+ }
+ return false;
+}
+
+static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+ if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
+ return -EFAULT;
+ uindices++;
+ if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
+ return -EFAULT;
+ uindices++;
+ if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ void __user *uaddr = (void __user *)(long)reg->addr;
+ u64 val;
+ int ret;
+
+ ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
+ if (ret != 0)
+ return -EFAULT;
+
+ return kvm_arm_timer_set_reg(vcpu, reg->id, val);
+}
+
+static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ void __user *uaddr = (void __user *)(long)reg->addr;
+ u64 val;
+
+ val = kvm_arm_timer_get_reg(vcpu, reg->id);
+ return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+}
+
+/**
* kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
*
* This is for all registers.
*/
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
- return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu);
+ return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
+ + NUM_TIMER_REGS;
}
/**
@@ -154,6 +208,7 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
unsigned int i;
const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
+ int ret;
for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
if (put_user(core_reg | i, uindices))
@@ -161,6 +216,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
uindices++;
}
+ ret = copy_timer_indices(vcpu, uindices);
+ if (ret)
+ return ret;
+ uindices += NUM_TIMER_REGS;
+
return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}
@@ -174,6 +234,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return get_core_reg(vcpu, reg);
+ if (is_timer_reg(reg->id))
+ return get_timer_reg(vcpu, reg);
+
return kvm_arm_sys_reg_get_reg(vcpu, reg);
}
@@ -187,6 +250,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return set_core_reg(vcpu, reg);
+ if (is_timer_reg(reg->id))
+ return set_timer_reg(vcpu, reg);
+
return kvm_arm_sys_reg_set_reg(vcpu, reg);
}
@@ -214,6 +280,8 @@ int __attribute_const__ kvm_target_cpu(void)
return KVM_ARM_TARGET_AEM_V8;
case ARM_CPU_PART_FOUNDATION:
return KVM_ARM_TARGET_FOUNDATION_V8;
+ case ARM_CPU_PART_CORTEX_A53:
+ return KVM_ARM_TARGET_CORTEX_A53;
case ARM_CPU_PART_CORTEX_A57:
return KVM_ARM_TARGET_CORTEX_A57;
};
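
A userspace sketch of reading one of the newly exposed timer registers through the KVM_GET_ONE_REG interface this hunk wires up (vcpu_fd is assumed):

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int read_guest_timer_cnt(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_TIMER_CNT,
		.addr = (uint64_t)(unsigned long)val,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
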
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index fd9aeba99683..34b8bd0711e9 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -30,11 +30,15 @@ typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- if (kvm_psci_call(vcpu))
+ int ret;
+
+ ret = kvm_psci_call(vcpu);
+ if (ret < 0) {
+ kvm_inject_undefined(vcpu);
return 1;
+ }
- kvm_inject_undefined(vcpu);
- return 1;
+ return ret;
}
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
@@ -71,9 +75,9 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_EL2_EC_WFI] = kvm_handle_wfx,
[ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32,
[ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64,
- [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_access,
+ [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_32,
[ESR_EL2_EC_CP14_LS] = kvm_handle_cp14_load_store,
- [ESR_EL2_EC_CP14_64] = kvm_handle_cp14_access,
+ [ESR_EL2_EC_CP14_64] = kvm_handle_cp14_64,
[ESR_EL2_EC_HVC32] = handle_hvc,
[ESR_EL2_EC_SMC32] = handle_smc,
[ESR_EL2_EC_HVC64] = handle_hvc,
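
For reference, the int return convention that replaces kvm_psci_call()'s old bool result, as implied by the handle_hvc() hunk above:

/*
 *   ret < 0   invalid/unknown function -> inject #UNDEF, resume guest
 *   ret == 0  needs userspace handling -> exit to the VMM
 *             (e.g. PSCI 0.2 SYSTEM_OFF / SYSTEM_RESET)
 *   ret > 0   handled in the kernel    -> resume the guest
 */
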
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 12e26f358c31..c3191168a994 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -68,6 +68,12 @@ __do_hyp_init:
msr tcr_el2, x4
ldr x4, =VTCR_EL2_FLAGS
+ /*
+ * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
+ * VTCR_EL2.
+ */
+ mrs x5, ID_AA64MMFR0_EL1
+ bfi x4, x5, #16, #3
msr vtcr_el2, x4
mrs x4, mair_el1
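
A C sketch of the bfi above: it copies the low three bits of the PARange field (ID_AA64MMFR0_EL1[3:0]) into VTCR_EL2.PS (bits [18:16]), so the stage-2 output size matches what the CPU implements, replacing the hard-coded VTCR_EL2_PS_40B that the kvm_arm.h hunk removed.

static inline unsigned long vtcr_set_ps(unsigned long vtcr,
					unsigned long mmfr0)
{
	vtcr &= ~(0x7UL << 16);			/* clear PS field      */
	return vtcr | ((mmfr0 & 0x7) << 16);	/* insert PARange[2:0] */
}
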
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 2c56012cb2d2..b72aa9f9215c 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -16,11 +16,11 @@
*/
#include <linux/linkage.h>
-#include <linux/irqchip/arm-gic.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
+#include <asm/debug-monitors.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
@@ -36,9 +36,6 @@
.pushsection .hyp.text, "ax"
.align PAGE_SHIFT
-__kvm_hyp_code_start:
- .globl __kvm_hyp_code_start
-
.macro save_common_regs
// x2: base address for cpu context
// x3: tmp register
@@ -215,6 +212,7 @@ __kvm_hyp_code_start:
mrs x22, amair_el1
mrs x23, cntkctl_el1
mrs x24, par_el1
+ mrs x25, mdscr_el1
stp x4, x5, [x3]
stp x6, x7, [x3, #16]
@@ -226,7 +224,202 @@ __kvm_hyp_code_start:
stp x18, x19, [x3, #112]
stp x20, x21, [x3, #128]
stp x22, x23, [x3, #144]
- str x24, [x3, #160]
+ stp x24, x25, [x3, #160]
+.endm
+
+.macro save_debug
+ // x2: base address for cpu context
+ // x3: tmp register
+
+ mrs x26, id_aa64dfr0_el1
+ ubfx x24, x26, #12, #4 // Extract BRPs
+ ubfx x25, x26, #20, #4 // Extract WRPs
+ mov w26, #15
+ sub w24, w26, w24 // How many BPs to skip
+ sub w25, w26, w25 // How many WPs to skip
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+1:
+ mrs x20, dbgbcr15_el1
+ mrs x19, dbgbcr14_el1
+ mrs x18, dbgbcr13_el1
+ mrs x17, dbgbcr12_el1
+ mrs x16, dbgbcr11_el1
+ mrs x15, dbgbcr10_el1
+ mrs x14, dbgbcr9_el1
+ mrs x13, dbgbcr8_el1
+ mrs x12, dbgbcr7_el1
+ mrs x11, dbgbcr6_el1
+ mrs x10, dbgbcr5_el1
+ mrs x9, dbgbcr4_el1
+ mrs x8, dbgbcr3_el1
+ mrs x7, dbgbcr2_el1
+ mrs x6, dbgbcr1_el1
+ mrs x5, dbgbcr0_el1
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+
+1:
+ str x20, [x3, #(15 * 8)]
+ str x19, [x3, #(14 * 8)]
+ str x18, [x3, #(13 * 8)]
+ str x17, [x3, #(12 * 8)]
+ str x16, [x3, #(11 * 8)]
+ str x15, [x3, #(10 * 8)]
+ str x14, [x3, #(9 * 8)]
+ str x13, [x3, #(8 * 8)]
+ str x12, [x3, #(7 * 8)]
+ str x11, [x3, #(6 * 8)]
+ str x10, [x3, #(5 * 8)]
+ str x9, [x3, #(4 * 8)]
+ str x8, [x3, #(3 * 8)]
+ str x7, [x3, #(2 * 8)]
+ str x6, [x3, #(1 * 8)]
+ str x5, [x3, #(0 * 8)]
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+1:
+ mrs x20, dbgbvr15_el1
+ mrs x19, dbgbvr14_el1
+ mrs x18, dbgbvr13_el1
+ mrs x17, dbgbvr12_el1
+ mrs x16, dbgbvr11_el1
+ mrs x15, dbgbvr10_el1
+ mrs x14, dbgbvr9_el1
+ mrs x13, dbgbvr8_el1
+ mrs x12, dbgbvr7_el1
+ mrs x11, dbgbvr6_el1
+ mrs x10, dbgbvr5_el1
+ mrs x9, dbgbvr4_el1
+ mrs x8, dbgbvr3_el1
+ mrs x7, dbgbvr2_el1
+ mrs x6, dbgbvr1_el1
+ mrs x5, dbgbvr0_el1
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+
+1:
+ str x20, [x3, #(15 * 8)]
+ str x19, [x3, #(14 * 8)]
+ str x18, [x3, #(13 * 8)]
+ str x17, [x3, #(12 * 8)]
+ str x16, [x3, #(11 * 8)]
+ str x15, [x3, #(10 * 8)]
+ str x14, [x3, #(9 * 8)]
+ str x13, [x3, #(8 * 8)]
+ str x12, [x3, #(7 * 8)]
+ str x11, [x3, #(6 * 8)]
+ str x10, [x3, #(5 * 8)]
+ str x9, [x3, #(4 * 8)]
+ str x8, [x3, #(3 * 8)]
+ str x7, [x3, #(2 * 8)]
+ str x6, [x3, #(1 * 8)]
+ str x5, [x3, #(0 * 8)]
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+1:
+ mrs x20, dbgwcr15_el1
+ mrs x19, dbgwcr14_el1
+ mrs x18, dbgwcr13_el1
+ mrs x17, dbgwcr12_el1
+ mrs x16, dbgwcr11_el1
+ mrs x15, dbgwcr10_el1
+ mrs x14, dbgwcr9_el1
+ mrs x13, dbgwcr8_el1
+ mrs x12, dbgwcr7_el1
+ mrs x11, dbgwcr6_el1
+ mrs x10, dbgwcr5_el1
+ mrs x9, dbgwcr4_el1
+ mrs x8, dbgwcr3_el1
+ mrs x7, dbgwcr2_el1
+ mrs x6, dbgwcr1_el1
+ mrs x5, dbgwcr0_el1
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+
+1:
+ str x20, [x3, #(15 * 8)]
+ str x19, [x3, #(14 * 8)]
+ str x18, [x3, #(13 * 8)]
+ str x17, [x3, #(12 * 8)]
+ str x16, [x3, #(11 * 8)]
+ str x15, [x3, #(10 * 8)]
+ str x14, [x3, #(9 * 8)]
+ str x13, [x3, #(8 * 8)]
+ str x12, [x3, #(7 * 8)]
+ str x11, [x3, #(6 * 8)]
+ str x10, [x3, #(5 * 8)]
+ str x9, [x3, #(4 * 8)]
+ str x8, [x3, #(3 * 8)]
+ str x7, [x3, #(2 * 8)]
+ str x6, [x3, #(1 * 8)]
+ str x5, [x3, #(0 * 8)]
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+1:
+ mrs x20, dbgwvr15_el1
+ mrs x19, dbgwvr14_el1
+ mrs x18, dbgwvr13_el1
+ mrs x17, dbgwvr12_el1
+ mrs x16, dbgwvr11_el1
+ mrs x15, dbgwvr10_el1
+ mrs x14, dbgwvr9_el1
+ mrs x13, dbgwvr8_el1
+ mrs x12, dbgwvr7_el1
+ mrs x11, dbgwvr6_el1
+ mrs x10, dbgwvr5_el1
+ mrs x9, dbgwvr4_el1
+ mrs x8, dbgwvr3_el1
+ mrs x7, dbgwvr2_el1
+ mrs x6, dbgwvr1_el1
+ mrs x5, dbgwvr0_el1
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+
+1:
+ str x20, [x3, #(15 * 8)]
+ str x19, [x3, #(14 * 8)]
+ str x18, [x3, #(13 * 8)]
+ str x17, [x3, #(12 * 8)]
+ str x16, [x3, #(11 * 8)]
+ str x15, [x3, #(10 * 8)]
+ str x14, [x3, #(9 * 8)]
+ str x13, [x3, #(8 * 8)]
+ str x12, [x3, #(7 * 8)]
+ str x11, [x3, #(6 * 8)]
+ str x10, [x3, #(5 * 8)]
+ str x9, [x3, #(4 * 8)]
+ str x8, [x3, #(3 * 8)]
+ str x7, [x3, #(2 * 8)]
+ str x6, [x3, #(1 * 8)]
+ str x5, [x3, #(0 * 8)]
+
+ mrs x21, mdccint_el1
+ str x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
.endm
.macro restore_sysregs
@@ -245,7 +438,7 @@ __kvm_hyp_code_start:
ldp x18, x19, [x3, #112]
ldp x20, x21, [x3, #128]
ldp x22, x23, [x3, #144]
- ldr x24, [x3, #160]
+ ldp x24, x25, [x3, #160]
msr vmpidr_el2, x4
msr csselr_el1, x5
@@ -268,6 +461,198 @@ __kvm_hyp_code_start:
msr amair_el1, x22
msr cntkctl_el1, x23
msr par_el1, x24
+ msr mdscr_el1, x25
+.endm
+
+.macro restore_debug
+ // x2: base address for cpu context
+ // x3: tmp register
+
+ mrs x26, id_aa64dfr0_el1
+ ubfx x24, x26, #12, #4 // Extract BRPs
+ ubfx x25, x26, #20, #4 // Extract WRPs
+ mov w26, #15
+ sub w24, w26, w24 // How many BPs to skip
+ sub w25, w26, w25 // How many WPs to skip
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+1:
+ ldr x20, [x3, #(15 * 8)]
+ ldr x19, [x3, #(14 * 8)]
+ ldr x18, [x3, #(13 * 8)]
+ ldr x17, [x3, #(12 * 8)]
+ ldr x16, [x3, #(11 * 8)]
+ ldr x15, [x3, #(10 * 8)]
+ ldr x14, [x3, #(9 * 8)]
+ ldr x13, [x3, #(8 * 8)]
+ ldr x12, [x3, #(7 * 8)]
+ ldr x11, [x3, #(6 * 8)]
+ ldr x10, [x3, #(5 * 8)]
+ ldr x9, [x3, #(4 * 8)]
+ ldr x8, [x3, #(3 * 8)]
+ ldr x7, [x3, #(2 * 8)]
+ ldr x6, [x3, #(1 * 8)]
+ ldr x5, [x3, #(0 * 8)]
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+1:
+ msr dbgbcr15_el1, x20
+ msr dbgbcr14_el1, x19
+ msr dbgbcr13_el1, x18
+ msr dbgbcr12_el1, x17
+ msr dbgbcr11_el1, x16
+ msr dbgbcr10_el1, x15
+ msr dbgbcr9_el1, x14
+ msr dbgbcr8_el1, x13
+ msr dbgbcr7_el1, x12
+ msr dbgbcr6_el1, x11
+ msr dbgbcr5_el1, x10
+ msr dbgbcr4_el1, x9
+ msr dbgbcr3_el1, x8
+ msr dbgbcr2_el1, x7
+ msr dbgbcr1_el1, x6
+ msr dbgbcr0_el1, x5
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+1:
+ ldr x20, [x3, #(15 * 8)]
+ ldr x19, [x3, #(14 * 8)]
+ ldr x18, [x3, #(13 * 8)]
+ ldr x17, [x3, #(12 * 8)]
+ ldr x16, [x3, #(11 * 8)]
+ ldr x15, [x3, #(10 * 8)]
+ ldr x14, [x3, #(9 * 8)]
+ ldr x13, [x3, #(8 * 8)]
+ ldr x12, [x3, #(7 * 8)]
+ ldr x11, [x3, #(6 * 8)]
+ ldr x10, [x3, #(5 * 8)]
+ ldr x9, [x3, #(4 * 8)]
+ ldr x8, [x3, #(3 * 8)]
+ ldr x7, [x3, #(2 * 8)]
+ ldr x6, [x3, #(1 * 8)]
+ ldr x5, [x3, #(0 * 8)]
+
+ adr x26, 1f
+ add x26, x26, x24, lsl #2
+ br x26
+1:
+ msr dbgbvr15_el1, x20
+ msr dbgbvr14_el1, x19
+ msr dbgbvr13_el1, x18
+ msr dbgbvr12_el1, x17
+ msr dbgbvr11_el1, x16
+ msr dbgbvr10_el1, x15
+ msr dbgbvr9_el1, x14
+ msr dbgbvr8_el1, x13
+ msr dbgbvr7_el1, x12
+ msr dbgbvr6_el1, x11
+ msr dbgbvr5_el1, x10
+ msr dbgbvr4_el1, x9
+ msr dbgbvr3_el1, x8
+ msr dbgbvr2_el1, x7
+ msr dbgbvr1_el1, x6
+ msr dbgbvr0_el1, x5
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+1:
+ ldr x20, [x3, #(15 * 8)]
+ ldr x19, [x3, #(14 * 8)]
+ ldr x18, [x3, #(13 * 8)]
+ ldr x17, [x3, #(12 * 8)]
+ ldr x16, [x3, #(11 * 8)]
+ ldr x15, [x3, #(10 * 8)]
+ ldr x14, [x3, #(9 * 8)]
+ ldr x13, [x3, #(8 * 8)]
+ ldr x12, [x3, #(7 * 8)]
+ ldr x11, [x3, #(6 * 8)]
+ ldr x10, [x3, #(5 * 8)]
+ ldr x9, [x3, #(4 * 8)]
+ ldr x8, [x3, #(3 * 8)]
+ ldr x7, [x3, #(2 * 8)]
+ ldr x6, [x3, #(1 * 8)]
+ ldr x5, [x3, #(0 * 8)]
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+1:
+ msr dbgwcr15_el1, x20
+ msr dbgwcr14_el1, x19
+ msr dbgwcr13_el1, x18
+ msr dbgwcr12_el1, x17
+ msr dbgwcr11_el1, x16
+ msr dbgwcr10_el1, x15
+ msr dbgwcr9_el1, x14
+ msr dbgwcr8_el1, x13
+ msr dbgwcr7_el1, x12
+ msr dbgwcr6_el1, x11
+ msr dbgwcr5_el1, x10
+ msr dbgwcr4_el1, x9
+ msr dbgwcr3_el1, x8
+ msr dbgwcr2_el1, x7
+ msr dbgwcr1_el1, x6
+ msr dbgwcr0_el1, x5
+
+ add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+1:
+ ldr x20, [x3, #(15 * 8)]
+ ldr x19, [x3, #(14 * 8)]
+ ldr x18, [x3, #(13 * 8)]
+ ldr x17, [x3, #(12 * 8)]
+ ldr x16, [x3, #(11 * 8)]
+ ldr x15, [x3, #(10 * 8)]
+ ldr x14, [x3, #(9 * 8)]
+ ldr x13, [x3, #(8 * 8)]
+ ldr x12, [x3, #(7 * 8)]
+ ldr x11, [x3, #(6 * 8)]
+ ldr x10, [x3, #(5 * 8)]
+ ldr x9, [x3, #(4 * 8)]
+ ldr x8, [x3, #(3 * 8)]
+ ldr x7, [x3, #(2 * 8)]
+ ldr x6, [x3, #(1 * 8)]
+ ldr x5, [x3, #(0 * 8)]
+
+ adr x26, 1f
+ add x26, x26, x25, lsl #2
+ br x26
+1:
+ msr dbgwvr15_el1, x20
+ msr dbgwvr14_el1, x19
+ msr dbgwvr13_el1, x18
+ msr dbgwvr12_el1, x17
+ msr dbgwvr11_el1, x16
+ msr dbgwvr10_el1, x15
+ msr dbgwvr9_el1, x14
+ msr dbgwvr8_el1, x13
+ msr dbgwvr7_el1, x12
+ msr dbgwvr6_el1, x11
+ msr dbgwvr5_el1, x10
+ msr dbgwvr4_el1, x9
+ msr dbgwvr3_el1, x8
+ msr dbgwvr2_el1, x7
+ msr dbgwvr1_el1, x6
+ msr dbgwvr0_el1, x5
+
+ ldr x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
+ msr mdccint_el1, x21
.endm
.macro skip_32bit_state tmp, target
@@ -282,6 +667,35 @@ __kvm_hyp_code_start:
tbz \tmp, #12, \target
.endm
+.macro skip_debug_state tmp, target
+ ldr \tmp, [x0, #VCPU_DEBUG_FLAGS]
+ tbz \tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
+.endm
+
+.macro compute_debug_state target
+ // Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
+ // is set, we do a full save/restore cycle and disable trapping.
+ add x25, x0, #VCPU_CONTEXT
+
+ // Check the state of MDSCR_EL1
+ ldr x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
+ and x26, x25, #DBG_MDSCR_KDE
+ and x25, x25, #DBG_MDSCR_MDE
+ adds xzr, x25, x26
+ b.eq 9998f // Nothing to see there
+
+ // If any interesting bits were set, we must set the flag
+ mov x26, #KVM_ARM64_DEBUG_DIRTY
+ str x26, [x0, #VCPU_DEBUG_FLAGS]
+ b 9999f // Don't skip restore
+
+9998:
+ // Otherwise load the flags from memory in case we recently
+ // trapped
+ skip_debug_state x25, \target
+9999:
+.endm
+
.macro save_guest_32bit_state
skip_32bit_state x3, 1f
@@ -297,10 +711,13 @@ __kvm_hyp_code_start:
mrs x4, dacr32_el2
mrs x5, ifsr32_el2
mrs x6, fpexc32_el2
- mrs x7, dbgvcr32_el2
stp x4, x5, [x3]
- stp x6, x7, [x3, #16]
+ str x6, [x3, #16]
+ skip_debug_state x8, 2f
+ mrs x7, dbgvcr32_el2
+ str x7, [x3, #24]
+2:
skip_tee_state x8, 1f
add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
@@ -323,12 +740,15 @@ __kvm_hyp_code_start:
add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
ldp x4, x5, [x3]
- ldp x6, x7, [x3, #16]
+ ldr x6, [x3, #16]
msr dacr32_el2, x4
msr ifsr32_el2, x5
msr fpexc32_el2, x6
- msr dbgvcr32_el2, x7
+ skip_debug_state x8, 2f
+ ldr x7, [x3, #24]
+ msr dbgvcr32_el2, x7
+2:
skip_tee_state x8, 1f
add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
@@ -339,11 +759,8 @@ __kvm_hyp_code_start:
.endm
.macro activate_traps
- ldr x2, [x0, #VCPU_IRQ_LINES]
- ldr x1, [x0, #VCPU_HCR_EL2]
- orr x2, x2, x1
- msr hcr_el2, x2
-
+ ldr x2, [x0, #VCPU_HCR_EL2]
+ msr hcr_el2, x2
ldr x2, =(CPTR_EL2_TTA)
msr cptr_el2, x2
@@ -353,6 +770,14 @@ __kvm_hyp_code_start:
mrs x2, mdcr_el2
and x2, x2, #MDCR_EL2_HPMN_MASK
orr x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
+ orr x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)
+
+ // Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap
+ // if not dirty.
+ ldr x3, [x0, #VCPU_DEBUG_FLAGS]
+ tbnz x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
+ orr x2, x2, #MDCR_EL2_TDA
+1:
msr mdcr_el2, x2
.endm
@@ -379,100 +804,33 @@ __kvm_hyp_code_start:
.endm
/*
- * Save the VGIC CPU state into memory
- * x0: Register pointing to VCPU struct
- * Do not corrupt x1!!!
+ * Call into the vgic backend for state saving
*/
.macro save_vgic_state
- /* Get VGIC VCTRL base into x2 */
- ldr x2, [x0, #VCPU_KVM]
- kern_hyp_va x2
- ldr x2, [x2, #KVM_VGIC_VCTRL]
- kern_hyp_va x2
- cbz x2, 2f // disabled
-
- /* Compute the address of struct vgic_cpu */
- add x3, x0, #VCPU_VGIC_CPU
-
- /* Save all interesting registers */
- ldr w4, [x2, #GICH_HCR]
- ldr w5, [x2, #GICH_VMCR]
- ldr w6, [x2, #GICH_MISR]
- ldr w7, [x2, #GICH_EISR0]
- ldr w8, [x2, #GICH_EISR1]
- ldr w9, [x2, #GICH_ELRSR0]
- ldr w10, [x2, #GICH_ELRSR1]
- ldr w11, [x2, #GICH_APR]
-CPU_BE( rev w4, w4 )
-CPU_BE( rev w5, w5 )
-CPU_BE( rev w6, w6 )
-CPU_BE( rev w7, w7 )
-CPU_BE( rev w8, w8 )
-CPU_BE( rev w9, w9 )
-CPU_BE( rev w10, w10 )
-CPU_BE( rev w11, w11 )
-
- str w4, [x3, #VGIC_CPU_HCR]
- str w5, [x3, #VGIC_CPU_VMCR]
- str w6, [x3, #VGIC_CPU_MISR]
- str w7, [x3, #VGIC_CPU_EISR]
- str w8, [x3, #(VGIC_CPU_EISR + 4)]
- str w9, [x3, #VGIC_CPU_ELRSR]
- str w10, [x3, #(VGIC_CPU_ELRSR + 4)]
- str w11, [x3, #VGIC_CPU_APR]
-
- /* Clear GICH_HCR */
- str wzr, [x2, #GICH_HCR]
-
- /* Save list registers */
- add x2, x2, #GICH_LR0
- ldr w4, [x3, #VGIC_CPU_NR_LR]
- add x3, x3, #VGIC_CPU_LR
-1: ldr w5, [x2], #4
-CPU_BE( rev w5, w5 )
- str w5, [x3], #4
- sub w4, w4, #1
- cbnz w4, 1b
-2:
+ adr x24, __vgic_sr_vectors
+ ldr x24, [x24, VGIC_SAVE_FN]
+ kern_hyp_va x24
+ blr x24
+ mrs x24, hcr_el2
+ mov x25, #HCR_INT_OVERRIDE
+ neg x25, x25
+ and x24, x24, x25
+ msr hcr_el2, x24
.endm
/*
- * Restore the VGIC CPU state from memory
- * x0: Register pointing to VCPU struct
+ * Call into the vgic backend for state restoring
*/
.macro restore_vgic_state
- /* Get VGIC VCTRL base into x2 */
- ldr x2, [x0, #VCPU_KVM]
- kern_hyp_va x2
- ldr x2, [x2, #KVM_VGIC_VCTRL]
- kern_hyp_va x2
- cbz x2, 2f // disabled
-
- /* Compute the address of struct vgic_cpu */
- add x3, x0, #VCPU_VGIC_CPU
-
- /* We only restore a minimal set of registers */
- ldr w4, [x3, #VGIC_CPU_HCR]
- ldr w5, [x3, #VGIC_CPU_VMCR]
- ldr w6, [x3, #VGIC_CPU_APR]
-CPU_BE( rev w4, w4 )
-CPU_BE( rev w5, w5 )
-CPU_BE( rev w6, w6 )
-
- str w4, [x2, #GICH_HCR]
- str w5, [x2, #GICH_VMCR]
- str w6, [x2, #GICH_APR]
-
- /* Restore list registers */
- add x2, x2, #GICH_LR0
- ldr w4, [x3, #VGIC_CPU_NR_LR]
- add x3, x3, #VGIC_CPU_LR
-1: ldr w5, [x3], #4
-CPU_BE( rev w5, w5 )
- str w5, [x2], #4
- sub w4, w4, #1
- cbnz w4, 1b
-2:
+ mrs x24, hcr_el2
+ ldr x25, [x0, #VCPU_IRQ_LINES]
+ orr x24, x24, #HCR_INT_OVERRIDE
+ orr x24, x24, x25
+ msr hcr_el2, x24
+ adr x24, __vgic_sr_vectors
+ ldr x24, [x24, #VGIC_RESTORE_FN]
+ kern_hyp_va x24
+ blr x24
.endm
.macro save_timer_state
@@ -537,6 +895,14 @@ __restore_sysregs:
restore_sysregs
ret
+__save_debug:
+ save_debug
+ ret
+
+__restore_debug:
+ restore_debug
+ ret
+
__save_fpsimd:
save_fpsimd
ret
@@ -568,6 +934,9 @@ ENTRY(__kvm_vcpu_run)
bl __save_fpsimd
bl __save_sysregs
+ compute_debug_state 1f
+ bl __save_debug
+1:
activate_traps
activate_vm
@@ -579,6 +948,10 @@ ENTRY(__kvm_vcpu_run)
bl __restore_sysregs
bl __restore_fpsimd
+
+ skip_debug_state x3, 1f
+ bl __restore_debug
+1:
restore_guest_32bit_state
restore_guest_regs
@@ -595,6 +968,10 @@ __kvm_vcpu_return:
save_guest_regs
bl __save_fpsimd
bl __save_sysregs
+
+ skip_debug_state x3, 1f
+ bl __save_debug
+1:
save_guest_32bit_state
save_timer_state
@@ -609,6 +986,14 @@ __kvm_vcpu_return:
bl __restore_sysregs
bl __restore_fpsimd
+
+ skip_debug_state x3, 1f
+ // Clear the dirty flag for the next run, as all the state has
+ // already been saved. Note that we nuke the whole 64bit word.
+ // If we ever add more flags, we'll have to be more careful...
+ str xzr, [x0, #VCPU_DEBUG_FLAGS]
+ bl __restore_debug
+1:
restore_host_regs
mov x0, x1
@@ -630,9 +1015,15 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
* whole of Stage-1. Weep...
*/
tlbi ipas2e1is, x1
- dsb sy
+ /*
+ * We have to ensure completion of the invalidation at Stage-2,
+ * since a table walk on another CPU could refill a TLB with a
+ * complete (S1 + S2) walk based on the old Stage-2 mapping if
+ * the Stage-1 invalidation happened first.
+ */
+ dsb ish
tlbi vmalle1is
- dsb sy
+ dsb ish
isb
msr vttbr_el2, xzr
@@ -643,10 +1034,16 @@ ENTRY(__kvm_flush_vm_context)
dsb ishst
tlbi alle1is
ic ialluis
- dsb sy
+ dsb ish
ret
ENDPROC(__kvm_flush_vm_context)
+ // struct vgic_sr_vectors __vgic_sr_vectors;
+ .align 3
+ENTRY(__vgic_sr_vectors)
+ .skip VGIC_SR_VECTOR_SZ
+ENDPROC(__vgic_sr_vectors)
+
__kvm_hyp_panic:
// Guess the context by looking at VTTBR:
// If zero, then we're already a host.
@@ -824,7 +1221,7 @@ el1_trap:
mrs x2, far_el2
2: mrs x0, tpidr_el2
- str x1, [x0, #VCPU_ESR_EL2]
+ str w1, [x0, #VCPU_ESR_EL2]
str x2, [x0, #VCPU_FAR_EL2]
str x3, [x0, #VCPU_HPFAR_EL2]
@@ -874,7 +1271,4 @@ ENTRY(__kvm_hyp_vector)
ventry el1_error_invalid // Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)
-__kvm_hyp_code_end:
- .globl __kvm_hyp_code_end
-
.popsection
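
Two techniques in the hyp.S changes above are worth restating in C. First, save_debug/restore_debug skip unimplemented breakpoint/watchpoint registers with a computed branch: every mrs/str slot is exactly one 4-byte instruction, so adding (regs_to_skip << 2) to the label address lands on the first implemented register. Roughly (read_dbgbcr() is a hypothetical accessor):

static void save_dbgbcr_sketch(u64 *ctxt, int nr_brps)
{
	int i;

	for (i = nr_brps - 1; i >= 0; i--)
		ctxt[i] = read_dbgbcr(i);	/* implemented regs only */
}

Second, __kvm_vcpu_return clears the whole 64-bit debug_flags word with "str xzr" once the guest debug state is saved; as the comment there notes, that is only safe while KVM_ARM64_DEBUG_DIRTY is the sole flag in the word.
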
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index d800dbc8693a..4cc3b719208e 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -27,8 +27,10 @@
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
+#include <asm/kvm_mmu.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
+#include <asm/debug-monitors.h>
#include <trace/events/kvm.h>
#include "sys_regs.h"
@@ -121,17 +123,51 @@ done:
}
/*
- * We could trap ID_DFR0 and tell the guest we don't support performance
- * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
- * NAKed, so it will read the PMCR anyway.
- *
- * Therefore we tell the guest we have 0 counters. Unfortunately, we
- * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
- * all PM registers, which doesn't crash the guest kernel at least.
+ * Generic accessor for VM registers. Only called as long as HCR_TVM
+ * is set.
+ */
+static bool access_vm_reg(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ unsigned long val;
+
+ BUG_ON(!p->is_write);
+
+ val = *vcpu_reg(vcpu, p->Rt);
+ if (!p->is_aarch32) {
+ vcpu_sys_reg(vcpu, r->reg) = val;
+ } else {
+ if (!p->is_32bit)
+ vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
+ vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
+ }
+
+ return true;
+}
+
+/*
+ * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the
+ * guest enables the MMU, we stop trapping the VM sys_regs and leave
+ * it in complete control of the caches.
*/
-static bool pm_fake(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
- const struct sys_reg_desc *r)
+static bool access_sctlr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ access_vm_reg(vcpu, p, r);
+
+ if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */
+ vcpu->arch.hcr_el2 &= ~HCR_TVM;
+ stage2_flush_vm(vcpu->kvm);
+ }
+
+ return true;
+}
+
+static bool trap_raz_wi(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
{
if (p->is_write)
return ignore_write(vcpu, p);
@@ -139,6 +175,73 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
return read_zero(vcpu, p);
}
+static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write) {
+ return ignore_write(vcpu, p);
+ } else {
+ *vcpu_reg(vcpu, p->Rt) = (1 << 3);
+ return true;
+ }
+}
+
+static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write) {
+ return ignore_write(vcpu, p);
+ } else {
+ u32 val;
+ asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
+ *vcpu_reg(vcpu, p->Rt) = val;
+ return true;
+ }
+}
+
+/*
+ * We want to avoid world-switching all the DBG registers all the
+ * time:
+ *
+ * - If we've touched any debug register, it is likely that we're
+ * going to touch more of them. It then makes sense to disable the
+ * traps and start doing the save/restore dance
+ * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
+ * then mandatory to save/restore the registers, as the guest
+ * depends on them.
+ *
+ * For this, we use a DIRTY bit, indicating the guest has modified the
+ * debug registers, used as follow:
+ *
+ * On guest entry:
+ * - If the dirty bit is set (because we're coming back from trapping),
+ * disable the traps, save host registers, restore guest registers.
+ * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
+ * set the dirty bit, disable the traps, save host registers,
+ * restore guest registers.
+ * - Otherwise, enable the traps
+ *
+ * On guest exit:
+ * - If the dirty bit is set, save guest registers, restore host
+ * registers and clear the dirty bit. This ensures that the host can
+ * now use the debug registers.
+ */
+static bool trap_debug_regs(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write) {
+ vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+ vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+ } else {
+ *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
+ }
+
+ return true;
+}
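
A lifecycle sketch of the dirty bit described in the comment above (names as in this patch): any trapped debug-register write marks the state dirty, and world-switch entry also marks it dirty whenever MDSCR_EL1 shows debug is live.

static void guest_entry_debug_sketch(struct kvm_vcpu *vcpu)
{
	u64 mdscr = vcpu_sys_reg(vcpu, MDSCR_EL1);

	if (mdscr & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	/*
	 * dirty: drop MDCR_EL2.TDA, save host and restore guest regs;
	 * clean: keep trapping debug accesses, skip the save/restore.
	 */
}
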
+
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 amair;
@@ -155,9 +258,39 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
}
+/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
+#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
+ /* DBGBVRn_EL1 */ \
+ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100), \
+ trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 }, \
+ /* DBGBCRn_EL1 */ \
+ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101), \
+ trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 }, \
+ /* DBGWVRn_EL1 */ \
+ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110), \
+ trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 }, \
+ /* DBGWCRn_EL1 */ \
+ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111), \
+ trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 }
+
/*
* Architected system registers.
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
+ *
+ * We could trap ID_DFR0 and tell the guest we don't support performance
+ * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
+ * NAKed, so it will read the PMCR anyway.
+ *
+ * Therefore we tell the guest we have 0 counters. Unfortunately, we
+ * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
+ * all PM registers, which doesn't crash the guest kernel at least.
+ *
+ * Debug handling: We do trap most, if not all debug related system
+ * registers. The implementation is good enough to ensure that a guest
+ * can use these with minimal performance degradation. The drawback is
+ * that we don't implement any of the external debug, none of the
+ * OSlock protocol. This should be revisited if we ever encounter a
+ * more demanding guest...
*/
static const struct sys_reg_desc sys_reg_descs[] = {
/* DC ISW */
@@ -170,12 +303,71 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
access_dcsw },
+ DBG_BCR_BVR_WCR_WVR_EL1(0),
+ DBG_BCR_BVR_WCR_WVR_EL1(1),
+ /* MDCCINT_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
+ trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
+ /* MDSCR_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
+ trap_debug_regs, reset_val, MDSCR_EL1, 0 },
+ DBG_BCR_BVR_WCR_WVR_EL1(2),
+ DBG_BCR_BVR_WCR_WVR_EL1(3),
+ DBG_BCR_BVR_WCR_WVR_EL1(4),
+ DBG_BCR_BVR_WCR_WVR_EL1(5),
+ DBG_BCR_BVR_WCR_WVR_EL1(6),
+ DBG_BCR_BVR_WCR_WVR_EL1(7),
+ DBG_BCR_BVR_WCR_WVR_EL1(8),
+ DBG_BCR_BVR_WCR_WVR_EL1(9),
+ DBG_BCR_BVR_WCR_WVR_EL1(10),
+ DBG_BCR_BVR_WCR_WVR_EL1(11),
+ DBG_BCR_BVR_WCR_WVR_EL1(12),
+ DBG_BCR_BVR_WCR_WVR_EL1(13),
+ DBG_BCR_BVR_WCR_WVR_EL1(14),
+ DBG_BCR_BVR_WCR_WVR_EL1(15),
+
+ /* MDRAR_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
+ trap_raz_wi },
+ /* OSLAR_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
+ trap_raz_wi },
+ /* OSLSR_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
+ trap_oslsr_el1 },
+ /* OSDLR_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
+ trap_raz_wi },
+ /* DBGPRCR_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
+ trap_raz_wi },
+ /* DBGCLAIMSET_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
+ trap_raz_wi },
+ /* DBGCLAIMCLR_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
+ trap_raz_wi },
+ /* DBGAUTHSTATUS_EL1 */
+ { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
+ trap_dbgauthstatus_el1 },
+
/* TEECR32_EL1 */
{ Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
NULL, reset_val, TEECR32_EL1, 0 },
/* TEEHBR32_EL1 */
{ Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
NULL, reset_val, TEEHBR32_EL1, 0 },
+
+ /* MDCCSR_EL1 */
+ { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
+ trap_raz_wi },
+ /* DBGDTR_EL0 */
+ { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
+ trap_raz_wi },
+ /* DBGDTR[TR]X_EL0 */
+ { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
+ trap_raz_wi },
+
/* DBGVCR32_EL2 */
{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
NULL, reset_val, DBGVCR32_EL2, 0 },
@@ -185,56 +377,56 @@ static const struct sys_reg_desc sys_reg_descs[] = {
NULL, reset_mpidr, MPIDR_EL1 },
/* SCTLR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
- NULL, reset_val, SCTLR_EL1, 0x00C50078 },
+ access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
/* CPACR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
NULL, reset_val, CPACR_EL1, 0 },
/* TTBR0_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
- NULL, reset_unknown, TTBR0_EL1 },
+ access_vm_reg, reset_unknown, TTBR0_EL1 },
/* TTBR1_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
- NULL, reset_unknown, TTBR1_EL1 },
+ access_vm_reg, reset_unknown, TTBR1_EL1 },
/* TCR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
- NULL, reset_val, TCR_EL1, 0 },
+ access_vm_reg, reset_val, TCR_EL1, 0 },
/* AFSR0_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
- NULL, reset_unknown, AFSR0_EL1 },
+ access_vm_reg, reset_unknown, AFSR0_EL1 },
/* AFSR1_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
- NULL, reset_unknown, AFSR1_EL1 },
+ access_vm_reg, reset_unknown, AFSR1_EL1 },
/* ESR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
- NULL, reset_unknown, ESR_EL1 },
+ access_vm_reg, reset_unknown, ESR_EL1 },
/* FAR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
- NULL, reset_unknown, FAR_EL1 },
+ access_vm_reg, reset_unknown, FAR_EL1 },
/* PAR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
NULL, reset_unknown, PAR_EL1 },
/* PMINTENSET_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
- pm_fake },
+ trap_raz_wi },
/* PMINTENCLR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
- pm_fake },
+ trap_raz_wi },
/* MAIR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
- NULL, reset_unknown, MAIR_EL1 },
+ access_vm_reg, reset_unknown, MAIR_EL1 },
/* AMAIR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
- NULL, reset_amair_el1, AMAIR_EL1 },
+ access_vm_reg, reset_amair_el1, AMAIR_EL1 },
/* VBAR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
NULL, reset_val, VBAR_EL1, 0 },
/* CONTEXTIDR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
- NULL, reset_val, CONTEXTIDR_EL1, 0 },
+ access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
/* TPIDR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
NULL, reset_unknown, TPIDR_EL1 },
@@ -249,43 +441,43 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* PMCR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
- pm_fake },
+ trap_raz_wi },
/* PMCNTENSET_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
- pm_fake },
+ trap_raz_wi },
/* PMCNTENCLR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
- pm_fake },
+ trap_raz_wi },
/* PMOVSCLR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
- pm_fake },
+ trap_raz_wi },
/* PMSWINC_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
- pm_fake },
+ trap_raz_wi },
/* PMSELR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
- pm_fake },
+ trap_raz_wi },
/* PMCEID0_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
- pm_fake },
+ trap_raz_wi },
/* PMCEID1_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
- pm_fake },
+ trap_raz_wi },
/* PMCCNTR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
- pm_fake },
+ trap_raz_wi },
/* PMXEVTYPER_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
- pm_fake },
+ trap_raz_wi },
/* PMXEVCNTR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
- pm_fake },
+ trap_raz_wi },
/* PMUSERENR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
- pm_fake },
+ trap_raz_wi },
/* PMOVSSET_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
- pm_fake },
+ trap_raz_wi },
/* TPIDR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
@@ -305,27 +497,205 @@ static const struct sys_reg_desc sys_reg_descs[] = {
NULL, reset_val, FPEXC32_EL2, 0x70 },
};
-/* Trapped cp15 registers */
+static bool trap_dbgidr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write) {
+ return ignore_write(vcpu, p);
+ } else {
+ u64 dfr = read_cpuid(ID_AA64DFR0_EL1);
+ u64 pfr = read_cpuid(ID_AA64PFR0_EL1);
+ u32 el3 = !!((pfr >> 12) & 0xf);
+
+ *vcpu_reg(vcpu, p->Rt) = ((((dfr >> 20) & 0xf) << 28) |
+ (((dfr >> 12) & 0xf) << 24) |
+ (((dfr >> 28) & 0xf) << 20) |
+ (6 << 16) | (el3 << 14) | (el3 << 12));
+ return true;
+ }
+}
+
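+/*
+ * Field map for the DBGIDR value synthesised above (derived from the
+ * ARMv8 ARM; the mapping is an assumption, not spelled out in this
+ * patch):
+ *
+ *	DBGIDR[31:28] WRPs     <- ID_AA64DFR0_EL1[23:20]
+ *	DBGIDR[27:24] BRPs     <- ID_AA64DFR0_EL1[15:12]
+ *	DBGIDR[23:20] CTX_CMPs <- ID_AA64DFR0_EL1[31:28]
+ *	DBGIDR[19:16] Version  =  6 (ARMv8 debug architecture)
+ *	DBGIDR[14],[12]        =  security extension bits, set only
+ *	                          when EL3 is implemented
+ */
+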
+static bool trap_debug32(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write) {
+ vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+ vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+ } else {
+ *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
+ }
+
+ return true;
+}
+
+#define DBG_BCR_BVR_WCR_WVR(n) \
+ /* DBGBVRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_debug32, \
+ NULL, (cp14_DBGBVR0 + (n) * 2) }, \
+ /* DBGBCRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_debug32, \
+ NULL, (cp14_DBGBCR0 + (n) * 2) }, \
+ /* DBGWVRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_debug32, \
+ NULL, (cp14_DBGWVR0 + (n) * 2) }, \
+ /* DBGWCRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_debug32, \
+ NULL, (cp14_DBGWCR0 + (n) * 2) }
+
+#define DBGBXVR(n)						\
+	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_debug32,	\
+	  NULL, (cp14_DBGBXVR0 + (n) * 2) }
+
+/*
+ * Trapped cp14 registers. We generally ignore most of the external
+ * debug registers, on the principle that they don't really make sense
+ * to a guest. Revisit this one day, should this principle change.
+ */
+static const struct sys_reg_desc cp14_regs[] = {
+ /* DBGIDR */
+ { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
+ /* DBGDTRRXext */
+ { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
+
+ DBG_BCR_BVR_WCR_WVR(0),
+ /* DBGDSCRint */
+ { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
+ DBG_BCR_BVR_WCR_WVR(1),
+ /* DBGDCCINT */
+ { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
+ /* DBGDSCRext */
+ { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
+ DBG_BCR_BVR_WCR_WVR(2),
+ /* DBGDTR[RT]Xint */
+ { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
+ /* DBGDTR[RT]Xext */
+ { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
+ DBG_BCR_BVR_WCR_WVR(3),
+ DBG_BCR_BVR_WCR_WVR(4),
+ DBG_BCR_BVR_WCR_WVR(5),
+ /* DBGWFAR */
+ { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
+ /* DBGOSECCR */
+ { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
+ DBG_BCR_BVR_WCR_WVR(6),
+ /* DBGVCR */
+ { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
+ DBG_BCR_BVR_WCR_WVR(7),
+ DBG_BCR_BVR_WCR_WVR(8),
+ DBG_BCR_BVR_WCR_WVR(9),
+ DBG_BCR_BVR_WCR_WVR(10),
+ DBG_BCR_BVR_WCR_WVR(11),
+ DBG_BCR_BVR_WCR_WVR(12),
+ DBG_BCR_BVR_WCR_WVR(13),
+ DBG_BCR_BVR_WCR_WVR(14),
+ DBG_BCR_BVR_WCR_WVR(15),
+
+ /* DBGDRAR (32bit) */
+ { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
+
+ DBGBXVR(0),
+ /* DBGOSLAR */
+ { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
+ DBGBXVR(1),
+ /* DBGOSLSR */
+ { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
+ DBGBXVR(2),
+ DBGBXVR(3),
+ /* DBGOSDLR */
+ { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
+ DBGBXVR(4),
+ /* DBGPRCR */
+ { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
+ DBGBXVR(5),
+ DBGBXVR(6),
+ DBGBXVR(7),
+ DBGBXVR(8),
+ DBGBXVR(9),
+ DBGBXVR(10),
+ DBGBXVR(11),
+ DBGBXVR(12),
+ DBGBXVR(13),
+ DBGBXVR(14),
+ DBGBXVR(15),
+
+ /* DBGDSAR (32bit) */
+ { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
+
+ /* DBGDEVID2 */
+ { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
+ /* DBGDEVID1 */
+ { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
+ /* DBGDEVID */
+ { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
+ /* DBGCLAIMSET */
+ { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
+ /* DBGCLAIMCLR */
+ { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
+ /* DBGAUTHSTATUS */
+ { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
+};
+
+/* Trapped cp14 64bit registers */
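+/*
+ * 64bit (MCRR/MRRC) accesses encode only Op1 and CRm, hence the
+ * designated initialisers and the absent CRn/Op2 below.
+ */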
+static const struct sys_reg_desc cp14_64_regs[] = {
+ /* DBGDRAR (64bit) */
+ { Op1( 0), CRm( 1), .access = trap_raz_wi },
+
+ /* DBGDSAR (64bit) */
+ { Op1( 0), CRm( 2), .access = trap_raz_wi },
+};
+
+/*
+ * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
+ * depending on the way they are accessed (as a 32bit or a 64bit
+ * register).
+ */
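+/*
+ * Illustration (encodings per the ARM ARM, not spelled out in this
+ * patch): both guest accesses below resolve to the same backing copy,
+ * c2_TTBR0 -- the first via cp15_regs, the second via cp15_64_regs:
+ *
+ *	mcr	p15, 0, r0, c2, c0, 0	@ 32-bit write to TTBR0
+ *	mcrr	p15, 0, r0, r1, c2	@ 64-bit write to TTBR0
+ */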
static const struct sys_reg_desc cp15_regs[] = {
+ { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
+ { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
+ { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
+ { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
+ { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
+ { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
+ { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
+ { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
+ { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
+ { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
+ { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
+
/*
* DC{C,I,CI}SW operations:
*/
{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
- { Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake },
- { Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake },
- { Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake },
- { Op1( 0), CRn( 9), CRm(12), Op2( 3), pm_fake },
- { Op1( 0), CRn( 9), CRm(12), Op2( 5), pm_fake },
- { Op1( 0), CRn( 9), CRm(12), Op2( 6), pm_fake },
- { Op1( 0), CRn( 9), CRm(12), Op2( 7), pm_fake },
- { Op1( 0), CRn( 9), CRm(13), Op2( 0), pm_fake },
- { Op1( 0), CRn( 9), CRm(13), Op2( 1), pm_fake },
- { Op1( 0), CRn( 9), CRm(13), Op2( 2), pm_fake },
- { Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake },
- { Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake },
- { Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake },
+
+ /* PMU */
+ { Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },
+
+ { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
+ { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
+ { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
+ { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
+ { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
+};
+
+static const struct sys_reg_desc cp15_64_regs[] = {
+ { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
+ { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};
/* Target specific emulation tables */
@@ -385,26 +755,29 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- kvm_inject_undefined(vcpu);
- return 1;
-}
-
-static void emulate_cp15(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *params)
+/*
+ * emulate_cp -- tries to match a sys_reg access in a handling table, and
+ * call the corresponding trap handler.
+ *
+ * @params: pointer to the descriptor of the access
+ * @table: array of trap descriptors
+ * @num: size of the trap descriptor array
+ *
+ * Return 0 if the access has been handled, and -1 if not.
+ */
+static int emulate_cp(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *params,
+ const struct sys_reg_desc *table,
+ size_t num)
{
- size_t num;
- const struct sys_reg_desc *table, *r;
+ const struct sys_reg_desc *r;
- table = get_target_table(vcpu->arch.target, false, &num);
+ if (!table)
+ return -1; /* Not handled */
- /* Search target-specific then generic table. */
r = find_reg(params, table, num);
- if (!r)
- r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));
- if (likely(r)) {
+ if (r) {
/*
* Not having an accessor means that we have
* configured a trap that we don't know how to
@@ -416,27 +789,58 @@ static void emulate_cp15(struct kvm_vcpu *vcpu,
if (likely(r->access(vcpu, params, r))) {
/* Skip instruction, since it was emulated */
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
- return;
}
- /* If access function fails, it should complain. */
+
+ /* Handled */
+ return 0;
}
- kvm_err("Unsupported guest CP15 access at: %08lx\n", *vcpu_pc(vcpu));
+ /* Not handled */
+ return -1;
+}
+
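+/*
+ * A sketch of how emulate_cp() is used further down: callers try the
+ * target-specific table first, then fall back to the global one:
+ *
+ *	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
+ *		return 1;
+ *	if (!emulate_cp(vcpu, &params, global, nr_global))
+ *		return 1;
+ *	unhandled_cp_access(vcpu, &params);
+ */
+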
+static void unhandled_cp_access(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *params)
+{
+ u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+ int cp;
+
+ switch (hsr_ec) {
+ case ESR_EL2_EC_CP15_32:
+ case ESR_EL2_EC_CP15_64:
+ cp = 15;
+ break;
+ case ESR_EL2_EC_CP14_MR:
+ case ESR_EL2_EC_CP14_64:
+ cp = 14;
+ break;
+ default:
+ WARN_ON((cp = -1));
+ }
+
+ kvm_err("Unsupported guest CP%d access at: %08lx\n",
+ cp, *vcpu_pc(vcpu));
print_sys_reg_instr(params);
kvm_inject_undefined(vcpu);
}
/**
- * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
* @vcpu: The VCPU pointer
* @run: The kvm_run struct
*/
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *global,
+ size_t nr_global,
+ const struct sys_reg_desc *target_specific,
+ size_t nr_specific)
{
struct sys_reg_params params;
u32 hsr = kvm_vcpu_get_hsr(vcpu);
int Rt2 = (hsr >> 10) & 0xf;
+ params.is_aarch32 = true;
+ params.is_32bit = false;
params.CRm = (hsr >> 1) & 0xf;
params.Rt = (hsr >> 5) & 0xf;
params.is_write = ((hsr & 1) == 0);
@@ -458,8 +862,14 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
*vcpu_reg(vcpu, params.Rt) = val;
}
- emulate_cp15(vcpu, &params);
+ if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
+ goto out;
+ if (!emulate_cp(vcpu, &params, global, nr_global))
+ goto out;
+
+ unhandled_cp_access(vcpu, &params);
+out:
/* Do the opposite hack for the read side */
if (!params.is_write) {
u64 val = *vcpu_reg(vcpu, params.Rt);
@@ -475,11 +885,17 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
* @vcpu: The VCPU pointer
* @run: The kvm_run struct
*/
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *global,
+ size_t nr_global,
+ const struct sys_reg_desc *target_specific,
+ size_t nr_specific)
{
struct sys_reg_params params;
u32 hsr = kvm_vcpu_get_hsr(vcpu);
+ params.is_aarch32 = true;
+ params.is_32bit = true;
params.CRm = (hsr >> 1) & 0xf;
params.Rt = (hsr >> 5) & 0xf;
params.is_write = ((hsr & 1) == 0);
@@ -488,10 +904,51 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
params.Op1 = (hsr >> 14) & 0x7;
params.Op2 = (hsr >> 17) & 0x7;
- emulate_cp15(vcpu, &params);
+ if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
+ return 1;
+ if (!emulate_cp(vcpu, &params, global, nr_global))
+ return 1;
+
+ unhandled_cp_access(vcpu, &params);
return 1;
}
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ const struct sys_reg_desc *target_specific;
+ size_t num;
+
+ target_specific = get_target_table(vcpu->arch.target, false, &num);
+ return kvm_handle_cp_64(vcpu,
+ cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
+ target_specific, num);
+}
+
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ const struct sys_reg_desc *target_specific;
+ size_t num;
+
+ target_specific = get_target_table(vcpu->arch.target, false, &num);
+ return kvm_handle_cp_32(vcpu,
+ cp15_regs, ARRAY_SIZE(cp15_regs),
+ target_specific, num);
+}
+
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ return kvm_handle_cp_64(vcpu,
+ cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
+ NULL, 0);
+}
+
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ return kvm_handle_cp_32(vcpu,
+ cp14_regs, ARRAY_SIZE(cp14_regs),
+ NULL, 0);
+}
+
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
const struct sys_reg_params *params)
{
@@ -549,6 +1006,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
struct sys_reg_params params;
unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+ params.is_aarch32 = false;
+ params.is_32bit = false;
params.Op0 = (esr >> 20) & 3;
params.Op1 = (esr >> 14) & 0x7;
params.CRn = (esr >> 10) & 0xf;
@@ -701,17 +1160,15 @@ static struct sys_reg_desc invariant_sys_regs[] = {
NULL, get_ctr_el0 },
};
-static int reg_from_user(void *val, const void __user *uaddr, u64 id)
+static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
- /* This Just Works because we are little endian. */
if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
return -EFAULT;
return 0;
}
-static int reg_to_user(void __user *uaddr, const void *val, u64 id)
+static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
- /* This Just Works because we are little endian. */
if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
return -EFAULT;
return 0;
@@ -761,7 +1218,7 @@ static bool is_valid_cache(u32 val)
u32 level, ctype;
if (val >= CSSELR_MAX)
- return -ENOENT;
+ return false;
/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
level = (val >> 1);
@@ -887,7 +1344,7 @@ static unsigned int num_demux_regs(void)
static int write_demux_regids(u64 __user *uindices)
{
- u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
+ u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
unsigned int i;
val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
@@ -994,14 +1451,32 @@ int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
return write_demux_regids(uindices);
}
+static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
+{
+ unsigned int i;
+
+ for (i = 1; i < n; i++) {
+ if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
+ kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
void kvm_sys_reg_table_init(void)
{
unsigned int i;
struct sys_reg_desc clidr;
/* Make sure tables are unique and in order. */
- for (i = 1; i < ARRAY_SIZE(sys_reg_descs); i++)
- BUG_ON(cmp_sys_reg(&sys_reg_descs[i-1], &sys_reg_descs[i]) >= 0);
+ BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
+ BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
+ BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
+ BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
+ BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
+ BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));
/* We abuse the reset function to overwrite the table itself. */
for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index d50d3722998e..d411e251412c 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -30,6 +30,8 @@ struct sys_reg_params {
u8 Op2;
u8 Rt;
bool is_write;
+ bool is_aarch32;
+ bool is_32bit; /* Only valid if is_aarch32 is true */
};
struct sys_reg_desc {
diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c
index 8fe6f76b0edc..475fd2929310 100644
--- a/arch/arm64/kvm/sys_regs_generic_v8.c
+++ b/arch/arm64/kvm/sys_regs_generic_v8.c
@@ -88,6 +88,8 @@ static int __init sys_reg_genericv8_init(void)
&genericv8_target_table);
kvm_register_target_sys_reg_table(KVM_ARM_TARGET_FOUNDATION_V8,
&genericv8_target_table);
+ kvm_register_target_sys_reg_table(KVM_ARM_TARGET_CORTEX_A53,
+ &genericv8_target_table);
kvm_register_target_sys_reg_table(KVM_ARM_TARGET_CORTEX_A57,
&genericv8_target_table);
kvm_register_target_sys_reg_table(KVM_ARM_TARGET_XGENE_POTENZA,
diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S
new file mode 100644
index 000000000000..ae211772f991
--- /dev/null
+++ b/arch/arm64/kvm/vgic-v2-switch.S
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/assembler.h>
+#include <asm/memory.h>
+#include <asm/asm-offsets.h>
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+ .text
+ .pushsection .hyp.text, "ax"
+
+/*
+ * Save the VGIC CPU state into memory
+ * x0: Register pointing to VCPU struct
+ * Do not corrupt x1!!!
+ */
+ENTRY(__save_vgic_v2_state)
+ /* Get VGIC VCTRL base into x2 */
+ ldr x2, [x0, #VCPU_KVM]
+ kern_hyp_va x2
+ ldr x2, [x2, #KVM_VGIC_VCTRL]
+ kern_hyp_va x2
+ cbz x2, 2f // disabled
+
+ /* Compute the address of struct vgic_cpu */
+ add x3, x0, #VCPU_VGIC_CPU
+
+ /* Save all interesting registers */
+ ldr w4, [x2, #GICH_HCR]
+ ldr w5, [x2, #GICH_VMCR]
+ ldr w6, [x2, #GICH_MISR]
+ ldr w7, [x2, #GICH_EISR0]
+ ldr w8, [x2, #GICH_EISR1]
+ ldr w9, [x2, #GICH_ELRSR0]
+ ldr w10, [x2, #GICH_ELRSR1]
+ ldr w11, [x2, #GICH_APR]
+CPU_BE( rev w4, w4 )
+CPU_BE( rev w5, w5 )
+CPU_BE( rev w6, w6 )
+CPU_BE( rev w7, w7 )
+CPU_BE( rev w8, w8 )
+CPU_BE( rev w9, w9 )
+CPU_BE( rev w10, w10 )
+CPU_BE( rev w11, w11 )
+
+ str w4, [x3, #VGIC_V2_CPU_HCR]
+ str w5, [x3, #VGIC_V2_CPU_VMCR]
+ str w6, [x3, #VGIC_V2_CPU_MISR]
+ str w7, [x3, #VGIC_V2_CPU_EISR]
+ str w8, [x3, #(VGIC_V2_CPU_EISR + 4)]
+ str w9, [x3, #VGIC_V2_CPU_ELRSR]
+ str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)]
+ str w11, [x3, #VGIC_V2_CPU_APR]
+
+ /* Clear GICH_HCR */
+ str wzr, [x2, #GICH_HCR]
+
+ /* Save list registers */
+ add x2, x2, #GICH_LR0
+ ldr w4, [x3, #VGIC_CPU_NR_LR]
+ add x3, x3, #VGIC_V2_CPU_LR
+1: ldr w5, [x2], #4
+CPU_BE( rev w5, w5 )
+ str w5, [x3], #4
+ sub w4, w4, #1
+ cbnz w4, 1b
+2:
+ ret
+ENDPROC(__save_vgic_v2_state)
+
+/*
+ * Restore the VGIC CPU state from memory
+ * x0: Register pointing to VCPU struct
+ */
+ENTRY(__restore_vgic_v2_state)
+ /* Get VGIC VCTRL base into x2 */
+ ldr x2, [x0, #VCPU_KVM]
+ kern_hyp_va x2
+ ldr x2, [x2, #KVM_VGIC_VCTRL]
+ kern_hyp_va x2
+ cbz x2, 2f // disabled
+
+ /* Compute the address of struct vgic_cpu */
+ add x3, x0, #VCPU_VGIC_CPU
+
+ /* We only restore a minimal set of registers */
+ ldr w4, [x3, #VGIC_V2_CPU_HCR]
+ ldr w5, [x3, #VGIC_V2_CPU_VMCR]
+ ldr w6, [x3, #VGIC_V2_CPU_APR]
+CPU_BE( rev w4, w4 )
+CPU_BE( rev w5, w5 )
+CPU_BE( rev w6, w6 )
+
+ str w4, [x2, #GICH_HCR]
+ str w5, [x2, #GICH_VMCR]
+ str w6, [x2, #GICH_APR]
+
+ /* Restore list registers */
+ add x2, x2, #GICH_LR0
+ ldr w4, [x3, #VGIC_CPU_NR_LR]
+ add x3, x3, #VGIC_V2_CPU_LR
+1: ldr w5, [x3], #4
+CPU_BE( rev w5, w5 )
+ str w5, [x2], #4
+ sub w4, w4, #1
+ cbnz w4, 1b
+2:
+ ret
+ENDPROC(__restore_vgic_v2_state)
+
+ .popsection
diff --git a/arch/arm64/kvm/vgic-v3-switch.S b/arch/arm64/kvm/vgic-v3-switch.S
new file mode 100644
index 000000000000..d16046999e06
--- /dev/null
+++ b/arch/arm64/kvm/vgic-v3-switch.S
@@ -0,0 +1,267 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include <asm/assembler.h>
+#include <asm/memory.h>
+#include <asm/asm-offsets.h>
+#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+
+ .text
+ .pushsection .hyp.text, "ax"
+
+/*
+ * We store LRs in reverse order to let the CPU deal with streaming
+ * access. Use this macro to make it look saner...
+ */
+#define LR_OFFSET(n)	(VGIC_V3_CPU_LR + (15 - (n)) * 8)
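+// e.g. LR_OFFSET(15) = VGIC_V3_CPU_LR + 0 and LR_OFFSET(0) =
+// VGIC_V3_CPU_LR + 120: ICH_LR15_EL2 sits at the lowest address,
+// ICH_LR0_EL2 at the highest.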
+
+/*
+ * Save the VGIC CPU state into memory
+ * x0: Register pointing to VCPU struct
+ * Do not corrupt x1!!!
+ */
+.macro save_vgic_v3_state
+ // Compute the address of struct vgic_cpu
+ add x3, x0, #VCPU_VGIC_CPU
+
+ // Make sure stores to the GIC via the memory mapped interface
+ // are now visible to the system register interface
+ dsb st
+
+ // Save all interesting registers
+ mrs_s x4, ICH_HCR_EL2
+ mrs_s x5, ICH_VMCR_EL2
+ mrs_s x6, ICH_MISR_EL2
+ mrs_s x7, ICH_EISR_EL2
+ mrs_s x8, ICH_ELSR_EL2
+
+ str w4, [x3, #VGIC_V3_CPU_HCR]
+ str w5, [x3, #VGIC_V3_CPU_VMCR]
+ str w6, [x3, #VGIC_V3_CPU_MISR]
+ str w7, [x3, #VGIC_V3_CPU_EISR]
+ str w8, [x3, #VGIC_V3_CPU_ELRSR]
+
+ msr_s ICH_HCR_EL2, xzr
+
+ mrs_s x21, ICH_VTR_EL2
+ mvn w22, w21
+ ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4
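+ // Worked example (ICH_VTR_EL2[4:0] is ListRegs, the number of
+ // implemented LRs minus one -- per the GICv3 spec, not this patch):
+ // with 4 LRs, ListRegs = 3, so w23 = (15 - 3) * 4 = 48, and the
+ // branch below skips the twelve mrs_s for ICH_LR15..ICH_LR4.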
+
+ adr x24, 1f
+ add x24, x24, x23
+ br x24
+
+1:
+ mrs_s x20, ICH_LR15_EL2
+ mrs_s x19, ICH_LR14_EL2
+ mrs_s x18, ICH_LR13_EL2
+ mrs_s x17, ICH_LR12_EL2
+ mrs_s x16, ICH_LR11_EL2
+ mrs_s x15, ICH_LR10_EL2
+ mrs_s x14, ICH_LR9_EL2
+ mrs_s x13, ICH_LR8_EL2
+ mrs_s x12, ICH_LR7_EL2
+ mrs_s x11, ICH_LR6_EL2
+ mrs_s x10, ICH_LR5_EL2
+ mrs_s x9, ICH_LR4_EL2
+ mrs_s x8, ICH_LR3_EL2
+ mrs_s x7, ICH_LR2_EL2
+ mrs_s x6, ICH_LR1_EL2
+ mrs_s x5, ICH_LR0_EL2
+
+ adr x24, 1f
+ add x24, x24, x23
+ br x24
+
+1:
+ str x20, [x3, #LR_OFFSET(15)]
+ str x19, [x3, #LR_OFFSET(14)]
+ str x18, [x3, #LR_OFFSET(13)]
+ str x17, [x3, #LR_OFFSET(12)]
+ str x16, [x3, #LR_OFFSET(11)]
+ str x15, [x3, #LR_OFFSET(10)]
+ str x14, [x3, #LR_OFFSET(9)]
+ str x13, [x3, #LR_OFFSET(8)]
+ str x12, [x3, #LR_OFFSET(7)]
+ str x11, [x3, #LR_OFFSET(6)]
+ str x10, [x3, #LR_OFFSET(5)]
+ str x9, [x3, #LR_OFFSET(4)]
+ str x8, [x3, #LR_OFFSET(3)]
+ str x7, [x3, #LR_OFFSET(2)]
+ str x6, [x3, #LR_OFFSET(1)]
+ str x5, [x3, #LR_OFFSET(0)]
+
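+ // ICH_VTR_EL2[31:29] is PREbits (priority bits minus one); it
+ // determines how many active-priority registers exist: 5 bits ->
+ // AP0R0 only, 6 bits -> AP0R0-AP0R1, 7 bits -> AP0R0-AP0R3
+ // (assumption from the GICv3 spec; same for the AP1Rn bank below).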
+ tbnz w21, #29, 6f // 6 bits
+ tbz w21, #30, 5f // 5 bits
+ // 7 bits
+ mrs_s x20, ICH_AP0R3_EL2
+ str w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
+ mrs_s x19, ICH_AP0R2_EL2
+ str w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
+6: mrs_s x18, ICH_AP0R1_EL2
+ str w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
+5: mrs_s x17, ICH_AP0R0_EL2
+ str w17, [x3, #VGIC_V3_CPU_AP0R]
+
+ tbnz w21, #29, 6f // 6 bits
+ tbz w21, #30, 5f // 5 bits
+ // 7 bits
+ mrs_s x20, ICH_AP1R3_EL2
+ str w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
+ mrs_s x19, ICH_AP1R2_EL2
+ str w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
+6: mrs_s x18, ICH_AP1R1_EL2
+ str w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
+5: mrs_s x17, ICH_AP1R0_EL2
+ str w17, [x3, #VGIC_V3_CPU_AP1R]
+
+ // Restore SRE_EL1 access and re-enable SRE at EL1.
+ mrs_s x5, ICC_SRE_EL2
+ orr x5, x5, #ICC_SRE_EL2_ENABLE
+ msr_s ICC_SRE_EL2, x5
+ isb
+ mov x5, #1
+ msr_s ICC_SRE_EL1, x5
+.endm
+
+/*
+ * Restore the VGIC CPU state from memory
+ * x0: Register pointing to VCPU struct
+ */
+.macro restore_vgic_v3_state
+ // Disable SRE_EL1 access. Necessary, otherwise
+ // ICH_VMCR_EL2.VFIQEn becomes one, and FIQ happens...
+ msr_s ICC_SRE_EL1, xzr
+ isb
+
+ // Compute the address of struct vgic_cpu
+ add x3, x0, #VCPU_VGIC_CPU
+
+ // Restore all interesting registers
+ ldr w4, [x3, #VGIC_V3_CPU_HCR]
+ ldr w5, [x3, #VGIC_V3_CPU_VMCR]
+
+ msr_s ICH_HCR_EL2, x4
+ msr_s ICH_VMCR_EL2, x5
+
+ mrs_s x21, ICH_VTR_EL2
+
+ tbnz w21, #29, 6f // 6 bits
+ tbz w21, #30, 5f // 5 bits
+ // 7 bits
+ ldr w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
+ msr_s ICH_AP1R3_EL2, x20
+ ldr w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
+ msr_s ICH_AP1R2_EL2, x19
+6: ldr w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
+ msr_s ICH_AP1R1_EL2, x18
+5: ldr w17, [x3, #VGIC_V3_CPU_AP1R]
+ msr_s ICH_AP1R0_EL2, x17
+
+ tbnz w21, #29, 6f // 6 bits
+ tbz w21, #30, 5f // 5 bits
+ // 7 bits
+ ldr w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
+ msr_s ICH_AP0R3_EL2, x20
+ ldr w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
+ msr_s ICH_AP0R2_EL2, x19
+6: ldr w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
+ msr_s ICH_AP0R1_EL2, x18
+5: ldr w17, [x3, #VGIC_V3_CPU_AP0R]
+ msr_s ICH_AP0R0_EL2, x17
+
+ mvn w22, w21
+ ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4
+
+ adr x24, 1f
+ add x24, x24, x23
+ br x24
+
+1:
+ ldr x20, [x3, #LR_OFFSET(15)]
+ ldr x19, [x3, #LR_OFFSET(14)]
+ ldr x18, [x3, #LR_OFFSET(13)]
+ ldr x17, [x3, #LR_OFFSET(12)]
+ ldr x16, [x3, #LR_OFFSET(11)]
+ ldr x15, [x3, #LR_OFFSET(10)]
+ ldr x14, [x3, #LR_OFFSET(9)]
+ ldr x13, [x3, #LR_OFFSET(8)]
+ ldr x12, [x3, #LR_OFFSET(7)]
+ ldr x11, [x3, #LR_OFFSET(6)]
+ ldr x10, [x3, #LR_OFFSET(5)]
+ ldr x9, [x3, #LR_OFFSET(4)]
+ ldr x8, [x3, #LR_OFFSET(3)]
+ ldr x7, [x3, #LR_OFFSET(2)]
+ ldr x6, [x3, #LR_OFFSET(1)]
+ ldr x5, [x3, #LR_OFFSET(0)]
+
+ adr x24, 1f
+ add x24, x24, x23
+ br x24
+
+1:
+ msr_s ICH_LR15_EL2, x20
+ msr_s ICH_LR14_EL2, x19
+ msr_s ICH_LR13_EL2, x18
+ msr_s ICH_LR12_EL2, x17
+ msr_s ICH_LR11_EL2, x16
+ msr_s ICH_LR10_EL2, x15
+ msr_s ICH_LR9_EL2, x14
+ msr_s ICH_LR8_EL2, x13
+ msr_s ICH_LR7_EL2, x12
+ msr_s ICH_LR6_EL2, x11
+ msr_s ICH_LR5_EL2, x10
+ msr_s ICH_LR4_EL2, x9
+ msr_s ICH_LR3_EL2, x8
+ msr_s ICH_LR2_EL2, x7
+ msr_s ICH_LR1_EL2, x6
+ msr_s ICH_LR0_EL2, x5
+
+ // Ensure that the above will have reached the
+ // (re)distributors. This ensures the guest will read
+ // the correct values from the memory-mapped interface.
+ isb
+ dsb sy
+
+ // Prevent the guest from touching the GIC system registers
+ mrs_s x5, ICC_SRE_EL2
+ and x5, x5, #~ICC_SRE_EL2_ENABLE
+ msr_s ICC_SRE_EL2, x5
+.endm
+
+ENTRY(__save_vgic_v3_state)
+ save_vgic_v3_state
+ ret
+ENDPROC(__save_vgic_v3_state)
+
+ENTRY(__restore_vgic_v3_state)
+ restore_vgic_v3_state
+ ret
+ENDPROC(__restore_vgic_v3_state)
+
+ENTRY(__vgic_v3_get_ich_vtr_el2)
+ mrs_s x0, ICH_VTR_EL2
+ ret
+ENDPROC(__vgic_v3_get_ich_vtr_el2)
+
+ .popsection
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index e0ef63cd05dc..e085ee6ef4e2 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -209,8 +209,14 @@ ENTRY(__cpu_setup)
* Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
* both user and kernel.
*/
- ldr x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | TCR_IPS_40BIT | \
+ ldr x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | \
TCR_ASID16 | TCR_TBI0 | (1 << 31)
+ /*
+ * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
+ * TCR_EL1.
+ */
+ mrs x9, ID_AA64MMFR0_EL1
+ bfi x10, x9, #32, #3
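+	/*
+	 * e.g. PARange = 0b0010 (40-bit PA) in ID_AA64MMFR0_EL1[3:0]
+	 * becomes IPS = 0b010 (40-bit IPA) in TCR_EL1[34:32].
+	 */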
#ifdef CONFIG_ARM64_64K_PAGES
orr x10, x10, TCR_TG0_64K
orr x10, x10, TCR_TG1_64K