From c2537ed9fb8e17d713e5e67fcede047699d25814 Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Fri, 6 Feb 2015 10:56:27 +0000
Subject: MIPS: KVM: Add MSA exception handling

Add guest exception handling for MIPS SIMD Architecture (MSA) floating
point exceptions and MSA disabled exceptions.

MSA floating point exceptions from the guest need passing to the guest
kernel, so for these a guest MSAFPE is emulated.

MSA disabled exceptions are normally handled by passing a reserved
instruction exception to the guest (because no guest MSA was supported),
but the hypervisor can now handle them if the guest has MSA by passing an
MSA disabled exception to the guest, or if the guest has MSA enabled by
transparently restoring the guest MSA context and enabling MSA and the
FPU.

Signed-off-by: James Hogan
Cc: Paolo Bonzini
Cc: Paul Burton
Cc: Ralf Baechle
Cc: Gleb Natapov
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
---
 arch/mips/kvm/emulate.c   | 71 +++++++++++++++++++++++++++++++++++++++++++++++
 arch/mips/kvm/mips.c      | 10 +++++++
 arch/mips/kvm/stats.c     |  2 ++
 arch/mips/kvm/trap_emul.c | 43 ++++++++++++++++++++++++++--
 4 files changed, 124 insertions(+), 2 deletions(-)

(limited to 'arch/mips/kvm')

diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 07f554c72cb8..6230f376a44e 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -2179,6 +2179,41 @@ enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
 	return er;
 }
 
+enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
+						  uint32_t *opc,
+						  struct kvm_run *run,
+						  struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
+
+		kvm_change_c0_guest_cause(cop0, (0xff),
+					  (T_MSAFPE << CAUSEB_EXCCODE));
+
+		/* Set PC to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+	} else {
+		kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
+		er = EMULATE_FAIL;
+	}
+
+	return er;
+}
+
 enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
 					       uint32_t *opc,
 					       struct kvm_run *run,
@@ -2214,6 +2249,41 @@ enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
 	return er;
 }
 
+enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
+						  uint32_t *opc,
+						  struct kvm_run *run,
+						  struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	enum emulation_result er = EMULATE_DONE;
+
+	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+		/* save old pc */
+		kvm_write_c0_guest_epc(cop0, arch->pc);
+		kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+		if (cause & CAUSEF_BD)
+			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+		else
+			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+		kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
+
+		kvm_change_c0_guest_cause(cop0, (0xff),
+					  (T_MSADIS << CAUSEB_EXCCODE));
+
+		/* Set PC to the exception entry point */
+		arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+	} else {
+		kvm_err("Trying to deliver MSADIS when EXL is already set\n");
+		er = EMULATE_FAIL;
+	}
+
+	return er;
+}
+
 /* ll/sc, rdhwr, sync emulation */
 
 #define OPCODE 0xfc000000
@@ -2421,6 +2491,7 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
 		case T_BREAK:
 		case T_RES_INST:
 		case T_TRAP:
+		case T_MSAFPE:
 		case T_FPE:
 		case T_MSADIS:
 			break;
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index a17f21015a0b..e02c7e5a12ff 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -50,7 +50,9 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "resvd_inst",	  VCPU_STAT(resvd_inst_exits),	 KVM_STAT_VCPU },
 	{ "break_inst",	  VCPU_STAT(break_inst_exits),	 KVM_STAT_VCPU },
 	{ "trap_inst",	  VCPU_STAT(trap_inst_exits),	 KVM_STAT_VCPU },
+	{ "msa_fpe",	  VCPU_STAT(msa_fpe_exits),	 KVM_STAT_VCPU },
 	{ "fpe",	  VCPU_STAT(fpe_exits),		 KVM_STAT_VCPU },
+	{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
 	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
 	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
 	{ "halt_wakeup",  VCPU_STAT(halt_wakeup),	 KVM_STAT_VCPU },
@@ -1256,6 +1258,12 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		ret = kvm_mips_callbacks->handle_trap(vcpu);
 		break;
 
+	case T_MSAFPE:
+		++vcpu->stat.msa_fpe_exits;
+		trace_kvm_exit(vcpu, MSA_FPE_EXITS);
+		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
+		break;
+
 	case T_FPE:
 		++vcpu->stat.fpe_exits;
 		trace_kvm_exit(vcpu, FPE_EXITS);
@@ -1263,6 +1271,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		break;
 
 	case T_MSADIS:
+		++vcpu->stat.msa_disabled_exits;
+		trace_kvm_exit(vcpu, MSA_DISABLED_EXITS);
 		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
 		break;
 
diff --git a/arch/mips/kvm/stats.c b/arch/mips/kvm/stats.c
index 3843828f3b91..888bb67070ac 100644
--- a/arch/mips/kvm/stats.c
+++ b/arch/mips/kvm/stats.c
@@ -26,7 +26,9 @@ char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
 	"Reserved Inst",
 	"Break Inst",
 	"Trap Inst",
+	"MSA FPE",
 	"FPE",
+	"MSA Disabled",
 	"D-Cache Flushes",
 };
 
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 421d5b815f24..d836ed5b0bc7 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -362,6 +362,24 @@ static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
+static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *run = vcpu->run;
+	uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
+	unsigned long cause = vcpu->arch.host_cp0_cause;
+	enum emulation_result er = EMULATE_DONE;
+	int ret = RESUME_GUEST;
+
+	er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
+	if (er == EMULATE_DONE) {
+		ret = RESUME_GUEST;
+	} else {
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		ret = RESUME_HOST;
+	}
+	return ret;
+}
+
 static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
 {
 	struct kvm_run *run = vcpu->run;
@@ -380,16 +398,36 @@ static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
+/**
+ * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
+ * @vcpu:	Virtual CPU context.
+ *
+ * Handle when the guest attempts to use MSA when it is disabled.
+ */
 static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
 {
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	struct kvm_run *run = vcpu->run;
 	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
 	unsigned long cause = vcpu->arch.host_cp0_cause;
 	enum emulation_result er = EMULATE_DONE;
 	int ret = RESUME_GUEST;
 
-	/* No MSA supported in guest, guest reserved instruction exception */
-	er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
+	    (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
+		/*
+		 * No MSA in guest, or FPU enabled and not in FR=1 mode,
+		 * guest reserved instruction exception
+		 */
+		er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+	} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
+		/* MSA disabled by guest, guest MSA disabled exception */
+		er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
+	} else {
+		/* Restore MSA/FPU state */
+		kvm_own_msa(vcpu);
+		er = EMULATE_DONE;
+	}
 
 	switch (er) {
 	case EMULATE_DONE:
@@ -608,6 +646,7 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
 	.handle_res_inst = kvm_trap_emul_handle_res_inst,
 	.handle_break = kvm_trap_emul_handle_break,
 	.handle_trap = kvm_trap_emul_handle_trap,
+	.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
 	.handle_fpe = kvm_trap_emul_handle_fpe,
 	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
--
cgit v1.2.3
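Editor's note: the sketch below is not part of the patch. It is a minimal, standalone C model of the three-way decision the patch adds to kvm_trap_emul_handle_msa_disabled(); the struct and helper names are invented for illustration, and the real code reads the guest's CP0 Status and Config5 registers through the kvm_read_c0_guest_*() accessors rather than plain booleans.

	/*
	 * Standalone illustration (hypothetical names, not kernel code) of how
	 * an MSA disabled exit from the guest is dispatched.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct guest_state {
		bool has_msa;	/* guest was created with the MSA capability */
		bool cu1;	/* guest Status.CU1: FPU enabled */
		bool fr;	/* guest Status.FR: 64-bit FP register mode */
		bool msaen;	/* guest Config5.MSAEn: MSA enabled by guest */
	};

	static const char *msa_disabled_action(const struct guest_state *g)
	{
		if (!g->has_msa || (g->cu1 && !g->fr)) {
			/* No guest MSA, or FPU on with FR=0: MSA unusable */
			return "deliver reserved instruction exception to guest";
		} else if (!g->msaen) {
			/* Guest has MSA but has not enabled it itself */
			return "deliver MSA disabled exception to guest";
		} else {
			/* Legitimate MSA use: restore context transparently */
			return "restore guest MSA/FPU context and resume";
		}
	}

	int main(void)
	{
		struct guest_state g = {
			.has_msa = true, .cu1 = true, .fr = true, .msaen = true,
		};

		/* With all bits set, the hypervisor restores MSA and resumes. */
		printf("%s\n", msa_disabled_action(&g));
		return 0;
	}

The Status check mirrors the patch's (Status & (ST0_CU1 | ST0_FR)) == ST0_CU1 test: an enabled FPU in FR=0 mode cannot coexist with MSA, so the guest gets a reserved instruction exception in that case.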