Diffstat (limited to 'target/arm/kvm.c')
-rw-r--r-- | target/arm/kvm.c | 1523
1 file changed, 1446 insertions, 77 deletions
diff --git a/target/arm/kvm.c b/target/arm/kvm.c index 94b970bbf9..ab85d628a8 100644 --- a/target/arm/kvm.c +++ b/target/arm/kvm.c @@ -2,6 +2,8 @@ * ARM implementation of KVM hooks * * Copyright Christoffer Dall 2009-2010 + * Copyright Mian-M. Hamayun 2013, Virtual Open Systems + * Copyright Alex BennĂ©e 2014, Linaro * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. @@ -13,13 +15,13 @@ #include <linux/kvm.h> -#include "qemu-common.h" #include "qemu/timer.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "qom/object.h" #include "qapi/error.h" #include "sysemu/sysemu.h" +#include "sysemu/runstate.h" #include "sysemu/kvm.h" #include "sysemu/kvm_int.h" #include "kvm_arm.h" @@ -29,9 +31,14 @@ #include "hw/pci/pci.h" #include "exec/memattrs.h" #include "exec/address-spaces.h" +#include "exec/gdbstub.h" #include "hw/boards.h" #include "hw/irq.h" +#include "qapi/visitor.h" #include "qemu/log.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/ghes.h" +#include "target/arm/gtimer.h" const KVMCapabilityInfo kvm_arch_required_capabilities[] = { KVM_CAP_LAST_INFO @@ -41,28 +48,54 @@ static bool cap_has_mp_state; static bool cap_has_inject_serror_esr; static bool cap_has_inject_ext_dabt; +/** + * ARMHostCPUFeatures: information about the host CPU (identified + * by asking the host kernel) + */ +typedef struct ARMHostCPUFeatures { + ARMISARegisters isar; + uint64_t features; + uint32_t target; + const char *dtb_compatible; +} ARMHostCPUFeatures; + static ARMHostCPUFeatures arm_host_cpu_features; -int kvm_arm_vcpu_init(CPUState *cs) +/** + * kvm_arm_vcpu_init: + * @cpu: ARMCPU + * + * Initialize (or reinitialize) the VCPU by invoking the + * KVM_ARM_VCPU_INIT ioctl with the CPU type and feature + * bitmask specified in the CPUState. + * + * Returns: 0 if success else < 0 error code + */ +static int kvm_arm_vcpu_init(ARMCPU *cpu) { - ARMCPU *cpu = ARM_CPU(cs); struct kvm_vcpu_init init; init.target = cpu->kvm_target; memcpy(init.features, cpu->kvm_init_features, sizeof(init.features)); - return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init); + return kvm_vcpu_ioctl(CPU(cpu), KVM_ARM_VCPU_INIT, &init); } -int kvm_arm_vcpu_finalize(CPUState *cs, int feature) -{ - return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_FINALIZE, &feature); -} - -void kvm_arm_init_serror_injection(CPUState *cs) +/** + * kvm_arm_vcpu_finalize: + * @cpu: ARMCPU + * @feature: feature to finalize + * + * Finalizes the configuration of the specified VCPU feature by + * invoking the KVM_ARM_VCPU_FINALIZE ioctl. Features requiring + * this are documented in the "KVM_ARM_VCPU_FINALIZE" section of + * KVM's API documentation. 
+ * + * Returns: 0 if success else < 0 error code + */ +static int kvm_arm_vcpu_finalize(ARMCPU *cpu, int feature) { - cap_has_inject_serror_esr = kvm_check_extension(cs->kvm_state, - KVM_CAP_ARM_INJECT_SERROR_ESR); + return kvm_vcpu_ioctl(CPU(cpu), KVM_ARM_VCPU_FINALIZE, &feature); } bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try, @@ -80,7 +113,9 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try, if (max_vm_pa_size < 0) { max_vm_pa_size = 0; } - vmfd = ioctl(kvmfd, KVM_CREATE_VM, max_vm_pa_size); + do { + vmfd = ioctl(kvmfd, KVM_CREATE_VM, max_vm_pa_size); + } while (vmfd == -1 && errno == EINTR); if (vmfd < 0) { goto err; } @@ -165,6 +200,260 @@ void kvm_arm_destroy_scratch_host_vcpu(int *fdarray) } } +static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id) +{ + uint64_t ret; + struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret }; + int err; + + assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64); + err = ioctl(fd, KVM_GET_ONE_REG, &idreg); + if (err < 0) { + return -1; + } + *pret = ret; + return 0; +} + +static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id) +{ + struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret }; + + assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64); + return ioctl(fd, KVM_GET_ONE_REG, &idreg); +} + +static bool kvm_arm_pauth_supported(void) +{ + return (kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_ADDRESS) && + kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_GENERIC)); +} + +static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) +{ + /* Identify the feature bits corresponding to the host CPU, and + * fill out the ARMHostCPUClass fields accordingly. To do this + * we have to create a scratch VM, create a single CPU inside it, + * and then query that CPU for the relevant ID registers. + */ + int fdarray[3]; + bool sve_supported; + bool pmu_supported = false; + uint64_t features = 0; + int err; + + /* Old kernels may not know about the PREFERRED_TARGET ioctl: however + * we know these will only support creating one kind of guest CPU, + * which is its preferred CPU type. Fortunately these old kernels + * support only a very limited number of CPUs. + */ + static const uint32_t cpus_to_try[] = { + KVM_ARM_TARGET_AEM_V8, + KVM_ARM_TARGET_FOUNDATION_V8, + KVM_ARM_TARGET_CORTEX_A57, + QEMU_KVM_ARM_TARGET_NONE + }; + /* + * target = -1 informs kvm_arm_create_scratch_host_vcpu() + * to use the preferred target + */ + struct kvm_vcpu_init init = { .target = -1, }; + + /* + * Ask for SVE if supported, so that we can query ID_AA64ZFR0, + * which is otherwise RAZ. + */ + sve_supported = kvm_arm_sve_supported(); + if (sve_supported) { + init.features[0] |= 1 << KVM_ARM_VCPU_SVE; + } + + /* + * Ask for Pointer Authentication if supported, so that we get + * the unsanitized field values for AA64ISAR1_EL1. + */ + if (kvm_arm_pauth_supported()) { + init.features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS | + 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC); + } + + if (kvm_arm_pmu_supported()) { + init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3; + pmu_supported = true; + } + + if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) { + return false; + } + + ahcf->target = init.target; + ahcf->dtb_compatible = "arm,arm-v8"; + + err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0, + ARM64_SYS_REG(3, 0, 0, 4, 0)); + if (unlikely(err < 0)) { + /* + * Before v4.15, the kernel only exposed a limited number of system + * registers, not including any of the interesting AArch64 ID regs. 
+ * For the most part we could leave these fields as zero with minimal + * effect, since this does not affect the values seen by the guest. + * + * However, it could cause problems down the line for QEMU, + * so provide a minimal v8.0 default. + * + * ??? Could read MIDR and use knowledge from cpu64.c. + * ??? Could map a page of memory into our temp guest and + * run the tiniest of hand-crafted kernels to extract + * the values seen by the guest. + * ??? Either of these sounds like too much effort just + * to work around running a modern host kernel. + */ + ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */ + err = 0; + } else { + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1, + ARM64_SYS_REG(3, 0, 0, 4, 1)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0, + ARM64_SYS_REG(3, 0, 0, 4, 5)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0, + ARM64_SYS_REG(3, 0, 0, 5, 0)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1, + ARM64_SYS_REG(3, 0, 0, 5, 1)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0, + ARM64_SYS_REG(3, 0, 0, 6, 0)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1, + ARM64_SYS_REG(3, 0, 0, 6, 1)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar2, + ARM64_SYS_REG(3, 0, 0, 6, 2)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0, + ARM64_SYS_REG(3, 0, 0, 7, 0)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1, + ARM64_SYS_REG(3, 0, 0, 7, 1)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2, + ARM64_SYS_REG(3, 0, 0, 7, 2)); + + /* + * Note that if AArch32 support is not present in the host, + * the AArch32 sysregs are present to be read, but will + * return UNKNOWN values. This is neither better nor worse + * than skipping the reads and leaving 0, as we must avoid + * considering the values in every case. 
+ */ + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0, + ARM64_SYS_REG(3, 0, 0, 1, 0)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1, + ARM64_SYS_REG(3, 0, 0, 1, 1)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0, + ARM64_SYS_REG(3, 0, 0, 1, 2)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0, + ARM64_SYS_REG(3, 0, 0, 1, 4)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1, + ARM64_SYS_REG(3, 0, 0, 1, 5)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2, + ARM64_SYS_REG(3, 0, 0, 1, 6)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3, + ARM64_SYS_REG(3, 0, 0, 1, 7)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0, + ARM64_SYS_REG(3, 0, 0, 2, 0)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1, + ARM64_SYS_REG(3, 0, 0, 2, 1)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2, + ARM64_SYS_REG(3, 0, 0, 2, 2)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3, + ARM64_SYS_REG(3, 0, 0, 2, 3)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4, + ARM64_SYS_REG(3, 0, 0, 2, 4)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5, + ARM64_SYS_REG(3, 0, 0, 2, 5)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4, + ARM64_SYS_REG(3, 0, 0, 2, 6)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6, + ARM64_SYS_REG(3, 0, 0, 2, 7)); + + err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0, + ARM64_SYS_REG(3, 0, 0, 3, 0)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1, + ARM64_SYS_REG(3, 0, 0, 3, 1)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2, + ARM64_SYS_REG(3, 0, 0, 3, 2)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2, + ARM64_SYS_REG(3, 0, 0, 3, 4)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1, + ARM64_SYS_REG(3, 0, 0, 3, 5)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5, + ARM64_SYS_REG(3, 0, 0, 3, 6)); + + /* + * DBGDIDR is a bit complicated because the kernel doesn't + * provide an accessor for it in 64-bit mode, which is what this + * scratch VM is in, and there's no architected "64-bit sysreg + * which reads the same as the 32-bit register" the way there is + * for other ID registers. Instead we synthesize a value from the + * AArch64 ID_AA64DFR0, the same way the kernel code in + * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does. + * We only do this if the CPU supports AArch32 at EL1. 
+ */ + if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) { + int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS); + int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS); + int ctx_cmps = + FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS); + int version = 6; /* ARMv8 debug architecture */ + bool has_el3 = + !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3); + uint32_t dbgdidr = 0; + + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3); + dbgdidr |= (1 << 15); /* RES1 bit */ + ahcf->isar.dbgdidr = dbgdidr; + } + + if (pmu_supported) { + /* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */ + err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0, + ARM64_SYS_REG(3, 3, 9, 12, 0)); + } + + if (sve_supported) { + /* + * There is a range of kernels between kernel commit 73433762fcae + * and f81cb2c3ad41 which have a bug where the kernel doesn't + * expose SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has + * enabled SVE support, which resulted in an error rather than RAZ. + * So only read the register if we set KVM_ARM_VCPU_SVE above. + */ + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0, + ARM64_SYS_REG(3, 0, 0, 4, 4)); + } + } + + kvm_arm_destroy_scratch_host_vcpu(fdarray); + + if (err < 0) { + return false; + } + + /* + * We can assume any KVM supporting CPU is at least a v8 + * with VFPv4+Neon; this in turn implies most of the other + * feature bits. + */ + features |= 1ULL << ARM_FEATURE_V8; + features |= 1ULL << ARM_FEATURE_NEON; + features |= 1ULL << ARM_FEATURE_AARCH64; + features |= 1ULL << ARM_FEATURE_PMU; + features |= 1ULL << ARM_FEATURE_GENERIC_TIMER; + + ahcf->features = features; + + return true; +} + void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu) { CPUARMState *env = &cpu->env; @@ -208,10 +497,10 @@ static void kvm_steal_time_set(Object *obj, bool value, Error **errp) } /* KVM VCPU properties should be prefixed with "kvm-". */ -void kvm_arm_add_vcpu_properties(Object *obj) +void kvm_arm_add_vcpu_properties(ARMCPU *cpu) { - ARMCPU *cpu = ARM_CPU(obj); CPUARMState *env = &cpu->env; + Object *obj = OBJECT(cpu); if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { cpu->kvm_adjvtime = true; @@ -246,6 +535,13 @@ int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa) return ret > 0 ? ret : 40; } +int kvm_arch_get_default_type(MachineState *ms) +{ + bool fixed_ipa; + int size = kvm_arm_get_max_vm_ipa_size(ms, &fixed_ipa); + return fixed_ipa ? 
0 : size; +} + int kvm_arch_init(MachineState *ms, KVMState *s) { int ret = 0; @@ -262,6 +558,10 @@ int kvm_arch_init(MachineState *ms, KVMState *s) cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE); + /* Check whether user space can specify guest syndrome value */ + cap_has_inject_serror_esr = + kvm_check_extension(s, KVM_CAP_ARM_INJECT_SERROR_ESR); + if (ms->smp.cpus > 256 && !kvm_check_extension(s, KVM_CAP_ARM_IRQ_LINE_LAYOUT_2)) { error_report("Using more than 256 vcpus requires a host kernel " @@ -279,6 +579,34 @@ int kvm_arch_init(MachineState *ms, KVMState *s) } } + if (s->kvm_eager_split_size) { + uint32_t sizes; + + sizes = kvm_vm_check_extension(s, KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES); + if (!sizes) { + s->kvm_eager_split_size = 0; + warn_report("Eager Page Split support not available"); + } else if (!(s->kvm_eager_split_size & sizes)) { + error_report("Eager Page Split requested chunk size not valid"); + ret = -EINVAL; + } else { + ret = kvm_vm_enable_cap(s, KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE, 0, + s->kvm_eager_split_size); + if (ret < 0) { + error_report("Enabling of Eager Page Split failed: %s", + strerror(-ret)); + } + } + } + + max_hw_wps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_WPS); + hw_watchpoints = g_array_sized_new(true, true, + sizeof(HWWatchpoint), max_hw_wps); + + max_hw_bps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_BPS); + hw_breakpoints = g_array_sized_new(true, true, + sizeof(HWBreakpoint), max_hw_bps); + return ret; } @@ -335,8 +663,10 @@ static void kvm_arm_devlistener_del(MemoryListener *listener, } static MemoryListener devlistener = { + .name = "kvm-arm", .region_add = kvm_arm_devlistener_add, .region_del = kvm_arm_devlistener_del, + .priority = MEMORY_LISTENER_PRIORITY_MIN, }; static void kvm_arm_set_device_addr(KVMDevice *kd) @@ -436,11 +766,36 @@ static uint64_t *kvm_arm_get_cpreg_ptr(ARMCPU *cpu, uint64_t regidx) return &cpu->cpreg_values[res - cpu->cpreg_indexes]; } -/* Initialize the ARMCPU cpreg list according to the kernel's +/** + * kvm_arm_reg_syncs_via_cpreg_list: + * @regidx: KVM register index + * + * Return true if this KVM register should be synchronized via the + * cpreg list of arbitrary system registers, false if it is synchronized + * by hand using code in kvm_arch_get/put_registers(). + */ +static bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx) +{ + switch (regidx & KVM_REG_ARM_COPROC_MASK) { + case KVM_REG_ARM_CORE: + case KVM_REG_ARM64_SVE: + return false; + default: + return true; + } +} + +/** + * kvm_arm_init_cpreg_list: + * @cpu: ARMCPU + * + * Initialize the ARMCPU cpreg list according to the kernel's * definition of what CPU registers it knows about (and throw away * the previous TCG-created cpreg list). + * + * Returns: 0 if success, else < 0 error code */ -int kvm_arm_init_cpreg_list(ARMCPU *cpu) +static int kvm_arm_init_cpreg_list(ARMCPU *cpu) { struct kvm_reg_list rl; struct kvm_reg_list *rlp; @@ -513,6 +868,28 @@ out: return ret; } +/** + * kvm_arm_cpreg_level: + * @regidx: KVM register index + * + * Return the level of this coprocessor/system register. Return value is + * either KVM_PUT_RUNTIME_STATE, KVM_PUT_RESET_STATE, or KVM_PUT_FULL_STATE. + */ +static int kvm_arm_cpreg_level(uint64_t regidx) +{ + /* + * All system registers are assumed to be level KVM_PUT_RUNTIME_STATE. + * If a register should be written less often, you must add it here + * with a state of either KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE. 
+ */ + switch (regidx) { + case KVM_REG_ARM_TIMER_CNT: + case KVM_REG_ARM_PTIMER_CNT: + return KVM_PUT_FULL_STATE; + } + return KVM_PUT_RUNTIME_STATE; +} + bool write_kvmstate_to_list(ARMCPU *cpu) { CPUState *cs = CPU(cpu); @@ -520,27 +897,22 @@ bool write_kvmstate_to_list(ARMCPU *cpu) bool ok = true; for (i = 0; i < cpu->cpreg_array_len; i++) { - struct kvm_one_reg r; uint64_t regidx = cpu->cpreg_indexes[i]; uint32_t v32; int ret; - r.id = regidx; - switch (regidx & KVM_REG_SIZE_MASK) { case KVM_REG_SIZE_U32: - r.addr = (uintptr_t)&v32; - ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r); + ret = kvm_get_one_reg(cs, regidx, &v32); if (!ret) { cpu->cpreg_values[i] = v32; } break; case KVM_REG_SIZE_U64: - r.addr = (uintptr_t)(cpu->cpreg_values + i); - ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r); + ret = kvm_get_one_reg(cs, regidx, cpu->cpreg_values + i); break; default: - abort(); + g_assert_not_reached(); } if (ret) { ok = false; @@ -556,7 +928,6 @@ bool write_list_to_kvmstate(ARMCPU *cpu, int level) bool ok = true; for (i = 0; i < cpu->cpreg_array_len; i++) { - struct kvm_one_reg r; uint64_t regidx = cpu->cpreg_indexes[i]; uint32_t v32; int ret; @@ -565,19 +936,17 @@ bool write_list_to_kvmstate(ARMCPU *cpu, int level) continue; } - r.id = regidx; switch (regidx & KVM_REG_SIZE_MASK) { case KVM_REG_SIZE_U32: v32 = cpu->cpreg_values[i]; - r.addr = (uintptr_t)&v32; + ret = kvm_set_one_reg(cs, regidx, &v32); break; case KVM_REG_SIZE_U64: - r.addr = (uintptr_t)(cpu->cpreg_values + i); + ret = kvm_set_one_reg(cs, regidx, cpu->cpreg_values + i); break; default: - abort(); + g_assert_not_reached(); } - ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r); if (ret) { /* We might fail for "unknown register" and also for * "you tried to set a register which is constant with @@ -613,7 +982,7 @@ void kvm_arm_reset_vcpu(ARMCPU *cpu) /* Re-init VCPU so that all registers are set to * their respective reset values. */ - ret = kvm_arm_vcpu_init(CPU(cpu)); + ret = kvm_arm_vcpu_init(cpu); if (ret < 0) { fprintf(stderr, "kvm_arm_vcpu_init failed: %s\n", strerror(-ret)); abort(); @@ -635,58 +1004,50 @@ void kvm_arm_reset_vcpu(ARMCPU *cpu) /* * Update KVM's MP_STATE based on what QEMU thinks it is */ -int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu) +static int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu) { if (cap_has_mp_state) { struct kvm_mp_state mp_state = { .mp_state = (cpu->power_state == PSCI_OFF) ? KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE }; - int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state); - if (ret) { - fprintf(stderr, "%s: failed to set MP_STATE %d/%s\n", - __func__, ret, strerror(-ret)); - return -1; - } + return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state); } - return 0; } /* * Sync the KVM MP_STATE into QEMU */ -int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu) +static int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu) { if (cap_has_mp_state) { struct kvm_mp_state mp_state; int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MP_STATE, &mp_state); if (ret) { - fprintf(stderr, "%s: failed to get MP_STATE %d/%s\n", - __func__, ret, strerror(-ret)); - abort(); + return ret; } cpu->power_state = (mp_state.mp_state == KVM_MP_STATE_STOPPED) ? PSCI_OFF : PSCI_ON; } - return 0; } -void kvm_arm_get_virtual_time(CPUState *cs) +/** + * kvm_arm_get_virtual_time: + * @cpu: ARMCPU + * + * Gets the VCPU's virtual counter and stores it in the KVM CPU state. 
+ */ +static void kvm_arm_get_virtual_time(ARMCPU *cpu) { - ARMCPU *cpu = ARM_CPU(cs); - struct kvm_one_reg reg = { - .id = KVM_REG_ARM_TIMER_CNT, - .addr = (uintptr_t)&cpu->kvm_vtime, - }; int ret; if (cpu->kvm_vtime_dirty) { return; } - ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); + ret = kvm_get_one_reg(CPU(cpu), KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime); if (ret) { error_report("Failed to get KVM_REG_ARM_TIMER_CNT"); abort(); @@ -695,20 +1056,21 @@ void kvm_arm_get_virtual_time(CPUState *cs) cpu->kvm_vtime_dirty = true; } -void kvm_arm_put_virtual_time(CPUState *cs) +/** + * kvm_arm_put_virtual_time: + * @cpu: ARMCPU + * + * Sets the VCPU's virtual counter to the value stored in the KVM CPU state. + */ +static void kvm_arm_put_virtual_time(ARMCPU *cpu) { - ARMCPU *cpu = ARM_CPU(cs); - struct kvm_one_reg reg = { - .id = KVM_REG_ARM_TIMER_CNT, - .addr = (uintptr_t)&cpu->kvm_vtime, - }; int ret; if (!cpu->kvm_vtime_dirty) { return; } - ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); + ret = kvm_set_one_reg(CPU(cpu), KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime); if (ret) { error_report("Failed to set KVM_REG_ARM_TIMER_CNT"); abort(); @@ -717,7 +1079,15 @@ void kvm_arm_put_virtual_time(CPUState *cs) cpu->kvm_vtime_dirty = false; } -int kvm_put_vcpu_events(ARMCPU *cpu) +/** + * kvm_put_vcpu_events: + * @cpu: ARMCPU + * + * Put VCPU related state to kvm. + * + * Returns: 0 if success else < 0 error code + */ +static int kvm_put_vcpu_events(ARMCPU *cpu) { CPUARMState *env = &cpu->env; struct kvm_vcpu_events events; @@ -746,7 +1116,15 @@ int kvm_put_vcpu_events(ARMCPU *cpu) return ret; } -int kvm_get_vcpu_events(ARMCPU *cpu) +/** + * kvm_get_vcpu_events: + * @cpu: ARMCPU + * + * Get VCPU related state from kvm. + * + * Returns: 0 if success else < 0 error code + */ +static int kvm_get_vcpu_events(ARMCPU *cpu) { CPUARMState *env = &cpu->env; struct kvm_vcpu_events events; @@ -770,6 +1148,63 @@ int kvm_get_vcpu_events(ARMCPU *cpu) return 0; } +#define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0) +#define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2) + +/* + * ESR_EL1 + * ISS encoding + * AARCH64: DFSC, bits [5:0] + * AARCH32: + * TTBCR.EAE == 0 + * FS[4] - DFSR[10] + * FS[3:0] - DFSR[3:0] + * TTBCR.EAE == 1 + * FS, bits [5:0] + */ +#define ESR_DFSC(aarch64, lpae, v) \ + ((aarch64 || (lpae)) ? ((v) & 0x3F) \ + : (((v) >> 6) | ((v) & 0x1F))) + +#define ESR_DFSC_EXTABT(aarch64, lpae) \ + ((aarch64) ? 0x10 : (lpae) ? 
0x10 : 0x8) + +/** + * kvm_arm_verify_ext_dabt_pending: + * @cpu: ARMCPU + * + * Verify the fault status code wrt the Ext DABT injection + * + * Returns: true if the fault status code is as expected, false otherwise + */ +static bool kvm_arm_verify_ext_dabt_pending(ARMCPU *cpu) +{ + CPUState *cs = CPU(cpu); + uint64_t dfsr_val; + + if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) { + CPUARMState *env = &cpu->env; + int aarch64_mode = arm_feature(env, ARM_FEATURE_AARCH64); + int lpae = 0; + + if (!aarch64_mode) { + uint64_t ttbcr; + + if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) { + lpae = arm_feature(env, ARM_FEATURE_LPAE) + && (ttbcr & TTBCR_EAE); + } + } + /* + * The verification here is based on the DFSC bits + * of the ESR_EL1 reg only + */ + return (ESR_DFSC(aarch64_mode, lpae, dfsr_val) == + ESR_DFSC_EXTABT(aarch64_mode, lpae)); + } + return false; +} + void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) { ARMCPU *cpu = ARM_CPU(cs); @@ -784,7 +1219,7 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) * an IMPLEMENTATION DEFINED exception (for 32-bit EL1) */ if (!arm_feature(env, ARM_FEATURE_AARCH64) && - unlikely(!kvm_arm_verify_ext_dabt_pending(cs))) { + unlikely(!kvm_arm_verify_ext_dabt_pending(cpu))) { error_report("Data abort exception with no valid ISS generated by " "guest memory access. KVM unable to emulate faulting " @@ -816,7 +1251,7 @@ MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) if (run->s.regs.device_irq_level != cpu->device_irq_level) { switched_level = cpu->device_irq_level ^ run->s.regs.device_irq_level; - qemu_mutex_lock_iothread(); + bql_lock(); if (switched_level & KVM_ARM_DEV_EL1_VTIMER) { qemu_set_irq(cpu->gt_timer_outputs[GTIMER_VIRT], @@ -845,41 +1280,39 @@ MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) /* We also mark unknown levels as processed to not waste cycles */ cpu->device_irq_level = run->s.regs.device_irq_level; - qemu_mutex_unlock_iothread(); + bql_unlock(); } return MEMTXATTRS_UNSPECIFIED; } -void kvm_arm_vm_state_change(void *opaque, bool running, RunState state) +static void kvm_arm_vm_state_change(void *opaque, bool running, RunState state) { - CPUState *cs = opaque; - ARMCPU *cpu = ARM_CPU(cs); + ARMCPU *cpu = opaque; if (running) { if (cpu->kvm_adjvtime) { - kvm_arm_put_virtual_time(cs); + kvm_arm_put_virtual_time(cpu); } } else { if (cpu->kvm_adjvtime) { - kvm_arm_get_virtual_time(cs); + kvm_arm_get_virtual_time(cpu); } } } /** * kvm_arm_handle_dabt_nisv: - * @cs: CPUState + * @cpu: ARMCPU * @esr_iss: ISS encoding (limited) for the exception from Data Abort * ISV bit set to '0b0' -> no valid instruction syndrome * @fault_ipa: faulting address for the synchronous data abort * * Returns: 0 if the exception has been handled, < 0 otherwise */ -static int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss, +static int kvm_arm_handle_dabt_nisv(ARMCPU *cpu, uint64_t esr_iss, uint64_t fault_ipa) { - ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; /* * Request KVM to inject the external data abort into the guest @@ -895,7 +1328,7 @@ static int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss, */ events.exception.ext_dabt_pending = 1; /* KVM_CAP_ARM_INJECT_EXT_DABT implies KVM_CAP_VCPU_EVENTS */ - if (!kvm_vcpu_ioctl(cs, KVM_SET_VCPU_EVENTS, &events)) { + if (!kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events)) { env->ext_dabt_raised = 1; return 0; } @@ -908,19 +1341,97 @@ static int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss, return -1; } +/** + * 
kvm_arm_handle_debug: + * @cpu: ARMCPU + * @debug_exit: debug part of the KVM exit structure + * + * Returns: TRUE if the debug exception was handled. + * + * See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register + * + * To minimise translating between kernel and user-space the kernel + * ABI just provides user-space with the full exception syndrome + * register value to be decoded in QEMU. + */ +static bool kvm_arm_handle_debug(ARMCPU *cpu, + struct kvm_debug_exit_arch *debug_exit) +{ + int hsr_ec = syn_get_ec(debug_exit->hsr); + CPUState *cs = CPU(cpu); + CPUARMState *env = &cpu->env; + + /* Ensure PC is synchronised */ + kvm_cpu_synchronize_state(cs); + + switch (hsr_ec) { + case EC_SOFTWARESTEP: + if (cs->singlestep_enabled) { + return true; + } else { + /* + * The kernel should have suppressed the guest's ability to + * single step at this point so something has gone wrong. + */ + error_report("%s: guest single-step while debugging unsupported" + " (%"PRIx64", %"PRIx32")", + __func__, env->pc, debug_exit->hsr); + return false; + } + break; + case EC_AA64_BKPT: + if (kvm_find_sw_breakpoint(cs, env->pc)) { + return true; + } + break; + case EC_BREAKPOINT: + if (find_hw_breakpoint(cs, env->pc)) { + return true; + } + break; + case EC_WATCHPOINT: + { + CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far); + if (wp) { + cs->watchpoint_hit = wp; + return true; + } + break; + } + default: + error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")", + __func__, debug_exit->hsr, env->pc); + } + + /* If we are not handling the debug exception it must belong to + * the guest. Let's re-use the existing TCG interrupt code to set + * everything up properly. + */ + cs->exception_index = EXCP_BKPT; + env->exception.syndrome = debug_exit->hsr; + env->exception.vaddress = debug_exit->far; + env->exception.target_el = 1; + bql_lock(); + arm_cpu_do_interrupt(cs); + bql_unlock(); + + return false; +} + int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) { + ARMCPU *cpu = ARM_CPU(cs); int ret = 0; switch (run->exit_reason) { case KVM_EXIT_DEBUG: - if (kvm_arm_handle_debug(cs, &run->debug.arch)) { + if (kvm_arm_handle_debug(cpu, &run->debug.arch)) { ret = EXCP_DEBUG; } /* otherwise return to guest */ break; case KVM_EXIT_ARM_NISV: /* External DABT with no valid iss to decode */ - ret = kvm_arm_handle_dabt_nisv(cs, run->arm_nisv.esr_iss, + ret = kvm_arm_handle_dabt_nisv(cpu, run->arm_nisv.esr_iss, run->arm_nisv.fault_ipa); break; default: @@ -941,12 +1452,47 @@ int kvm_arch_process_async_events(CPUState *cs) return 0; } +/** + * kvm_arm_hw_debug_active: + * @cpu: ARMCPU + * + * Return: TRUE if any hardware breakpoints in use. + */ +static bool kvm_arm_hw_debug_active(ARMCPU *cpu) +{ + return ((cur_hw_wps > 0) || (cur_hw_bps > 0)); +} + +/** + * kvm_arm_copy_hw_debug_data: + * @ptr: kvm_guest_debug_arch structure + * + * Copy the architecture specific debug registers into the + * kvm_guest_debug ioctl structure. 
+ */ +static void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr) +{ + int i; + memset(ptr, 0, sizeof(struct kvm_guest_debug_arch)); + + for (i = 0; i < max_hw_wps; i++) { + HWWatchpoint *wp = get_hw_wp(i); + ptr->dbg_wcr[i] = wp->wcr; + ptr->dbg_wvr[i] = wp->wvr; + } + for (i = 0; i < max_hw_bps; i++) { + HWBreakpoint *bp = get_hw_bp(i); + ptr->dbg_bcr[i] = bp->bcr; + ptr->dbg_bvr[i] = bp->bvr; + } +} + void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg) { if (kvm_sw_breakpoints_active(cs)) { dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP; } - if (kvm_arm_hw_debug_active(cs)) { + if (kvm_arm_hw_debug_active(ARM_CPU(cs))) { dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW; kvm_arm_copy_hw_debug_data(&dbg->arch); } @@ -959,7 +1505,7 @@ void kvm_arch_init_irq_routing(KVMState *s) int kvm_arch_irqchip_create(KVMState *s) { if (kvm_kernel_irqchip_split()) { - perror("-machine kernel_irqchip=split is not supported on ARM."); + error_report("-machine kernel_irqchip=split is not supported on ARM."); exit(1); } @@ -1056,3 +1602,826 @@ bool kvm_arch_cpu_check_are_resettable(void) { return true; } + +static void kvm_arch_get_eager_split_size(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + uint64_t value = s->kvm_eager_split_size; + + visit_type_size(v, name, &value, errp); +} + +static void kvm_arch_set_eager_split_size(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + uint64_t value; + + if (s->fd != -1) { + error_setg(errp, "Unable to set early-split-size after KVM has been initialized"); + return; + } + + if (!visit_type_size(v, name, &value, errp)) { + return; + } + + if (value && !is_power_of_2(value)) { + error_setg(errp, "early-split-size must be a power of two"); + return; + } + + s->kvm_eager_split_size = value; +} + +void kvm_arch_accel_class_init(ObjectClass *oc) +{ + object_class_property_add(oc, "eager-split-size", "size", + kvm_arch_get_eager_split_size, + kvm_arch_set_eager_split_size, NULL, NULL); + + object_class_property_set_description(oc, "eager-split-size", + "Eager Page Split chunk size for hugepages. 
(default: 0, disabled)"); +} + +int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type) +{ + switch (type) { + case GDB_BREAKPOINT_HW: + return insert_hw_breakpoint(addr); + break; + case GDB_WATCHPOINT_READ: + case GDB_WATCHPOINT_WRITE: + case GDB_WATCHPOINT_ACCESS: + return insert_hw_watchpoint(addr, len, type); + default: + return -ENOSYS; + } +} + +int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type) +{ + switch (type) { + case GDB_BREAKPOINT_HW: + return delete_hw_breakpoint(addr); + case GDB_WATCHPOINT_READ: + case GDB_WATCHPOINT_WRITE: + case GDB_WATCHPOINT_ACCESS: + return delete_hw_watchpoint(addr, len, type); + default: + return -ENOSYS; + } +} + +void kvm_arch_remove_all_hw_breakpoints(void) +{ + if (cur_hw_wps > 0) { + g_array_remove_range(hw_watchpoints, 0, cur_hw_wps); + } + if (cur_hw_bps > 0) { + g_array_remove_range(hw_breakpoints, 0, cur_hw_bps); + } +} + +static bool kvm_arm_set_device_attr(ARMCPU *cpu, struct kvm_device_attr *attr, + const char *name) +{ + int err; + + err = kvm_vcpu_ioctl(CPU(cpu), KVM_HAS_DEVICE_ATTR, attr); + if (err != 0) { + error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err)); + return false; + } + + err = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEVICE_ATTR, attr); + if (err != 0) { + error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err)); + return false; + } + + return true; +} + +void kvm_arm_pmu_init(ARMCPU *cpu) +{ + struct kvm_device_attr attr = { + .group = KVM_ARM_VCPU_PMU_V3_CTRL, + .attr = KVM_ARM_VCPU_PMU_V3_INIT, + }; + + if (!cpu->has_pmu) { + return; + } + if (!kvm_arm_set_device_attr(cpu, &attr, "PMU")) { + error_report("failed to init PMU"); + abort(); + } +} + +void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq) +{ + struct kvm_device_attr attr = { + .group = KVM_ARM_VCPU_PMU_V3_CTRL, + .addr = (intptr_t)&irq, + .attr = KVM_ARM_VCPU_PMU_V3_IRQ, + }; + + if (!cpu->has_pmu) { + return; + } + if (!kvm_arm_set_device_attr(cpu, &attr, "PMU")) { + error_report("failed to set irq for PMU"); + abort(); + } +} + +void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa) +{ + struct kvm_device_attr attr = { + .group = KVM_ARM_VCPU_PVTIME_CTRL, + .attr = KVM_ARM_VCPU_PVTIME_IPA, + .addr = (uint64_t)&ipa, + }; + + if (cpu->kvm_steal_time == ON_OFF_AUTO_OFF) { + return; + } + if (!kvm_arm_set_device_attr(cpu, &attr, "PVTIME IPA")) { + error_report("failed to init PVTIME IPA"); + abort(); + } +} + +void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp) +{ + bool has_steal_time = kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME); + + if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) { + if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + cpu->kvm_steal_time = ON_OFF_AUTO_OFF; + } else { + cpu->kvm_steal_time = ON_OFF_AUTO_ON; + } + } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) { + if (!has_steal_time) { + error_setg(errp, "'kvm-steal-time' cannot be enabled " + "on this host"); + return; + } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + /* + * DEN0057A chapter 2 says "This specification only covers + * systems in which the Execution state of the hypervisor + * as well as EL1 of virtual machines is AArch64.". And, + * to ensure that, the smc/hvc calls are only specified as + * smc64/hvc64. 
+ */ + error_setg(errp, "'kvm-steal-time' cannot be enabled " + "for AArch32 guests"); + return; + } + } +} + +bool kvm_arm_aarch32_supported(void) +{ + return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT); +} + +bool kvm_arm_sve_supported(void) +{ + return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE); +} + +QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1); + +uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu) +{ + /* Only call this function if kvm_arm_sve_supported() returns true. */ + static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS]; + static bool probed; + uint32_t vq = 0; + int i; + + /* + * KVM ensures all host CPUs support the same set of vector lengths. + * So we only need to create the scratch VCPUs once and then cache + * the results. + */ + if (!probed) { + struct kvm_vcpu_init init = { + .target = -1, + .features[0] = (1 << KVM_ARM_VCPU_SVE), + }; + struct kvm_one_reg reg = { + .id = KVM_REG_ARM64_SVE_VLS, + .addr = (uint64_t)&vls[0], + }; + int fdarray[3], ret; + + probed = true; + + if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) { + error_report("failed to create scratch VCPU with SVE enabled"); + abort(); + } + ret = ioctl(fdarray[2], KVM_GET_ONE_REG, ®); + kvm_arm_destroy_scratch_host_vcpu(fdarray); + if (ret) { + error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s", + strerror(errno)); + abort(); + } + + for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) { + if (vls[i]) { + vq = 64 - clz64(vls[i]) + i * 64; + break; + } + } + if (vq > ARM_MAX_VQ) { + warn_report("KVM supports vector lengths larger than " + "QEMU can enable"); + vls[0] &= MAKE_64BIT_MASK(0, ARM_MAX_VQ); + } + } + + return vls[0]; +} + +static int kvm_arm_sve_set_vls(ARMCPU *cpu) +{ + uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq.map }; + + assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX); + + return kvm_set_one_reg(CPU(cpu), KVM_REG_ARM64_SVE_VLS, &vls[0]); +} + +#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5 + +int kvm_arch_init_vcpu(CPUState *cs) +{ + int ret; + uint64_t mpidr; + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + uint64_t psciver; + + if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE || + !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) { + error_report("KVM is not supported for this guest CPU type"); + return -EINVAL; + } + + qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cpu); + + /* Determine init features for this CPU */ + memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features)); + if (cs->start_powered_off) { + cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF; + } + if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) { + cpu->psci_version = QEMU_PSCI_VERSION_0_2; + cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2; + } + if (!arm_feature(env, ARM_FEATURE_AARCH64)) { + cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT; + } + if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) { + cpu->has_pmu = false; + } + if (cpu->has_pmu) { + cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3; + } else { + env->features &= ~(1ULL << ARM_FEATURE_PMU); + } + if (cpu_isar_feature(aa64_sve, cpu)) { + assert(kvm_arm_sve_supported()); + cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE; + } + if (cpu_isar_feature(aa64_pauth, cpu)) { + cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS | + 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC); + } + + /* Do KVM_ARM_VCPU_INIT ioctl */ + ret = kvm_arm_vcpu_init(cpu); + if (ret) { + return ret; + } + + if (cpu_isar_feature(aa64_sve, cpu)) { + ret = kvm_arm_sve_set_vls(cpu); + if 
(ret) { + return ret; + } + ret = kvm_arm_vcpu_finalize(cpu, KVM_ARM_VCPU_SVE); + if (ret) { + return ret; + } + } + + /* + * KVM reports the exact PSCI version it is implementing via a + * special sysreg. If it is present, use its contents to determine + * what to report to the guest in the dtb (it is the PSCI version, + * in the same 15-bits major 16-bits minor format that PSCI_VERSION + * returns). + */ + if (!kvm_get_one_reg(cs, KVM_REG_ARM_PSCI_VERSION, &psciver)) { + cpu->psci_version = psciver; + } + + /* + * When KVM is in use, PSCI is emulated in-kernel and not by qemu. + * Currently KVM has its own idea about MPIDR assignment, so we + * override our defaults with what we get from KVM. + */ + ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr); + if (ret) { + return ret; + } + cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK; + + return kvm_arm_init_cpreg_list(cpu); +} + +int kvm_arch_destroy_vcpu(CPUState *cs) +{ + return 0; +} + +/* Callers must hold the iothread mutex lock */ +static void kvm_inject_arm_sea(CPUState *c) +{ + ARMCPU *cpu = ARM_CPU(c); + CPUARMState *env = &cpu->env; + uint32_t esr; + bool same_el; + + c->exception_index = EXCP_DATA_ABORT; + env->exception.target_el = 1; + + /* + * Set the DFSC to synchronous external abort and set FnV to not valid, + * this will tell guest the FAR_ELx is UNKNOWN for this abort. + */ + same_el = arm_current_el(env) == env->exception.target_el; + esr = syn_data_abort_no_iss(same_el, 1, 0, 0, 0, 0, 0x10); + + env->exception.syndrome = esr; + + arm_cpu_do_interrupt(c); +} + +#define AARCH64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ + KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) + +#define AARCH64_SIMD_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \ + KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) + +#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \ + KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) + +static int kvm_arch_put_fpsimd(CPUState *cs) +{ + CPUARMState *env = &ARM_CPU(cs)->env; + int i, ret; + + for (i = 0; i < 32; i++) { + uint64_t *q = aa64_vfp_qreg(env, i); +#if HOST_BIG_ENDIAN + uint64_t fp_val[2] = { q[1], q[0] }; + ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), + fp_val); +#else + ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q); +#endif + if (ret) { + return ret; + } + } + + return 0; +} + +/* + * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits + * and PREGS and the FFR have a slice size of 256 bits. However we simply hard + * code the slice index to zero for now as it's unlikely we'll need more than + * one slice for quite some time. 
+ */ +static int kvm_arch_put_sve(CPUState *cs) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + uint64_t tmp[ARM_MAX_VQ * 2]; + uint64_t *r; + int n, ret; + + for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) { + r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2); + ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r); + if (ret) { + return ret; + } + } + + for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) { + r = sve_bswap64(tmp, r = &env->vfp.pregs[n].p[0], + DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); + ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r); + if (ret) { + return ret; + } + } + + r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0], + DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); + ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r); + if (ret) { + return ret; + } + + return 0; +} + +int kvm_arch_put_registers(CPUState *cs, int level) +{ + uint64_t val; + uint32_t fpr; + int i, ret; + unsigned int el; + + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + + /* If we are in AArch32 mode then we need to copy the AArch32 regs to the + * AArch64 registers before pushing them out to 64-bit KVM. + */ + if (!is_a64(env)) { + aarch64_sync_32_to_64(env); + } + + for (i = 0; i < 31; i++) { + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]), + &env->xregs[i]); + if (ret) { + return ret; + } + } + + /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the + * QEMU side we keep the current SP in xregs[31] as well. + */ + aarch64_save_sp(env, 1); + + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]); + if (ret) { + return ret; + } + + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]); + if (ret) { + return ret; + } + + /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */ + if (is_a64(env)) { + val = pstate_read(env); + } else { + val = cpsr_read(env); + } + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val); + if (ret) { + return ret; + } + + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc); + if (ret) { + return ret; + } + + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]); + if (ret) { + return ret; + } + + /* Saved Program State Registers + * + * Before we restore from the banked_spsr[] array we need to + * ensure that any modifications to env->spsr are correctly + * reflected in the banks. 
+ */ + el = arm_current_el(env); + if (el > 0 && !is_a64(env)) { + i = bank_number(env->uncached_cpsr & CPSR_M); + env->banked_spsr[i] = env->spsr; + } + + /* KVM 0-4 map to QEMU banks 1-5 */ + for (i = 0; i < KVM_NR_SPSR; i++) { + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(spsr[i]), + &env->banked_spsr[i + 1]); + if (ret) { + return ret; + } + } + + if (cpu_isar_feature(aa64_sve, cpu)) { + ret = kvm_arch_put_sve(cs); + } else { + ret = kvm_arch_put_fpsimd(cs); + } + if (ret) { + return ret; + } + + fpr = vfp_get_fpsr(env); + ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr); + if (ret) { + return ret; + } + + fpr = vfp_get_fpcr(env); + ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr); + if (ret) { + return ret; + } + + write_cpustate_to_list(cpu, true); + + if (!write_list_to_kvmstate(cpu, level)) { + return -EINVAL; + } + + /* + * Setting VCPU events should be triggered after syncing the registers + * to avoid overwriting potential changes made by KVM upon calling + * KVM_SET_VCPU_EVENTS ioctl + */ + ret = kvm_put_vcpu_events(cpu); + if (ret) { + return ret; + } + + return kvm_arm_sync_mpstate_to_kvm(cpu); +} + +static int kvm_arch_get_fpsimd(CPUState *cs) +{ + CPUARMState *env = &ARM_CPU(cs)->env; + int i, ret; + + for (i = 0; i < 32; i++) { + uint64_t *q = aa64_vfp_qreg(env, i); + ret = kvm_get_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q); + if (ret) { + return ret; + } else { +#if HOST_BIG_ENDIAN + uint64_t t; + t = q[0], q[0] = q[1], q[1] = t; +#endif + } + } + + return 0; +} + +/* + * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits + * and PREGS and the FFR have a slice size of 256 bits. However we simply hard + * code the slice index to zero for now as it's unlikely we'll need more than + * one slice for quite some time. + */ +static int kvm_arch_get_sve(CPUState *cs) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + uint64_t *r; + int n, ret; + + for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) { + r = &env->vfp.zregs[n].d[0]; + ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r); + if (ret) { + return ret; + } + sve_bswap64(r, r, cpu->sve_max_vq * 2); + } + + for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) { + r = &env->vfp.pregs[n].p[0]; + ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r); + if (ret) { + return ret; + } + sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); + } + + r = &env->vfp.pregs[FFR_PRED_NUM].p[0]; + ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r); + if (ret) { + return ret; + } + sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); + + return 0; +} + +int kvm_arch_get_registers(CPUState *cs) +{ + uint64_t val; + unsigned int el; + uint32_t fpr; + int i, ret; + + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + + for (i = 0; i < 31; i++) { + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]), + &env->xregs[i]); + if (ret) { + return ret; + } + } + + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]); + if (ret) { + return ret; + } + + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]); + if (ret) { + return ret; + } + + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val); + if (ret) { + return ret; + } + + env->aarch64 = ((val & PSTATE_nRW) == 0); + if (is_a64(env)) { + pstate_write(env, val); + } else { + cpsr_write(env, val, 0xffffffff, CPSRWriteRaw); + } + + /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. 
On the + * QEMU side we keep the current SP in xregs[31] as well. + */ + aarch64_restore_sp(env, 1); + + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc); + if (ret) { + return ret; + } + + /* If we are in AArch32 mode then we need to sync the AArch32 regs with the + * incoming AArch64 regs received from 64-bit KVM. + * We must perform this after all of the registers have been acquired from + * the kernel. + */ + if (!is_a64(env)) { + aarch64_sync_64_to_32(env); + } + + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]); + if (ret) { + return ret; + } + + /* Fetch the SPSR registers + * + * KVM SPSRs 0-4 map to QEMU banks 1-5 + */ + for (i = 0; i < KVM_NR_SPSR; i++) { + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(spsr[i]), + &env->banked_spsr[i + 1]); + if (ret) { + return ret; + } + } + + el = arm_current_el(env); + if (el > 0 && !is_a64(env)) { + i = bank_number(env->uncached_cpsr & CPSR_M); + env->spsr = env->banked_spsr[i]; + } + + if (cpu_isar_feature(aa64_sve, cpu)) { + ret = kvm_arch_get_sve(cs); + } else { + ret = kvm_arch_get_fpsimd(cs); + } + if (ret) { + return ret; + } + + ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr); + if (ret) { + return ret; + } + vfp_set_fpsr(env, fpr); + + ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr); + if (ret) { + return ret; + } + vfp_set_fpcr(env, fpr); + + ret = kvm_get_vcpu_events(cpu); + if (ret) { + return ret; + } + + if (!write_kvmstate_to_list(cpu)) { + return -EINVAL; + } + /* Note that it's OK to have registers which aren't in CPUState, + * so we can ignore a failure return here. + */ + write_list_to_cpustate(cpu); + + ret = kvm_arm_sync_mpstate_to_qemu(cpu); + + /* TODO: other registers */ + return ret; +} + +void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr) +{ + ram_addr_t ram_addr; + hwaddr paddr; + + assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO); + + if (acpi_ghes_present() && addr) { + ram_addr = qemu_ram_addr_from_host(addr); + if (ram_addr != RAM_ADDR_INVALID && + kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) { + kvm_hwpoison_page_add(ram_addr); + /* + * If this is a BUS_MCEERR_AR, we know we have been called + * synchronously from the vCPU thread, so we can easily + * synchronize the state and inject an error. + * + * TODO: we currently don't tell the guest at all about + * BUS_MCEERR_AO. In that case we might either be being + * called synchronously from the vCPU thread, or a bit + * later from the main thread, so doing the injection of + * the error would be more complicated. 
+             */
+            if (code == BUS_MCEERR_AR) {
+                kvm_cpu_synchronize_state(c);
+                if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
+                    kvm_inject_arm_sea(c);
+                } else {
+                    error_report("failed to record the error");
+                    abort();
+                }
+            }
+            return;
+        }
+        if (code == BUS_MCEERR_AO) {
+            error_report("Hardware memory error at addr %p for memory used by "
+                         "QEMU itself instead of guest system!", addr);
+        }
+    }
+
+    if (code == BUS_MCEERR_AR) {
+        error_report("Hardware memory error!");
+        exit(1);
+    }
+}
+
+/* C6.6.29 BRK instruction */
+static const uint32_t brk_insn = 0xd4200000;
+
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
+        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
+        return -EINVAL;
+    }
+    return 0;
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+    static uint32_t brk;
+
+    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
+        brk != brk_insn ||
+        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
+        return -EINVAL;
+    }
+    return 0;
+}
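A usage note on the "eager-split-size" accelerator property registered by kvm_arch_accel_class_init() in the patch: the setter rejects non-power-of-two values and any change made after the KVM fd has been opened, and kvm_arch_init() additionally requires the value to be one of the block sizes advertised by KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES. It is therefore supplied when the accelerator is created, along the lines of -accel kvm,eager-split-size=16k (illustrative value; the accepted sizes depend on the host's stage-2 granule support).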
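The ESR_DFSC()/ESR_DFSC_EXTABT() mapping added by the patch is compact enough to check in isolation. Below is a minimal standalone harness, not part of the patch (the main() driver and sample values are illustrative only), exercising the three encodings the macros distinguish: AArch64, AArch32 with LPAE (TTBCR.EAE == 1), and the AArch32 short-descriptor format where FS[4] lives in DFSR[10] and FS[3:0] in DFSR[3:0].

#include <assert.h>
#include <stdint.h>

/* Same definitions as in the patch. */
#define ESR_DFSC(aarch64, lpae, v) \
    ((aarch64 || (lpae)) ? ((v) & 0x3F) \
     : (((v) >> 6) | ((v) & 0x1F)))

#define ESR_DFSC_EXTABT(aarch64, lpae) \
    ((aarch64) ? 0x10 : (lpae) ? 0x10 : 0x8)

int main(void)
{
    /* AArch64: DFSC is ESR_EL1[5:0]; synchronous external abort is 0x10. */
    assert(ESR_DFSC(1, 0, 0x10) == ESR_DFSC_EXTABT(1, 0));

    /* AArch32 with TTBCR.EAE == 1: same long-descriptor encoding, 0x10. */
    assert(ESR_DFSC(0, 1, 0x10) == ESR_DFSC_EXTABT(0, 1));

    /*
     * AArch32 short descriptor: FS = {DFSR[10], DFSR[3:0]}; a synchronous
     * external abort encodes as FS = 0b01000, i.e. DFSR[3:0] = 0x8.
     * (v) >> 6 moves DFSR[10] into FS bit 4 before the low bits are ORed in.
     */
    assert(ESR_DFSC(0, 0, 0x8) == ESR_DFSC_EXTABT(0, 0));

    return 0;
}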
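Similarly, kvm_arm_sve_get_vls() treats KVM_REG_ARM64_SVE_VLS as a bitmap in which bit (vq - 1) of word i is set when vector length vq * 128 bits is supported, and recovers the largest supported vq from the most significant set bit. A worked single-word example (the bitmap value is made up; __builtin_clzll is the GCC/Clang builtin standing in for QEMU's clz64()):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* bits 0, 1 and 3 set: vq = 1, 2 and 4 (VLs 128, 256 and 512 bits) */
    uint64_t vls0 = 0x000b;

    /* most significant set bit, as in kvm_arm_sve_get_vls(); for word 0
     * the + i * 64 term is zero */
    uint32_t max_vq = 64 - __builtin_clzll(vls0);

    assert(max_vq == 4); /* largest supported VL is 4 * 128 = 512 bits */
    return 0;
}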
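Finally, the slice-size comment above kvm_arch_put_sve()/kvm_arch_get_sve() reduces to simple word counts: a ZREG holds vq * 128 bits of live data (vq * 2 host uint64_t words) and a PREG or the FFR holds vq * 16 bits, rounded up to a whole word. This standalone sketch (helper names are illustrative) reproduces the counts those functions pass to sve_bswap64():

#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* live uint64_t words in a ZREG for a given vq (VL = vq * 128 bits) */
static int zreg_u64s(int vq)
{
    return vq * 2;
}

/* live uint64_t words in a PREG/FFR: vq * 16 bits, rounded up */
static int preg_u64s(int vq)
{
    return DIV_ROUND_UP(vq * 2, 8);
}

int main(void)
{
    assert(zreg_u64s(1) == 2);   /* VL 128: matches sve_max_vq * 2          */
    assert(preg_u64s(1) == 1);   /* a 16-bit predicate still needs one word */
    assert(zreg_u64s(16) == 32); /* VL 2048 fills the whole 2048-bit slice  */
    assert(preg_u64s(16) == 4);  /* 256-bit predicate = the 256-bit slice   */
    return 0;
}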