Diffstat (limited to 'target/i386/cpu.h')
-rw-r--r-- target/i386/cpu.h | 1360
1 file changed, 1011 insertions, 349 deletions
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 730c06f80a..6b05738079 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1,4 +1,3 @@
-
/*
* i386 virtual CPU header
*
@@ -7,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -21,23 +20,20 @@
#ifndef I386_CPU_H
#define I386_CPU_H
-#include "qemu-common.h"
+#include "sysemu/tcg.h"
#include "cpu-qom.h"
-#include "hyperv-proto.h"
-
-#ifdef TARGET_X86_64
-#define TARGET_LONG_BITS 64
-#else
-#define TARGET_LONG_BITS 32
-#endif
-
+#include "kvm/hyperv-proto.h"
#include "exec/cpu-defs.h"
+#include "qapi/qapi-types-common.h"
+#include "qemu/cpu-float.h"
+#include "qemu/timer.h"
+
+#define XEN_NR_VIRQS 24
/* The x86 has a strong memory model with some store-after-load re-ordering */
#define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
-/* Maximum instruction code size */
-#define TARGET_MAX_INSN_SIZE 16
+#define KVM_HAVE_MCE_INJECTION 1
/* support for self modifying code even if the modified instruction is
close to the modifying instruction */
@@ -51,8 +47,6 @@
#define ELF_MACHINE_UNAME "i686"
#endif
-#define CPUArchState struct CPUX86State
-
enum {
R_EAX = 0,
R_ECX = 1,
@@ -177,6 +171,8 @@ typedef enum X86Seg {
#define HF_IOBPT_SHIFT 24 /* an io breakpoint enabled */
#define HF_MPX_EN_SHIFT 25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */
#define HF_MPX_IU_SHIFT 26 /* BND registers in-use */
+#define HF_UMIP_SHIFT 27 /* CR4.UMIP */
+#define HF_AVX_EN_SHIFT 28 /* AVX Enabled (CR4+XCR0) */
#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
@@ -202,6 +198,8 @@ typedef enum X86Seg {
#define HF_IOBPT_MASK (1 << HF_IOBPT_SHIFT)
#define HF_MPX_EN_MASK (1 << HF_MPX_EN_SHIFT)
#define HF_MPX_IU_MASK (1 << HF_MPX_IU_SHIFT)
+#define HF_UMIP_MASK (1 << HF_UMIP_SHIFT)
+#define HF_AVX_EN_MASK (1 << HF_AVX_EN_SHIFT)
/* hflags2 */
@@ -212,6 +210,8 @@ typedef enum X86Seg {
#define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */
#define HF2_MPX_PR_SHIFT 5 /* BNDCFGx.BNDPRESERVE */
#define HF2_NPT_SHIFT 6 /* Nested Paging enabled */
+#define HF2_IGNNE_SHIFT 7 /* Ignore CR0.NE=0 */
+#define HF2_VGIF_SHIFT 8 /* Can take VIRQ */
#define HF2_GIF_MASK (1 << HF2_GIF_SHIFT)
#define HF2_HIF_MASK (1 << HF2_HIF_SHIFT)
@@ -220,6 +220,8 @@ typedef enum X86Seg {
#define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT)
#define HF2_MPX_PR_MASK (1 << HF2_MPX_PR_SHIFT)
#define HF2_NPT_MASK (1 << HF2_NPT_SHIFT)
+#define HF2_IGNNE_MASK (1 << HF2_IGNNE_SHIFT)
+#define HF2_VGIF_MASK (1 << HF2_VGIF_SHIFT)
#define CR0_PE_SHIFT 0
#define CR0_MP_SHIFT 1
@@ -232,6 +234,8 @@ typedef enum X86Seg {
#define CR0_NE_MASK (1U << 5)
#define CR0_WP_MASK (1U << 16)
#define CR0_AM_MASK (1U << 18)
+#define CR0_NW_MASK (1U << 29)
+#define CR0_CD_MASK (1U << 30)
#define CR0_PG_MASK (1U << 31)
#define CR4_VME_MASK (1U << 0)
@@ -246,6 +250,7 @@ typedef enum X86Seg {
#define CR4_OSFXSR_SHIFT 9
#define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT)
#define CR4_OSXMMEXCPT_MASK (1U << 10)
+#define CR4_UMIP_MASK (1U << 11)
#define CR4_LA57_MASK (1U << 12)
#define CR4_VMXE_MASK (1U << 13)
#define CR4_SMXE_MASK (1U << 14)
@@ -255,6 +260,16 @@ typedef enum X86Seg {
#define CR4_SMEP_MASK (1U << 20)
#define CR4_SMAP_MASK (1U << 21)
#define CR4_PKE_MASK (1U << 22)
+#define CR4_PKS_MASK (1U << 24)
+
+#define CR4_RESERVED_MASK \
+(~(target_ulong)(CR4_VME_MASK | CR4_PVI_MASK | CR4_TSD_MASK \
+ | CR4_DE_MASK | CR4_PSE_MASK | CR4_PAE_MASK \
+ | CR4_MCE_MASK | CR4_PGE_MASK | CR4_PCE_MASK \
+ | CR4_OSFXSR_MASK | CR4_OSXMMEXCPT_MASK | CR4_UMIP_MASK \
+ | CR4_LA57_MASK \
+ | CR4_FSGSBASE_MASK | CR4_PCIDE_MASK | CR4_OSXSAVE_MASK \
+ | CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_PKE_MASK | CR4_PKS_MASK))
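
/*
 * Illustration (not part of this patch): CR4_RESERVED_MASK is the complement
 * of every CR4 bit the emulated CPU implements, so a guest MOV-to-CR4 can be
 * validated with a single AND.  The standalone sketch below repeats only a
 * few of the bits to stay short; cr4_write_ok() is an illustrative name, not
 * a QEMU API.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t target_ulong;              /* assumes a 64-bit target */

#define CR4_VME_MASK (1ULL << 0)
#define CR4_PAE_MASK (1ULL << 5)
#define CR4_PKS_MASK (1ULL << 24)
/* The real mask ORs in every implemented bit; three are enough for a demo. */
#define CR4_RESERVED_MASK \
    (~(target_ulong)(CR4_VME_MASK | CR4_PAE_MASK | CR4_PKS_MASK))

static bool cr4_write_ok(target_ulong new_cr4)
{
    /* Setting any reserved bit makes the MOV-to-CR4 raise #GP(0). */
    return (new_cr4 & CR4_RESERVED_MASK) == 0;
}

int main(void)
{
    printf("%d\n", cr4_write_ok(CR4_PAE_MASK)); /* 1: implemented bit */
    printf("%d\n", cr4_write_ok(1ULL << 27));   /* 0: reserved bit */
    return 0;
}
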
#define DR6_BD (1 << 13)
#define DR6_BS (1 << 14)
@@ -273,6 +288,8 @@ typedef enum X86Seg {
#define DR7_TYPE_IO_RW 0x2
#define DR7_TYPE_DATA_RW 0x3
+#define DR_RESERVED_MASK 0xffffffff00000000ULL
+
#define PG_PRESENT_BIT 0
#define PG_RW_BIT 1
#define PG_USER_BIT 2
@@ -297,7 +314,6 @@ typedef enum X86Seg {
#define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT)
#define PG_PSE_PAT_MASK (1 << PG_PSE_PAT_BIT)
#define PG_ADDRESS_MASK 0x000ffffffffff000LL
-#define PG_HI_RSVD_MASK (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK)
#define PG_HI_USER_MASK 0x7ff0000000000000LL
#define PG_PKRU_MASK (15ULL << PG_PKRU_BIT)
#define PG_NX_MASK (1ULL << PG_NX_BIT)
@@ -311,6 +327,19 @@ typedef enum X86Seg {
#define PG_ERROR_I_D_MASK 0x10
#define PG_ERROR_PK_MASK 0x20
+#define PG_MODE_PAE (1 << 0)
+#define PG_MODE_LMA (1 << 1)
+#define PG_MODE_NXE (1 << 2)
+#define PG_MODE_PSE (1 << 3)
+#define PG_MODE_LA57 (1 << 4)
+#define PG_MODE_SVM_MASK MAKE_64BIT_MASK(0, 15)
+
+/* Bits of CR4 that do not affect the NPT page format. */
+#define PG_MODE_WP (1 << 16)
+#define PG_MODE_PKE (1 << 17)
+#define PG_MODE_PKS (1 << 18)
+#define PG_MODE_SMEP (1 << 19)
+
#define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */
#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
#define MCG_LMCE_P (1ULL<<27) /* Local Machine Check Supported */
@@ -350,22 +379,50 @@ typedef enum X86Seg {
#define MSR_IA32_APICBASE_ENABLE (1<<11)
#define MSR_IA32_APICBASE_EXTD (1 << 10)
#define MSR_IA32_APICBASE_BASE (0xfffffU<<12)
+#define MSR_IA32_APICBASE_RESERVED \
+ (~(uint64_t)(MSR_IA32_APICBASE_BSP | MSR_IA32_APICBASE_ENABLE \
+ | MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_BASE))
+
#define MSR_IA32_FEATURE_CONTROL 0x0000003a
#define MSR_TSC_ADJUST 0x0000003b
#define MSR_IA32_SPEC_CTRL 0x48
#define MSR_VIRT_SSBD 0xc001011f
#define MSR_IA32_PRED_CMD 0x49
+#define MSR_IA32_UCODE_REV 0x8b
+#define MSR_IA32_CORE_CAPABILITY 0xcf
+
#define MSR_IA32_ARCH_CAPABILITIES 0x10a
+#define ARCH_CAP_TSX_CTRL_MSR (1<<7)
+
+#define MSR_IA32_PERF_CAPABILITIES 0x345
+#define PERF_CAP_LBR_FMT 0x3f
+
+#define MSR_IA32_TSX_CTRL 0x122
#define MSR_IA32_TSCDEADLINE 0x6e0
+#define MSR_IA32_PKRS 0x6e1
+#define MSR_ARCH_LBR_CTL 0x000014ce
+#define MSR_ARCH_LBR_DEPTH 0x000014cf
+#define MSR_ARCH_LBR_FROM_0 0x00001500
+#define MSR_ARCH_LBR_TO_0 0x00001600
+#define MSR_ARCH_LBR_INFO_0 0x00001200
#define FEATURE_CONTROL_LOCKED (1<<0)
+#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1ULL << 1)
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
+#define FEATURE_CONTROL_SGX_LC (1ULL << 17)
+#define FEATURE_CONTROL_SGX (1ULL << 18)
#define FEATURE_CONTROL_LMCE (1<<20)
+#define MSR_IA32_SGXLEPUBKEYHASH0 0x8c
+#define MSR_IA32_SGXLEPUBKEYHASH1 0x8d
+#define MSR_IA32_SGXLEPUBKEYHASH2 0x8e
+#define MSR_IA32_SGXLEPUBKEYHASH3 0x8f
+
#define MSR_P6_PERFCTR0 0xc1
#define MSR_IA32_SMBASE 0x9e
#define MSR_SMI_COUNT 0x34
+#define MSR_CORE_THREAD_COUNT 0x35
#define MSR_MTRRcap 0xfe
#define MSR_MTRRcap_VCNT 8
#define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8)
@@ -387,6 +444,7 @@ typedef enum X86Seg {
#define MSR_IA32_MISC_ENABLE 0x1a0
/* Indicates good rep/movs microcode on some processors: */
#define MSR_IA32_MISC_ENABLE_DEFAULT 1
+#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18)
#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg))
#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1)
@@ -446,6 +504,11 @@ typedef enum X86Seg {
#define MSR_EFER_SVME (1 << 12)
#define MSR_EFER_FFXSR (1 << 14)
+#define MSR_EFER_RESERVED\
+ (~(target_ulong)(MSR_EFER_SCE | MSR_EFER_LME\
+ | MSR_EFER_LMA | MSR_EFER_NXE | MSR_EFER_SVME\
+ | MSR_EFER_FFXSR))
+
#define MSR_STAR 0xc0000081
#define MSR_LSTAR 0xc0000082
#define MSR_CSTAR 0xc0000083
@@ -454,11 +517,40 @@ typedef enum X86Seg {
#define MSR_GSBASE 0xc0000101
#define MSR_KERNELGSBASE 0xc0000102
#define MSR_TSC_AUX 0xc0000103
+#define MSR_AMD64_TSC_RATIO 0xc0000104
+
+#define MSR_AMD64_TSC_RATIO_DEFAULT 0x100000000ULL
#define MSR_VM_HSAVE_PA 0xc0010117
+#define MSR_IA32_XFD 0x000001c4
+#define MSR_IA32_XFD_ERR 0x000001c5
+
#define MSR_IA32_BNDCFGS 0x00000d90
#define MSR_IA32_XSS 0x00000da0
+#define MSR_IA32_UMWAIT_CONTROL 0xe1
+
+#define MSR_IA32_VMX_BASIC 0x00000480
+#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
+#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
+#define MSR_IA32_VMX_EXIT_CTLS 0x00000483
+#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484
+#define MSR_IA32_VMX_MISC 0x00000485
+#define MSR_IA32_VMX_CR0_FIXED0 0x00000486
+#define MSR_IA32_VMX_CR0_FIXED1 0x00000487
+#define MSR_IA32_VMX_CR4_FIXED0 0x00000488
+#define MSR_IA32_VMX_CR4_FIXED1 0x00000489
+#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a
+#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
+#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
+#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x0000048d
+#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e
+#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f
+#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490
+#define MSR_IA32_VMX_VMFUNC 0x00000491
+
+#define MSR_APIC_START 0x00000800
+#define MSR_APIC_END 0x000008ff
#define XSTATE_FP_BIT 0
#define XSTATE_SSE_BIT 1
@@ -469,6 +561,9 @@ typedef enum X86Seg {
#define XSTATE_ZMM_Hi256_BIT 6
#define XSTATE_Hi16_ZMM_BIT 7
#define XSTATE_PKRU_BIT 9
+#define XSTATE_ARCH_LBR_BIT 15
+#define XSTATE_XTILE_CFG_BIT 17
+#define XSTATE_XTILE_DATA_BIT 18
#define XSTATE_FP_MASK (1ULL << XSTATE_FP_BIT)
#define XSTATE_SSE_MASK (1ULL << XSTATE_SSE_BIT)
@@ -479,6 +574,26 @@ typedef enum X86Seg {
#define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT)
#define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT)
#define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT)
+#define XSTATE_ARCH_LBR_MASK (1ULL << XSTATE_ARCH_LBR_BIT)
+#define XSTATE_XTILE_CFG_MASK (1ULL << XSTATE_XTILE_CFG_BIT)
+#define XSTATE_XTILE_DATA_MASK (1ULL << XSTATE_XTILE_DATA_BIT)
+
+#define XSTATE_DYNAMIC_MASK (XSTATE_XTILE_DATA_MASK)
+
+#define ESA_FEATURE_ALIGN64_BIT 1
+#define ESA_FEATURE_XFD_BIT 2
+
+#define ESA_FEATURE_ALIGN64_MASK (1U << ESA_FEATURE_ALIGN64_BIT)
+#define ESA_FEATURE_XFD_MASK (1U << ESA_FEATURE_XFD_BIT)
+
+
+/* CPUID feature bits available in XCR0 */
+#define CPUID_XSTATE_XCR0_MASK (XSTATE_FP_MASK | XSTATE_SSE_MASK | \
+ XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | \
+ XSTATE_BNDCSR_MASK | XSTATE_OPMASK_MASK | \
+ XSTATE_ZMM_Hi256_MASK | \
+ XSTATE_Hi16_ZMM_MASK | XSTATE_PKRU_MASK | \
+ XSTATE_XTILE_CFG_MASK | XSTATE_XTILE_DATA_MASK)
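
/*
 * Illustration (not part of this patch): CPUID_XSTATE_XCR0_MASK limits which
 * XSTATE components may be exposed through XCR0.  The standalone sketch below
 * shows the checks a guest XSETBV value must pass; xcr0_write_ok() is an
 * illustrative name, and only three component masks are repeated here.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define XSTATE_FP_MASK  (1ULL << 0)
#define XSTATE_SSE_MASK (1ULL << 1)
#define XSTATE_YMM_MASK (1ULL << 2)

static bool xcr0_write_ok(uint64_t new_xcr0, uint64_t supported)
{
    if (new_xcr0 & ~supported) {
        return false;   /* component not offered to this guest */
    }
    if (!(new_xcr0 & XSTATE_FP_MASK)) {
        return false;   /* x87 state can never be disabled */
    }
    if ((new_xcr0 & XSTATE_YMM_MASK) && !(new_xcr0 & XSTATE_SSE_MASK)) {
        return false;   /* AVX state requires SSE state */
    }
    return true;
}

int main(void)
{
    uint64_t supported = XSTATE_FP_MASK | XSTATE_SSE_MASK | XSTATE_YMM_MASK;

    printf("%d\n", xcr0_write_ok(supported, supported));               /* 1 */
    printf("%d\n", xcr0_write_ok(XSTATE_FP_MASK | XSTATE_YMM_MASK,
                                 supported));                          /* 0 */
    return 0;
}
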
/* CPUID feature words */
typedef enum FeatureWord {
@@ -487,25 +602,46 @@ typedef enum FeatureWord {
FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */
FEAT_7_0_ECX, /* CPUID[EAX=7,ECX=0].ECX */
FEAT_7_0_EDX, /* CPUID[EAX=7,ECX=0].EDX */
+ FEAT_7_1_EAX, /* CPUID[EAX=7,ECX=1].EAX */
FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */
FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */
FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */
FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */
+ FEAT_8000_0021_EAX, /* CPUID[8000_0021].EAX */
FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
FEAT_KVM_HINTS, /* CPUID[4000_0001].EDX */
- FEAT_HYPERV_EAX, /* CPUID[4000_0003].EAX */
- FEAT_HYPERV_EBX, /* CPUID[4000_0003].EBX */
- FEAT_HYPERV_EDX, /* CPUID[4000_0003].EDX */
FEAT_SVM, /* CPUID[8000_000A].EDX */
FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */
FEAT_6_EAX, /* CPUID[6].EAX */
- FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */
- FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
+ FEAT_XSAVE_XCR0_LO, /* CPUID[EAX=0xd,ECX=0].EAX */
+ FEAT_XSAVE_XCR0_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
+ FEAT_ARCH_CAPABILITIES,
+ FEAT_CORE_CAPABILITY,
+ FEAT_PERF_CAPABILITIES,
+ FEAT_VMX_PROCBASED_CTLS,
+ FEAT_VMX_SECONDARY_CTLS,
+ FEAT_VMX_PINBASED_CTLS,
+ FEAT_VMX_EXIT_CTLS,
+ FEAT_VMX_ENTRY_CTLS,
+ FEAT_VMX_MISC,
+ FEAT_VMX_EPT_VPID_CAPS,
+ FEAT_VMX_BASIC,
+ FEAT_VMX_VMFUNC,
+ FEAT_14_0_ECX,
+ FEAT_SGX_12_0_EAX, /* CPUID[EAX=0x12,ECX=0].EAX (SGX) */
+ FEAT_SGX_12_0_EBX, /* CPUID[EAX=0x12,ECX=0].EBX (SGX MISCSELECT[31:0]) */
+ FEAT_SGX_12_1_EAX, /* CPUID[EAX=0x12,ECX=1].EAX (SGX ATTRIBUTES[31:0]) */
+ FEAT_XSAVE_XSS_LO, /* CPUID[EAX=0xd,ECX=1].ECX */
+ FEAT_XSAVE_XSS_HI, /* CPUID[EAX=0xd,ECX=1].EDX */
+ FEAT_7_1_EDX, /* CPUID[EAX=7,ECX=1].EDX */
+ FEAT_7_2_EDX, /* CPUID[EAX=7,ECX=2].EDX */
FEATURE_WORDS,
} FeatureWord;
-typedef uint32_t FeatureWordArray[FEATURE_WORDS];
+typedef uint64_t FeatureWordArray[FEATURE_WORDS];
+uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
+ bool migratable_only);
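
/*
 * Sketch (not part of this patch, compiles only against QEMU's internal
 * headers): with FeatureWordArray widened to 64 bits, per-word filtering
 * against what the host can provide looks like this.  filter_features() is
 * an illustrative helper, not the actual QEMU routine; it mirrors the role
 * of the filtered_features field declared later in this header.
 */
static void filter_features(FeatureWordArray requested,
                            FeatureWordArray filtered_out)
{
    for (int w = 0; w < FEATURE_WORDS; w++) {
        uint64_t host = x86_cpu_get_supported_feature_word((FeatureWord)w,
                                                           true);

        filtered_out[w] = requested[w] & ~host; /* bits that had to be dropped */
        requested[w] &= host;                   /* bits the guest actually gets */
    }
}
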
/* cpuid_features bits */
#define CPUID_FP87 (1U << 0)
@@ -599,7 +735,7 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_EXT2_3DNOWEXT (1U << 30)
#define CPUID_EXT2_3DNOW (1U << 31)
-/* CPUID[8000_0001].EDX bits that are aliase of CPUID[1].EDX bits on AMD CPUs */
+/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
CPUID_EXT2_DE | CPUID_EXT2_PSE | \
CPUID_EXT2_TSC | CPUID_EXT2_MSR | \
@@ -633,70 +769,215 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_EXT3_PERFCORE (1U << 23)
#define CPUID_EXT3_PERFNB (1U << 24)
-#define CPUID_SVM_NPT (1U << 0)
-#define CPUID_SVM_LBRV (1U << 1)
-#define CPUID_SVM_SVMLOCK (1U << 2)
-#define CPUID_SVM_NRIPSAVE (1U << 3)
-#define CPUID_SVM_TSCSCALE (1U << 4)
-#define CPUID_SVM_VMCBCLEAN (1U << 5)
-#define CPUID_SVM_FLUSHASID (1U << 6)
-#define CPUID_SVM_DECODEASSIST (1U << 7)
-#define CPUID_SVM_PAUSEFILTER (1U << 10)
-#define CPUID_SVM_PFTHRESHOLD (1U << 12)
-
-#define CPUID_7_0_EBX_FSGSBASE (1U << 0)
-#define CPUID_7_0_EBX_BMI1 (1U << 3)
-#define CPUID_7_0_EBX_HLE (1U << 4)
-#define CPUID_7_0_EBX_AVX2 (1U << 5)
-#define CPUID_7_0_EBX_SMEP (1U << 7)
-#define CPUID_7_0_EBX_BMI2 (1U << 8)
-#define CPUID_7_0_EBX_ERMS (1U << 9)
-#define CPUID_7_0_EBX_INVPCID (1U << 10)
-#define CPUID_7_0_EBX_RTM (1U << 11)
-#define CPUID_7_0_EBX_MPX (1U << 14)
-#define CPUID_7_0_EBX_AVX512F (1U << 16) /* AVX-512 Foundation */
-#define CPUID_7_0_EBX_AVX512DQ (1U << 17) /* AVX-512 Doubleword & Quadword Instrs */
-#define CPUID_7_0_EBX_RDSEED (1U << 18)
-#define CPUID_7_0_EBX_ADX (1U << 19)
-#define CPUID_7_0_EBX_SMAP (1U << 20)
-#define CPUID_7_0_EBX_AVX512IFMA (1U << 21) /* AVX-512 Integer Fused Multiply Add */
-#define CPUID_7_0_EBX_PCOMMIT (1U << 22) /* Persistent Commit */
-#define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23) /* Flush a Cache Line Optimized */
-#define CPUID_7_0_EBX_CLWB (1U << 24) /* Cache Line Write Back */
-#define CPUID_7_0_EBX_INTEL_PT (1U << 25) /* Intel Processor Trace */
-#define CPUID_7_0_EBX_AVX512PF (1U << 26) /* AVX-512 Prefetch */
-#define CPUID_7_0_EBX_AVX512ER (1U << 27) /* AVX-512 Exponential and Reciprocal */
-#define CPUID_7_0_EBX_AVX512CD (1U << 28) /* AVX-512 Conflict Detection */
-#define CPUID_7_0_EBX_SHA_NI (1U << 29) /* SHA1/SHA256 Instruction Extensions */
-#define CPUID_7_0_EBX_AVX512BW (1U << 30) /* AVX-512 Byte and Word Instructions */
-#define CPUID_7_0_EBX_AVX512VL (1U << 31) /* AVX-512 Vector Length Extensions */
-
-#define CPUID_7_0_ECX_AVX512BMI (1U << 1)
-#define CPUID_7_0_ECX_VBMI (1U << 1) /* AVX-512 Vector Byte Manipulation Instrs */
-#define CPUID_7_0_ECX_UMIP (1U << 2)
-#define CPUID_7_0_ECX_PKU (1U << 3)
-#define CPUID_7_0_ECX_OSPKE (1U << 4)
-#define CPUID_7_0_ECX_VBMI2 (1U << 6) /* Additional VBMI Instrs */
-#define CPUID_7_0_ECX_GFNI (1U << 8)
-#define CPUID_7_0_ECX_VAES (1U << 9)
-#define CPUID_7_0_ECX_VPCLMULQDQ (1U << 10)
-#define CPUID_7_0_ECX_AVX512VNNI (1U << 11)
-#define CPUID_7_0_ECX_AVX512BITALG (1U << 12)
-#define CPUID_7_0_ECX_AVX512_VPOPCNTDQ (1U << 14) /* POPCNT for vectors of DW/QW */
-#define CPUID_7_0_ECX_LA57 (1U << 16)
-#define CPUID_7_0_ECX_RDPID (1U << 22)
-#define CPUID_7_0_ECX_CLDEMOTE (1U << 25) /* CLDEMOTE Instruction */
-
-#define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2) /* AVX512 Neural Network Instructions */
-#define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3) /* AVX512 Multiply Accumulation Single Precision */
-#define CPUID_7_0_EDX_PCONFIG (1U << 18) /* Platform Configuration */
-#define CPUID_7_0_EDX_SPEC_CTRL (1U << 26) /* Speculation Control */
-#define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29) /*Arch Capabilities*/
-#define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31) /* Speculative Store Bypass Disable */
-
-#define CPUID_8000_0008_EBX_WBNOINVD (1U << 9) /* Write back and
- do not invalidate cache */
-#define CPUID_8000_0008_EBX_IBPB (1U << 12) /* Indirect Branch Prediction Barrier */
+#define CPUID_SVM_NPT (1U << 0)
+#define CPUID_SVM_LBRV (1U << 1)
+#define CPUID_SVM_SVMLOCK (1U << 2)
+#define CPUID_SVM_NRIPSAVE (1U << 3)
+#define CPUID_SVM_TSCSCALE (1U << 4)
+#define CPUID_SVM_VMCBCLEAN (1U << 5)
+#define CPUID_SVM_FLUSHASID (1U << 6)
+#define CPUID_SVM_DECODEASSIST (1U << 7)
+#define CPUID_SVM_PAUSEFILTER (1U << 10)
+#define CPUID_SVM_PFTHRESHOLD (1U << 12)
+#define CPUID_SVM_AVIC (1U << 13)
+#define CPUID_SVM_V_VMSAVE_VMLOAD (1U << 15)
+#define CPUID_SVM_VGIF (1U << 16)
+#define CPUID_SVM_VNMI (1U << 25)
+#define CPUID_SVM_SVME_ADDR_CHK (1U << 28)
+
+/* Support RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */
+#define CPUID_7_0_EBX_FSGSBASE (1U << 0)
+/* Support SGX */
+#define CPUID_7_0_EBX_SGX (1U << 2)
+/* 1st Group of Advanced Bit Manipulation Extensions */
+#define CPUID_7_0_EBX_BMI1 (1U << 3)
+/* Hardware Lock Elision */
+#define CPUID_7_0_EBX_HLE (1U << 4)
+/* Intel Advanced Vector Extensions 2 */
+#define CPUID_7_0_EBX_AVX2 (1U << 5)
+/* Supervisor-mode Execution Prevention */
+#define CPUID_7_0_EBX_SMEP (1U << 7)
+/* 2nd Group of Advanced Bit Manipulation Extensions */
+#define CPUID_7_0_EBX_BMI2 (1U << 8)
+/* Enhanced REP MOVSB/STOSB */
+#define CPUID_7_0_EBX_ERMS (1U << 9)
+/* Invalidate Process-Context Identifier */
+#define CPUID_7_0_EBX_INVPCID (1U << 10)
+/* Restricted Transactional Memory */
+#define CPUID_7_0_EBX_RTM (1U << 11)
+/* Memory Protection Extension */
+#define CPUID_7_0_EBX_MPX (1U << 14)
+/* AVX-512 Foundation */
+#define CPUID_7_0_EBX_AVX512F (1U << 16)
+/* AVX-512 Doubleword & Quadword Instruction */
+#define CPUID_7_0_EBX_AVX512DQ (1U << 17)
+/* Read Random SEED */
+#define CPUID_7_0_EBX_RDSEED (1U << 18)
+/* ADCX and ADOX instructions */
+#define CPUID_7_0_EBX_ADX (1U << 19)
+/* Supervisor Mode Access Prevention */
+#define CPUID_7_0_EBX_SMAP (1U << 20)
+/* AVX-512 Integer Fused Multiply Add */
+#define CPUID_7_0_EBX_AVX512IFMA (1U << 21)
+/* Persistent Commit */
+#define CPUID_7_0_EBX_PCOMMIT (1U << 22)
+/* Flush a Cache Line Optimized */
+#define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23)
+/* Cache Line Write Back */
+#define CPUID_7_0_EBX_CLWB (1U << 24)
+/* Intel Processor Trace */
+#define CPUID_7_0_EBX_INTEL_PT (1U << 25)
+/* AVX-512 Prefetch */
+#define CPUID_7_0_EBX_AVX512PF (1U << 26)
+/* AVX-512 Exponential and Reciprocal */
+#define CPUID_7_0_EBX_AVX512ER (1U << 27)
+/* AVX-512 Conflict Detection */
+#define CPUID_7_0_EBX_AVX512CD (1U << 28)
+/* SHA1/SHA256 Instruction Extensions */
+#define CPUID_7_0_EBX_SHA_NI (1U << 29)
+/* AVX-512 Byte and Word Instructions */
+#define CPUID_7_0_EBX_AVX512BW (1U << 30)
+/* AVX-512 Vector Length Extensions */
+#define CPUID_7_0_EBX_AVX512VL (1U << 31)
+
+/* AVX-512 Vector Byte Manipulation Instruction */
+#define CPUID_7_0_ECX_AVX512_VBMI (1U << 1)
+/* User-Mode Instruction Prevention */
+#define CPUID_7_0_ECX_UMIP (1U << 2)
+/* Protection Keys for User-mode Pages */
+#define CPUID_7_0_ECX_PKU (1U << 3)
+/* OS Enable Protection Keys */
+#define CPUID_7_0_ECX_OSPKE (1U << 4)
+/* UMONITOR/UMWAIT/TPAUSE Instructions */
+#define CPUID_7_0_ECX_WAITPKG (1U << 5)
+/* Additional AVX-512 Vector Byte Manipulation Instruction */
+#define CPUID_7_0_ECX_AVX512_VBMI2 (1U << 6)
+/* Galois Field New Instructions */
+#define CPUID_7_0_ECX_GFNI (1U << 8)
+/* Vector AES Instructions */
+#define CPUID_7_0_ECX_VAES (1U << 9)
+/* Carry-Less Multiplication Quadword */
+#define CPUID_7_0_ECX_VPCLMULQDQ (1U << 10)
+/* Vector Neural Network Instructions */
+#define CPUID_7_0_ECX_AVX512VNNI (1U << 11)
+/* Support for VPOPCNT[B,W] and VPSHUFBITQMB */
+#define CPUID_7_0_ECX_AVX512BITALG (1U << 12)
+/* POPCNT for vectors of DW/QW */
+#define CPUID_7_0_ECX_AVX512_VPOPCNTDQ (1U << 14)
+/* 5-level Page Tables */
+#define CPUID_7_0_ECX_LA57 (1U << 16)
+/* Read Processor ID */
+#define CPUID_7_0_ECX_RDPID (1U << 22)
+/* Bus Lock Debug Exception */
+#define CPUID_7_0_ECX_BUS_LOCK_DETECT (1U << 24)
+/* Cache Line Demote Instruction */
+#define CPUID_7_0_ECX_CLDEMOTE (1U << 25)
+/* Move Doubleword as Direct Store Instruction */
+#define CPUID_7_0_ECX_MOVDIRI (1U << 27)
+/* Move 64 Bytes as Direct Store Instruction */
+#define CPUID_7_0_ECX_MOVDIR64B (1U << 28)
+/* Support SGX Launch Control */
+#define CPUID_7_0_ECX_SGX_LC (1U << 30)
+/* Protection Keys for Supervisor-mode Pages */
+#define CPUID_7_0_ECX_PKS (1U << 31)
+
+/* AVX512 Neural Network Instructions */
+#define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2)
+/* AVX512 Multiply Accumulation Single Precision */
+#define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3)
+/* Fast Short Rep Mov */
+#define CPUID_7_0_EDX_FSRM (1U << 4)
+/* AVX512 Vector Pair Intersection to a Pair of Mask Registers */
+#define CPUID_7_0_EDX_AVX512_VP2INTERSECT (1U << 8)
+/* SERIALIZE instruction */
+#define CPUID_7_0_EDX_SERIALIZE (1U << 14)
+/* TSX Suspend Load Address Tracking instruction */
+#define CPUID_7_0_EDX_TSX_LDTRK (1U << 16)
+/* Architectural LBRs */
+#define CPUID_7_0_EDX_ARCH_LBR (1U << 19)
+/* AMX_BF16 instruction */
+#define CPUID_7_0_EDX_AMX_BF16 (1U << 22)
+/* AVX512_FP16 instruction */
+#define CPUID_7_0_EDX_AVX512_FP16 (1U << 23)
+/* AMX tile (two-dimensional register) */
+#define CPUID_7_0_EDX_AMX_TILE (1U << 24)
+/* AMX_INT8 instruction */
+#define CPUID_7_0_EDX_AMX_INT8 (1U << 25)
+/* Speculation Control */
+#define CPUID_7_0_EDX_SPEC_CTRL (1U << 26)
+/* Single Thread Indirect Branch Predictors */
+#define CPUID_7_0_EDX_STIBP (1U << 27)
+/* Flush L1D cache */
+#define CPUID_7_0_EDX_FLUSH_L1D (1U << 28)
+/* Arch Capabilities */
+#define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29)
+/* Core Capability */
+#define CPUID_7_0_EDX_CORE_CAPABILITY (1U << 30)
+/* Speculative Store Bypass Disable */
+#define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31)
+
+/* AVX VNNI Instruction */
+#define CPUID_7_1_EAX_AVX_VNNI (1U << 4)
+/* AVX512 BFloat16 Instruction */
+#define CPUID_7_1_EAX_AVX512_BF16 (1U << 5)
+/* CMPCCXADD Instructions */
+#define CPUID_7_1_EAX_CMPCCXADD (1U << 7)
+/* Fast Zero REP MOVS */
+#define CPUID_7_1_EAX_FZRM (1U << 10)
+/* Fast Short REP STOS */
+#define CPUID_7_1_EAX_FSRS (1U << 11)
+/* Fast Short REP CMPS/SCAS */
+#define CPUID_7_1_EAX_FSRC (1U << 12)
+/* Support Tile Computational Operations on FP16 Numbers */
+#define CPUID_7_1_EAX_AMX_FP16 (1U << 21)
+/* Support for VPMADD52[H,L]UQ */
+#define CPUID_7_1_EAX_AVX_IFMA (1U << 23)
+
+/* Support for VPDPB[SU,UU,SS]D[,S] */
+#define CPUID_7_1_EDX_AVX_VNNI_INT8 (1U << 4)
+/* AVX NE CONVERT Instructions */
+#define CPUID_7_1_EDX_AVX_NE_CONVERT (1U << 5)
+/* AMX COMPLEX Instructions */
+#define CPUID_7_1_EDX_AMX_COMPLEX (1U << 8)
+/* PREFETCHIT0/1 Instructions */
+#define CPUID_7_1_EDX_PREFETCHITI (1U << 14)
+
+/* Do not exhibit MXCSR Configuration Dependent Timing (MCDT) behavior */
+#define CPUID_7_2_EDX_MCDT_NO (1U << 5)
+
+/* XFD Extend Feature Disabled */
+#define CPUID_D_1_EAX_XFD (1U << 4)
+
+/* Packets which contain IP payload have LIP values */
+#define CPUID_14_0_ECX_LIP (1U << 31)
+
+/* CLZERO instruction */
+#define CPUID_8000_0008_EBX_CLZERO (1U << 0)
+/* Always save/restore FP error pointers */
+#define CPUID_8000_0008_EBX_XSAVEERPTR (1U << 2)
+/* Write back and do not invalidate cache */
+#define CPUID_8000_0008_EBX_WBNOINVD (1U << 9)
+/* Indirect Branch Prediction Barrier */
+#define CPUID_8000_0008_EBX_IBPB (1U << 12)
+/* Indirect Branch Restricted Speculation */
+#define CPUID_8000_0008_EBX_IBRS (1U << 14)
+/* Single Thread Indirect Branch Predictors */
+#define CPUID_8000_0008_EBX_STIBP (1U << 15)
+/* STIBP mode has enhanced performance and may be left always on */
+#define CPUID_8000_0008_EBX_STIBP_ALWAYS_ON (1U << 17)
+/* Speculative Store Bypass Disable */
+#define CPUID_8000_0008_EBX_AMD_SSBD (1U << 24)
+/* Predictive Store Forwarding Disable */
+#define CPUID_8000_0008_EBX_AMD_PSFD (1U << 28)
+
+/* Processor ignores nested data breakpoints */
+#define CPUID_8000_0021_EAX_No_NESTED_DATA_BP (1U << 0)
+/* LFENCE is always serializing */
+#define CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING (1U << 2)
+/* Null Selector Clears Base */
+#define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE (1U << 6)
+/* Automatic IBRS */
+#define CPUID_8000_0021_EAX_AUTO_IBRS (1U << 8)
#define CPUID_XSAVE_XSAVEOPT (1U << 0)
#define CPUID_XSAVE_XSAVEC (1U << 1)
@@ -722,6 +1003,15 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_VENDOR_VIA "CentaurHauls"
+#define CPUID_VENDOR_HYGON "HygonGenuine"
+
+#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
+ (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
+ (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
+#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
+ (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
+ (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
+
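
/*
 * Illustration (not part of this patch): cpuid_vendor1/2/3 hold the CPUID
 * leaf 0 vendor string as the three little-endian words returned in EBX, EDX
 * and ECX, which is what IS_INTEL_CPU()/IS_AMD_CPU() compare against.  The
 * standalone sketch below shows the packing; vendor_str_to_words() is an
 * illustrative helper.
 */
#include <stdint.h>
#include <stdio.h>

static void vendor_str_to_words(const char *v,
                                uint32_t *w1, uint32_t *w2, uint32_t *w3)
{
    uint32_t w[3];

    /* Byte 0 of the 12-character string is the low byte of EBX. */
    for (int i = 0; i < 3; i++) {
        w[i] = (uint8_t)v[4 * i] |
               ((uint32_t)(uint8_t)v[4 * i + 1] << 8) |
               ((uint32_t)(uint8_t)v[4 * i + 2] << 16) |
               ((uint32_t)(uint8_t)v[4 * i + 3] << 24);
    }
    *w1 = w[0];
    *w2 = w[1];
    *w3 = w[2];
}

int main(void)
{
    uint32_t ebx, edx, ecx;

    vendor_str_to_words("GenuineIntel", &ebx, &edx, &ecx);
    /* Prints 756e6547 49656e69 6c65746e, i.e. CPUID_VENDOR_INTEL_1/2/3. */
    printf("%08x %08x %08x\n", ebx, edx, ecx);
    return 0;
}
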
#define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */
#define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */
@@ -729,9 +1019,167 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_TOPOLOGY_LEVEL_INVALID (0U << 8)
#define CPUID_TOPOLOGY_LEVEL_SMT (1U << 8)
#define CPUID_TOPOLOGY_LEVEL_CORE (2U << 8)
-
-#ifndef HYPERV_SPINLOCK_NEVER_RETRY
-#define HYPERV_SPINLOCK_NEVER_RETRY 0xFFFFFFFF
+#define CPUID_TOPOLOGY_LEVEL_DIE (5U << 8)
+
+/* MSR Feature Bits */
+#define MSR_ARCH_CAP_RDCL_NO (1U << 0)
+#define MSR_ARCH_CAP_IBRS_ALL (1U << 1)
+#define MSR_ARCH_CAP_RSBA (1U << 2)
+#define MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY (1U << 3)
+#define MSR_ARCH_CAP_SSB_NO (1U << 4)
+#define MSR_ARCH_CAP_MDS_NO (1U << 5)
+#define MSR_ARCH_CAP_PSCHANGE_MC_NO (1U << 6)
+#define MSR_ARCH_CAP_TSX_CTRL_MSR (1U << 7)
+#define MSR_ARCH_CAP_TAA_NO (1U << 8)
+#define MSR_ARCH_CAP_SBDR_SSDP_NO (1U << 13)
+#define MSR_ARCH_CAP_FBSDP_NO (1U << 14)
+#define MSR_ARCH_CAP_PSDP_NO (1U << 15)
+#define MSR_ARCH_CAP_FB_CLEAR (1U << 17)
+#define MSR_ARCH_CAP_PBRSB_NO (1U << 24)
+
+#define MSR_CORE_CAP_SPLIT_LOCK_DETECT (1U << 5)
+
+/* VMX MSR features */
+#define MSR_VMX_BASIC_VMCS_REVISION_MASK 0x7FFFFFFFull
+#define MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK (0x00001FFFull << 32)
+#define MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK (0x003C0000ull << 32)
+#define MSR_VMX_BASIC_DUAL_MONITOR (1ULL << 49)
+#define MSR_VMX_BASIC_INS_OUTS (1ULL << 54)
+#define MSR_VMX_BASIC_TRUE_CTLS (1ULL << 55)
+#define MSR_VMX_BASIC_ANY_ERRCODE (1ULL << 56)
+
+#define MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK 0x1Full
+#define MSR_VMX_MISC_STORE_LMA (1ULL << 5)
+#define MSR_VMX_MISC_ACTIVITY_HLT (1ULL << 6)
+#define MSR_VMX_MISC_ACTIVITY_SHUTDOWN (1ULL << 7)
+#define MSR_VMX_MISC_ACTIVITY_WAIT_SIPI (1ULL << 8)
+#define MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK 0x0E000000ull
+#define MSR_VMX_MISC_VMWRITE_VMEXIT (1ULL << 29)
+#define MSR_VMX_MISC_ZERO_LEN_INJECT (1ULL << 30)
+
+#define MSR_VMX_EPT_EXECONLY (1ULL << 0)
+#define MSR_VMX_EPT_PAGE_WALK_LENGTH_4 (1ULL << 6)
+#define MSR_VMX_EPT_PAGE_WALK_LENGTH_5 (1ULL << 7)
+#define MSR_VMX_EPT_UC (1ULL << 8)
+#define MSR_VMX_EPT_WB (1ULL << 14)
+#define MSR_VMX_EPT_2MB (1ULL << 16)
+#define MSR_VMX_EPT_1GB (1ULL << 17)
+#define MSR_VMX_EPT_INVEPT (1ULL << 20)
+#define MSR_VMX_EPT_AD_BITS (1ULL << 21)
+#define MSR_VMX_EPT_ADVANCED_VMEXIT_INFO (1ULL << 22)
+#define MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT (1ULL << 25)
+#define MSR_VMX_EPT_INVEPT_ALL_CONTEXT (1ULL << 26)
+#define MSR_VMX_EPT_INVVPID (1ULL << 32)
+#define MSR_VMX_EPT_INVVPID_SINGLE_ADDR (1ULL << 40)
+#define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT (1ULL << 41)
+#define MSR_VMX_EPT_INVVPID_ALL_CONTEXT (1ULL << 42)
+#define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS (1ULL << 43)
+
+#define MSR_VMX_VMFUNC_EPT_SWITCHING (1ULL << 0)
+
+
+/* VMX controls */
+#define VMX_CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004
+#define VMX_CPU_BASED_USE_TSC_OFFSETING 0x00000008
+#define VMX_CPU_BASED_HLT_EXITING 0x00000080
+#define VMX_CPU_BASED_INVLPG_EXITING 0x00000200
+#define VMX_CPU_BASED_MWAIT_EXITING 0x00000400
+#define VMX_CPU_BASED_RDPMC_EXITING 0x00000800
+#define VMX_CPU_BASED_RDTSC_EXITING 0x00001000
+#define VMX_CPU_BASED_CR3_LOAD_EXITING 0x00008000
+#define VMX_CPU_BASED_CR3_STORE_EXITING 0x00010000
+#define VMX_CPU_BASED_CR8_LOAD_EXITING 0x00080000
+#define VMX_CPU_BASED_CR8_STORE_EXITING 0x00100000
+#define VMX_CPU_BASED_TPR_SHADOW 0x00200000
+#define VMX_CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000
+#define VMX_CPU_BASED_MOV_DR_EXITING 0x00800000
+#define VMX_CPU_BASED_UNCOND_IO_EXITING 0x01000000
+#define VMX_CPU_BASED_USE_IO_BITMAPS 0x02000000
+#define VMX_CPU_BASED_MONITOR_TRAP_FLAG 0x08000000
+#define VMX_CPU_BASED_USE_MSR_BITMAPS 0x10000000
+#define VMX_CPU_BASED_MONITOR_EXITING 0x20000000
+#define VMX_CPU_BASED_PAUSE_EXITING 0x40000000
+#define VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000
+
+#define VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
+#define VMX_SECONDARY_EXEC_ENABLE_EPT 0x00000002
+#define VMX_SECONDARY_EXEC_DESC 0x00000004
+#define VMX_SECONDARY_EXEC_RDTSCP 0x00000008
+#define VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010
+#define VMX_SECONDARY_EXEC_ENABLE_VPID 0x00000020
+#define VMX_SECONDARY_EXEC_WBINVD_EXITING 0x00000040
+#define VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
+#define VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100
+#define VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200
+#define VMX_SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400
+#define VMX_SECONDARY_EXEC_RDRAND_EXITING 0x00000800
+#define VMX_SECONDARY_EXEC_ENABLE_INVPCID 0x00001000
+#define VMX_SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000
+#define VMX_SECONDARY_EXEC_SHADOW_VMCS 0x00004000
+#define VMX_SECONDARY_EXEC_ENCLS_EXITING 0x00008000
+#define VMX_SECONDARY_EXEC_RDSEED_EXITING 0x00010000
+#define VMX_SECONDARY_EXEC_ENABLE_PML 0x00020000
+#define VMX_SECONDARY_EXEC_XSAVES 0x00100000
+#define VMX_SECONDARY_EXEC_TSC_SCALING 0x02000000
+#define VMX_SECONDARY_EXEC_ENABLE_USER_WAIT_PAUSE 0x04000000
+
+#define VMX_PIN_BASED_EXT_INTR_MASK 0x00000001
+#define VMX_PIN_BASED_NMI_EXITING 0x00000008
+#define VMX_PIN_BASED_VIRTUAL_NMIS 0x00000020
+#define VMX_PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040
+#define VMX_PIN_BASED_POSTED_INTR 0x00000080
+
+#define VMX_VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004
+#define VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200
+#define VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000
+#define VMX_VM_EXIT_ACK_INTR_ON_EXIT 0x00008000
+#define VMX_VM_EXIT_SAVE_IA32_PAT 0x00040000
+#define VMX_VM_EXIT_LOAD_IA32_PAT 0x00080000
+#define VMX_VM_EXIT_SAVE_IA32_EFER 0x00100000
+#define VMX_VM_EXIT_LOAD_IA32_EFER 0x00200000
+#define VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000
+#define VMX_VM_EXIT_CLEAR_BNDCFGS 0x00800000
+#define VMX_VM_EXIT_PT_CONCEAL_PIP 0x01000000
+#define VMX_VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000
+#define VMX_VM_EXIT_LOAD_IA32_PKRS 0x20000000
+
+#define VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004
+#define VMX_VM_ENTRY_IA32E_MODE 0x00000200
+#define VMX_VM_ENTRY_SMM 0x00000400
+#define VMX_VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800
+#define VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000
+#define VMX_VM_ENTRY_LOAD_IA32_PAT 0x00004000
+#define VMX_VM_ENTRY_LOAD_IA32_EFER 0x00008000
+#define VMX_VM_ENTRY_LOAD_BNDCFGS 0x00010000
+#define VMX_VM_ENTRY_PT_CONCEAL_PIP 0x00020000
+#define VMX_VM_ENTRY_LOAD_IA32_RTIT_CTL 0x00040000
+#define VMX_VM_ENTRY_LOAD_IA32_PKRS 0x00400000
+
+/* Supported Hyper-V Enlightenments */
+#define HYPERV_FEAT_RELAXED 0
+#define HYPERV_FEAT_VAPIC 1
+#define HYPERV_FEAT_TIME 2
+#define HYPERV_FEAT_CRASH 3
+#define HYPERV_FEAT_RESET 4
+#define HYPERV_FEAT_VPINDEX 5
+#define HYPERV_FEAT_RUNTIME 6
+#define HYPERV_FEAT_SYNIC 7
+#define HYPERV_FEAT_STIMER 8
+#define HYPERV_FEAT_FREQUENCIES 9
+#define HYPERV_FEAT_REENLIGHTENMENT 10
+#define HYPERV_FEAT_TLBFLUSH 11
+#define HYPERV_FEAT_EVMCS 12
+#define HYPERV_FEAT_IPI 13
+#define HYPERV_FEAT_STIMER_DIRECT 14
+#define HYPERV_FEAT_AVIC 15
+#define HYPERV_FEAT_SYNDBG 16
+#define HYPERV_FEAT_MSR_BITMAP 17
+#define HYPERV_FEAT_XMM_INPUT 18
+#define HYPERV_FEAT_TLBFLUSH_EXT 19
+#define HYPERV_FEAT_TLBFLUSH_DIRECT 20
+
+#ifndef HYPERV_SPINLOCK_NEVER_NOTIFY
+#define HYPERV_SPINLOCK_NEVER_NOTIFY 0xFFFFFFFF
#endif
#define EXCP00_DIVZ 0
@@ -753,9 +1201,9 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
#define EXCP11_ALGN 17
#define EXCP12_MCHK 18
-#define EXCP_SYSCALL 0x100 /* only happens in user only emulation
- for syscall instruction */
-#define EXCP_VMEXIT 0x100
+#define EXCP_VMEXIT 0x100 /* only for system emulation */
+#define EXCP_SYSCALL 0x101 /* only for user emulation */
+#define EXCP_VSYSCALL 0x102 /* only for user emulation */
/* i386-specific interrupt pending bits. */
#define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1
@@ -844,6 +1292,7 @@ typedef enum {
CC_OP_NB,
} CCOp;
+QEMU_BUILD_BUG_ON(CC_OP_NB >= 128);
typedef struct SegmentCache {
uint32_t selector;
@@ -852,32 +1301,35 @@ typedef struct SegmentCache {
uint32_t flags;
} SegmentCache;
-#define MMREG_UNION(n, bits) \
- union n { \
- uint8_t _b_##n[(bits)/8]; \
- uint16_t _w_##n[(bits)/16]; \
- uint32_t _l_##n[(bits)/32]; \
- uint64_t _q_##n[(bits)/64]; \
- float32 _s_##n[(bits)/32]; \
- float64 _d_##n[(bits)/64]; \
- }
-
-typedef union {
- uint8_t _b[16];
- uint16_t _w[8];
- uint32_t _l[4];
- uint64_t _q[2];
+typedef union MMXReg {
+ uint8_t _b_MMXReg[64 / 8];
+ uint16_t _w_MMXReg[64 / 16];
+ uint32_t _l_MMXReg[64 / 32];
+ uint64_t _q_MMXReg[64 / 64];
+ float32 _s_MMXReg[64 / 32];
+ float64 _d_MMXReg[64 / 64];
+} MMXReg;
+
+typedef union XMMReg {
+ uint64_t _q_XMMReg[128 / 64];
} XMMReg;
-typedef union {
- uint8_t _b[32];
- uint16_t _w[16];
- uint32_t _l[8];
- uint64_t _q[4];
+typedef union YMMReg {
+ uint64_t _q_YMMReg[256 / 64];
+ XMMReg _x_YMMReg[256 / 128];
} YMMReg;
-typedef MMREG_UNION(ZMMReg, 512) ZMMReg;
-typedef MMREG_UNION(MMXReg, 64) MMXReg;
+typedef union ZMMReg {
+ uint8_t _b_ZMMReg[512 / 8];
+ uint16_t _w_ZMMReg[512 / 16];
+ uint32_t _l_ZMMReg[512 / 32];
+ uint64_t _q_ZMMReg[512 / 64];
+ float16 _h_ZMMReg[512 / 16];
+ float32 _s_ZMMReg[512 / 32];
+ float64 _d_ZMMReg[512 / 64];
+ XMMReg _x_ZMMReg[512 / 128];
+ YMMReg _y_ZMMReg[512 / 256];
+} ZMMReg;
typedef struct BNDReg {
uint64_t lb;
@@ -893,13 +1345,21 @@ typedef struct BNDCSReg {
#define BNDCFG_BNDPRESERVE 2ULL
#define BNDCFG_BDIR_MASK TARGET_PAGE_MASK
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
#define ZMM_B(n) _b_ZMMReg[63 - (n)]
#define ZMM_W(n) _w_ZMMReg[31 - (n)]
#define ZMM_L(n) _l_ZMMReg[15 - (n)]
+#define ZMM_H(n) _h_ZMMReg[31 - (n)]
#define ZMM_S(n) _s_ZMMReg[15 - (n)]
#define ZMM_Q(n) _q_ZMMReg[7 - (n)]
#define ZMM_D(n) _d_ZMMReg[7 - (n)]
+#define ZMM_X(n) _x_ZMMReg[3 - (n)]
+#define ZMM_Y(n) _y_ZMMReg[1 - (n)]
+
+#define XMM_Q(n) _q_XMMReg[1 - (n)]
+
+#define YMM_Q(n) _q_YMMReg[3 - (n)]
+#define YMM_X(n) _x_YMMReg[1 - (n)]
#define MMX_B(n) _b_MMXReg[7 - (n)]
#define MMX_W(n) _w_MMXReg[3 - (n)]
@@ -909,9 +1369,17 @@ typedef struct BNDCSReg {
#define ZMM_B(n) _b_ZMMReg[n]
#define ZMM_W(n) _w_ZMMReg[n]
#define ZMM_L(n) _l_ZMMReg[n]
+#define ZMM_H(n) _h_ZMMReg[n]
#define ZMM_S(n) _s_ZMMReg[n]
#define ZMM_Q(n) _q_ZMMReg[n]
#define ZMM_D(n) _d_ZMMReg[n]
+#define ZMM_X(n) _x_ZMMReg[n]
+#define ZMM_Y(n) _y_ZMMReg[n]
+
+#define XMM_Q(n) _q_XMMReg[n]
+
+#define YMM_Q(n) _q_YMMReg[n]
+#define YMM_X(n) _x_YMMReg[n]
#define MMX_B(n) _b_MMXReg[n]
#define MMX_W(n) _w_MMXReg[n]
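
/*
 * Illustration (not part of this patch): the reversed indices in the
 * HOST_BIG_ENDIAN branch keep element numbering architectural, so ZMM_L(0)
 * is always the low dword of ZMM_Q(0) regardless of host byte order.
 * Standalone demo with a trimmed-down union:
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef union ZMMReg {
    uint32_t _l_ZMMReg[512 / 32];
    uint64_t _q_ZMMReg[512 / 64];
} ZMMReg;

#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define ZMM_L(n) _l_ZMMReg[15 - (n)]
#define ZMM_Q(n) _q_ZMMReg[7 - (n)]
#else
#define ZMM_L(n) _l_ZMMReg[(n)]
#define ZMM_Q(n) _q_ZMMReg[(n)]
#endif

int main(void)
{
    ZMMReg r = { { 0 } };

    r.ZMM_Q(0) = 0x1122334455667788ULL;
    assert(r.ZMM_L(0) == 0x55667788u);  /* low dword of element 0 */
    assert(r.ZMM_L(1) == 0x11223344u);  /* high dword of element 0 */
    printf("element aliasing holds on this host\n");
    return 0;
}
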
@@ -942,7 +1410,6 @@ typedef struct {
#define MAX_FIXED_COUNTERS 3
#define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)
-#define NB_MMU_MODES 3
#define TARGET_INSN_START_EXTRA_WORDS 1
#define NB_OPMASK_REGS 8
@@ -1013,40 +1480,54 @@ typedef struct XSavePKRU {
uint32_t padding;
} XSavePKRU;
-typedef struct X86XSaveArea {
- X86LegacyXSaveArea legacy;
- X86XSaveHeader header;
-
- /* Extended save areas: */
-
- /* AVX State: */
- XSaveAVX avx_state;
- uint8_t padding[960 - 576 - sizeof(XSaveAVX)];
- /* MPX State: */
- XSaveBNDREG bndreg_state;
- XSaveBNDCSR bndcsr_state;
- /* AVX-512 State: */
- XSaveOpmask opmask_state;
- XSaveZMM_Hi256 zmm_hi256_state;
- XSaveHi16_ZMM hi16_zmm_state;
- /* PKRU State: */
- XSavePKRU pkru_state;
-} X86XSaveArea;
-
-QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, avx_state) != 0x240);
+/* Ext. save area 17: AMX XTILECFG state */
+typedef struct XSaveXTILECFG {
+ uint8_t xtilecfg[64];
+} XSaveXTILECFG;
+
+/* Ext. save area 18: AMX XTILEDATA state */
+typedef struct XSaveXTILEDATA {
+ uint8_t xtiledata[8][1024];
+} XSaveXTILEDATA;
+
+typedef struct {
+ uint64_t from;
+ uint64_t to;
+ uint64_t info;
+} LBREntry;
+
+#define ARCH_LBR_NR_ENTRIES 32
+
+/* Ext. save area 19: Supervisor mode Arch LBR state */
+typedef struct XSavesArchLBR {
+ uint64_t lbr_ctl;
+ uint64_t lbr_depth;
+ uint64_t ler_from;
+ uint64_t ler_to;
+ uint64_t ler_info;
+ LBREntry lbr_records[ARCH_LBR_NR_ENTRIES];
+} XSavesArchLBR;
+
QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100);
-QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndreg_state) != 0x3c0);
QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40);
-QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndcsr_state) != 0x400);
QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40);
-QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, opmask_state) != 0x440);
QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40);
-QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) != 0x480);
QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200);
-QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) != 0x680);
QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400);
-QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) != 0xA80);
QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8);
+QEMU_BUILD_BUG_ON(sizeof(XSaveXTILECFG) != 0x40);
+QEMU_BUILD_BUG_ON(sizeof(XSaveXTILEDATA) != 0x2000);
+QEMU_BUILD_BUG_ON(sizeof(XSavesArchLBR) != 0x328);
+
+typedef struct ExtSaveArea {
+ uint32_t feature, bits;
+ uint32_t offset, size;
+ uint32_t ecx;
+} ExtSaveArea;
+
+#define XSAVE_STATE_AREA_COUNT (XSTATE_XTILE_DATA_BIT + 1)
+
+extern ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT];
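
/*
 * Sketch (not part of this patch, compiles only against QEMU's internal
 * headers): one way x86_ext_save_areas can be walked to size a standard
 * (non-compacted) XSAVE area for a given XCR0.  xsave_area_size_for() is an
 * illustrative helper; QEMU keeps its own equivalents elsewhere.
 */
static uint32_t xsave_area_size_for(uint64_t xcr0)
{
    /* Legacy region (512 bytes) plus XSAVE header (64 bytes). */
    uint32_t size = 0x240;

    for (int i = 2; i < XSAVE_STATE_AREA_COUNT; i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];

        if ((xcr0 & (1ULL << i)) && esa->size) {
            uint32_t end = esa->offset + esa->size;

            if (end > size) {
                size = end;
            }
        }
    }
    return size;
}
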
typedef enum TPRAccess {
TPR_ACCESS_READ,
@@ -1112,7 +1593,12 @@ typedef struct CPUCaches {
CPUCacheInfo *l3_cache;
} CPUCaches;
-typedef struct CPUX86State {
+typedef struct HVFX86LazyFlags {
+ target_ulong result;
+ target_ulong auxbits;
+} HVFX86LazyFlags;
+
+typedef struct CPUArchState {
/* standard registers */
target_ulong regs[CPU_NB_REGS];
target_ulong eip;
@@ -1138,6 +1624,9 @@ typedef struct CPUX86State {
SegmentCache idt; /* only base and limit are used */
target_ulong cr[5]; /* NOTE: cr1 is unused */
+
+ bool pdptrs_valid;
+ uint64_t pdptrs[4];
int32_t a20_mask;
BNDReg bnd_regs[4];
@@ -1156,6 +1645,8 @@ typedef struct CPUX86State {
FPReg fpregs[8];
/* KVM-only so far */
uint16_t fpop;
+ uint16_t fpcs;
+ uint16_t fpds;
uint64_t fpip;
uint64_t fpdp;
@@ -1166,15 +1657,15 @@ typedef struct CPUX86State {
float_status mmx_status; /* for 3DNow! float ops */
float_status sse_status;
uint32_t mxcsr;
- ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32];
- ZMMReg xmm_t0;
+ ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32] QEMU_ALIGNED(16);
+ ZMMReg xmm_t0 QEMU_ALIGNED(16);
MMXReg mmx_t0;
- XMMReg ymmh_regs[CPU_NB_REGS];
-
uint64_t opmask_regs[NB_OPMASK_REGS];
- YMMReg zmmh_regs[CPU_NB_REGS];
- ZMMReg hi16_zmm_regs[CPU_NB_REGS];
+#ifdef TARGET_X86_64
+ uint8_t xtilecfg[64];
+ uint8_t xtiledata[8192];
+#endif
/* sysenter registers */
uint32_t sysenter_cs;
@@ -1191,7 +1682,6 @@ typedef struct CPUX86State {
target_ulong kernelgsbase;
#endif
- uint64_t tsc;
uint64_t tsc_adjust;
uint64_t tsc_deadline;
uint64_t tsc_aux;
@@ -1201,6 +1691,7 @@ typedef struct CPUX86State {
uint64_t mcg_status;
uint64_t msr_ia32_misc_enable;
uint64_t msr_ia32_feature_control;
+ uint64_t msr_ia32_sgxlepubkeyhash[4];
uint64_t msr_fixed_ctr_ctrl;
uint64_t msr_global_ctrl;
@@ -1215,8 +1706,11 @@ typedef struct CPUX86State {
uint64_t msr_smi_count;
uint32_t pkru;
+ uint32_t pkrs;
+ uint32_t tsx_ctrl;
uint64_t spec_ctrl;
+ uint64_t amd_tsc_scale_msr;
uint64_t virt_ssbd;
/* End of state preserved by INIT (dummy marker). */
@@ -1226,12 +1720,20 @@ typedef struct CPUX86State {
uint64_t wall_clock_msr;
uint64_t steal_time_msr;
uint64_t async_pf_en_msr;
+ uint64_t async_pf_int_msr;
uint64_t pv_eoi_en_msr;
+ uint64_t poll_control_msr;
/* Partition-wide HV MSRs, will be updated only on the first vcpu */
uint64_t msr_hv_hypercall;
uint64_t msr_hv_guest_os_id;
uint64_t msr_hv_tsc;
+ uint64_t msr_hv_syndbg_control;
+ uint64_t msr_hv_syndbg_status;
+ uint64_t msr_hv_syndbg_send_page;
+ uint64_t msr_hv_syndbg_recv_page;
+ uint64_t msr_hv_syndbg_pending_page;
+ uint64_t msr_hv_syndbg_options;
/* Per-VCPU HV MSRs */
uint64_t msr_hv_vapic;
@@ -1254,6 +1756,15 @@ typedef struct CPUX86State {
uint64_t msr_rtit_cr3_match;
uint64_t msr_rtit_addrs[MAX_RTIT_ADDRS];
+ /* Per-VCPU XFD MSRs */
+ uint64_t msr_xfd;
+ uint64_t msr_xfd_err;
+
+ /* Per-VCPU Arch LBR MSRs */
+ uint64_t msr_lbr_ctl;
+ uint64_t msr_lbr_depth;
+ LBREntry lbr_records[ARCH_LBR_NR_ENTRIES];
+
/* exception/interrupt handling */
int error_code;
int exception_is_int;
@@ -1276,6 +1787,7 @@ typedef struct CPUX86State {
uint64_t nested_cr3;
uint32_t nested_pg_mode;
uint8_t v_tpr;
+ uint32_t int_ctl;
/* KVM states, automatically cleared on reset */
uint8_t nmi_injected;
@@ -1286,11 +1798,13 @@ typedef struct CPUX86State {
/* Fields up to this point are cleared by a CPU reset */
struct {} end_reset_fields;
- CPU_COMMON
-
- /* Fields after CPU_COMMON are preserved across CPU reset. */
+ /* Fields after this point are preserved across CPU reset. */
/* processor features (e.g. for CPUID insn) */
+ /* Actual cpuid leaf 7 value */
+ uint32_t cpuid_level_func7;
+ /* Minimum cpuid leaf 7 value */
+ uint32_t cpuid_min_level_func7;
/* Minimum level/xlevel/xlevel2, based on CPU model + features */
uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2;
/* Maximum level/xlevel/xlevel2 value for auto-assignment: */
@@ -1318,20 +1832,46 @@ typedef struct CPUX86State {
/* For KVM */
uint32_t mp_state;
- int32_t exception_injected;
+ int32_t exception_nr;
int32_t interrupt_injected;
uint8_t soft_interrupt;
+ uint8_t exception_pending;
+ uint8_t exception_injected;
uint8_t has_error_code;
+ uint8_t exception_has_payload;
+ uint64_t exception_payload;
+ uint8_t triple_fault_pending;
uint32_t ins_len;
uint32_t sipi_vector;
bool tsc_valid;
int64_t tsc_khz;
int64_t user_tsc_khz; /* for sanity check only */
+ uint64_t apic_bus_freq;
+ uint64_t tsc;
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
void *xsave_buf;
+ uint32_t xsave_buf_len;
+#endif
+#if defined(CONFIG_KVM)
+ struct kvm_nested_state *nested_state;
+ MemoryRegion *xen_vcpu_info_mr;
+ void *xen_vcpu_info_hva;
+ uint64_t xen_vcpu_info_gpa;
+ uint64_t xen_vcpu_info_default_gpa;
+ uint64_t xen_vcpu_time_info_gpa;
+ uint64_t xen_vcpu_runstate_gpa;
+ uint8_t xen_vcpu_callback_vector;
+ bool xen_callback_asserted;
+ uint16_t xen_virq[XEN_NR_VIRQS];
+ uint64_t xen_singleshot_timer_ns;
+ QEMUTimer *xen_singleshot_timer;
+ uint64_t xen_periodic_timer_period;
+ QEMUTimer *xen_periodic_timer;
+ QemuMutex xen_timers_lock;
#endif
#if defined(CONFIG_HVF)
- HVFX86EmulatorState *hvf_emul;
+ HVFX86LazyFlags hvf_lflags;
+ void *hvf_mmio_buf;
#endif
uint64_t mcg_cap;
@@ -1346,8 +1886,12 @@ typedef struct CPUX86State {
uint16_t fpregs_format_vmstate;
uint64_t xss;
+ uint32_t umwait;
TPRAccess tpr_access_type;
+
+ /* Number of dies within this CPU package. */
+ unsigned nr_dies;
} CPUX86State;
struct kvm_msrs;
@@ -1361,29 +1905,39 @@ struct kvm_msrs;
*
* An x86 CPU.
*/
-struct X86CPU {
- /*< private >*/
+struct ArchCPU {
CPUState parent_obj;
- /*< public >*/
CPUX86State env;
+ VMChangeStateEntry *vmsentry;
+
+ uint64_t ucode_rev;
+
+ uint32_t hyperv_spinlock_attempts;
+ char *hyperv_vendor;
+ bool hyperv_synic_kvm_only;
+ uint64_t hyperv_features;
+ bool hyperv_passthrough;
+ OnOffAuto hyperv_no_nonarch_cs;
+ uint32_t hyperv_vendor_id[3];
+ uint32_t hyperv_interface_id[4];
+ uint32_t hyperv_limits[3];
+ bool hyperv_enforce_cpuid;
+ uint32_t hyperv_ver_id_build;
+ uint16_t hyperv_ver_id_major;
+ uint16_t hyperv_ver_id_minor;
+ uint32_t hyperv_ver_id_sp;
+ uint8_t hyperv_ver_id_sb;
+ uint32_t hyperv_ver_id_sn;
- bool hyperv_vapic;
- bool hyperv_relaxed_timing;
- int hyperv_spinlock_attempts;
- char *hyperv_vendor_id;
- bool hyperv_time;
- bool hyperv_crash;
- bool hyperv_reset;
- bool hyperv_vpindex;
- bool hyperv_runtime;
- bool hyperv_synic;
- bool hyperv_stimer;
- bool hyperv_frequencies;
- bool hyperv_reenlightenment;
- bool hyperv_tlbflush;
bool check_cpuid;
bool enforce_cpuid;
+ /*
+ * Force features to be enabled even if the host doesn't support them.
+ * This is dangerous and should be done only for testing CPUID
+ * compatibility.
+ */
+ bool force_features;
bool expose_kvm;
bool expose_tcg;
bool migratable;
@@ -1408,7 +1962,7 @@ struct X86CPU {
} mwait;
/* Features that were filtered out because of missing host capabilities */
- uint32_t filtered_features[FEATURE_WORDS];
+ FeatureWordArray filtered_features;
/* Enable PMU CPUID bits. This can't be enabled by default yet because
* it doesn't have ABI stability guarantees, as it passes all PMU CPUID
@@ -1417,6 +1971,15 @@ struct X86CPU {
*/
bool enable_pmu;
+ /*
+ * Enable LBR_FMT bits of IA32_PERF_CAPABILITIES MSR.
+ * This can't be initialized with a default because it doesn't have
+ * stable ABI support yet. It is only allowed to pass all LBR_FMT bits
+ * returned by kvm_arch_get_supported_msr_feature() (which depends on both
+ * host CPU and kernel capabilities) to the guest.
+ */
+ uint64_t lbr_fmt;
+
/* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is
* disabled by default to avoid breaking migration between QEMU with
* different LMCE configurations.
@@ -1440,15 +2003,27 @@ struct X86CPU {
/* Enable auto level-increase for all CPUID leaves */
bool full_cpuid_auto_level;
+ /* Only advertise CPUID leaves defined by the vendor */
+ bool vendor_cpuid_only;
+
+ /* Enable auto level-increase for Intel Processor Trace leave */
+ bool intel_pt_auto_level;
+
/* if true fill the top bits of the MTRR_PHYSMASKn variable range */
bool fill_mtrr_mask;
/* if true override the phys_bits value with a value read from the host */
bool host_phys_bits;
+ /* if set, limit maximum value for phys_bits when host_phys_bits is true */
+ uint8_t host_phys_bits_limit;
+
/* Stop SMI delivery for migration compatibility with old machines */
bool kvm_no_smi_migration;
+ /* Forcefully disable KVM PV features not exposed in guest CPUIDs */
+ bool kvm_pv_enforce_cpuid;
+
/* Number of physical address bits supported */
uint32_t phys_bits;
@@ -1462,69 +2037,99 @@ struct X86CPU {
int32_t node_id; /* NUMA node this CPU belongs to */
int32_t socket_id;
+ int32_t die_id;
int32_t core_id;
int32_t thread_id;
int32_t hv_max_vps;
+
+ bool xen_vapic;
};
-static inline X86CPU *x86_env_get_cpu(CPUX86State *env)
-{
- return container_of(env, X86CPU, env);
-}
+typedef struct X86CPUModel X86CPUModel;
-#define ENV_GET_CPU(e) CPU(x86_env_get_cpu(e))
+/**
+ * X86CPUClass:
+ * @cpu_def: CPU model definition
+ * @host_cpuid_required: Whether CPU model requires cpuid from host.
+ * @ordering: Ordering on the "-cpu help" CPU model list.
+ * @migration_safe: See CpuDefinitionInfo::migration_safe
+ * @static_model: See CpuDefinitionInfo::static
+ * @parent_realize: The parent class' realize handler.
+ * @parent_phases: The parent class' reset phase handlers.
+ *
+ * An x86 CPU model or family.
+ */
+struct X86CPUClass {
+ CPUClass parent_class;
-#define ENV_OFFSET offsetof(X86CPU, env)
+ /*
+ * CPU definition, automatically loaded by instance_init if not NULL.
+ * Should be eventually replaced by subclass-specific property defaults.
+ */
+ X86CPUModel *model;
+
+ bool host_cpuid_required;
+ int ordering;
+ bool migration_safe;
+ bool static_model;
+
+ /*
+ * Optional description of CPU model.
+ * If unavailable, cpu_def->model_id is used.
+ */
+ const char *model_description;
+
+ DeviceRealize parent_realize;
+ DeviceUnrealize parent_unrealize;
+ ResettablePhases parent_phases;
+};
#ifndef CONFIG_USER_ONLY
-extern struct VMStateDescription vmstate_x86_cpu;
+extern const VMStateDescription vmstate_x86_cpu;
#endif
-/**
- * x86_cpu_do_interrupt:
- * @cpu: vCPU the interrupt is to be handled by.
- */
-void x86_cpu_do_interrupt(CPUState *cpu);
-bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request);
int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
- int cpuid, void *opaque);
+ int cpuid, DumpState *s);
int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
- int cpuid, void *opaque);
+ int cpuid, DumpState *s);
int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
- void *opaque);
+ DumpState *s);
int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
- void *opaque);
+ DumpState *s);
-void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
+bool x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
Error **errp);
-void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
- int flags);
+void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags);
-hwaddr x86_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
-
-int x86_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int x86_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
-void x86_cpu_exec_enter(CPUState *cpu);
-void x86_cpu_exec_exit(CPUState *cpu);
-
-void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+void x86_cpu_list(void);
int cpu_x86_support_mca_broadcast(CPUX86State *env);
+#ifndef CONFIG_USER_ONLY
+hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
+ MemTxAttrs *attrs);
int cpu_get_pic_interrupt(CPUX86State *s);
-/* MSDOS compatibility mode FPU exception support */
-void cpu_set_ferr(CPUX86State *s);
+
+/* MS-DOS compatibility mode FPU exception support */
+void x86_register_ferr_irq(qemu_irq irq);
+void fpu_check_raise_ferr_irq(CPUX86State *s);
+void cpu_set_ignne(void);
+void cpu_clear_ignne(void);
+#endif
+
/* mpx_helper.c */
void cpu_sync_bndcs_hflags(CPUX86State *env);
/* this function must always be used to load data in the segment
cache: it synchronizes the hflags with the segment cache values */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
- int seg_reg, unsigned int selector,
+ X86Seg seg_reg, unsigned int selector,
target_ulong base,
unsigned int limit,
unsigned int flags)
@@ -1613,31 +2218,39 @@ int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
/* cpu-exec.c */
/* the following helpers are only usable in user mode simulation as
they can trigger unexpected exceptions */
-void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
+void cpu_x86_load_seg(CPUX86State *s, X86Seg seg_reg, int selector);
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);
void cpu_x86_fxsave(CPUX86State *s, target_ulong ptr);
void cpu_x86_fxrstor(CPUX86State *s, target_ulong ptr);
-
-/* you can call this signal handler from your SIGBUS and SIGSEGV
- signal handlers to inform the virtual CPU of exceptions. non zero
- is returned if the signal was handled by the virtual CPU. */
-int cpu_x86_signal_handler(int host_signum, void *pinfo,
- void *puc);
+void cpu_x86_xsave(CPUX86State *s, target_ulong ptr);
+void cpu_x86_xrstor(CPUX86State *s, target_ulong ptr);
/* cpu.c */
+void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
+ uint32_t vendor2, uint32_t vendor3);
+typedef struct PropValue {
+ const char *prop, *value;
+} PropValue;
+void x86_cpu_apply_props(X86CPU *cpu, PropValue *props);
+
+void x86_cpu_after_reset(X86CPU *cpu);
+
+uint32_t cpu_x86_virtual_addr_width(CPUX86State *env);
+
+/* cpu.c other functions (cpuid) */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx);
void cpu_clear_apic_feature(CPUX86State *env);
+void cpu_set_apic_feature(CPUX86State *env);
void host_cpuid(uint32_t function, uint32_t count,
uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
-void host_vendor_fms(char *vendor, int *family, int *model, int *stepping);
+bool cpu_has_x2apic_feature(CPUX86State *env);
/* helper.c */
-int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr, int size,
- int is_write, int mmu_idx);
void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
+void cpu_sync_avx_hflag(CPUX86State *env);
#ifndef CONFIG_USER_ONLY
static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
@@ -1650,6 +2263,11 @@ static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs)
return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs));
}
+/*
+ * load efer and update the corresponding hflags. XXX: do consistency
+ * checks with cpuid bits?
+ */
+void cpu_load_efer(CPUX86State *env, uint64_t val);
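
/*
 * For reference (not part of this patch): the inline definition that this
 * patch removes further down did exactly the following, i.e. store EFER and
 * refresh the hflags bits derived from it; the declaration above replaces it
 * with an out-of-line version.
 */
static inline void cpu_load_efer_sketch(CPUX86State *env, uint64_t val)
{
    env->efer = val;
    env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
    if (env->efer & MSR_EFER_LMA) {
        env->hflags |= HF_LMA_MASK;
    }
    if (env->efer & MSR_EFER_SVME) {
        env->hflags |= HF_SVME_MASK;
    }
}
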
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr);
uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr);
uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr);
@@ -1661,8 +2279,6 @@ void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val);
void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val);
#endif
-void breakpoint_handler(CPUState *cs);
-
/* will be suppressed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
@@ -1672,31 +2288,6 @@ void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7);
/* hw/pc.c */
uint64_t cpu_get_tsc(CPUX86State *env);
-#define TARGET_PAGE_BITS 12
-
-#ifdef TARGET_X86_64
-#define TARGET_PHYS_ADDR_SPACE_BITS 52
-/* ??? This is really 48 bits, sign-extended, but the only thing
- accessible to userland with bit 48 set is the VSYSCALL, and that
- is handled via other mechanisms. */
-#define TARGET_VIRT_ADDR_SPACE_BITS 47
-#else
-#define TARGET_PHYS_ADDR_SPACE_BITS 36
-#define TARGET_VIRT_ADDR_SPACE_BITS 32
-#endif
-
-/* XXX: This value should match the one returned by CPUID
- * and in exec.c */
-# if defined(TARGET_X86_64)
-# define TCG_PHYS_ADDR_BITS 40
-# else
-# define TCG_PHYS_ADDR_BITS 36
-# endif
-
-#define PHYS_ADDR_MASK MAKE_64BIT_MASK(0, TCG_PHYS_ADDR_BITS)
-
-#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
-#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
#define CPU_RESOLVING_TYPE TYPE_X86_CPU
#ifdef TARGET_X86_64
@@ -1705,28 +2296,50 @@ uint64_t cpu_get_tsc(CPUX86State *env);
#define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32")
#endif
-#define cpu_signal_handler cpu_x86_signal_handler
#define cpu_list x86_cpu_list
/* MMU modes definitions */
-#define MMU_MODE0_SUFFIX _ksmap
-#define MMU_MODE1_SUFFIX _user
-#define MMU_MODE2_SUFFIX _knosmap /* SMAP disabled or CPL<3 && AC=1 */
-#define MMU_KSMAP_IDX 0
-#define MMU_USER_IDX 1
-#define MMU_KNOSMAP_IDX 2
-static inline int cpu_mmu_index(CPUX86State *env, bool ifetch)
+#define MMU_KSMAP64_IDX 0
+#define MMU_KSMAP32_IDX 1
+#define MMU_USER64_IDX 2
+#define MMU_USER32_IDX 3
+#define MMU_KNOSMAP64_IDX 4
+#define MMU_KNOSMAP32_IDX 5
+#define MMU_PHYS_IDX 6
+#define MMU_NESTED_IDX 7
+
+#ifdef CONFIG_USER_ONLY
+#ifdef TARGET_X86_64
+#define MMU_USER_IDX MMU_USER64_IDX
+#else
+#define MMU_USER_IDX MMU_USER32_IDX
+#endif
+#endif
+
+static inline bool is_mmu_index_smap(int mmu_index)
+{
+ return (mmu_index & ~1) == MMU_KSMAP64_IDX;
+}
+
+static inline bool is_mmu_index_user(int mmu_index)
+{
+ return (mmu_index & ~1) == MMU_USER64_IDX;
+}
+
+static inline bool is_mmu_index_32(int mmu_index)
{
- return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
- (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
- ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
+ assert(mmu_index < MMU_PHYS_IDX);
+ return mmu_index & 1;
}
static inline int cpu_mmu_index_kernel(CPUX86State *env)
{
- return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX :
- ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK))
- ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
+ int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1;
+ int mmu_index_base =
+ !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
+ ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK)) ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX;
+
+ return mmu_index_base + mmu_index_32;
}
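
/*
 * Illustration (not part of this patch): the new MMU indexes come in
 * 64-bit/32-bit pairs, so "base + 1" selects the 32-bit flavour and the
 * is_mmu_index_*() helpers above can test a whole pair with "& ~1".
 * Standalone demo:
 */
#include <assert.h>
#include <stdio.h>

#define MMU_KSMAP64_IDX   0
#define MMU_KSMAP32_IDX   1
#define MMU_USER64_IDX    2
#define MMU_USER32_IDX    3
#define MMU_KNOSMAP64_IDX 4
#define MMU_KNOSMAP32_IDX 5
#define MMU_PHYS_IDX      6

int main(void)
{
    for (int idx = MMU_KSMAP64_IDX; idx < MMU_PHYS_IDX; idx++) {
        printf("idx %d: smap=%d user=%d 32-bit=%d\n", idx,
               (idx & ~1) == MMU_KSMAP64_IDX,
               (idx & ~1) == MMU_USER64_IDX,
               idx & 1);
    }
    /* e.g. kernel mode, SMAP enforced, 32-bit paging: */
    assert(MMU_KSMAP64_IDX + 1 == MMU_KSMAP32_IDX);
    return 0;
}
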
#define CC_DST (env->cc_dst)
@@ -1734,25 +2347,6 @@ static inline int cpu_mmu_index_kernel(CPUX86State *env)
#define CC_SRC2 (env->cc_src2)
#define CC_OP (env->cc_op)
-/* n must be a constant to be efficient */
-static inline target_long lshift(target_long x, int n)
-{
- if (n >= 0) {
- return x << n;
- } else {
- return x >> (-n);
- }
-}
-
-/* float macros */
-#define FT0 (env->ft0)
-#define ST0 (env->fpregs[env->fpstt].d)
-#define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d)
-#define ST1 ST(1)
-
-/* translate.c */
-void tcg_x86_init(void);
-
#include "exec/cpu-all.h"
#include "svm.h"
@@ -1760,17 +2354,21 @@ void tcg_x86_init(void);
#include "hw/i386/apic.h"
#endif
-static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUX86State *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags)
{
- *cs_base = env->segs[R_CS].base;
- *pc = *cs_base + env->eip;
*flags = env->hflags |
(env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
+ if (env->hflags & HF_CS64_MASK) {
+ *cs_base = 0;
+ *pc = env->eip;
+ } else {
+ *cs_base = env->segs[R_CS].base;
+ *pc = (uint32_t)(*cs_base + env->eip);
+ }
}
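A worked example of the non-64-bit path, with hypothetical register values:

/*
 * CS.base = 0x10000, EIP = 0xfffff010, HF_CS64 clear:
 *   cs_base = 0x10000
 *   pc      = (uint32_t)(0x10000 + 0xfffff010) = 0x0000f010
 * With HF_CS64 set, the base is ignored and pc is simply EIP.
 */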
void do_cpu_init(X86CPU *cpu);
-void do_cpu_sipi(X86CPU *cpu);
#define MCE_INJECT_BROADCAST 1
#define MCE_INJECT_UNCOND_AO 2
@@ -1779,57 +2377,17 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
uint64_t status, uint64_t mcg_status, uint64_t addr,
uint64_t misc, int flags);
-/* excp_helper.c */
-void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index);
-void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index,
- uintptr_t retaddr);
-void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index,
- int error_code);
-void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index,
- int error_code, uintptr_t retaddr);
-void QEMU_NORETURN raise_interrupt(CPUX86State *nenv, int intno, int is_int,
- int error_code, int next_eip_addend);
-
-/* cc_helper.c */
-extern const uint8_t parity_table[256];
-uint32_t cpu_cc_compute_all(CPUX86State *env1, int op);
+uint32_t cpu_cc_compute_all(CPUX86State *env1);
static inline uint32_t cpu_compute_eflags(CPUX86State *env)
{
uint32_t eflags = env->eflags;
if (tcg_enabled()) {
- eflags |= cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK);
+ eflags |= cpu_cc_compute_all(env) | (env->df & DF_MASK);
}
return eflags;
}
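Under TCG the arithmetic flags live in CC_SRC/CC_DST/CC_OP until they are
needed, so env->eflags alone is incomplete; a consumer such as a register dump
would read the composed value instead (sketch, not a prescribed call site):

uint32_t visible_eflags = cpu_compute_eflags(env);  /* lazy CC flags and DF folded back in */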
-/* NOTE: the translator must set DisasContext.cc_op to CC_OP_EFLAGS
- * after generating a call to a helper that uses this.
- */
-static inline void cpu_load_eflags(CPUX86State *env, int eflags,
- int update_mask)
-{
- CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
- CC_OP = CC_OP_EFLAGS;
- env->df = 1 - (2 * ((eflags >> 10) & 1));
- env->eflags = (env->eflags & ~update_mask) |
- (eflags & update_mask) | 0x2;
-}
-
-/* load efer and update the corresponding hflags. XXX: do consistency
- checks with cpuid bits? */
-static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
-{
- env->efer = val;
- env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
- if (env->efer & MSR_EFER_LMA) {
- env->hflags |= HF_LMA_MASK;
- }
- if (env->efer & MSR_EFER_SVME) {
- env->hflags |= HF_SVME_MASK;
- }
-}
-
static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
{
return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 });
@@ -1844,9 +2402,45 @@ static inline int32_t x86_get_a20_mask(CPUX86State *env)
}
}
+static inline bool cpu_has_vmx(CPUX86State *env)
+{
+ return env->features[FEAT_1_ECX] & CPUID_EXT_VMX;
+}
+
+static inline bool cpu_has_svm(CPUX86State *env)
+{
+ return env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM;
+}
+
+/*
+ * In order for a vCPU to enter VMX operation it must have CR4.VMXE set.
+ * Once it is set, CR4.VMXE must remain set as long as the vCPU is in
+ * VMX operation. This is because CR4.VMXE is one of the bits set
+ * in MSR_IA32_VMX_CR4_FIXED1.
+ *
+ * There is one exception to the above statement: when a vCPU enters
+ * SMM mode, it temporarily exits VMX operation and may also clear
+ * CR4.VMXE while executing in SMM mode.
+ * When the vCPU exits SMM mode, its state is restored to VMX operation
+ * and CR4.VMXE is restored to its original (set) value.
+ *
+ * Therefore, when the vCPU is not in SMM mode, we can infer whether
+ * VMX is being used by examining CR4.VMXE. Otherwise, we cannot
+ * know for certain.
+ */
+static inline bool cpu_vmx_maybe_enabled(CPUX86State *env)
+{
+ return cpu_has_vmx(env) &&
+ ((env->cr[4] & CR4_VMXE_MASK) || (env->hflags & HF_SMM_MASK));
+}
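A summary of how the result can be read, for hypothetical vCPU states:

/*
 * CR4.VMXE = 1, not in SMM  -> maybe in VMX operation (VMXON may have run)
 * CR4.VMXE = 0, not in SMM  -> definitely not in VMX operation
 * in SMM                    -> unknown, so "maybe" is reported conservatively
 */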
+
+/* excp_helper.c */
+int get_pg_mode(CPUX86State *env);
+
/* fpu_helper.c */
void update_fp_status(CPUX86State *env);
void update_mxcsr_status(CPUX86State *env);
+void update_mxcsr_from_sse_status(CPUX86State *env);
static inline void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
{
@@ -1864,54 +2458,122 @@ static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc)
}
}
-/* mem_helper.c */
-void helper_lock_init(void);
-
/* svm_helper.c */
+#ifdef CONFIG_USER_ONLY
+static inline void
+cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
+ uint64_t param, uintptr_t retaddr)
+{ /* no-op */ }
+static inline bool
+cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
+{ return false; }
+#else
void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
uint64_t param, uintptr_t retaddr);
-void QEMU_NORETURN cpu_vmexit(CPUX86State *nenv, uint32_t exit_code,
- uint64_t exit_info_1, uintptr_t retaddr);
-void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1);
-
-/* seg_helper.c */
-void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);
-
-/* smm_helper.c */
-void do_smm_enter(X86CPU *cpu);
+bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type);
+#endif
/* apic.c */
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip,
TPRAccess access);
+/* Special values for X86CPUVersion: */
-/* Change the value of a KVM-specific default
- *
- * If value is NULL, no default will be set and the original
- * value from the CPU model table will be kept.
- *
- * It is valid to call this function only for properties that
- * are already present in the kvm_default_props table.
+/* Resolve to latest CPU version */
+#define CPU_VERSION_LATEST -1
+
+/*
+ * Resolve to version defined by current machine type.
+ * See x86_cpu_set_default_version()
+ */
+#define CPU_VERSION_AUTO -2
+
+/* Don't resolve to any versioned CPU model, as old QEMU versions did */
+#define CPU_VERSION_LEGACY 0
+
+typedef int X86CPUVersion;
+
+/*
+ * Set default CPU model version for CPU models having
+ * version == CPU_VERSION_AUTO.
*/
-void x86_cpu_change_kvm_default(const char *prop, const char *value);
+void x86_cpu_set_default_version(X86CPUVersion version);
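A hedged sketch of a call site (the hook name and version value are hypothetical):

/* Hypothetical machine-type init hook; "2" chosen for illustration only. */
static void example_machine_options_sketch(void)
{
    x86_cpu_set_default_version(2);   /* "-cpu Foo" then resolves to Foo-v2 */
}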
-/* Return name of 32-bit register, from a R_* constant */
-const char *get_register_name_32(unsigned int reg);
+#ifndef CONFIG_USER_ONLY
-void enable_compat_apic_id_mode(void);
+void do_cpu_sipi(X86CPU *cpu);
#define APIC_DEFAULT_ADDRESS 0xfee00000
#define APIC_SPACE_SIZE 0x100000
-void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
- fprintf_function cpu_fprintf, int flags);
+/* cpu-dump.c */
+void x86_cpu_dump_local_apic_state(CPUState *cs, int flags);
+
+#endif
/* cpu.c */
bool cpu_is_bsp(X86CPU *cpu);
-void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf);
-void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf);
+void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen);
+void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen);
+uint32_t xsave_area_size(uint64_t mask, bool compacted);
void x86_update_hflags(CPUX86State* env);
+static inline bool hyperv_feat_enabled(X86CPU *cpu, int feat)
+{
+ return !!(cpu->hyperv_features & BIT(feat));
+}
+
+static inline uint64_t cr4_reserved_bits(CPUX86State *env)
+{
+ uint64_t reserved_bits = CR4_RESERVED_MASK;
+ if (!env->features[FEAT_XSAVE]) {
+ reserved_bits |= CR4_OSXSAVE_MASK;
+ }
+ if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMEP)) {
+ reserved_bits |= CR4_SMEP_MASK;
+ }
+ if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
+ reserved_bits |= CR4_SMAP_MASK;
+ }
+ if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE)) {
+ reserved_bits |= CR4_FSGSBASE_MASK;
+ }
+ if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
+ reserved_bits |= CR4_PKE_MASK;
+ }
+ if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57)) {
+ reserved_bits |= CR4_LA57_MASK;
+ }
+ if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_UMIP)) {
+ reserved_bits |= CR4_UMIP_MASK;
+ }
+ if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS)) {
+ reserved_bits |= CR4_PKS_MASK;
+ }
+ return reserved_bits;
+}
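A minimal sketch of how the mask might be consumed (hypothetical helper; the
fault that a real CR4 write would raise is omitted):

static inline bool cr4_write_ok_sketch(CPUX86State *env, uint64_t new_cr4)
{
    /* A CR4 write must not set any bit reserved for this CPU model. */
    return (new_cr4 & cr4_reserved_bits(env)) == 0;
}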
+
+static inline bool ctl_has_irq(CPUX86State *env)
+{
+ uint32_t int_prio;
+ uint32_t tpr;
+
+ int_prio = (env->int_ctl & V_INTR_PRIO_MASK) >> V_INTR_PRIO_SHIFT;
+ tpr = env->int_ctl & V_TPR_MASK;
+
+ if (env->int_ctl & V_IGN_TPR_MASK) {
+ return (env->int_ctl & V_IRQ_MASK);
+ }
+
+ return (env->int_ctl & V_IRQ_MASK) && (int_prio >= tpr);
+}
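A worked example with hypothetical virtual-interrupt control values:

/*
 * V_IRQ = 1, V_IGN_TPR = 0, V_INTR_PRIO = 5, V_TPR = 7:
 *   5 >= 7 is false, so no virtual interrupt is reported.
 * With V_IGN_TPR = 1 the priority/TPR comparison is skipped and the
 * pending V_IRQ is reported regardless.
 */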
+
+#if defined(TARGET_X86_64) && \
+ defined(CONFIG_USER_ONLY) && \
+ defined(CONFIG_LINUX)
+# define TARGET_VSYSCALL_PAGE (UINT64_C(-10) << 20)
+#endif
+
#endif /* I386_CPU_H */