Diffstat (limited to 'target/arm/internals.h')
-rw-r--r-- | target/arm/internals.h | 879
1 file changed, 674 insertions(+), 205 deletions(-)
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 9fbb364968..dd3da211a3 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -28,6 +28,7 @@
 #include "hw/registerfields.h"
 #include "tcg/tcg-gvec-desc.h"
 #include "syndrome.h"
+#include "cpu-features.h"
 
 /* register banks for CPU modes */
 #define BANK_USRSYS 0
@@ -39,6 +40,11 @@
 #define BANK_HYP 6
 #define BANK_MON 7
 
+static inline int arm_env_mmu_index(CPUARMState *env)
+{
+    return EX_TBFLAG_ANY(env->hflags, MMUIDX);
+}
+
 static inline bool excp_is_internal(int excp)
 {
     /* Return true if this exception number represents a QEMU-internal
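The new arm_env_mmu_index() helper simply reads the MMU index that the hflags rebuild cached in env->hflags. A minimal usage sketch (not part of the patch; it assumes the core_to_arm_mmu_idx() helper that lives elsewhere in this header):

    /* Read the cached core MMU index and view it as an ARMMMUIdx. */
    int core_idx = arm_env_mmu_index(env);
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, core_idx);
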
@@ -81,6 +87,169 @@ FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
  */
 #define FNC_RETURN_MIN_MAGIC 0xfefffffe
 
+/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
+FIELD(DBGWCR, E, 0, 1)
+FIELD(DBGWCR, PAC, 1, 2)
+FIELD(DBGWCR, LSC, 3, 2)
+FIELD(DBGWCR, BAS, 5, 8)
+FIELD(DBGWCR, HMC, 13, 1)
+FIELD(DBGWCR, SSC, 14, 2)
+FIELD(DBGWCR, LBN, 16, 4)
+FIELD(DBGWCR, WT, 20, 1)
+FIELD(DBGWCR, MASK, 24, 5)
+FIELD(DBGWCR, SSCE, 29, 1)
+
+#define VTCR_NSW (1u << 29)
+#define VTCR_NSA (1u << 30)
+#define VSTCR_SW VTCR_NSW
+#define VSTCR_SA VTCR_NSA
+
+/* Bit definitions for CPACR (AArch32 only) */
+FIELD(CPACR, CP10, 20, 2)
+FIELD(CPACR, CP11, 22, 2)
+FIELD(CPACR, TRCDIS, 28, 1) /* matches CPACR_EL1.TTA */
+FIELD(CPACR, D32DIS, 30, 1) /* up to v7; RAZ in v8 */
+FIELD(CPACR, ASEDIS, 31, 1)
+
+/* Bit definitions for CPACR_EL1 (AArch64 only) */
+FIELD(CPACR_EL1, ZEN, 16, 2)
+FIELD(CPACR_EL1, FPEN, 20, 2)
+FIELD(CPACR_EL1, SMEN, 24, 2)
+FIELD(CPACR_EL1, TTA, 28, 1) /* matches CPACR.TRCDIS */
+
+/* Bit definitions for HCPTR (AArch32 only) */
+FIELD(HCPTR, TCP10, 10, 1)
+FIELD(HCPTR, TCP11, 11, 1)
+FIELD(HCPTR, TASE, 15, 1)
+FIELD(HCPTR, TTA, 20, 1)
+FIELD(HCPTR, TAM, 30, 1) /* matches CPTR_EL2.TAM */
+FIELD(HCPTR, TCPAC, 31, 1) /* matches CPTR_EL2.TCPAC */
+
+/* Bit definitions for CPTR_EL2 (AArch64 only) */
+FIELD(CPTR_EL2, TZ, 8, 1) /* !E2H */
+FIELD(CPTR_EL2, TFP, 10, 1) /* !E2H, matches HCPTR.TCP10 */
+FIELD(CPTR_EL2, TSM, 12, 1) /* !E2H */
+FIELD(CPTR_EL2, ZEN, 16, 2) /* E2H */
+FIELD(CPTR_EL2, FPEN, 20, 2) /* E2H */
+FIELD(CPTR_EL2, SMEN, 24, 2) /* E2H */
+FIELD(CPTR_EL2, TTA, 28, 1)
+FIELD(CPTR_EL2, TAM, 30, 1) /* matches HCPTR.TAM */
+FIELD(CPTR_EL2, TCPAC, 31, 1) /* matches HCPTR.TCPAC */
+
+/* Bit definitions for CPTR_EL3 (AArch64 only) */
+FIELD(CPTR_EL3, EZ, 8, 1)
+FIELD(CPTR_EL3, TFP, 10, 1)
+FIELD(CPTR_EL3, ESM, 12, 1)
+FIELD(CPTR_EL3, TTA, 20, 1)
+FIELD(CPTR_EL3, TAM, 30, 1)
+FIELD(CPTR_EL3, TCPAC, 31, 1)
+
+#define MDCR_MTPME (1U << 28)
+#define MDCR_TDCC (1U << 27)
+#define MDCR_HLP (1U << 26)  /* MDCR_EL2 */
+#define MDCR_SCCD (1U << 23) /* MDCR_EL3 */
+#define MDCR_HCCD (1U << 23) /* MDCR_EL2 */
+#define MDCR_EPMAD (1U << 21)
+#define MDCR_EDAD (1U << 20)
+#define MDCR_TTRF (1U << 19)
+#define MDCR_STE (1U << 18)  /* MDCR_EL3 */
+#define MDCR_SPME (1U << 17) /* MDCR_EL3 */
+#define MDCR_HPMD (1U << 17) /* MDCR_EL2 */
+#define MDCR_SDD (1U << 16)
+#define MDCR_SPD (3U << 14)
+#define MDCR_TDRA (1U << 11)
+#define MDCR_TDOSA (1U << 10)
+#define MDCR_TDA (1U << 9)
+#define MDCR_TDE (1U << 8)
+#define MDCR_HPME (1U << 7)
+#define MDCR_TPM (1U << 6)
+#define MDCR_TPMCR (1U << 5)
+#define MDCR_HPMN (0x1fU)
+
+/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
+#define SDCR_VALID_MASK (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \
+                         MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \
+                         MDCR_STE | MDCR_SPME | MDCR_SPD)
+
+#define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
+#define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
+#define TTBCR_PD0    (1U << 4)
+#define TTBCR_PD1    (1U << 5)
+#define TTBCR_EPD0   (1U << 7)
+#define TTBCR_IRGN0  (3U << 8)
+#define TTBCR_ORGN0  (3U << 10)
+#define TTBCR_SH0    (3U << 12)
+#define TTBCR_T1SZ   (3U << 16)
+#define TTBCR_A1     (1U << 22)
+#define TTBCR_EPD1   (1U << 23)
+#define TTBCR_IRGN1  (3U << 24)
+#define TTBCR_ORGN1  (3U << 26)
+#define TTBCR_SH1    (1U << 28)
+#define TTBCR_EAE    (1U << 31)
+
+FIELD(VTCR, T0SZ, 0, 6)
+FIELD(VTCR, SL0, 6, 2)
+FIELD(VTCR, IRGN0, 8, 2)
+FIELD(VTCR, ORGN0, 10, 2)
+FIELD(VTCR, SH0, 12, 2)
+FIELD(VTCR, TG0, 14, 2)
+FIELD(VTCR, PS, 16, 3)
+FIELD(VTCR, VS, 19, 1)
+FIELD(VTCR, HA, 21, 1)
+FIELD(VTCR, HD, 22, 1)
+FIELD(VTCR, HWU59, 25, 1)
+FIELD(VTCR, HWU60, 26, 1)
+FIELD(VTCR, HWU61, 27, 1)
+FIELD(VTCR, HWU62, 28, 1)
+FIELD(VTCR, NSW, 29, 1)
+FIELD(VTCR, NSA, 30, 1)
+FIELD(VTCR, DS, 32, 1)
+FIELD(VTCR, SL2, 33, 1)
+
+#define HCRX_ENAS0    (1ULL << 0)
+#define HCRX_ENALS    (1ULL << 1)
+#define HCRX_ENASR    (1ULL << 2)
+#define HCRX_FNXS     (1ULL << 3)
+#define HCRX_FGTNXS   (1ULL << 4)
+#define HCRX_SMPME    (1ULL << 5)
+#define HCRX_TALLINT  (1ULL << 6)
+#define HCRX_VINMI    (1ULL << 7)
+#define HCRX_VFNMI    (1ULL << 8)
+#define HCRX_CMOW     (1ULL << 9)
+#define HCRX_MCE2     (1ULL << 10)
+#define HCRX_MSCEN    (1ULL << 11)
+
+#define HPFAR_NS      (1ULL << 63)
+
+#define HSTR_TTEE (1 << 16)
+#define HSTR_TJDBX (1 << 17)
+
+/*
+ * Depending on the value of HCR_EL2.E2H, bits 0 and 1
+ * have different bit definitions, and EL1PCTEN might be
+ * bit 0 or bit 10. We use _E2H1 and _E2H0 suffixes to
+ * disambiguate if necessary.
+ */
+FIELD(CNTHCTL, EL0PCTEN_E2H1, 0, 1)
+FIELD(CNTHCTL, EL0VCTEN_E2H1, 1, 1)
+FIELD(CNTHCTL, EL1PCTEN_E2H0, 0, 1)
+FIELD(CNTHCTL, EL1PCEN_E2H0, 1, 1)
+FIELD(CNTHCTL, EVNTEN, 2, 1)
+FIELD(CNTHCTL, EVNTDIR, 3, 1)
+FIELD(CNTHCTL, EVNTI, 4, 4)
+FIELD(CNTHCTL, EL0VTEN, 8, 1)
+FIELD(CNTHCTL, EL0PTEN, 9, 1)
+FIELD(CNTHCTL, EL1PCTEN_E2H1, 10, 1)
+FIELD(CNTHCTL, EL1PTEN, 11, 1)
+FIELD(CNTHCTL, ECV, 12, 1)
+FIELD(CNTHCTL, EL1TVT, 13, 1)
+FIELD(CNTHCTL, EL1TVCT, 14, 1)
+FIELD(CNTHCTL, EL1NVPCT, 15, 1)
+FIELD(CNTHCTL, EL1NVVCT, 16, 1)
+FIELD(CNTHCTL, EVNTIS, 17, 1)
+FIELD(CNTHCTL, CNTVMASK, 18, 1)
+FIELD(CNTHCTL, CNTPMASK, 19, 1)
+
 /* We use a few fake FSR values for internal purposes in M profile.
  * M profile cores don't have A/R format FSRs, but currently our
  * get_phys_addr() code assumes A/R profile and reports failures via
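The _E2H1/_E2H0 suffixes in the CNTHCTL fields above matter because CNTHCTL_EL2 is laid out differently depending on HCR_EL2.E2H. A sketch of the intended access pattern (hypothetical helper; arm_hcr_el2_eff() and FIELD_EX64() are existing APIs):

    static bool cnthctl_el1pcten(CPUARMState *env)
    {
        uint64_t cnthctl = env->cp15.cnthctl_el2;

        if (arm_hcr_el2_eff(env) & HCR_E2H) {
            /* E2H == 1: EL1PCTEN lives at bit 10 */
            return FIELD_EX64(cnthctl, CNTHCTL, EL1PCTEN_E2H1);
        }
        /* E2H == 0: EL1PCTEN lives at bit 0 */
        return FIELD_EX64(cnthctl, CNTHCTL, EL1PCTEN_E2H0);
    }
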
@@ -102,13 +271,13 @@ FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
  * and target exception level. This should be called from helper functions,
  * and never returns because we will longjump back up to the CPU main loop.
  */
-void QEMU_NORETURN raise_exception(CPUARMState *env, uint32_t excp,
-                                   uint32_t syndrome, uint32_t target_el);
+G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
+                                uint32_t syndrome, uint32_t target_el);
 
 /*
  * Similarly, but also use unwinding to restore cpu state.
  */
-void QEMU_NORETURN raise_exception_ra(CPUARMState *env, uint32_t excp,
+G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                    uint32_t syndrome, uint32_t target_el,
                                    uintptr_t ra);
 
@@ -170,34 +339,39 @@ static inline int r14_bank_number(int mode)
     return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
 }
 
+void arm_cpu_register(const ARMCPUInfo *info);
+void aarch64_cpu_register(const ARMCPUInfo *info);
+
+void register_cp_regs_for_features(ARMCPU *cpu);
+void init_cpreg_list(ARMCPU *cpu);
+
 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
 void arm_translate_init(void);
 
+void arm_restore_state_to_opc(CPUState *cs,
+                              const TranslationBlock *tb,
+                              const uint64_t *data);
+
 #ifdef CONFIG_TCG
 void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
 #endif /* CONFIG_TCG */
 
-/**
- * aarch64_sve_zcr_get_valid_len:
- * @cpu: cpu context
- * @start_len: maximum len to consider
- *
- * Return the maximum supported sve vector length <= @start_len.
- * Note that both @start_len and the return value are in units
- * of ZCR_ELx.LEN, so the vector bit length is (x + 1) * 128.
- */
-uint32_t aarch64_sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len);
-
-enum arm_fprounding {
+typedef enum ARMFPRounding {
     FPROUNDING_TIEEVEN,
     FPROUNDING_POSINF,
     FPROUNDING_NEGINF,
     FPROUNDING_ZERO,
     FPROUNDING_TIEAWAY,
     FPROUNDING_ODD
-};
+} ARMFPRounding;
+
+extern const FloatRoundMode arm_rmode_to_sf_map[6];
 
-int arm_rmode_to_sf(int rmode);
+static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
+{
+    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
+    return arm_rmode_to_sf_map[rmode];
+}
 
 static inline void aarch64_save_sp(CPUARMState *env, int el)
 {
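arm_rmode_to_sf() is now a table lookup; the table itself is defined in a .c file. A plausible definition (a sketch, not quoted from this patch), using softfloat's FloatRoundMode values:

    const FloatRoundMode arm_rmode_to_sf_map[6] = {
        [FPROUNDING_TIEEVEN] = float_round_nearest_even,
        [FPROUNDING_POSINF]  = float_round_up,
        [FPROUNDING_NEGINF]  = float_round_down,
        [FPROUNDING_ZERO]    = float_round_to_zero,
        [FPROUNDING_TIEAWAY] = float_round_ties_away,
        [FPROUNDING_ODD]     = float_round_to_odd,
    };
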
@@ -243,24 +417,7 @@ static inline void update_spsel(CPUARMState *env, uint32_t imm)
  * Returns the implementation defined bit-width of physical addresses.
  * The ARMv8 reference manuals refer to this as PAMax().
  */
-static inline unsigned int arm_pamax(ARMCPU *cpu)
-{
-    static const unsigned int pamax_map[] = {
-        [0] = 32,
-        [1] = 36,
-        [2] = 40,
-        [3] = 42,
-        [4] = 44,
-        [5] = 48,
-    };
-    unsigned int parange =
-        FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
-
-    /* id_aa64mmfr0 is a read-only register so values outside of the
-     * supported mappings can be considered an implementation error. */
-    assert(parange < ARRAY_SIZE(pamax_map));
-    return pamax_map[parange];
-}
+unsigned int arm_pamax(ARMCPU *cpu);
 
 /* Return true if extended addresses are enabled.
  * This is always the case if our translation regime is 64 bit,
@@ -268,9 +425,13 @@ static inline unsigned int arm_pamax(ARMCPU *cpu)
  */
 static inline bool extended_addresses_enabled(CPUARMState *env)
 {
-    TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
+    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
+    if (arm_feature(env, ARM_FEATURE_PMSA) &&
+        arm_feature(env, ARM_FEATURE_V8)) {
+        return true;
+    }
     return arm_el_is_aa64(env, 1) ||
-           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
+           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
 }
 
 /* Update a QEMU watchpoint based on the information the guest has set in the
@@ -354,19 +515,33 @@ typedef enum ARMFaultType {
     ARMFault_AsyncExternal,
     ARMFault_Debug,
     ARMFault_TLBConflict,
+    ARMFault_UnsuppAtomicUpdate,
     ARMFault_Lockdown,
     ARMFault_Exclusive,
     ARMFault_ICacheMaint,
     ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
     ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
+    ARMFault_GPCFOnWalk,
+    ARMFault_GPCFOnOutput,
 } ARMFaultType;
 
+typedef enum ARMGPCF {
+    GPCF_None,
+    GPCF_AddressSize,
+    GPCF_Walk,
+    GPCF_EABT,
+    GPCF_Fail,
+} ARMGPCF;
+
 /**
  * ARMMMUFaultInfo: Information describing an ARM MMU Fault
  * @type: Type of fault
+ * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
  * @level: Table walk level (for translation, access flag and permission faults)
  * @domain: Domain of the fault address (for non-LPAE CPUs only)
  * @s2addr: Address that caused a fault at stage 2
+ * @paddr: physical address that caused a fault for gpc
+ * @paddr_space: physical address space that caused a fault for gpc
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
@@ -375,7 +550,10 @@ typedef enum ARMFaultType {
 typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
 struct ARMMMUFaultInfo {
     ARMFaultType type;
+    ARMGPCF gpcf;
     target_ulong s2addr;
+    target_ulong paddr;
+    ARMSecuritySpace paddr_space;
     int level;
     int domain;
     bool stage2;
@@ -479,28 +657,51 @@ static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
     case ARMFault_None:
         return 0;
     case ARMFault_AddressSize:
-        fsc = fi->level & 3;
+        assert(fi->level >= -1 && fi->level <= 3);
+        if (fi->level < 0) {
+            fsc = 0b101001;
+        } else {
+            fsc = fi->level;
+        }
         break;
     case ARMFault_AccessFlag:
-        fsc = (fi->level & 3) | (0x2 << 2);
+        assert(fi->level >= 0 && fi->level <= 3);
+        fsc = 0b001000 | fi->level;
         break;
     case ARMFault_Permission:
-        fsc = (fi->level & 3) | (0x3 << 2);
+        assert(fi->level >= 0 && fi->level <= 3);
+        fsc = 0b001100 | fi->level;
         break;
     case ARMFault_Translation:
-        fsc = (fi->level & 3) | (0x1 << 2);
+        assert(fi->level >= -1 && fi->level <= 3);
+        if (fi->level < 0) {
+            fsc = 0b101011;
+        } else {
+            fsc = 0b000100 | fi->level;
+        }
         break;
     case ARMFault_SyncExternal:
         fsc = 0x10 | (fi->ea << 12);
         break;
     case ARMFault_SyncExternalOnWalk:
-        fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12);
+        assert(fi->level >= -1 && fi->level <= 3);
+        if (fi->level < 0) {
+            fsc = 0b010011;
+        } else {
+            fsc = 0b010100 | fi->level;
+        }
+        fsc |= fi->ea << 12;
         break;
     case ARMFault_SyncParity:
         fsc = 0x18;
         break;
     case ARMFault_SyncParityOnWalk:
-        fsc = (fi->level & 3) | (0x7 << 2);
+        assert(fi->level >= -1 && fi->level <= 3);
+        if (fi->level < 0) {
+            fsc = 0b011011;
+        } else {
+            fsc = 0b011100 | fi->level;
+        }
         break;
     case ARMFault_AsyncParity:
         fsc = 0x19;
@@ -517,12 +718,26 @@ static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
     case ARMFault_TLBConflict:
         fsc = 0x30;
         break;
+    case ARMFault_UnsuppAtomicUpdate:
+        fsc = 0x31;
+        break;
     case ARMFault_Lockdown:
         fsc = 0x34;
         break;
     case ARMFault_Exclusive:
         fsc = 0x35;
         break;
+    case ARMFault_GPCFOnWalk:
+        assert(fi->level >= -1 && fi->level <= 3);
+        if (fi->level < 0) {
+            fsc = 0b100011;
+        } else {
+            fsc = 0b100100 | fi->level;
+        }
+        break;
+    case ARMFault_GPCFOnOutput:
+        fsc = 0b101000;
+        break;
     default:
         /* Other faults can't occur in a context that requires a
          * long-format status code.
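The new encodings make walk level -1 (which arises with FEAT_LPA2-style walks) explicit instead of masking it away. A worked example of the function above:

    ARMMMUFaultInfo fi = { .type = ARMFault_Translation, .level = -1 };
    uint32_t fsc = arm_fi_to_lfsc(&fi);   /* 0b101011: level -1 translation fault */

    fi.level = 2;
    fsc = arm_fi_to_lfsc(&fi);            /* 0b000110: level 2 translation fault */
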
@@ -544,9 +759,17 @@ static inline bool arm_extabort_type(MemTxResult result)
     return result != MEMTX_DECODE_ERROR;
 }
 
+#ifdef CONFIG_USER_ONLY
+void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
+                            MMUAccessType access_type,
+                            bool maperr, uintptr_t ra);
+void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
+                           MMUAccessType access_type, uintptr_t ra);
+#else
 bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr);
+#endif
 
 static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
 {
@@ -570,32 +793,21 @@ static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
 
 int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);
 
-/*
- * Return the MMU index for a v7M CPU with all relevant information
- * manually specified.
- */
-ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
-                              bool secstate, bool priv, bool negpri);
-
-/*
- * Return the MMU index for a v7M CPU in the specified security and
- * privilege state.
- */
-ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
-                                                bool secstate, bool priv);
-
 /* Return the MMU index for a v7M CPU in the specified security state */
 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
 
-/* Return true if the stage 1 translation regime is using LPAE format page
- * tables */
+/*
+ * Return true if the stage 1 translation regime is using LPAE
+ * format page tables
+ */
 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
 
 /* Raise a data fault alignment exception for the specified virtual address */
-void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
-                                 MMUAccessType access_type,
-                                 int mmu_idx, uintptr_t retaddr) QEMU_NORETURN;
+G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
+                                            MMUAccessType access_type,
+                                            int mmu_idx, uintptr_t retaddr);
 
+#ifndef CONFIG_USER_ONLY
 /* arm_cpu_do_transaction_failed: handle a memory system error response
  * (eg "no device/memory present at address") by raising an external abort
  * exception
@@ -605,6 +817,7 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                    MMUAccessType access_type,
                                    int mmu_idx, MemTxAttrs attrs,
                                    MemTxResult response, uintptr_t retaddr);
+#endif
 
 /* Call any registered EL change hooks */
 static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
@@ -629,112 +842,53 @@ static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
     case ARMMMUIdx_Stage1_E0:
     case ARMMMUIdx_Stage1_E1:
     case ARMMMUIdx_Stage1_E1_PAN:
-    case ARMMMUIdx_Stage1_SE0:
-    case ARMMMUIdx_Stage1_SE1:
-    case ARMMMUIdx_Stage1_SE1_PAN:
     case ARMMMUIdx_E10_0:
     case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
     case ARMMMUIdx_E20_0:
     case ARMMMUIdx_E20_2:
     case ARMMMUIdx_E20_2_PAN:
-    case ARMMMUIdx_SE10_0:
-    case ARMMMUIdx_SE10_1:
-    case ARMMMUIdx_SE10_1_PAN:
-    case ARMMMUIdx_SE20_0:
-    case ARMMMUIdx_SE20_2:
-    case ARMMMUIdx_SE20_2_PAN:
         return true;
     default:
         return false;
     }
 }
 
-/* Return true if this address translation regime is secure */
-static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
-{
-    switch (mmu_idx) {
-    case ARMMMUIdx_E10_0:
-    case ARMMMUIdx_E10_1:
-    case ARMMMUIdx_E10_1_PAN:
-    case ARMMMUIdx_E20_0:
-    case ARMMMUIdx_E20_2:
-    case ARMMMUIdx_E20_2_PAN:
-    case ARMMMUIdx_Stage1_E0:
-    case ARMMMUIdx_Stage1_E1:
-    case ARMMMUIdx_Stage1_E1_PAN:
-    case ARMMMUIdx_E2:
-    case ARMMMUIdx_Stage2:
-    case ARMMMUIdx_MPrivNegPri:
-    case ARMMMUIdx_MUserNegPri:
-    case ARMMMUIdx_MPriv:
-    case ARMMMUIdx_MUser:
-        return false;
-    case ARMMMUIdx_SE3:
-    case ARMMMUIdx_SE10_0:
-    case ARMMMUIdx_SE10_1:
-    case ARMMMUIdx_SE10_1_PAN:
-    case ARMMMUIdx_SE20_0:
-    case ARMMMUIdx_SE20_2:
-    case ARMMMUIdx_SE20_2_PAN:
-    case ARMMMUIdx_Stage1_SE0:
-    case ARMMMUIdx_Stage1_SE1:
-    case ARMMMUIdx_Stage1_SE1_PAN:
-    case ARMMMUIdx_SE2:
-    case ARMMMUIdx_Stage2_S:
-    case ARMMMUIdx_MSPrivNegPri:
-    case ARMMMUIdx_MSUserNegPri:
-    case ARMMMUIdx_MSPriv:
-    case ARMMMUIdx_MSUser:
-        return true;
-    default:
-        g_assert_not_reached();
-    }
-}
-
 static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
 {
     switch (mmu_idx) {
     case ARMMMUIdx_Stage1_E1_PAN:
-    case ARMMMUIdx_Stage1_SE1_PAN:
     case ARMMMUIdx_E10_1_PAN:
     case ARMMMUIdx_E20_2_PAN:
-    case ARMMMUIdx_SE10_1_PAN:
-    case ARMMMUIdx_SE20_2_PAN:
         return true;
     default:
         return false;
     }
 }
 
+static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
+{
+    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
+}
+
 /* Return the exception level which controls this address translation regime */
 static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
 {
     switch (mmu_idx) {
-    case ARMMMUIdx_SE20_0:
-    case ARMMMUIdx_SE20_2:
-    case ARMMMUIdx_SE20_2_PAN:
     case ARMMMUIdx_E20_0:
     case ARMMMUIdx_E20_2:
     case ARMMMUIdx_E20_2_PAN:
     case ARMMMUIdx_Stage2:
     case ARMMMUIdx_Stage2_S:
-    case ARMMMUIdx_SE2:
     case ARMMMUIdx_E2:
         return 2;
-    case ARMMMUIdx_SE3:
+    case ARMMMUIdx_E3:
         return 3;
-    case ARMMMUIdx_SE10_0:
-    case ARMMMUIdx_Stage1_SE0:
-        return arm_el_is_aa64(env, 3) ? 1 : 3;
-    case ARMMMUIdx_SE10_1:
-    case ARMMMUIdx_SE10_1_PAN:
+    case ARMMMUIdx_E10_0:
     case ARMMMUIdx_Stage1_E0:
+        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
     case ARMMMUIdx_Stage1_E1:
     case ARMMMUIdx_Stage1_E1_PAN:
-    case ARMMMUIdx_Stage1_SE1:
-    case ARMMMUIdx_Stage1_SE1_PAN:
-    case ARMMMUIdx_E10_0:
     case ARMMMUIdx_E10_1:
     case ARMMMUIdx_E10_1_PAN:
     case ARMMMUIdx_MPrivNegPri:
@@ -751,45 +905,79 @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
     }
 }
 
-/* Return the TCR controlling this translation regime */
-static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
+static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+    switch (mmu_idx) {
+    case ARMMMUIdx_E20_0:
+    case ARMMMUIdx_Stage1_E0:
+    case ARMMMUIdx_MUser:
+    case ARMMMUIdx_MSUser:
+    case ARMMMUIdx_MUserNegPri:
+    case ARMMMUIdx_MSUserNegPri:
+        return true;
+    default:
+        return false;
+    case ARMMMUIdx_E10_0:
+    case ARMMMUIdx_E10_1:
+    case ARMMMUIdx_E10_1_PAN:
+        g_assert_not_reached();
+    }
+}
+
+/* Return the SCTLR value which controls this address translation regime */
+static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
+}
+
+/*
+ * These are the fields in VTCR_EL2 which affect both the Secure stage 2
+ * and the Non-Secure stage 2 translation regimes (and hence which are
+ * not present in VSTCR_EL2).
+ */
+#define VTCR_SHARED_FIELD_MASK \
+    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
+     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
+     R_VTCR_DS_MASK)
+
+/* Return the value of the TCR controlling this translation regime */
+static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
 {
     if (mmu_idx == ARMMMUIdx_Stage2) {
-        return &env->cp15.vtcr_el2;
+        return env->cp15.vtcr_el2;
     }
     if (mmu_idx == ARMMMUIdx_Stage2_S) {
         /*
-         * Note: Secure stage 2 nominally shares fields from VTCR_EL2, but
-         * those are not currently used by QEMU, so just return VSTCR_EL2.
+         * Secure stage 2 shares fields from VTCR_EL2. We merge those
+         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
+         * value so the callers don't need to special case this.
+         *
+         * If a future architecture change defines bits in VSTCR_EL2 that
+         * overlap with these VTCR_EL2 fields we may need to revisit this.
          */
-        return &env->cp15.vstcr_el2;
+        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
+        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
+        return v;
     }
-    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
+    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
 }
 
-/* Return the FSR value for a debug exception (watchpoint, hardware
- * breakpoint or BKPT insn) targeting the specified exception level.
- */
-static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
+/* Return true if the translation regime is using LPAE format page tables */
+static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
 {
-    ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
-    int target_el = arm_debug_target_el(env);
-    bool using_lpae = false;
-
-    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
-        using_lpae = true;
-    } else {
-        if (arm_feature(env, ARM_FEATURE_LPAE) &&
-            (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) {
-            using_lpae = true;
-        }
+    int el = regime_el(env, mmu_idx);
+    if (el == 2 || arm_el_is_aa64(env, el)) {
+        return true;
     }
-
-    if (using_lpae) {
-        return arm_fi_to_lfsc(&fi);
-    } else {
-        return arm_fi_to_sfsc(&fi);
+    if (arm_feature(env, ARM_FEATURE_PMSA) &&
+        arm_feature(env, ARM_FEATURE_V8)) {
+        return true;
+    }
+    if (arm_feature(env, ARM_FEATURE_LPAE)
+        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
+        return true;
     }
+    return false;
 }
 
 /**
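Because regime_tcr() now folds the shared VTCR_EL2 fields into the Secure stage 2 value, callers can use a single extraction path for both stage 2 regimes. A usage sketch (hypothetical caller; FIELD_EX64() is the existing registerfields accessor):

    uint64_t tcr = regime_tcr(env, ARMMMUIdx_Stage2_S);
    int t0sz = FIELD_EX64(tcr, VTCR, T0SZ);  /* private field: from VSTCR_EL2 */
    int ps   = FIELD_EX64(tcr, VTCR, PS);    /* shared field: from VTCR_EL2 */
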
@@ -908,7 +1096,7 @@ static inline const char *aarch32_mode_name(uint32_t psr)
  *
  * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
  * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
- * Must be called with the iothread lock held.
+ * Must be called with the BQL held.
  */
 void arm_cpu_update_virq(ARMCPU *cpu);
 
@@ -917,11 +1105,19 @@ void arm_cpu_update_virq(ARMCPU *cpu);
  *
  * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
  * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
- * Must be called with the iothread lock held.
+ * Must be called with the BQL held.
  */
 void arm_cpu_update_vfiq(ARMCPU *cpu);
 
 /**
+ * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
+ *
+ * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
+ * following a change to the HCR_EL2.VSE bit.
+ */
+void arm_cpu_update_vserr(ARMCPU *cpu);
+
+/**
  * arm_mmu_idx_el:
  * @env: The cpu environment
 * @el: The EL to use.
@@ -945,11 +1141,16 @@ ARMMMUIdx arm_mmu_idx(CPUARMState *env);
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
 #ifdef CONFIG_USER_ONLY
+static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
+{
+    return ARMMMUIdx_Stage1_E0;
+}
 static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
 {
     return ARMMMUIdx_Stage1_E0;
 }
 #else
+ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
 ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
 #endif
 
@@ -966,9 +1167,6 @@ static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
     case ARMMMUIdx_Stage1_E0:
     case ARMMMUIdx_Stage1_E1:
     case ARMMMUIdx_Stage1_E1_PAN:
-    case ARMMMUIdx_Stage1_SE0:
-    case ARMMMUIdx_Stage1_SE1:
-    case ARMMMUIdx_Stage1_SE1_PAN:
         return true;
     default:
         return false;
@@ -1035,37 +1233,70 @@ static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
     return valid;
 }
 
+/* Granule size (i.e. page size) */
+typedef enum ARMGranuleSize {
+    /* Same order as TG0 encoding */
+    Gran4K,
+    Gran64K,
+    Gran16K,
+    GranInvalid,
+} ARMGranuleSize;
+
+/**
+ * arm_granule_bits: Return address size of the granule in bits
+ *
+ * Return the address size of the granule in bits. This corresponds
+ * to the pseudocode TGxGranuleBits().
+ */
+static inline int arm_granule_bits(ARMGranuleSize gran)
+{
+    switch (gran) {
+    case Gran64K:
+        return 16;
+    case Gran16K:
+        return 14;
+    case Gran4K:
+        return 12;
+    default:
+        g_assert_not_reached();
+    }
+}
+
 /*
  * Parameters of a given virtual address, as extracted from the
  * translation control register (TCR) for a given regime.
  */
 typedef struct ARMVAParameters {
     unsigned tsz    : 8;
+    unsigned ps     : 3;
+    unsigned sh     : 2;
     unsigned select : 1;
     bool tbi        : 1;
     bool epd        : 1;
     bool hpd        : 1;
-    bool using16k   : 1;
-    bool using64k   : 1;
+    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
+    bool ds         : 1;
+    bool ha         : 1;
+    bool hd         : 1;
+    ARMGranuleSize gran : 2;
 } ARMVAParameters;
 
+/**
+ * aa64_va_parameters: Return parameters for an AArch64 virtual address
+ * @env: CPU
+ * @va: virtual address to look up
+ * @mmu_idx: determines translation regime to use
+ * @data: true if this is a data access
+ * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
+ *               (ignored if @mmu_idx is for a stage 1 regime; only affects
+ *               tsz/tsz_oob)
+ */
 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
-                                   ARMMMUIdx mmu_idx, bool data);
+                                   ARMMMUIdx mmu_idx, bool data,
+                                   bool el1_is_aa32);
 
-static inline int exception_target_el(CPUARMState *env)
-{
-    int target_el = MAX(1, arm_current_el(env));
-
-    /*
-     * No such thing as secure EL1 if EL3 is aarch32,
-     * so update the target EL to EL3 in this case.
-     */
-    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
-        target_el = 3;
-    }
-
-    return target_el;
-}
+int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
+int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
+int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);
 
 /* Determine if allocation tags are available. */
 static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
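A sketch of how a consumer turns the new ARMVAParameters fields into address-space geometry (hypothetical caller; env, va and mmu_idx are whatever the translation code has in hand):

    ARMVAParameters p = aa64_va_parameters(env, va, mmu_idx, true, false);
    int page_bits = arm_granule_bits(p.gran);   /* 12, 14 or 16 */
    int va_bits = 64 - p.tsz;                   /* size of the input VA range */
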
@@ -1076,7 +1307,7 @@ static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
         && !(env->cp15.scr_el3 & SCR_ATA)) {
         return false;
     }
-    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
+    if (el < 2 && arm_is_el2_enabled(env)) {
         uint64_t hcr = arm_hcr_el2_eff(env);
         if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
             return false;
@@ -1101,40 +1332,81 @@ typedef struct V8M_SAttributes {
 
 void v8m_security_lookup(CPUARMState *env, uint32_t address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                         V8M_SAttributes *sattrs);
-
-bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
-                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
-                       int *prot, bool *is_subpage,
-                       ARMMMUFaultInfo *fi, uint32_t *mregion);
+                         bool secure, V8M_SAttributes *sattrs);
 
 /* Cacheability and shareability attributes for a memory access */
 typedef struct ARMCacheAttrs {
-    unsigned int attrs:8; /* as in the MAIR register encoding */
+    /*
+     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
+     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
+     */
+    unsigned int attrs:8;
     unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
+    bool is_s2_format:1;
 } ARMCacheAttrs;
 
+/* Fields that are valid upon success. */
+typedef struct GetPhysAddrResult {
+    CPUTLBEntryFull f;
+    ARMCacheAttrs cacheattrs;
+} GetPhysAddrResult;
+
+/**
+ * get_phys_addr: get the physical address for a virtual address
+ * @env: CPUARMState
+ * @address: virtual address to get physical address for
+ * @access_type: 0 for read, 1 for write, 2 for execute
+ * @mmu_idx: MMU index indicating required translation regime
+ * @result: set on translation success.
+ * @fi: set to fault info if the translation fails
+ *
+ * Find the physical address corresponding to the given virtual address,
+ * by doing a translation table walk on MMU based systems or using the
+ * MPU state on MPU based systems.
+ *
+ * Returns false if the translation was successful. Otherwise, phys_ptr,
+ * attrs, prot and page_size may not be filled in, and the populated fsr
+ * value provides information on why the translation aborted, in the format
+ * of a DFSR/IFSR fault register, with the following caveats:
+ *  * we honour the short vs long DFSR format differences.
+ *  * the WnR bit is never set (the caller must do this).
+ *  * for PMSAv5 based systems we don't bother to return a full FSR format
+ *    value.
+ */
 bool get_phys_addr(CPUARMState *env, target_ulong address,
                    MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
-                   target_ulong *page_size,
-                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
     __attribute__((nonnull));
 
-void arm_log_exception(int idx);
+/**
+ * get_phys_addr_with_space_nogpc: get the physical address for a virtual
+ *                                 address
+ * @env: CPUARMState
+ * @address: virtual address to get physical address for
+ * @access_type: 0 for read, 1 for write, 2 for execute
+ * @mmu_idx: MMU index indicating required translation regime
+ * @space: security space for the access
+ * @result: set on translation success.
+ * @fi: set to fault info if the translation fails
+ *
+ * Similar to get_phys_addr, but use the given security space and don't perform
+ * a Granule Protection Check on the resulting address.
+ */
+bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
+                                    MMUAccessType access_type,
+                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
+                                    GetPhysAddrResult *result,
+                                    ARMMMUFaultInfo *fi)
+    __attribute__((nonnull));
 
-#endif /* !CONFIG_USER_ONLY */
+bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
+                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                       bool is_secure, GetPhysAddrResult *result,
+                       ARMMMUFaultInfo *fi, uint32_t *mregion);
 
-/*
- * The log2 of the words in the tag block, for GMID_EL1.BS.
- * The is the maximum, 256 bytes, which manipulates 64-bits of tags.
- */
-#define GMID_EL1_BS 6
+void arm_log_exception(CPUState *cs);
 
-/* We associate one allocation tag per 16 bytes, the minimum. */
-#define LOG2_TAG_GRANULE 4
-#define TAG_GRANULE (1 << LOG2_TAG_GRANULE)
+#endif /* !CONFIG_USER_ONLY */
 
 /*
  * SVE predicates are 1/8 the size of SVE vectors, and cannot use
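A minimal sketch of calling the reworked get_phys_addr() API (hypothetical caller; CPUTLBEntryFull supplies the phys_addr and prot fields used below):

    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};

    if (!get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
        /* Success: address, attributes and page size are all in res. */
        hwaddr pa = res.f.phys_addr;
        int prot = res.f.prot;
    } else {
        /* Failure: fi describes why the walk aborted. */
    }
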
@@ -1156,11 +1428,67 @@ FIELD(MTEDESC, MIDX, 0, 4)
 FIELD(MTEDESC, TBI, 4, 2)
 FIELD(MTEDESC, TCMA, 6, 2)
 FIELD(MTEDESC, WRITE, 8, 1)
-FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9)  /* size - 1 */
+FIELD(MTEDESC, ALIGN, 9, 3)
+FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12)  /* size - 1 */
 
 bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
 uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
 
+/**
+ * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
+ * @env: CPU env
+ * @ptr: start address of memory region (dirty pointer)
+ * @size: length of region (guaranteed not to cross a page boundary)
+ * @desc: MTEDESC descriptor word (0 means no MTE checks)
+ * Returns: the size of the region that can be copied without hitting
+ *          an MTE tag failure
+ *
+ * Note that we assume that the caller has already checked the TBI
+ * and TCMA bits with mte_checks_needed() and an MTE check is definitely
+ * required.
+ */
+uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
+                        uint32_t desc);
+
+/**
+ * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
+ *                     operation going in the reverse direction
+ * @env: CPU env
+ * @ptr: *end* address of memory region (dirty pointer)
+ * @size: length of region (guaranteed not to cross a page boundary)
+ * @desc: MTEDESC descriptor word (0 means no MTE checks)
+ * Returns: the size of the region that can be copied without hitting
+ *          an MTE tag failure
+ *
+ * Note that we assume that the caller has already checked the TBI
+ * and TCMA bits with mte_checks_needed() and an MTE check is definitely
+ * required.
+ */
+uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
+                            uint32_t desc);
+
+/**
+ * mte_check_fail: Record an MTE tag check failure
+ * @env: CPU env
+ * @desc: MTEDESC descriptor word
+ * @dirty_ptr: Failing dirty address
+ * @ra: TCG retaddr
+ *
+ * This may never return (if the MTE tag checks are configured to fault).
+ */
+void mte_check_fail(CPUARMState *env, uint32_t desc,
+                    uint64_t dirty_ptr, uintptr_t ra);
+
+/**
+ * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
+ * @env: CPU env
+ * @dirty_ptr: Start address of memory region (dirty pointer)
+ * @size: length of region (guaranteed not to cross page boundary)
+ * @desc: MTEDESC descriptor word
+ */
+void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
+                       uint32_t desc);
+
 static inline int allocation_tag_from_addr(uint64_t ptr)
 {
     return extract64(ptr, 56, 4);
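A sketch of the FEAT_MOPS pattern these helpers support (hypothetical caller; how desc is built and where the failing address lands are assumptions, not taken from the patch):

    /* Ask how many bytes can be copied before the next tag mismatch. */
    uint64_t safe = mte_mops_probe(env, ptr, size, desc);
    if (safe < size) {
        /* Copy 'safe' bytes, then report the failure past that point. */
        mte_check_fail(env, desc, ptr + safe, ra);
    }
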
+ */ +uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size, + uint32_t desc); + +/** + * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS + * operation going in the reverse direction + * @env: CPU env + * @ptr: *end* address of memory region (dirty pointer) + * @size: length of region (guaranteed not to cross a page boundary) + * @desc: MTEDESC descriptor word (0 means no MTE checks) + * Returns: the size of the region that can be copied without hitting + * an MTE tag failure + * + * Note that we assume that the caller has already checked the TBI + * and TCMA bits with mte_checks_needed() and an MTE check is definitely + * required. + */ +uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size, + uint32_t desc); + +/** + * mte_check_fail: Record an MTE tag check failure + * @env: CPU env + * @desc: MTEDESC descriptor word + * @dirty_ptr: Failing dirty address + * @ra: TCG retaddr + * + * This may never return (if the MTE tag checks are configured to fault). + */ +void mte_check_fail(CPUARMState *env, uint32_t desc, + uint64_t dirty_ptr, uintptr_t ra); + +/** + * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation + * @env: CPU env + * @dirty_ptr: Start address of memory region (dirty pointer) + * @size: length of region (guaranteed not to cross page boundary) + * @desc: MTEDESC descriptor word + */ +void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size, + uint32_t desc); + static inline int allocation_tag_from_addr(uint64_t ptr) { return extract64(ptr, 56, 4); @@ -1229,6 +1557,7 @@ enum MVEECIState { /* Definitions for the PMU registers */ #define PMCRN_MASK 0xf800 #define PMCRN_SHIFT 11 +#define PMCRLP 0x80 #define PMCRLC 0x40 #define PMCRDP 0x20 #define PMCRX 0x10 @@ -1237,10 +1566,10 @@ enum MVEECIState { #define PMCRP 0x2 #define PMCRE 0x1 /* - * Mask of PMCR bits writeable by guest (not including WO bits like C, P, + * Mask of PMCR bits writable by guest (not including WO bits like C, P, * which can be written as 1 to trigger behaviour but which stay RAZ). 
@@ -1261,13 +1590,153 @@ static inline uint32_t pmu_num_counters(CPUARMState *env)
 {
-    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
+    ARMCPU *cpu = env_archcpu(env);
+
+    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
 }
 
 /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
 static inline uint64_t pmu_counter_mask(CPUARMState *env)
 {
-    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
+    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
+}
+
+#ifdef TARGET_AARCH64
+GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
+int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
+int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
+int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg);
+int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg);
+int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg);
+int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg);
+void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
+void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
+void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
+void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
+void aarch64_max_tcg_initfn(Object *obj);
+void aarch64_add_pauth_properties(Object *obj);
+void aarch64_add_sve_properties(Object *obj);
+void aarch64_add_sme_properties(Object *obj);
+#endif
+
+/* Read the CONTROL register as the MRS instruction would. */
+uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);
+
+/*
+ * Return a pointer to the location where we currently store the
+ * stack pointer for the requested security state and thread mode.
+ * This pointer will become invalid if the CPU state is updated
+ * such that the stack pointers are switched around (eg changing
+ * the SPSEL control bit).
+ */
+uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
+                             bool threadmode, bool spsel);
+
+bool el_is_in_host(CPUARMState *env, int el);
+
+void aa32_max_features(ARMCPU *cpu);
+int exception_target_el(CPUARMState *env);
+bool arm_singlestep_active(CPUARMState *env);
+bool arm_generate_debug_exceptions(CPUARMState *env);
+
+/**
+ * pauth_ptr_mask:
+ * @param: parameters defining the MMU setup
+ *
+ * Return a mask of the address bits that contain the authentication code,
+ * given the MMU config defined by @param.
+ */
+static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
+{
+    int bot_pac_bit = 64 - param.tsz;
+    int top_pac_bit = 64 - 8 * param.tbi;
+
+    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
+}
+
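A worked example of pauth_ptr_mask(): with a 39-bit VA range and top-byte-ignore enabled, the PAC occupies bits [55:39]:

    ARMVAParameters param = { .tsz = 25, .tbi = 1 };
    uint64_t mask = pauth_ptr_mask(param);
    /* bot = 64 - 25 = 39, top = 64 - 8 = 56: mask == MAKE_64BIT_MASK(39, 17) */
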
+ */ +static inline bool arm_fgt_active(CPUARMState *env, int el) +{ + /* + * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps + * that can affect EL0, but it is harmless to do the test also for + * traps on registers that are only accessible at EL1 because if the test + * returns true then we can't be executing at EL1 anyway. + * FGT traps only happen when EL2 is enabled and EL1 is AArch64; + * traps from AArch32 only happen for the EL0 is AArch32 case. + */ + return cpu_isar_feature(aa64_fgt, env_archcpu(env)) && + el < 2 && arm_is_el2_enabled(env) && + arm_el_is_aa64(env, 1) && + (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) && + (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN)); +} + +void assert_hflags_rebuild_correctly(CPUARMState *env); + +/* + * Although the ARM implementation of hardware assisted debugging + * allows for different breakpoints per-core, the current GDB + * interface treats them as a global pool of registers (which seems to + * be the case for x86, ppc and s390). As a result we store one copy + * of registers which is used for all active cores. + * + * Write access is serialised by virtue of the GDB protocol which + * updates things. Read access (i.e. when the values are copied to the + * vCPU) is also gated by GDB's run control. + * + * This is not unreasonable as most of the time debugging kernels you + * never know which core will eventually execute your function. + */ + +typedef struct { + uint64_t bcr; + uint64_t bvr; +} HWBreakpoint; + +/* + * The watchpoint registers can cover more area than the requested + * watchpoint so we need to store the additional information + * somewhere. We also need to supply a CPUWatchpoint to the GDB stub + * when the watchpoint is hit. + */ +typedef struct { + uint64_t wcr; + uint64_t wvr; + CPUWatchpoint details; +} HWWatchpoint; + +/* Maximum and current break/watch point counts */ +extern int max_hw_bps, max_hw_wps; +extern GArray *hw_breakpoints, *hw_watchpoints; + +#define cur_hw_wps (hw_watchpoints->len) +#define cur_hw_bps (hw_breakpoints->len) +#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i)) +#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i)) + +bool find_hw_breakpoint(CPUState *cpu, target_ulong pc); +int insert_hw_breakpoint(target_ulong pc); +int delete_hw_breakpoint(target_ulong pc); + +bool check_watchpoint_in_range(int i, target_ulong addr); +CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr); +int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type); +int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type); #endif |