author     Peter Maydell <peter.maydell@linaro.org>   2014-05-28 15:00:33 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2014-05-28 15:00:33 +0100
commit     052367ba8573809957f1abd288f79fbbda05a284
tree       2ec28baee6f44534fe3cb19518227ecb43d4ce03
parent     adbfc34103f05a2137e9f00c422393918b04bd96
parent     a1ba125c0ca64b604484ddc104e533546d92088a
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20140527' into staging
target-arm:
 * Preliminary restructuring for EL2/EL3 support
 * improve CPACR handling
 * fix pxa2xx_lcd palette formats
 * update highbank/midway maintainer

# gpg: Signature made Tue 27 May 2014 17:26:27 BST using RSA key ID 14360CDE
# gpg: Can't check signature: public key not found

* remotes/pmaydell/tags/pull-target-arm-20140527: (26 commits)
  target-arm: A64: Register VBAR_EL3
  target-arm: A64: Register VBAR_EL2
  target-arm: Make vbar_write writeback to any CPREG
  target-arm: A64: Generalize update_spsel for the various ELs
  target-arm: A64: Generalize ERET to various ELs
  target-arm: A64: Trap ERET from EL0 at translation time
  target-arm: A64: Forbid ERET to higher or unimplemented ELs
  target-arm: Register EL3 versions of ELR and SPSR
  target-arm: Register EL2 versions of ELR and SPSR
  target-arm: Add a feature flag for EL3
  target-arm: Add a feature flag for EL2
  target-arm: A64: Introduce aarch64_banked_spsr_index()
  target-arm: Add SPSR entries for EL2/HYP and EL3/MON
  target-arm: A64: Add ELR entries for EL2 and 3
  target-arm: A64: Add SP entries for EL2 and 3
  target-arm: c12_vbar -> vbar_el[]
  target-arm: Make esr_el1 an array
  target-arm: Make elr_el1 an array
  target-arm: Use a 1:1 mapping between EL and MMU index
  target-arm: A32: Use get_mem_index for load/stores
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--  MAINTAINERS                 |    4
-rw-r--r--  hw/display/pxa2xx_lcd.c     |   16
-rw-r--r--  target-arm/cpu.h            |   22
-rw-r--r--  target-arm/helper-a64.c     |   12
-rw-r--r--  target-arm/helper.c         |  113
-rw-r--r--  target-arm/internals.h      |   25
-rw-r--r--  target-arm/kvm64.c          |    4
-rw-r--r--  target-arm/machine.c        |   10
-rw-r--r--  target-arm/op_helper.c      |   20
-rw-r--r--  target-arm/translate-a64.c  |   13
-rw-r--r--  target-arm/translate.c      |  249
-rw-r--r--  target-arm/translate.h      |    5
12 files changed, 305 insertions, 188 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 1de05f0525..51a6f51842 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -243,8 +243,8 @@ S: Maintained
F: hw/*/exynos*
Calxeda Highbank
-M: Mark Langsdorf <mark.langsdorf@calxeda.com>
-S: Supported
+M: Rob Herring <robh@kernel.org>
+S: Maintained
F: hw/arm/highbank.c
F: hw/net/xgmac.c
diff --git a/hw/display/pxa2xx_lcd.c b/hw/display/pxa2xx_lcd.c
index 80edb70676..611fb174cd 100644
--- a/hw/display/pxa2xx_lcd.c
+++ b/hw/display/pxa2xx_lcd.c
@@ -620,24 +620,24 @@ static void pxa2xx_palette_parse(PXA2xxLCDState *s, int ch, int bpp)
src += 2;
break;
case 1: /* 16 bpp plus transparency */
- alpha = *(uint16_t *) src & (1 << 24);
+ alpha = *(uint32_t *) src & (1 << 24);
if (s->control[0] & LCCR0_CMS)
- r = g = b = *(uint16_t *) src & 0xff;
+ r = g = b = *(uint32_t *) src & 0xff;
else {
- r = (*(uint16_t *) src & 0xf800) >> 8;
- g = (*(uint16_t *) src & 0x07e0) >> 3;
- b = (*(uint16_t *) src & 0x001f) << 3;
+ r = (*(uint32_t *) src & 0xf80000) >> 16;
+ g = (*(uint32_t *) src & 0x00fc00) >> 8;
+ b = (*(uint32_t *) src & 0x0000f8);
}
- src += 2;
+ src += 4;
break;
case 2: /* 18 bpp plus transparency */
alpha = *(uint32_t *) src & (1 << 24);
if (s->control[0] & LCCR0_CMS)
r = g = b = *(uint32_t *) src & 0xff;
else {
- r = (*(uint32_t *) src & 0xf80000) >> 16;
+ r = (*(uint32_t *) src & 0xfc0000) >> 16;
g = (*(uint32_t *) src & 0x00fc00) >> 8;
- b = (*(uint32_t *) src & 0x0000f8);
+ b = (*(uint32_t *) src & 0x0000fc);
}
src += 4;
break;
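
The hunk above reads each "16 bpp plus transparency" palette entry as a 32-bit word, with RGB565 packed in bits 23:8 and the transparency bit at bit 24, and advances the source pointer by four bytes instead of two. A minimal standalone sketch of that decode (illustrative names, not QEMU code):

#include <stdint.h>

/* Sketch of the corrected format-1 decode: the entry is 32 bits wide,
 * R5/G6/B5 live in bits 23:8, the transparency bit is bit 24. */
static void decode_pal_16bpp_transp(uint32_t entry,
                                    unsigned *r, unsigned *g, unsigned *b,
                                    unsigned *alpha)
{
    *alpha = entry & (1u << 24);     /* transparency bit       */
    *r = (entry & 0xf80000) >> 16;   /* 5 bits of red          */
    *g = (entry & 0x00fc00) >> 8;    /* 6 bits of green        */
    *b = (entry & 0x0000f8);         /* 5 bits of blue         */
    /* the caller now steps src by 4 bytes per entry, not 2 */
}
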
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index c83f2495a8..8d04385261 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -143,7 +143,7 @@ typedef struct CPUARMState {
uint32_t spsr;
/* Banked registers. */
- uint64_t banked_spsr[6];
+ uint64_t banked_spsr[8];
uint32_t banked_r13[6];
uint32_t banked_r14[6];
@@ -162,8 +162,8 @@ typedef struct CPUARMState {
uint32_t condexec_bits; /* IT bits. cpsr[15:10,26:25]. */
uint64_t daif; /* exception masks, in the bits they are in in PSTATE */
- uint64_t elr_el1; /* AArch64 ELR_EL1 */
- uint64_t sp_el[2]; /* AArch64 banked stack pointers */
+ uint64_t elr_el[4]; /* AArch64 exception link regs */
+ uint64_t sp_el[4]; /* AArch64 banked stack pointers */
/* System control coprocessor (cp15) */
struct {
@@ -185,7 +185,7 @@ typedef struct CPUARMState {
uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
uint32_t ifsr_el2; /* Fault status registers. */
- uint64_t esr_el1;
+ uint64_t esr_el[2];
uint32_t c6_region[8]; /* MPU base/size registers. */
uint64_t far_el1; /* Fault address registers. */
uint64_t par_el1; /* Translation result. */
@@ -198,7 +198,7 @@ typedef struct CPUARMState {
uint32_t c9_pmuserenr; /* perf monitor user enable */
uint32_t c9_pminten; /* perf monitor interrupt enables */
uint64_t mair_el1;
- uint64_t c12_vbar; /* vector base address register */
+ uint64_t vbar_el[4]; /* vector base address register */
uint32_t c13_fcse; /* FCSE PID. */
uint64_t contextidr_el1; /* Context ID. */
uint64_t tpidr_el0; /* User RW Thread register. */
@@ -563,7 +563,9 @@ enum arm_cpu_mode {
ARM_CPU_MODE_FIQ = 0x11,
ARM_CPU_MODE_IRQ = 0x12,
ARM_CPU_MODE_SVC = 0x13,
+ ARM_CPU_MODE_MON = 0x16,
ARM_CPU_MODE_ABT = 0x17,
+ ARM_CPU_MODE_HYP = 0x1a,
ARM_CPU_MODE_UND = 0x1b,
ARM_CPU_MODE_SYS = 0x1f
};
@@ -631,6 +633,8 @@ enum arm_features {
ARM_FEATURE_CBAR, /* has cp15 CBAR */
ARM_FEATURE_CRC, /* ARMv8 CRC instructions */
ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */
+ ARM_FEATURE_EL2, /* has EL2 Virtualization support */
+ ARM_FEATURE_EL3, /* has EL3 Secure monitor support */
};
static inline int arm_feature(CPUARMState *env, int feature)
@@ -1080,12 +1084,12 @@ static inline CPUARMState *cpu_init(const char *cpu_model)
#define cpu_list arm_cpu_list
/* MMU modes definitions */
-#define MMU_MODE0_SUFFIX _kernel
-#define MMU_MODE1_SUFFIX _user
-#define MMU_USER_IDX 1
+#define MMU_MODE0_SUFFIX _user
+#define MMU_MODE1_SUFFIX _kernel
+#define MMU_USER_IDX 0
static inline int cpu_mmu_index (CPUARMState *env)
{
- return arm_current_pl(env) ? 0 : 1;
+ return arm_current_pl(env);
}
#include "exec/cpu-all.h"
diff --git a/target-arm/helper-a64.c b/target-arm/helper-a64.c
index bf921ccdc0..b970fd1d69 100644
--- a/target-arm/helper-a64.c
+++ b/target-arm/helper-a64.c
@@ -443,7 +443,7 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
- target_ulong addr = env->cp15.c12_vbar;
+ target_ulong addr = env->cp15.vbar_el[1];
int i;
if (arm_current_pl(env) == 0) {
@@ -464,7 +464,7 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
env->exception.syndrome);
}
- env->cp15.esr_el1 = env->exception.syndrome;
+ env->cp15.esr_el[1] = env->exception.syndrome;
env->cp15.far_el1 = env->exception.vaddress;
switch (cs->exception_index) {
@@ -488,16 +488,16 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
}
if (is_a64(env)) {
- env->banked_spsr[0] = pstate_read(env);
+ env->banked_spsr[aarch64_banked_spsr_index(1)] = pstate_read(env);
env->sp_el[arm_current_pl(env)] = env->xregs[31];
env->xregs[31] = env->sp_el[1];
- env->elr_el1 = env->pc;
+ env->elr_el[1] = env->pc;
} else {
env->banked_spsr[0] = cpsr_read(env);
if (!env->thumb) {
- env->cp15.esr_el1 |= 1 << 25;
+ env->cp15.esr_el[1] |= 1 << 25;
}
- env->elr_el1 = env->regs[15];
+ env->elr_el[1] = env->regs[15];
for (i = 0; i < 15; i++) {
env->xregs[i] = env->regs[i];
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 417161e216..6a01c6a82a 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -477,11 +477,35 @@ static const ARMCPRegInfo not_v7_cp_reginfo[] = {
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
- if (env->cp15.c1_coproc != value) {
- env->cp15.c1_coproc = value;
- /* ??? Is this safe when called from within a TB? */
- tb_flush(env);
+ uint32_t mask = 0;
+
+ /* In ARMv8 most bits of CPACR_EL1 are RES0. */
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
+ * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
+ * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
+ */
+ if (arm_feature(env, ARM_FEATURE_VFP)) {
+ /* VFP coprocessor: cp10 & cp11 [23:20] */
+ mask |= (1 << 31) | (1 << 30) | (0xf << 20);
+
+ if (!arm_feature(env, ARM_FEATURE_NEON)) {
+ /* ASEDIS [31] bit is RAO/WI */
+ value |= (1 << 31);
+ }
+
+ /* VFPv3 and upwards with NEON implement 32 double precision
+ * registers (D0-D31).
+ */
+ if (!arm_feature(env, ARM_FEATURE_NEON) ||
+ !arm_feature(env, ARM_FEATURE_VFP3)) {
+ /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
+ value |= (1 << 30);
+ }
+ }
+ value &= mask;
}
+ env->cp15.c1_coproc = value;
}
static const ARMCPRegInfo v6_cp_reginfo[] = {
@@ -657,7 +681,7 @@ static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
* contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
* requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
*/
- env->cp15.c12_vbar = value & ~0x1FULL;
+ raw_write(env, ri, value & ~0x1FULL);
}
static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
@@ -766,7 +790,7 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
{ .name = "VBAR", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .writefn = vbar_write,
- .fieldoffset = offsetof(CPUARMState, cp15.c12_vbar),
+ .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[1]),
.resetvalue = 0 },
{ .name = "SCR", .cp = 15, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_scr),
@@ -1452,7 +1476,7 @@ static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
{ .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el1),
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
.resetfn = arm_cp_reset_ignore, },
{ .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
.access = PL1_RW,
@@ -1460,7 +1484,7 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
{ .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
.access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.esr_el1), .resetvalue = 0, },
+ .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
{ .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
@@ -1521,7 +1545,7 @@ static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
static const ARMCPRegInfo omap_cp_reginfo[] = {
{ .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
.opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el1),
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
.resetvalue = 0, },
{ .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .type = ARM_CP_NOP },
@@ -2055,7 +2079,8 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
{ .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_NO_MIGRATE,
.opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
- .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, elr_el1) },
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
{ .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_NO_MIGRATE,
.opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
@@ -2076,6 +2101,51 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
REGINFO_SENTINEL
};
+/* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
+static const ARMCPRegInfo v8_el3_no_el2_cp_reginfo[] = {
+ { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
+ .access = PL2_RW,
+ .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo v8_el2_cp_reginfo[] = {
+ { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_NO_MIGRATE,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
+ { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_NO_MIGRATE,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[6]) },
+ { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .writefn = vbar_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
+ .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo v8_el3_cp_reginfo[] = {
+ { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_NO_MIGRATE,
+ .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
+ .access = PL3_RW,
+ .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
+ { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_NO_MIGRATE,
+ .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
+ .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[7]) },
+ { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
+ .access = PL3_RW, .writefn = vbar_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
+ .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -2327,6 +2397,19 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_arm_cp_regs(cpu, v8_cp_reginfo);
define_aarch64_debug_regs(cpu);
}
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ define_arm_cp_regs(cpu, v8_el2_cp_reginfo);
+ } else {
+ /* If EL2 is missing but higher ELs are enabled, we need to
+ * register the no_el2 reginfos.
+ */
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ define_arm_cp_regs(cpu, v8_el3_no_el2_cp_reginfo);
+ }
+ }
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ define_arm_cp_regs(cpu, v8_el3_cp_reginfo);
+ }
if (arm_feature(env, ARM_FEATURE_MPU)) {
/* These are the MPU registers prior to PMSAv6. Any new
* PMSA core later than the ARM946 will require that we
@@ -3083,6 +3166,10 @@ int bank_number(int mode)
return 4;
case ARM_CPU_MODE_FIQ:
return 5;
+ case ARM_CPU_MODE_HYP:
+ return 6;
+ case ARM_CPU_MODE_MON:
+ return 7;
}
hw_error("bank number requested for bad CPSR mode value 0x%x\n", mode);
}
@@ -3337,11 +3424,11 @@ void arm_cpu_do_interrupt(CPUState *cs)
offset = 4;
break;
case EXCP_DATA_ABORT:
- env->cp15.esr_el1 = env->exception.fsr;
+ env->cp15.esr_el[1] = env->exception.fsr;
env->cp15.far_el1 = deposit64(env->cp15.far_el1, 0, 32,
env->exception.vaddress);
qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
- (uint32_t)env->cp15.esr_el1,
+ (uint32_t)env->cp15.esr_el[1],
(uint32_t)env->exception.vaddress);
new_mode = ARM_CPU_MODE_ABT;
addr = 0x10;
@@ -3378,7 +3465,7 @@ void arm_cpu_do_interrupt(CPUState *cs)
* and is never in monitor mode this feature is always active.
* Note: only bits 31:5 are valid.
*/
- addr += env->cp15.c12_vbar;
+ addr += env->cp15.vbar_el[1];
}
switch_mode (env, new_mode);
env->spsr = cpsr_read(env);
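
Two points in the helper.c changes above are worth spelling out: vbar_write now stores through the reginfo's field offset via raw_write(), so one writefn can back VBAR_EL1/EL2/EL3, and the new EL2/EL3 register banks are only registered when the corresponding feature flags are set. A simplified sketch of the writefn pattern, using stand-in types rather than QEMU's real ARMCPRegInfo:

#include <stddef.h>
#include <stdint.h>

/* Stand-ins for CPUARMState / ARMCPRegInfo, reduced to what the pattern needs. */
typedef struct ExampleEnv { uint64_t vbar_el[4]; } ExampleEnv;
typedef struct ExampleRegInfo { size_t fieldoffset; } ExampleRegInfo;

static void example_raw_write(ExampleEnv *env, const ExampleRegInfo *ri,
                              uint64_t value)
{
    /* write through the per-register field offset instead of a fixed field */
    *(uint64_t *)((char *)env + ri->fieldoffset) = value;
}

static void example_vbar_write(ExampleEnv *env, const ExampleRegInfo *ri,
                               uint64_t value)
{
    example_raw_write(env, ri, value & ~0x1FULL);  /* bits [4:0] are RAZ/WI */
}

/* The same writefn then serves registers that differ only in their offset: */
static const ExampleRegInfo example_vbar_el1 = {
    .fieldoffset = offsetof(ExampleEnv, vbar_el[1]),
};
static const ExampleRegInfo example_vbar_el2 = {
    .fieldoffset = offsetof(ExampleEnv, vbar_el[2]),
};
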
diff --git a/target-arm/internals.h b/target-arm/internals.h
index d63a975a7e..564b5fa602 100644
--- a/target-arm/internals.h
+++ b/target-arm/internals.h
@@ -75,6 +75,20 @@ static inline void arm_log_exception(int idx)
*/
#define GTIMER_SCALE 16
+/*
+ * For AArch64, map a given EL to an index in the banked_spsr array.
+ */
+static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
+{
+ static const unsigned int map[4] = {
+ [1] = 0, /* EL1. */
+ [2] = 6, /* EL2. */
+ [3] = 7, /* EL3. */
+ };
+ assert(el >= 1 && el <= 3);
+ return map[el];
+}
+
int bank_number(int mode);
void switch_mode(CPUARMState *, int);
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
@@ -93,6 +107,7 @@ int arm_rmode_to_sf(int rmode);
static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
+ unsigned int cur_el = arm_current_pl(env);
/* Update PSTATE SPSel bit; this requires us to update the
* working stack pointer in xregs[31].
*/
@@ -101,17 +116,17 @@ static inline void update_spsel(CPUARMState *env, uint32_t imm)
}
env->pstate = deposit32(env->pstate, 0, 1, imm);
- /* EL0 has no access rights to update SPSel, and this code
- * assumes we are updating SP for EL1 while running as EL1.
+ /* We rely on illegal updates to SPsel from EL0 to get trapped
+ * at translation time.
*/
- assert(arm_current_pl(env) == 1);
+ assert(cur_el >= 1 && cur_el <= 3);
if (env->pstate & PSTATE_SP) {
/* Switch from using SP_EL0 to using SP_ELx */
env->sp_el[0] = env->xregs[31];
- env->xregs[31] = env->sp_el[1];
+ env->xregs[31] = env->sp_el[cur_el];
} else {
/* Switch from SP_EL0 to SP_ELx */
- env->sp_el[1] = env->xregs[31];
+ env->sp_el[cur_el] = env->xregs[31];
env->xregs[31] = env->sp_el[0];
}
}
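
A short usage sketch of aarch64_banked_spsr_index() as introduced above, mirroring the helper-a64.c and op_helper.c hunks in this merge (simplified environment type, not QEMU code): EL1 keeps its existing slot 0, while EL2 and EL3 land in the two new slots gained by growing banked_spsr[] from 6 to 8 entries.

#include <stdint.h>

typedef struct ExampleEnv {
    uint64_t banked_spsr[8];   /* grown from 6 to 8 entries in this series */
    uint32_t pstate;
} ExampleEnv;

/* same mapping as the helper defined in target-arm/internals.h above */
static unsigned int spsr_index_sketch(unsigned int el)
{
    static const unsigned int map[4] = { [1] = 0, [2] = 6, [3] = 7 };
    return map[el];
}

/* exception entry: save PSTATE into the target EL's SPSR bank */
static void save_spsr_sketch(ExampleEnv *env, unsigned int target_el)
{
    env->banked_spsr[spsr_index_sketch(target_el)] = env->pstate;
}

/* ERET: read back the SPSR banked for the EL we are returning from */
static uint32_t read_spsr_sketch(ExampleEnv *env, unsigned int cur_el)
{
    return (uint32_t)env->banked_spsr[spsr_index_sketch(cur_el)];
}
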
diff --git a/target-arm/kvm64.c b/target-arm/kvm64.c
index c729b9ec9f..70f311bed6 100644
--- a/target-arm/kvm64.c
+++ b/target-arm/kvm64.c
@@ -161,7 +161,7 @@ int kvm_arch_put_registers(CPUState *cs, int level)
}
reg.id = AARCH64_CORE_REG(elr_el1);
- reg.addr = (uintptr_t) &env->elr_el1;
+ reg.addr = (uintptr_t) &env->elr_el[1];
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret) {
return ret;
@@ -241,7 +241,7 @@ int kvm_arch_get_registers(CPUState *cs)
}
reg.id = AARCH64_CORE_REG(elr_el1);
- reg.addr = (uintptr_t) &env->elr_el1;
+ reg.addr = (uintptr_t) &env->elr_el[1];
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
if (ret) {
return ret;
diff --git a/target-arm/machine.c b/target-arm/machine.c
index 5092dcda79..3bcc7cc833 100644
--- a/target-arm/machine.c
+++ b/target-arm/machine.c
@@ -218,8 +218,8 @@ static int cpu_post_load(void *opaque, int version_id)
const VMStateDescription vmstate_arm_cpu = {
.name = "cpu",
- .version_id = 17,
- .minimum_version_id = 17,
+ .version_id = 20,
+ .minimum_version_id = 20,
.pre_save = cpu_pre_save,
.post_load = cpu_post_load,
.fields = (VMStateField[]) {
@@ -233,13 +233,13 @@ const VMStateDescription vmstate_arm_cpu = {
.offset = 0,
},
VMSTATE_UINT32(env.spsr, ARMCPU),
- VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 6),
+ VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 6),
VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 6),
VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
- VMSTATE_UINT64(env.elr_el1, ARMCPU),
- VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 2),
+ VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
+ VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 4),
/* The length-check must come before the arrays to avoid
* incoming data possibly overflowing the array.
*/
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index fb90676bd5..50a4157acd 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -386,11 +386,13 @@ void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
void HELPER(exception_return)(CPUARMState *env)
{
- uint32_t spsr = env->banked_spsr[0];
+ int cur_el = arm_current_pl(env);
+ unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
+ uint32_t spsr = env->banked_spsr[spsr_idx];
int new_el, i;
if (env->pstate & PSTATE_SP) {
- env->sp_el[1] = env->xregs[31];
+ env->sp_el[cur_el] = env->xregs[31];
} else {
env->sp_el[0] = env->xregs[31];
}
@@ -398,6 +400,7 @@ void HELPER(exception_return)(CPUARMState *env)
env->exclusive_addr = -1;
if (spsr & PSTATE_nRW) {
+ /* TODO: We currently assume EL1/2/3 are running in AArch64. */
env->aarch64 = 0;
new_el = 0;
env->uncached_cpsr = 0x10;
@@ -406,11 +409,14 @@ void HELPER(exception_return)(CPUARMState *env)
env->regs[i] = env->xregs[i];
}
- env->regs[15] = env->elr_el1 & ~0x1;
+ env->regs[15] = env->elr_el[1] & ~0x1;
} else {
new_el = extract32(spsr, 2, 2);
- if (new_el > 1) {
- /* Return to unimplemented EL */
+ if (new_el > cur_el
+ || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
+ /* Disallow return to an EL which is unimplemented or higher
+ * than the current one.
+ */
goto illegal_return;
}
if (extract32(spsr, 1, 1)) {
@@ -424,7 +430,7 @@ void HELPER(exception_return)(CPUARMState *env)
env->aarch64 = 1;
pstate_write(env, spsr);
env->xregs[31] = env->sp_el[new_el];
- env->pc = env->elr_el1;
+ env->pc = env->elr_el[cur_el];
}
return;
@@ -438,7 +444,7 @@ illegal_return:
* no change to exception level, execution state or stack pointer
*/
env->pstate |= PSTATE_IL;
- env->pc = env->elr_el1;
+ env->pc = env->elr_el[cur_el];
spsr &= PSTATE_NZCV | PSTATE_DAIF;
spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
pstate_write(env, spsr);
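
The exception_return changes above tighten the ERET target check: the EL encoded in SPSR may not be above the current EL, and EL2 is only a valid target when the EL2 feature exists. A compact sketch of just that check (has_el2 stands in for arm_feature(env, ARM_FEATURE_EL2); not QEMU code):

#include <stdbool.h>
#include <stdint.h>

static bool eret_target_el_is_legal(unsigned int cur_el, uint32_t spsr,
                                    bool has_el2)
{
    unsigned int new_el = (spsr >> 2) & 3;   /* extract32(spsr, 2, 2) */

    if (new_el > cur_el) {
        return false;                        /* no return to a higher EL */
    }
    if (new_el == 2 && !has_el2) {
        return false;                        /* EL2 not implemented */
    }
    return true;                             /* otherwise take the return */
}
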
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index b62db4d566..ec6a39d1d6 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -162,15 +162,6 @@ void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
}
}
-static int get_mem_index(DisasContext *s)
-{
-#ifdef CONFIG_USER_ONLY
- return 1;
-#else
- return s->user;
-#endif
-}
-
void gen_a64_set_pc_im(uint64_t val)
{
tcg_gen_movi_i64(cpu_pc, val);
@@ -1516,6 +1507,10 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
break;
case 4: /* ERET */
+ if (s->current_pl == 0) {
+ unallocated_encoding(s);
+ return;
+ }
gen_helper_exception_return(cpu_env);
s->is_jmp = DISAS_JUMP;
return;
diff --git a/target-arm/translate.c b/target-arm/translate.c
index a4d920b629..c2dfbfe477 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -1165,18 +1165,18 @@ VFP_GEN_FIX(ulto, )
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
if (dp) {
- gen_aa32_ld64(cpu_F0d, addr, IS_USER(s));
+ gen_aa32_ld64(cpu_F0d, addr, get_mem_index(s));
} else {
- gen_aa32_ld32u(cpu_F0s, addr, IS_USER(s));
+ gen_aa32_ld32u(cpu_F0s, addr, get_mem_index(s));
}
}
static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
if (dp) {
- gen_aa32_st64(cpu_F0d, addr, IS_USER(s));
+ gen_aa32_st64(cpu_F0d, addr, get_mem_index(s));
} else {
- gen_aa32_st32(cpu_F0s, addr, IS_USER(s));
+ gen_aa32_st32(cpu_F0s, addr, get_mem_index(s));
}
}
@@ -1514,24 +1514,24 @@ static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
if (insn & ARM_CP_RW_BIT) {
if ((insn >> 28) == 0xf) { /* WLDRW wCx */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
iwmmxt_store_creg(wrd, tmp);
} else {
i = 1;
if (insn & (1 << 8)) {
if (insn & (1 << 22)) { /* WLDRD */
- gen_aa32_ld64(cpu_M0, addr, IS_USER(s));
+ gen_aa32_ld64(cpu_M0, addr, get_mem_index(s));
i = 0;
} else { /* WLDRW wRd */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
}
} else {
tmp = tcg_temp_new_i32();
if (insn & (1 << 22)) { /* WLDRH */
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
} else { /* WLDRB */
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
}
}
if (i) {
@@ -1543,24 +1543,24 @@ static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
} else {
if ((insn >> 28) == 0xf) { /* WSTRW wCx */
tmp = iwmmxt_load_creg(wrd);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
} else {
gen_op_iwmmxt_movq_M0_wRn(wrd);
tmp = tcg_temp_new_i32();
if (insn & (1 << 8)) {
if (insn & (1 << 22)) { /* WSTRD */
- gen_aa32_st64(cpu_M0, addr, IS_USER(s));
+ gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
} else { /* WSTRW wRd */
tcg_gen_trunc_i64_i32(tmp, cpu_M0);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
}
} else {
if (insn & (1 << 22)) { /* WSTRH */
tcg_gen_trunc_i64_i32(tmp, cpu_M0);
- gen_aa32_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, get_mem_index(s));
} else { /* WSTRB */
tcg_gen_trunc_i64_i32(tmp, cpu_M0);
- gen_aa32_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, get_mem_index(s));
}
}
}
@@ -2625,15 +2625,15 @@ static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
TCGv_i32 tmp = tcg_temp_new_i32();
switch (size) {
case 0:
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
gen_neon_dup_u8(tmp, 0);
break;
case 1:
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
gen_neon_dup_low16(tmp);
break;
case 2:
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
break;
default: /* Avoid compiler warnings. */
abort();
@@ -4304,11 +4304,11 @@ static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
if (size == 3) {
tmp64 = tcg_temp_new_i64();
if (load) {
- gen_aa32_ld64(tmp64, addr, IS_USER(s));
+ gen_aa32_ld64(tmp64, addr, get_mem_index(s));
neon_store_reg64(tmp64, rd);
} else {
neon_load_reg64(tmp64, rd);
- gen_aa32_st64(tmp64, addr, IS_USER(s));
+ gen_aa32_st64(tmp64, addr, get_mem_index(s));
}
tcg_temp_free_i64(tmp64);
tcg_gen_addi_i32(addr, addr, stride);
@@ -4317,21 +4317,21 @@ static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
if (size == 2) {
if (load) {
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
neon_store_reg(rd, pass, tmp);
} else {
tmp = neon_load_reg(rd, pass);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_gen_addi_i32(addr, addr, stride);
} else if (size == 1) {
if (load) {
tmp = tcg_temp_new_i32();
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
tcg_gen_addi_i32(addr, addr, stride);
tmp2 = tcg_temp_new_i32();
- gen_aa32_ld16u(tmp2, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp2, addr, get_mem_index(s));
tcg_gen_addi_i32(addr, addr, stride);
tcg_gen_shli_i32(tmp2, tmp2, 16);
tcg_gen_or_i32(tmp, tmp, tmp2);
@@ -4341,10 +4341,10 @@ static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
tmp = neon_load_reg(rd, pass);
tmp2 = tcg_temp_new_i32();
tcg_gen_shri_i32(tmp2, tmp, 16);
- gen_aa32_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
tcg_gen_addi_i32(addr, addr, stride);
- gen_aa32_st16(tmp2, addr, IS_USER(s));
+ gen_aa32_st16(tmp2, addr, get_mem_index(s));
tcg_temp_free_i32(tmp2);
tcg_gen_addi_i32(addr, addr, stride);
}
@@ -4353,7 +4353,7 @@ static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
TCGV_UNUSED_I32(tmp2);
for (n = 0; n < 4; n++) {
tmp = tcg_temp_new_i32();
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
tcg_gen_addi_i32(addr, addr, stride);
if (n == 0) {
tmp2 = tmp;
@@ -4373,7 +4373,7 @@ static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
} else {
tcg_gen_shri_i32(tmp, tmp2, n * 8);
}
- gen_aa32_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
tcg_gen_addi_i32(addr, addr, stride);
}
@@ -4497,13 +4497,13 @@ static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
tmp = tcg_temp_new_i32();
switch (size) {
case 0:
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
break;
case 1:
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
break;
case 2:
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
break;
default: /* Avoid compiler warnings. */
abort();
@@ -4521,13 +4521,13 @@ static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
tcg_gen_shri_i32(tmp, tmp, shift);
switch (size) {
case 0:
- gen_aa32_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, get_mem_index(s));
break;
case 1:
- gen_aa32_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, get_mem_index(s));
break;
case 2:
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
break;
}
tcg_temp_free_i32(tmp);
@@ -7173,14 +7173,14 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
switch (size) {
case 0:
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
break;
case 1:
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
break;
case 2:
case 3:
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
break;
default:
abort();
@@ -7191,7 +7191,7 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
TCGv_i32 tmp3 = tcg_temp_new_i32();
tcg_gen_addi_i32(tmp2, addr, 4);
- gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
+ gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
tcg_temp_free_i32(tmp2);
tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
store_reg(s, rt2, tmp3);
@@ -7242,14 +7242,14 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
tmp = tcg_temp_new_i32();
switch (size) {
case 0:
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
break;
case 1:
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
break;
case 2:
case 3:
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
break;
default:
abort();
@@ -7260,7 +7260,7 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
TCGv_i32 tmp2 = tcg_temp_new_i32();
TCGv_i32 tmp3 = tcg_temp_new_i32();
tcg_gen_addi_i32(tmp2, addr, 4);
- gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
+ gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
tcg_temp_free_i32(tmp2);
tcg_gen_concat_i32_i64(val64, tmp, tmp3);
tcg_temp_free_i32(tmp3);
@@ -7275,14 +7275,14 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
tmp = load_reg(s, rt);
switch (size) {
case 0:
- gen_aa32_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, get_mem_index(s));
break;
case 1:
- gen_aa32_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, get_mem_index(s));
break;
case 2:
case 3:
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
break;
default:
abort();
@@ -7291,7 +7291,7 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
if (size == 3) {
tcg_gen_addi_i32(addr, addr, 4);
tmp = load_reg(s, rt2);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_gen_movi_i32(cpu_R[rd], 0);
@@ -7338,11 +7338,11 @@ static void gen_srs(DisasContext *s,
}
tcg_gen_addi_i32(addr, addr, offset);
tmp = load_reg(s, 14);
- gen_aa32_st32(tmp, addr, 0);
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
tmp = load_cpu_field(spsr);
tcg_gen_addi_i32(addr, addr, 4);
- gen_aa32_st32(tmp, addr, 0);
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
if (writeback) {
switch (amode) {
@@ -7495,10 +7495,10 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
tcg_gen_addi_i32(addr, addr, offset);
/* Load PC into tmp and CPSR into tmp2. */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, 0);
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
tcg_gen_addi_i32(addr, addr, 4);
tmp2 = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp2, addr, 0);
+ gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
if (insn & (1 << 21)) {
/* Base writeback. */
switch (i) {
@@ -8087,13 +8087,13 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
tmp = tcg_temp_new_i32();
switch (op1) {
case 0: /* lda */
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
break;
case 2: /* ldab */
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
break;
case 3: /* ldah */
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
break;
default:
abort();
@@ -8104,13 +8104,13 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
tmp = load_reg(s, rm);
switch (op1) {
case 0: /* stl */
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
break;
case 2: /* stlb */
- gen_aa32_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, get_mem_index(s));
break;
case 3: /* stlh */
- gen_aa32_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, get_mem_index(s));
break;
default:
abort();
@@ -8165,11 +8165,11 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
tmp = load_reg(s, rm);
tmp2 = tcg_temp_new_i32();
if (insn & (1 << 22)) {
- gen_aa32_ld8u(tmp2, addr, IS_USER(s));
- gen_aa32_st8(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp2, addr, get_mem_index(s));
+ gen_aa32_st8(tmp, addr, get_mem_index(s));
} else {
- gen_aa32_ld32u(tmp2, addr, IS_USER(s));
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
}
tcg_temp_free_i32(tmp);
tcg_temp_free_i32(addr);
@@ -8191,14 +8191,14 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
tmp = tcg_temp_new_i32();
switch(sh) {
case 1:
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
break;
case 2:
- gen_aa32_ld8s(tmp, addr, IS_USER(s));
+ gen_aa32_ld8s(tmp, addr, get_mem_index(s));
break;
default:
case 3:
- gen_aa32_ld16s(tmp, addr, IS_USER(s));
+ gen_aa32_ld16s(tmp, addr, get_mem_index(s));
break;
}
load = 1;
@@ -8208,21 +8208,21 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
if (sh & 1) {
/* store */
tmp = load_reg(s, rd);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
tcg_gen_addi_i32(addr, addr, 4);
tmp = load_reg(s, rd + 1);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
load = 0;
} else {
/* load */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
store_reg(s, rd, tmp);
tcg_gen_addi_i32(addr, addr, 4);
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
rd++;
load = 1;
}
@@ -8230,7 +8230,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
} else {
/* store */
tmp = load_reg(s, rd);
- gen_aa32_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
load = 0;
}
@@ -8568,7 +8568,12 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
rn = (insn >> 16) & 0xf;
rd = (insn >> 12) & 0xf;
tmp2 = load_reg(s, rn);
- i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
+ if ((insn & 0x01200000) == 0x00200000) {
+ /* ldrt/strt */
+ i = MMU_USER_IDX;
+ } else {
+ i = get_mem_index(s);
+ }
if (insn & (1 << 24))
gen_add_data_offset(s, insn, tmp2);
if (insn & (1 << 20)) {
@@ -8652,7 +8657,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
if (insn & (1 << 20)) {
/* load */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
if (user) {
tmp2 = tcg_const_i32(i);
gen_helper_set_user_reg(cpu_env, tmp2, tmp);
@@ -8679,7 +8684,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
} else {
tmp = load_reg(s, i);
}
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
j++;
@@ -8945,20 +8950,20 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
if (insn & (1 << 20)) {
/* ldrd */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
store_reg(s, rs, tmp);
tcg_gen_addi_i32(addr, addr, 4);
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
store_reg(s, rd, tmp);
} else {
/* strd */
tmp = load_reg(s, rs);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
tcg_gen_addi_i32(addr, addr, 4);
tmp = load_reg(s, rd);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
if (insn & (1 << 21)) {
@@ -8996,11 +9001,11 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tcg_gen_add_i32(addr, addr, tmp);
tcg_temp_free_i32(tmp);
tmp = tcg_temp_new_i32();
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
} else { /* tbb */
tcg_temp_free_i32(tmp);
tmp = tcg_temp_new_i32();
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
}
tcg_temp_free_i32(addr);
tcg_gen_shli_i32(tmp, tmp, 1);
@@ -9037,13 +9042,13 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tmp = tcg_temp_new_i32();
switch (op) {
case 0: /* ldab */
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
break;
case 1: /* ldah */
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
break;
case 2: /* lda */
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
break;
default:
abort();
@@ -9053,13 +9058,13 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tmp = load_reg(s, rs);
switch (op) {
case 0: /* stlb */
- gen_aa32_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, get_mem_index(s));
break;
case 1: /* stlh */
- gen_aa32_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, get_mem_index(s));
break;
case 2: /* stl */
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
break;
default:
abort();
@@ -9087,10 +9092,10 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tcg_gen_addi_i32(addr, addr, -8);
/* Load PC into tmp and CPSR into tmp2. */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, 0);
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
tcg_gen_addi_i32(addr, addr, 4);
tmp2 = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp2, addr, 0);
+ gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
if (insn & (1 << 21)) {
/* Base writeback. */
if (insn & (1 << 24)) {
@@ -9129,7 +9134,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
if (insn & (1 << 20)) {
/* Load. */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
if (i == 15) {
gen_bx(s, tmp);
} else if (i == rn) {
@@ -9141,7 +9146,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
} else {
/* Store. */
tmp = load_reg(s, i);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_gen_addi_i32(addr, addr, 4);
@@ -9841,7 +9846,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
{
int postinc = 0;
int writeback = 0;
- int user;
+ int memidx;
if ((insn & 0x01100000) == 0x01000000) {
if (disas_neon_ls_insn(env, s, insn))
goto illegal_op;
@@ -9885,7 +9890,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
return 1;
}
}
- user = IS_USER(s);
+ memidx = get_mem_index(s);
if (rn == 15) {
addr = tcg_temp_new_i32();
/* PC relative. */
@@ -9922,7 +9927,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
break;
case 0xe: /* User privilege. */
tcg_gen_addi_i32(addr, addr, imm);
- user = 1;
+ memidx = MMU_USER_IDX;
break;
case 0x9: /* Post-decrement. */
imm = -imm;
@@ -9949,19 +9954,19 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tmp = tcg_temp_new_i32();
switch (op) {
case 0:
- gen_aa32_ld8u(tmp, addr, user);
+ gen_aa32_ld8u(tmp, addr, memidx);
break;
case 4:
- gen_aa32_ld8s(tmp, addr, user);
+ gen_aa32_ld8s(tmp, addr, memidx);
break;
case 1:
- gen_aa32_ld16u(tmp, addr, user);
+ gen_aa32_ld16u(tmp, addr, memidx);
break;
case 5:
- gen_aa32_ld16s(tmp, addr, user);
+ gen_aa32_ld16s(tmp, addr, memidx);
break;
case 2:
- gen_aa32_ld32u(tmp, addr, user);
+ gen_aa32_ld32u(tmp, addr, memidx);
break;
default:
tcg_temp_free_i32(tmp);
@@ -9978,13 +9983,13 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tmp = load_reg(s, rs);
switch (op) {
case 0:
- gen_aa32_st8(tmp, addr, user);
+ gen_aa32_st8(tmp, addr, memidx);
break;
case 1:
- gen_aa32_st16(tmp, addr, user);
+ gen_aa32_st16(tmp, addr, memidx);
break;
case 2:
- gen_aa32_st32(tmp, addr, user);
+ gen_aa32_st32(tmp, addr, memidx);
break;
default:
tcg_temp_free_i32(tmp);
@@ -10121,7 +10126,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
addr = tcg_temp_new_i32();
tcg_gen_movi_i32(addr, val);
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(addr);
store_reg(s, rd, tmp);
break;
@@ -10324,28 +10329,28 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
switch (op) {
case 0: /* str */
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
break;
case 1: /* strh */
- gen_aa32_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, get_mem_index(s));
break;
case 2: /* strb */
- gen_aa32_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, get_mem_index(s));
break;
case 3: /* ldrsb */
- gen_aa32_ld8s(tmp, addr, IS_USER(s));
+ gen_aa32_ld8s(tmp, addr, get_mem_index(s));
break;
case 4: /* ldr */
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
break;
case 5: /* ldrh */
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
break;
case 6: /* ldrb */
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
break;
case 7: /* ldrsh */
- gen_aa32_ld16s(tmp, addr, IS_USER(s));
+ gen_aa32_ld16s(tmp, addr, get_mem_index(s));
break;
}
if (op >= 3) { /* load */
@@ -10367,12 +10372,12 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (insn & (1 << 11)) {
/* load */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
store_reg(s, rd, tmp);
} else {
/* store */
tmp = load_reg(s, rd);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_temp_free_i32(addr);
@@ -10389,12 +10394,12 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (insn & (1 << 11)) {
/* load */
tmp = tcg_temp_new_i32();
- gen_aa32_ld8u(tmp, addr, IS_USER(s));
+ gen_aa32_ld8u(tmp, addr, get_mem_index(s));
store_reg(s, rd, tmp);
} else {
/* store */
tmp = load_reg(s, rd);
- gen_aa32_st8(tmp, addr, IS_USER(s));
+ gen_aa32_st8(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_temp_free_i32(addr);
@@ -10411,12 +10416,12 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (insn & (1 << 11)) {
/* load */
tmp = tcg_temp_new_i32();
- gen_aa32_ld16u(tmp, addr, IS_USER(s));
+ gen_aa32_ld16u(tmp, addr, get_mem_index(s));
store_reg(s, rd, tmp);
} else {
/* store */
tmp = load_reg(s, rd);
- gen_aa32_st16(tmp, addr, IS_USER(s));
+ gen_aa32_st16(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_temp_free_i32(addr);
@@ -10432,12 +10437,12 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (insn & (1 << 11)) {
/* load */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
store_reg(s, rd, tmp);
} else {
/* store */
tmp = load_reg(s, rd);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_temp_free_i32(addr);
@@ -10505,12 +10510,12 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (insn & (1 << 11)) {
/* pop */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
store_reg(s, i, tmp);
} else {
/* push */
tmp = load_reg(s, i);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
/* advance to the next address. */
@@ -10522,13 +10527,13 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (insn & (1 << 11)) {
/* pop pc */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
/* don't set the pc until the rest of the instruction
has completed */
} else {
/* push lr */
tmp = load_reg(s, 14);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_gen_addi_i32(addr, addr, 4);
@@ -10657,7 +10662,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (insn & (1 << 11)) {
/* load */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, IS_USER(s));
+ gen_aa32_ld32u(tmp, addr, get_mem_index(s));
if (i == rn) {
loaded_var = tmp;
} else {
@@ -10666,7 +10671,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
} else {
/* store */
tmp = load_reg(s, i);
- gen_aa32_st32(tmp, addr, IS_USER(s));
+ gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
/* advance to the next address */
@@ -11047,8 +11052,8 @@ void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
}
static const char *cpu_mode_names[16] = {
- "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
- "???", "???", "???", "und", "???", "???", "???", "sys"
+ "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
+ "???", "???", "hyp", "und", "???", "???", "???", "sys"
};
void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
diff --git a/target-arm/translate.h b/target-arm/translate.h
index 34328f4660..31a0104b58 100644
--- a/target-arm/translate.h
+++ b/target-arm/translate.h
@@ -52,6 +52,11 @@ static inline int arm_dc_feature(DisasContext *dc, int feature)
return (dc->features & (1ULL << feature)) != 0;
}
+static inline int get_mem_index(DisasContext *s)
+{
+ return s->current_pl;
+}
+
/* target-specific extra values for is_jmp */
/* These instructions trap after executing, so the A32/T32 decoder must
* defer them until after the conditional execution state has been updated.
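
Taken together with the cpu.h and translate.c changes above, the translators now pick a softmmu index as follows (illustrative sketch, not QEMU code): ordinary accesses use the current privilege level via get_mem_index(), while the unprivileged-access forms (A32 ldrt/strt and the T32 "user privilege" addressing mode) force MMU_USER_IDX, which is now 0.

enum { EXAMPLE_MMU_USER_IDX = 0 };

struct ExampleDisasContext { int current_pl; };

static int example_get_mem_index(const struct ExampleDisasContext *s)
{
    return s->current_pl;            /* 1:1 EL/PL-to-index mapping */
}

static int example_access_index(const struct ExampleDisasContext *s,
                                int is_unprivileged_form)
{
    /* ldrt/strt and the T32 "user" addressing mode always use the user index */
    return is_unprivileged_form ? EXAMPLE_MMU_USER_IDX
                                : example_get_mem_index(s);
}
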