Diffstat (limited to 'target/ppc')
-rw-r--r--  target/ppc/arch_dump.c | 22
-rw-r--r--  target/ppc/compat.c | 19
-rw-r--r--  target/ppc/cpu-models.c | 88
-rw-r--r--  target/ppc/cpu-models.h | 48
-rw-r--r--  target/ppc/cpu-param.h | 12
-rw-r--r--  target/ppc/cpu-qom.h | 176
-rw-r--r--  target/ppc/cpu.c | 116
-rw-r--r--  target/ppc/cpu.h | 937
-rw-r--r--  target/ppc/cpu_init.c | 3646
-rw-r--r--  target/ppc/dfp_helper.c | 268
-rw-r--r--  target/ppc/excp_helper.c | 2969
-rw-r--r--  target/ppc/fpu_helper.c | 1781
-rw-r--r--  target/ppc/gdbstub.c | 221
-rw-r--r--  target/ppc/helper.h | 643
-rw-r--r--  target/ppc/helper_regs.c | 544
-rw-r--r--  target/ppc/helper_regs.h | 1
-rw-r--r--  target/ppc/insn32.decode | 892
-rw-r--r--  target/ppc/insn64.decode | 183
-rw-r--r--  target/ppc/int_helper.c | 1395
-rw-r--r--  target/ppc/internal.h | 62
-rw-r--r--  target/ppc/kvm-stub.c | 19
-rw-r--r--  target/ppc/kvm.c | 136
-rw-r--r--  target/ppc/kvm_ppc.h | 81
-rw-r--r--  target/ppc/machine.c | 232
-rw-r--r--  target/ppc/mem_helper.c | 131
-rw-r--r--  target/ppc/meson.build | 17
-rw-r--r--  target/ppc/mfrom_table.c.inc | 78
-rw-r--r--  target/ppc/mfrom_table_gen.c | 34
-rw-r--r--  target/ppc/misc_helper.c | 171
-rw-r--r--  target/ppc/mmu-book3s-v3.c | 5
-rw-r--r--  target/ppc/mmu-book3s-v3.h | 15
-rw-r--r--  target/ppc/mmu-hash32.c | 62
-rw-r--r--  target/ppc/mmu-hash32.h | 9
-rw-r--r--  target/ppc/mmu-hash64.c | 129
-rw-r--r--  target/ppc/mmu-hash64.h | 10
-rw-r--r--  target/ppc/mmu-radix64.c | 323
-rw-r--r--  target/ppc/mmu-radix64.h | 1
-rw-r--r--  target/ppc/mmu_common.c | 410
-rw-r--r--  target/ppc/mmu_helper.c | 513
-rw-r--r--  target/ppc/power8-pmu-regs.c.inc | 309
-rw-r--r--  target/ppc/power8-pmu.c | 365
-rw-r--r--  target/ppc/power8-pmu.h | 27
-rw-r--r--  target/ppc/ppc-qmp-cmds.c (renamed from target/ppc/monitor.c) | 80
-rw-r--r--  target/ppc/spr_common.h (renamed from target/ppc/spr_tcg.h) | 102
-rw-r--r--  target/ppc/tcg-stub.c | 15
-rw-r--r--  target/ppc/timebase_helper.c | 356
-rw-r--r--  target/ppc/trace-events | 10
-rw-r--r--  target/ppc/translate.c | 3559
-rw-r--r--  target/ppc/translate/branch-impl.c.inc | 33
-rw-r--r--  target/ppc/translate/dfp-impl.c.inc | 401
-rw-r--r--  target/ppc/translate/dfp-ops.c.inc | 165
-rw-r--r--  target/ppc/translate/fixedpoint-impl.c.inc | 354
-rw-r--r--  target/ppc/translate/fp-impl.c.inc | 842
-rw-r--r--  target/ppc/translate/fp-ops.c.inc | 41
-rw-r--r--  target/ppc/translate/processor-ctrl-impl.c.inc | 105
-rw-r--r--  target/ppc/translate/spe-impl.c.inc | 97
-rw-r--r--  target/ppc/translate/storage-ctrl-impl.c.inc | 248
-rw-r--r--  target/ppc/translate/vector-impl.c.inc | 56
-rw-r--r--  target/ppc/translate/vmx-impl.c.inc | 2504
-rw-r--r--  target/ppc/translate/vmx-ops.c.inc | 97
-rw-r--r--  target/ppc/translate/vsx-impl.c.inc | 2342
-rw-r--r--  target/ppc/translate/vsx-ops.c.inc | 101
-rw-r--r--  target/ppc/user_only_helper.c | 18
63 files changed, 16787 insertions, 11839 deletions
diff --git a/target/ppc/arch_dump.c b/target/ppc/arch_dump.c
index bb392f6d88..a8315659d9 100644
--- a/target/ppc/arch_dump.c
+++ b/target/ppc/arch_dump.c
@@ -161,7 +161,7 @@ static void ppc_write_elf_vmxregset(NoteFuncArg *arg, PowerPCCPU *cpu)
bool needs_byteswap;
ppc_avr_t *avr = cpu_avr_ptr(&cpu->env, i);
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
needs_byteswap = s->dump_info.d_endian == ELFDATA2LSB;
#else
needs_byteswap = s->dump_info.d_endian == ELFDATA2MSB;
@@ -237,7 +237,7 @@ int cpu_get_dump_info(ArchDumpInfo *info,
info->d_machine = PPC_ELF_MACHINE;
info->d_class = ELFCLASS;
- if (ppc_interrupts_little_endian(cpu)) {
+ if (ppc_interrupts_little_endian(cpu, !!(cpu->env.msr_mask & MSR_HVB))) {
info->d_endian = ELFDATA2LSB;
} else {
info->d_endian = ELFDATA2MSB;
@@ -270,23 +270,23 @@ ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
static int ppc_write_all_elf_notes(const char *note_name,
WriteCoreDumpFunction f,
PowerPCCPU *cpu, int id,
- void *opaque)
+ DumpState *s)
{
- NoteFuncArg arg = { .state = opaque };
+ NoteFuncArg arg = { .state = s };
int ret = -1;
int note_size;
const NoteFuncDesc *nf;
for (nf = note_func; nf->note_contents_func; nf++) {
- arg.note.hdr.n_namesz = cpu_to_dump32(opaque, sizeof(arg.note.name));
- arg.note.hdr.n_descsz = cpu_to_dump32(opaque, nf->contents_size);
+ arg.note.hdr.n_namesz = cpu_to_dump32(s, sizeof(arg.note.name));
+ arg.note.hdr.n_descsz = cpu_to_dump32(s, nf->contents_size);
strncpy(arg.note.name, note_name, sizeof(arg.note.name));
(*nf->note_contents_func)(&arg, cpu);
note_size =
sizeof(arg.note) - sizeof(arg.note.contents) + nf->contents_size;
- ret = f(&arg.note, note_size, opaque);
+ ret = f(&arg.note, note_size, s);
if (ret < 0) {
return -1;
}
@@ -295,15 +295,15 @@ static int ppc_write_all_elf_notes(const char *note_name,
}
int ppc64_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
- int cpuid, void *opaque)
+ int cpuid, DumpState *s)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
- return ppc_write_all_elf_notes("CORE", f, cpu, cpuid, opaque);
+ return ppc_write_all_elf_notes("CORE", f, cpu, cpuid, s);
}
int ppc32_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
- int cpuid, void *opaque)
+ int cpuid, DumpState *s)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
- return ppc_write_all_elf_notes("CORE", f, cpu, cpuid, opaque);
+ return ppc_write_all_elf_notes("CORE", f, cpu, cpuid, s);
}
diff --git a/target/ppc/compat.c b/target/ppc/compat.c
index 7949a24f5a..ebef2cccec 100644
--- a/target/ppc/compat.c
+++ b/target/ppc/compat.c
@@ -229,6 +229,25 @@ int ppc_set_compat_all(uint32_t compat_pvr, Error **errp)
return 0;
}
+/* To be used when the machine is not running */
+int ppc_init_compat_all(uint32_t compat_pvr, Error **errp)
+{
+ CPUState *cs;
+
+ CPU_FOREACH(cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ int ret;
+
+ ret = ppc_set_compat(cpu, compat_pvr, errp);
+
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
int ppc_compat_max_vthreads(PowerPCCPU *cpu)
{
const CompatInfo *compat = compat_by_pvr(cpu->compat_pvr);
diff --git a/target/ppc/cpu-models.c b/target/ppc/cpu-models.c
index 4baa111713..f2301b43f7 100644
--- a/target/ppc/cpu-models.c
+++ b/target/ppc/cpu-models.c
@@ -67,40 +67,6 @@
POWERPC_DEF_SVR(_name, _desc, _pvr, POWERPC_SVR_NONE, _type)
/* Embedded PowerPC */
- /* PowerPC 401 family */
- POWERPC_DEF("401", CPU_POWERPC_401, 401,
- "Generic PowerPC 401")
- /* PowerPC 401 cores */
- POWERPC_DEF("401a1", CPU_POWERPC_401A1, 401,
- "PowerPC 401A1")
- POWERPC_DEF("401b2", CPU_POWERPC_401B2, 401x2,
- "PowerPC 401B2")
- POWERPC_DEF("401c2", CPU_POWERPC_401C2, 401x2,
- "PowerPC 401C2")
- POWERPC_DEF("401d2", CPU_POWERPC_401D2, 401x2,
- "PowerPC 401D2")
- POWERPC_DEF("401e2", CPU_POWERPC_401E2, 401x2,
- "PowerPC 401E2")
- POWERPC_DEF("401f2", CPU_POWERPC_401F2, 401x2,
- "PowerPC 401F2")
- /* XXX: to be checked */
- POWERPC_DEF("401g2", CPU_POWERPC_401G2, 401x2,
- "PowerPC 401G2")
- /* PowerPC 401 microcontrollers */
- POWERPC_DEF("iop480", CPU_POWERPC_IOP480, IOP480,
- "IOP480 (401 microcontroller)")
- POWERPC_DEF("cobra", CPU_POWERPC_COBRA, 401,
- "IBM Processor for Network Resources")
- /* PowerPC 403 family */
- /* PowerPC 403 microcontrollers */
- POWERPC_DEF("403ga", CPU_POWERPC_403GA, 403,
- "PowerPC 403 GA")
- POWERPC_DEF("403gb", CPU_POWERPC_403GB, 403,
- "PowerPC 403 GB")
- POWERPC_DEF("403gc", CPU_POWERPC_403GC, 403,
- "PowerPC 403 GC")
- POWERPC_DEF("403gcx", CPU_POWERPC_403GCX, 403GCX,
- "PowerPC 403 GCX")
/* PowerPC 405 family */
/* PowerPC 405 cores */
POWERPC_DEF("405d2", CPU_POWERPC_405D2, 405,
@@ -419,19 +385,19 @@
POWERPC_DEF_SVR("mpc8548e_v21", "MPC8548E v2.1",
CPU_POWERPC_MPC8548E_v21, POWERPC_SVR_8548E_v21, e500v2)
POWERPC_DEF_SVR("mpc8555_v10", "MPC8555 v1.0",
- CPU_POWERPC_MPC8555_v10, POWERPC_SVR_8555_v10, e500v2)
+ CPU_POWERPC_MPC8555_v10, POWERPC_SVR_8555_v10, e500v1)
POWERPC_DEF_SVR("mpc8555_v11", "MPC8555 v1.1",
- CPU_POWERPC_MPC8555_v11, POWERPC_SVR_8555_v11, e500v2)
+ CPU_POWERPC_MPC8555_v11, POWERPC_SVR_8555_v11, e500v1)
POWERPC_DEF_SVR("mpc8555e_v10", "MPC8555E v1.0",
- CPU_POWERPC_MPC8555E_v10, POWERPC_SVR_8555E_v10, e500v2)
+ CPU_POWERPC_MPC8555E_v10, POWERPC_SVR_8555E_v10, e500v1)
POWERPC_DEF_SVR("mpc8555e_v11", "MPC8555E v1.1",
- CPU_POWERPC_MPC8555E_v11, POWERPC_SVR_8555E_v11, e500v2)
+ CPU_POWERPC_MPC8555E_v11, POWERPC_SVR_8555E_v11, e500v1)
POWERPC_DEF_SVR("mpc8560_v10", "MPC8560 v1.0",
- CPU_POWERPC_MPC8560_v10, POWERPC_SVR_8560_v10, e500v2)
+ CPU_POWERPC_MPC8560_v10, POWERPC_SVR_8560_v10, e500v1)
POWERPC_DEF_SVR("mpc8560_v20", "MPC8560 v2.0",
- CPU_POWERPC_MPC8560_v20, POWERPC_SVR_8560_v20, e500v2)
+ CPU_POWERPC_MPC8560_v20, POWERPC_SVR_8560_v20, e500v1)
POWERPC_DEF_SVR("mpc8560_v21", "MPC8560 v2.1",
- CPU_POWERPC_MPC8560_v21, POWERPC_SVR_8560_v21, e500v2)
+ CPU_POWERPC_MPC8560_v21, POWERPC_SVR_8560_v21, e500v1)
POWERPC_DEF_SVR("mpc8567", "MPC8567",
CPU_POWERPC_MPC8567, POWERPC_SVR_8567, e500v2)
POWERPC_DEF_SVR("mpc8567e", "MPC8567E",
@@ -456,14 +422,6 @@
CPU_POWERPC_MPC8641D, POWERPC_SVR_8641D, e600)
/* 32 bits "classic" PowerPC */
/* PowerPC 6xx family */
- POWERPC_DEF("601_v0", CPU_POWERPC_601_v0, 601,
- "PowerPC 601v0")
- POWERPC_DEF("601_v1", CPU_POWERPC_601_v1, 601,
- "PowerPC 601v1")
- POWERPC_DEF("601_v2", CPU_POWERPC_601_v2, 601v,
- "PowerPC 601v2")
- POWERPC_DEF("602", CPU_POWERPC_602, 602,
- "PowerPC 602")
POWERPC_DEF("603", CPU_POWERPC_603, 603,
"PowerPC 603")
POWERPC_DEF("603e_v1.1", CPU_POWERPC_603E_v11, 603E,
@@ -670,13 +628,13 @@
"PowerPC 7410 v1.3 (G4)")
POWERPC_DEF("7410_v1.4", CPU_POWERPC_7410_v14, 7410,
"PowerPC 7410 v1.4 (G4)")
- POWERPC_DEF("7448_v1.0", CPU_POWERPC_7448_v10, 7400,
+ POWERPC_DEF("7448_v1.0", CPU_POWERPC_7448_v10, 7445,
"PowerPC 7448 v1.0 (G4)")
- POWERPC_DEF("7448_v1.1", CPU_POWERPC_7448_v11, 7400,
+ POWERPC_DEF("7448_v1.1", CPU_POWERPC_7448_v11, 7445,
"PowerPC 7448 v1.1 (G4)")
- POWERPC_DEF("7448_v2.0", CPU_POWERPC_7448_v20, 7400,
+ POWERPC_DEF("7448_v2.0", CPU_POWERPC_7448_v20, 7445,
"PowerPC 7448 v2.0 (G4)")
- POWERPC_DEF("7448_v2.1", CPU_POWERPC_7448_v21, 7400,
+ POWERPC_DEF("7448_v2.1", CPU_POWERPC_7448_v21, 7445,
"PowerPC 7448 v2.1 (G4)")
POWERPC_DEF("7450_v1.0", CPU_POWERPC_7450_v10, 7450,
"PowerPC 7450 v1.0 (G4)")
@@ -758,11 +716,11 @@
"PowerPC 970MP v1.0")
POWERPC_DEF("970mp_v1.1", CPU_POWERPC_970MP_v11, 970,
"PowerPC 970MP v1.1")
- POWERPC_DEF("power5+_v2.1", CPU_POWERPC_POWER5P_v21, POWER5P,
+ POWERPC_DEF("power5p_v2.1", CPU_POWERPC_POWER5P_v21, POWER5P,
"POWER5+ v2.1")
POWERPC_DEF("power7_v2.3", CPU_POWERPC_POWER7_v23, POWER7,
"POWER7 v2.3")
- POWERPC_DEF("power7+_v2.1", CPU_POWERPC_POWER7P_v21, POWER7,
+ POWERPC_DEF("power7p_v2.1", CPU_POWERPC_POWER7P_v21, POWER7,
"POWER7+ v2.1")
POWERPC_DEF("power8e_v2.1", CPU_POWERPC_POWER8E_v21, POWER8,
"POWER8E v2.1")
@@ -770,12 +728,10 @@
"POWER8 v2.0")
POWERPC_DEF("power8nvl_v1.0", CPU_POWERPC_POWER8NVL_v10, POWER8,
"POWER8NVL v1.0")
- POWERPC_DEF("power9_v1.0", CPU_POWERPC_POWER9_DD1, POWER9,
- "POWER9 v1.0")
POWERPC_DEF("power9_v2.0", CPU_POWERPC_POWER9_DD20, POWER9,
"POWER9 v2.0")
- POWERPC_DEF("power10_v1.0", CPU_POWERPC_POWER10_DD1, POWER10,
- "POWER10 v1.0")
+ POWERPC_DEF("power9_v2.2", CPU_POWERPC_POWER9_DD22, POWER9,
+ "POWER9 v2.2")
POWERPC_DEF("power10_v2.0", CPU_POWERPC_POWER10_DD20, POWER10,
"POWER10 v2.0")
#endif /* defined (TARGET_PPC64) */
@@ -784,7 +740,6 @@
/* PowerPC CPU aliases */
PowerPCCPUAlias ppc_cpu_aliases[] = {
- { "403", "403gc" },
{ "405", "405d4" },
{ "405cr", "405crc" },
{ "405gp", "405gpd" },
@@ -896,8 +851,6 @@ PowerPCCPUAlias ppc_cpu_aliases[] = {
{ "mpc8555", "mpc8555_v11" },
{ "mpc8555e", "mpc8555e_v11" },
{ "mpc8560", "mpc8560_v21" },
- { "601", "601_v2" },
- { "601v", "601_v2" },
{ "vanilla", "603" },
{ "603e", "603e_v4.1" },
{ "stretch", "603e_v4.1" },
@@ -924,7 +877,6 @@ PowerPCCPUAlias ppc_cpu_aliases[] = {
{ "755", "755_v2.8" },
{ "goldfinger", "755_v2.8" },
{ "7400", "7400_v2.9" },
- { "max", "7400_v2.9" },
{ "g4", "7400_v2.9" },
{ "7410", "7410_v1.4" },
{ "nitro", "7410_v1.4" },
@@ -946,14 +898,16 @@ PowerPCCPUAlias ppc_cpu_aliases[] = {
{ "970", "970_v2.2" },
{ "970fx", "970fx_v3.1" },
{ "970mp", "970mp_v1.1" },
- { "power5+", "power5+_v2.1" },
+ { "power5+", "power5p_v2.1" },
+ { "power5+_v2.1", "power5p_v2.1" },
{ "power5gs", "power5+_v2.1" },
{ "power7", "power7_v2.3" },
- { "power7+", "power7+_v2.1" },
+ { "power7+", "power7p_v2.1" },
+ { "power7+_v2.1", "power7p_v2.1" },
{ "power8e", "power8e_v2.1" },
{ "power8", "power8_v2.0" },
{ "power8nvl", "power8nvl_v1.0" },
- { "power9", "power9_v2.0" },
+ { "power9", "power9_v2.2" },
{ "power10", "power10_v2.0" },
#endif
@@ -963,6 +917,6 @@ PowerPCCPUAlias ppc_cpu_aliases[] = {
#endif
{ "ppc32", "604" },
{ "ppc", "604" },
- { "default", "604" },
+
{ NULL, NULL }
};
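
The alias table above maps user-visible CPU names such as "power9" or "power7+" onto canonical model names ("power9_v2.2", "power7p_v2.1") and is terminated by a { NULL, NULL } entry. A standalone sketch of how such a NULL-terminated alias table can be walked (illustrative only, not the lookup code QEMU itself uses):

    #include <stdio.h>
    #include <string.h>

    typedef struct {
        const char *alias;
        const char *model;
    } PowerPCCPUAlias;

    /* A few entries taken from the table above; { NULL, NULL } ends the list. */
    static const PowerPCCPUAlias ppc_cpu_aliases[] = {
        { "power7+", "power7p_v2.1" },
        { "power9",  "power9_v2.2"  },
        { "power10", "power10_v2.0" },
        { NULL, NULL },
    };

    static const char *resolve_alias(const char *name)
    {
        for (const PowerPCCPUAlias *a = ppc_cpu_aliases; a->alias; a++) {
            if (strcmp(a->alias, name) == 0) {
                return a->model;
            }
        }
        return name; /* not an alias: assume it is already a model name */
    }

    int main(void)
    {
        printf("power9 -> %s\n", resolve_alias("power9"));   /* power9_v2.2 */
        printf("604    -> %s\n", resolve_alias("604"));      /* 604 (no alias) */
        return 0;
    }
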
diff --git a/target/ppc/cpu-models.h b/target/ppc/cpu-models.h
index 0952592759..0229ef3a9a 100644
--- a/target/ppc/cpu-models.h
+++ b/target/ppc/cpu-models.h
@@ -38,32 +38,13 @@ extern PowerPCCPUAlias ppc_cpu_aliases[];
/*****************************************************************************/
/* PVR definitions for most known PowerPC */
enum {
- /* PowerPC 401 family */
- /* Generic PowerPC 401 */
-#define CPU_POWERPC_401 CPU_POWERPC_401G2
- /* PowerPC 401 cores */
- CPU_POWERPC_401A1 = 0x00210000,
- CPU_POWERPC_401B2 = 0x00220000,
- CPU_POWERPC_401C2 = 0x00230000,
- CPU_POWERPC_401D2 = 0x00240000,
- CPU_POWERPC_401E2 = 0x00250000,
- CPU_POWERPC_401F2 = 0x00260000,
- CPU_POWERPC_401G2 = 0x00270000,
- /* PowerPC 401 microcontrolers */
-#define CPU_POWERPC_IOP480 CPU_POWERPC_401B2
/* IBM Processor for Network Resources */
CPU_POWERPC_COBRA = 0x10100000, /* XXX: 405 ? */
- /* PowerPC 403 family */
- /* PowerPC 403 microcontrollers */
- CPU_POWERPC_403GA = 0x00200011,
- CPU_POWERPC_403GB = 0x00200100,
- CPU_POWERPC_403GC = 0x00200200,
- CPU_POWERPC_403GCX = 0x00201400,
/* PowerPC 405 family */
/* PowerPC 405 cores */
CPU_POWERPC_405D2 = 0x20010000,
CPU_POWERPC_405D4 = 0x41810000,
- /* PowerPC 405 microcontrolers */
+ /* PowerPC 405 microcontrollers */
/* XXX: missing 0x200108a0 */
CPU_POWERPC_405CRa = 0x40110041,
CPU_POWERPC_405CRb = 0x401100C5,
@@ -93,7 +74,7 @@ enum {
#define CPU_POWERPC_440 CPU_POWERPC_440GXf
/* PowerPC 440 cores */
CPU_POWERPC_440_XILINX = 0x7ff21910,
- /* PowerPC 440 microcontrolers */
+ /* PowerPC 440 microcontrollers */
CPU_POWERPC_440EPa = 0x42221850,
CPU_POWERPC_440EPb = 0x422218D3,
CPU_POWERPC_440GPb = 0x40120440,
@@ -203,13 +184,13 @@ enum {
#define CPU_POWERPC_MPC8548E_v11 CPU_POWERPC_e500v2_v11
#define CPU_POWERPC_MPC8548E_v20 CPU_POWERPC_e500v2_v20
#define CPU_POWERPC_MPC8548E_v21 CPU_POWERPC_e500v2_v21
-#define CPU_POWERPC_MPC8555_v10 CPU_POWERPC_e500v2_v10
-#define CPU_POWERPC_MPC8555_v11 CPU_POWERPC_e500v2_v11
-#define CPU_POWERPC_MPC8555E_v10 CPU_POWERPC_e500v2_v10
-#define CPU_POWERPC_MPC8555E_v11 CPU_POWERPC_e500v2_v11
-#define CPU_POWERPC_MPC8560_v10 CPU_POWERPC_e500v2_v10
-#define CPU_POWERPC_MPC8560_v20 CPU_POWERPC_e500v2_v20
-#define CPU_POWERPC_MPC8560_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8555_v10 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8555_v11 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8555E_v10 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8555E_v11 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8560_v10 CPU_POWERPC_e500v1_v10
+#define CPU_POWERPC_MPC8560_v20 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8560_v21 CPU_POWERPC_e500v1_v20
#define CPU_POWERPC_MPC8567 CPU_POWERPC_e500v2_v22
#define CPU_POWERPC_MPC8567E CPU_POWERPC_e500v2_v22
#define CPU_POWERPC_MPC8568 CPU_POWERPC_e500v2_v22
@@ -224,10 +205,6 @@ enum {
#define CPU_POWERPC_MPC8641 CPU_POWERPC_e600
#define CPU_POWERPC_MPC8641D CPU_POWERPC_e600
/* PowerPC 6xx cores */
- CPU_POWERPC_601_v0 = 0x00010001,
- CPU_POWERPC_601_v1 = 0x00010001,
- CPU_POWERPC_601_v2 = 0x00010002,
- CPU_POWERPC_602 = 0x00050100,
CPU_POWERPC_603 = 0x00030100,
CPU_POWERPC_603E_v11 = 0x00060101,
CPU_POWERPC_603E_v12 = 0x00060102,
@@ -371,11 +348,12 @@ enum {
CPU_POWERPC_POWER8NVL_BASE = 0x004C0000,
CPU_POWERPC_POWER8NVL_v10 = 0x004C0100,
CPU_POWERPC_POWER9_BASE = 0x004E0000,
- CPU_POWERPC_POWER9_DD1 = 0x004E0100,
+ CPU_POWERPC_POWER9_DD1 = 0x004E1100,
CPU_POWERPC_POWER9_DD20 = 0x004E1200,
+ CPU_POWERPC_POWER9_DD22 = 0x004E1202,
CPU_POWERPC_POWER10_BASE = 0x00800000,
- CPU_POWERPC_POWER10_DD1 = 0x00800100,
- CPU_POWERPC_POWER10_DD20 = 0x00800200,
+ CPU_POWERPC_POWER10_DD1 = 0x00801100,
+ CPU_POWERPC_POWER10_DD20 = 0x00801200,
CPU_POWERPC_970_v22 = 0x00390202,
CPU_POWERPC_970FX_v10 = 0x00391100,
CPU_POWERPC_970FX_v20 = 0x003C0200,
diff --git a/target/ppc/cpu-param.h b/target/ppc/cpu-param.h
index 37b458d33d..b7ad52de03 100644
--- a/target/ppc/cpu-param.h
+++ b/target/ppc/cpu-param.h
@@ -6,7 +6,7 @@
*/
#ifndef PPC_CPU_PARAM_H
-#define PPC_CPU_PARAM_H 1
+#define PPC_CPU_PARAM_H
#ifdef TARGET_PPC64
# define TARGET_LONG_BITS 64
@@ -31,7 +31,13 @@
# define TARGET_PHYS_ADDR_SPACE_BITS 36
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
-#define TARGET_PAGE_BITS 12
-#define NB_MMU_MODES 10
+
+#ifdef CONFIG_USER_ONLY
+/* Allow user-only to vary page size from 4k */
+# define TARGET_PAGE_BITS_VARY
+# define TARGET_PAGE_BITS_MIN 12
+#else
+# define TARGET_PAGE_BITS 12
+#endif
#endif
diff --git a/target/ppc/cpu-qom.h b/target/ppc/cpu-qom.h
index 5800fa324e..8247fa2336 100644
--- a/target/ppc/cpu-qom.h
+++ b/target/ppc/cpu-qom.h
@@ -1,5 +1,5 @@
/*
- * QEMU PowerPC CPU
+ * QEMU PowerPC CPU QOM header (target agnostic)
*
* Copyright (c) 2012 SUSE LINUX Products GmbH
*
@@ -20,8 +20,8 @@
#ifndef QEMU_PPC_CPU_QOM_H
#define QEMU_PPC_CPU_QOM_H
+#include "exec/gdbstub.h"
#include "hw/core/cpu.h"
-#include "qom/object.h"
#ifdef TARGET_PPC64
#define TYPE_POWERPC_CPU "powerpc64-cpu"
@@ -29,176 +29,12 @@
#define TYPE_POWERPC_CPU "powerpc-cpu"
#endif
-OBJECT_DECLARE_TYPE(PowerPCCPU, PowerPCCPUClass,
- POWERPC_CPU)
+OBJECT_DECLARE_CPU_TYPE(PowerPCCPU, PowerPCCPUClass, POWERPC_CPU)
-typedef struct CPUPPCState CPUPPCState;
-typedef struct ppc_tb_t ppc_tb_t;
-typedef struct ppc_dcr_t ppc_dcr_t;
+#define POWERPC_CPU_TYPE_SUFFIX "-" TYPE_POWERPC_CPU
+#define POWERPC_CPU_TYPE_NAME(model) model POWERPC_CPU_TYPE_SUFFIX
-/*****************************************************************************/
-/* MMU model */
-typedef enum powerpc_mmu_t powerpc_mmu_t;
-enum powerpc_mmu_t {
- POWERPC_MMU_UNKNOWN = 0x00000000,
- /* Standard 32 bits PowerPC MMU */
- POWERPC_MMU_32B = 0x00000001,
- /* PowerPC 6xx MMU with software TLB */
- POWERPC_MMU_SOFT_6xx = 0x00000002,
- /* PowerPC 74xx MMU with software TLB */
- POWERPC_MMU_SOFT_74xx = 0x00000003,
- /* PowerPC 4xx MMU with software TLB */
- POWERPC_MMU_SOFT_4xx = 0x00000004,
- /* PowerPC 4xx MMU with software TLB and zones protections */
- POWERPC_MMU_SOFT_4xx_Z = 0x00000005,
- /* PowerPC MMU in real mode only */
- POWERPC_MMU_REAL = 0x00000006,
- /* Freescale MPC8xx MMU model */
- POWERPC_MMU_MPC8xx = 0x00000007,
- /* BookE MMU model */
- POWERPC_MMU_BOOKE = 0x00000008,
- /* BookE 2.06 MMU model */
- POWERPC_MMU_BOOKE206 = 0x00000009,
- /* PowerPC 601 MMU model (specific BATs format) */
- POWERPC_MMU_601 = 0x0000000A,
-#define POWERPC_MMU_64 0x00010000
- /* 64 bits PowerPC MMU */
- POWERPC_MMU_64B = POWERPC_MMU_64 | 0x00000001,
- /* Architecture 2.03 and later (has LPCR) */
- POWERPC_MMU_2_03 = POWERPC_MMU_64 | 0x00000002,
- /* Architecture 2.06 variant */
- POWERPC_MMU_2_06 = POWERPC_MMU_64 | 0x00000003,
- /* Architecture 2.07 variant */
- POWERPC_MMU_2_07 = POWERPC_MMU_64 | 0x00000004,
- /* Architecture 3.00 variant */
- POWERPC_MMU_3_00 = POWERPC_MMU_64 | 0x00000005,
-};
-
-static inline bool mmu_is_64bit(powerpc_mmu_t mmu_model)
-{
- return mmu_model & POWERPC_MMU_64;
-}
-
-/*****************************************************************************/
-/* Exception model */
-typedef enum powerpc_excp_t powerpc_excp_t;
-enum powerpc_excp_t {
- POWERPC_EXCP_UNKNOWN = 0,
- /* Standard PowerPC exception model */
- POWERPC_EXCP_STD,
- /* PowerPC 40x exception model */
- POWERPC_EXCP_40x,
- /* PowerPC 601 exception model */
- POWERPC_EXCP_601,
- /* PowerPC 602 exception model */
- POWERPC_EXCP_602,
- /* PowerPC 603 exception model */
- POWERPC_EXCP_603,
- /* PowerPC 603e exception model */
- POWERPC_EXCP_603E,
- /* PowerPC G2 exception model */
- POWERPC_EXCP_G2,
- /* PowerPC 604 exception model */
- POWERPC_EXCP_604,
- /* PowerPC 7x0 exception model */
- POWERPC_EXCP_7x0,
- /* PowerPC 7x5 exception model */
- POWERPC_EXCP_7x5,
- /* PowerPC 74xx exception model */
- POWERPC_EXCP_74xx,
- /* BookE exception model */
- POWERPC_EXCP_BOOKE,
- /* PowerPC 970 exception model */
- POWERPC_EXCP_970,
- /* POWER7 exception model */
- POWERPC_EXCP_POWER7,
- /* POWER8 exception model */
- POWERPC_EXCP_POWER8,
- /* POWER9 exception model */
- POWERPC_EXCP_POWER9,
- /* POWER10 exception model */
- POWERPC_EXCP_POWER10,
-};
-
-/*****************************************************************************/
-/* PM instructions */
-typedef enum {
- PPC_PM_DOZE,
- PPC_PM_NAP,
- PPC_PM_SLEEP,
- PPC_PM_RVWINKLE,
- PPC_PM_STOP,
-} powerpc_pm_insn_t;
-
-/*****************************************************************************/
-/* Input pins model */
-typedef enum powerpc_input_t powerpc_input_t;
-enum powerpc_input_t {
- PPC_FLAGS_INPUT_UNKNOWN = 0,
- /* PowerPC 6xx bus */
- PPC_FLAGS_INPUT_6xx,
- /* BookE bus */
- PPC_FLAGS_INPUT_BookE,
- /* PowerPC 405 bus */
- PPC_FLAGS_INPUT_405,
- /* PowerPC 970 bus */
- PPC_FLAGS_INPUT_970,
- /* PowerPC POWER7 bus */
- PPC_FLAGS_INPUT_POWER7,
- /* PowerPC POWER9 bus */
- PPC_FLAGS_INPUT_POWER9,
- /* PowerPC 401 bus */
- PPC_FLAGS_INPUT_401,
- /* Freescale RCPU bus */
- PPC_FLAGS_INPUT_RCPU,
-};
-
-typedef struct PPCHash64Options PPCHash64Options;
-
-/**
- * PowerPCCPUClass:
- * @parent_realize: The parent class' realize handler.
- * @parent_reset: The parent class' reset handler.
- *
- * A PowerPC CPU model.
- */
-struct PowerPCCPUClass {
- /*< private >*/
- CPUClass parent_class;
- /*< public >*/
-
- DeviceRealize parent_realize;
- DeviceUnrealize parent_unrealize;
- DeviceReset parent_reset;
- void (*parent_parse_features)(const char *type, char *str, Error **errp);
-
- uint32_t pvr;
- bool (*pvr_match)(struct PowerPCCPUClass *pcc, uint32_t pvr);
- uint64_t pcr_mask; /* Available bits in PCR register */
- uint64_t pcr_supported; /* Bits for supported PowerISA versions */
- uint32_t svr;
- uint64_t insns_flags;
- uint64_t insns_flags2;
- uint64_t msr_mask;
- uint64_t lpcr_mask; /* Available bits in the LPCR */
- uint64_t lpcr_pm; /* Power-saving mode Exit Cause Enable bits */
- powerpc_mmu_t mmu_model;
- powerpc_excp_t excp_model;
- powerpc_input_t bus_model;
- uint32_t flags;
- int bfd_mach;
- uint32_t l1_dcache_size, l1_icache_size;
-#ifndef CONFIG_USER_ONLY
- unsigned int gdb_num_sprs;
- const char *gdb_spr_xml;
-#endif
- const PPCHash64Options *hash64_opts;
- struct ppc_radix_page_info *radix_page_info;
- uint32_t lrg_decr_bits;
- int n_host_threads;
- void (*init_proc)(CPUPPCState *env);
- int (*check_pow)(CPUPPCState *env);
-};
+#define TYPE_HOST_POWERPC_CPU POWERPC_CPU_TYPE_NAME("host")
#ifndef CONFIG_USER_ONLY
typedef struct PPCTimebase {
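
The POWERPC_CPU_TYPE_SUFFIX/POWERPC_CPU_TYPE_NAME macros moved into this header build QOM type names by pasting the model string onto a per-target suffix. A minimal standalone sketch of the expansion (assuming a 64-bit target, where TYPE_POWERPC_CPU is "powerpc64-cpu"):

    #include <stdio.h>

    #define TYPE_POWERPC_CPU        "powerpc64-cpu"
    #define POWERPC_CPU_TYPE_SUFFIX "-" TYPE_POWERPC_CPU
    #define POWERPC_CPU_TYPE_NAME(model) model POWERPC_CPU_TYPE_SUFFIX

    int main(void)
    {
        /* Adjacent string literals concatenate at compile time. */
        puts(POWERPC_CPU_TYPE_NAME("power9_v2.2")); /* "power9_v2.2-powerpc64-cpu" */
        puts(POWERPC_CPU_TYPE_NAME("host"));        /* i.e. TYPE_HOST_POWERPC_CPU */
        return 0;
    }
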
diff --git a/target/ppc/cpu.c b/target/ppc/cpu.c
index 7ad9bd6044..e3ad8e0c27 100644
--- a/target/ppc/cpu.c
+++ b/target/ppc/cpu.c
@@ -27,7 +27,7 @@
#include "helper_regs.h"
#include "sysemu/tcg.h"
-target_ulong cpu_read_xer(CPUPPCState *env)
+target_ulong cpu_read_xer(const CPUPPCState *env)
{
if (is_isa300(env)) {
return env->xer | (env->so << XER_SO) |
@@ -59,6 +59,7 @@ void ppc_store_vscr(CPUPPCState *env, uint32_t vscr)
env->vscr_sat.u64[0] = vscr & (1u << VSCR_SAT);
env->vscr_sat.u64[1] = 0;
set_flush_to_zero((vscr >> VSCR_NJ) & 1, &env->vec_status);
+ set_flush_inputs_to_zero((vscr >> VSCR_NJ) & 1, &env->vec_status);
}
uint32_t ppc_get_vscr(CPUPPCState *env)
@@ -67,12 +68,30 @@ uint32_t ppc_get_vscr(CPUPPCState *env)
return env->vscr | (sat << VSCR_SAT);
}
+void ppc_set_cr(CPUPPCState *env, uint64_t cr)
+{
+ for (int i = 7; i >= 0; i--) {
+ env->crf[i] = cr & 0xf;
+ cr >>= 4;
+ }
+}
+
+uint64_t ppc_get_cr(const CPUPPCState *env)
+{
+ uint64_t cr = 0;
+ for (int i = 0; i < 8; i++) {
+ cr |= (env->crf[i] & 0xf) << (4 * (7 - i));
+ }
+ return cr;
+}
+
/* GDBstub can read and write MSR... */
void ppc_store_msr(CPUPPCState *env, target_ulong value)
{
hreg_store_msr(env, value, 0);
}
+#if !defined(CONFIG_USER_ONLY)
void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
{
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
@@ -81,14 +100,103 @@ void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
env->spr[SPR_LPCR] = val & pcc->lpcr_mask;
/* The gtse bit affects hflags */
hreg_compute_hflags(env);
+
+ ppc_maybe_interrupt(env);
+}
+
+#if defined(TARGET_PPC64)
+void ppc_update_ciabr(CPUPPCState *env)
+{
+ CPUState *cs = env_cpu(env);
+ target_ulong ciabr = env->spr[SPR_CIABR];
+ target_ulong ciea, priv;
+
+ ciea = ciabr & PPC_BITMASK(0, 61);
+ priv = ciabr & PPC_BITMASK(62, 63);
+
+ if (env->ciabr_breakpoint) {
+ cpu_breakpoint_remove_by_ref(cs, env->ciabr_breakpoint);
+ env->ciabr_breakpoint = NULL;
+ }
+
+ if (priv) {
+ cpu_breakpoint_insert(cs, ciea, BP_CPU, &env->ciabr_breakpoint);
+ }
+}
+
+void ppc_store_ciabr(CPUPPCState *env, target_ulong val)
+{
+ env->spr[SPR_CIABR] = val;
+ ppc_update_ciabr(env);
+}
+
+void ppc_update_daw0(CPUPPCState *env)
+{
+ CPUState *cs = env_cpu(env);
+ target_ulong deaw = env->spr[SPR_DAWR0] & PPC_BITMASK(0, 60);
+ uint32_t dawrx = env->spr[SPR_DAWRX0];
+ int mrd = extract32(dawrx, PPC_BIT_NR(48), 54 - 48);
+ bool dw = extract32(dawrx, PPC_BIT_NR(57), 1);
+ bool dr = extract32(dawrx, PPC_BIT_NR(58), 1);
+ bool hv = extract32(dawrx, PPC_BIT_NR(61), 1);
+ bool sv = extract32(dawrx, PPC_BIT_NR(62), 1);
+ bool pr = extract32(dawrx, PPC_BIT_NR(62), 1);
+ vaddr len;
+ int flags;
+
+ if (env->dawr0_watchpoint) {
+ cpu_watchpoint_remove_by_ref(cs, env->dawr0_watchpoint);
+ env->dawr0_watchpoint = NULL;
+ }
+
+ if (!dr && !dw) {
+ return;
+ }
+
+ if (!hv && !sv && !pr) {
+ return;
+ }
+
+ len = (mrd + 1) * 8;
+ flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
+ if (dr) {
+ flags |= BP_MEM_READ;
+ }
+ if (dw) {
+ flags |= BP_MEM_WRITE;
+ }
+
+ cpu_watchpoint_insert(cs, deaw, len, flags, &env->dawr0_watchpoint);
+}
+
+void ppc_store_dawr0(CPUPPCState *env, target_ulong val)
+{
+ env->spr[SPR_DAWR0] = val;
+ ppc_update_daw0(env);
+}
+
+void ppc_store_dawrx0(CPUPPCState *env, uint32_t val)
+{
+ int hrammc = extract32(val, PPC_BIT_NR(56), 1);
+
+ if (hrammc) {
+ /* This might be done with a second watchpoint at the xor of DEAW[0] */
+ qemu_log_mask(LOG_UNIMP, "%s: DAWRX0[HRAMMC] is unimplemented\n",
+ __func__);
+ }
+
+ env->spr[SPR_DAWRX0] = val;
+ ppc_update_daw0(env);
}
+#endif
+#endif
static inline void fpscr_set_rounding_mode(CPUPPCState *env)
{
int rnd_type;
/* Set rounding mode */
- switch (fpscr_rn) {
+ switch (env->fpscr & FP_RN) {
case 0:
/* Best approximation (round to nearest) */
rnd_type = float_round_nearest_even;
@@ -112,7 +220,7 @@ static inline void fpscr_set_rounding_mode(CPUPPCState *env)
void ppc_store_fpscr(CPUPPCState *env, target_ulong val)
{
- val &= ~(FP_VX | FP_FEX);
+ val &= FPSCR_MTFS_MASK;
if (val & FPSCR_IX) {
val |= FP_VX;
}
@@ -120,6 +228,8 @@ void ppc_store_fpscr(CPUPPCState *env, target_ulong val)
val |= FP_FEX;
}
env->fpscr = val;
+ env->fp_status.rebias_overflow = (FP_OE & env->fpscr) ? true : false;
+ env->fp_status.rebias_underflow = (FP_UE & env->fpscr) ? true : false;
if (tcg_enabled()) {
fpscr_set_rounding_mode(env);
}
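
The ppc_set_cr()/ppc_get_cr() helpers added above pack the eight 4-bit condition-register fields into one value, with CR0 in the most significant nibble. A minimal standalone sketch of the same packing logic (env->crf[] is modelled here as a plain global array):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t crf[8];                 /* stand-in for env->crf[] */

    static void set_cr(uint64_t cr)
    {
        for (int i = 7; i >= 0; i--) {      /* CR7 comes from the low nibble */
            crf[i] = cr & 0xf;
            cr >>= 4;
        }
    }

    static uint64_t get_cr(void)
    {
        uint64_t cr = 0;
        for (int i = 0; i < 8; i++) {       /* CR0 lands in the top nibble */
            cr |= (uint64_t)(crf[i] & 0xf) << (4 * (7 - i));
        }
        return cr;
    }

    int main(void)
    {
        set_cr(0x12345678);
        assert(crf[0] == 0x1 && crf[7] == 0x8);
        assert(get_cr() == 0x12345678);
        return 0;
    }
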
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 01d3773bc7..67e6b2effd 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -21,9 +21,13 @@
#define PPC_CPU_H
#include "qemu/int128.h"
+#include "qemu/cpu-float.h"
#include "exec/cpu-defs.h"
#include "cpu-qom.h"
#include "qom/object.h"
+#include "hw/registerfields.h"
+
+#define CPU_RESOLVING_TYPE TYPE_POWERPC_CPU
#define TCG_GUEST_DEFAULT_MO 0
@@ -36,6 +40,7 @@
#define PPC_ELF_MACHINE EM_PPC
#endif
+#define PPC_BIT_NR(bit) (63 - (bit))
#define PPC_BIT(bit) (0x8000000000000000ULL >> (bit))
#define PPC_BIT32(bit) (0x80000000 >> (bit))
#define PPC_BIT8(bit) (0x80 >> (bit))
@@ -44,6 +49,18 @@
PPC_BIT32(bs))
#define PPC_BITMASK8(bs, be) ((PPC_BIT8(bs) - PPC_BIT8(be)) | PPC_BIT8(bs))
+/*
+ * QEMU version of the GETFIELD/SETFIELD macros from skiboot
+ *
+ * It might be better to use the existing extract64() and
+ * deposit64() but this means that all the register definitions will
+ * change and become incompatible with the ones found in skiboot.
+ */
+#define MASK_TO_LSH(m) (__builtin_ffsll(m) - 1)
+#define GETFIELD(m, v) (((v) & (m)) >> MASK_TO_LSH(m))
+#define SETFIELD(m, v, val) \
+ (((v) & ~(m)) | ((((typeof(v))(val)) << MASK_TO_LSH(m)) & (m)))
+
/*****************************************************************************/
/* Exception vectors definitions */
enum {
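
The GETFIELD/SETFIELD macros added above keep register field definitions bit-compatible with skiboot by deriving the shift from the mask itself. A standalone sketch of how they behave (the mask value here is illustrative; typeof and __builtin_ffsll require GCC or Clang):

    #include <assert.h>
    #include <stdint.h>

    #define MASK_TO_LSH(m)      (__builtin_ffsll(m) - 1)
    #define GETFIELD(m, v)      (((v) & (m)) >> MASK_TO_LSH(m))
    #define SETFIELD(m, v, val) \
        (((v) & ~(m)) | ((((typeof(v))(val)) << MASK_TO_LSH(m)) & (m)))

    int main(void)
    {
        const uint64_t F = 0x0000000000F00000ULL;  /* a 4-bit field at bit 20 */
        uint64_t reg = 0;

        reg = SETFIELD(F, reg, 0xAULL);     /* deposit 0xA at the field's LSB */
        assert(reg == 0x0000000000A00000ULL);
        assert(GETFIELD(F, reg) == 0xA);    /* extract it back */
        return 0;
    }
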
@@ -89,11 +106,9 @@ enum {
POWERPC_EXCP_VPU = 73, /* Vector unavailable exception */
/* 40x specific exceptions */
POWERPC_EXCP_PIT = 74, /* Programmable interval timer interrupt */
- /* 601 specific exceptions */
- POWERPC_EXCP_IO = 75, /* IO error exception */
- POWERPC_EXCP_RUNM = 76, /* Run mode exception */
+ /* Vectors 75-76 are 601 specific exceptions */
/* 602 specific exceptions */
- POWERPC_EXCP_EMUL = 77, /* Emulation trap exception */
+ POWERPC_EXCP_EMUL = 77, /* Emulation trap exception */
/* 602/603 specific exceptions */
POWERPC_EXCP_IFTLB = 78, /* Instruction fetch TLB miss */
POWERPC_EXCP_DLTLB = 79, /* Data load TLB miss */
@@ -129,8 +144,10 @@ enum {
/* ISA 3.00 additions */
POWERPC_EXCP_HVIRT = 101,
POWERPC_EXCP_SYSCALL_VECTORED = 102, /* scv exception */
+ POWERPC_EXCP_PERFM_EBB = 103, /* Performance Monitor EBB Exception */
+ POWERPC_EXCP_EXTERNAL_EBB = 104, /* External EBB Exception */
/* EOL */
- POWERPC_EXCP_NB = 103,
+ POWERPC_EXCP_NB = 105,
/* QEMU exceptions: special cases we want to stop translation */
POWERPC_EXCP_SYSCALL_USER = 0x203, /* System call in user mode only */
};
@@ -175,6 +192,95 @@ enum {
POWERPC_EXCP_TRAP = 0x40,
};
+/* Exception model */
+typedef enum powerpc_excp_t {
+ POWERPC_EXCP_UNKNOWN = 0,
+ /* Standard PowerPC exception model */
+ POWERPC_EXCP_STD,
+ /* PowerPC 40x exception model */
+ POWERPC_EXCP_40x,
+ /* PowerPC 603/604/G2 exception model */
+ POWERPC_EXCP_6xx,
+ /* PowerPC 7xx exception model */
+ POWERPC_EXCP_7xx,
+ /* PowerPC 74xx exception model */
+ POWERPC_EXCP_74xx,
+ /* BookE exception model */
+ POWERPC_EXCP_BOOKE,
+ /* PowerPC 970 exception model */
+ POWERPC_EXCP_970,
+ /* POWER7 exception model */
+ POWERPC_EXCP_POWER7,
+ /* POWER8 exception model */
+ POWERPC_EXCP_POWER8,
+ /* POWER9 exception model */
+ POWERPC_EXCP_POWER9,
+ /* POWER10 exception model */
+ POWERPC_EXCP_POWER10,
+} powerpc_excp_t;
+
+/*****************************************************************************/
+/* MMU model */
+typedef enum powerpc_mmu_t {
+ POWERPC_MMU_UNKNOWN = 0x00000000,
+ /* Standard 32 bits PowerPC MMU */
+ POWERPC_MMU_32B = 0x00000001,
+ /* PowerPC 6xx MMU with software TLB */
+ POWERPC_MMU_SOFT_6xx = 0x00000002,
+ /*
+ * PowerPC 74xx MMU with software TLB (this has been
+ * disabled, see git history for more information.
+ * keywords: tlbld tlbli TLBMISS PTEHI PTELO)
+ */
+ POWERPC_MMU_SOFT_74xx = 0x00000003,
+ /* PowerPC 4xx MMU with software TLB */
+ POWERPC_MMU_SOFT_4xx = 0x00000004,
+ /* PowerPC MMU in real mode only */
+ POWERPC_MMU_REAL = 0x00000006,
+ /* Freescale MPC8xx MMU model */
+ POWERPC_MMU_MPC8xx = 0x00000007,
+ /* BookE MMU model */
+ POWERPC_MMU_BOOKE = 0x00000008,
+ /* BookE 2.06 MMU model */
+ POWERPC_MMU_BOOKE206 = 0x00000009,
+#define POWERPC_MMU_64 0x00010000
+ /* 64 bits PowerPC MMU */
+ POWERPC_MMU_64B = POWERPC_MMU_64 | 0x00000001,
+ /* Architecture 2.03 and later (has LPCR) */
+ POWERPC_MMU_2_03 = POWERPC_MMU_64 | 0x00000002,
+ /* Architecture 2.06 variant */
+ POWERPC_MMU_2_06 = POWERPC_MMU_64 | 0x00000003,
+ /* Architecture 2.07 variant */
+ POWERPC_MMU_2_07 = POWERPC_MMU_64 | 0x00000004,
+ /* Architecture 3.00 variant */
+ POWERPC_MMU_3_00 = POWERPC_MMU_64 | 0x00000005,
+} powerpc_mmu_t;
+
+static inline bool mmu_is_64bit(powerpc_mmu_t mmu_model)
+{
+ return mmu_model & POWERPC_MMU_64;
+}
+
+/*****************************************************************************/
+/* Input pins model */
+typedef enum powerpc_input_t {
+ PPC_FLAGS_INPUT_UNKNOWN = 0,
+ /* PowerPC 6xx bus */
+ PPC_FLAGS_INPUT_6xx,
+ /* BookE bus */
+ PPC_FLAGS_INPUT_BookE,
+ /* PowerPC 405 bus */
+ PPC_FLAGS_INPUT_405,
+ /* PowerPC 970 bus */
+ PPC_FLAGS_INPUT_970,
+ /* PowerPC POWER7 bus */
+ PPC_FLAGS_INPUT_POWER7,
+ /* PowerPC POWER9 bus */
+ PPC_FLAGS_INPUT_POWER9,
+ /* Freescale RCPU bus */
+ PPC_FLAGS_INPUT_RCPU,
+} powerpc_input_t;
+
#define PPC_INPUT(env) ((env)->bus_model)
/*****************************************************************************/
@@ -183,9 +289,14 @@ typedef struct opc_handler_t opc_handler_t;
/*****************************************************************************/
/* Types used to describe some PowerPC registers etc. */
typedef struct DisasContext DisasContext;
+typedef struct ppc_dcr_t ppc_dcr_t;
typedef struct ppc_spr_t ppc_spr_t;
+typedef struct ppc_tb_t ppc_tb_t;
typedef union ppc_tlb_t ppc_tlb_t;
typedef struct ppc_hash_pte64 ppc_hash_pte64_t;
+typedef struct PPCHash64Options PPCHash64Options;
+
+typedef struct CPUArchState CPUPPCState;
/* SPR access micro-ops generations callbacks */
struct ppc_spr_t {
@@ -224,17 +335,19 @@ typedef union _ppc_vsr_t {
int16_t s16[8];
int32_t s32[4];
int64_t s64[2];
+ float16 f16[8];
float32 f32[4];
float64 f64[2];
float128 f128;
#ifdef CONFIG_INT128
__uint128_t u128;
#endif
- Int128 s128;
+ Int128 s128;
} ppc_vsr_t;
typedef ppc_vsr_t ppc_avr_t;
typedef ppc_vsr_t ppc_fprp_t;
+typedef ppc_vsr_t ppc_acc_t;
#if !defined(CONFIG_USER_ONLY)
/* Software TLB cache */
@@ -296,51 +409,170 @@ typedef struct ppc_v3_pate_t {
uint64_t dw1;
} ppc_v3_pate_t;
+/* PMU related structs and defines */
+#define PMU_COUNTERS_NUM 6
+typedef enum {
+ PMU_EVENT_INVALID = 0,
+ PMU_EVENT_INACTIVE,
+ PMU_EVENT_CYCLES,
+ PMU_EVENT_INSTRUCTIONS,
+ PMU_EVENT_INSN_RUN_LATCH,
+} PMUEventType;
+
/*****************************************************************************/
/* Machine state register bits definition */
-#define MSR_SF 63 /* Sixty-four-bit mode hflags */
-#define MSR_TAG 62 /* Tag-active mode (POWERx ?) */
-#define MSR_ISF 61 /* Sixty-four-bit interrupt mode on 630 */
-#define MSR_HV 60 /* hypervisor state hflags */
-#define MSR_TS0 34 /* Transactional state, 2 bits (Book3s) */
-#define MSR_TS1 33
-#define MSR_TM 32 /* Transactional Memory Available (Book3s) */
-#define MSR_CM 31 /* Computation mode for BookE hflags */
-#define MSR_ICM 30 /* Interrupt computation mode for BookE */
-#define MSR_GS 28 /* guest state for BookE */
-#define MSR_UCLE 26 /* User-mode cache lock enable for BookE */
-#define MSR_VR 25 /* altivec available x hflags */
-#define MSR_SPE 25 /* SPE enable for BookE x hflags */
-#define MSR_AP 23 /* Access privilege state on 602 hflags */
-#define MSR_VSX 23 /* Vector Scalar Extension (ISA 2.06 and later) x hflags */
-#define MSR_SA 22 /* Supervisor access mode on 602 hflags */
-#define MSR_KEY 19 /* key bit on 603e */
-#define MSR_POW 18 /* Power management */
-#define MSR_TGPR 17 /* TGPR usage on 602/603 x */
-#define MSR_CE 17 /* Critical interrupt enable on embedded PowerPC x */
-#define MSR_ILE 16 /* Interrupt little-endian mode */
-#define MSR_EE 15 /* External interrupt enable */
-#define MSR_PR 14 /* Problem state hflags */
-#define MSR_FP 13 /* Floating point available hflags */
-#define MSR_ME 12 /* Machine check interrupt enable */
-#define MSR_FE0 11 /* Floating point exception mode 0 */
-#define MSR_SE 10 /* Single-step trace enable x hflags */
-#define MSR_DWE 10 /* Debug wait enable on 405 x */
-#define MSR_UBLE 10 /* User BTB lock enable on e500 x */
-#define MSR_BE 9 /* Branch trace enable x hflags */
-#define MSR_DE 9 /* Debug interrupts enable on embedded PowerPC x */
-#define MSR_FE1 8 /* Floating point exception mode 1 */
-#define MSR_AL 7 /* AL bit on POWER */
-#define MSR_EP 6 /* Exception prefix on 601 */
-#define MSR_IR 5 /* Instruction relocate */
-#define MSR_DR 4 /* Data relocate */
-#define MSR_IS 5 /* Instruction address space (BookE) */
-#define MSR_DS 4 /* Data address space (BookE) */
-#define MSR_PE 3 /* Protection enable on 403 */
-#define MSR_PX 2 /* Protection exclusive on 403 x */
-#define MSR_PMM 2 /* Performance monitor mark on POWER x */
-#define MSR_RI 1 /* Recoverable interrupt 1 */
-#define MSR_LE 0 /* Little-endian mode 1 hflags */
+#define MSR_SF PPC_BIT_NR(0) /* Sixty-four-bit mode hflags */
+#define MSR_TAG PPC_BIT_NR(1) /* Tag-active mode (POWERx ?) */
+#define MSR_ISF PPC_BIT_NR(2) /* Sixty-four-bit interrupt mode on 630 */
+#define MSR_HV PPC_BIT_NR(3) /* hypervisor state hflags */
+#define MSR_TS0 PPC_BIT_NR(29) /* Transactional state, 2 bits (Book3s) */
+#define MSR_TS1 PPC_BIT_NR(30)
+#define MSR_TM PPC_BIT_NR(31) /* Transactional Memory Available (Book3s) */
+#define MSR_CM PPC_BIT_NR(32) /* Computation mode for BookE hflags */
+#define MSR_ICM PPC_BIT_NR(33) /* Interrupt computation mode for BookE */
+#define MSR_GS PPC_BIT_NR(35) /* guest state for BookE */
+#define MSR_UCLE PPC_BIT_NR(37) /* User-mode cache lock enable for BookE */
+#define MSR_VR PPC_BIT_NR(38) /* altivec available x hflags */
+#define MSR_SPE PPC_BIT_NR(38) /* SPE enable for BookE x hflags */
+#define MSR_VSX PPC_BIT_NR(40) /* Vector Scalar Extension (>= 2.06)x hflags */
+#define MSR_S PPC_BIT_NR(41) /* Secure state */
+#define MSR_KEY PPC_BIT_NR(44) /* key bit on 603e */
+#define MSR_POW PPC_BIT_NR(45) /* Power management */
+#define MSR_WE PPC_BIT_NR(45) /* Wait State Enable on 405 */
+#define MSR_TGPR PPC_BIT_NR(46) /* TGPR usage on 602/603 x */
+#define MSR_CE PPC_BIT_NR(46) /* Critical int. enable on embedded PPC x */
+#define MSR_ILE PPC_BIT_NR(47) /* Interrupt little-endian mode */
+#define MSR_EE PPC_BIT_NR(48) /* External interrupt enable */
+#define MSR_PR PPC_BIT_NR(49) /* Problem state hflags */
+#define MSR_FP PPC_BIT_NR(50) /* Floating point available hflags */
+#define MSR_ME PPC_BIT_NR(51) /* Machine check interrupt enable */
+#define MSR_FE0 PPC_BIT_NR(52) /* Floating point exception mode 0 */
+#define MSR_SE PPC_BIT_NR(53) /* Single-step trace enable x hflags */
+#define MSR_DWE PPC_BIT_NR(53) /* Debug wait enable on 405 x */
+#define MSR_UBLE PPC_BIT_NR(53) /* User BTB lock enable on e500 x */
+#define MSR_BE PPC_BIT_NR(54) /* Branch trace enable x hflags */
+#define MSR_DE PPC_BIT_NR(54) /* Debug int. enable on embedded PPC x */
+#define MSR_FE1 PPC_BIT_NR(55) /* Floating point exception mode 1 */
+#define MSR_AL PPC_BIT_NR(56) /* AL bit on POWER */
+#define MSR_EP PPC_BIT_NR(57) /* Exception prefix on 601 */
+#define MSR_IR PPC_BIT_NR(58) /* Instruction relocate */
+#define MSR_IS PPC_BIT_NR(58) /* Instruction address space (BookE) */
+#define MSR_DR PPC_BIT_NR(59) /* Data relocate */
+#define MSR_DS PPC_BIT_NR(59) /* Data address space (BookE) */
+#define MSR_PE PPC_BIT_NR(60) /* Protection enable on 403 */
+#define MSR_PX PPC_BIT_NR(61) /* Protection exclusive on 403 x */
+#define MSR_PMM PPC_BIT_NR(61) /* Performance monitor mark on POWER x */
+#define MSR_RI PPC_BIT_NR(62) /* Recoverable interrupt 1 */
+#define MSR_LE PPC_BIT_NR(63) /* Little-endian mode 1 hflags */
+
+FIELD(MSR, SF, MSR_SF, 1)
+FIELD(MSR, TAG, MSR_TAG, 1)
+FIELD(MSR, ISF, MSR_ISF, 1)
+#if defined(TARGET_PPC64)
+FIELD(MSR, HV, MSR_HV, 1)
+#define FIELD_EX64_HV(storage) FIELD_EX64(storage, MSR, HV)
+#else
+#define FIELD_EX64_HV(storage) 0
+#endif
+FIELD(MSR, TS0, MSR_TS0, 1)
+FIELD(MSR, TS1, MSR_TS1, 1)
+FIELD(MSR, TS, MSR_TS0, 2)
+FIELD(MSR, TM, MSR_TM, 1)
+FIELD(MSR, CM, MSR_CM, 1)
+FIELD(MSR, ICM, MSR_ICM, 1)
+FIELD(MSR, GS, MSR_GS, 1)
+FIELD(MSR, UCLE, MSR_UCLE, 1)
+FIELD(MSR, VR, MSR_VR, 1)
+FIELD(MSR, SPE, MSR_SPE, 1)
+FIELD(MSR, VSX, MSR_VSX, 1)
+FIELD(MSR, S, MSR_S, 1)
+FIELD(MSR, KEY, MSR_KEY, 1)
+FIELD(MSR, POW, MSR_POW, 1)
+FIELD(MSR, WE, MSR_WE, 1)
+FIELD(MSR, TGPR, MSR_TGPR, 1)
+FIELD(MSR, CE, MSR_CE, 1)
+FIELD(MSR, ILE, MSR_ILE, 1)
+FIELD(MSR, EE, MSR_EE, 1)
+FIELD(MSR, PR, MSR_PR, 1)
+FIELD(MSR, FP, MSR_FP, 1)
+FIELD(MSR, ME, MSR_ME, 1)
+FIELD(MSR, FE0, MSR_FE0, 1)
+FIELD(MSR, SE, MSR_SE, 1)
+FIELD(MSR, DWE, MSR_DWE, 1)
+FIELD(MSR, UBLE, MSR_UBLE, 1)
+FIELD(MSR, BE, MSR_BE, 1)
+FIELD(MSR, DE, MSR_DE, 1)
+FIELD(MSR, FE1, MSR_FE1, 1)
+FIELD(MSR, AL, MSR_AL, 1)
+FIELD(MSR, EP, MSR_EP, 1)
+FIELD(MSR, IR, MSR_IR, 1)
+FIELD(MSR, DR, MSR_DR, 1)
+FIELD(MSR, IS, MSR_IS, 1)
+FIELD(MSR, DS, MSR_DS, 1)
+FIELD(MSR, PE, MSR_PE, 1)
+FIELD(MSR, PX, MSR_PX, 1)
+FIELD(MSR, PMM, MSR_PMM, 1)
+FIELD(MSR, RI, MSR_RI, 1)
+FIELD(MSR, LE, MSR_LE, 1)
+
+/*
+ * FE0 and FE1 bits are not side-by-side
+ * so we can't combine them using FIELD()
+ */
+#define FIELD_EX64_FE(msr) \
+ ((FIELD_EX64(msr, MSR, FE0) << 1) | FIELD_EX64(msr, MSR, FE1))
+
+/* PMU bits */
+#define MMCR0_FC PPC_BIT(32) /* Freeze Counters */
+#define MMCR0_PMAO PPC_BIT(56) /* Perf Monitor Alert Occurred */
+#define MMCR0_PMAE PPC_BIT(37) /* Perf Monitor Alert Enable */
+#define MMCR0_EBE PPC_BIT(43) /* Perf Monitor EBB Enable */
+#define MMCR0_FCECE PPC_BIT(38) /* FC on Enabled Cond or Event */
+#define MMCR0_PMCC0 PPC_BIT(44) /* PMC Control bit 0 */
+#define MMCR0_PMCC1 PPC_BIT(45) /* PMC Control bit 1 */
+#define MMCR0_PMCC PPC_BITMASK(44, 45) /* PMC Control */
+#define MMCR0_FC14 PPC_BIT(58) /* PMC Freeze Counters 1-4 bit */
+#define MMCR0_FC56 PPC_BIT(59) /* PMC Freeze Counters 5-6 bit */
+#define MMCR0_PMC1CE PPC_BIT(48) /* MMCR0 PMC1 Condition Enabled */
+#define MMCR0_PMCjCE PPC_BIT(49) /* MMCR0 PMCj Condition Enabled */
+/* MMCR0 userspace r/w mask */
+#define MMCR0_UREG_MASK (MMCR0_FC | MMCR0_PMAO | MMCR0_PMAE)
+/* MMCR2 userspace r/w mask */
+#define MMCR2_FC1P0 PPC_BIT(1) /* MMCR2 FCnP0 for PMC1 */
+#define MMCR2_FC2P0 PPC_BIT(10) /* MMCR2 FCnP0 for PMC2 */
+#define MMCR2_FC3P0 PPC_BIT(19) /* MMCR2 FCnP0 for PMC3 */
+#define MMCR2_FC4P0 PPC_BIT(28) /* MMCR2 FCnP0 for PMC4 */
+#define MMCR2_FC5P0 PPC_BIT(37) /* MMCR2 FCnP0 for PMC5 */
+#define MMCR2_FC6P0 PPC_BIT(46) /* MMCR2 FCnP0 for PMC6 */
+#define MMCR2_UREG_MASK (MMCR2_FC1P0 | MMCR2_FC2P0 | MMCR2_FC3P0 | \
+ MMCR2_FC4P0 | MMCR2_FC5P0 | MMCR2_FC6P0)
+
+#define MMCR1_EVT_SIZE 8
+/* extract64() does a right shift before extracting */
+#define MMCR1_PMC1SEL_START 32
+#define MMCR1_PMC1EVT_EXTR (64 - MMCR1_PMC1SEL_START - MMCR1_EVT_SIZE)
+#define MMCR1_PMC2SEL_START 40
+#define MMCR1_PMC2EVT_EXTR (64 - MMCR1_PMC2SEL_START - MMCR1_EVT_SIZE)
+#define MMCR1_PMC3SEL_START 48
+#define MMCR1_PMC3EVT_EXTR (64 - MMCR1_PMC3SEL_START - MMCR1_EVT_SIZE)
+#define MMCR1_PMC4SEL_START 56
+#define MMCR1_PMC4EVT_EXTR (64 - MMCR1_PMC4SEL_START - MMCR1_EVT_SIZE)
+
+/* PMU uses CTRL_RUN to sample PM_RUN_INST_CMPL */
+#define CTRL_RUN PPC_BIT(63)
+
+/* EBB/BESCR bits */
+/* Global Enable */
+#define BESCR_GE PPC_BIT(0)
+/* External Event-based Exception Enable */
+#define BESCR_EE PPC_BIT(30)
+/* Performance Monitor Event-based Exception Enable */
+#define BESCR_PME PPC_BIT(31)
+/* External Event-based Exception Occurred */
+#define BESCR_EEO PPC_BIT(62)
+/* Performance Monitor Event-based Exception Occurred */
+#define BESCR_PMEO PPC_BIT(63)
+#define BESCR_INVALID PPC_BITMASK(32, 33)
/* LPCR bits */
#define LPCR_VPM0 PPC_BIT(0)
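
The MSR bit numbers above (and, further down, the FPSCR ones) are now written with PPC_BIT_NR(), which converts the architecture's MSB-0 bit positions into the LSB-0 shift counts QEMU uses, so the numeric values match the old plain defines. A small standalone check of that mapping against the values removed above:

    #include <assert.h>

    #define PPC_BIT_NR(bit) (63 - (bit))

    int main(void)
    {
        assert(PPC_BIT_NR(48) == 15);   /* MSR_EE was "#define MSR_EE 15" */
        assert(PPC_BIT_NR(49) == 14);   /* MSR_PR was 14 */
        assert(PPC_BIT_NR(63) == 0);    /* MSR_LE was 0 */
        return 0;
    }
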
@@ -400,52 +632,6 @@ typedef struct ppc_v3_pate_t {
#define HFSCR_MSGP PPC_BIT(53) /* Privileged Message Send Facilities */
#define HFSCR_IC_MSGP 0xA
-#define msr_sf ((env->msr >> MSR_SF) & 1)
-#define msr_isf ((env->msr >> MSR_ISF) & 1)
-#if defined(TARGET_PPC64)
-#define msr_hv ((env->msr >> MSR_HV) & 1)
-#else
-#define msr_hv (0)
-#endif
-#define msr_cm ((env->msr >> MSR_CM) & 1)
-#define msr_icm ((env->msr >> MSR_ICM) & 1)
-#define msr_gs ((env->msr >> MSR_GS) & 1)
-#define msr_ucle ((env->msr >> MSR_UCLE) & 1)
-#define msr_vr ((env->msr >> MSR_VR) & 1)
-#define msr_spe ((env->msr >> MSR_SPE) & 1)
-#define msr_ap ((env->msr >> MSR_AP) & 1)
-#define msr_vsx ((env->msr >> MSR_VSX) & 1)
-#define msr_sa ((env->msr >> MSR_SA) & 1)
-#define msr_key ((env->msr >> MSR_KEY) & 1)
-#define msr_pow ((env->msr >> MSR_POW) & 1)
-#define msr_tgpr ((env->msr >> MSR_TGPR) & 1)
-#define msr_ce ((env->msr >> MSR_CE) & 1)
-#define msr_ile ((env->msr >> MSR_ILE) & 1)
-#define msr_ee ((env->msr >> MSR_EE) & 1)
-#define msr_pr ((env->msr >> MSR_PR) & 1)
-#define msr_fp ((env->msr >> MSR_FP) & 1)
-#define msr_me ((env->msr >> MSR_ME) & 1)
-#define msr_fe0 ((env->msr >> MSR_FE0) & 1)
-#define msr_se ((env->msr >> MSR_SE) & 1)
-#define msr_dwe ((env->msr >> MSR_DWE) & 1)
-#define msr_uble ((env->msr >> MSR_UBLE) & 1)
-#define msr_be ((env->msr >> MSR_BE) & 1)
-#define msr_de ((env->msr >> MSR_DE) & 1)
-#define msr_fe1 ((env->msr >> MSR_FE1) & 1)
-#define msr_al ((env->msr >> MSR_AL) & 1)
-#define msr_ep ((env->msr >> MSR_EP) & 1)
-#define msr_ir ((env->msr >> MSR_IR) & 1)
-#define msr_dr ((env->msr >> MSR_DR) & 1)
-#define msr_is ((env->msr >> MSR_IS) & 1)
-#define msr_ds ((env->msr >> MSR_DS) & 1)
-#define msr_pe ((env->msr >> MSR_PE) & 1)
-#define msr_px ((env->msr >> MSR_PX) & 1)
-#define msr_pmm ((env->msr >> MSR_PMM) & 1)
-#define msr_ri ((env->msr >> MSR_RI) & 1)
-#define msr_le ((env->msr >> MSR_LE) & 1)
-#define msr_ts ((env->msr >> MSR_TS1) & 3)
-#define msr_tm ((env->msr >> MSR_TM) & 1)
-
#define DBCR0_ICMP (1 << 27)
#define DBCR0_BRT (1 << 26)
#define DBSR_ICMP (1 << 27)
@@ -572,8 +758,7 @@ enum {
POWERPC_FLAG_PX = 0x00000200,
POWERPC_FLAG_PMM = 0x00000400,
/* Flag for special features */
- /* Decrementer clock: RTC clock (POWER, 601) or bus clock */
- POWERPC_FLAG_RTC_CLK = 0x00010000,
+ /* Decrementer clock */
POWERPC_FLAG_BUS_CLK = 0x00020000,
/* Has CFAR */
POWERPC_FLAG_CFAR = 0x00040000,
@@ -583,8 +768,10 @@ enum {
POWERPC_FLAG_TM = 0x00100000,
/* Has SCV (ISA 3.00) */
POWERPC_FLAG_SCV = 0x00200000,
- /* Has HID0 for LE bit (601) */
- POWERPC_FLAG_HID0_LE = 0x00400000,
+ /* Has >1 thread per core */
+ POWERPC_FLAG_SMT = 0x00400000,
+ /* Using "LPAR per core" mode (as opposed to per-thread) */
+ POWERPC_FLAG_SMT_1LPAR = 0x00800000,
};
/*
@@ -595,17 +782,23 @@ enum {
* the MSR are validated in hreg_compute_hflags.
*/
enum {
- HFLAGS_LE = 0, /* MSR_LE -- comes from elsewhere on 601 */
+ HFLAGS_LE = 0, /* MSR_LE */
HFLAGS_HV = 1, /* computed from MSR_HV and other state */
HFLAGS_64 = 2, /* computed from MSR_CE and MSR_SF */
HFLAGS_GTSE = 3, /* computed from SPR_LPCR[GTSE] */
HFLAGS_DR = 4, /* MSR_DR */
+ HFLAGS_HR = 5, /* computed from SPR_LPCR[HR] */
HFLAGS_SPE = 6, /* from MSR_SPE if cpu has SPE; avoid overlap w/ MSR_VR */
HFLAGS_TM = 8, /* computed from MSR_TM */
HFLAGS_BE = 9, /* MSR_BE -- from elsewhere on embedded ppc */
HFLAGS_SE = 10, /* MSR_SE -- from elsewhere on embedded ppc */
HFLAGS_FP = 13, /* MSR_FP */
HFLAGS_PR = 14, /* MSR_PR */
+ HFLAGS_PMCC0 = 15, /* MMCR0 PMCC bit 0 */
+ HFLAGS_PMCC1 = 16, /* MMCR0 PMCC bit 1 */
+ HFLAGS_PMCJCE = 17, /* MMCR0 PMCjCE bit */
+ HFLAGS_PMC_OTHER = 18, /* PMC other than PMC5-6 is enabled */
+ HFLAGS_INSN_CNT = 19, /* PMU instruction count enabled */
HFLAGS_VSX = 23, /* MSR_VSX if cpu has VSX */
HFLAGS_VR = 25, /* MSR_VR if cpu has VRE */
@@ -615,77 +808,50 @@ enum {
/*****************************************************************************/
/* Floating point status and control register */
-#define FPSCR_DRN2 34 /* Decimal Floating-Point rounding control */
-#define FPSCR_DRN1 33 /* Decimal Floating-Point rounding control */
-#define FPSCR_DRN0 32 /* Decimal Floating-Point rounding control */
-#define FPSCR_FX 31 /* Floating-point exception summary */
-#define FPSCR_FEX 30 /* Floating-point enabled exception summary */
-#define FPSCR_VX 29 /* Floating-point invalid operation exception summ. */
-#define FPSCR_OX 28 /* Floating-point overflow exception */
-#define FPSCR_UX 27 /* Floating-point underflow exception */
-#define FPSCR_ZX 26 /* Floating-point zero divide exception */
-#define FPSCR_XX 25 /* Floating-point inexact exception */
-#define FPSCR_VXSNAN 24 /* Floating-point invalid operation exception (sNan) */
-#define FPSCR_VXISI 23 /* Floating-point invalid operation exception (inf) */
-#define FPSCR_VXIDI 22 /* Floating-point invalid operation exception (inf) */
-#define FPSCR_VXZDZ 21 /* Floating-point invalid operation exception (zero) */
-#define FPSCR_VXIMZ 20 /* Floating-point invalid operation exception (inf) */
-#define FPSCR_VXVC 19 /* Floating-point invalid operation exception (comp) */
-#define FPSCR_FR 18 /* Floating-point fraction rounded */
-#define FPSCR_FI 17 /* Floating-point fraction inexact */
-#define FPSCR_C 16 /* Floating-point result class descriptor */
-#define FPSCR_FL 15 /* Floating-point less than or negative */
-#define FPSCR_FG 14 /* Floating-point greater than or negative */
-#define FPSCR_FE 13 /* Floating-point equal or zero */
-#define FPSCR_FU 12 /* Floating-point unordered or NaN */
-#define FPSCR_FPCC 12 /* Floating-point condition code */
-#define FPSCR_FPRF 12 /* Floating-point result flags */
-#define FPSCR_VXSOFT 10 /* Floating-point invalid operation exception (soft) */
-#define FPSCR_VXSQRT 9 /* Floating-point invalid operation exception (sqrt) */
-#define FPSCR_VXCVI 8 /* Floating-point invalid operation exception (int) */
-#define FPSCR_VE 7 /* Floating-point invalid operation exception enable */
-#define FPSCR_OE 6 /* Floating-point overflow exception enable */
-#define FPSCR_UE 5 /* Floating-point underflow exception enable */
-#define FPSCR_ZE 4 /* Floating-point zero divide exception enable */
-#define FPSCR_XE 3 /* Floating-point inexact exception enable */
-#define FPSCR_NI 2 /* Floating-point non-IEEE mode */
-#define FPSCR_RN1 1
-#define FPSCR_RN0 0 /* Floating-point rounding control */
-#define fpscr_drn (((env->fpscr) & FP_DRN) >> FPSCR_DRN0)
-#define fpscr_fex (((env->fpscr) >> FPSCR_FEX) & 0x1)
-#define fpscr_vx (((env->fpscr) >> FPSCR_VX) & 0x1)
-#define fpscr_ox (((env->fpscr) >> FPSCR_OX) & 0x1)
-#define fpscr_ux (((env->fpscr) >> FPSCR_UX) & 0x1)
-#define fpscr_zx (((env->fpscr) >> FPSCR_ZX) & 0x1)
-#define fpscr_xx (((env->fpscr) >> FPSCR_XX) & 0x1)
-#define fpscr_vxsnan (((env->fpscr) >> FPSCR_VXSNAN) & 0x1)
-#define fpscr_vxisi (((env->fpscr) >> FPSCR_VXISI) & 0x1)
-#define fpscr_vxidi (((env->fpscr) >> FPSCR_VXIDI) & 0x1)
-#define fpscr_vxzdz (((env->fpscr) >> FPSCR_VXZDZ) & 0x1)
-#define fpscr_vximz (((env->fpscr) >> FPSCR_VXIMZ) & 0x1)
-#define fpscr_vxvc (((env->fpscr) >> FPSCR_VXVC) & 0x1)
-#define fpscr_fpcc (((env->fpscr) >> FPSCR_FPCC) & 0xF)
-#define fpscr_vxsoft (((env->fpscr) >> FPSCR_VXSOFT) & 0x1)
-#define fpscr_vxsqrt (((env->fpscr) >> FPSCR_VXSQRT) & 0x1)
-#define fpscr_vxcvi (((env->fpscr) >> FPSCR_VXCVI) & 0x1)
-#define fpscr_ve (((env->fpscr) >> FPSCR_VE) & 0x1)
-#define fpscr_oe (((env->fpscr) >> FPSCR_OE) & 0x1)
-#define fpscr_ue (((env->fpscr) >> FPSCR_UE) & 0x1)
-#define fpscr_ze (((env->fpscr) >> FPSCR_ZE) & 0x1)
-#define fpscr_xe (((env->fpscr) >> FPSCR_XE) & 0x1)
-#define fpscr_ni (((env->fpscr) >> FPSCR_NI) & 0x1)
-#define fpscr_rn (((env->fpscr) >> FPSCR_RN0) & 0x3)
+#define FPSCR_DRN2 PPC_BIT_NR(29) /* Decimal Floating-Point rounding ctrl. */
+#define FPSCR_DRN1 PPC_BIT_NR(30) /* Decimal Floating-Point rounding ctrl. */
+#define FPSCR_DRN0 PPC_BIT_NR(31) /* Decimal Floating-Point rounding ctrl. */
+#define FPSCR_FX PPC_BIT_NR(32) /* Floating-point exception summary */
+#define FPSCR_FEX PPC_BIT_NR(33) /* Floating-point enabled exception summ.*/
+#define FPSCR_VX PPC_BIT_NR(34) /* Floating-point invalid op. excp. summ.*/
+#define FPSCR_OX PPC_BIT_NR(35) /* Floating-point overflow exception */
+#define FPSCR_UX PPC_BIT_NR(36) /* Floating-point underflow exception */
+#define FPSCR_ZX PPC_BIT_NR(37) /* Floating-point zero divide exception */
+#define FPSCR_XX PPC_BIT_NR(38) /* Floating-point inexact exception */
+#define FPSCR_VXSNAN PPC_BIT_NR(39) /* Floating-point invalid op. excp (sNan)*/
+#define FPSCR_VXISI PPC_BIT_NR(40) /* Floating-point invalid op. excp (inf) */
+#define FPSCR_VXIDI PPC_BIT_NR(41) /* Floating-point invalid op. excp (inf) */
+#define FPSCR_VXZDZ PPC_BIT_NR(42) /* Floating-point invalid op. excp (zero)*/
+#define FPSCR_VXIMZ PPC_BIT_NR(43) /* Floating-point invalid op. excp (inf) */
+#define FPSCR_VXVC PPC_BIT_NR(44) /* Floating-point invalid op. excp (comp)*/
+#define FPSCR_FR PPC_BIT_NR(45) /* Floating-point fraction rounded */
+#define FPSCR_FI PPC_BIT_NR(46) /* Floating-point fraction inexact */
+#define FPSCR_C PPC_BIT_NR(47) /* Floating-point result class descriptor*/
+#define FPSCR_FL PPC_BIT_NR(48) /* Floating-point less than or negative */
+#define FPSCR_FG PPC_BIT_NR(49) /* Floating-point greater than or neg. */
+#define FPSCR_FE PPC_BIT_NR(50) /* Floating-point equal or zero */
+#define FPSCR_FU PPC_BIT_NR(51) /* Floating-point unordered or NaN */
+#define FPSCR_FPCC PPC_BIT_NR(51) /* Floating-point condition code */
+#define FPSCR_FPRF PPC_BIT_NR(51) /* Floating-point result flags */
+#define FPSCR_VXSOFT PPC_BIT_NR(53) /* Floating-point invalid op. excp (soft)*/
+#define FPSCR_VXSQRT PPC_BIT_NR(54) /* Floating-point invalid op. excp (sqrt)*/
+#define FPSCR_VXCVI PPC_BIT_NR(55) /* Floating-point invalid op. excp (int) */
+#define FPSCR_VE PPC_BIT_NR(56) /* Floating-point invalid op. excp enable*/
+#define FPSCR_OE PPC_BIT_NR(57) /* Floating-point overflow excp. enable */
+#define FPSCR_UE PPC_BIT_NR(58) /* Floating-point underflow excp. enable */
+#define FPSCR_ZE PPC_BIT_NR(59) /* Floating-point zero divide excp enable*/
+#define FPSCR_XE PPC_BIT_NR(60) /* Floating-point inexact excp. enable */
+#define FPSCR_NI PPC_BIT_NR(61) /* Floating-point non-IEEE mode */
+#define FPSCR_RN1 PPC_BIT_NR(62)
+#define FPSCR_RN0 PPC_BIT_NR(63) /* Floating-point rounding control */
/* Invalid operation exception summary */
#define FPSCR_IX ((1 << FPSCR_VXSNAN) | (1 << FPSCR_VXISI) | \
(1 << FPSCR_VXIDI) | (1 << FPSCR_VXZDZ) | \
(1 << FPSCR_VXIMZ) | (1 << FPSCR_VXVC) | \
(1 << FPSCR_VXSOFT) | (1 << FPSCR_VXSQRT) | \
(1 << FPSCR_VXCVI))
-/* exception summary */
-#define fpscr_ex (((env->fpscr) >> FPSCR_XX) & 0x1F)
-/* enabled exception summary */
-#define fpscr_eex (((env->fpscr) >> FPSCR_XX) & ((env->fpscr) >> FPSCR_XE) & \
- 0x1F)
+
+FIELD(FPSCR, FI, FPSCR_FI, 1)
#define FP_DRN2 (1ull << FPSCR_DRN2)
#define FP_DRN1 (1ull << FPSCR_DRN1)
@@ -735,6 +901,10 @@ enum {
FP_VXZDZ | FP_VXIMZ | FP_VXVC | FP_VXSOFT | \
FP_VXSQRT | FP_VXCVI)
+/* FPSCR bits that can be set by mtfsf, mtfsfi and mtfsb1 */
+#define FPSCR_MTFS_MASK (~(MAKE_64BIT_MASK(36, 28) | PPC_BIT(28) | \
+ FP_FEX | FP_VX | PPC_BIT(52)))
+
/*****************************************************************************/
/* Vector status and control register */
#define VSCR_NJ 16 /* Vector non-java */
@@ -999,6 +1169,36 @@ struct ppc_radix_page_info {
};
/*****************************************************************************/
+/* Dynamic Execution Control Register */
+
+#define DEXCR_ASPECT(name, num) \
+FIELD(DEXCR, PNH_##name, PPC_BIT_NR(num), 1) \
+FIELD(DEXCR, PRO_##name, PPC_BIT_NR(num + 32), 1) \
+FIELD(HDEXCR, HNU_##name, PPC_BIT_NR(num), 1) \
+FIELD(HDEXCR, ENF_##name, PPC_BIT_NR(num + 32), 1) \
+
+DEXCR_ASPECT(SBHE, 0)
+DEXCR_ASPECT(IBRTPD, 1)
+DEXCR_ASPECT(SRAPD, 4)
+DEXCR_ASPECT(NPHIE, 5)
+DEXCR_ASPECT(PHIE, 6)
+
+/*****************************************************************************/
+/* PowerNV ChipTOD and TimeBase State Machine */
+struct pnv_tod_tbst {
+ int tb_ready_for_tod; /* core TB ready to receive TOD from chiptod */
+ int tod_sent_to_tb; /* chiptod sent TOD to the core TB */
+
+ /*
+ * "Timers" for async TBST events are simulated by mfTFAC because TFAC
+ * is polled for such events. These are just used to ensure firmware
+ * performs the polling at least a few times.
+ */
+ int tb_state_timer;
+ int tb_sync_pulse_timer;
+};
+
+/*****************************************************************************/
/* The whole PowerPC CPU context */
/*
@@ -1012,7 +1212,7 @@ struct ppc_radix_page_info {
#define PPC_CPU_OPCODES_LEN 0x40
#define PPC_CPU_INDIRECT_OPCODES_LEN 0x20
-struct CPUPPCState {
+struct CPUArchState {
/* Most commonly used resources during translated code execution first */
target_ulong gpr[32]; /* general purpose registers */
target_ulong gprh[32]; /* storage for GPR MSB, used by the SPE extension */
@@ -1029,9 +1229,12 @@ struct CPUPPCState {
target_ulong ov32;
target_ulong ca32;
- target_ulong reserve_addr; /* Reservation address */
- target_ulong reserve_val; /* Reservation value */
+ target_ulong reserve_addr; /* Reservation address */
+ target_ulong reserve_length; /* Reservation larx op size (bytes) */
+ target_ulong reserve_val; /* Reservation value */
+#if defined(TARGET_PPC64)
target_ulong reserve_val2;
+#endif
/* These are used in supervisor mode only */
target_ulong msr; /* machine state register */
@@ -1039,7 +1242,6 @@ struct CPUPPCState {
/* used to speed-up TLB assist handlers */
target_ulong nip; /* next instruction pointer */
- uint64_t retxh; /* high part of 128-bit helper return */
/* when a memory exception occurs, the access type is stored here */
int access_type;
@@ -1048,6 +1250,8 @@ struct CPUPPCState {
/* MMU context, only relevant for full system emulation */
#if defined(TARGET_PPC64)
ppc_slb_t slb[MAX_SLB_ENTRIES]; /* PowerPC 64 SLB area */
+ struct CPUBreakpoint *ciabr_breakpoint;
+ struct CPUWatchpoint *dawr0_watchpoint;
#endif
target_ulong sr[32]; /* segment registers */
uint32_t nb_BATs; /* number of BATs */
@@ -1062,17 +1266,27 @@ struct CPUPPCState {
int nb_pids; /* Number of available PID registers */
int tlb_type; /* Type of TLB we're dealing with */
ppc_tlb_t tlb; /* TLB is optional. Allocate them only if needed */
- target_ulong pb[4]; /* 403 dedicated access protection registers */
+#ifdef CONFIG_KVM
bool tlb_dirty; /* Set to non-zero when modifying TLB */
bool kvm_sw_tlb; /* non-zero if KVM SW TLB API is active */
+#endif /* CONFIG_KVM */
uint32_t tlb_need_flush; /* Delayed flush needed */
#define TLB_NEED_LOCAL_FLUSH 0x1
#define TLB_NEED_GLOBAL_FLUSH 0x2
+
+#if defined(TARGET_PPC64)
+ /* PowerNV chiptod / timebase facility state. */
+ /* Would be nice to put these into PnvCore */
+ struct pnv_tod_tbst pnv_tod_tbst;
+#endif
#endif
/* Other registers */
target_ulong spr[1024]; /* special purpose registers */
ppc_spr_t spr_cb[1024];
+ /* Composite status for PMC[1-6] enabled and counting insns or cycles. */
+ uint8_t pmc_ins_cnt;
+ uint8_t pmc_cyc_cnt;
/* Vector status and control register, minus VSCR_SAT */
uint32_t vscr;
/* VSX registers (including FP and AVR) */
@@ -1108,13 +1322,13 @@ struct CPUPPCState {
int error_code;
uint32_t pending_interrupts;
#if !defined(CONFIG_USER_ONLY)
+ uint64_t excp_stats[POWERPC_EXCP_NB];
/*
* This is the IRQ controller, which is implementation dependent and only
* relevant when emulating a complete machine. Note that this isn't used
* by recent Book3s compatible CPUs (POWER7 and newer).
*/
uint32_t irq_input_state;
- void **irq_inputs;
target_ulong excp_vectors[POWERPC_EXCP_NB]; /* Exception vectors */
target_ulong excp_prefix;
@@ -1167,8 +1381,27 @@ struct CPUPPCState {
uint32_t tm_vscr;
uint64_t tm_dscr;
uint64_t tm_tar;
+
+ /*
+ * Timers used to fire performance monitor alerts
+ * when counting cycles.
+ */
+ QEMUTimer *pmu_cyc_overflow_timers[PMU_COUNTERS_NUM];
+
+ /*
+ * PMU base time value used by the PMU to calculate
+ * running cycles.
+ */
+ uint64_t pmu_base_time;
};
+#define _CORE_ID(cs) \
+ (POWERPC_CPU(cs)->env.spr_cb[SPR_PIR].default_value & ~(cs->nr_threads - 1))
+
+#define THREAD_SIBLING_FOREACH(cs, cs_sibling) \
+ CPU_FOREACH(cs_sibling) \
+ if (_CORE_ID(cs) == _CORE_ID(cs_sibling))
+
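_CORE_ID() derives a core identifier from the PIR default value, and THREAD_SIBLING_FOREACH() then walks every vCPU sharing that core. A usage sketch (the surrounding function and the doorbell call are illustrative; ppc_set_irq() comes from hw/ppc/ppc.h):

static void wake_core_siblings(CPUState *cs)
{
    CPUState *ccs;

    THREAD_SIBLING_FOREACH(cs, ccs) {
        PowerPCCPU *ccpu = POWERPC_CPU(ccs);

        /* e.g. raise a doorbell on every thread of this core */
        ppc_set_irq(ccpu, PPC_INTERRUPT_DOORBELL, 1);
    }
}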
#define SET_FIT_PERIOD(a_, b_, c_, d_) \
do { \
env->fit_period[0] = (a_); \
@@ -1196,12 +1429,9 @@ typedef struct PPCVirtualHypervisorClass PPCVirtualHypervisorClass;
*
* A PowerPC CPU.
*/
-struct PowerPCCPU {
- /*< private >*/
+struct ArchCPU {
CPUState parent_obj;
- /*< public >*/
- CPUNegativeOffsetState neg;
CPUPPCState env;
int vcpu_id;
@@ -1226,7 +1456,53 @@ struct PowerPCCPU {
int32_t mig_slb_nr;
};
+/**
+ * PowerPCCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_phases: The parent class' reset phase handlers.
+ *
+ * A PowerPC CPU model.
+ */
+struct PowerPCCPUClass {
+ CPUClass parent_class;
+
+ DeviceRealize parent_realize;
+ DeviceUnrealize parent_unrealize;
+ ResettablePhases parent_phases;
+ void (*parent_parse_features)(const char *type, char *str, Error **errp);
+
+ uint32_t pvr;
+ /*
+ * If @best is false, match if pcc is in the family of pvr
+ * Else match only if pcc is the best match for pvr in this family.
+ */
+ bool (*pvr_match)(struct PowerPCCPUClass *pcc, uint32_t pvr, bool best);
+ uint64_t pcr_mask; /* Available bits in PCR register */
+ uint64_t pcr_supported; /* Bits for supported PowerISA versions */
+ uint32_t svr;
+ uint64_t insns_flags;
+ uint64_t insns_flags2;
+ uint64_t msr_mask;
+ uint64_t lpcr_mask; /* Available bits in the LPCR */
+ uint64_t lpcr_pm; /* Power-saving mode Exit Cause Enable bits */
+ powerpc_mmu_t mmu_model;
+ powerpc_excp_t excp_model;
+ powerpc_input_t bus_model;
+ uint32_t flags;
+ int bfd_mach;
+ uint32_t l1_dcache_size, l1_icache_size;
+#ifndef CONFIG_USER_ONLY
+ GDBFeature gdb_spr;
+#endif
+ const PPCHash64Options *hash64_opts;
+ struct ppc_radix_page_info *radix_page_info;
+ uint32_t lrg_decr_bits;
+ int n_host_threads;
+ void (*init_proc)(CPUPPCState *env);
+ int (*check_pow)(CPUPPCState *env);
+};
+ObjectClass *ppc_cpu_class_by_name(const char *name);
PowerPCCPUClass *ppc_cpu_class_by_pvr(uint32_t pvr);
PowerPCCPUClass *ppc_cpu_class_by_pvr_mask(uint32_t pvr);
PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc);
@@ -1234,6 +1510,8 @@ PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc);
#ifndef CONFIG_USER_ONLY
struct PPCVirtualHypervisorClass {
InterfaceClass parent;
+ bool (*cpu_in_nested)(PowerPCCPU *cpu);
+ void (*deliver_hv_excp)(PowerPCCPU *cpu, int excp);
void (*hypercall)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu);
hwaddr (*hpt_mask)(PPCVirtualHypervisor *vhyp);
const ppc_hash_pte64_t *(*map_hptes)(PPCVirtualHypervisor *vhyp,
@@ -1243,7 +1521,8 @@ struct PPCVirtualHypervisorClass {
hwaddr ptex, int n);
void (*hpte_set_c)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1);
void (*hpte_set_r)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1);
- void (*get_pate)(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry);
+ bool (*get_pate)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu,
+ target_ulong lpid, ppc_v3_pate_t *entry);
target_ulong (*encode_hpt_for_kvm_pr)(PPCVirtualHypervisor *vhyp);
void (*cpu_exec_enter)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu);
void (*cpu_exec_exit)(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu);
@@ -1252,23 +1531,27 @@ struct PPCVirtualHypervisorClass {
#define TYPE_PPC_VIRTUAL_HYPERVISOR "ppc-virtual-hypervisor"
DECLARE_OBJ_CHECKERS(PPCVirtualHypervisor, PPCVirtualHypervisorClass,
PPC_VIRTUAL_HYPERVISOR, TYPE_PPC_VIRTUAL_HYPERVISOR)
+
+static inline bool vhyp_cpu_in_nested(PowerPCCPU *cpu)
+{
+ return PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp)->cpu_in_nested(cpu);
+}
#endif /* CONFIG_USER_ONLY */
void ppc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
-hwaddr ppc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int ppc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int ppc_cpu_gdb_read_register_apple(CPUState *cpu, GByteArray *buf, int reg);
int ppc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int ppc_cpu_gdb_write_register_apple(CPUState *cpu, uint8_t *buf, int reg);
#ifndef CONFIG_USER_ONLY
-void ppc_gdb_gen_spr_xml(PowerPCCPU *cpu);
-const char *ppc_gdb_get_dynamic_xml(CPUState *cs, const char *xml_name);
+hwaddr ppc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
#endif
int ppc64_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
- int cpuid, void *opaque);
+ int cpuid, DumpState *s);
int ppc32_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
- int cpuid, void *opaque);
+ int cpuid, DumpState *s);
#ifndef CONFIG_USER_ONLY
+void ppc_maybe_interrupt(CPUPPCState *env);
void ppc_cpu_do_interrupt(CPUState *cpu);
bool ppc_cpu_exec_interrupt(CPUState *cpu, int int_req);
void ppc_cpu_do_system_reset(CPUState *cs);
@@ -1278,20 +1561,21 @@ extern const VMStateDescription vmstate_ppc_cpu;
/*****************************************************************************/
void ppc_translate_init(void);
-bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr);
#if !defined(CONFIG_USER_ONLY)
void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
+void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val);
+void ppc_update_ciabr(CPUPPCState *env);
+void ppc_store_ciabr(CPUPPCState *env, target_ulong value);
+void ppc_update_daw0(CPUPPCState *env);
+void ppc_store_dawr0(CPUPPCState *env, target_ulong value);
+void ppc_store_dawrx0(CPUPPCState *env, uint32_t value);
#endif /* !defined(CONFIG_USER_ONLY) */
void ppc_store_msr(CPUPPCState *env, target_ulong value);
-void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val);
void ppc_cpu_list(void);
/* Time-base and decrementer management */
-#ifndef NO_CPU_IO_DEFS
uint64_t cpu_ppc_load_tbl(CPUPPCState *env);
uint32_t cpu_ppc_load_tbu(CPUPPCState *env);
void cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value);
@@ -1300,6 +1584,8 @@ uint64_t cpu_ppc_load_atbl(CPUPPCState *env);
uint32_t cpu_ppc_load_atbu(CPUPPCState *env);
void cpu_ppc_store_atbl(CPUPPCState *env, uint32_t value);
void cpu_ppc_store_atbu(CPUPPCState *env, uint32_t value);
+void cpu_ppc_increase_tb_by_offset(CPUPPCState *env, int64_t offset);
+void cpu_ppc_decrease_tb_by_offset(CPUPPCState *env, int64_t offset);
uint64_t cpu_ppc_load_vtb(CPUPPCState *env);
void cpu_ppc_store_vtb(CPUPPCState *env, uint64_t value);
bool ppc_decr_clear_on_delivery(CPUPPCState *env);
@@ -1310,30 +1596,23 @@ void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value);
void cpu_ppc_store_tbu40(CPUPPCState *env, uint64_t value);
uint64_t cpu_ppc_load_purr(CPUPPCState *env);
void cpu_ppc_store_purr(CPUPPCState *env, uint64_t value);
-uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env);
-uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env);
#if !defined(CONFIG_USER_ONLY)
-void cpu_ppc601_store_rtcl(CPUPPCState *env, uint32_t value);
-void cpu_ppc601_store_rtcu(CPUPPCState *env, uint32_t value);
target_ulong load_40x_pit(CPUPPCState *env);
void store_40x_pit(CPUPPCState *env, target_ulong val);
void store_40x_dbcr0(CPUPPCState *env, uint32_t val);
void store_40x_sler(CPUPPCState *env, uint32_t val);
+void store_40x_tcr(CPUPPCState *env, target_ulong val);
+void store_40x_tsr(CPUPPCState *env, target_ulong val);
void store_booke_tcr(CPUPPCState *env, target_ulong val);
void store_booke_tsr(CPUPPCState *env, target_ulong val);
void ppc_tlb_invalidate_all(CPUPPCState *env);
void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr);
void cpu_ppc_set_vhyp(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp);
-int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb,
- hwaddr *raddrp, target_ulong address,
- uint32_t pid);
-int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
- hwaddr *raddrp,
- target_ulong address, uint32_t pid, int ext,
- int i);
-hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
- ppcmas_tlb_t *tlb);
-#endif
+void cpu_ppc_set_1lpar(PowerPCCPU *cpu);
+int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, hwaddr *raddrp,
+ target_ulong address, uint32_t pid);
+int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, uint32_t pid);
+hwaddr booke206_tlb_to_page_size(CPUPPCState *env, ppcmas_tlb_t *tlb);
#endif
void ppc_store_fpscr(CPUPPCState *env, target_ulong val);
@@ -1361,15 +1640,11 @@ static inline uint64_t ppc_dump_gpr(CPUPPCState *env, int gprn)
int ppc_dcr_read(ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp);
int ppc_dcr_write(ppc_dcr_t *dcr_env, int dcrn, uint32_t val);
-#define POWERPC_CPU_TYPE_SUFFIX "-" TYPE_POWERPC_CPU
-#define POWERPC_CPU_TYPE_NAME(model) model POWERPC_CPU_TYPE_SUFFIX
-#define CPU_RESOLVING_TYPE TYPE_POWERPC_CPU
-
#define cpu_list ppc_cpu_list
/* MMU modes definitions */
#define MMU_USER_IDX 0
-static inline int cpu_mmu_index(CPUPPCState *env, bool ifetch)
+static inline int ppc_env_mmu_index(CPUPPCState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
return MMU_USER_IDX;
@@ -1389,15 +1664,13 @@ int ppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr, Error **errp);
#if !defined(CONFIG_USER_ONLY)
int ppc_set_compat_all(uint32_t compat_pvr, Error **errp);
+int ppc_init_compat_all(uint32_t compat_pvr, Error **errp);
#endif
int ppc_compat_max_vthreads(PowerPCCPU *cpu);
void ppc_compat_add_property(Object *obj, const char *name,
uint32_t *compat_pvr, const char *basedesc);
#endif /* defined(TARGET_PPC64) */
-typedef CPUPPCState CPUArchState;
-typedef PowerPCCPU ArchCPU;
-
#include "exec/cpu-all.h"
/*****************************************************************************/
@@ -1425,27 +1698,18 @@ typedef PowerPCCPU ArchCPU;
#define XER_CMP 8
#define XER_BC 0
#define xer_so (env->so)
-#define xer_ov (env->ov)
-#define xer_ca (env->ca)
-#define xer_ov32 (env->ov)
-#define xer_ca32 (env->ca)
#define xer_cmp ((env->xer >> XER_CMP) & 0xFF)
#define xer_bc ((env->xer >> XER_BC) & 0x7F)
/* SPR definitions */
#define SPR_MQ (0x000)
#define SPR_XER (0x001)
-#define SPR_601_VRTCU (0x004)
-#define SPR_601_VRTCL (0x005)
-#define SPR_601_UDECR (0x006)
#define SPR_LR (0x008)
#define SPR_CTR (0x009)
#define SPR_UAMR (0x00D)
#define SPR_DSCR (0x011)
#define SPR_DSISR (0x012)
-#define SPR_DAR (0x013) /* DAE for PowerPC 601 */
-#define SPR_601_RTCU (0x014)
-#define SPR_601_RTCL (0x015)
+#define SPR_DAR (0x013)
#define SPR_DECR (0x016)
#define SPR_SDR1 (0x019)
#define SPR_SRR0 (0x01A)
@@ -1493,9 +1757,11 @@ typedef PowerPCCPU ArchCPU;
#define SPR_PSPB (0x09F)
#define SPR_DPDES (0x0B0)
#define SPR_DAWR0 (0x0B4)
+#define SPR_DAWR1 (0x0B5)
#define SPR_RPR (0x0BA)
#define SPR_CIABR (0x0BB)
#define SPR_DAWRX0 (0x0BC)
+#define SPR_DAWRX1 (0x0BD)
#define SPR_HFSCR (0x0BE)
#define SPR_VRSAVE (0x100)
#define SPR_USPRG0 (0x100)
@@ -1506,8 +1772,8 @@ typedef PowerPCCPU ArchCPU;
#define SPR_USPRG5 (0x105)
#define SPR_USPRG6 (0x106)
#define SPR_USPRG7 (0x107)
-#define SPR_VTBL (0x10C)
-#define SPR_VTBU (0x10D)
+#define SPR_TBL (0x10C)
+#define SPR_TBU (0x10D)
#define SPR_SPRG0 (0x110)
#define SPR_SPRG1 (0x111)
#define SPR_SPRG2 (0x112)
@@ -1520,8 +1786,8 @@ typedef PowerPCCPU ArchCPU;
#define SPR_SPRG7 (0x117)
#define SPR_ASR (0x118)
#define SPR_EAR (0x11A)
-#define SPR_TBL (0x11C)
-#define SPR_TBU (0x11D)
+#define SPR_WR_TBL (0x11C)
+#define SPR_WR_TBU (0x11D)
#define SPR_TBU40 (0x11E)
#define SPR_SVR (0x11E)
#define SPR_BOOKE_PIR (0x11E)
@@ -1563,6 +1829,7 @@ typedef PowerPCCPU ArchCPU;
#define SPR_HMER (0x150)
#define SPR_HMEER (0x151)
#define SPR_PCR (0x152)
+#define SPR_HEIR (0x153)
#define SPR_BOOKE_LPIDR (0x152)
#define SPR_BOOKE_TCR (0x154)
#define SPR_BOOKE_TLB0PS (0x158)
@@ -1599,7 +1866,11 @@ typedef PowerPCCPU ArchCPU;
#define SPR_BOOKE_GIVOR13 (0x1BC)
#define SPR_BOOKE_GIVOR14 (0x1BD)
#define SPR_TIR (0x1BE)
+#define SPR_UHDEXCR (0x1C7)
#define SPR_PTCR (0x1D0)
+#define SPR_HASHKEYR (0x1D4)
+#define SPR_HASHPKEYR (0x1D5)
+#define SPR_HDEXCR (0x1D7)
#define SPR_BOOKE_SPEFSCR (0x200)
#define SPR_Exxx_BBEAR (0x201)
#define SPR_Exxx_BBTAR (0x202)
@@ -1684,6 +1955,12 @@ typedef PowerPCCPU ArchCPU;
#define SPR_BOOKE_TLB2CFG (0x2B2)
#define SPR_BOOKE_TLB3CFG (0x2B3)
#define SPR_BOOKE_EPR (0x2BE)
+#define SPR_POWER_USIER2 (0x2E0)
+#define SPR_POWER_USIER3 (0x2E1)
+#define SPR_POWER_UMMCR3 (0x2E2)
+#define SPR_POWER_SIER2 (0x2F0)
+#define SPR_POWER_SIER3 (0x2F1)
+#define SPR_POWER_MMCR3 (0x2F2)
#define SPR_PERF0 (0x300)
#define SPR_RCPU_MI_RBA0 (0x300)
#define SPR_MPC_MI_CTR (0x300)
@@ -1788,15 +2065,19 @@ typedef PowerPCCPU ArchCPU;
#define SPR_RCPU_L2U_RA2 (0x32A)
#define SPR_MPC_MD_DBRAM1 (0x32A)
#define SPR_RCPU_L2U_RA3 (0x32B)
+#define SPR_UDEXCR (0x32C)
#define SPR_TAR (0x32F)
#define SPR_ASDR (0x330)
+#define SPR_DEXCR (0x33C)
#define SPR_IC (0x350)
#define SPR_VTB (0x351)
#define SPR_MMCRC (0x353)
#define SPR_PSSCR (0x357)
#define SPR_440_INV0 (0x370)
#define SPR_440_INV1 (0x371)
+#define SPR_TRIG1 (0x371)
#define SPR_440_INV2 (0x372)
+#define SPR_TRIG2 (0x372)
#define SPR_440_INV3 (0x373)
#define SPR_440_ITV0 (0x374)
#define SPR_440_ITV1 (0x375)
@@ -1922,7 +2203,6 @@ typedef PowerPCCPU ArchCPU;
#define SPR_HID1 (0x3F1)
#define SPR_IABR (0x3F2)
#define SPR_40x_DBCR0 (0x3F2)
-#define SPR_601_HID2 (0x3F2)
#define SPR_Exxx_L1CSR0 (0x3F2)
#define SPR_ICTRL (0x3F3)
#define SPR_HID2 (0x3F3)
@@ -1938,7 +2218,6 @@ typedef PowerPCCPU ArchCPU;
#define DABR_MASK (~(target_ulong)0x7)
#define SPR_Exxx_BUCSR (0x3F5)
#define SPR_40x_IAC2 (0x3F5)
-#define SPR_601_HID5 (0x3F5)
#define SPR_40x_DAC1 (0x3F6)
#define SPR_MSSCR0 (0x3F6)
#define SPR_970_HID5 (0x3F6)
@@ -1971,7 +2250,6 @@ typedef PowerPCCPU ArchCPU;
#define SPR_403_PBL2 (0x3FE)
#define SPR_PIR (0x3FF)
#define SPR_403_PBU2 (0x3FF)
-#define SPR_601_HID15 (0x3FF)
#define SPR_604_HID15 (0x3FF)
#define SPR_E500_SVR (0x3FF)
@@ -2036,15 +2314,6 @@ enum {
#define PPC_RES PPC_INSNS_BASE
/* spr/msr access instructions */
#define PPC_MISC PPC_INSNS_BASE
- /* Deprecated instruction sets */
- /* Original POWER instruction set */
- PPC_POWER = 0x0000000000000002ULL,
- /* POWER2 instruction set extension */
- PPC_POWER2 = 0x0000000000000004ULL,
- /* Power RTC support */
- PPC_POWER_RTC = 0x0000000000000008ULL,
- /* Power-to-PowerPC bridge (601) */
- PPC_POWER_BR = 0x0000000000000010ULL,
/* 64 bits PowerPC instruction set */
PPC_64B = 0x0000000000000020ULL,
/* New 64 bits extensions (PowerPC 2.0x) */
@@ -2057,8 +2326,6 @@ enum {
PPC_MFTB = 0x0000000000000200ULL,
/* Fixed-point unit extensions */
- /* PowerPC 602 specific */
- PPC_602_SPEC = 0x0000000000000400ULL,
/* isel instruction */
PPC_ISEL = 0x0000000000000800ULL,
/* popcntb instruction */
@@ -2117,8 +2384,6 @@ enum {
PPC_SEGMENT = 0x0000020000000000ULL,
/* PowerPC 6xx TLB management instructions */
PPC_6xx_TLB = 0x0000040000000000ULL,
- /* PowerPC 74xx TLB management instructions */
- PPC_74xx_TLB = 0x0000080000000000ULL,
/* PowerPC 40x TLB management instructions */
PPC_40x_TLB = 0x0000100000000000ULL,
/* segment register access instructions for PowerPC 64 "bridge" */
@@ -2154,15 +2419,12 @@ enum {
PPC_DCR = 0x1000000000000000ULL,
    /* DCR extended accesses */
PPC_DCRX = 0x2000000000000000ULL,
- /* user-mode DCR access, implemented in PowerPC 460 */
- PPC_DCRUX = 0x4000000000000000ULL,
/* popcntw and popcntd instructions */
PPC_POPCNTWD = 0x8000000000000000ULL,
-#define PPC_TCG_INSNS (PPC_INSNS_BASE | PPC_POWER | PPC_POWER2 \
- | PPC_POWER_RTC | PPC_POWER_BR | PPC_64B \
+#define PPC_TCG_INSNS (PPC_INSNS_BASE | PPC_64B \
| PPC_64BX | PPC_64H | PPC_WAIT | PPC_MFTB \
- | PPC_602_SPEC | PPC_ISEL | PPC_POPCNTB \
+ | PPC_ISEL | PPC_POPCNTB \
| PPC_STRING | PPC_FLOAT | PPC_FLOAT_EXT \
| PPC_FLOAT_FSQRT | PPC_FLOAT_FRES \
| PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES \
@@ -2175,13 +2437,13 @@ enum {
| PPC_CACHE_DCBZ \
| PPC_CACHE_DCBA | PPC_CACHE_LOCK \
| PPC_EXTERN | PPC_SEGMENT | PPC_6xx_TLB \
- | PPC_74xx_TLB | PPC_40x_TLB | PPC_SEGMENT_64B \
+ | PPC_40x_TLB | PPC_SEGMENT_64B \
| PPC_SLBI | PPC_WRTEE | PPC_40x_EXCP \
| PPC_405_MAC | PPC_440_SPEC | PPC_BOOKE \
| PPC_MFAPIDI | PPC_TLBIVA | PPC_TLBIVAX \
| PPC_4xx_COMMON | PPC_40x_ICBT | PPC_RFMCI \
- | PPC_RFDI | PPC_DCR | PPC_DCRX | PPC_DCRUX \
- | PPC_POPCNTWD | PPC_CILDST)
+ | PPC_RFDI | PPC_DCR | PPC_DCRX | PPC_POPCNTWD \
+ | PPC_CILDST)
/* extended type values */
@@ -2227,6 +2489,10 @@ enum {
PPC2_ISA300 = 0x0000000000080000ULL,
/* POWER ISA 3.1 */
PPC2_ISA310 = 0x0000000000100000ULL,
+ /* lwsync instruction */
+ PPC2_MEM_LWSYNC = 0x0000000000200000ULL,
+ /* ISA 2.06 BCD assist instructions */
+ PPC2_BCDA_ISA206 = 0x0000000000400000ULL,
#define PPC_TCG_INSNS2 (PPC2_BOOKE206 | PPC2_VSX | PPC2_PRCNTL | PPC2_DBRX | \
PPC2_ISA205 | PPC2_VSX207 | PPC2_PERM_ISA206 | \
@@ -2235,7 +2501,8 @@ enum {
PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | \
PPC2_ALTIVEC_207 | PPC2_ISA207S | PPC2_DFP | \
PPC2_FP_CVT_S64 | PPC2_TM | PPC2_PM_ISA206 | \
- PPC2_ISA300 | PPC2_ISA310)
+ PPC2_ISA300 | PPC2_ISA310 | PPC2_MEM_LWSYNC | \
+ PPC2_BCDA_ISA206)
};
/*****************************************************************************/
@@ -2356,26 +2623,27 @@ enum {
/* Hardware exceptions definitions */
enum {
/* External hardware exception sources */
- PPC_INTERRUPT_RESET = 0, /* Reset exception */
- PPC_INTERRUPT_WAKEUP, /* Wakeup exception */
- PPC_INTERRUPT_MCK, /* Machine check exception */
- PPC_INTERRUPT_EXT, /* External interrupt */
- PPC_INTERRUPT_SMI, /* System management interrupt */
- PPC_INTERRUPT_CEXT, /* Critical external interrupt */
- PPC_INTERRUPT_DEBUG, /* External debug exception */
- PPC_INTERRUPT_THERM, /* Thermal exception */
+ PPC_INTERRUPT_RESET = 0x00001, /* Reset exception */
+ PPC_INTERRUPT_WAKEUP = 0x00002, /* Wakeup exception */
+ PPC_INTERRUPT_MCK = 0x00004, /* Machine check exception */
+ PPC_INTERRUPT_EXT = 0x00008, /* External interrupt */
+ PPC_INTERRUPT_SMI = 0x00010, /* System management interrupt */
+ PPC_INTERRUPT_CEXT = 0x00020, /* Critical external interrupt */
+ PPC_INTERRUPT_DEBUG = 0x00040, /* External debug exception */
+ PPC_INTERRUPT_THERM = 0x00080, /* Thermal exception */
/* Internal hardware exception sources */
- PPC_INTERRUPT_DECR, /* Decrementer exception */
- PPC_INTERRUPT_HDECR, /* Hypervisor decrementer exception */
- PPC_INTERRUPT_PIT, /* Programmable interval timer interrupt */
- PPC_INTERRUPT_FIT, /* Fixed interval timer interrupt */
- PPC_INTERRUPT_WDT, /* Watchdog timer interrupt */
- PPC_INTERRUPT_CDOORBELL, /* Critical doorbell interrupt */
- PPC_INTERRUPT_DOORBELL, /* Doorbell interrupt */
- PPC_INTERRUPT_PERFM, /* Performance monitor interrupt */
- PPC_INTERRUPT_HMI, /* Hypervisor Maintenance interrupt */
- PPC_INTERRUPT_HDOORBELL, /* Hypervisor Doorbell interrupt */
- PPC_INTERRUPT_HVIRT, /* Hypervisor virtualization interrupt */
+ PPC_INTERRUPT_DECR = 0x00100, /* Decrementer exception */
+ PPC_INTERRUPT_HDECR = 0x00200, /* Hypervisor decrementer exception */
+ PPC_INTERRUPT_PIT = 0x00400, /* Programmable interval timer int. */
+ PPC_INTERRUPT_FIT = 0x00800, /* Fixed interval timer interrupt */
+ PPC_INTERRUPT_WDT = 0x01000, /* Watchdog timer interrupt */
+ PPC_INTERRUPT_CDOORBELL = 0x02000, /* Critical doorbell interrupt */
+ PPC_INTERRUPT_DOORBELL = 0x04000, /* Doorbell interrupt */
+ PPC_INTERRUPT_PERFM = 0x08000, /* Performance monitor interrupt */
+ PPC_INTERRUPT_HMI = 0x10000, /* Hypervisor Maintenance interrupt */
+ PPC_INTERRUPT_HDOORBELL = 0x20000, /* Hypervisor Doorbell interrupt */
+ PPC_INTERRUPT_HVIRT = 0x40000, /* Hypervisor virtualization interrupt */
+ PPC_INTERRUPT_EBB = 0x80000, /* Event-based Branch exception */
};
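Encoding each source as a single bit (rather than an index) lets env->pending_interrupts be updated with plain mask operations. A minimal sketch, assuming system emulation (ppc_maybe_interrupt() is declared earlier in this header):

static inline void set_decr_pending(CPUPPCState *env)
{
    env->pending_interrupts |= PPC_INTERRUPT_DECR;
    ppc_maybe_interrupt(env);   /* re-evaluate interrupt delivery */
}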
/* Processor Compatibility mask (PCR) */
@@ -2408,10 +2676,38 @@ enum {
HMER_XSCOM_STATUS_MASK = PPC_BITMASK(21, 23),
};
+/* TFMR */
+enum {
+ TFMR_CONTROL_MASK = PPC_BITMASK(0, 24),
+ TFMR_MASK_HMI = PPC_BIT(10),
+ TFMR_TB_ECLIPZ = PPC_BIT(14),
+ TFMR_LOAD_TOD_MOD = PPC_BIT(16),
+ TFMR_MOVE_CHIP_TOD_TO_TB = PPC_BIT(18),
+ TFMR_CLEAR_TB_ERRORS = PPC_BIT(24),
+ TFMR_STATUS_MASK = PPC_BITMASK(25, 63),
+ TFMR_TBST_ENCODED = PPC_BITMASK(28, 31), /* TBST = TB State */
+ TFMR_TBST_LAST = PPC_BITMASK(32, 35), /* Previous TBST */
+ TFMR_TB_ENABLED = PPC_BIT(40),
+ TFMR_TB_VALID = PPC_BIT(41),
+ TFMR_TB_SYNC_OCCURED = PPC_BIT(42),
+ TFMR_FIRMWARE_CONTROL_ERROR = PPC_BIT(46),
+};
+
+/* TFMR TBST (Time Base State Machine). */
+enum {
+ TBST_RESET = 0x0,
+ TBST_SEND_TOD_MOD = 0x1,
+ TBST_NOT_SET = 0x2,
+ TBST_SYNC_WAIT = 0x6,
+ TBST_GET_TOD = 0x7,
+ TBST_TB_RUNNING = 0x8,
+ TBST_TB_ERROR = 0x9,
+};
+
/*****************************************************************************/
#define is_isa300(ctx) (!!(ctx->insns_flags2 & PPC2_ISA300))
-target_ulong cpu_read_xer(CPUPPCState *env);
+target_ulong cpu_read_xer(const CPUPPCState *env);
void cpu_write_xer(CPUPPCState *env, target_ulong xer);
/*
@@ -2421,11 +2717,11 @@ void cpu_write_xer(CPUPPCState *env, target_ulong xer);
#define is_book3s_arch2x(ctx) (!!((ctx)->insns_flags & PPC_SEGMENT_64B))
#ifdef CONFIG_DEBUG_TCG
-void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags);
+void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags);
#else
-static inline void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags)
+static inline void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags)
{
*pc = env->nip;
*cs_base = 0;
@@ -2433,13 +2729,18 @@ static inline void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
}
#endif
-void QEMU_NORETURN raise_exception(CPUPPCState *env, uint32_t exception);
-void QEMU_NORETURN raise_exception_ra(CPUPPCState *env, uint32_t exception,
- uintptr_t raddr);
-void QEMU_NORETURN raise_exception_err(CPUPPCState *env, uint32_t exception,
- uint32_t error_code);
-void QEMU_NORETURN raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
- uint32_t error_code, uintptr_t raddr);
+G_NORETURN void raise_exception(CPUPPCState *env, uint32_t exception);
+G_NORETURN void raise_exception_ra(CPUPPCState *env, uint32_t exception,
+ uintptr_t raddr);
+G_NORETURN void raise_exception_err(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code);
+G_NORETURN void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code, uintptr_t raddr);
+
+/* PERFM EBB helper */
+#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+void raise_ebb_perfm_exception(CPUPPCState *env);
+#endif
#if !defined(CONFIG_USER_ONLY)
static inline int booke206_tlbm_id(CPUPPCState *env, ppcmas_tlb_t *tlbm)
@@ -2579,7 +2880,7 @@ static inline bool lsw_reg_in_range(int start, int nregs, int rx)
}
/* Accessors for FP, VMX and VSX registers */
-#if defined(HOST_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN
#define VsrB(i) u8[i]
#define VsrSB(i) s8[i]
#define VsrH(i) u16[i]
@@ -2588,6 +2889,9 @@ static inline bool lsw_reg_in_range(int start, int nregs, int rx)
#define VsrSW(i) s32[i]
#define VsrD(i) u64[i]
#define VsrSD(i) s64[i]
+#define VsrHF(i) f16[i]
+#define VsrSF(i) f32[i]
+#define VsrDF(i) f64[i]
#else
#define VsrB(i) u8[15 - (i)]
#define VsrSB(i) s8[15 - (i)]
@@ -2597,6 +2901,9 @@ static inline bool lsw_reg_in_range(int start, int nregs, int rx)
#define VsrSW(i) s32[3 - (i)]
#define VsrD(i) u64[1 - (i)]
#define VsrSD(i) s64[1 - (i)]
+#define VsrHF(i) f16[7 - (i)]
+#define VsrSF(i) f32[3 - (i)]
+#define VsrDF(i) f64[1 - (i)]
#endif
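The Vsr* accessors index vector register elements by their architected position regardless of host endianness. A short sketch (the function is illustrative only):

static uint32_t avr_word_sum(CPUPPCState *env, int vr)
{
    ppc_avr_t *avr = cpu_avr_ptr(env, vr);
    uint32_t sum = 0;

    for (int i = 0; i < 4; i++) {
        sum += avr->VsrW(i);    /* element 0 is the most significant word */
    }
    return sum;
}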
static inline int vsr64_offset(int i, bool high)
@@ -2609,6 +2916,11 @@ static inline int vsr_full_offset(int i)
return offsetof(CPUPPCState, vsr[i].u64[0]);
}
+static inline int acc_full_offset(int i)
+{
+ return vsr_full_offset(i * 4);
+}
+
static inline int fpr_offset(int i)
{
return vsr64_offset(i, true);
@@ -2645,24 +2957,75 @@ static inline bool ppc_has_spr(PowerPCCPU *cpu, int spr)
return cpu->env.spr_cb[spr].name != NULL;
}
-static inline bool ppc_interrupts_little_endian(PowerPCCPU *cpu)
+#if !defined(CONFIG_USER_ONLY)
+/* Sort out endianness of interrupt. Depends on the CPU, HV mode, etc. */
+static inline bool ppc_interrupts_little_endian(PowerPCCPU *cpu, bool hv)
{
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ CPUPPCState *env = &cpu->env;
+ bool ile;
+
+ if (hv && env->has_hv_mode) {
+ if (is_isa300(pcc)) {
+ ile = !!(env->spr[SPR_HID0] & HID0_POWER9_HILE);
+ } else {
+ ile = !!(env->spr[SPR_HID0] & HID0_HILE);
+ }
- /*
- * Only models that have an LPCR and know about LPCR_ILE can do little
- * endian.
- */
- if (pcc->lpcr_mask & LPCR_ILE) {
- return !!(cpu->env.spr[SPR_LPCR] & LPCR_ILE);
+ } else if (pcc->lpcr_mask & LPCR_ILE) {
+ ile = !!(env->spr[SPR_LPCR] & LPCR_ILE);
+ } else {
+ ile = FIELD_EX64(env->msr, MSR, ILE);
}
- return false;
+ return ile;
}
+#endif
void dump_mmu(CPUPPCState *env);
void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len);
void ppc_store_vscr(CPUPPCState *env, uint32_t vscr);
uint32_t ppc_get_vscr(CPUPPCState *env);
+void ppc_set_cr(CPUPPCState *env, uint64_t cr);
+uint64_t ppc_get_cr(const CPUPPCState *env);
+
+/*****************************************************************************/
+/* Power management enable checks */
+static inline int check_pow_none(CPUPPCState *env)
+{
+ return 0;
+}
+
+static inline int check_pow_nocheck(CPUPPCState *env)
+{
+ return 1;
+}
+
+/*****************************************************************************/
+/* PowerPC implementations definitions */
+
+#define POWERPC_FAMILY(_name) \
+ static void \
+ glue(glue(ppc_, _name), _cpu_family_class_init)(ObjectClass *, void *); \
+ \
+ static const TypeInfo \
+ glue(glue(ppc_, _name), _cpu_family_type_info) = { \
+ .name = stringify(_name) "-family-" TYPE_POWERPC_CPU, \
+ .parent = TYPE_POWERPC_CPU, \
+ .abstract = true, \
+ .class_init = glue(glue(ppc_, _name), _cpu_family_class_init), \
+ }; \
+ \
+ static void glue(glue(ppc_, _name), _cpu_family_register_types)(void) \
+ { \
+ type_register_static( \
+ &glue(glue(ppc_, _name), _cpu_family_type_info)); \
+ } \
+ \
+ type_init(glue(glue(ppc_, _name), _cpu_family_register_types)) \
+ \
+ static void glue(glue(ppc_, _name), _cpu_family_class_init)
+
+
#endif /* PPC_CPU_H */
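POWERPC_FAMILY() expands to the TypeInfo boilerplate and registration for a CPU family, leaving the class_init body to be written right after the macro invocation. cpu_init.c uses it roughly as below (member assignments abbreviated and illustrative):

POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);

    dc->desc = "POWER9";
    pcc->init_proc = init_proc_POWER9;
    pcc->check_pow = check_pow_nocheck;
}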
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index 6aad01d1d3..6241de62ce 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -20,8 +20,7 @@
#include "qemu/osdep.h"
#include "disas/dis-asm.h"
-#include "exec/gdbstub.h"
-#include "kvm_ppc.h"
+#include "gdbstub/helpers.h"
#include "sysemu/cpus.h"
#include "sysemu/hw_accel.h"
#include "sysemu/tcg.h"
@@ -40,11 +39,17 @@
#include "qemu/cutils.h"
#include "disas/capstone.h"
#include "fpu/softfloat.h"
-#include "qapi/qapi-commands-machine-target.h"
#include "helper_regs.h"
#include "internal.h"
-#include "spr_tcg.h"
+#include "spr_common.h"
+#include "power8-pmu.h"
+
+#ifndef CONFIG_USER_ONLY
+#include "hw/boards.h"
+#include "hw/intc/intc.h"
+#include "kvm_ppc.h"
+#endif
/* #define PPC_DEBUG_SPR */
/* #define USE_APPLE_GDB */
@@ -56,406 +61,52 @@ static inline void vscr_init(CPUPPCState *env, uint32_t val)
ppc_store_vscr(env, val);
}
-/**
- * _spr_register
- *
- * Register an SPR with all the callbacks required for tcg,
- * and the ID number for KVM.
- *
- * The reason for the conditional compilation is that the tcg functions
- * may be compiled out, and the system kvm header may not be available
- * for supplying the ID numbers. This is ugly, but the best we can do.
- */
-
-#ifdef CONFIG_TCG
-# define USR_ARG(X) X,
-# ifdef CONFIG_USER_ONLY
-# define SYS_ARG(X)
-# else
-# define SYS_ARG(X) X,
-# endif
-#else
-# define USR_ARG(X)
-# define SYS_ARG(X)
-#endif
-#ifdef CONFIG_KVM
-# define KVM_ARG(X) X,
-#else
-# define KVM_ARG(X)
-#endif
-
-typedef void spr_callback(DisasContext *, int, int);
-
-static void _spr_register(CPUPPCState *env, int num, const char *name,
- USR_ARG(spr_callback *uea_read)
- USR_ARG(spr_callback *uea_write)
- SYS_ARG(spr_callback *oea_read)
- SYS_ARG(spr_callback *oea_write)
- SYS_ARG(spr_callback *hea_read)
- SYS_ARG(spr_callback *hea_write)
- KVM_ARG(uint64_t one_reg_id)
- target_ulong initial_value)
+static void register_745_sprs(CPUPPCState *env)
{
- ppc_spr_t *spr = &env->spr_cb[num];
-
- /* No SPR should be registered twice. */
- assert(spr->name == NULL);
- assert(name != NULL);
-
- spr->name = name;
- spr->default_value = initial_value;
- env->spr[num] = initial_value;
-
-#ifdef CONFIG_TCG
- spr->uea_read = uea_read;
- spr->uea_write = uea_write;
-# ifndef CONFIG_USER_ONLY
- spr->oea_read = oea_read;
- spr->oea_write = oea_write;
- spr->hea_read = hea_read;
- spr->hea_write = hea_write;
-# endif
-#endif
-#ifdef CONFIG_KVM
- spr->one_reg_id = one_reg_id;
-#endif
-}
-
-/* spr_register_kvm_hv passes all required arguments. */
-#define spr_register_kvm_hv(env, num, name, uea_read, uea_write, \
- oea_read, oea_write, hea_read, hea_write, \
- one_reg_id, initial_value) \
- _spr_register(env, num, name, \
- USR_ARG(uea_read) USR_ARG(uea_write) \
- SYS_ARG(oea_read) SYS_ARG(oea_write) \
- SYS_ARG(hea_read) SYS_ARG(hea_write) \
- KVM_ARG(one_reg_id) initial_value)
-
-/* spr_register_kvm duplicates the oea callbacks to the hea callbacks. */
-#define spr_register_kvm(env, num, name, uea_read, uea_write, \
- oea_read, oea_write, one_reg_id, ival) \
- spr_register_kvm_hv(env, num, name, uea_read, uea_write, oea_read, \
- oea_write, oea_read, oea_write, one_reg_id, ival)
-
-/* spr_register_hv and spr_register are similar, except there is no kvm id. */
-#define spr_register_hv(env, num, name, uea_read, uea_write, \
- oea_read, oea_write, hea_read, hea_write, ival) \
- spr_register_kvm_hv(env, num, name, uea_read, uea_write, oea_read, \
- oea_write, hea_read, hea_write, 0, ival)
-
-#define spr_register(env, num, name, uea_read, uea_write, \
- oea_read, oea_write, ival) \
- spr_register_kvm(env, num, name, uea_read, uea_write, \
- oea_read, oea_write, 0, ival)
-
-/* Generic PowerPC SPRs */
-static void register_generic_sprs(CPUPPCState *env)
-{
- /* Integer processing */
- spr_register(env, SPR_XER, "XER",
- &spr_read_xer, &spr_write_xer,
- &spr_read_xer, &spr_write_xer,
- 0x00000000);
- /* Branch control */
- spr_register(env, SPR_LR, "LR",
- &spr_read_lr, &spr_write_lr,
- &spr_read_lr, &spr_write_lr,
- 0x00000000);
- spr_register(env, SPR_CTR, "CTR",
- &spr_read_ctr, &spr_write_ctr,
- &spr_read_ctr, &spr_write_ctr,
- 0x00000000);
- /* Interrupt processing */
- spr_register(env, SPR_SRR0, "SRR0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- spr_register(env, SPR_SRR1, "SRR1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* Processor control */
- spr_register(env, SPR_SPRG0, "SPRG0",
+ /* SGPRs */
+ spr_register(env, SPR_SPRG4, "SPRG4",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- spr_register(env, SPR_SPRG1, "SPRG1",
+ spr_register(env, SPR_SPRG5, "SPRG5",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- spr_register(env, SPR_SPRG2, "SPRG2",
+ spr_register(env, SPR_SPRG6, "SPRG6",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- spr_register(env, SPR_SPRG3, "SPRG3",
+ spr_register(env, SPR_SPRG7, "SPRG7",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
-}
-
-/* SPR common to all non-embedded PowerPC, including 601 */
-static void register_ne_601_sprs(CPUPPCState *env)
-{
- /* Exception processing */
- spr_register_kvm(env, SPR_DSISR, "DSISR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- KVM_REG_PPC_DSISR, 0x00000000);
- spr_register_kvm(env, SPR_DAR, "DAR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- KVM_REG_PPC_DAR, 0x00000000);
- /* Timer */
- spr_register(env, SPR_DECR, "DECR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_decr, &spr_write_decr,
- 0x00000000);
-}
-
-/* Storage Description Register 1 */
-static void register_sdr1_sprs(CPUPPCState *env)
-{
-#ifndef CONFIG_USER_ONLY
- if (env->has_hv_mode) {
- /*
- * SDR1 is a hypervisor resource on CPUs which have a
- * hypervisor mode
- */
- spr_register_hv(env, SPR_SDR1, "SDR1",
- SPR_NOACCESS, SPR_NOACCESS,
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_sdr1,
- 0x00000000);
- } else {
- spr_register(env, SPR_SDR1, "SDR1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_sdr1,
- 0x00000000);
- }
-#endif
-}
-/* BATs 0-3 */
-static void register_low_BATs(CPUPPCState *env)
-{
-#if !defined(CONFIG_USER_ONLY)
- spr_register(env, SPR_IBAT0U, "IBAT0U",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_ibat, &spr_write_ibatu,
- 0x00000000);
- spr_register(env, SPR_IBAT0L, "IBAT0L",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_ibat, &spr_write_ibatl,
- 0x00000000);
- spr_register(env, SPR_IBAT1U, "IBAT1U",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_ibat, &spr_write_ibatu,
- 0x00000000);
- spr_register(env, SPR_IBAT1L, "IBAT1L",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_ibat, &spr_write_ibatl,
- 0x00000000);
- spr_register(env, SPR_IBAT2U, "IBAT2U",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_ibat, &spr_write_ibatu,
- 0x00000000);
- spr_register(env, SPR_IBAT2L, "IBAT2L",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_ibat, &spr_write_ibatl,
- 0x00000000);
- spr_register(env, SPR_IBAT3U, "IBAT3U",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_ibat, &spr_write_ibatu,
- 0x00000000);
- spr_register(env, SPR_IBAT3L, "IBAT3L",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_ibat, &spr_write_ibatl,
- 0x00000000);
- spr_register(env, SPR_DBAT0U, "DBAT0U",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_dbat, &spr_write_dbatu,
- 0x00000000);
- spr_register(env, SPR_DBAT0L, "DBAT0L",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_dbat, &spr_write_dbatl,
- 0x00000000);
- spr_register(env, SPR_DBAT1U, "DBAT1U",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_dbat, &spr_write_dbatu,
- 0x00000000);
- spr_register(env, SPR_DBAT1L, "DBAT1L",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_dbat, &spr_write_dbatl,
- 0x00000000);
- spr_register(env, SPR_DBAT2U, "DBAT2U",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_dbat, &spr_write_dbatu,
- 0x00000000);
- spr_register(env, SPR_DBAT2L, "DBAT2L",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_dbat, &spr_write_dbatl,
- 0x00000000);
- spr_register(env, SPR_DBAT3U, "DBAT3U",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_dbat, &spr_write_dbatu,
- 0x00000000);
- spr_register(env, SPR_DBAT3L, "DBAT3L",
+ /* Hardware implementation registers */
+ spr_register(env, SPR_HID0, "HID0",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_dbat, &spr_write_dbatl,
+ &spr_read_generic, &spr_write_generic,
0x00000000);
- env->nb_BATs += 4;
-#endif
-}
-/* BATs 4-7 */
-static void register_high_BATs(CPUPPCState *env)
-{
-#if !defined(CONFIG_USER_ONLY)
- spr_register(env, SPR_IBAT4U, "IBAT4U",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_ibat_h, &spr_write_ibatu_h,
- 0x00000000);
- spr_register(env, SPR_IBAT4L, "IBAT4L",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_ibat_h, &spr_write_ibatl_h,
- 0x00000000);
- spr_register(env, SPR_IBAT5U, "IBAT5U",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_ibat_h, &spr_write_ibatu_h,
- 0x00000000);
- spr_register(env, SPR_IBAT5L, "IBAT5L",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_ibat_h, &spr_write_ibatl_h,
- 0x00000000);
- spr_register(env, SPR_IBAT6U, "IBAT6U",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_ibat_h, &spr_write_ibatu_h,
- 0x00000000);
- spr_register(env, SPR_IBAT6L, "IBAT6L",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_ibat_h, &spr_write_ibatl_h,
- 0x00000000);
- spr_register(env, SPR_IBAT7U, "IBAT7U",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_ibat_h, &spr_write_ibatu_h,
- 0x00000000);
- spr_register(env, SPR_IBAT7L, "IBAT7L",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_ibat_h, &spr_write_ibatl_h,
- 0x00000000);
- spr_register(env, SPR_DBAT4U, "DBAT4U",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_dbat_h, &spr_write_dbatu_h,
- 0x00000000);
- spr_register(env, SPR_DBAT4L, "DBAT4L",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_dbat_h, &spr_write_dbatl_h,
- 0x00000000);
- spr_register(env, SPR_DBAT5U, "DBAT5U",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_dbat_h, &spr_write_dbatu_h,
- 0x00000000);
- spr_register(env, SPR_DBAT5L, "DBAT5L",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_dbat_h, &spr_write_dbatl_h,
- 0x00000000);
- spr_register(env, SPR_DBAT6U, "DBAT6U",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_dbat_h, &spr_write_dbatu_h,
- 0x00000000);
- spr_register(env, SPR_DBAT6L, "DBAT6L",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_dbat_h, &spr_write_dbatl_h,
- 0x00000000);
- spr_register(env, SPR_DBAT7U, "DBAT7U",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_dbat_h, &spr_write_dbatu_h,
- 0x00000000);
- spr_register(env, SPR_DBAT7L, "DBAT7L",
+ spr_register(env, SPR_HID1, "HID1",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_dbat_h, &spr_write_dbatl_h,
- 0x00000000);
- env->nb_BATs += 4;
-#endif
-}
-
-/* Generic PowerPC time base */
-static void register_tbl(CPUPPCState *env)
-{
- spr_register(env, SPR_VTBL, "TBL",
- &spr_read_tbl, SPR_NOACCESS,
- &spr_read_tbl, SPR_NOACCESS,
- 0x00000000);
- spr_register(env, SPR_TBL, "TBL",
- &spr_read_tbl, SPR_NOACCESS,
- &spr_read_tbl, &spr_write_tbl,
- 0x00000000);
- spr_register(env, SPR_VTBU, "TBU",
- &spr_read_tbu, SPR_NOACCESS,
- &spr_read_tbu, SPR_NOACCESS,
- 0x00000000);
- spr_register(env, SPR_TBU, "TBU",
- &spr_read_tbu, SPR_NOACCESS,
- &spr_read_tbu, &spr_write_tbu,
+ &spr_read_generic, &spr_write_generic,
0x00000000);
-}
-/* Softare table search registers */
-static void register_6xx_7xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways)
-{
-#if !defined(CONFIG_USER_ONLY)
- env->nb_tlb = nb_tlbs;
- env->nb_ways = nb_ways;
- env->id_tlbs = 1;
- env->tlb_type = TLB_6XX;
- spr_register(env, SPR_DMISS, "DMISS",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, SPR_NOACCESS,
- 0x00000000);
- spr_register(env, SPR_DCMP, "DCMP",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, SPR_NOACCESS,
- 0x00000000);
- spr_register(env, SPR_HASH1, "HASH1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, SPR_NOACCESS,
- 0x00000000);
- spr_register(env, SPR_HASH2, "HASH2",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, SPR_NOACCESS,
- 0x00000000);
- spr_register(env, SPR_IMISS, "IMISS",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, SPR_NOACCESS,
- 0x00000000);
- spr_register(env, SPR_ICMP, "ICMP",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, SPR_NOACCESS,
- 0x00000000);
- spr_register(env, SPR_RPA, "RPA",
+ spr_register(env, SPR_HID2, "HID2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
-#endif
}
-/* SPR common to MPC755 and G2 */
-static void register_G2_755_sprs(CPUPPCState *env)
+static void register_755_sprs(CPUPPCState *env)
{
- /* SGPRs */
- spr_register(env, SPR_SPRG4, "SPRG4",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- spr_register(env, SPR_SPRG5, "SPRG5",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- spr_register(env, SPR_SPRG6, "SPRG6",
+ /* L2 cache control */
+ spr_register(env, SPR_L2CR, "L2CR",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, spr_access_nop,
0x00000000);
- spr_register(env, SPR_SPRG7, "SPRG7",
+
+ spr_register(env, SPR_L2PMCR, "L2PMCR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
@@ -465,99 +116,106 @@ static void register_G2_755_sprs(CPUPPCState *env)
static void register_7xx_sprs(CPUPPCState *env)
{
/* Breakpoints */
- /* XXX : not implemented */
spr_register_kvm(env, SPR_DABR, "DABR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
KVM_REG_PPC_DABR, 0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_IABR, "IABR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* Cache management */
- /* XXX : not implemented */
spr_register(env, SPR_ICTC, "ICTC",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* Performance monitors */
- /* XXX : not implemented */
spr_register(env, SPR_7XX_MMCR0, "MMCR0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_MMCR1, "MMCR1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_PMC1, "PMC1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_PMC2, "PMC2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_PMC3, "PMC3",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_PMC4, "PMC4",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_SIAR, "SIAR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, SPR_NOACCESS,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_UMMCR0, "UMMCR0",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_UMMCR1, "UMMCR1",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_UPMC1, "UPMC1",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_UPMC2, "UPMC2",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_UPMC3, "UPMC3",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_UPMC4, "UPMC4",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_USIAR, "USIAR",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
0x00000000);
/* External access control */
- /* XXX : not implemented */
spr_register(env, SPR_EAR, "EAR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
+
+ /* Hardware implementation registers */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
}
#ifdef TARGET_PPC64
@@ -605,26 +263,6 @@ static void register_iamr_sprs(CPUPPCState *env)
}
#endif /* TARGET_PPC64 */
-static void register_thrm_sprs(CPUPPCState *env)
-{
- /* Thermal management */
- /* XXX : not implemented */
- spr_register(env, SPR_THRM1, "THRM1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_thrm, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_THRM2, "THRM2",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_thrm, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_THRM3, "THRM3",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_thrm, &spr_write_generic,
- 0x00000000);
-}
-
/* SPR specific to PowerPC 604 implementation */
static void register_604_sprs(CPUPPCState *env)
{
@@ -634,66 +272,133 @@ static void register_604_sprs(CPUPPCState *env)
&spr_read_generic, &spr_write_pir,
0x00000000);
/* Breakpoints */
- /* XXX : not implemented */
spr_register(env, SPR_IABR, "IABR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register_kvm(env, SPR_DABR, "DABR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
KVM_REG_PPC_DABR, 0x00000000);
/* Performance counters */
- /* XXX : not implemented */
spr_register(env, SPR_7XX_MMCR0, "MMCR0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_PMC1, "PMC1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_PMC2, "PMC2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_SIAR, "SIAR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, SPR_NOACCESS,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_SDA, "SDA",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, SPR_NOACCESS,
0x00000000);
/* External access control */
- /* XXX : not implemented */
spr_register(env, SPR_EAR, "EAR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
+
+ /* Hardware implementation registers */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void register_604e_sprs(CPUPPCState *env)
+{
+ spr_register(env, SPR_7XX_MMCR1, "MMCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+
+ spr_register(env, SPR_7XX_PMC3, "PMC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+
+ spr_register(env, SPR_7XX_PMC4, "PMC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Hardware implementation registers */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
}
/* SPR specific to PowerPC 603 implementation */
static void register_603_sprs(CPUPPCState *env)
{
/* External access control */
- /* XXX : not implemented */
spr_register(env, SPR_EAR, "EAR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* Breakpoints */
- /* XXX : not implemented */
spr_register(env, SPR_IABR, "IABR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void register_e300_sprs(CPUPPCState *env)
+{
+ /* hardware implementation registers */
+ spr_register(env, SPR_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Breakpoints */
+ spr_register(env, SPR_DABR, "DABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+
+ spr_register(env, SPR_DABR2, "DABR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+
+ spr_register(env, SPR_IABR2, "IABR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+
+ spr_register(env, SPR_IBCR, "IBCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+
+ spr_register(env, SPR_DBCR, "DBCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
}
/* SPR specific to PowerPC G2 implementation */
@@ -701,7 +406,6 @@ static void register_G2_sprs(CPUPPCState *env)
{
/* Memory base address */
/* MBAR */
- /* XXX : not implemented */
spr_register(env, SPR_MBAR, "MBAR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -716,197 +420,200 @@ static void register_G2_sprs(CPUPPCState *env)
&spr_read_generic, &spr_write_generic,
0x00000000);
/* Breakpoints */
- /* XXX : not implemented */
spr_register(env, SPR_DABR, "DABR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_DABR2, "DABR2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_IABR, "IABR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_IABR2, "IABR2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_IBCR, "IBCR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_DBCR, "DBCR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
-}
-/* SPR specific to PowerPC 602 implementation */
-static void register_602_sprs(CPUPPCState *env)
-{
- /* ESA registers */
- /* XXX : not implemented */
- spr_register(env, SPR_SER, "SER",
+ /* External access control */
+ spr_register(env, SPR_EAR, "EAR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_SEBR, "SEBR",
+ /* Hardware implementation register */
+ spr_register(env, SPR_HID0, "HID0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_ESASRR, "ESASRR",
+
+ spr_register(env, SPR_HID1, "HID1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* Floating point status */
- /* XXX : not implemented */
- spr_register(env, SPR_SP, "SP",
+
+ spr_register(env, SPR_HID2, "HID2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_LT, "LT",
+
+ /* SGPRs */
+ spr_register(env, SPR_SPRG4, "SPRG4",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* Watchdog timer */
- /* XXX : not implemented */
- spr_register(env, SPR_TCR, "TCR",
+ spr_register(env, SPR_SPRG5, "SPRG5",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* Interrupt base */
- spr_register(env, SPR_IBR, "IBR",
+ spr_register(env, SPR_SPRG6, "SPRG6",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_IABR, "IABR",
+ spr_register(env, SPR_SPRG7, "SPRG7",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
}
-/* SPR specific to PowerPC 601 implementation */
-static void register_601_sprs(CPUPPCState *env)
+static void register_74xx_sprs(CPUPPCState *env)
{
- /* Multiplication/division register */
- /* MQ */
- spr_register(env, SPR_MQ, "MQ",
- &spr_read_generic, &spr_write_generic,
+ /* Breakpoints */
+ spr_register_kvm(env, SPR_DABR, "DABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DABR, 0x00000000);
+
+ spr_register(env, SPR_IABR, "IABR",
+ SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* RTC registers */
- spr_register(env, SPR_601_RTCU, "RTCU",
+ /* Cache management */
+ spr_register(env, SPR_ICTC, "ICTC",
SPR_NOACCESS, SPR_NOACCESS,
- SPR_NOACCESS, &spr_write_601_rtcu,
- 0x00000000);
- spr_register(env, SPR_601_VRTCU, "RTCU",
- &spr_read_601_rtcu, SPR_NOACCESS,
- &spr_read_601_rtcu, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
0x00000000);
- spr_register(env, SPR_601_RTCL, "RTCL",
+ /* Performance monitors */
+ spr_register(env, SPR_7XX_MMCR0, "MMCR0",
SPR_NOACCESS, SPR_NOACCESS,
- SPR_NOACCESS, &spr_write_601_rtcl,
- 0x00000000);
- spr_register(env, SPR_601_VRTCL, "RTCL",
- &spr_read_601_rtcl, SPR_NOACCESS,
- &spr_read_601_rtcl, SPR_NOACCESS,
- 0x00000000);
- /* Timer */
-#if 0 /* ? */
- spr_register(env, SPR_601_UDECR, "UDECR",
- &spr_read_decr, SPR_NOACCESS,
- &spr_read_decr, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
0x00000000);
-#endif
- /* External access control */
- /* XXX : not implemented */
- spr_register(env, SPR_EAR, "EAR",
+
+ spr_register(env, SPR_7XX_MMCR1, "MMCR1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* Memory management */
-#if !defined(CONFIG_USER_ONLY)
- spr_register(env, SPR_IBAT0U, "IBAT0U",
+
+ spr_register(env, SPR_7XX_PMC1, "PMC1",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_601_ubat, &spr_write_601_ubatu,
+ &spr_read_generic, &spr_write_generic,
0x00000000);
- spr_register(env, SPR_IBAT0L, "IBAT0L",
+
+ spr_register(env, SPR_7XX_PMC2, "PMC2",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_601_ubat, &spr_write_601_ubatl,
+ &spr_read_generic, &spr_write_generic,
0x00000000);
- spr_register(env, SPR_IBAT1U, "IBAT1U",
+
+ spr_register(env, SPR_7XX_PMC3, "PMC3",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_601_ubat, &spr_write_601_ubatu,
+ &spr_read_generic, &spr_write_generic,
0x00000000);
- spr_register(env, SPR_IBAT1L, "IBAT1L",
+
+ spr_register(env, SPR_7XX_PMC4, "PMC4",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_601_ubat, &spr_write_601_ubatl,
+ &spr_read_generic, &spr_write_generic,
0x00000000);
- spr_register(env, SPR_IBAT2U, "IBAT2U",
+
+ spr_register(env, SPR_7XX_SIAR, "SIAR",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_601_ubat, &spr_write_601_ubatu,
+ &spr_read_generic, SPR_NOACCESS,
0x00000000);
- spr_register(env, SPR_IBAT2L, "IBAT2L",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_601_ubat, &spr_write_601_ubatl,
+
+ spr_register(env, SPR_7XX_UMMCR0, "UMMCR0",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
0x00000000);
- spr_register(env, SPR_IBAT3U, "IBAT3U",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_601_ubat, &spr_write_601_ubatu,
+
+ spr_register(env, SPR_7XX_UMMCR1, "UMMCR1",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
0x00000000);
- spr_register(env, SPR_IBAT3L, "IBAT3L",
+
+ spr_register(env, SPR_7XX_UPMC1, "UPMC1",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+
+ spr_register(env, SPR_7XX_UPMC2, "UPMC2",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+
+ spr_register(env, SPR_7XX_UPMC3, "UPMC3",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+
+ spr_register(env, SPR_7XX_UPMC4, "UPMC4",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+
+ spr_register(env, SPR_7XX_USIAR, "USIAR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* External access control */
+ spr_register(env, SPR_EAR, "EAR",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_601_ubat, &spr_write_601_ubatl,
+ &spr_read_generic, &spr_write_generic,
0x00000000);
- env->nb_BATs = 4;
-#endif
-}
-static void register_74xx_sprs(CPUPPCState *env)
-{
/* Processor identification */
spr_register(env, SPR_PIR, "PIR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_pir,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_74XX_MMCR2, "MMCR2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_74XX_UMMCR2, "UMMCR2",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
0x00000000);
- /* XXX: not implemented */
+
spr_register(env, SPR_BAMR, "BAMR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MSSCR0, "MSSCR0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* Hardware implementation registers */
- /* XXX : not implemented */
spr_register(env, SPR_HID0, "HID0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_HID1, "HID1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -916,7 +623,7 @@ static void register_74xx_sprs(CPUPPCState *env)
&spr_read_generic, &spr_write_generic,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_L2CR, "L2CR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, spr_access_nop,
@@ -926,78 +633,22 @@ static void register_74xx_sprs(CPUPPCState *env)
static void register_l3_ctrl(CPUPPCState *env)
{
/* L3CR */
- /* XXX : not implemented */
spr_register(env, SPR_L3CR, "L3CR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* L3ITCR0 */
- /* XXX : not implemented */
spr_register(env, SPR_L3ITCR0, "L3ITCR0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* L3PM */
- /* XXX : not implemented */
spr_register(env, SPR_L3PM, "L3PM",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
}
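
An aside for readers of this diff, not part of the patch: every spr_register() call above wires one SPR to four accessors — user-mode read/write followed by supervisor-mode read/write — plus a reset value, with SPR_NOACCESS marking an access that must be treated as invalid in that context and spr_read_generic/spr_write_generic simply moving the backing value. The standalone C sketch below models that callback-table pattern with made-up Demo* types; it is illustrative only, not the QEMU implementation.

#include <stdio.h>
#include <stdint.h>

#define DEMO_NUM_SPRS 1024

typedef struct DemoSPR DemoSPR;
struct DemoSPR {
    const char *name;
    uint32_t value;                          /* reset / current value */
    uint32_t (*uread)(DemoSPR *);            /* user-mode read, NULL = no access */
    void     (*uwrite)(DemoSPR *, uint32_t); /* user-mode write */
    uint32_t (*sread)(DemoSPR *);            /* supervisor-mode read */
    void     (*swrite)(DemoSPR *, uint32_t); /* supervisor-mode write */
};

static DemoSPR demo_sprs[DEMO_NUM_SPRS];

static uint32_t demo_read_generic(DemoSPR *s)              { return s->value; }
static void     demo_write_generic(DemoSPR *s, uint32_t v) { s->value = v; }

static void demo_spr_register(int num, const char *name,
                              uint32_t (*ur)(DemoSPR *),
                              void     (*uw)(DemoSPR *, uint32_t),
                              uint32_t (*sr)(DemoSPR *),
                              void     (*sw)(DemoSPR *, uint32_t),
                              uint32_t reset)
{
    demo_sprs[num] = (DemoSPR){ name, reset, ur, uw, sr, sw };
}

int main(void)
{
    /* Supervisor-only, generic accessors: the shape of most calls above.
     * 952 is MMCR0's SPR number on 7xx-class parts. */
    demo_spr_register(952, "MMCR0",
                      NULL, NULL,                            /* user: no access */
                      demo_read_generic, demo_write_generic,
                      0x00000000);

    DemoSPR *s = &demo_sprs[952];
    if (!s->uread) {
        printf("%s: a user-mode mfspr would be treated as invalid\n", s->name);
    }
    s->swrite(s, 0x80000000);
    printf("%s = 0x%08x via supervisor read\n", s->name, s->sread(s));
    return 0;
}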
-static void register_74xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways)
-{
-#if !defined(CONFIG_USER_ONLY)
- env->nb_tlb = nb_tlbs;
- env->nb_ways = nb_ways;
- env->id_tlbs = 1;
- env->tlb_type = TLB_6XX;
- /* XXX : not implemented */
- spr_register(env, SPR_PTEHI, "PTEHI",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_PTELO, "PTELO",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_TLBMISS, "TLBMISS",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
-#endif
-}
-
-static void register_usprg3_sprs(CPUPPCState *env)
-{
- spr_register(env, SPR_USPRG3, "USPRG3",
- &spr_read_ureg, SPR_NOACCESS,
- &spr_read_ureg, SPR_NOACCESS,
- 0x00000000);
-}
-
-static void register_usprgh_sprs(CPUPPCState *env)
-{
- spr_register(env, SPR_USPRG4, "USPRG4",
- &spr_read_ureg, SPR_NOACCESS,
- &spr_read_ureg, SPR_NOACCESS,
- 0x00000000);
- spr_register(env, SPR_USPRG5, "USPRG5",
- &spr_read_ureg, SPR_NOACCESS,
- &spr_read_ureg, SPR_NOACCESS,
- 0x00000000);
- spr_register(env, SPR_USPRG6, "USPRG6",
- &spr_read_ureg, SPR_NOACCESS,
- &spr_read_ureg, SPR_NOACCESS,
- 0x00000000);
- spr_register(env, SPR_USPRG7, "USPRG7",
- &spr_read_ureg, SPR_NOACCESS,
- &spr_read_ureg, SPR_NOACCESS,
- 0x00000000);
-}
-
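
A second aside, not part of the patch: registrations such as UMMCR0/UPMCn/USIAR above and the USPRG4-7 helper being moved here pass &spr_read_ureg with SPR_NOACCESS writes — the U-numbered SPR is a read-only user alias of a privileged SPR whose number is 16 higher (USPRG4 = 260 aliases SPRG4 = 276, UMMCR0 = 936 aliases MMCR0 = 952), so a ureg read can be served by fetching spr[sprn + 0x10]. A minimal standalone sketch of that aliasing, assuming only the +0x10 numbering convention:

#include <stdio.h>
#include <stdint.h>

static uint32_t spr[1024];

/* privileged accessors: plain read/write of the backing register */
static uint32_t read_priv(int sprn)              { return spr[sprn]; }
static void     write_priv(int sprn, uint32_t v) { spr[sprn] = v; }

/* user alias: read-only view of the register 16 slots above */
static uint32_t read_ureg(int sprn)              { return spr[sprn + 0x10]; }

int main(void)
{
    enum { SPRG4 = 276, USPRG4 = 260 };   /* USPRG4 = SPRG4 - 16 */

    write_priv(SPRG4, 0xdeadbeef);        /* supervisor writes SPRG4 */
    printf("USPRG4 (user-mode read) = 0x%08x\n", read_ureg(USPRG4));
    return 0;
}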
/* PowerPC BookE SPR */
static void register_BookE_sprs(CPUPPCState *env, uint64_t ivor_mask)
{
@@ -1050,37 +701,36 @@ static void register_BookE_sprs(CPUPPCState *env, uint64_t ivor_mask)
&spr_read_generic, &spr_write_generic,
0x00000000);
/* Debug */
- /* XXX : not implemented */
spr_register(env, SPR_BOOKE_IAC1, "IAC1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_BOOKE_IAC2, "IAC2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_BOOKE_DAC1, "DAC1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_BOOKE_DAC2, "DAC2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_BOOKE_DBCR0, "DBCR0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_40x_dbcr0,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_BOOKE_DBCR1, "DBCR1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_BOOKE_DBCR2, "DBCR2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -1093,7 +743,7 @@ static void register_BookE_sprs(CPUPPCState *env, uint64_t ivor_mask)
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_BOOKE_DBSR, "DBSR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_clear,
@@ -1202,7 +852,6 @@ static void register_BookE206_sprs(CPUPPCState *env, uint32_t mas_mask,
int i;
/* TLB assist registers */
- /* XXX : not implemented */
for (i = 0; i < 8; i++) {
if (mas_mask & (1 << i)) {
spr_register(env, mas_sprn[i], mas_names[i],
@@ -1214,14 +863,12 @@ static void register_BookE206_sprs(CPUPPCState *env, uint32_t mas_mask,
}
}
if (env->nb_pids > 1) {
- /* XXX : not implemented */
spr_register(env, SPR_BOOKE_PID1, "PID1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_booke_pid,
0x00000000);
}
if (env->nb_pids > 2) {
- /* XXX : not implemented */
spr_register(env, SPR_BOOKE_PID2, "PID2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_booke_pid,
@@ -1237,7 +884,6 @@ static void register_BookE206_sprs(CPUPPCState *env, uint32_t mas_mask,
&spr_read_generic, &spr_write_epsc,
0x00000000);
- /* XXX : not implemented */
spr_register(env, SPR_MMUCFG, "MMUCFG",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, SPR_NOACCESS,
@@ -1272,131 +918,127 @@ static void register_BookE206_sprs(CPUPPCState *env, uint32_t mas_mask,
break;
}
#endif
-
- register_usprgh_sprs(env);
}
/* SPR specific to PowerPC 440 implementation */
static void register_440_sprs(CPUPPCState *env)
{
/* Cache control */
- /* XXX : not implemented */
spr_register(env, SPR_440_DNV0, "DNV0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_440_DNV1, "DNV1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_440_DNV2, "DNV2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_440_DNV3, "DNV3",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_440_DTV0, "DTV0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_440_DTV1, "DTV1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_440_DTV2, "DTV2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_440_DTV3, "DTV3",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_440_DVLIM, "DVLIM",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_440_INV0, "INV0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_440_INV1, "INV1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_440_INV2, "INV2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_440_INV3, "INV3",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_440_ITV0, "ITV0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_440_ITV1, "ITV1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_440_ITV2, "ITV2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_440_ITV3, "ITV3",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_440_IVLIM, "IVLIM",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* Cache debug */
- /* XXX : not implemented */
spr_register(env, SPR_BOOKE_DCDBTRH, "DCDBTRH",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, SPR_NOACCESS,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_BOOKE_DCDBTRL, "DCDBTRL",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, SPR_NOACCESS,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_BOOKE_ICDBDR, "ICDBDR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, SPR_NOACCESS,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_BOOKE_ICDBTRH, "ICDBTRH",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, SPR_NOACCESS,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_BOOKE_ICDBTRL, "ICDBTRL",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, SPR_NOACCESS,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_440_DBDR, "DBDR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -1415,6 +1057,32 @@ static void register_440_sprs(CPUPPCState *env)
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
+
+ /* Processor identification */
+ spr_register(env, SPR_BOOKE_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+
+ spr_register(env, SPR_BOOKE_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+
+ spr_register(env, SPR_BOOKE_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+
+ spr_register(env, SPR_BOOKE_DVC1, "DVC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+
+ spr_register(env, SPR_BOOKE_DVC2, "DVC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
}
/* SPR shared between PowerPC 40x implementations */
@@ -1464,11 +1132,11 @@ static void register_40x_sprs(CPUPPCState *env)
0x00000000);
spr_register(env, SPR_40x_TCR, "TCR",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_booke_tcr,
+ &spr_read_generic, &spr_write_40x_tcr,
0x00000000);
spr_register(env, SPR_40x_TSR, "TSR",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_booke_tsr,
+ &spr_read_generic, &spr_write_40x_tsr,
0x00000000);
}
@@ -1478,30 +1146,29 @@ static void register_405_sprs(CPUPPCState *env)
/* MMU */
spr_register(env, SPR_40x_PID, "PID",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_40x_pid,
0x00000000);
spr_register(env, SPR_4xx_CCR0, "CCR0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00700000);
/* Debug interface */
- /* XXX : not implemented */
spr_register(env, SPR_40x_DBCR0, "DBCR0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_40x_dbcr0,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_405_DBCR1, "DBCR1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_40x_DBSR, "DBSR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_clear,
/* Last reset was system reset */
0x00000300);
- /* XXX : not implemented */
+
spr_register(env, SPR_40x_DAC1, "DAC1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -1510,17 +1177,17 @@ static void register_405_sprs(CPUPPCState *env)
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_405_DVC1, "DVC1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_405_DVC2, "DVC2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_40x_IAC1, "IAC1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -1529,18 +1196,17 @@ static void register_405_sprs(CPUPPCState *env)
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_405_IAC3, "IAC3",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_405_IAC4, "IAC4",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* Storage control */
- /* XXX: TODO: not implemented */
spr_register(env, SPR_405_SLER, "SLER",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_40x_sler,
@@ -1549,7 +1215,7 @@ static void register_405_sprs(CPUPPCState *env)
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_405_SU0R, "SU0R",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -1575,68 +1241,8 @@ static void register_405_sprs(CPUPPCState *env)
SPR_NOACCESS, SPR_NOACCESS,
spr_read_generic, &spr_write_generic,
0x00000000);
- register_usprgh_sprs(env);
-}
-
-/* SPR shared between PowerPC 401 & 403 implementations */
-static void register_401_403_sprs(CPUPPCState *env)
-{
- /* Time base */
- spr_register(env, SPR_403_VTBL, "TBL",
- &spr_read_tbl, SPR_NOACCESS,
- &spr_read_tbl, SPR_NOACCESS,
- 0x00000000);
- spr_register(env, SPR_403_TBL, "TBL",
- SPR_NOACCESS, SPR_NOACCESS,
- SPR_NOACCESS, &spr_write_tbl,
- 0x00000000);
- spr_register(env, SPR_403_VTBU, "TBU",
- &spr_read_tbu, SPR_NOACCESS,
- &spr_read_tbu, SPR_NOACCESS,
- 0x00000000);
- spr_register(env, SPR_403_TBU, "TBU",
- SPR_NOACCESS, SPR_NOACCESS,
- SPR_NOACCESS, &spr_write_tbu,
- 0x00000000);
- /* Debug */
- /* not emulated, as QEMU do not emulate caches */
- spr_register(env, SPR_403_CDBCR, "CDBCR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
-}
-/* SPR specific to PowerPC 401 implementation */
-static void register_401_sprs(CPUPPCState *env)
-{
- /* Debug interface */
- /* XXX : not implemented */
- spr_register(env, SPR_40x_DBCR0, "DBCR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_40x_dbcr0,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_40x_DBSR, "DBSR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_clear,
- /* Last reset was system reset */
- 0x00000300);
- /* XXX : not implemented */
- spr_register(env, SPR_40x_DAC1, "DAC",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_40x_IAC1, "IAC",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* Storage control */
- /* XXX: TODO: not implemented */
- spr_register(env, SPR_405_SLER, "SLER",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_40x_sler,
- 0x00000000);
+ /* Bus access control */
/* not emulated, as QEMU never does speculative access */
spr_register(env, SPR_40x_SGR, "SGR",
SPR_NOACCESS, SPR_NOACCESS,
@@ -1649,98 +1255,6 @@ static void register_401_sprs(CPUPPCState *env)
0x00000000);
}
-static void register_401x2_sprs(CPUPPCState *env)
-{
- register_401_sprs(env);
- spr_register(env, SPR_40x_PID, "PID",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- spr_register(env, SPR_40x_ZPR, "ZPR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
-}
-
-/* SPR specific to PowerPC 403 implementation */
-static void register_403_sprs(CPUPPCState *env)
-{
- /* Debug interface */
- /* XXX : not implemented */
- spr_register(env, SPR_40x_DBCR0, "DBCR0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_40x_dbcr0,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_40x_DBSR, "DBSR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_clear,
- /* Last reset was system reset */
- 0x00000300);
- /* XXX : not implemented */
- spr_register(env, SPR_40x_DAC1, "DAC1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_40x_DAC2, "DAC2",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_40x_IAC1, "IAC1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_40x_IAC2, "IAC2",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
-}
-
-static void register_403_real_sprs(CPUPPCState *env)
-{
- spr_register(env, SPR_403_PBL1, "PBL1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_403_pbr, &spr_write_403_pbr,
- 0x00000000);
- spr_register(env, SPR_403_PBU1, "PBU1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_403_pbr, &spr_write_403_pbr,
- 0x00000000);
- spr_register(env, SPR_403_PBL2, "PBL2",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_403_pbr, &spr_write_403_pbr,
- 0x00000000);
- spr_register(env, SPR_403_PBU2, "PBU2",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_403_pbr, &spr_write_403_pbr,
- 0x00000000);
-}
-
-static void register_403_mmu_sprs(CPUPPCState *env)
-{
- /* MMU */
- spr_register(env, SPR_40x_PID, "PID",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- spr_register(env, SPR_40x_ZPR, "ZPR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
-}
-
-/* SPR specific to PowerPC compression coprocessor extension */
-static void register_compress_sprs(CPUPPCState *env)
-{
- /* XXX : not implemented */
- spr_register(env, SPR_401_SKR, "SKR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
-}
static void register_5xx_8xx_sprs(CPUPPCState *env)
{
@@ -1758,102 +1272,102 @@ static void register_5xx_8xx_sprs(CPUPPCState *env)
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_decr, &spr_write_decr,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_EIE, "EIE",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_EID, "EID",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_NRI, "NRI",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_CMPA, "CMPA",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_CMPB, "CMPB",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_CMPC, "CMPC",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_CMPD, "CMPD",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_ECR, "ECR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_DER, "DER",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_COUNTA, "COUNTA",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_COUNTB, "COUNTB",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_CMPE, "CMPE",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_CMPF, "CMPF",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_CMPG, "CMPG",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_CMPH, "CMPH",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_LCTRL1, "LCTRL1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_LCTRL2, "LCTRL2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_BAR, "BAR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_DPDR, "DPDR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_IMMR, "IMMR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -1862,107 +1376,106 @@ static void register_5xx_8xx_sprs(CPUPPCState *env)
static void register_5xx_sprs(CPUPPCState *env)
{
- /* XXX : not implemented */
spr_register(env, SPR_RCPU_MI_GRA, "MI_GRA",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_RCPU_L2U_GRA, "L2U_GRA",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_RPCU_BBCMCR, "L2U_BBCMCR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_RCPU_L2U_MCR, "L2U_MCR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_RCPU_MI_RBA0, "MI_RBA0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_RCPU_MI_RBA1, "MI_RBA1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_RCPU_MI_RBA2, "MI_RBA2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_RCPU_MI_RBA3, "MI_RBA3",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_RCPU_L2U_RBA0, "L2U_RBA0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_RCPU_L2U_RBA1, "L2U_RBA1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_RCPU_L2U_RBA2, "L2U_RBA2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_RCPU_L2U_RBA3, "L2U_RBA3",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_RCPU_MI_RA0, "MI_RA0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_RCPU_MI_RA1, "MI_RA1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_RCPU_MI_RA2, "MI_RA2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_RCPU_MI_RA3, "MI_RA3",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_RCPU_L2U_RA0, "L2U_RA0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_RCPU_L2U_RA1, "L2U_RA1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_RCPU_L2U_RA2, "L2U_RA2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_RCPU_L2U_RA3, "L2U_RA3",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_RCPU_FPECR, "FPECR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -1971,127 +1484,127 @@ static void register_5xx_sprs(CPUPPCState *env)
static void register_8xx_sprs(CPUPPCState *env)
{
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_IC_CST, "IC_CST",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_IC_ADR, "IC_ADR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_IC_DAT, "IC_DAT",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_DC_CST, "DC_CST",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_DC_ADR, "DC_ADR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_DC_DAT, "DC_DAT",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_MI_CTR, "MI_CTR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_MI_AP, "MI_AP",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_MI_EPN, "MI_EPN",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_MI_TWC, "MI_TWC",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_MI_RPN, "MI_RPN",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_MI_DBCAM, "MI_DBCAM",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_MI_DBRAM0, "MI_DBRAM0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_MI_DBRAM1, "MI_DBRAM1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_MD_CTR, "MD_CTR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_MD_CASID, "MD_CASID",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_MD_AP, "MD_AP",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_MD_EPN, "MD_EPN",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_MD_TWB, "MD_TWB",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_MD_TWC, "MD_TWC",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_MD_RPN, "MD_RPN",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_MD_TW, "MD_TW",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_MD_DBCAM, "MD_DBCAM",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_MD_DBRAM0, "MD_DBRAM0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MPC_MD_DBRAM1, "MD_DBRAM1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -2117,6 +1630,7 @@ static void register_8xx_sprs(CPUPPCState *env)
* HSRR0 => SPR 314 (Power 2.04 hypv)
* HSRR1 => SPR 315 (Power 2.04 hypv)
* LPIDR => SPR 317 (970)
+ * HEIR => SPR 339 (Power 2.05 hypv) (64-bit reg from 3.1)
* EPR => SPR 702 (Power 2.04 emb)
* perf => 768-783 (Power 2.04)
* perf => 784-799 (Power 2.04)
@@ -2128,27 +1642,7 @@ static void register_8xx_sprs(CPUPPCState *env)
/*****************************************************************************/
/* Exception vectors models */
-static void init_excp_4xx_real(CPUPPCState *env)
-{
-#if !defined(CONFIG_USER_ONLY)
- env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000100;
- env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
- env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
- env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
- env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
- env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
- env->excp_vectors[POWERPC_EXCP_PIT] = 0x00001000;
- env->excp_vectors[POWERPC_EXCP_FIT] = 0x00001010;
- env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001020;
- env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00002000;
- env->ivor_mask = 0x0000FFF0UL;
- env->ivpr_mask = 0xFFFF0000UL;
- /* Hardware reset vector */
- env->hreset_vector = 0xFFFFFFFCUL;
-#endif
-}
-
-static void init_excp_4xx_softmmu(CPUPPCState *env)
+static void init_excp_4xx(CPUPPCState *env)
{
#if !defined(CONFIG_USER_ONLY)
env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000100;
@@ -2180,7 +1674,7 @@ static void init_excp_MPC5xx(CPUPPCState *env)
env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
- env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
@@ -2207,7 +1701,7 @@ static void init_excp_MPC8xx(CPUPPCState *env)
env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
- env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
@@ -2273,8 +1767,14 @@ static void init_excp_e200(CPUPPCState *env, target_ulong ivpr_mask)
env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00000000;
env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00000000;
env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00000000;
+ /*
+ * These two are the same IVOR as POWERPC_EXCP_VPU and
+ * POWERPC_EXCP_VPUA. We deal with that when dispatching at
+ * powerpc_excp().
+ */
env->excp_vectors[POWERPC_EXCP_SPEU] = 0x00000000;
env->excp_vectors[POWERPC_EXCP_EFPDI] = 0x00000000;
+
env->excp_vectors[POWERPC_EXCP_EFPRI] = 0x00000000;
env->ivor_mask = 0x0000FFF7UL;
env->ivpr_mask = ivpr_mask;
@@ -2309,53 +1809,6 @@ static void init_excp_BookE(CPUPPCState *env)
#endif
}
-static void init_excp_601(CPUPPCState *env)
-{
-#if !defined(CONFIG_USER_ONLY)
- env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
- env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
- env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
- env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
- env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
- env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
- env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
- env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
- env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
- env->excp_vectors[POWERPC_EXCP_IO] = 0x00000A00;
- env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
- env->excp_vectors[POWERPC_EXCP_RUNM] = 0x00002000;
- /* Hardware reset vector */
- env->hreset_vector = 0x00000100UL;
-#endif
-}
-
-static void init_excp_602(CPUPPCState *env)
-{
-#if !defined(CONFIG_USER_ONLY)
- /* XXX: exception prefix has a special behavior on 602 */
- env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
- env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
- env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
- env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
- env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
- env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
- env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
- env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
- env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
- env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
- env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
- env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000;
- env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100;
- env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200;
- env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
- env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
- env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001500;
- env->excp_vectors[POWERPC_EXCP_EMUL] = 0x00001600;
- /* Hardware reset vector */
- env->hreset_vector = 0x00000100UL;
-#endif
-}
-
static void init_excp_603(CPUPPCState *env)
{
#if !defined(CONFIG_USER_ONLY)
@@ -2537,9 +1990,6 @@ static void init_excp_7450(CPUPPCState *env)
env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20;
- env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000;
- env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100;
- env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200;
env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
env->excp_vectors[POWERPC_EXCP_VPUA] = 0x00001600;
@@ -2615,6 +2065,10 @@ static void init_excp_POWER8(CPUPPCState *env)
env->excp_vectors[POWERPC_EXCP_FU] = 0x00000F60;
env->excp_vectors[POWERPC_EXCP_HV_FU] = 0x00000F80;
env->excp_vectors[POWERPC_EXCP_SDOOR_HV] = 0x00000E80;
+
+ /* Userland exceptions without vector value in PowerISA v3.1 */
+ env->excp_vectors[POWERPC_EXCP_PERFM_EBB] = 0x0;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL_EBB] = 0x0;
#endif
}
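
One more illustrative note, not from the patch: the two entries added above record that the performance-monitor and external event-based-branch events have no architected interrupt vector in ISA v3.1 — an EBB is delivered in problem state to the address the program placed in EBBHR rather than through a table offset. The sketch below shows a vector table where a zero entry means "vectorless"; the dispatch routing is an assumption made up for illustration, not QEMU's actual logic.

#include <stdio.h>
#include <stdint.h>

enum { EXCP_DSI, EXCP_EXTERNAL, EXCP_PERFM_EBB, EXCP_EXTERNAL_EBB, EXCP_MAX };

static uint32_t excp_vectors[EXCP_MAX] = {
    [EXCP_DSI]          = 0x00000300,
    [EXCP_EXTERNAL]     = 0x00000500,
    [EXCP_PERFM_EBB]    = 0x0,    /* no architected vector in ISA v3.1 */
    [EXCP_EXTERNAL_EBB] = 0x0,
};

static uint64_t ebb_handler = 0x10001000;  /* e.g. what the program put in EBBHR */

static uint64_t next_pc_for(int excp)
{
    if (excp_vectors[excp] == 0) {
        return ebb_handler;        /* event-based branch, taken in problem state */
    }
    return excp_vectors[excp];     /* classic vectored interrupt */
}

int main(void)
{
    printf("external  -> 0x%llx\n", (unsigned long long)next_pc_for(EXCP_EXTERNAL));
    printf("perfm EBB -> 0x%llx\n", (unsigned long long)next_pc_for(EXCP_PERFM_EBB));
    return 0;
}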
@@ -2635,18 +2089,6 @@ static void init_excp_POWER10(CPUPPCState *env)
#endif
-/*****************************************************************************/
-/* Power management enable checks */
-static int check_pow_none(CPUPPCState *env)
-{
- return 0;
-}
-
-static int check_pow_nocheck(CPUPPCState *env)
-{
- return 1;
-}
-
static int check_pow_hid0(CPUPPCState *env)
{
if (env->spr[SPR_HID0] & 0x00E00000) {
@@ -2665,377 +2107,12 @@ static int check_pow_hid0_74xx(CPUPPCState *env)
return 0;
}
-/*****************************************************************************/
-/* PowerPC implementations definitions */
-
-#define POWERPC_FAMILY(_name) \
- static void \
- glue(glue(ppc_, _name), _cpu_family_class_init)(ObjectClass *, void *); \
- \
- static const TypeInfo \
- glue(glue(ppc_, _name), _cpu_family_type_info) = { \
- .name = stringify(_name) "-family-" TYPE_POWERPC_CPU, \
- .parent = TYPE_POWERPC_CPU, \
- .abstract = true, \
- .class_init = glue(glue(ppc_, _name), _cpu_family_class_init), \
- }; \
- \
- static void glue(glue(ppc_, _name), _cpu_family_register_types)(void) \
- { \
- type_register_static( \
- &glue(glue(ppc_, _name), _cpu_family_type_info)); \
- } \
- \
- type_init(glue(glue(ppc_, _name), _cpu_family_register_types)) \
- \
- static void glue(glue(ppc_, _name), _cpu_family_class_init)
-
-static void init_proc_401(CPUPPCState *env)
-{
- register_40x_sprs(env);
- register_401_403_sprs(env);
- register_401_sprs(env);
- init_excp_4xx_real(env);
- env->dcache_line_size = 32;
- env->icache_line_size = 32;
- /* Allocate hardware IRQ controller */
- ppc40x_irq_init(env_archcpu(env));
-
- SET_FIT_PERIOD(12, 16, 20, 24);
- SET_WDT_PERIOD(16, 20, 24, 28);
-}
-
-POWERPC_FAMILY(401)(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
- PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
-
- dc->desc = "PowerPC 401";
- pcc->init_proc = init_proc_401;
- pcc->check_pow = check_pow_nocheck;
- pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
- PPC_WRTEE | PPC_DCR |
- PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT |
- PPC_CACHE_DCBZ |
- PPC_MEM_SYNC | PPC_MEM_EIEIO |
- PPC_4xx_COMMON | PPC_40x_EXCP;
- pcc->msr_mask = (1ull << MSR_KEY) |
- (1ull << MSR_POW) |
- (1ull << MSR_CE) |
- (1ull << MSR_ILE) |
- (1ull << MSR_EE) |
- (1ull << MSR_PR) |
- (1ull << MSR_ME) |
- (1ull << MSR_DE) |
- (1ull << MSR_LE);
- pcc->mmu_model = POWERPC_MMU_REAL;
- pcc->excp_model = POWERPC_EXCP_40x;
- pcc->bus_model = PPC_FLAGS_INPUT_401;
- pcc->bfd_mach = bfd_mach_ppc_403;
- pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE |
- POWERPC_FLAG_BUS_CLK;
-}
-
-static void init_proc_401x2(CPUPPCState *env)
-{
- register_40x_sprs(env);
- register_401_403_sprs(env);
- register_401x2_sprs(env);
- register_compress_sprs(env);
- /* Memory management */
-#if !defined(CONFIG_USER_ONLY)
- env->nb_tlb = 64;
- env->nb_ways = 1;
- env->id_tlbs = 0;
- env->tlb_type = TLB_EMB;
-#endif
- init_excp_4xx_softmmu(env);
- env->dcache_line_size = 32;
- env->icache_line_size = 32;
- /* Allocate hardware IRQ controller */
- ppc40x_irq_init(env_archcpu(env));
-
- SET_FIT_PERIOD(12, 16, 20, 24);
- SET_WDT_PERIOD(16, 20, 24, 28);
-}
-
-POWERPC_FAMILY(401x2)(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
- PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
-
- dc->desc = "PowerPC 401x2";
- pcc->init_proc = init_proc_401x2;
- pcc->check_pow = check_pow_nocheck;
- pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
- PPC_DCR | PPC_WRTEE |
- PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT |
- PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
- PPC_MEM_SYNC | PPC_MEM_EIEIO |
- PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC |
- PPC_4xx_COMMON | PPC_40x_EXCP;
- pcc->msr_mask = (1ull << 20) |
- (1ull << MSR_KEY) |
- (1ull << MSR_POW) |
- (1ull << MSR_CE) |
- (1ull << MSR_ILE) |
- (1ull << MSR_EE) |
- (1ull << MSR_PR) |
- (1ull << MSR_ME) |
- (1ull << MSR_DE) |
- (1ull << MSR_IR) |
- (1ull << MSR_DR) |
- (1ull << MSR_LE);
- pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z;
- pcc->excp_model = POWERPC_EXCP_40x;
- pcc->bus_model = PPC_FLAGS_INPUT_401;
- pcc->bfd_mach = bfd_mach_ppc_403;
- pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE |
- POWERPC_FLAG_BUS_CLK;
-}
-
-static void init_proc_401x3(CPUPPCState *env)
-{
- register_40x_sprs(env);
- register_401_403_sprs(env);
- register_401_sprs(env);
- register_401x2_sprs(env);
- register_compress_sprs(env);
- init_excp_4xx_softmmu(env);
- env->dcache_line_size = 32;
- env->icache_line_size = 32;
- /* Allocate hardware IRQ controller */
- ppc40x_irq_init(env_archcpu(env));
-
- SET_FIT_PERIOD(12, 16, 20, 24);
- SET_WDT_PERIOD(16, 20, 24, 28);
-}
-
-POWERPC_FAMILY(401x3)(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
- PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
-
- dc->desc = "PowerPC 401x3";
- pcc->init_proc = init_proc_401x3;
- pcc->check_pow = check_pow_nocheck;
- pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
- PPC_DCR | PPC_WRTEE |
- PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT |
- PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
- PPC_MEM_SYNC | PPC_MEM_EIEIO |
- PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC |
- PPC_4xx_COMMON | PPC_40x_EXCP;
- pcc->msr_mask = (1ull << 20) |
- (1ull << MSR_KEY) |
- (1ull << MSR_POW) |
- (1ull << MSR_CE) |
- (1ull << MSR_ILE) |
- (1ull << MSR_EE) |
- (1ull << MSR_PR) |
- (1ull << MSR_ME) |
- (1ull << MSR_DWE) |
- (1ull << MSR_DE) |
- (1ull << MSR_IR) |
- (1ull << MSR_DR) |
- (1ull << MSR_LE);
- pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z;
- pcc->excp_model = POWERPC_EXCP_40x;
- pcc->bus_model = PPC_FLAGS_INPUT_401;
- pcc->bfd_mach = bfd_mach_ppc_403;
- pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE |
- POWERPC_FLAG_BUS_CLK;
-}
-
-static void init_proc_IOP480(CPUPPCState *env)
-{
- register_40x_sprs(env);
- register_401_403_sprs(env);
- register_401x2_sprs(env);
- register_compress_sprs(env);
- /* Memory management */
-#if !defined(CONFIG_USER_ONLY)
- env->nb_tlb = 64;
- env->nb_ways = 1;
- env->id_tlbs = 0;
- env->tlb_type = TLB_EMB;
-#endif
- init_excp_4xx_softmmu(env);
- env->dcache_line_size = 32;
- env->icache_line_size = 32;
- /* Allocate hardware IRQ controller */
- ppc40x_irq_init(env_archcpu(env));
-
- SET_FIT_PERIOD(8, 12, 16, 20);
- SET_WDT_PERIOD(16, 20, 24, 28);
-}
-
-POWERPC_FAMILY(IOP480)(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
- PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
-
- dc->desc = "IOP480";
- pcc->init_proc = init_proc_IOP480;
- pcc->check_pow = check_pow_nocheck;
- pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
- PPC_DCR | PPC_WRTEE |
- PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT |
- PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
- PPC_MEM_SYNC | PPC_MEM_EIEIO |
- PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC |
- PPC_4xx_COMMON | PPC_40x_EXCP;
- pcc->msr_mask = (1ull << 20) |
- (1ull << MSR_KEY) |
- (1ull << MSR_POW) |
- (1ull << MSR_CE) |
- (1ull << MSR_ILE) |
- (1ull << MSR_EE) |
- (1ull << MSR_PR) |
- (1ull << MSR_ME) |
- (1ull << MSR_DE) |
- (1ull << MSR_IR) |
- (1ull << MSR_DR) |
- (1ull << MSR_LE);
- pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z;
- pcc->excp_model = POWERPC_EXCP_40x;
- pcc->bus_model = PPC_FLAGS_INPUT_401;
- pcc->bfd_mach = bfd_mach_ppc_403;
- pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE |
- POWERPC_FLAG_BUS_CLK;
-}
-
-static void init_proc_403(CPUPPCState *env)
-{
- register_40x_sprs(env);
- register_401_403_sprs(env);
- register_403_sprs(env);
- register_403_real_sprs(env);
- init_excp_4xx_real(env);
- env->dcache_line_size = 32;
- env->icache_line_size = 32;
- /* Allocate hardware IRQ controller */
- ppc40x_irq_init(env_archcpu(env));
-
- SET_FIT_PERIOD(8, 12, 16, 20);
- SET_WDT_PERIOD(16, 20, 24, 28);
-}
-
-POWERPC_FAMILY(403)(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
- PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
-
- dc->desc = "PowerPC 403";
- pcc->init_proc = init_proc_403;
- pcc->check_pow = check_pow_nocheck;
- pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
- PPC_DCR | PPC_WRTEE |
- PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT |
- PPC_CACHE_DCBZ |
- PPC_MEM_SYNC | PPC_MEM_EIEIO |
- PPC_4xx_COMMON | PPC_40x_EXCP;
- pcc->msr_mask = (1ull << MSR_POW) |
- (1ull << MSR_CE) |
- (1ull << MSR_ILE) |
- (1ull << MSR_EE) |
- (1ull << MSR_PR) |
- (1ull << MSR_ME) |
- (1ull << MSR_PE) |
- (1ull << MSR_PX) |
- (1ull << MSR_LE);
- pcc->mmu_model = POWERPC_MMU_REAL;
- pcc->excp_model = POWERPC_EXCP_40x;
- pcc->bus_model = PPC_FLAGS_INPUT_401;
- pcc->bfd_mach = bfd_mach_ppc_403;
- pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_PX |
- POWERPC_FLAG_BUS_CLK;
-}
-
-static void init_proc_403GCX(CPUPPCState *env)
-{
- register_40x_sprs(env);
- register_401_403_sprs(env);
- register_403_sprs(env);
- register_403_real_sprs(env);
- register_403_mmu_sprs(env);
- /* Bus access control */
- /* not emulated, as QEMU never does speculative access */
- spr_register(env, SPR_40x_SGR, "SGR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0xFFFFFFFF);
- /* not emulated, as QEMU do not emulate caches */
- spr_register(env, SPR_40x_DCWR, "DCWR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* Memory management */
-#if !defined(CONFIG_USER_ONLY)
- env->nb_tlb = 64;
- env->nb_ways = 1;
- env->id_tlbs = 0;
- env->tlb_type = TLB_EMB;
-#endif
- init_excp_4xx_softmmu(env);
- env->dcache_line_size = 32;
- env->icache_line_size = 32;
- /* Allocate hardware IRQ controller */
- ppc40x_irq_init(env_archcpu(env));
-
- SET_FIT_PERIOD(8, 12, 16, 20);
- SET_WDT_PERIOD(16, 20, 24, 28);
-}
-
-POWERPC_FAMILY(403GCX)(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
- PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
-
- dc->desc = "PowerPC 403 GCX";
- pcc->init_proc = init_proc_403GCX;
- pcc->check_pow = check_pow_nocheck;
- pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
- PPC_DCR | PPC_WRTEE |
- PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT |
- PPC_CACHE_DCBZ |
- PPC_MEM_SYNC | PPC_MEM_EIEIO |
- PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC |
- PPC_4xx_COMMON | PPC_40x_EXCP;
- pcc->msr_mask = (1ull << MSR_POW) |
- (1ull << MSR_CE) |
- (1ull << MSR_ILE) |
- (1ull << MSR_EE) |
- (1ull << MSR_PR) |
- (1ull << MSR_ME) |
- (1ull << MSR_PE) |
- (1ull << MSR_PX) |
- (1ull << MSR_LE);
- pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z;
- pcc->excp_model = POWERPC_EXCP_40x;
- pcc->bus_model = PPC_FLAGS_INPUT_401;
- pcc->bfd_mach = bfd_mach_ppc_403;
- pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_PX |
- POWERPC_FLAG_BUS_CLK;
-}
-
static void init_proc_405(CPUPPCState *env)
{
- /* Time base */
- register_tbl(env);
register_40x_sprs(env);
register_405_sprs(env);
- /* Bus access control */
- /* not emulated, as QEMU never does speculative access */
- spr_register(env, SPR_40x_SGR, "SGR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0xFFFFFFFF);
- /* not emulated, as QEMU do not emulate caches */
- spr_register(env, SPR_40x_DCWR, "DCWR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
+ register_usprgh_sprs(env);
+
/* Memory management */
#if !defined(CONFIG_USER_ONLY)
env->nb_tlb = 64;
@@ -3043,7 +2120,7 @@ static void init_proc_405(CPUPPCState *env)
env->id_tlbs = 0;
env->tlb_type = TLB_EMB;
#endif
- init_excp_4xx_softmmu(env);
+ init_excp_4xx(env);
env->dcache_line_size = 32;
env->icache_line_size = 32;
/* Allocate hardware IRQ controller */
@@ -3068,11 +2145,12 @@ POWERPC_FAMILY(405)(ObjectClass *oc, void *data)
PPC_MEM_SYNC | PPC_MEM_EIEIO |
PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC |
PPC_4xx_COMMON | PPC_405_MAC | PPC_40x_EXCP;
- pcc->msr_mask = (1ull << MSR_POW) |
+ pcc->msr_mask = (1ull << MSR_WE) |
(1ull << MSR_CE) |
(1ull << MSR_EE) |
(1ull << MSR_PR) |
(1ull << MSR_FP) |
+ (1ull << MSR_ME) |
(1ull << MSR_DWE) |
(1ull << MSR_DE) |
(1ull << MSR_IR) |
@@ -3087,37 +2165,10 @@ POWERPC_FAMILY(405)(ObjectClass *oc, void *data)
static void init_proc_440EP(CPUPPCState *env)
{
- /* Time base */
- register_tbl(env);
register_BookE_sprs(env, 0x000000000000FFFFULL);
register_440_sprs(env);
register_usprgh_sprs(env);
- /* Processor identification */
- spr_register(env, SPR_BOOKE_PIR, "PIR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_pir,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_BOOKE_IAC3, "IAC3",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_BOOKE_IAC4, "IAC4",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_BOOKE_DVC1, "DVC1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_BOOKE_DVC2, "DVC2",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_BOOKE_MCSR, "MCSR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -3130,7 +2181,7 @@ static void init_proc_440EP(CPUPPCState *env)
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_440_CCR1, "CCR1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -3229,36 +2280,10 @@ POWERPC_FAMILY(460EX)(ObjectClass *oc, void *data)
static void init_proc_440GP(CPUPPCState *env)
{
- /* Time base */
- register_tbl(env);
register_BookE_sprs(env, 0x000000000000FFFFULL);
register_440_sprs(env);
register_usprgh_sprs(env);
- /* Processor identification */
- spr_register(env, SPR_BOOKE_PIR, "PIR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_pir,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_BOOKE_IAC3, "IAC3",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_BOOKE_IAC4, "IAC4",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_BOOKE_DVC1, "DVC1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_BOOKE_DVC2, "DVC2",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
+
/* Memory management */
#if !defined(CONFIG_USER_ONLY)
env->nb_tlb = 64;
@@ -3310,122 +2335,12 @@ POWERPC_FAMILY(440GP)(ObjectClass *oc, void *data)
POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
}
-static void init_proc_440x4(CPUPPCState *env)
-{
- /* Time base */
- register_tbl(env);
- register_BookE_sprs(env, 0x000000000000FFFFULL);
- register_440_sprs(env);
- register_usprgh_sprs(env);
- /* Processor identification */
- spr_register(env, SPR_BOOKE_PIR, "PIR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_pir,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_BOOKE_IAC3, "IAC3",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_BOOKE_IAC4, "IAC4",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_BOOKE_DVC1, "DVC1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_BOOKE_DVC2, "DVC2",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* Memory management */
-#if !defined(CONFIG_USER_ONLY)
- env->nb_tlb = 64;
- env->nb_ways = 1;
- env->id_tlbs = 0;
- env->tlb_type = TLB_EMB;
-#endif
- init_excp_BookE(env);
- env->dcache_line_size = 32;
- env->icache_line_size = 32;
- /* XXX: TODO: allocate internal IRQ controller */
-
- SET_FIT_PERIOD(12, 16, 20, 24);
- SET_WDT_PERIOD(20, 24, 28, 32);
-}
-
-POWERPC_FAMILY(440x4)(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
- PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
-
- dc->desc = "PowerPC 440x4";
- pcc->init_proc = init_proc_440x4;
- pcc->check_pow = check_pow_nocheck;
- pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
- PPC_DCR | PPC_WRTEE |
- PPC_CACHE | PPC_CACHE_ICBI |
- PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
- PPC_MEM_TLBSYNC | PPC_MFTB |
- PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC |
- PPC_440_SPEC;
- pcc->msr_mask = (1ull << MSR_POW) |
- (1ull << MSR_CE) |
- (1ull << MSR_EE) |
- (1ull << MSR_PR) |
- (1ull << MSR_FP) |
- (1ull << MSR_ME) |
- (1ull << MSR_FE0) |
- (1ull << MSR_DWE) |
- (1ull << MSR_DE) |
- (1ull << MSR_FE1) |
- (1ull << MSR_IR) |
- (1ull << MSR_DR);
- pcc->mmu_model = POWERPC_MMU_BOOKE;
- pcc->excp_model = POWERPC_EXCP_BOOKE;
- pcc->bus_model = PPC_FLAGS_INPUT_BookE;
- pcc->bfd_mach = bfd_mach_ppc_403;
- pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE |
- POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
-}
-
static void init_proc_440x5(CPUPPCState *env)
{
- /* Time base */
- register_tbl(env);
register_BookE_sprs(env, 0x000000000000FFFFULL);
register_440_sprs(env);
register_usprgh_sprs(env);
- /* Processor identification */
- spr_register(env, SPR_BOOKE_PIR, "PIR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_pir,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_BOOKE_IAC3, "IAC3",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_BOOKE_IAC4, "IAC4",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_BOOKE_DVC1, "DVC1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_BOOKE_DVC2, "DVC2",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_BOOKE_MCSR, "MCSR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -3438,7 +2353,7 @@ static void init_proc_440x5(CPUPPCState *env)
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_440_CCR1, "CCR1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -3534,8 +2449,6 @@ POWERPC_FAMILY(440x5wDFPU)(ObjectClass *oc, void *data)
static void init_proc_MPC5xx(CPUPPCState *env)
{
- /* Time base */
- register_tbl(env);
register_5xx_8xx_sprs(env);
register_5xx_sprs(env);
init_excp_MPC5xx(env);
@@ -3569,7 +2482,7 @@ POWERPC_FAMILY(MPC5xx)(ObjectClass *oc, void *data)
(1ull << MSR_RI) |
(1ull << MSR_LE);
pcc->mmu_model = POWERPC_MMU_REAL;
- pcc->excp_model = POWERPC_EXCP_603;
+ pcc->excp_model = POWERPC_EXCP_6xx;
pcc->bus_model = PPC_FLAGS_INPUT_RCPU;
pcc->bfd_mach = bfd_mach_ppc_505;
pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
@@ -3578,8 +2491,6 @@ POWERPC_FAMILY(MPC5xx)(ObjectClass *oc, void *data)
static void init_proc_MPC8xx(CPUPPCState *env)
{
- /* Time base */
- register_tbl(env);
register_5xx_8xx_sprs(env);
register_8xx_sprs(env);
init_excp_MPC8xx(env);
@@ -3612,7 +2523,7 @@ POWERPC_FAMILY(MPC8xx)(ObjectClass *oc, void *data)
(1ull << MSR_RI) |
(1ull << MSR_LE);
pcc->mmu_model = POWERPC_MMU_MPC8xx;
- pcc->excp_model = POWERPC_EXCP_603;
+ pcc->excp_model = POWERPC_EXCP_6xx;
pcc->bus_model = PPC_FLAGS_INPUT_RCPU;
pcc->bfd_mach = bfd_mach_ppc_860;
pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
@@ -3623,34 +2534,10 @@ POWERPC_FAMILY(MPC8xx)(ObjectClass *oc, void *data)
static void init_proc_G2(CPUPPCState *env)
{
- register_ne_601_sprs(env);
+ register_non_embedded_sprs(env);
register_sdr1_sprs(env);
- register_G2_755_sprs(env);
register_G2_sprs(env);
- /* Time base */
- register_tbl(env);
- /* External access control */
- /* XXX : not implemented */
- spr_register(env, SPR_EAR, "EAR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* Hardware implementation register */
- /* XXX : not implemented */
- spr_register(env, SPR_HID0, "HID0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_HID1, "HID1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_HID2, "HID2",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
+
/* Memory management */
register_low_BATs(env);
register_high_BATs(env);
@@ -3693,62 +2580,20 @@ POWERPC_FAMILY(G2)(ObjectClass *oc, void *data)
(1ull << MSR_DR) |
(1ull << MSR_RI);
pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
- pcc->excp_model = POWERPC_EXCP_G2;
+ pcc->excp_model = POWERPC_EXCP_6xx;
pcc->bus_model = PPC_FLAGS_INPUT_6xx;
pcc->bfd_mach = bfd_mach_ppc_ec603e;
pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE |
POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
}
-static void init_proc_G2LE(CPUPPCState *env)
-{
- register_ne_601_sprs(env);
- register_sdr1_sprs(env);
- register_G2_755_sprs(env);
- register_G2_sprs(env);
- /* Time base */
- register_tbl(env);
- /* External access control */
- /* XXX : not implemented */
- spr_register(env, SPR_EAR, "EAR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* Hardware implementation register */
- /* XXX : not implemented */
- spr_register(env, SPR_HID0, "HID0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_HID1, "HID1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_HID2, "HID2",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
-
- /* Memory management */
- register_low_BATs(env);
- register_high_BATs(env);
- register_6xx_7xx_soft_tlb(env, 64, 2);
- init_excp_G2(env);
- env->dcache_line_size = 32;
- env->icache_line_size = 32;
- /* Allocate hardware IRQ controller */
- ppc6xx_irq_init(env_archcpu(env));
-}
-
POWERPC_FAMILY(G2LE)(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
dc->desc = "PowerPC G2LE";
- pcc->init_proc = init_proc_G2LE;
+ pcc->init_proc = init_proc_G2;
pcc->check_pow = check_pow_hid0;
pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
@@ -3775,7 +2620,7 @@ POWERPC_FAMILY(G2LE)(ObjectClass *oc, void *data)
(1ull << MSR_RI) |
(1ull << MSR_LE);
pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
- pcc->excp_model = POWERPC_EXCP_G2;
+ pcc->excp_model = POWERPC_EXCP_6xx;
pcc->bus_model = PPC_FLAGS_INPUT_6xx;
pcc->bfd_mach = bfd_mach_ppc_ec603e;
pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE |
@@ -3784,87 +2629,86 @@ POWERPC_FAMILY(G2LE)(ObjectClass *oc, void *data)
static void init_proc_e200(CPUPPCState *env)
{
- /* Time base */
- register_tbl(env);
register_BookE_sprs(env, 0x000000070000FFFFULL);
- /* XXX : not implemented */
+
spr_register(env, SPR_BOOKE_SPEFSCR, "SPEFSCR",
&spr_read_spefscr, &spr_write_spefscr,
&spr_read_spefscr, &spr_write_spefscr,
0x00000000);
/* Memory management */
register_BookE206_sprs(env, 0x0000005D, NULL, 0);
- /* XXX : not implemented */
+ register_usprgh_sprs(env);
+
spr_register(env, SPR_HID0, "HID0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_HID1, "HID1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_Exxx_ALTCTXCR, "ALTCTXCR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_Exxx_BUCSR, "BUCSR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_Exxx_CTXCR, "CTXCR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_Exxx_DBCNT, "DBCNT",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_Exxx_DBCR3, "DBCR3",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_Exxx_L1CFG0, "L1CFG0",
&spr_read_generic, SPR_NOACCESS,
&spr_read_generic, SPR_NOACCESS,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_Exxx_L1CSR0, "L1CSR0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_Exxx_L1FINV0, "L1FINV0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_BOOKE_TLB0CFG, "TLB0CFG",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_BOOKE_TLB1CFG, "TLB1CFG",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_BOOKE_IAC3, "IAC3",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_BOOKE_IAC4, "IAC4",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MMUCSR0, "MMUCSR0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -3937,106 +2781,6 @@ POWERPC_FAMILY(e200)(ObjectClass *oc, void *data)
POWERPC_FLAG_BUS_CLK;
}
-static void init_proc_e300(CPUPPCState *env)
-{
- register_ne_601_sprs(env);
- register_sdr1_sprs(env);
- register_603_sprs(env);
- /* Time base */
- register_tbl(env);
- /* hardware implementation registers */
- /* XXX : not implemented */
- spr_register(env, SPR_HID0, "HID0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_HID1, "HID1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_HID2, "HID2",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* Breakpoints */
- /* XXX : not implemented */
- spr_register(env, SPR_DABR, "DABR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_DABR2, "DABR2",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_IABR2, "IABR2",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_IBCR, "IBCR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_DBCR, "DBCR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* Memory management */
- register_low_BATs(env);
- register_high_BATs(env);
- register_6xx_7xx_soft_tlb(env, 64, 2);
- init_excp_603(env);
- env->dcache_line_size = 32;
- env->icache_line_size = 32;
- /* Allocate hardware IRQ controller */
- ppc6xx_irq_init(env_archcpu(env));
-}
-
-POWERPC_FAMILY(e300)(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
- PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
-
- dc->desc = "e300 core";
- pcc->init_proc = init_proc_e300;
- pcc->check_pow = check_pow_hid0;
- pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
- PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
- PPC_FLOAT_STFIWX |
- PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
- PPC_MEM_SYNC | PPC_MEM_EIEIO |
- PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB |
- PPC_SEGMENT | PPC_EXTERN;
- pcc->msr_mask = (1ull << MSR_POW) |
- (1ull << MSR_TGPR) |
- (1ull << MSR_ILE) |
- (1ull << MSR_EE) |
- (1ull << MSR_PR) |
- (1ull << MSR_FP) |
- (1ull << MSR_ME) |
- (1ull << MSR_FE0) |
- (1ull << MSR_SE) |
- (1ull << MSR_DE) |
- (1ull << MSR_FE1) |
- (1ull << MSR_AL) |
- (1ull << MSR_EP) |
- (1ull << MSR_IR) |
- (1ull << MSR_DR) |
- (1ull << MSR_RI) |
- (1ull << MSR_LE);
- pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
- pcc->excp_model = POWERPC_EXCP_603;
- pcc->bus_model = PPC_FLAGS_INPUT_6xx;
- pcc->bfd_mach = bfd_mach_ppc_603;
- pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE |
- POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
-}
-
enum fsl_e500_version {
fsl_e500v1,
fsl_e500v2,
@@ -4059,8 +2803,6 @@ static void init_proc_e500(CPUPPCState *env, int version)
int i;
#endif
- /* Time base */
- register_tbl(env);
/*
* XXX The e500 doesn't implement IVOR7 and IVOR9, but doesn't
* complain when accessing them.
@@ -4081,13 +2823,18 @@ static void init_proc_e500(CPUPPCState *env, int version)
break;
}
register_BookE_sprs(env, ivor_mask);
- register_usprg3_sprs(env);
+
+ spr_register(env, SPR_USPRG3, "USPRG3",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+
/* Processor identification */
spr_register(env, SPR_BOOKE_PIR, "PIR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_pir,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_BOOKE_SPEFSCR, "SPEFSCR",
&spr_read_spefscr, &spr_write_spefscr,
&spr_read_spefscr, &spr_write_spefscr,
@@ -4147,47 +2894,48 @@ static void init_proc_e500(CPUPPCState *env, int version)
env->spr[SPR_PVR]);
}
register_BookE206_sprs(env, 0x000000DF, tlbncfg, mmucfg);
- /* XXX : not implemented */
+ register_usprgh_sprs(env);
+
spr_register(env, SPR_HID0, "HID0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_HID1, "HID1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_Exxx_BBEAR, "BBEAR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_Exxx_BBTAR, "BBTAR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_Exxx_MCAR, "MCAR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_BOOKE_MCSR, "MCSR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_Exxx_NPIDR, "NPIDR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_Exxx_BUCSR, "BUCSR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_Exxx_L1CFG0, "L1CFG0",
&spr_read_generic, SPR_NOACCESS,
&spr_read_generic, SPR_NOACCESS,
@@ -4499,165 +3247,38 @@ POWERPC_FAMILY(e6500)(ObjectClass *oc, void *data)
#endif
/* Non-embedded PowerPC */
-
-#define POWERPC_MSRR_601 (0x0000000000001040ULL)
-
-static void init_proc_601(CPUPPCState *env)
+static void init_proc_603(CPUPPCState *env)
{
- register_ne_601_sprs(env);
+ register_non_embedded_sprs(env);
register_sdr1_sprs(env);
- register_601_sprs(env);
- /* Hardware implementation registers */
- /* XXX : not implemented */
- spr_register(env, SPR_HID0, "HID0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_hid0_601,
- 0x80010080);
- /* XXX : not implemented */
- spr_register(env, SPR_HID1, "HID1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_601_HID2, "HID2",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_601_HID5, "HID5",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* Memory management */
- init_excp_601(env);
- /*
- * XXX: beware that dcache line size is 64
- * but dcbz uses 32 bytes "sectors"
- * XXX: this breaks clcs instruction !
- */
- env->dcache_line_size = 32;
- env->icache_line_size = 64;
- /* Allocate hardware IRQ controller */
- ppc6xx_irq_init(env_archcpu(env));
-}
-
-POWERPC_FAMILY(601)(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
- PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
-
- dc->desc = "PowerPC 601";
- pcc->init_proc = init_proc_601;
- pcc->check_pow = check_pow_none;
- pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_POWER_BR |
- PPC_FLOAT |
- PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
- PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE |
- PPC_SEGMENT | PPC_EXTERN;
- pcc->msr_mask = (1ull << MSR_EE) |
- (1ull << MSR_PR) |
- (1ull << MSR_FP) |
- (1ull << MSR_ME) |
- (1ull << MSR_FE0) |
- (1ull << MSR_SE) |
- (1ull << MSR_FE1) |
- (1ull << MSR_EP) |
- (1ull << MSR_IR) |
- (1ull << MSR_DR);
- pcc->mmu_model = POWERPC_MMU_601;
- pcc->excp_model = POWERPC_EXCP_601;
- pcc->bus_model = PPC_FLAGS_INPUT_6xx;
- pcc->bfd_mach = bfd_mach_ppc_601;
- pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_RTC_CLK | POWERPC_FLAG_HID0_LE;
-}
-
-#define POWERPC_MSRR_601v (0x0000000000001040ULL)
-
-static void init_proc_601v(CPUPPCState *env)
-{
- init_proc_601(env);
- /* XXX : not implemented */
- spr_register(env, SPR_601_HID15, "HID15",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
-}
-
-POWERPC_FAMILY(601v)(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
- PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
-
- dc->desc = "PowerPC 601v";
- pcc->init_proc = init_proc_601v;
- pcc->check_pow = check_pow_none;
- pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_POWER_BR |
- PPC_FLOAT |
- PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
- PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE |
- PPC_SEGMENT | PPC_EXTERN;
- pcc->msr_mask = (1ull << MSR_EE) |
- (1ull << MSR_PR) |
- (1ull << MSR_FP) |
- (1ull << MSR_ME) |
- (1ull << MSR_FE0) |
- (1ull << MSR_SE) |
- (1ull << MSR_FE1) |
- (1ull << MSR_EP) |
- (1ull << MSR_IR) |
- (1ull << MSR_DR);
- pcc->mmu_model = POWERPC_MMU_601;
- pcc->bus_model = PPC_FLAGS_INPUT_6xx;
- pcc->bfd_mach = bfd_mach_ppc_601;
- pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_RTC_CLK | POWERPC_FLAG_HID0_LE;
-}
+ register_603_sprs(env);
-static void init_proc_602(CPUPPCState *env)
-{
- register_ne_601_sprs(env);
- register_sdr1_sprs(env);
- register_602_sprs(env);
- /* Time base */
- register_tbl(env);
- /* hardware implementation registers */
- /* XXX : not implemented */
- spr_register(env, SPR_HID0, "HID0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_HID1, "HID1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
/* Memory management */
register_low_BATs(env);
register_6xx_7xx_soft_tlb(env, 64, 2);
- init_excp_602(env);
+ init_excp_603(env);
env->dcache_line_size = 32;
env->icache_line_size = 32;
/* Allocate hardware IRQ controller */
ppc6xx_irq_init(env_archcpu(env));
}
-POWERPC_FAMILY(602)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(603)(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
- dc->desc = "PowerPC 602";
- pcc->init_proc = init_proc_602;
+ dc->desc = "PowerPC 603";
+ pcc->init_proc = init_proc_603;
pcc->check_pow = check_pow_hid0;
pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
PPC_MEM_SYNC | PPC_MEM_EIEIO |
- PPC_MEM_TLBIE | PPC_6xx_TLB | PPC_MEM_TLBSYNC |
- PPC_SEGMENT | PPC_602_SPEC;
- pcc->msr_mask = (1ull << MSR_VSX) |
- (1ull << MSR_SA) |
- (1ull << MSR_POW) |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
(1ull << MSR_TGPR) |
(1ull << MSR_ILE) |
(1ull << MSR_EE) |
@@ -4673,49 +3294,20 @@ POWERPC_FAMILY(602)(ObjectClass *oc, void *data)
(1ull << MSR_DR) |
(1ull << MSR_RI) |
(1ull << MSR_LE);
- /* XXX: 602 MMU is quite specific. Should add a special case */
pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
- pcc->excp_model = POWERPC_EXCP_602;
+ pcc->excp_model = POWERPC_EXCP_6xx;
pcc->bus_model = PPC_FLAGS_INPUT_6xx;
- pcc->bfd_mach = bfd_mach_ppc_602;
+ pcc->bfd_mach = bfd_mach_ppc_603;
pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE |
POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
}
-static void init_proc_603(CPUPPCState *env)
-{
- register_ne_601_sprs(env);
- register_sdr1_sprs(env);
- register_603_sprs(env);
- /* Time base */
- register_tbl(env);
- /* hardware implementation registers */
- /* XXX : not implemented */
- spr_register(env, SPR_HID0, "HID0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_HID1, "HID1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* Memory management */
- register_low_BATs(env);
- register_6xx_7xx_soft_tlb(env, 64, 2);
- init_excp_603(env);
- env->dcache_line_size = 32;
- env->icache_line_size = 32;
- /* Allocate hardware IRQ controller */
- ppc6xx_irq_init(env_archcpu(env));
-}
-
-POWERPC_FAMILY(603)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(603E)(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
- dc->desc = "PowerPC 603";
+ dc->desc = "PowerPC 603e";
pcc->init_proc = init_proc_603;
pcc->check_pow = check_pow_hid0;
pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
@@ -4742,52 +3334,30 @@ POWERPC_FAMILY(603)(ObjectClass *oc, void *data)
(1ull << MSR_RI) |
(1ull << MSR_LE);
pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
- pcc->excp_model = POWERPC_EXCP_603;
+ pcc->excp_model = POWERPC_EXCP_6xx;
pcc->bus_model = PPC_FLAGS_INPUT_6xx;
- pcc->bfd_mach = bfd_mach_ppc_603;
+ pcc->bfd_mach = bfd_mach_ppc_ec603e;
pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE |
POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
}
-static void init_proc_603E(CPUPPCState *env)
+static void init_proc_e300(CPUPPCState *env)
{
- register_ne_601_sprs(env);
- register_sdr1_sprs(env);
- register_603_sprs(env);
- /* Time base */
- register_tbl(env);
- /* hardware implementation registers */
- /* XXX : not implemented */
- spr_register(env, SPR_HID0, "HID0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_HID1, "HID1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* Memory management */
- register_low_BATs(env);
- register_6xx_7xx_soft_tlb(env, 64, 2);
- init_excp_603(env);
- env->dcache_line_size = 32;
- env->icache_line_size = 32;
- /* Allocate hardware IRQ controller */
- ppc6xx_irq_init(env_archcpu(env));
+ init_proc_603(env);
+ register_e300_sprs(env);
}
-POWERPC_FAMILY(603E)(ObjectClass *oc, void *data)
+POWERPC_FAMILY(e300)(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
- dc->desc = "PowerPC 603e";
- pcc->init_proc = init_proc_603E;
+ dc->desc = "e300 core";
+ pcc->init_proc = init_proc_e300;
pcc->check_pow = check_pow_hid0;
pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
- PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_FLOAT_STFIWX |
PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
PPC_MEM_SYNC | PPC_MEM_EIEIO |
PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB |
@@ -4803,32 +3373,26 @@ POWERPC_FAMILY(603E)(ObjectClass *oc, void *data)
(1ull << MSR_SE) |
(1ull << MSR_DE) |
(1ull << MSR_FE1) |
+ (1ull << MSR_AL) |
(1ull << MSR_EP) |
(1ull << MSR_IR) |
(1ull << MSR_DR) |
(1ull << MSR_RI) |
(1ull << MSR_LE);
pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
- pcc->excp_model = POWERPC_EXCP_603E;
+ pcc->excp_model = POWERPC_EXCP_6xx;
pcc->bus_model = PPC_FLAGS_INPUT_6xx;
- pcc->bfd_mach = bfd_mach_ppc_ec603e;
+ pcc->bfd_mach = bfd_mach_ppc_603;
pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE |
POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
}
static void init_proc_604(CPUPPCState *env)
{
- register_ne_601_sprs(env);
+ register_non_embedded_sprs(env);
register_sdr1_sprs(env);
register_604_sprs(env);
- /* Time base */
- register_tbl(env);
- /* Hardware implementation registers */
- /* XXX : not implemented */
- spr_register(env, SPR_HID0, "HID0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
+
/* Memory management */
register_low_BATs(env);
init_excp_604(env);
@@ -4870,7 +3434,7 @@ POWERPC_FAMILY(604)(ObjectClass *oc, void *data)
(1ull << MSR_RI) |
(1ull << MSR_LE);
pcc->mmu_model = POWERPC_MMU_32B;
- pcc->excp_model = POWERPC_EXCP_604;
+ pcc->excp_model = POWERPC_EXCP_6xx;
pcc->bus_model = PPC_FLAGS_INPUT_6xx;
pcc->bfd_mach = bfd_mach_ppc_604;
pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
@@ -4879,44 +3443,8 @@ POWERPC_FAMILY(604)(ObjectClass *oc, void *data)
static void init_proc_604E(CPUPPCState *env)
{
- register_ne_601_sprs(env);
- register_sdr1_sprs(env);
- register_604_sprs(env);
- /* XXX : not implemented */
- spr_register(env, SPR_7XX_MMCR1, "MMCR1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_7XX_PMC3, "PMC3",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_7XX_PMC4, "PMC4",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* Time base */
- register_tbl(env);
- /* Hardware implementation registers */
- /* XXX : not implemented */
- spr_register(env, SPR_HID0, "HID0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_HID1, "HID1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* Memory management */
- register_low_BATs(env);
- init_excp_604(env);
- env->dcache_line_size = 32;
- env->icache_line_size = 32;
- /* Allocate hardware IRQ controller */
- ppc6xx_irq_init(env_archcpu(env));
+ init_proc_604(env);
+ register_604e_sprs(env);
}
POWERPC_FAMILY(604E)(ObjectClass *oc, void *data)
@@ -4951,7 +3479,7 @@ POWERPC_FAMILY(604E)(ObjectClass *oc, void *data)
(1ull << MSR_RI) |
(1ull << MSR_LE);
pcc->mmu_model = POWERPC_MMU_32B;
- pcc->excp_model = POWERPC_EXCP_604;
+ pcc->excp_model = POWERPC_EXCP_6xx;
pcc->bus_model = PPC_FLAGS_INPUT_6xx;
pcc->bfd_mach = bfd_mach_ppc_604;
pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
@@ -4960,24 +3488,12 @@ POWERPC_FAMILY(604E)(ObjectClass *oc, void *data)
static void init_proc_740(CPUPPCState *env)
{
- register_ne_601_sprs(env);
+ register_non_embedded_sprs(env);
register_sdr1_sprs(env);
register_7xx_sprs(env);
- /* Time base */
- register_tbl(env);
/* Thermal management */
register_thrm_sprs(env);
- /* Hardware implementation registers */
- /* XXX : not implemented */
- spr_register(env, SPR_HID0, "HID0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_HID1, "HID1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
+
/* Memory management */
register_low_BATs(env);
init_excp_7x0(env);
@@ -5019,7 +3535,7 @@ POWERPC_FAMILY(740)(ObjectClass *oc, void *data)
(1ull << MSR_RI) |
(1ull << MSR_LE);
pcc->mmu_model = POWERPC_MMU_32B;
- pcc->excp_model = POWERPC_EXCP_7x0;
+ pcc->excp_model = POWERPC_EXCP_7xx;
pcc->bus_model = PPC_FLAGS_INPUT_6xx;
pcc->bfd_mach = bfd_mach_ppc_750;
pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
@@ -5028,29 +3544,17 @@ POWERPC_FAMILY(740)(ObjectClass *oc, void *data)
static void init_proc_750(CPUPPCState *env)
{
- register_ne_601_sprs(env);
+ register_non_embedded_sprs(env);
register_sdr1_sprs(env);
register_7xx_sprs(env);
- /* XXX : not implemented */
+
spr_register(env, SPR_L2CR, "L2CR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, spr_access_nop,
0x00000000);
- /* Time base */
- register_tbl(env);
/* Thermal management */
register_thrm_sprs(env);
- /* Hardware implementation registers */
- /* XXX : not implemented */
- spr_register(env, SPR_HID0, "HID0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_HID1, "HID1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
+
/* Memory management */
register_low_BATs(env);
/*
@@ -5096,7 +3600,7 @@ POWERPC_FAMILY(750)(ObjectClass *oc, void *data)
(1ull << MSR_RI) |
(1ull << MSR_LE);
pcc->mmu_model = POWERPC_MMU_32B;
- pcc->excp_model = POWERPC_EXCP_7x0;
+ pcc->excp_model = POWERPC_EXCP_7xx;
pcc->bus_model = PPC_FLAGS_INPUT_6xx;
pcc->bfd_mach = bfd_mach_ppc_750;
pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
@@ -5105,16 +3609,14 @@ POWERPC_FAMILY(750)(ObjectClass *oc, void *data)
static void init_proc_750cl(CPUPPCState *env)
{
- register_ne_601_sprs(env);
+ register_non_embedded_sprs(env);
register_sdr1_sprs(env);
register_7xx_sprs(env);
- /* XXX : not implemented */
+
spr_register(env, SPR_L2CR, "L2CR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, spr_access_nop,
0x00000000);
- /* Time base */
- register_tbl(env);
/* Thermal management */
/* Those registers are fake on 750CL */
spr_register(env, SPR_THRM1, "THRM1",
@@ -5129,7 +3631,7 @@ static void init_proc_750cl(CPUPPCState *env)
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX: not implemented */
+
spr_register(env, SPR_750_TDCL, "TDCL",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -5139,7 +3641,6 @@ static void init_proc_750cl(CPUPPCState *env)
&spr_read_generic, &spr_write_generic,
0x00000000);
/* DMA */
- /* XXX : not implemented */
spr_register(env, SPR_750_WPAR, "WPAR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -5153,63 +3654,51 @@ static void init_proc_750cl(CPUPPCState *env)
&spr_read_generic, &spr_write_generic,
0x00000000);
/* Hardware implementation registers */
- /* XXX : not implemented */
- spr_register(env, SPR_HID0, "HID0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_HID1, "HID1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
spr_register(env, SPR_750CL_HID2, "HID2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_750CL_HID4, "HID4",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* Quantization registers */
- /* XXX : not implemented */
spr_register(env, SPR_750_GQR0, "GQR0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_750_GQR1, "GQR1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_750_GQR2, "GQR2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_750_GQR3, "GQR3",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_750_GQR4, "GQR4",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_750_GQR5, "GQR5",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_750_GQR6, "GQR6",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_750_GQR7, "GQR7",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -5296,7 +3785,7 @@ POWERPC_FAMILY(750cl)(ObjectClass *oc, void *data)
(1ull << MSR_RI) |
(1ull << MSR_LE);
pcc->mmu_model = POWERPC_MMU_32B;
- pcc->excp_model = POWERPC_EXCP_7x0;
+ pcc->excp_model = POWERPC_EXCP_7xx;
pcc->bus_model = PPC_FLAGS_INPUT_6xx;
pcc->bfd_mach = bfd_mach_ppc_750;
pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
@@ -5305,34 +3794,22 @@ POWERPC_FAMILY(750cl)(ObjectClass *oc, void *data)
static void init_proc_750cx(CPUPPCState *env)
{
- register_ne_601_sprs(env);
+ register_non_embedded_sprs(env);
register_sdr1_sprs(env);
register_7xx_sprs(env);
- /* XXX : not implemented */
+
spr_register(env, SPR_L2CR, "L2CR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, spr_access_nop,
0x00000000);
- /* Time base */
- register_tbl(env);
/* Thermal management */
register_thrm_sprs(env);
- /* This register is not implemented but is present for compatibility */
+
spr_register(env, SPR_SDA, "SDA",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* Hardware implementation registers */
- /* XXX : not implemented */
- spr_register(env, SPR_HID0, "HID0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_HID1, "HID1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
+
/* Memory management */
register_low_BATs(env);
/* PowerPC 750cx has 8 DBATs and 8 IBATs */
@@ -5376,7 +3853,7 @@ POWERPC_FAMILY(750cx)(ObjectClass *oc, void *data)
(1ull << MSR_RI) |
(1ull << MSR_LE);
pcc->mmu_model = POWERPC_MMU_32B;
- pcc->excp_model = POWERPC_EXCP_7x0;
+ pcc->excp_model = POWERPC_EXCP_7xx;
pcc->bus_model = PPC_FLAGS_INPUT_6xx;
pcc->bfd_mach = bfd_mach_ppc_750;
pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
@@ -5385,35 +3862,22 @@ POWERPC_FAMILY(750cx)(ObjectClass *oc, void *data)
static void init_proc_750fx(CPUPPCState *env)
{
- register_ne_601_sprs(env);
+ register_non_embedded_sprs(env);
register_sdr1_sprs(env);
register_7xx_sprs(env);
- /* XXX : not implemented */
+
spr_register(env, SPR_L2CR, "L2CR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, spr_access_nop,
0x00000000);
- /* Time base */
- register_tbl(env);
/* Thermal management */
register_thrm_sprs(env);
- /* XXX : not implemented */
+
spr_register(env, SPR_750_THRM4, "THRM4",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* Hardware implementation registers */
- /* XXX : not implemented */
- spr_register(env, SPR_HID0, "HID0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_HID1, "HID1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
spr_register(env, SPR_750FX_HID2, "HID2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -5461,7 +3925,7 @@ POWERPC_FAMILY(750fx)(ObjectClass *oc, void *data)
(1ull << MSR_RI) |
(1ull << MSR_LE);
pcc->mmu_model = POWERPC_MMU_32B;
- pcc->excp_model = POWERPC_EXCP_7x0;
+ pcc->excp_model = POWERPC_EXCP_7xx;
pcc->bus_model = PPC_FLAGS_INPUT_6xx;
pcc->bfd_mach = bfd_mach_ppc_750;
pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
@@ -5470,35 +3934,22 @@ POWERPC_FAMILY(750fx)(ObjectClass *oc, void *data)
static void init_proc_750gx(CPUPPCState *env)
{
- register_ne_601_sprs(env);
+ register_non_embedded_sprs(env);
register_sdr1_sprs(env);
register_7xx_sprs(env);
- /* XXX : not implemented (XXX: different from 750fx) */
+
spr_register(env, SPR_L2CR, "L2CR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, spr_access_nop,
0x00000000);
- /* Time base */
- register_tbl(env);
/* Thermal management */
register_thrm_sprs(env);
- /* XXX : not implemented */
+
spr_register(env, SPR_750_THRM4, "THRM4",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* Hardware implementation registers */
- /* XXX : not implemented (XXX: different from 750fx) */
- spr_register(env, SPR_HID0, "HID0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_HID1, "HID1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented (XXX: different from 750fx) */
spr_register(env, SPR_750FX_HID2, "HID2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -5546,7 +3997,7 @@ POWERPC_FAMILY(750gx)(ObjectClass *oc, void *data)
(1ull << MSR_RI) |
(1ull << MSR_LE);
pcc->mmu_model = POWERPC_MMU_32B;
- pcc->excp_model = POWERPC_EXCP_7x0;
+ pcc->excp_model = POWERPC_EXCP_7xx;
pcc->bus_model = PPC_FLAGS_INPUT_6xx;
pcc->bfd_mach = bfd_mach_ppc_750;
pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
@@ -5555,30 +4006,13 @@ POWERPC_FAMILY(750gx)(ObjectClass *oc, void *data)
static void init_proc_745(CPUPPCState *env)
{
- register_ne_601_sprs(env);
+ register_non_embedded_sprs(env);
register_sdr1_sprs(env);
register_7xx_sprs(env);
- register_G2_755_sprs(env);
- /* Time base */
- register_tbl(env);
+ register_745_sprs(env);
/* Thermal management */
register_thrm_sprs(env);
- /* Hardware implementation registers */
- /* XXX : not implemented */
- spr_register(env, SPR_HID0, "HID0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_HID1, "HID1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_HID2, "HID2",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
+
/* Memory management */
register_low_BATs(env);
register_high_BATs(env);
@@ -5622,7 +4056,7 @@ POWERPC_FAMILY(745)(ObjectClass *oc, void *data)
(1ull << MSR_RI) |
(1ull << MSR_LE);
pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
- pcc->excp_model = POWERPC_EXCP_7x5;
+ pcc->excp_model = POWERPC_EXCP_7xx;
pcc->bus_model = PPC_FLAGS_INPUT_6xx;
pcc->bfd_mach = bfd_mach_ppc_750;
pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
@@ -5631,50 +4065,8 @@ POWERPC_FAMILY(745)(ObjectClass *oc, void *data)
static void init_proc_755(CPUPPCState *env)
{
- register_ne_601_sprs(env);
- register_sdr1_sprs(env);
- register_7xx_sprs(env);
- register_G2_755_sprs(env);
- /* Time base */
- register_tbl(env);
- /* L2 cache control */
- /* XXX : not implemented */
- spr_register(env, SPR_L2CR, "L2CR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, spr_access_nop,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_L2PMCR, "L2PMCR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* Thermal management */
- register_thrm_sprs(env);
- /* Hardware implementation registers */
- /* XXX : not implemented */
- spr_register(env, SPR_HID0, "HID0",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_HID1, "HID1",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* XXX : not implemented */
- spr_register(env, SPR_HID2, "HID2",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- /* Memory management */
- register_low_BATs(env);
- register_high_BATs(env);
- register_6xx_7xx_soft_tlb(env, 64, 2);
- init_excp_7x5(env);
- env->dcache_line_size = 32;
- env->icache_line_size = 32;
- /* Allocate hardware IRQ controller */
- ppc6xx_irq_init(env_archcpu(env));
+ init_proc_745(env);
+ register_755_sprs(env);
}
POWERPC_FAMILY(755)(ObjectClass *oc, void *data)
@@ -5709,7 +4101,7 @@ POWERPC_FAMILY(755)(ObjectClass *oc, void *data)
(1ull << MSR_RI) |
(1ull << MSR_LE);
pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
- pcc->excp_model = POWERPC_EXCP_7x5;
+ pcc->excp_model = POWERPC_EXCP_7xx;
pcc->bus_model = PPC_FLAGS_INPUT_6xx;
pcc->bfd_mach = bfd_mach_ppc_750;
pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
@@ -5718,21 +4110,16 @@ POWERPC_FAMILY(755)(ObjectClass *oc, void *data)
static void init_proc_7400(CPUPPCState *env)
{
- register_ne_601_sprs(env);
+ register_non_embedded_sprs(env);
register_sdr1_sprs(env);
- register_7xx_sprs(env);
- /* Time base */
- register_tbl(env);
- /* 74xx specific SPR */
register_74xx_sprs(env);
vscr_init(env, 0x00010000);
- /* XXX : not implemented */
+
spr_register(env, SPR_UBAMR, "UBAMR",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
0x00000000);
- /* XXX: this seems not implemented on all revisions. */
- /* XXX : not implemented */
+
spr_register(env, SPR_MSSCR1, "MSSCR1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -5795,15 +4182,11 @@ POWERPC_FAMILY(7400)(ObjectClass *oc, void *data)
static void init_proc_7410(CPUPPCState *env)
{
- register_ne_601_sprs(env);
+ register_non_embedded_sprs(env);
register_sdr1_sprs(env);
- register_7xx_sprs(env);
- /* Time base */
- register_tbl(env);
- /* 74xx specific SPR */
register_74xx_sprs(env);
vscr_init(env, 0x00010000);
- /* XXX : not implemented */
+
spr_register(env, SPR_UBAMR, "UBAMR",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
@@ -5811,13 +4194,13 @@ static void init_proc_7410(CPUPPCState *env)
/* Thermal management */
register_thrm_sprs(env);
/* L2PMCR */
- /* XXX : not implemented */
+
spr_register(env, SPR_L2PMCR, "L2PMCR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* LDSTDB */
- /* XXX : not implemented */
+
spr_register(env, SPR_LDSTDB, "LDSTDB",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -5878,61 +4261,52 @@ POWERPC_FAMILY(7410)(ObjectClass *oc, void *data)
static void init_proc_7440(CPUPPCState *env)
{
- register_ne_601_sprs(env);
+ register_non_embedded_sprs(env);
register_sdr1_sprs(env);
- register_7xx_sprs(env);
- /* Time base */
- register_tbl(env);
- /* 74xx specific SPR */
register_74xx_sprs(env);
vscr_init(env, 0x00010000);
- /* XXX : not implemented */
+
spr_register(env, SPR_UBAMR, "UBAMR",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
0x00000000);
/* LDSTCR */
- /* XXX : not implemented */
spr_register(env, SPR_LDSTCR, "LDSTCR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* ICTRL */
- /* XXX : not implemented */
spr_register(env, SPR_ICTRL, "ICTRL",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* MSSSR0 */
- /* XXX : not implemented */
spr_register(env, SPR_MSSSR0, "MSSSR0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* PMC */
- /* XXX : not implemented */
spr_register(env, SPR_7XX_PMC5, "PMC5",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_UPMC5, "UPMC5",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_PMC6, "PMC6",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_UPMC6, "UPMC6",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
0x00000000);
/* Memory management */
register_low_BATs(env);
- register_74xx_soft_tlb(env, 128, 2);
init_excp_7450(env);
env->dcache_line_size = 32;
env->icache_line_size = 32;
@@ -5956,7 +4330,7 @@ POWERPC_FAMILY(7440)(ObjectClass *oc, void *data)
PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
PPC_MEM_SYNC | PPC_MEM_EIEIO |
PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
- PPC_MEM_TLBIA | PPC_74xx_TLB |
+ PPC_MEM_TLBIA |
PPC_SEGMENT | PPC_EXTERN |
PPC_ALTIVEC;
pcc->msr_mask = (1ull << MSR_VR) |
@@ -5976,7 +4350,7 @@ POWERPC_FAMILY(7440)(ObjectClass *oc, void *data)
(1ull << MSR_PMM) |
(1ull << MSR_RI) |
(1ull << MSR_LE);
- pcc->mmu_model = POWERPC_MMU_SOFT_74xx;
+ pcc->mmu_model = POWERPC_MMU_32B;
pcc->excp_model = POWERPC_EXCP_74xx;
pcc->bus_model = PPC_FLAGS_INPUT_6xx;
pcc->bfd_mach = bfd_mach_ppc_7400;
@@ -5987,87 +4361,74 @@ POWERPC_FAMILY(7440)(ObjectClass *oc, void *data)
static void init_proc_7450(CPUPPCState *env)
{
- register_ne_601_sprs(env);
+ register_non_embedded_sprs(env);
register_sdr1_sprs(env);
- register_7xx_sprs(env);
- /* Time base */
- register_tbl(env);
- /* 74xx specific SPR */
register_74xx_sprs(env);
vscr_init(env, 0x00010000);
/* Level 3 cache control */
register_l3_ctrl(env);
/* L3ITCR1 */
- /* XXX : not implemented */
spr_register(env, SPR_L3ITCR1, "L3ITCR1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* L3ITCR2 */
- /* XXX : not implemented */
spr_register(env, SPR_L3ITCR2, "L3ITCR2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* L3ITCR3 */
- /* XXX : not implemented */
spr_register(env, SPR_L3ITCR3, "L3ITCR3",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* L3OHCR */
- /* XXX : not implemented */
spr_register(env, SPR_L3OHCR, "L3OHCR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_UBAMR, "UBAMR",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
0x00000000);
/* LDSTCR */
- /* XXX : not implemented */
spr_register(env, SPR_LDSTCR, "LDSTCR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* ICTRL */
- /* XXX : not implemented */
spr_register(env, SPR_ICTRL, "ICTRL",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* MSSSR0 */
- /* XXX : not implemented */
spr_register(env, SPR_MSSSR0, "MSSSR0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* PMC */
- /* XXX : not implemented */
spr_register(env, SPR_7XX_PMC5, "PMC5",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_UPMC5, "UPMC5",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_PMC6, "PMC6",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_UPMC6, "UPMC6",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
0x00000000);
/* Memory management */
register_low_BATs(env);
- register_74xx_soft_tlb(env, 128, 2);
init_excp_7450(env);
env->dcache_line_size = 32;
env->icache_line_size = 32;
@@ -6091,7 +4452,7 @@ POWERPC_FAMILY(7450)(ObjectClass *oc, void *data)
PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
PPC_MEM_SYNC | PPC_MEM_EIEIO |
PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
- PPC_MEM_TLBIA | PPC_74xx_TLB |
+ PPC_MEM_TLBIA |
PPC_SEGMENT | PPC_EXTERN |
PPC_ALTIVEC;
pcc->msr_mask = (1ull << MSR_VR) |
@@ -6111,7 +4472,7 @@ POWERPC_FAMILY(7450)(ObjectClass *oc, void *data)
(1ull << MSR_PMM) |
(1ull << MSR_RI) |
(1ull << MSR_LE);
- pcc->mmu_model = POWERPC_MMU_SOFT_74xx;
+ pcc->mmu_model = POWERPC_MMU_32B;
pcc->excp_model = POWERPC_EXCP_74xx;
pcc->bus_model = PPC_FLAGS_INPUT_6xx;
pcc->bfd_mach = bfd_mach_ppc_7400;
@@ -6122,49 +4483,41 @@ POWERPC_FAMILY(7450)(ObjectClass *oc, void *data)
static void init_proc_7445(CPUPPCState *env)
{
- register_ne_601_sprs(env);
+ register_non_embedded_sprs(env);
register_sdr1_sprs(env);
- register_7xx_sprs(env);
- /* Time base */
- register_tbl(env);
- /* 74xx specific SPR */
register_74xx_sprs(env);
vscr_init(env, 0x00010000);
/* LDSTCR */
- /* XXX : not implemented */
spr_register(env, SPR_LDSTCR, "LDSTCR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* ICTRL */
- /* XXX : not implemented */
spr_register(env, SPR_ICTRL, "ICTRL",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* MSSSR0 */
- /* XXX : not implemented */
spr_register(env, SPR_MSSSR0, "MSSSR0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* PMC */
- /* XXX : not implemented */
spr_register(env, SPR_7XX_PMC5, "PMC5",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_UPMC5, "UPMC5",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_PMC6, "PMC6",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_UPMC6, "UPMC6",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
@@ -6205,7 +4558,6 @@ static void init_proc_7445(CPUPPCState *env)
/* Memory management */
register_low_BATs(env);
register_high_BATs(env);
- register_74xx_soft_tlb(env, 128, 2);
init_excp_7450(env);
env->dcache_line_size = 32;
env->icache_line_size = 32;
@@ -6229,7 +4581,7 @@ POWERPC_FAMILY(7445)(ObjectClass *oc, void *data)
PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
PPC_MEM_SYNC | PPC_MEM_EIEIO |
PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
- PPC_MEM_TLBIA | PPC_74xx_TLB |
+ PPC_MEM_TLBIA |
PPC_SEGMENT | PPC_EXTERN |
PPC_ALTIVEC;
pcc->msr_mask = (1ull << MSR_VR) |
@@ -6249,7 +4601,7 @@ POWERPC_FAMILY(7445)(ObjectClass *oc, void *data)
(1ull << MSR_PMM) |
(1ull << MSR_RI) |
(1ull << MSR_LE);
- pcc->mmu_model = POWERPC_MMU_SOFT_74xx;
+ pcc->mmu_model = POWERPC_MMU_32B;
pcc->excp_model = POWERPC_EXCP_74xx;
pcc->bus_model = PPC_FLAGS_INPUT_6xx;
pcc->bfd_mach = bfd_mach_ppc_7400;
@@ -6260,51 +4612,43 @@ POWERPC_FAMILY(7445)(ObjectClass *oc, void *data)
static void init_proc_7455(CPUPPCState *env)
{
- register_ne_601_sprs(env);
+ register_non_embedded_sprs(env);
register_sdr1_sprs(env);
- register_7xx_sprs(env);
- /* Time base */
- register_tbl(env);
- /* 74xx specific SPR */
register_74xx_sprs(env);
vscr_init(env, 0x00010000);
/* Level 3 cache control */
register_l3_ctrl(env);
/* LDSTCR */
- /* XXX : not implemented */
spr_register(env, SPR_LDSTCR, "LDSTCR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* ICTRL */
- /* XXX : not implemented */
spr_register(env, SPR_ICTRL, "ICTRL",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* MSSSR0 */
- /* XXX : not implemented */
spr_register(env, SPR_MSSSR0, "MSSSR0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* PMC */
- /* XXX : not implemented */
spr_register(env, SPR_7XX_PMC5, "PMC5",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_UPMC5, "UPMC5",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_PMC6, "PMC6",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_UPMC6, "UPMC6",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
@@ -6345,7 +4689,6 @@ static void init_proc_7455(CPUPPCState *env)
/* Memory management */
register_low_BATs(env);
register_high_BATs(env);
- register_74xx_soft_tlb(env, 128, 2);
init_excp_7450(env);
env->dcache_line_size = 32;
env->icache_line_size = 32;
@@ -6369,7 +4712,7 @@ POWERPC_FAMILY(7455)(ObjectClass *oc, void *data)
PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
PPC_MEM_SYNC | PPC_MEM_EIEIO |
PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
- PPC_MEM_TLBIA | PPC_74xx_TLB |
+ PPC_MEM_TLBIA |
PPC_SEGMENT | PPC_EXTERN |
PPC_ALTIVEC;
pcc->msr_mask = (1ull << MSR_VR) |
@@ -6389,7 +4732,7 @@ POWERPC_FAMILY(7455)(ObjectClass *oc, void *data)
(1ull << MSR_PMM) |
(1ull << MSR_RI) |
(1ull << MSR_LE);
- pcc->mmu_model = POWERPC_MMU_SOFT_74xx;
+ pcc->mmu_model = POWERPC_MMU_32B;
pcc->excp_model = POWERPC_EXCP_74xx;
pcc->bus_model = PPC_FLAGS_INPUT_6xx;
pcc->bfd_mach = bfd_mach_ppc_7400;
@@ -6400,75 +4743,63 @@ POWERPC_FAMILY(7455)(ObjectClass *oc, void *data)
static void init_proc_7457(CPUPPCState *env)
{
- register_ne_601_sprs(env);
+ register_non_embedded_sprs(env);
register_sdr1_sprs(env);
- register_7xx_sprs(env);
- /* Time base */
- register_tbl(env);
- /* 74xx specific SPR */
register_74xx_sprs(env);
vscr_init(env, 0x00010000);
/* Level 3 cache control */
register_l3_ctrl(env);
/* L3ITCR1 */
- /* XXX : not implemented */
spr_register(env, SPR_L3ITCR1, "L3ITCR1",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* L3ITCR2 */
- /* XXX : not implemented */
spr_register(env, SPR_L3ITCR2, "L3ITCR2",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* L3ITCR3 */
- /* XXX : not implemented */
spr_register(env, SPR_L3ITCR3, "L3ITCR3",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* L3OHCR */
- /* XXX : not implemented */
spr_register(env, SPR_L3OHCR, "L3OHCR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* LDSTCR */
- /* XXX : not implemented */
spr_register(env, SPR_LDSTCR, "LDSTCR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* ICTRL */
- /* XXX : not implemented */
spr_register(env, SPR_ICTRL, "ICTRL",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* MSSSR0 */
- /* XXX : not implemented */
spr_register(env, SPR_MSSSR0, "MSSSR0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
/* PMC */
- /* XXX : not implemented */
spr_register(env, SPR_7XX_PMC5, "PMC5",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_UPMC5, "UPMC5",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_PMC6, "PMC6",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_UPMC6, "UPMC6",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
@@ -6509,7 +4840,6 @@ static void init_proc_7457(CPUPPCState *env)
/* Memory management */
register_low_BATs(env);
register_high_BATs(env);
- register_74xx_soft_tlb(env, 128, 2);
init_excp_7450(env);
env->dcache_line_size = 32;
env->icache_line_size = 32;
@@ -6533,7 +4863,7 @@ POWERPC_FAMILY(7457)(ObjectClass *oc, void *data)
PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
PPC_MEM_SYNC | PPC_MEM_EIEIO |
PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
- PPC_MEM_TLBIA | PPC_74xx_TLB |
+ PPC_MEM_TLBIA |
PPC_SEGMENT | PPC_EXTERN |
PPC_ALTIVEC;
pcc->msr_mask = (1ull << MSR_VR) |
@@ -6553,7 +4883,7 @@ POWERPC_FAMILY(7457)(ObjectClass *oc, void *data)
(1ull << MSR_PMM) |
(1ull << MSR_RI) |
(1ull << MSR_LE);
- pcc->mmu_model = POWERPC_MMU_SOFT_74xx;
+ pcc->mmu_model = POWERPC_MMU_32B;
pcc->excp_model = POWERPC_EXCP_74xx;
pcc->bus_model = PPC_FLAGS_INPUT_6xx;
pcc->bfd_mach = bfd_mach_ppc_7400;
@@ -6564,50 +4894,46 @@ POWERPC_FAMILY(7457)(ObjectClass *oc, void *data)
static void init_proc_e600(CPUPPCState *env)
{
- register_ne_601_sprs(env);
+ register_non_embedded_sprs(env);
register_sdr1_sprs(env);
- register_7xx_sprs(env);
- /* Time base */
- register_tbl(env);
- /* 74xx specific SPR */
register_74xx_sprs(env);
vscr_init(env, 0x00010000);
- /* XXX : not implemented */
+
spr_register(env, SPR_UBAMR, "UBAMR",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_LDSTCR, "LDSTCR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_ICTRL, "ICTRL",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_MSSSR0, "MSSSR0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_PMC5, "PMC5",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_UPMC5, "UPMC5",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_PMC6, "PMC6",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- /* XXX : not implemented */
+
spr_register(env, SPR_7XX_UPMC6, "UPMC6",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
@@ -6648,7 +4974,6 @@ static void init_proc_e600(CPUPPCState *env)
/* Memory management */
register_low_BATs(env);
register_high_BATs(env);
- register_74xx_soft_tlb(env, 128, 2);
init_excp_7450(env);
env->dcache_line_size = 32;
env->icache_line_size = 32;
@@ -6672,7 +4997,7 @@ POWERPC_FAMILY(e600)(ObjectClass *oc, void *data)
PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
PPC_MEM_SYNC | PPC_MEM_EIEIO |
PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
- PPC_MEM_TLBIA | PPC_74xx_TLB |
+ PPC_MEM_TLBIA |
PPC_SEGMENT | PPC_EXTERN |
PPC_ALTIVEC;
pcc->insns_flags2 = PPC_NONE;
@@ -6721,7 +5046,6 @@ static int check_pow_970(CPUPPCState *env)
static void register_970_hid_sprs(CPUPPCState *env)
{
/* Hardware implementation registers */
- /* XXX : not implemented */
spr_register(env, SPR_HID0, "HID0",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_clear,
@@ -6738,7 +5062,7 @@ static void register_970_hid_sprs(CPUPPCState *env)
static void register_970_hior_sprs(CPUPPCState *env)
{
- spr_register(env, SPR_HIOR, "SPR_HIOR",
+ spr_register(env, SPR_HIOR, "HIOR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_hior, &spr_write_hior,
0x00000000);
@@ -6746,11 +5070,11 @@ static void register_970_hior_sprs(CPUPPCState *env)
static void register_book3s_ctrl_sprs(CPUPPCState *env)
{
- spr_register(env, SPR_CTRL, "SPR_CTRL",
+ spr_register(env, SPR_CTRL, "CTRL",
SPR_NOACCESS, SPR_NOACCESS,
- SPR_NOACCESS, &spr_write_generic,
+ SPR_NOACCESS, &spr_write_CTRL,
0x00000000);
- spr_register(env, SPR_UCTRL, "SPR_UCTRL",
+ spr_register(env, SPR_UCTRL, "UCTRL",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, SPR_NOACCESS,
0x00000000);
@@ -6763,8 +5087,8 @@ static void register_book3s_altivec_sprs(CPUPPCState *env)
}
spr_register_kvm(env, SPR_VRSAVE, "VRSAVE",
- &spr_read_generic, &spr_write_generic,
- &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic32,
+ &spr_read_generic, &spr_write_generic32,
KVM_REG_PPC_VRSAVE, 0x00000000);
}
@@ -6793,17 +5117,17 @@ static void register_book3s_207_dbg_sprs(CPUPPCState *env)
spr_register_kvm_hv(env, SPR_DAWR0, "DAWR0",
SPR_NOACCESS, SPR_NOACCESS,
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_dawr0,
KVM_REG_PPC_DAWR, 0x00000000);
spr_register_kvm_hv(env, SPR_DAWRX0, "DAWRX0",
SPR_NOACCESS, SPR_NOACCESS,
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_dawrx0,
KVM_REG_PPC_DAWRX, 0x00000000);
spr_register_kvm_hv(env, SPR_CIABR, "CIABR",
SPR_NOACCESS, SPR_NOACCESS,
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_ciabr,
KVM_REG_PPC_CIABR, 0x00000000);
}
@@ -6820,11 +5144,11 @@ static void register_book3s_pmu_sup_sprs(CPUPPCState *env)
{
spr_register_kvm(env, SPR_POWER_MMCR0, "MMCR0",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- KVM_REG_PPC_MMCR0, 0x00000000);
+ &spr_read_generic, &spr_write_MMCR0,
+ KVM_REG_PPC_MMCR0, 0x80000000);
spr_register_kvm(env, SPR_POWER_MMCR1, "MMCR1",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_MMCR1,
KVM_REG_PPC_MMCR1, 0x00000000);
spr_register_kvm(env, SPR_POWER_MMCRA, "MMCRA",
SPR_NOACCESS, SPR_NOACCESS,
@@ -6832,27 +5156,27 @@ static void register_book3s_pmu_sup_sprs(CPUPPCState *env)
KVM_REG_PPC_MMCRA, 0x00000000);
spr_register_kvm(env, SPR_POWER_PMC1, "PMC1",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
+ &spr_read_PMC, &spr_write_PMC,
KVM_REG_PPC_PMC1, 0x00000000);
spr_register_kvm(env, SPR_POWER_PMC2, "PMC2",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
+ &spr_read_PMC, &spr_write_PMC,
KVM_REG_PPC_PMC2, 0x00000000);
spr_register_kvm(env, SPR_POWER_PMC3, "PMC3",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
+ &spr_read_PMC, &spr_write_PMC,
KVM_REG_PPC_PMC3, 0x00000000);
spr_register_kvm(env, SPR_POWER_PMC4, "PMC4",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
+ &spr_read_PMC, &spr_write_PMC,
KVM_REG_PPC_PMC4, 0x00000000);
spr_register_kvm(env, SPR_POWER_PMC5, "PMC5",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
+ &spr_read_PMC, &spr_write_PMC,
KVM_REG_PPC_PMC5, 0x00000000);
spr_register_kvm(env, SPR_POWER_PMC6, "PMC6",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
+ &spr_read_PMC, &spr_write_PMC,
KVM_REG_PPC_PMC6, 0x00000000);
spr_register_kvm(env, SPR_POWER_SIAR, "SIAR",
SPR_NOACCESS, SPR_NOACCESS,
@@ -6867,9 +5191,9 @@ static void register_book3s_pmu_sup_sprs(CPUPPCState *env)
static void register_book3s_pmu_user_sprs(CPUPPCState *env)
{
spr_register(env, SPR_POWER_UMMCR0, "UMMCR0",
- &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_MMCR0_ureg, &spr_write_MMCR0_ureg,
&spr_read_ureg, &spr_write_ureg,
- 0x00000000);
+ 0x80000000);
spr_register(env, SPR_POWER_UMMCR1, "UMMCR1",
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, &spr_write_ureg,
@@ -6879,27 +5203,27 @@ static void register_book3s_pmu_user_sprs(CPUPPCState *env)
&spr_read_ureg, &spr_write_ureg,
0x00000000);
spr_register(env, SPR_POWER_UPMC1, "UPMC1",
- &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_PMC14_ureg, &spr_write_PMC14_ureg,
&spr_read_ureg, &spr_write_ureg,
0x00000000);
spr_register(env, SPR_POWER_UPMC2, "UPMC2",
- &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_PMC14_ureg, &spr_write_PMC14_ureg,
&spr_read_ureg, &spr_write_ureg,
0x00000000);
spr_register(env, SPR_POWER_UPMC3, "UPMC3",
- &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_PMC14_ureg, &spr_write_PMC14_ureg,
&spr_read_ureg, &spr_write_ureg,
0x00000000);
spr_register(env, SPR_POWER_UPMC4, "UPMC4",
- &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_PMC14_ureg, &spr_write_PMC14_ureg,
&spr_read_ureg, &spr_write_ureg,
0x00000000);
spr_register(env, SPR_POWER_UPMC5, "UPMC5",
- &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_PMC56_ureg, &spr_write_PMC56_ureg,
&spr_read_ureg, &spr_write_ureg,
0x00000000);
spr_register(env, SPR_POWER_UPMC6, "UPMC6",
- &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_PMC56_ureg, &spr_write_PMC56_ureg,
&spr_read_ureg, &spr_write_ureg,
0x00000000);
spr_register(env, SPR_POWER_USIAR, "USIAR",
@@ -6975,7 +5299,7 @@ static void register_power8_pmu_sup_sprs(CPUPPCState *env)
static void register_power8_pmu_user_sprs(CPUPPCState *env)
{
spr_register(env, SPR_POWER_UMMCR2, "UMMCR2",
- &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_MMCR2_ureg, &spr_write_MMCR2_ureg,
&spr_read_ureg, &spr_write_ureg,
0x00000000);
spr_register(env, SPR_POWER_USIER, "USIER",
@@ -6984,6 +5308,38 @@ static void register_power8_pmu_user_sprs(CPUPPCState *env)
0x00000000);
}
+static void register_power10_pmu_sup_sprs(CPUPPCState *env)
+{
+ spr_register_kvm(env, SPR_POWER_MMCR3, "MMCR3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_MMCR3, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_SIER2, "SIER2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_SIER2, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_SIER3, "SIER3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_SIER3, 0x00000000);
+}
+
+static void register_power10_pmu_user_sprs(CPUPPCState *env)
+{
+ spr_register(env, SPR_POWER_UMMCR3, "UMMCR3",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_POWER_USIER2, "USIER2",
+ &spr_read_generic, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_POWER_USIER3, "USIER3",
+ &spr_read_generic, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
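
The hunks above swap the generic SPR accessors for PMU-aware callbacks (spr_write_MMCR0, spr_read_PMC, the *_ureg user-mode variants) and add the POWER10-only MMCR3/SIER2/SIER3 registers, so that a guest write can update emulated counter state rather than just storing a value. As a rough, standalone illustration of the dispatch-through-per-SPR-callbacks idea only — the struct and function names below are invented for the sketch and are not QEMU's spr_register() API:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical SPR descriptor: a value plus a per-register write hook. */
    typedef struct SprDesc {
        const char *name;
        uint64_t value;
        void (*write)(struct SprDesc *spr, uint64_t val);
    } SprDesc;

    static void write_generic(SprDesc *spr, uint64_t val)
    {
        spr->value = val;              /* plain store, no side effects */
    }

    static void write_mmcr0(SprDesc *spr, uint64_t val)
    {
        spr->value = val;
        /* A real implementation would freeze/unfreeze counters and rearm
         * overflow timers based on the new MMCR0 bits. */
        printf("%s <= 0x%" PRIx64 ", PMU state recomputed\n", spr->name, val);
    }

    int main(void)
    {
        SprDesc sprg0 = { "SPRG0", 0, write_generic };
        SprDesc mmcr0 = { "MMCR0", 0x80000000ull, write_mmcr0 };

        sprg0.write(&sprg0, 0x1234);   /* generic path */
        mmcr0.write(&mmcr0, 0);        /* PMU-specific path */
        return 0;
    }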
static void register_power5p_ear_sprs(CPUPPCState *env)
{
/* External access control */
@@ -7023,7 +5379,7 @@ static void register_970_lpar_sprs(CPUPPCState *env)
static void register_power5p_lpar_sprs(CPUPPCState *env)
{
#if !defined(CONFIG_USER_ONLY)
- /* Logical partitionning */
+ /* Logical partitioning */
spr_register_kvm_hv(env, SPR_LPCR, "LPCR",
SPR_NOACCESS, SPR_NOACCESS,
SPR_NOACCESS, SPR_NOACCESS,
@@ -7046,31 +5402,6 @@ static void register_book3s_ids_sprs(CPUPPCState *env)
&spr_read_generic, SPR_NOACCESS,
&spr_read_generic, NULL,
0x00000000);
- spr_register_hv(env, SPR_HID0, "HID0",
- SPR_NOACCESS, SPR_NOACCESS,
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- spr_register_hv(env, SPR_TSCR, "TSCR",
- SPR_NOACCESS, SPR_NOACCESS,
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- spr_register_hv(env, SPR_HMER, "HMER",
- SPR_NOACCESS, SPR_NOACCESS,
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_hmer,
- 0x00000000);
- spr_register_hv(env, SPR_HMEER, "HMEER",
- SPR_NOACCESS, SPR_NOACCESS,
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
- spr_register_hv(env, SPR_TFMR, "TFMR",
- SPR_NOACCESS, SPR_NOACCESS,
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
spr_register_hv(env, SPR_LPIDR, "LPIDR",
SPR_NOACCESS, SPR_NOACCESS,
SPR_NOACCESS, SPR_NOACCESS,
@@ -7084,7 +5415,7 @@ static void register_book3s_ids_sprs(CPUPPCState *env)
spr_register_hv(env, SPR_MMCRC, "MMCRC",
SPR_NOACCESS, SPR_NOACCESS,
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic32,
0x00000000);
spr_register_hv(env, SPR_MMCRH, "MMCRH",
SPR_NOACCESS, SPR_NOACCESS,
@@ -7119,7 +5450,7 @@ static void register_book3s_ids_sprs(CPUPPCState *env)
spr_register_hv(env, SPR_HDSISR, "HDSISR",
SPR_NOACCESS, SPR_NOACCESS,
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic32,
0x00000000);
spr_register_hv(env, SPR_HRMOR, "HRMOR",
SPR_NOACCESS, SPR_NOACCESS,
@@ -7166,7 +5497,7 @@ static void register_book3s_purr_sprs(CPUPPCState *env)
static void register_power6_dbg_sprs(CPUPPCState *env)
{
#if !defined(CONFIG_USER_ONLY)
- spr_register(env, SPR_CFAR, "SPR_CFAR",
+ spr_register(env, SPR_CFAR, "CFAR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_cfar, &spr_write_cfar,
0x00000000);
@@ -7184,7 +5515,7 @@ static void register_power5p_common_sprs(CPUPPCState *env)
static void register_power6_common_sprs(CPUPPCState *env)
{
#if !defined(CONFIG_USER_ONLY)
- spr_register_kvm(env, SPR_DSCR, "SPR_DSCR",
+ spr_register_kvm(env, SPR_DSCR, "DSCR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
KVM_REG_PPC_DSCR, 0x00000000);
@@ -7200,6 +5531,24 @@ static void register_power6_common_sprs(CPUPPCState *env)
0x00000000);
}
+static void register_HEIR32_spr(CPUPPCState *env)
+{
+ spr_register_hv(env, SPR_HEIR, "HEIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic32,
+ 0x00000000);
+}
+
+static void register_HEIR64_spr(CPUPPCState *env)
+{
+ spr_register_hv(env, SPR_HEIR, "HEIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
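
register_HEIR32_spr() and register_HEIR64_spr() differ only in the write helper: spr_write_generic32 truncates the stored value to 32 bits for the pre-POWER10 CPUs, while POWER10 keeps the full 64-bit write (plausibly because ISA v3.1 prefixed instructions need 8 bytes in HEIR; that reading is mine, not stated in the patch). A trivial sketch of the truncating-write distinction:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t heir;

    static void spr_write_64(uint64_t val) { heir = val; }
    static void spr_write_32(uint64_t val) { heir = (uint32_t)val; } /* drop upper word */

    int main(void)
    {
        spr_write_32(0x123456789abcdef0ull);
        assert(heir == 0x9abcdef0ull);
        spr_write_64(0x123456789abcdef0ull);
        assert(heir == 0x123456789abcdef0ull);
        return 0;
    }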
static void register_power8_tce_address_control_sprs(CPUPPCState *env)
{
spr_register_kvm(env, SPR_TAR, "TAR",
@@ -7314,15 +5663,71 @@ static void register_power8_ic_sprs(CPUPPCState *env)
#endif
}
+/* SPRs specific to IBM POWER CPUs */
+static void register_power_common_book4_sprs(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ spr_register_hv(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_core_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_TSCR, "TSCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic32,
+ 0x00000000);
+ spr_register_hv(env, SPR_HMER, "HMER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_hmer,
+ 0x00000000);
+ spr_register_hv(env, SPR_HMEER, "HMEER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_TFMR, "TFMR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_tfmr, &spr_write_tfmr,
+ 0x00000000);
+ spr_register_hv(env, SPR_TRIG1, "TRIG1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_access_nop, &spr_write_generic,
+ &spr_access_nop, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_TRIG2, "TRIG2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_access_nop, &spr_write_generic,
+ &spr_access_nop, &spr_write_generic,
+ 0x00000000);
+#endif
+}
+
+static void register_power9_book4_sprs(CPUPPCState *env)
+{
+ /* Add a number of P9 book4 registers */
+ register_power_common_book4_sprs(env);
+#if !defined(CONFIG_USER_ONLY)
+ spr_register_kvm(env, SPR_WORT, "WORT",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_WORT, 0);
+#endif
+}
+
static void register_power8_book4_sprs(CPUPPCState *env)
{
/* Add a number of P8 book4 registers */
+ register_power_common_book4_sprs(env);
#if !defined(CONFIG_USER_ONLY)
spr_register_kvm(env, SPR_ACOP, "ACOP",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
KVM_REG_PPC_ACOP, 0);
- spr_register_kvm(env, SPR_BOOKS_PID, "PID",
+ /* PID is only in BookE in ISA v2.07 */
+ spr_register_kvm(env, SPR_BOOKS_PID, "PIDR",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_pidr,
KVM_REG_PPC_PID, 0);
@@ -7337,13 +5742,15 @@ static void register_power7_book4_sprs(CPUPPCState *env)
{
/* Add a number of P7 book4 registers */
#if !defined(CONFIG_USER_ONLY)
+ register_power_common_book4_sprs(env);
spr_register_kvm(env, SPR_ACOP, "ACOP",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
KVM_REG_PPC_ACOP, 0);
- spr_register_kvm(env, SPR_BOOKS_PID, "PID",
+ /* PID is only in BookE in ISA v2.06 */
+ spr_register_kvm(env, SPR_BOOKS_PID, "PIDR",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic32,
KVM_REG_PPC_PID, 0);
#endif
}
@@ -7374,14 +5781,80 @@ static void register_power9_mmu_sprs(CPUPPCState *env)
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x0000000000000000);
+ /* PID is part of the BookS ISA from v3.0 */
+ spr_register_kvm(env, SPR_BOOKS_PID, "PIDR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pidr,
+ KVM_REG_PPC_PID, 0);
#endif
}
+static void register_power10_hash_sprs(CPUPPCState *env)
+{
+    /*
+     * It is the OS's responsibility to generate a random value for these
+     * registers in each process's context, so initialize them to 0 here.
+     */
+ uint64_t hashkeyr_initial_value = 0, hashpkeyr_initial_value = 0;
+#if defined(CONFIG_USER_ONLY)
+    /* In linux-user, set up the hash registers with random values */
+ GRand *rand = g_rand_new();
+ hashkeyr_initial_value =
+ ((uint64_t)g_rand_int(rand) << 32) | (uint64_t)g_rand_int(rand);
+ hashpkeyr_initial_value =
+ ((uint64_t)g_rand_int(rand) << 32) | (uint64_t)g_rand_int(rand);
+ g_rand_free(rand);
+#endif
+ spr_register(env, SPR_HASHKEYR, "HASHKEYR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ hashkeyr_initial_value);
+ spr_register_hv(env, SPR_HASHPKEYR, "HASHPKEYR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ hashpkeyr_initial_value);
+}
+
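
In system emulation HASHKEYR/HASHPKEYR start at 0 and the guest OS is expected to randomize them per process; in linux-user there is no OS, so the patch seeds them from GLib's GRand. A minimal standalone sketch of that seeding pattern, assuming only the g_rand_new()/g_rand_int()/g_rand_free() calls used in the hunk above:

    #include <glib.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Combine two 32-bit draws into one 64-bit key, as the hunk does. */
    static uint64_t random_key64(void)
    {
        GRand *rand = g_rand_new();    /* seeded by GLib from system entropy/time */
        uint64_t key = ((uint64_t)g_rand_int(rand) << 32) |
                       (uint64_t)g_rand_int(rand);
        g_rand_free(rand);
        return key;
    }

    int main(void)
    {
        printf("HASHKEYR seed:  0x%016" PRIx64 "\n", random_key64());
        printf("HASHPKEYR seed: 0x%016" PRIx64 "\n", random_key64());
        return 0;
    }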
+static void register_power10_dexcr_sprs(CPUPPCState *env)
+{
+ spr_register(env, SPR_DEXCR, "DEXCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0);
+
+ spr_register(env, SPR_UDEXCR, "UDEXCR",
+ &spr_read_dexcr_ureg, SPR_NOACCESS,
+ &spr_read_dexcr_ureg, SPR_NOACCESS,
+ 0);
+
+ spr_register_hv(env, SPR_HDEXCR, "HDEXCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0);
+
+ spr_register(env, SPR_UHDEXCR, "UHDEXCR",
+ &spr_read_dexcr_ureg, SPR_NOACCESS,
+ &spr_read_dexcr_ureg, SPR_NOACCESS,
+ 0);
+}
+
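
UDEXCR and UHDEXCR are registered as problem-state aliases that read through spr_read_dexcr_ureg rather than exposing the full privileged DEXCR/HDEXCR. I have not verified exactly which bits that helper exposes, so the sketch below only shows the general pattern of a user alias returning a masked subset of a privileged register; the mask value is invented for the example:

    #include <assert.h>
    #include <stdint.h>

    /* Privileged register and an assumed mask of the bits userspace may see. */
    static uint64_t dexcr;
    #define DEXCR_USER_VISIBLE_MASK 0x00000000ffffffffull   /* illustrative only */

    static uint64_t read_dexcr(void)  { return dexcr; }                            /* privileged */
    static uint64_t read_udexcr(void) { return dexcr & DEXCR_USER_VISIBLE_MASK; }  /* user alias */

    int main(void)
    {
        dexcr = 0xdeadbeef00c0ffeeull;
        assert(read_dexcr()  == 0xdeadbeef00c0ffeeull);
        assert(read_udexcr() == 0x0000000000c0ffeeull);
        return 0;
    }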
+/*
+ * Initialize PMU counter overflow timers for Power8 and
+ * newer Power chips when using TCG.
+ */
+static void init_tcg_pmu_power8(CPUPPCState *env)
+{
+ /* Init PMU overflow timers */
+ if (tcg_enabled()) {
+ cpu_ppc_pmu_init(env);
+ }
+}
+
static void init_proc_book3s_common(CPUPPCState *env)
{
- register_ne_601_sprs(env);
- register_tbl(env);
- register_usprg3_sprs(env);
+ register_non_embedded_sprs(env);
register_book3s_altivec_sprs(env);
register_book3s_pmu_sup_sprs(env);
register_book3s_pmu_user_sprs(env);
@@ -7391,6 +5864,11 @@ static void init_proc_book3s_common(CPUPPCState *env)
* value is the one used by 74xx processors.
*/
vscr_init(env, 0x00010000);
+
+ spr_register(env, SPR_USPRG3, "USPRG3",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
}
static void init_proc_970(CPUPPCState *env)
@@ -7435,7 +5913,7 @@ POWERPC_FAMILY(970)(ObjectClass *oc, void *data)
PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
PPC_64B | PPC_ALTIVEC |
PPC_SEGMENT_64B | PPC_SLBI;
- pcc->insns_flags2 = PPC2_FP_CVT_S64;
+ pcc->insns_flags2 = PPC2_FP_CVT_S64 | PPC2_MEM_LWSYNC;
pcc->msr_mask = (1ull << MSR_SF) |
(1ull << MSR_VR) |
(1ull << MSR_POW) |
@@ -7452,7 +5930,7 @@ POWERPC_FAMILY(970)(ObjectClass *oc, void *data)
(1ull << MSR_PMM) |
(1ull << MSR_RI);
pcc->mmu_model = POWERPC_MMU_64B;
-#if defined(CONFIG_SOFTMMU)
+#if !defined(CONFIG_USER_ONLY)
pcc->hash64_opts = &ppc_hash64_opts_basic;
#endif
pcc->excp_model = POWERPC_EXCP_970;
@@ -7505,12 +5983,14 @@ POWERPC_FAMILY(POWER5P)(ObjectClass *oc, void *data)
PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
PPC_FLOAT_STFIWX |
+ PPC_FLOAT_EXT |
PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
PPC_MEM_SYNC | PPC_MEM_EIEIO |
PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
PPC_64B |
+ PPC_POPCNTB |
PPC_SEGMENT_64B | PPC_SLBI;
- pcc->insns_flags2 = PPC2_FP_CVT_S64;
+ pcc->insns_flags2 = PPC2_FP_CVT_S64 | PPC2_MEM_LWSYNC;
pcc->msr_mask = (1ull << MSR_SF) |
(1ull << MSR_VR) |
(1ull << MSR_POW) |
@@ -7529,7 +6009,7 @@ POWERPC_FAMILY(POWER5P)(ObjectClass *oc, void *data)
pcc->lpcr_mask = LPCR_RMLS | LPCR_ILE | LPCR_LPES0 | LPCR_LPES1 |
LPCR_RMI | LPCR_HDICE;
pcc->mmu_model = POWERPC_MMU_2_03;
-#if defined(CONFIG_SOFTMMU)
+#if !defined(CONFIG_USER_ONLY)
pcc->hash64_opts = &ppc_hash64_opts_basic;
pcc->lrg_decr_bits = 32;
#endif
@@ -7560,6 +6040,7 @@ static void init_proc_POWER7(CPUPPCState *env)
register_power5p_ear_sprs(env);
register_power5p_tb_sprs(env);
register_power6_common_sprs(env);
+ register_HEIR32_spr(env);
register_power6_dbg_sprs(env);
register_power7_book4_sprs(env);
@@ -7572,56 +6053,31 @@ static void init_proc_POWER7(CPUPPCState *env)
ppcPOWER7_irq_init(env_archcpu(env));
}
-static bool ppc_pvr_match_power7(PowerPCCPUClass *pcc, uint32_t pvr)
-{
- if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER7P_BASE) {
- return true;
- }
- if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER7_BASE) {
- return true;
- }
- return false;
-}
-
-static bool cpu_has_work_POWER7(CPUState *cs)
+static bool ppc_pvr_match_power7(PowerPCCPUClass *pcc, uint32_t pvr, bool best)
{
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
+ uint32_t base = pvr & CPU_POWERPC_POWER_SERVER_MASK;
+ uint32_t pcc_base = pcc->pvr & CPU_POWERPC_POWER_SERVER_MASK;
- if (cs->halted) {
- if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
- return false;
- }
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) &&
- (env->spr[SPR_LPCR] & LPCR_P7_PECE0)) {
+ if (!best) {
+ if (base == CPU_POWERPC_POWER7_BASE) {
return true;
}
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) &&
- (env->spr[SPR_LPCR] & LPCR_P7_PECE1)) {
- return true;
- }
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK)) &&
- (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) {
- return true;
- }
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HMI)) &&
- (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) {
- return true;
- }
- if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) {
+ if (base == CPU_POWERPC_POWER7P_BASE) {
return true;
}
+ }
+
+ if (base != pcc_base) {
return false;
- } else {
- return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD);
}
+
+ return true;
}
POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
- CPUClass *cc = CPU_CLASS(oc);
dc->fw_name = "PowerPC,POWER7";
dc->desc = "POWER7";
@@ -7630,7 +6086,6 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data)
pcc->pcr_supported = PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
pcc->init_proc = init_proc_POWER7;
pcc->check_pow = check_pow_nocheck;
- cc->has_work = cpu_has_work_POWER7;
pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
@@ -7648,7 +6103,7 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data)
PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 |
PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 |
PPC2_FP_TST_ISA206 | PPC2_FP_CVT_S64 |
- PPC2_PM_ISA206;
+ PPC2_PM_ISA206 | PPC2_MEM_LWSYNC | PPC2_BCDA_ISA206;
pcc->msr_mask = (1ull << MSR_SF) |
(1ull << MSR_VR) |
(1ull << MSR_VSX) |
@@ -7672,7 +6127,7 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data)
LPCR_LPES0 | LPCR_LPES1 | LPCR_HDICE;
pcc->lpcr_pm = LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2;
pcc->mmu_model = POWERPC_MMU_2_06;
-#if defined(CONFIG_SOFTMMU)
+#if !defined(CONFIG_USER_ONLY)
pcc->hash64_opts = &ppc_hash64_opts_POWER7;
pcc->lrg_decr_bits = 32;
#endif
@@ -7694,6 +6149,9 @@ static void init_proc_POWER8(CPUPPCState *env)
register_sdr1_sprs(env);
register_book3s_207_dbg_sprs(env);
+ /* Common TCG PMU */
+ init_tcg_pmu_power8(env);
+
/* POWER8 Specific Registers */
register_book3s_ids_sprs(env);
register_rmor_sprs(env);
@@ -7705,6 +6163,7 @@ static void init_proc_POWER8(CPUPPCState *env)
register_power5p_ear_sprs(env);
register_power5p_tb_sprs(env);
register_power6_common_sprs(env);
+ register_HEIR32_spr(env);
register_power6_dbg_sprs(env);
register_power8_tce_address_control_sprs(env);
register_power8_ids_sprs(env);
@@ -7729,67 +6188,33 @@ static void init_proc_POWER8(CPUPPCState *env)
ppcPOWER7_irq_init(env_archcpu(env));
}
-static bool ppc_pvr_match_power8(PowerPCCPUClass *pcc, uint32_t pvr)
+static bool ppc_pvr_match_power8(PowerPCCPUClass *pcc, uint32_t pvr, bool best)
{
- if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER8NVL_BASE) {
- return true;
- }
- if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER8E_BASE) {
- return true;
- }
- if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER8_BASE) {
- return true;
- }
- return false;
-}
-
-static bool cpu_has_work_POWER8(CPUState *cs)
-{
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
+ uint32_t base = pvr & CPU_POWERPC_POWER_SERVER_MASK;
+ uint32_t pcc_base = pcc->pvr & CPU_POWERPC_POWER_SERVER_MASK;
- if (cs->halted) {
- if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
- return false;
- }
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) &&
- (env->spr[SPR_LPCR] & LPCR_P8_PECE2)) {
- return true;
- }
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) &&
- (env->spr[SPR_LPCR] & LPCR_P8_PECE3)) {
- return true;
- }
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK)) &&
- (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) {
+ if (!best) {
+ if (base == CPU_POWERPC_POWER8_BASE) {
return true;
}
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HMI)) &&
- (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) {
+ if (base == CPU_POWERPC_POWER8E_BASE) {
return true;
}
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DOORBELL)) &&
- (env->spr[SPR_LPCR] & LPCR_P8_PECE0)) {
- return true;
- }
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HDOORBELL)) &&
- (env->spr[SPR_LPCR] & LPCR_P8_PECE1)) {
- return true;
- }
- if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) {
+ if (base == CPU_POWERPC_POWER8NVL_BASE) {
return true;
}
+ }
+ if (base != pcc_base) {
return false;
- } else {
- return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD);
}
+
+ return true;
}
POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
- CPUClass *cc = CPU_CLASS(oc);
dc->fw_name = "PowerPC,POWER8";
dc->desc = "POWER8";
@@ -7798,7 +6223,6 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
pcc->pcr_supported = PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
pcc->init_proc = init_proc_POWER8;
pcc->check_pow = check_pow_nocheck;
- cc->has_work = cpu_has_work_POWER8;
pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
@@ -7818,7 +6242,8 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 |
PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 |
- PPC2_TM | PPC2_PM_ISA206;
+ PPC2_TM | PPC2_PM_ISA206 | PPC2_MEM_LWSYNC |
+ PPC2_BCDA_ISA206;
pcc->msr_mask = (1ull << MSR_SF) |
(1ull << MSR_HV) |
(1ull << MSR_TM) |
@@ -7847,7 +6272,7 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
pcc->lpcr_pm = LPCR_P8_PECE0 | LPCR_P8_PECE1 | LPCR_P8_PECE2 |
LPCR_P8_PECE3 | LPCR_P8_PECE4;
pcc->mmu_model = POWERPC_MMU_2_07;
-#if defined(CONFIG_SOFTMMU)
+#if !defined(CONFIG_USER_ONLY)
pcc->hash64_opts = &ppc_hash64_opts_POWER7;
pcc->lrg_decr_bits = 32;
pcc->n_host_threads = 8;
@@ -7863,7 +6288,7 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
pcc->l1_icache_size = 0x8000;
}
-#ifdef CONFIG_SOFTMMU
+#ifndef CONFIG_USER_ONLY
/*
* Radix pg sizes and AP encodings for dt node ibm,processor-radix-AP-encodings
* Encoded as array of int_32s in the form:
@@ -7880,7 +6305,7 @@ static struct ppc_radix_page_info POWER9_radix_page_info = {
0x4000001e /* 1G - enc: 0x2 */
}
};
-#endif /* CONFIG_SOFTMMU */
+#endif /* CONFIG_USER_ONLY */
static void init_proc_POWER9(CPUPPCState *env)
{
@@ -7888,6 +6313,9 @@ static void init_proc_POWER9(CPUPPCState *env)
init_proc_book3s_common(env);
register_book3s_207_dbg_sprs(env);
+ /* Common TCG PMU */
+ init_tcg_pmu_power8(env);
+
/* POWER8 Specific Registers */
register_book3s_ids_sprs(env);
register_amr_sprs(env);
@@ -7898,6 +6326,7 @@ static void init_proc_POWER9(CPUPPCState *env)
register_power5p_ear_sprs(env);
register_power5p_tb_sprs(env);
register_power6_common_sprs(env);
+ register_HEIR32_spr(env);
register_power6_dbg_sprs(env);
register_power8_tce_address_control_sprs(env);
register_power8_ids_sprs(env);
@@ -7910,7 +6339,7 @@ static void init_proc_POWER9(CPUPPCState *env)
register_power8_dpdes_sprs(env);
register_vtb_sprs(env);
register_power8_ic_sprs(env);
- register_power8_book4_sprs(env);
+ register_power9_book4_sprs(env);
register_power8_rpr_sprs(env);
register_power9_mmu_sprs(env);
@@ -7933,77 +6362,47 @@ static void init_proc_POWER9(CPUPPCState *env)
ppcPOWER9_irq_init(env_archcpu(env));
}
-static bool ppc_pvr_match_power9(PowerPCCPUClass *pcc, uint32_t pvr)
+static bool ppc_pvr_match_power9(PowerPCCPUClass *pcc, uint32_t pvr, bool best)
{
- if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER9_BASE) {
- return true;
- }
- return false;
-}
+ uint32_t base = pvr & CPU_POWERPC_POWER_SERVER_MASK;
+ uint32_t pcc_base = pcc->pvr & CPU_POWERPC_POWER_SERVER_MASK;
-static bool cpu_has_work_POWER9(CPUState *cs)
-{
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
+ if (!best) {
+ if (base == CPU_POWERPC_POWER9_BASE) {
+ return true;
+ }
+ }
- if (cs->halted) {
- uint64_t psscr = env->spr[SPR_PSSCR];
+ if (base != pcc_base) {
+ return false;
+ }
- if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
- return false;
- }
+ if ((pvr & 0x0f00) != (pcc->pvr & 0x0f00)) {
+ /* Major DD version does not match */
+ return false;
+ }
- /* If EC is clear, just return true on any pending interrupt */
- if (!(psscr & PSSCR_EC)) {
- return true;
- }
- /* External Exception */
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) &&
- (env->spr[SPR_LPCR] & LPCR_EEE)) {
- bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
- if (heic == 0 || !msr_hv || msr_pr) {
+ if ((pvr & 0x0f00) == 0x200) {
+ if ((pvr & 0xf) < 2) {
+ /* DD2.0, DD2.1 match power9_v2.0 */
+ if ((pcc->pvr & 0xf) == 0) {
+ return true;
+ }
+ } else {
+ /* DD2.2, DD2.3 match power9_v2.2 */
+ if ((pcc->pvr & 0xf) == 2) {
return true;
}
}
- /* Decrementer Exception */
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) &&
- (env->spr[SPR_LPCR] & LPCR_DEE)) {
- return true;
- }
- /* Machine Check or Hypervisor Maintenance Exception */
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK |
- 1u << PPC_INTERRUPT_HMI)) && (env->spr[SPR_LPCR] & LPCR_OEE)) {
- return true;
- }
- /* Privileged Doorbell Exception */
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DOORBELL)) &&
- (env->spr[SPR_LPCR] & LPCR_PDEE)) {
- return true;
- }
- /* Hypervisor Doorbell Exception */
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HDOORBELL)) &&
- (env->spr[SPR_LPCR] & LPCR_HDEE)) {
- return true;
- }
- /* Hypervisor virtualization exception */
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HVIRT)) &&
- (env->spr[SPR_LPCR] & LPCR_HVEE)) {
- return true;
- }
- if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) {
- return true;
- }
- return false;
- } else {
- return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD);
}
+
+ return false;
}
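
The reworked ppc_pvr_match_power9() takes a new best flag: a non-best match accepts any POWER9-family PVR, while a best match additionally requires the same major DD level and maps DD2.0/2.1 onto the v2.0 CPU class and DD2.2/2.3 onto the v2.2 class. The sketch below reproduces that decision logic in isolation; the mask and base constants are assumed stand-ins for illustration, not copied from cpu-models.h:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SERVER_MASK 0xffff0000u  /* assumed stand-in for CPU_POWERPC_POWER_SERVER_MASK */
    #define POWER9_BASE 0x004e0000u  /* assumed stand-in for CPU_POWERPC_POWER9_BASE */

    static bool match_power9(uint32_t class_pvr, uint32_t pvr, bool best)
    {
        uint32_t base = pvr & SERVER_MASK;

        if (!best && base == POWER9_BASE) {
            return true;                            /* any POWER9 will do */
        }
        if (base != (class_pvr & SERVER_MASK)) {
            return false;
        }
        if ((pvr & 0x0f00) != (class_pvr & 0x0f00)) {
            return false;                           /* major DD level differs */
        }
        if ((pvr & 0x0f00) == 0x200) {
            /* DD2.0/2.1 -> v2.0 class; DD2.2/2.3 -> v2.2 class */
            return (pvr & 0xf) < 2 ? (class_pvr & 0xf) == 0
                                   : (class_pvr & 0xf) == 2;
        }
        return false;
    }

    int main(void)
    {
        /* Best-match a DD2.1 chip: only the v2.0 class accepts it. */
        printf("v2.0 vs DD2.1: %d\n", match_power9(0x004e1200, 0x004e1201, true));
        printf("v2.2 vs DD2.1: %d\n", match_power9(0x004e1202, 0x004e1201, true));
        return 0;
    }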
POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
- CPUClass *cc = CPU_CLASS(oc);
dc->fw_name = "PowerPC,POWER9";
dc->desc = "POWER9";
@@ -8013,7 +6412,6 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
PCR_COMPAT_2_05;
pcc->init_proc = init_proc_POWER9;
pcc->check_pow = check_pow_nocheck;
- cc->has_work = cpu_has_work_POWER9;
pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
@@ -8022,7 +6420,7 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
PPC_FLOAT_EXT |
PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
PPC_MEM_SYNC | PPC_MEM_EIEIO |
- PPC_MEM_TLBSYNC |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC |
PPC_SEGMENT_64B | PPC_SLBI |
PPC_POPCNTB | PPC_POPCNTWD |
@@ -8033,7 +6431,8 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 |
PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 |
- PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL;
+ PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL | PPC2_MEM_LWSYNC |
+ PPC2_BCDA_ISA206;
pcc->msr_mask = (1ull << MSR_SF) |
(1ull << MSR_HV) |
(1ull << MSR_TM) |
@@ -8061,7 +6460,7 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE;
pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE;
pcc->mmu_model = POWERPC_MMU_3_00;
-#if defined(CONFIG_SOFTMMU)
+#if !defined(CONFIG_USER_ONLY)
/* segment page size remain the same */
pcc->hash64_opts = &ppc_hash64_opts_POWER7;
pcc->radix_page_info = &POWER9_radix_page_info;
@@ -8079,7 +6478,7 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
pcc->l1_icache_size = 0x8000;
}
-#ifdef CONFIG_SOFTMMU
+#ifndef CONFIG_USER_ONLY
/*
* Radix pg sizes and AP encodings for dt node ibm,processor-radix-AP-encodings
* Encoded as array of int_32s in the form:
@@ -8096,7 +6495,7 @@ static struct ppc_radix_page_info POWER10_radix_page_info = {
0x4000001e /* 1G - enc: 0x2 */
}
};
-#endif /* CONFIG_SOFTMMU */
+#endif /* !CONFIG_USER_ONLY */
static void init_proc_POWER10(CPUPPCState *env)
{
@@ -8104,6 +6503,9 @@ static void init_proc_POWER10(CPUPPCState *env)
init_proc_book3s_common(env);
register_book3s_207_dbg_sprs(env);
+ /* Common TCG PMU */
+ init_tcg_pmu_power8(env);
+
/* POWER8 Specific Registers */
register_book3s_ids_sprs(env);
register_amr_sprs(env);
@@ -8112,7 +6514,9 @@ static void init_proc_POWER10(CPUPPCState *env)
register_power5p_common_sprs(env);
register_power5p_lpar_sprs(env);
register_power5p_ear_sprs(env);
+ register_power5p_tb_sprs(env);
register_power6_common_sprs(env);
+ register_HEIR64_spr(env);
register_power6_dbg_sprs(env);
register_power8_tce_address_control_sprs(env);
register_power8_ids_sprs(env);
@@ -8122,11 +6526,16 @@ static void init_proc_POWER10(CPUPPCState *env)
register_power8_pmu_user_sprs(env);
register_power8_tm_sprs(env);
register_power8_pspb_sprs(env);
+ register_power8_dpdes_sprs(env);
register_vtb_sprs(env);
register_power8_ic_sprs(env);
- register_power8_book4_sprs(env);
+ register_power9_book4_sprs(env);
register_power8_rpr_sprs(env);
register_power9_mmu_sprs(env);
+ register_power10_hash_sprs(env);
+ register_power10_dexcr_sprs(env);
+ register_power10_pmu_sup_sprs(env);
+ register_power10_pmu_user_sprs(env);
/* FIXME: Filter fields properly based on privilege level */
spr_register_kvm_hv(env, SPR_PSSCR, "PSSCR", NULL, NULL, NULL, NULL,
@@ -8142,77 +6551,33 @@ static void init_proc_POWER10(CPUPPCState *env)
ppcPOWER9_irq_init(env_archcpu(env));
}
-static bool ppc_pvr_match_power10(PowerPCCPUClass *pcc, uint32_t pvr)
-{
- if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER10_BASE) {
- return true;
- }
- return false;
-}
-
-static bool cpu_has_work_POWER10(CPUState *cs)
+static bool ppc_pvr_match_power10(PowerPCCPUClass *pcc, uint32_t pvr, bool best)
{
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
-
- if (cs->halted) {
- uint64_t psscr = env->spr[SPR_PSSCR];
+ uint32_t base = pvr & CPU_POWERPC_POWER_SERVER_MASK;
+ uint32_t pcc_base = pcc->pvr & CPU_POWERPC_POWER_SERVER_MASK;
- if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
- return false;
- }
-
- /* If EC is clear, just return true on any pending interrupt */
- if (!(psscr & PSSCR_EC)) {
- return true;
- }
- /* External Exception */
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) &&
- (env->spr[SPR_LPCR] & LPCR_EEE)) {
- bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
- if (heic == 0 || !msr_hv || msr_pr) {
- return true;
- }
- }
- /* Decrementer Exception */
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) &&
- (env->spr[SPR_LPCR] & LPCR_DEE)) {
- return true;
- }
- /* Machine Check or Hypervisor Maintenance Exception */
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK |
- 1u << PPC_INTERRUPT_HMI)) && (env->spr[SPR_LPCR] & LPCR_OEE)) {
- return true;
- }
- /* Privileged Doorbell Exception */
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DOORBELL)) &&
- (env->spr[SPR_LPCR] & LPCR_PDEE)) {
- return true;
- }
- /* Hypervisor Doorbell Exception */
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HDOORBELL)) &&
- (env->spr[SPR_LPCR] & LPCR_HDEE)) {
- return true;
- }
- /* Hypervisor virtualization exception */
- if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HVIRT)) &&
- (env->spr[SPR_LPCR] & LPCR_HVEE)) {
- return true;
- }
- if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) {
+ if (!best) {
+ if (base == CPU_POWERPC_POWER10_BASE) {
return true;
}
+ }
+
+ if (base != pcc_base) {
return false;
- } else {
- return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD);
}
+
+ if ((pvr & 0x0f00) == (pcc->pvr & 0x0f00)) {
+ /* Major DD version matches power10_v2.0 */
+ return true;
+ }
+
+ return false;
}
POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
- CPUClass *cc = CPU_CLASS(oc);
dc->fw_name = "PowerPC,POWER10";
dc->desc = "POWER10";
@@ -8223,7 +6588,6 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data)
PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
pcc->init_proc = init_proc_POWER10;
pcc->check_pow = check_pow_nocheck;
- cc->has_work = cpu_has_work_POWER10;
pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
@@ -8232,7 +6596,7 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data)
PPC_FLOAT_EXT |
PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
PPC_MEM_SYNC | PPC_MEM_EIEIO |
- PPC_MEM_TLBSYNC |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC |
PPC_SEGMENT_64B | PPC_SLBI |
PPC_POPCNTB | PPC_POPCNTWD |
@@ -8243,10 +6607,10 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data)
PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 |
PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 |
- PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL | PPC2_ISA310;
+ PPC2_ISA300 | PPC2_PRCNTL | PPC2_ISA310 |
+ PPC2_MEM_LWSYNC | PPC2_BCDA_ISA206;
pcc->msr_mask = (1ull << MSR_SF) |
(1ull << MSR_HV) |
- (1ull << MSR_TM) |
(1ull << MSR_VR) |
(1ull << MSR_VSX) |
(1ull << MSR_EE) |
@@ -8274,7 +6638,7 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data)
pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE;
pcc->mmu_model = POWERPC_MMU_3_00;
-#if defined(CONFIG_SOFTMMU)
+#if !defined(CONFIG_USER_ONLY)
/* segment page size remain the same */
pcc->hash64_opts = &ppc_hash64_opts_POWER7;
pcc->radix_page_info = &POWER10_radix_page_info;
@@ -8286,7 +6650,7 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data)
pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR |
- POWERPC_FLAG_VSX | POWERPC_FLAG_TM | POWERPC_FLAG_SCV;
+ POWERPC_FLAG_VSX | POWERPC_FLAG_SCV;
pcc->l1_dcache_size = 0x8000;
pcc->l1_icache_size = 0x8000;
}
@@ -8305,6 +6669,18 @@ void cpu_ppc_set_vhyp(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp)
env->msr_mask &= ~MSR_HVB;
}
+void cpu_ppc_set_1lpar(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+
+ /*
+ * pseries SMT means "LPAR per core" mode, e.g., msgsndp is usable
+ * between threads.
+ */
+ if (env->flags & POWERPC_FLAG_SMT) {
+ env->flags |= POWERPC_FLAG_SMT_1LPAR;
+ }
+}
#endif /* !defined(CONFIG_USER_ONLY) */
#endif /* defined(TARGET_PPC64) */
@@ -8318,7 +6694,6 @@ static void init_ppc_proc(PowerPCCPU *cpu)
#if !defined(CONFIG_USER_ONLY)
int i;
- env->irq_inputs = NULL;
/* Set all exception vectors to an invalid address */
for (i = 0; i < POWERPC_EXCP_NB; i++) {
env->excp_vectors[i] = (target_ulong)(-1ULL);
@@ -8332,38 +6707,11 @@ static void init_ppc_proc(PowerPCCPU *cpu)
env->tlb_type = TLB_NONE;
#endif
/* Register SPR common to all PowerPC implementations */
- register_generic_sprs(env);
- spr_register(env, SPR_PVR, "PVR",
- /* Linux permits userspace to read PVR */
-#if defined(CONFIG_LINUX_USER)
- &spr_read_generic,
-#else
- SPR_NOACCESS,
-#endif
- SPR_NOACCESS,
- &spr_read_generic, SPR_NOACCESS,
- pcc->pvr);
- /* Register SVR if it's defined to anything else than POWERPC_SVR_NONE */
- if (pcc->svr != POWERPC_SVR_NONE) {
- if (pcc->svr & POWERPC_SVR_E500) {
- spr_register(env, SPR_E500_SVR, "SVR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, SPR_NOACCESS,
- pcc->svr & ~POWERPC_SVR_E500);
- } else {
- spr_register(env, SPR_SVR, "SVR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, SPR_NOACCESS,
- pcc->svr);
- }
- }
+ register_generic_sprs(cpu);
+
/* PowerPC implementation specific initialisations (SPRs, timers, ...) */
(*pcc->init_proc)(env);
-#if !defined(CONFIG_USER_ONLY)
- ppc_gdb_gen_spr_xml(cpu);
-#endif
-
/* MSR bits & flags consistency checks */
if (env->msr_mask & (1 << 25)) {
switch (env->flags & (POWERPC_FLAG_SPE | POWERPC_FLAG_VRE)) {
@@ -8445,7 +6793,7 @@ static void init_ppc_proc(PowerPCCPU *cpu)
"Should not define POWERPC_FLAG_PX nor POWERPC_FLAG_PMM\n");
exit(1);
}
- if ((env->flags & (POWERPC_FLAG_RTC_CLK | POWERPC_FLAG_BUS_CLK)) == 0) {
+ if ((env->flags & POWERPC_FLAG_BUS_CLK) == 0) {
fprintf(stderr, "PowerPC flags inconsistency\n"
"Should define the time-base and decrementer clock source\n");
exit(1);
@@ -8471,10 +6819,6 @@ static void init_ppc_proc(PowerPCCPU *cpu)
/* Pre-compute some useful values */
env->tlb_per_way = env->nb_tlb / env->nb_ways;
}
- if (env->irq_inputs == NULL) {
- warn_report("no internal IRQ controller registered."
- " Attempt QEMU to crash very soon !");
- }
#endif
if (env->check_pow == NULL) {
warn_report("no power management check handler registered."
@@ -8487,6 +6831,7 @@ static void ppc_cpu_realize(DeviceState *dev, Error **errp)
{
CPUState *cs = CPU(dev);
PowerPCCPU *cpu = POWERPC_CPU(dev);
+ CPUPPCState *env = &cpu->env;
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
Error *local_err = NULL;
@@ -8518,6 +6863,10 @@ static void ppc_cpu_realize(DeviceState *dev, Error **errp)
pcc->parent_realize(dev, errp);
+ if (env_cpu(env)->nr_threads > 1) {
+ env->flags |= POWERPC_FLAG_SMT;
+ }
+
return;
unrealize:
@@ -8578,7 +6927,7 @@ static gint ppc_cpu_compare_class_pvr_mask(gconstpointer a, gconstpointer b)
return -1;
}
- if (pcc->pvr_match(pcc, pvr)) {
+ if (pcc->pvr_match(pcc, pvr, true)) {
return 0;
}
@@ -8613,7 +6962,7 @@ static const char *ppc_cpu_lookup_alias(const char *alias)
return NULL;
}
-static ObjectClass *ppc_cpu_class_by_name(const char *name)
+ObjectClass *ppc_cpu_class_by_name(const char *name)
{
char *cpu_model, *typename;
ObjectClass *oc;
@@ -8632,6 +6981,21 @@ static ObjectClass *ppc_cpu_class_by_name(const char *name)
}
}
+    /*
+     * All ppc CPUs represent hardware that exists in the real world, i.e. we
+     * do not have a "max" CPU with all possible emulated features enabled.
+     * Return the default CPU type for the machine because that has a greater
+     * chance of being useful as the "max" CPU.
+     */
+#if !defined(CONFIG_USER_ONLY)
+ if (strcmp(name, "max") == 0) {
+ MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
+ if (mc) {
+ return object_class_by_name(mc->default_cpu_type);
+ }
+ }
+#endif
+
cpu_model = g_ascii_strdown(name, -1);
p = ppc_cpu_lookup_alias(cpu_model);
if (p) {
@@ -8698,8 +7062,7 @@ static void ppc_cpu_list_entry(gpointer data, gpointer user_data)
return;
}
- name = g_strndup(typename,
- strlen(typename) - strlen(POWERPC_CPU_TYPE_SUFFIX));
+ name = cpu_model_from_type(typename);
qemu_printf("PowerPC %-16s PVR %08x\n", name, pcc->pvr);
for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
PowerPCCPUAlias *alias = &ppc_cpu_aliases[i];
@@ -8734,85 +7097,60 @@ void ppc_cpu_list(void)
#ifdef CONFIG_KVM
qemu_printf("\n");
- qemu_printf("PowerPC %-16s\n", "host");
+ qemu_printf("PowerPC %s\n", "host");
#endif
}
-static void ppc_cpu_defs_entry(gpointer data, gpointer user_data)
+static void ppc_cpu_set_pc(CPUState *cs, vaddr value)
{
- ObjectClass *oc = data;
- CpuDefinitionInfoList **first = user_data;
- const char *typename;
- CpuDefinitionInfo *info;
-
- typename = object_class_get_name(oc);
- info = g_malloc0(sizeof(*info));
- info->name = g_strndup(typename,
- strlen(typename) - strlen(POWERPC_CPU_TYPE_SUFFIX));
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
- QAPI_LIST_PREPEND(*first, info);
+ cpu->env.nip = value;
}
-CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
+static vaddr ppc_cpu_get_pc(CPUState *cs)
{
- CpuDefinitionInfoList *cpu_list = NULL;
- GSList *list;
- int i;
-
- list = object_class_get_list(TYPE_POWERPC_CPU, false);
- g_slist_foreach(list, ppc_cpu_defs_entry, &cpu_list);
- g_slist_free(list);
-
- for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
- PowerPCCPUAlias *alias = &ppc_cpu_aliases[i];
- ObjectClass *oc;
- CpuDefinitionInfo *info;
-
- oc = ppc_cpu_class_by_name(alias->model);
- if (oc == NULL) {
- continue;
- }
-
- info = g_malloc0(sizeof(*info));
- info->name = g_strdup(alias->alias);
- info->q_typename = g_strdup(object_class_get_name(oc));
-
- QAPI_LIST_PREPEND(cpu_list, info);
- }
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
- return cpu_list;
+ return cpu->env.nip;
}
-static void ppc_cpu_set_pc(CPUState *cs, vaddr value)
+#ifdef CONFIG_TCG
+static void ppc_restore_state_to_opc(CPUState *cs,
+ const TranslationBlock *tb,
+ const uint64_t *data)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
- cpu->env.nip = value;
+ cpu->env.nip = data[0];
}
+#endif /* CONFIG_TCG */
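
ppc_restore_state_to_opc() is the hook TCG uses to rebuild the guest PC when an exception unwinds in the middle of a translation block: the translator records one uint64_t (here, nip) per guest instruction, and the hook copies it back into the CPU state. A rough standalone sketch of that record-then-restore idea, with invented structures in place of QEMU's TranslationBlock machinery:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define MAX_INSNS 64

    /* Per-instruction data recorded while "translating" a block. */
    typedef struct {
        uint64_t insn_start_pc[MAX_INSNS];   /* what tcg_gen_insn_start() would record */
        size_t num_insns;
    } FakeTB;

    typedef struct {
        uint64_t nip;
    } FakeCPU;

    static void record_insn(FakeTB *tb, uint64_t pc)
    {
        tb->insn_start_pc[tb->num_insns++] = pc;
    }

    /* Analogue of restore_state_to_opc: put the faulting insn's PC back. */
    static void restore_state_to_opc(FakeCPU *cpu, const FakeTB *tb, size_t insn_idx)
    {
        cpu->nip = tb->insn_start_pc[insn_idx];
    }

    int main(void)
    {
        FakeTB tb = { .num_insns = 0 };
        FakeCPU cpu = { .nip = 0 };

        for (uint64_t pc = 0x1000; pc < 0x1010; pc += 4) {
            record_insn(&tb, pc);            /* "translate" 4 guest instructions */
        }
        restore_state_to_opc(&cpu, &tb, 2);  /* fault in the third instruction */
        assert(cpu.nip == 0x1008);
        return 0;
    }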
static bool ppc_cpu_has_work(CPUState *cs)
{
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
+ return cs->interrupt_request & CPU_INTERRUPT_HARD;
+}
- return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD);
+static int ppc_cpu_mmu_index(CPUState *cs, bool ifetch)
+{
+ return ppc_env_mmu_index(cpu_env(cs), ifetch);
}
-static void ppc_cpu_reset(DeviceState *dev)
+static void ppc_cpu_reset_hold(Object *obj)
{
- CPUState *s = CPU(dev);
- PowerPCCPU *cpu = POWERPC_CPU(s);
- PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ CPUState *cs = CPU(obj);
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(obj);
CPUPPCState *env = &cpu->env;
target_ulong msr;
int i;
- pcc->parent_reset(dev);
+ if (pcc->parent_phases.hold) {
+ pcc->parent_phases.hold(obj);
+ }
msr = (target_ulong)0;
msr |= (target_ulong)MSR_HVB;
- msr |= (target_ulong)0 << MSR_AP; /* TO BE CHECKED */
- msr |= (target_ulong)0 << MSR_SA; /* TO BE CHECKED */
msr |= (target_ulong)1 << MSR_EP;
#if defined(DO_SINGLE_STEP) && 0
/* Single step trace mode */
@@ -8830,7 +7168,7 @@ static void ppc_cpu_reset(DeviceState *dev)
#if defined(TARGET_PPC64)
msr |= (target_ulong)1 << MSR_TM; /* Transactional memory */
#endif
-#if !defined(TARGET_WORDS_BIGENDIAN)
+#if !TARGET_BIG_ENDIAN
msr |= (target_ulong)1 << MSR_LE; /* Little-endian user mode */
if (!((env->msr_mask >> MSR_LE) & 1)) {
fprintf(stderr, "Selected CPU does not support little-endian.\n");
@@ -8849,18 +7187,24 @@ static void ppc_cpu_reset(DeviceState *dev)
#if !defined(CONFIG_USER_ONLY)
env->nip = env->hreset_vector | env->excp_prefix;
-#if defined(CONFIG_TCG)
- if (env->mmu_model != POWERPC_MMU_REAL) {
- ppc_tlb_invalidate_all(env);
+
+ if (tcg_enabled()) {
+ cpu_breakpoint_remove_all(cs, BP_CPU);
+ cpu_watchpoint_remove_all(cs, BP_CPU);
+ if (env->mmu_model != POWERPC_MMU_REAL) {
+ ppc_tlb_invalidate_all(env);
+ }
+ pmu_mmcr01_updated(env);
}
-#endif /* CONFIG_TCG */
-#endif
+ /* clean any pending stop state */
+ env->resume_as_sreset = 0;
+#endif
hreg_compute_hflags(env);
env->reserve_addr = (target_ulong)-1ULL;
/* Be sure no exception or interrupt is pending */
env->pending_interrupts = 0;
- s->exception_index = POWERPC_EXCP_NONE;
+ cs->exception_index = POWERPC_EXCP_NONE;
env->error_code = 0;
ppc_irq_reset(cpu);
@@ -8882,12 +7226,19 @@ static void ppc_cpu_reset(DeviceState *dev)
static bool ppc_cpu_is_big_endian(CPUState *cs)
{
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
-
cpu_synchronize_state(cs);
- return !msr_le;
+ return !FIELD_EX64(cpu_env(cs)->msr, MSR, LE);
+}
+
+static bool ppc_get_irq_stats(InterruptStatsProvider *obj,
+ uint64_t **irq_counts, unsigned int *nb_irqs)
+{
+ CPUPPCState *env = &POWERPC_CPU(obj)->env;
+
+ *irq_counts = env->excp_stats;
+ *nb_irqs = ARRAY_SIZE(env->excp_stats);
+ return true;
}
#ifdef CONFIG_TCG
@@ -8922,7 +7273,6 @@ static void ppc_cpu_instance_init(Object *obj)
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
CPUPPCState *env = &cpu->env;
- cpu_set_cpustate_pointers(cpu);
cpu->vcpu_id = UNASSIGNED_CPU_INDEX;
env->msr_mask = pcc->msr_mask;
@@ -8958,15 +7308,14 @@ static void ppc_cpu_instance_finalize(Object *obj)
ppc_hash64_finalize(cpu);
}
-static bool ppc_pvr_match_default(PowerPCCPUClass *pcc, uint32_t pvr)
+static bool ppc_pvr_match_default(PowerPCCPUClass *pcc, uint32_t pvr, bool best)
{
return pcc->pvr == pvr;
}
static void ppc_disas_set_info(CPUState *cs, disassemble_info *info)
{
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
+ CPUPPCState *env = cpu_env(cs);
if ((env->hflags >> MSR_LE) & 1) {
info->endian = BFD_ENDIAN_LITTLE;
@@ -8979,8 +7328,6 @@ static void ppc_disas_set_info(CPUState *cs, disassemble_info *info)
info->mach = bfd_mach_ppc;
#endif
}
- info->disassembler_options = (char *)"any";
- info->print_insn = print_insn_ppc;
info->cap_arch = CS_ARCH_PPC;
#ifdef TARGET_PPC64
@@ -9012,16 +7359,23 @@ static const struct SysemuCPUOps ppc_sysemu_ops = {
#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"
-static const struct TCGCPUOps ppc_tcg_ops = {
+static const TCGCPUOps ppc_tcg_ops = {
.initialize = ppc_translate_init,
- .tlb_fill = ppc_cpu_tlb_fill,
+ .restore_state_to_opc = ppc_restore_state_to_opc,
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ .record_sigsegv = ppc_cpu_record_sigsegv,
+#else
+ .tlb_fill = ppc_cpu_tlb_fill,
.cpu_exec_interrupt = ppc_cpu_exec_interrupt,
.do_interrupt = ppc_cpu_do_interrupt,
.cpu_exec_enter = ppc_cpu_exec_enter,
.cpu_exec_exit = ppc_cpu_exec_exit,
.do_unaligned_access = ppc_cpu_do_unaligned_access,
+ .do_transaction_failed = ppc_cpu_do_transaction_failed,
+ .debug_excp_handler = ppc_cpu_debug_excp_handler,
+ .debug_check_breakpoint = ppc_cpu_debug_check_breakpoint,
+ .debug_check_watchpoint = ppc_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};
#endif /* CONFIG_TCG */
@@ -9031,6 +7385,7 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
CPUClass *cc = CPU_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
+ ResettableClass *rc = RESETTABLE_CLASS(oc);
device_class_set_parent_realize(dc, ppc_cpu_realize,
&pcc->parent_realize);
@@ -9039,22 +7394,23 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
pcc->pvr_match = ppc_pvr_match_default;
device_class_set_props(dc, ppc_cpu_properties);
- device_class_set_parent_reset(dc, ppc_cpu_reset, &pcc->parent_reset);
+ resettable_class_set_parent_phases(rc, NULL, ppc_cpu_reset_hold, NULL,
+ &pcc->parent_phases);
cc->class_by_name = ppc_cpu_class_by_name;
cc->has_work = ppc_cpu_has_work;
+ cc->mmu_index = ppc_cpu_mmu_index;
cc->dump_state = ppc_cpu_dump_state;
cc->set_pc = ppc_cpu_set_pc;
+ cc->get_pc = ppc_cpu_get_pc;
cc->gdb_read_register = ppc_cpu_gdb_read_register;
cc->gdb_write_register = ppc_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
cc->sysemu_ops = &ppc_sysemu_ops;
+ INTERRUPT_STATS_PROVIDER_CLASS(oc)->get_statistics = ppc_get_irq_stats;
#endif
cc->gdb_num_core_regs = 71;
-#ifndef CONFIG_USER_ONLY
- cc->gdb_get_dynamic_xml = ppc_gdb_get_dynamic_xml;
-#endif
#ifdef USE_APPLE_GDB
cc->gdb_read_register = ppc_cpu_gdb_read_register_apple;
cc->gdb_write_register = ppc_cpu_gdb_write_register_apple;
@@ -9086,6 +7442,12 @@ static const TypeInfo ppc_cpu_type_info = {
.abstract = true,
.class_size = sizeof(PowerPCCPUClass),
.class_init = ppc_cpu_class_init,
+#ifndef CONFIG_USER_ONLY
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_INTERRUPT_STATS_PROVIDER },
+ { }
+ },
+#endif
};
#ifndef CONFIG_USER_ONLY
@@ -9109,8 +7471,7 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
#define RGPL 4
#define RFPL 4
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
+ CPUPPCState *env = cpu_env(cs);
int i;
qemu_fprintf(f, "NIP " TARGET_FMT_lx " LR " TARGET_FMT_lx " CTR "
@@ -9120,18 +7481,16 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
qemu_fprintf(f, "MSR " TARGET_FMT_lx " HID0 " TARGET_FMT_lx " HF "
"%08x iidx %d didx %d\n",
env->msr, env->spr[SPR_HID0], env->hflags,
- cpu_mmu_index(env, true), cpu_mmu_index(env, false));
-#if !defined(NO_TIMER_DUMP)
- qemu_fprintf(f, "TB %08" PRIu32 " %08" PRIu64
-#if !defined(CONFIG_USER_ONLY)
- " DECR " TARGET_FMT_lu
-#endif
- "\n",
- cpu_ppc_load_tbu(env), cpu_ppc_load_tbl(env)
+ ppc_env_mmu_index(env, true), ppc_env_mmu_index(env, false));
#if !defined(CONFIG_USER_ONLY)
- , cpu_ppc_load_decr(env)
-#endif
- );
+ if (env->tb_env) {
+ qemu_fprintf(f, "TB %08" PRIu32 " %08" PRIu64
+ " DECR " TARGET_FMT_lu "\n", cpu_ppc_load_tbu(env),
+ cpu_ppc_load_tbl(env), cpu_ppc_load_decr(env));
+ }
+#else
+ qemu_fprintf(f, "TB %08" PRIu32 " %08" PRIu64 "\n", cpu_ppc_load_tbu(env),
+ cpu_ppc_load_tbl(env));
#endif
for (i = 0; i < 32; i++) {
if ((i & (RGPL - 1)) == 0) {
@@ -9157,8 +7516,8 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
}
qemu_fprintf(f, " %c%c", a, env->crf[i] & 0x01 ? 'O' : ' ');
}
- qemu_fprintf(f, " ] RES " TARGET_FMT_lx "\n",
- env->reserve_addr);
+ qemu_fprintf(f, " ] RES %03x@" TARGET_FMT_lx "\n",
+ (int)env->reserve_length, env->reserve_addr);
if (flags & CPU_DUMP_FPU) {
for (i = 0; i < 32; i++) {
@@ -9189,16 +7548,17 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
env->spr[SPR_SPRG4], env->spr[SPR_SPRG5],
env->spr[SPR_SPRG6], env->spr[SPR_SPRG7]);
+ switch (env->excp_model) {
#if defined(TARGET_PPC64)
- if (env->excp_model == POWERPC_EXCP_POWER7 ||
- env->excp_model == POWERPC_EXCP_POWER8 ||
- env->excp_model == POWERPC_EXCP_POWER9 ||
- env->excp_model == POWERPC_EXCP_POWER10) {
+ case POWERPC_EXCP_POWER7:
+ case POWERPC_EXCP_POWER8:
+ case POWERPC_EXCP_POWER9:
+ case POWERPC_EXCP_POWER10:
qemu_fprintf(f, "HSRR0 " TARGET_FMT_lx " HSRR1 " TARGET_FMT_lx "\n",
env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
- }
+ break;
#endif
- if (env->excp_model == POWERPC_EXCP_BOOKE) {
+ case POWERPC_EXCP_BOOKE:
qemu_fprintf(f, "CSRR0 " TARGET_FMT_lx " CSRR1 " TARGET_FMT_lx
" MCSRR0 " TARGET_FMT_lx " MCSRR1 " TARGET_FMT_lx "\n",
env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
@@ -9229,6 +7589,20 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
* IVORs are left out as they are large and do not change often --
* they can be read with "p $ivor0", "p $ivor1", etc.
*/
+ break;
+ case POWERPC_EXCP_40x:
+ qemu_fprintf(f, " TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx
+ " ESR " TARGET_FMT_lx " DEAR " TARGET_FMT_lx "\n",
+ env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR],
+ env->spr[SPR_40x_ESR], env->spr[SPR_40x_DEAR]);
+
+ qemu_fprintf(f, " EVPR " TARGET_FMT_lx " SRR2 " TARGET_FMT_lx
+ " SRR3 " TARGET_FMT_lx " PID " TARGET_FMT_lx "\n",
+ env->spr[SPR_40x_EVPR], env->spr[SPR_40x_SRR2],
+ env->spr[SPR_40x_SRR3], env->spr[SPR_40x_PID]);
+ break;
+ default:
+ break;
}
#if defined(TARGET_PPC64)
@@ -9243,9 +7617,7 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
switch (env->mmu_model) {
case POWERPC_MMU_32B:
- case POWERPC_MMU_601:
case POWERPC_MMU_SOFT_6xx:
- case POWERPC_MMU_SOFT_74xx:
#if defined(TARGET_PPC64)
case POWERPC_MMU_64B:
case POWERPC_MMU_2_03:
diff --git a/target/ppc/dfp_helper.c b/target/ppc/dfp_helper.c
index 07341a69f5..5967ea07a9 100644
--- a/target/ppc/dfp_helper.c
+++ b/target/ppc/dfp_helper.c
@@ -42,13 +42,21 @@ static void get_dfp128(ppc_vsr_t *dst, ppc_fprp_t *dfp)
static void set_dfp64(ppc_fprp_t *dfp, ppc_vsr_t *src)
{
- dfp->VsrD(0) = src->VsrD(1);
+ dfp[0].VsrD(0) = src->VsrD(1);
+ dfp[0].VsrD(1) = 0ULL;
}
static void set_dfp128(ppc_fprp_t *dfp, ppc_vsr_t *src)
{
dfp[0].VsrD(0) = src->VsrD(0);
dfp[1].VsrD(0) = src->VsrD(1);
+ dfp[0].VsrD(1) = 0ULL;
+ dfp[1].VsrD(1) = 0ULL;
+}
+
+static void set_dfp128_to_avr(ppc_avr_t *dst, ppc_vsr_t *src)
+{
+ *dst = *src;
}
struct PPC_DFP {
@@ -113,7 +121,7 @@ static void dfp_set_round_mode_from_immediate(uint8_t r, uint8_t rmc,
case 3: /* use FPSCR rounding mode */
return;
default:
- assert(0); /* cannot get here */
+ g_assert_not_reached();
}
} else { /* r == 1 */
switch (rmc & 3) {
@@ -130,7 +138,7 @@ static void dfp_set_round_mode_from_immediate(uint8_t r, uint8_t rmc,
rnd = DEC_ROUND_HALF_DOWN;
break;
default:
- assert(0); /* cannot get here */
+ g_assert_not_reached();
}
}
decContextSetRounding(&dfp->context, rnd);
@@ -440,8 +448,8 @@ static void ADD_PPs(struct PPC_DFP *dfp)
dfp_check_for_VXISI_add(dfp);
}
-DFP_HELPER_TAB(dadd, decNumberAdd, ADD_PPs, 64)
-DFP_HELPER_TAB(daddq, decNumberAdd, ADD_PPs, 128)
+DFP_HELPER_TAB(DADD, decNumberAdd, ADD_PPs, 64)
+DFP_HELPER_TAB(DADDQ, decNumberAdd, ADD_PPs, 128)
static void SUB_PPs(struct PPC_DFP *dfp)
{
@@ -453,8 +461,8 @@ static void SUB_PPs(struct PPC_DFP *dfp)
dfp_check_for_VXISI_subtract(dfp);
}
-DFP_HELPER_TAB(dsub, decNumberSubtract, SUB_PPs, 64)
-DFP_HELPER_TAB(dsubq, decNumberSubtract, SUB_PPs, 128)
+DFP_HELPER_TAB(DSUB, decNumberSubtract, SUB_PPs, 64)
+DFP_HELPER_TAB(DSUBQ, decNumberSubtract, SUB_PPs, 128)
static void MUL_PPs(struct PPC_DFP *dfp)
{
@@ -466,8 +474,8 @@ static void MUL_PPs(struct PPC_DFP *dfp)
dfp_check_for_VXIMZ(dfp);
}
-DFP_HELPER_TAB(dmul, decNumberMultiply, MUL_PPs, 64)
-DFP_HELPER_TAB(dmulq, decNumberMultiply, MUL_PPs, 128)
+DFP_HELPER_TAB(DMUL, decNumberMultiply, MUL_PPs, 64)
+DFP_HELPER_TAB(DMULQ, decNumberMultiply, MUL_PPs, 128)
static void DIV_PPs(struct PPC_DFP *dfp)
{
@@ -481,8 +489,8 @@ static void DIV_PPs(struct PPC_DFP *dfp)
dfp_check_for_VXIDI(dfp);
}
-DFP_HELPER_TAB(ddiv, decNumberDivide, DIV_PPs, 64)
-DFP_HELPER_TAB(ddivq, decNumberDivide, DIV_PPs, 128)
+DFP_HELPER_TAB(DDIV, decNumberDivide, DIV_PPs, 64)
+DFP_HELPER_TAB(DDIVQ, decNumberDivide, DIV_PPs, 128)
#define DFP_HELPER_BF_AB(op, dnop, postprocs, size) \
uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \
@@ -502,8 +510,8 @@ static void CMPU_PPs(struct PPC_DFP *dfp)
dfp_check_for_VXSNAN(dfp);
}
-DFP_HELPER_BF_AB(dcmpu, decNumberCompare, CMPU_PPs, 64)
-DFP_HELPER_BF_AB(dcmpuq, decNumberCompare, CMPU_PPs, 128)
+DFP_HELPER_BF_AB(DCMPU, decNumberCompare, CMPU_PPs, 64)
+DFP_HELPER_BF_AB(DCMPUQ, decNumberCompare, CMPU_PPs, 128)
static void CMPO_PPs(struct PPC_DFP *dfp)
{
@@ -513,8 +521,8 @@ static void CMPO_PPs(struct PPC_DFP *dfp)
dfp_check_for_VXVC(dfp);
}
-DFP_HELPER_BF_AB(dcmpo, decNumberCompare, CMPO_PPs, 64)
-DFP_HELPER_BF_AB(dcmpoq, decNumberCompare, CMPO_PPs, 128)
+DFP_HELPER_BF_AB(DCMPO, decNumberCompare, CMPO_PPs, 64)
+DFP_HELPER_BF_AB(DCMPOQ, decNumberCompare, CMPO_PPs, 128)
#define DFP_HELPER_TSTDC(op, size) \
uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \
@@ -541,8 +549,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \
return dfp.crbf; \
}
-DFP_HELPER_TSTDC(dtstdc, 64)
-DFP_HELPER_TSTDC(dtstdcq, 128)
+DFP_HELPER_TSTDC(DTSTDC, 64)
+DFP_HELPER_TSTDC(DTSTDCQ, 128)
#define DFP_HELPER_TSTDG(op, size) \
uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \
@@ -596,8 +604,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \
return dfp.crbf; \
}
-DFP_HELPER_TSTDG(dtstdg, 64)
-DFP_HELPER_TSTDG(dtstdgq, 128)
+DFP_HELPER_TSTDG(DTSTDG, 64)
+DFP_HELPER_TSTDG(DTSTDGQ, 128)
#define DFP_HELPER_TSTEX(op, size) \
uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \
@@ -628,8 +636,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \
return dfp.crbf; \
}
-DFP_HELPER_TSTEX(dtstex, 64)
-DFP_HELPER_TSTEX(dtstexq, 128)
+DFP_HELPER_TSTEX(DTSTEX, 64)
+DFP_HELPER_TSTEX(DTSTEXQ, 128)
#define DFP_HELPER_TSTSF(op, size) \
uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \
@@ -665,8 +673,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \
return dfp.crbf; \
}
-DFP_HELPER_TSTSF(dtstsf, 64)
-DFP_HELPER_TSTSF(dtstsfq, 128)
+DFP_HELPER_TSTSF(DTSTSF, 64)
+DFP_HELPER_TSTSF(DTSTSFQ, 128)
#define DFP_HELPER_TSTSFI(op, size) \
uint32_t helper_##op(CPUPPCState *env, uint32_t a, ppc_fprp_t *b) \
@@ -700,8 +708,8 @@ uint32_t helper_##op(CPUPPCState *env, uint32_t a, ppc_fprp_t *b) \
return dfp.crbf; \
}
-DFP_HELPER_TSTSFI(dtstsfi, 64)
-DFP_HELPER_TSTSFI(dtstsfiq, 128)
+DFP_HELPER_TSTSFI(DTSTSFI, 64)
+DFP_HELPER_TSTSFI(DTSTSFIQ, 128)
static void QUA_PPs(struct PPC_DFP *dfp)
{
@@ -746,8 +754,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \
set_dfp##size(t, &dfp.vt); \
}
-DFP_HELPER_QUAI(dquai, 64)
-DFP_HELPER_QUAI(dquaiq, 128)
+DFP_HELPER_QUAI(DQUAI, 64)
+DFP_HELPER_QUAI(DQUAIQ, 128)
#define DFP_HELPER_QUA(op, size) \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \
@@ -764,8 +772,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \
set_dfp##size(t, &dfp.vt); \
}
-DFP_HELPER_QUA(dqua, 64)
-DFP_HELPER_QUA(dquaq, 128)
+DFP_HELPER_QUA(DQUA, 64)
+DFP_HELPER_QUA(DQUAQ, 128)
static void _dfp_reround(uint8_t rmc, int32_t ref_sig, int32_t xmax,
struct PPC_DFP *dfp)
@@ -842,8 +850,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \
set_dfp##size(t, &dfp.vt); \
}
-DFP_HELPER_RRND(drrnd, 64)
-DFP_HELPER_RRND(drrndq, 128)
+DFP_HELPER_RRND(DRRND, 64)
+DFP_HELPER_RRND(DRRNDQ, 128)
#define DFP_HELPER_RINT(op, postprocs, size) \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \
@@ -868,8 +876,8 @@ static void RINTX_PPs(struct PPC_DFP *dfp)
dfp_check_for_VXSNAN(dfp);
}
-DFP_HELPER_RINT(drintx, RINTX_PPs, 64)
-DFP_HELPER_RINT(drintxq, RINTX_PPs, 128)
+DFP_HELPER_RINT(DRINTX, RINTX_PPs, 64)
+DFP_HELPER_RINT(DRINTXQ, RINTX_PPs, 128)
static void RINTN_PPs(struct PPC_DFP *dfp)
{
@@ -877,10 +885,10 @@ static void RINTN_PPs(struct PPC_DFP *dfp)
dfp_check_for_VXSNAN(dfp);
}
-DFP_HELPER_RINT(drintn, RINTN_PPs, 64)
-DFP_HELPER_RINT(drintnq, RINTN_PPs, 128)
+DFP_HELPER_RINT(DRINTN, RINTN_PPs, 64)
+DFP_HELPER_RINT(DRINTNQ, RINTN_PPs, 128)
-void helper_dctdp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
+void helper_DCTDP(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
{
struct PPC_DFP dfp;
ppc_vsr_t vb;
@@ -896,7 +904,7 @@ void helper_dctdp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
dfp_set_FPRF_from_FRT(&dfp);
}
-void helper_dctqpq(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
+void helper_DCTQPQ(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
{
struct PPC_DFP dfp;
ppc_vsr_t vb;
@@ -911,7 +919,7 @@ void helper_dctqpq(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
set_dfp128(t, &dfp.vt);
}
-void helper_drsp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
+void helper_DRSP(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
{
struct PPC_DFP dfp;
uint32_t t_short = 0;
@@ -929,7 +937,7 @@ void helper_drsp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
set_dfp64(t, &vt);
}
-void helper_drdpq(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
+void helper_DRDPQ(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b)
{
struct PPC_DFP dfp;
dfp_prepare_decimal128(&dfp, 0, b, env);
@@ -967,8 +975,20 @@ static void CFFIX_PPs(struct PPC_DFP *dfp)
dfp_check_for_XX(dfp);
}
-DFP_HELPER_CFFIX(dcffix, 64)
-DFP_HELPER_CFFIX(dcffixq, 128)
+DFP_HELPER_CFFIX(DCFFIX, 64)
+DFP_HELPER_CFFIX(DCFFIXQ, 128)
+
+void helper_DCFFIXQQ(CPUPPCState *env, ppc_fprp_t *t, ppc_avr_t *b)
+{
+ struct PPC_DFP dfp;
+
+ dfp_prepare_decimal128(&dfp, NULL, NULL, env);
+ decNumberFromInt128(&dfp.t, (uint64_t)b->VsrD(1), (int64_t)b->VsrD(0));
+ dfp_finalize_decimal128(&dfp);
+ CFFIX_PPs(&dfp);
+
+ set_dfp128(t, &dfp.vt);
+}
#define DFP_HELPER_CTFIX(op, size) \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \
@@ -1005,8 +1025,55 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \
set_dfp64(t, &dfp.vt); \
}
-DFP_HELPER_CTFIX(dctfix, 64)
-DFP_HELPER_CTFIX(dctfixq, 128)
+DFP_HELPER_CTFIX(DCTFIX, 64)
+DFP_HELPER_CTFIX(DCTFIXQ, 128)
+
+void helper_DCTFIXQQ(CPUPPCState *env, ppc_avr_t *t, ppc_fprp_t *b)
+{
+ struct PPC_DFP dfp;
+ dfp_prepare_decimal128(&dfp, 0, b, env);
+
+ if (unlikely(decNumberIsSpecial(&dfp.b))) {
+ uint64_t invalid_flags = FP_VX | FP_VXCVI;
+ if (decNumberIsInfinite(&dfp.b)) {
+ if (decNumberIsNegative(&dfp.b)) {
+ dfp.vt.VsrD(0) = INT64_MIN;
+ dfp.vt.VsrD(1) = 0;
+ } else {
+ dfp.vt.VsrD(0) = INT64_MAX;
+ dfp.vt.VsrD(1) = UINT64_MAX;
+ }
+ } else { /* NaN */
+ dfp.vt.VsrD(0) = INT64_MIN;
+ dfp.vt.VsrD(1) = 0;
+ if (decNumberIsSNaN(&dfp.b)) {
+ invalid_flags |= FP_VXSNAN;
+ }
+ }
+ dfp_set_FPSCR_flag(&dfp, invalid_flags, FP_VE);
+ } else if (unlikely(decNumberIsZero(&dfp.b))) {
+ dfp.vt.VsrD(0) = 0;
+ dfp.vt.VsrD(1) = 0;
+ } else {
+ decNumberToIntegralExact(&dfp.b, &dfp.b, &dfp.context);
+ decNumberIntegralToInt128(&dfp.b, &dfp.context,
+ &dfp.vt.VsrD(1), &dfp.vt.VsrD(0));
+ if (decContextTestStatus(&dfp.context, DEC_Invalid_operation)) {
+ if (decNumberIsNegative(&dfp.b)) {
+ dfp.vt.VsrD(0) = INT64_MIN;
+ dfp.vt.VsrD(1) = 0;
+ } else {
+ dfp.vt.VsrD(0) = INT64_MAX;
+ dfp.vt.VsrD(1) = UINT64_MAX;
+ }
+ dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FP_VE);
+ } else {
+ dfp_check_for_XX(&dfp);
+ }
+ }
+
+ set_dfp128_to_avr(t, &dfp.vt);
+}
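The saturation values written above are the most negative and most positive 128-bit two's-complement integers, kept as a high/low pair of 64-bit words exactly like dfp.vt.VsrD(0)/VsrD(1). A minimal standalone sketch of that pattern (the struct and helper names here are ad hoc, not QEMU code):

    #include <stdint.h>
    #include <stdbool.h>
    #include <inttypes.h>
    #include <stdio.h>

    /* High/low halves of a 128-bit signed result, as in VsrD(0)/VsrD(1). */
    struct i128 { uint64_t hi; uint64_t lo; };

    /* Saturate to the 128-bit minimum or maximum depending on the sign. */
    static struct i128 saturate_i128(bool negative)
    {
        struct i128 r;
        if (negative) {
            r.hi = (uint64_t)INT64_MIN;   /* 0x8000000000000000 */
            r.lo = 0;
        } else {
            r.hi = (uint64_t)INT64_MAX;   /* 0x7fffffffffffffff */
            r.lo = UINT64_MAX;
        }
        return r;
    }

    int main(void)
    {
        struct i128 min = saturate_i128(true), max = saturate_i128(false);
        printf("min = 0x%016" PRIx64 "%016" PRIx64 "\n", min.hi, min.lo);
        printf("max = 0x%016" PRIx64 "%016" PRIx64 "\n", max.hi, max.lo);
        return 0;
    }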
static inline void dfp_set_bcd_digit_64(ppc_vsr_t *t, uint8_t digit,
unsigned n)
@@ -1067,8 +1134,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \
set_dfp##size(t, &dfp.vt); \
}
-DFP_HELPER_DEDPD(ddedpd, 64)
-DFP_HELPER_DEDPD(ddedpdq, 128)
+DFP_HELPER_DEDPD(DDEDPD, 64)
+DFP_HELPER_DEDPD(DDEDPDQ, 128)
static inline uint8_t dfp_get_bcd_digit_64(ppc_vsr_t *t, unsigned n)
{
@@ -1080,6 +1147,26 @@ static inline uint8_t dfp_get_bcd_digit_128(ppc_vsr_t *t, unsigned n)
return t->VsrD((n & 0x10) ? 0 : 1) >> ((n << 2) & 63) & 15;
}
+static inline void dfp_invalid_op_vxcvi_64(struct PPC_DFP *dfp)
+{
+ /* TODO: fpscr is incorrectly not being saved to env */
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FPSCR_VE);
+ if ((dfp->env->fpscr & FP_VE) == 0) {
+ dfp->vt.VsrD(1) = 0x7c00000000000000; /* QNaN */
+ }
+}
+
+
+static inline void dfp_invalid_op_vxcvi_128(struct PPC_DFP *dfp)
+{
+ /* TODO: fpscr is incorrectly not being saved to env */
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FPSCR_VE);
+ if ((dfp->env->fpscr & FP_VE) == 0) {
+ dfp->vt.VsrD(0) = 0x7c00000000000000; /* QNaN */
+ dfp->vt.VsrD(1) = 0x0;
+ }
+}
+
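The 0x7c00000000000000 constant used for the default result is the quiet-NaN pattern of the decimal64/decimal128 interchange formats: sign 0, combination field G0..G4 all ones, and G5 (the signaling bit) clear. A small standalone check, for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t qnan = 0x7c00000000000000ULL;

        unsigned sign  = qnan >> 63;            /* sign bit            */
        unsigned g0_g4 = (qnan >> 58) & 0x1f;   /* combination G0..G4  */
        unsigned g5    = (qnan >> 57) & 1;      /* 0 = quiet NaN       */

        printf("sign=%u G0..G4=0x%x G5=%u\n", sign, g0_g4, g5); /* 0 0x1f 0 */
        return 0;
    }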
#define DFP_HELPER_ENBCD(op, size) \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \
uint32_t s) \
@@ -1106,7 +1193,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \
sgn = 0; \
break; \
default: \
- dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FPSCR_VE); \
+ dfp_invalid_op_vxcvi_##size(&dfp); \
+ set_dfp##size(t, &dfp.vt); \
return; \
} \
} \
@@ -1116,7 +1204,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \
digits[(size) / 4 - n] = dfp_get_bcd_digit_##size(&dfp.vb, \
offset++); \
if (digits[(size) / 4 - n] > 10) { \
- dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FPSCR_VE); \
+ dfp_invalid_op_vxcvi_##size(&dfp); \
+ set_dfp##size(t, &dfp.vt); \
return; \
} else { \
nonzero |= (digits[(size) / 4 - n] > 0); \
@@ -1135,8 +1224,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \
set_dfp##size(t, &dfp.vt); \
}
-DFP_HELPER_ENBCD(denbcd, 64)
-DFP_HELPER_ENBCD(denbcdq, 128)
+DFP_HELPER_ENBCD(DENBCD, 64)
+DFP_HELPER_ENBCD(DENBCDQ, 128)
#define DFP_HELPER_XEX(op, size) \
void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \
@@ -1169,8 +1258,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \
} \
}
-DFP_HELPER_XEX(dxex, 64)
-DFP_HELPER_XEX(dxexq, 128)
+DFP_HELPER_XEX(DXEX, 64)
+DFP_HELPER_XEX(DXEXQ, 128)
static void dfp_set_raw_exp_64(ppc_vsr_t *t, uint64_t raw)
{
@@ -1235,8 +1324,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \
set_dfp##size(t, &dfp.vt); \
}
-DFP_HELPER_IEX(diex, 64)
-DFP_HELPER_IEX(diexq, 128)
+DFP_HELPER_IEX(DIEX, 64)
+DFP_HELPER_IEX(DIEXQ, 128)
static void dfp_clear_lmd_from_g5msb(uint64_t *t)
{
@@ -1323,7 +1412,72 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \
set_dfp##size(t, &dfp.vt); \
}
-DFP_HELPER_SHIFT(dscli, 64, 1)
-DFP_HELPER_SHIFT(dscliq, 128, 1)
-DFP_HELPER_SHIFT(dscri, 64, 0)
-DFP_HELPER_SHIFT(dscriq, 128, 0)
+DFP_HELPER_SHIFT(DSCLI, 64, 1)
+DFP_HELPER_SHIFT(DSCLIQ, 128, 1)
+DFP_HELPER_SHIFT(DSCRI, 64, 0)
+DFP_HELPER_SHIFT(DSCRIQ, 128, 0)
+
+target_ulong helper_CDTBCD(target_ulong s)
+{
+ uint64_t res = 0;
+ uint32_t dec32, declets;
+ uint8_t bcd[6];
+ int i, w, sh;
+ decNumber a;
+
+ for (w = 1; w >= 0; w--) {
+ res <<= 32;
+ declets = extract64(s, 32 * w, 20);
+ if (declets) {
+ /* decimal32 with zero exponent and word "w" declets */
+ dec32 = (0x225ULL << 20) | declets;
+ decimal32ToNumber((decimal32 *)&dec32, &a);
+ decNumberGetBCD(&a, bcd);
+ for (i = 0; i < a.digits; i++) {
+ sh = 4 * (a.digits - 1 - i);
+ res |= (uint64_t)bcd[i] << sh;
+ }
+ }
+ }
+
+ return res;
+}
+
+target_ulong helper_CBCDTD(target_ulong s)
+{
+ uint64_t res = 0;
+ uint32_t dec32;
+ uint8_t bcd[6];
+ int w, i, offs;
+ decNumber a;
+ decContext context;
+
+ decContextDefault(&context, DEC_INIT_DECIMAL32);
+
+ for (w = 1; w >= 0; w--) {
+ res <<= 32;
+ decNumberZero(&a);
+ /* Extract each BCD field of word "w" */
+ for (i = 5; i >= 0; i--) {
+ offs = 4 * (5 - i) + 32 * w;
+ bcd[i] = extract64(s, offs, 4);
+ if (bcd[i] > 9) {
+ /*
+ * If the field value is greater than 9, the results are
+ * undefined. We could use a fixed value like 0 or 9, but
+ * an and with 9 seems to better match the hardware behavior.
+ */
+ bcd[i] &= 9;
+ }
+ }
+
+ /* Create a decNumber with the BCD values and convert to decimal32 */
+ decNumberSetBCD(&a, bcd, 6);
+ decimal32FromNumber((decimal32 *)&dec32, &a, &context);
+
+ /* Extract the two declets from the decimal32 value */
+ res |= dec32 & 0xfffff;
+ }
+
+ return res;
+}
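For each 32-bit word, CDTBCD decodes 20 declet bits into up to six decimal digits and packs them as BCD nibbles; CBCDTD goes the other way. A rough standalone illustration of the BCD packing that the loop over bcd[] produces (ad hoc helper, no libdecnumber involved):

    #include <stdint.h>
    #include <stdio.h>

    /* Pack the decimal digits of val (0..999999) into BCD nibbles,
     * least significant digit in the lowest nibble. */
    static uint32_t to_bcd(uint32_t val)
    {
        uint32_t res = 0;
        for (int sh = 0; val != 0; sh += 4, val /= 10) {
            res |= (val % 10) << sh;
        }
        return res;
    }

    int main(void)
    {
        printf("0x%x\n", to_bcd(123456));   /* prints 0x123456 */
        return 0;
    }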
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index d7e32ee107..674c05a2ce 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -18,31 +18,96 @@
*/
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
+#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "internal.h"
#include "helper_regs.h"
+#include "hw/ppc/ppc.h"
+
+#include "trace.h"
#ifdef CONFIG_TCG
+#include "sysemu/tcg.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#endif
-/* #define DEBUG_OP */
-/* #define DEBUG_SOFTWARE_TLB */
-/* #define DEBUG_EXCEPTIONS */
-
-#ifdef DEBUG_EXCEPTIONS
-# define LOG_EXCP(...) qemu_log(__VA_ARGS__)
-#else
-# define LOG_EXCP(...) do { } while (0)
-#endif
-
/*****************************************************************************/
/* Exception processing */
-#if !defined(CONFIG_USER_ONLY)
+#ifndef CONFIG_USER_ONLY
+
+static const char *powerpc_excp_name(int excp)
+{
+ switch (excp) {
+ case POWERPC_EXCP_CRITICAL: return "CRITICAL";
+ case POWERPC_EXCP_MCHECK: return "MCHECK";
+ case POWERPC_EXCP_DSI: return "DSI";
+ case POWERPC_EXCP_ISI: return "ISI";
+ case POWERPC_EXCP_EXTERNAL: return "EXTERNAL";
+ case POWERPC_EXCP_ALIGN: return "ALIGN";
+ case POWERPC_EXCP_PROGRAM: return "PROGRAM";
+ case POWERPC_EXCP_FPU: return "FPU";
+ case POWERPC_EXCP_SYSCALL: return "SYSCALL";
+ case POWERPC_EXCP_APU: return "APU";
+ case POWERPC_EXCP_DECR: return "DECR";
+ case POWERPC_EXCP_FIT: return "FIT";
+ case POWERPC_EXCP_WDT: return "WDT";
+ case POWERPC_EXCP_DTLB: return "DTLB";
+ case POWERPC_EXCP_ITLB: return "ITLB";
+ case POWERPC_EXCP_DEBUG: return "DEBUG";
+ case POWERPC_EXCP_SPEU: return "SPEU";
+ case POWERPC_EXCP_EFPDI: return "EFPDI";
+ case POWERPC_EXCP_EFPRI: return "EFPRI";
+ case POWERPC_EXCP_EPERFM: return "EPERFM";
+ case POWERPC_EXCP_DOORI: return "DOORI";
+ case POWERPC_EXCP_DOORCI: return "DOORCI";
+ case POWERPC_EXCP_GDOORI: return "GDOORI";
+ case POWERPC_EXCP_GDOORCI: return "GDOORCI";
+ case POWERPC_EXCP_HYPPRIV: return "HYPPRIV";
+ case POWERPC_EXCP_RESET: return "RESET";
+ case POWERPC_EXCP_DSEG: return "DSEG";
+ case POWERPC_EXCP_ISEG: return "ISEG";
+ case POWERPC_EXCP_HDECR: return "HDECR";
+ case POWERPC_EXCP_TRACE: return "TRACE";
+ case POWERPC_EXCP_HDSI: return "HDSI";
+ case POWERPC_EXCP_HISI: return "HISI";
+ case POWERPC_EXCP_HDSEG: return "HDSEG";
+ case POWERPC_EXCP_HISEG: return "HISEG";
+ case POWERPC_EXCP_VPU: return "VPU";
+ case POWERPC_EXCP_PIT: return "PIT";
+ case POWERPC_EXCP_EMUL: return "EMUL";
+ case POWERPC_EXCP_IFTLB: return "IFTLB";
+ case POWERPC_EXCP_DLTLB: return "DLTLB";
+ case POWERPC_EXCP_DSTLB: return "DSTLB";
+ case POWERPC_EXCP_FPA: return "FPA";
+ case POWERPC_EXCP_DABR: return "DABR";
+ case POWERPC_EXCP_IABR: return "IABR";
+ case POWERPC_EXCP_SMI: return "SMI";
+ case POWERPC_EXCP_PERFM: return "PERFM";
+ case POWERPC_EXCP_THERM: return "THERM";
+ case POWERPC_EXCP_VPUA: return "VPUA";
+ case POWERPC_EXCP_SOFTP: return "SOFTP";
+ case POWERPC_EXCP_MAINT: return "MAINT";
+ case POWERPC_EXCP_MEXTBR: return "MEXTBR";
+ case POWERPC_EXCP_NMEXTBR: return "NMEXTBR";
+ case POWERPC_EXCP_ITLBE: return "ITLBE";
+ case POWERPC_EXCP_DTLBE: return "DTLBE";
+ case POWERPC_EXCP_VSXU: return "VSXU";
+ case POWERPC_EXCP_FU: return "FU";
+ case POWERPC_EXCP_HV_EMU: return "HV_EMU";
+ case POWERPC_EXCP_HV_MAINT: return "HV_MAINT";
+ case POWERPC_EXCP_HV_FU: return "HV_FU";
+ case POWERPC_EXCP_SDOOR: return "SDOOR";
+ case POWERPC_EXCP_SDOOR_HV: return "SDOOR_HV";
+ case POWERPC_EXCP_HVIRT: return "HVIRT";
+ case POWERPC_EXCP_SYSCALL_VECTORED: return "SYSCALL_VECTORED";
+ default:
+ g_assert_not_reached();
+ }
+}
-static inline void dump_syscall(CPUPPCState *env)
+static void dump_syscall(CPUPPCState *env)
{
qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
" r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64
@@ -54,7 +119,7 @@ static inline void dump_syscall(CPUPPCState *env)
ppc_dump_gpr(env, 8), env->nip);
}
-static inline void dump_hcall(CPUPPCState *env)
+static void dump_hcall(CPUPPCState *env)
{
qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64
" r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
@@ -69,8 +134,60 @@ static inline void dump_hcall(CPUPPCState *env)
env->nip);
}
-static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
- target_ulong *msr)
+#ifdef CONFIG_TCG
+/* Return true iff byteswap is needed to load instruction */
+static inline bool insn_need_byteswap(CPUArchState *env)
+{
+ /* SYSTEM builds TARGET_BIG_ENDIAN. Need to swap when MSR[LE] is set */
+ return !!(env->msr & ((target_ulong)1 << MSR_LE));
+}
+
+static uint32_t ppc_ldl_code(CPUArchState *env, abi_ptr addr)
+{
+ uint32_t insn = cpu_ldl_code(env, addr);
+
+ if (insn_need_byteswap(env)) {
+ insn = bswap32(insn);
+ }
+
+ return insn;
+}
+#endif
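ppc_ldl_code() above reads an instruction word and byte-swaps it when MSR[LE] is set. A portable sketch of the same swap (illustrative only, not the bswap32() QEMU actually uses):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t bswap32_sketch(uint32_t x)
    {
        return (x >> 24) | ((x >> 8) & 0x0000ff00) |
               ((x << 8) & 0x00ff0000) | (x << 24);
    }

    int main(void)
    {
        uint32_t raw = 0x02000044;                 /* "sc" fetched byte-reversed */
        printf("0x%08x\n", bswap32_sketch(raw));   /* 0x44000002, the BE encoding */
        return 0;
    }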
+
+static void ppc_excp_debug_sw_tlb(CPUPPCState *env, int excp)
+{
+ const char *es;
+ target_ulong *miss, *cmp;
+ int en;
+
+ if (!qemu_loglevel_mask(CPU_LOG_MMU)) {
+ return;
+ }
+
+ if (excp == POWERPC_EXCP_IFTLB) {
+ es = "I";
+ en = 'I';
+ miss = &env->spr[SPR_IMISS];
+ cmp = &env->spr[SPR_ICMP];
+ } else {
+ if (excp == POWERPC_EXCP_DLTLB) {
+ es = "DL";
+ } else {
+ es = "DS";
+ }
+ en = 'D';
+ miss = &env->spr[SPR_DMISS];
+ cmp = &env->spr[SPR_DCMP];
+ }
+ qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
+ TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
+ TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
+ env->spr[SPR_HASH1], env->spr[SPR_HASH2],
+ env->error_code);
+}
+
+#ifdef TARGET_PPC64
+static int powerpc_reset_wakeup(CPUPPCState *env, int excp, target_ulong *msr)
{
/* We no longer are in a PM state */
env->resume_as_sreset = false;
@@ -105,8 +222,8 @@ static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
*msr |= SRR1_WAKEHVI;
break;
default:
- cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
- excp);
+ cpu_abort(env_cpu(env),
+ "Unsupported exception %d in Power Save mode\n", excp);
}
return POWERPC_EXCP_RESET;
}
@@ -167,12 +284,10 @@ static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp,
* | a | h | 11 | 1 | 1 | h |
* +--------------------------------------------------------------------+
*/
-static inline void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp,
- target_ulong msr,
- target_ulong *new_msr,
- target_ulong *vector)
+static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp, target_ulong msr,
+ target_ulong *new_msr, target_ulong *vector)
{
-#if defined(TARGET_PPC64)
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
CPUPPCState *env = &cpu->env;
bool mmu_all_on = ((msr >> MSR_IR) & 1) && ((msr >> MSR_DR) & 1);
bool hv_escalation = !(msr & MSR_HVB) && (*new_msr & MSR_HVB);
@@ -185,8 +300,13 @@ static inline void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp,
return;
}
- if (excp_model == POWERPC_EXCP_POWER8 ||
- excp_model == POWERPC_EXCP_POWER9) {
+ if (!(pcc->lpcr_mask & LPCR_AIL)) {
+ /* This CPU does not have AIL */
+ return;
+ }
+
+ /* P8 & P9 */
+ if (!(pcc->lpcr_mask & LPCR_HAIL)) {
if (!mmu_all_on) {
/* AIL only works if MSR[IR] and MSR[DR] are both enabled. */
return;
@@ -209,7 +329,8 @@ static inline void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp,
return;
}
- } else if (excp_model == POWERPC_EXCP_POWER10) {
+ /* P10 and up */
+ } else {
if (!mmu_all_on && !hv_escalation) {
/*
* AIL works for HV interrupts even with guest MSR[IR/DR] disabled.
@@ -234,9 +355,6 @@ static inline void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp,
/* AIL=1 and AIL=2 are reserved, treat them like AIL=0 */
return;
}
- } else {
- /* Other processors do not support AIL */
- return;
}
/*
@@ -261,206 +379,510 @@ static inline void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp,
*vector |= 0xc000000000003000ull; /* Apply scv's AIL=3 offset */
}
}
-#endif
}
+#endif /* TARGET_PPC64 */
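With LPCR[AIL]=3 the vector is relocated into the 0xc000000000004000 region (scv uses the 0xc000000000003000 offset applied above). A minimal sketch of that relocation, assuming a decrementer offset of 0x900:

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t vector = 0x900;              /* decrementer offset  */
        vector |= 0xc000000000004000ULL;      /* AIL=3 relocation    */
        printf("0x%016" PRIx64 "\n", vector); /* 0xc000000000004900  */
        return 0;
    }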
-static inline void powerpc_set_excp_state(PowerPCCPU *cpu,
- target_ulong vector, target_ulong msr)
+static void powerpc_reset_excp_state(PowerPCCPU *cpu)
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
+ /* Reset exception state */
+ cs->exception_index = POWERPC_EXCP_NONE;
+ env->error_code = 0;
+}
+
+static void powerpc_set_excp_state(PowerPCCPU *cpu, target_ulong vector,
+ target_ulong msr)
+{
+ CPUPPCState *env = &cpu->env;
+
+ assert((msr & env->msr_mask) == msr);
+
/*
* We don't use hreg_store_msr here as already have treated any
* special case that could occur. Just store MSR and update hflags
*
- * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
- * will prevent setting of the HV bit which some exceptions might need
- * to do.
+ * Note: We *MUST* not use hreg_store_msr() as-is anyway because it will
+ * prevent setting of the HV bit which some exceptions might need to do.
*/
- env->msr = msr & env->msr_mask;
- hreg_compute_hflags(env);
env->nip = vector;
- /* Reset exception state */
- cs->exception_index = POWERPC_EXCP_NONE;
- env->error_code = 0;
+ env->msr = msr;
+ hreg_compute_hflags(env);
+ ppc_maybe_interrupt(env);
- /* Reset the reservation */
- env->reserve_addr = -1;
+ powerpc_reset_excp_state(cpu);
/*
* Any interrupt is context synchronizing, check if TCG TLB needs
* a delayed flush on ppc64
*/
check_tlb_flush(env, false);
+
+ /* Reset the reservation */
+ env->reserve_addr = -1;
}
-/*
- * Note that this function should be greatly optimized when called
- * with a constant excp, from ppc_hw_interrupt
- */
-static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
+static void powerpc_mcheck_checkstop(CPUPPCState *env)
{
- CPUState *cs = CPU(cpu);
- CPUPPCState *env = &cpu->env;
- target_ulong msr, new_msr, vector;
- int srr0, srr1, asrr0, asrr1, lev = -1;
-
- qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
- " => %08x (%02x)\n", env->nip, excp, env->error_code);
+ CPUState *cs = env_cpu(env);
- /* new srr1 value excluding must-be-zero bits */
- if (excp_model == POWERPC_EXCP_BOOKE) {
- msr = env->msr;
- } else {
- msr = env->msr & ~0x783f0000ULL;
+ if (FIELD_EX64(env->msr, MSR, ME)) {
+ return;
}
- /*
- * new interrupt handler msr preserves existing HV and ME unless
- * explicitly overriden
- */
- new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);
+ /* Machine check exception is not enabled. Enter checkstop state. */
+ fprintf(stderr, "Machine check while not allowed. "
+ "Entering checkstop state\n");
+ if (qemu_log_separate()) {
+ qemu_log("Machine check while not allowed. "
+ "Entering checkstop state\n");
+ }
+ cs->halted = 1;
+ cpu_interrupt_exittb(cs);
+}
- /* target registers */
- srr0 = SPR_SRR0;
- srr1 = SPR_SRR1;
- asrr0 = -1;
- asrr1 = -1;
+static void powerpc_excp_40x(PowerPCCPU *cpu, int excp)
+{
+ CPUPPCState *env = &cpu->env;
+ target_ulong msr, new_msr, vector;
+ int srr0 = SPR_SRR0, srr1 = SPR_SRR1;
- /*
- * check for special resume at 0x100 from doze/nap/sleep/winkle on
- * P7/P8/P9
- */
- if (env->resume_as_sreset) {
- excp = powerpc_reset_wakeup(cs, env, excp, &msr);
- }
+ /* new srr1 value excluding must-be-zero bits */
+ msr = env->msr & ~0x783f0000ULL;
- /*
- * Hypervisor emulation assistance interrupt only exists on server
- * arch 2.05 server or later. We also don't want to generate it if
- * we don't have HVB in msr_mask (PAPR mode).
- */
- if (excp == POWERPC_EXCP_HV_EMU
-#if defined(TARGET_PPC64)
- && !(mmu_is_64bit(env->mmu_model) && (env->msr_mask & MSR_HVB))
-#endif /* defined(TARGET_PPC64) */
+ /* new interrupt handler msr preserves ME unless explicitly overridden */
+ new_msr = env->msr & (((target_ulong)1 << MSR_ME));
- ) {
+ /* HV emu assistance interrupt only exists on server arch 2.05 or later */
+ if (excp == POWERPC_EXCP_HV_EMU) {
excp = POWERPC_EXCP_PROGRAM;
}
+ vector = env->excp_vectors[excp];
+ if (vector == (target_ulong)-1ULL) {
+ cpu_abort(env_cpu(env),
+ "Raised an exception without defined vector %d\n", excp);
+ }
+ vector |= env->excp_prefix;
+
switch (excp) {
- case POWERPC_EXCP_NONE:
- /* Should never happen */
- return;
case POWERPC_EXCP_CRITICAL: /* Critical input */
- switch (excp_model) {
- case POWERPC_EXCP_40x:
- srr0 = SPR_40x_SRR2;
- srr1 = SPR_40x_SRR3;
+ srr0 = SPR_40x_SRR2;
+ srr1 = SPR_40x_SRR3;
+ break;
+ case POWERPC_EXCP_MCHECK: /* Machine check exception */
+ powerpc_mcheck_checkstop(env);
+ /* machine check exceptions don't have ME set */
+ new_msr &= ~((target_ulong)1 << MSR_ME);
+ srr0 = SPR_40x_SRR2;
+ srr1 = SPR_40x_SRR3;
+ break;
+ case POWERPC_EXCP_DSI: /* Data storage exception */
+ trace_ppc_excp_dsi(env->spr[SPR_40x_ESR], env->spr[SPR_40x_DEAR]);
+ break;
+ case POWERPC_EXCP_ISI: /* Instruction storage exception */
+ trace_ppc_excp_isi(msr, env->nip);
+ break;
+ case POWERPC_EXCP_EXTERNAL: /* External input */
+ break;
+ case POWERPC_EXCP_ALIGN: /* Alignment exception */
+ break;
+ case POWERPC_EXCP_PROGRAM: /* Program exception */
+ switch (env->error_code & ~0xF) {
+ case POWERPC_EXCP_FP:
+ if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
+ trace_ppc_excp_fp_ignore();
+ powerpc_reset_excp_state(cpu);
+ return;
+ }
+ env->spr[SPR_40x_ESR] = ESR_FP;
+ break;
+ case POWERPC_EXCP_INVAL:
+ trace_ppc_excp_inval(env->nip);
+ env->spr[SPR_40x_ESR] = ESR_PIL;
break;
- case POWERPC_EXCP_BOOKE:
- srr0 = SPR_BOOKE_CSRR0;
- srr1 = SPR_BOOKE_CSRR1;
+ case POWERPC_EXCP_PRIV:
+ env->spr[SPR_40x_ESR] = ESR_PPR;
break;
- case POWERPC_EXCP_G2:
+ case POWERPC_EXCP_TRAP:
+ env->spr[SPR_40x_ESR] = ESR_PTR;
break;
default:
- goto excp_invalid;
+ cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
+ env->error_code);
+ break;
}
break;
+ case POWERPC_EXCP_SYSCALL: /* System call exception */
+ dump_syscall(env);
+
+ /*
+ * We need to correct the NIP which in this case is supposed
+ * to point to the next instruction
+ */
+ env->nip += 4;
+ break;
+ case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
+ trace_ppc_excp_print("FIT");
+ break;
+ case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
+ trace_ppc_excp_print("WDT");
+ break;
+ case POWERPC_EXCP_DTLB: /* Data TLB error */
+ case POWERPC_EXCP_ITLB: /* Instruction TLB error */
+ break;
+ case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */
+ trace_ppc_excp_print("PIT");
+ break;
+ case POWERPC_EXCP_DEBUG: /* Debug interrupt */
+ cpu_abort(env_cpu(env), "%s exception not implemented\n",
+ powerpc_excp_name(excp));
+ break;
+ default:
+ cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
+ excp);
+ break;
+ }
+
+ env->spr[srr0] = env->nip;
+ env->spr[srr1] = msr;
+ powerpc_set_excp_state(cpu, vector, new_msr);
+}
+
+static void powerpc_excp_6xx(PowerPCCPU *cpu, int excp)
+{
+ CPUPPCState *env = &cpu->env;
+ target_ulong msr, new_msr, vector;
+
+ /* new srr1 value excluding must-be-zero bits */
+ msr = env->msr & ~0x783f0000ULL;
+
+ /* new interrupt handler msr preserves ME unless explicitly overridden */
+ new_msr = env->msr & ((target_ulong)1 << MSR_ME);
+
+ /* HV emu assistance interrupt only exists on server arch 2.05 or later */
+ if (excp == POWERPC_EXCP_HV_EMU) {
+ excp = POWERPC_EXCP_PROGRAM;
+ }
+
+ vector = env->excp_vectors[excp];
+ if (vector == (target_ulong)-1ULL) {
+ cpu_abort(env_cpu(env),
+ "Raised an exception without defined vector %d\n", excp);
+ }
+ vector |= env->excp_prefix;
+
+ switch (excp) {
+ case POWERPC_EXCP_CRITICAL: /* Critical input */
+ break;
case POWERPC_EXCP_MCHECK: /* Machine check exception */
- if (msr_me == 0) {
- /*
- * Machine check exception is not enabled. Enter
- * checkstop state.
- */
- fprintf(stderr, "Machine check while not allowed. "
- "Entering checkstop state\n");
- if (qemu_log_separate()) {
- qemu_log("Machine check while not allowed. "
- "Entering checkstop state\n");
+ powerpc_mcheck_checkstop(env);
+ /* machine check exceptions don't have ME set */
+ new_msr &= ~((target_ulong)1 << MSR_ME);
+ break;
+ case POWERPC_EXCP_DSI: /* Data storage exception */
+ trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
+ break;
+ case POWERPC_EXCP_ISI: /* Instruction storage exception */
+ trace_ppc_excp_isi(msr, env->nip);
+ msr |= env->error_code;
+ break;
+ case POWERPC_EXCP_EXTERNAL: /* External input */
+ break;
+ case POWERPC_EXCP_ALIGN: /* Alignment exception */
+ /* Get rS/rD and rA from faulting opcode */
+ /*
+ * Note: the opcode fields will not be set properly for a
+ * direct store load/store, but nobody cares as nobody
+ * actually uses direct store segments.
+ */
+ env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
+ break;
+ case POWERPC_EXCP_PROGRAM: /* Program exception */
+ switch (env->error_code & ~0xF) {
+ case POWERPC_EXCP_FP:
+ if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
+ trace_ppc_excp_fp_ignore();
+ powerpc_reset_excp_state(cpu);
+ return;
}
- cs->halted = 1;
- cpu_interrupt_exittb(cs);
- }
- if (env->msr_mask & MSR_HVB) {
/*
- * ISA specifies HV, but can be delivered to guest with HV
- * clear (e.g., see FWNMI in PAPR).
+ * NIP always points to the faulting instruction for FP exceptions,
+ * so always use store_next and claim we are precise in the MSR.
*/
- new_msr |= (target_ulong)MSR_HVB;
- }
-
- /* machine check exceptions don't have ME set */
- new_msr &= ~((target_ulong)1 << MSR_ME);
-
- /* XXX: should also have something loaded in DAR / DSISR */
- switch (excp_model) {
- case POWERPC_EXCP_40x:
- srr0 = SPR_40x_SRR2;
- srr1 = SPR_40x_SRR3;
+ msr |= 0x00100000;
break;
- case POWERPC_EXCP_BOOKE:
- /* FIXME: choose one or the other based on CPU type */
- srr0 = SPR_BOOKE_MCSRR0;
- srr1 = SPR_BOOKE_MCSRR1;
- asrr0 = SPR_BOOKE_CSRR0;
- asrr1 = SPR_BOOKE_CSRR1;
+ case POWERPC_EXCP_INVAL:
+ trace_ppc_excp_inval(env->nip);
+ msr |= 0x00080000;
+ break;
+ case POWERPC_EXCP_PRIV:
+ msr |= 0x00040000;
+ break;
+ case POWERPC_EXCP_TRAP:
+ msr |= 0x00020000;
break;
default:
+ /* Should never occur */
+ cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
+ env->error_code);
break;
}
break;
+ case POWERPC_EXCP_SYSCALL: /* System call exception */
+ dump_syscall(env);
+
+ /*
+ * We need to correct the NIP which in this case is supposed
+ * to point to the next instruction
+ */
+ env->nip += 4;
+ break;
+ case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
+ case POWERPC_EXCP_DECR: /* Decrementer exception */
+ break;
+ case POWERPC_EXCP_DTLB: /* Data TLB error */
+ case POWERPC_EXCP_ITLB: /* Instruction TLB error */
+ break;
+ case POWERPC_EXCP_RESET: /* System reset exception */
+ if (FIELD_EX64(env->msr, MSR, POW)) {
+ cpu_abort(env_cpu(env),
+ "Trying to deliver power-saving system reset exception "
+ "%d with no HV support\n", excp);
+ }
+ break;
+ case POWERPC_EXCP_TRACE: /* Trace exception */
+ break;
+ case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
+ case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
+ case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
+ /* Swap temporary saved registers with GPRs */
+ if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
+ new_msr |= (target_ulong)1 << MSR_TGPR;
+ hreg_swap_gpr_tgpr(env);
+ }
+
+ ppc_excp_debug_sw_tlb(env, excp);
+
+ msr |= env->crf[0] << 28;
+ msr |= env->error_code; /* key, D/I, S/L bits */
+ /* Set way using a LRU mechanism */
+ msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
+ break;
+ case POWERPC_EXCP_FPA: /* Floating-point assist exception */
+ case POWERPC_EXCP_DABR: /* Data address breakpoint */
+ case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
+ case POWERPC_EXCP_SMI: /* System management interrupt */
+ case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */
+ case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */
+ cpu_abort(env_cpu(env), "%s exception not implemented\n",
+ powerpc_excp_name(excp));
+ break;
+ default:
+ cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
+ excp);
+ break;
+ }
+
+ if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
+ new_msr |= (target_ulong)1 << MSR_LE;
+ }
+ env->spr[SPR_SRR0] = env->nip;
+ env->spr[SPR_SRR1] = msr;
+ powerpc_set_excp_state(cpu, vector, new_msr);
+}
+
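For the 6xx software TLB miss path above, the value saved to SRR1 carries CR0, the key/D-I/S-L bits from error_code, and a victim way chosen round-robin. A worked example of the way selection (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* 2-way TLB, last refill went to way 0: next victim is way 1,
         * reported at bit position 17 of the saved SRR1 value. */
        unsigned last_way = 0, nb_ways = 2;
        uint32_t srr1_way = ((last_way + 1) & (nb_ways - 1)) << 17;
        printf("0x%x\n", srr1_way);   /* 0x20000 */
        return 0;
    }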
+static void powerpc_excp_7xx(PowerPCCPU *cpu, int excp)
+{
+ CPUPPCState *env = &cpu->env;
+ target_ulong msr, new_msr, vector;
+
+ /* new srr1 value excluding must-be-zero bits */
+ msr = env->msr & ~0x783f0000ULL;
+
+ /* new interrupt handler msr preserves ME unless explicitly overridden */
+ new_msr = env->msr & ((target_ulong)1 << MSR_ME);
+
+ /* HV emu assistance interrupt only exists on server arch 2.05 or later */
+ if (excp == POWERPC_EXCP_HV_EMU) {
+ excp = POWERPC_EXCP_PROGRAM;
+ }
+
+ vector = env->excp_vectors[excp];
+ if (vector == (target_ulong)-1ULL) {
+ cpu_abort(env_cpu(env),
+ "Raised an exception without defined vector %d\n", excp);
+ }
+ vector |= env->excp_prefix;
+
+ switch (excp) {
+ case POWERPC_EXCP_MCHECK: /* Machine check exception */
+ powerpc_mcheck_checkstop(env);
+ /* machine check exceptions don't have ME set */
+ new_msr &= ~((target_ulong)1 << MSR_ME);
+ break;
case POWERPC_EXCP_DSI: /* Data storage exception */
- LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx
- "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]);
+ trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
break;
case POWERPC_EXCP_ISI: /* Instruction storage exception */
- LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx
- "\n", msr, env->nip);
+ trace_ppc_excp_isi(msr, env->nip);
msr |= env->error_code;
break;
case POWERPC_EXCP_EXTERNAL: /* External input */
+ break;
+ case POWERPC_EXCP_ALIGN: /* Alignment exception */
+ /* Get rS/rD and rA from faulting opcode */
+ /*
+ * Note: the opcode fields will not be set properly for a
+ * direct store load/store, but nobody cares as nobody
+ * actually uses direct store segments.
+ */
+ env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
+ break;
+ case POWERPC_EXCP_PROGRAM: /* Program exception */
+ switch (env->error_code & ~0xF) {
+ case POWERPC_EXCP_FP:
+ if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
+ trace_ppc_excp_fp_ignore();
+ powerpc_reset_excp_state(cpu);
+ return;
+ }
+ /*
+ * NIP always points to the faulting instruction for FP exceptions,
+ * so always use store_next and claim we are precise in the MSR.
+ */
+ msr |= 0x00100000;
+ break;
+ case POWERPC_EXCP_INVAL:
+ trace_ppc_excp_inval(env->nip);
+ msr |= 0x00080000;
+ break;
+ case POWERPC_EXCP_PRIV:
+ msr |= 0x00040000;
+ break;
+ case POWERPC_EXCP_TRAP:
+ msr |= 0x00020000;
+ break;
+ default:
+ /* Should never occur */
+ cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
+ env->error_code);
+ break;
+ }
+ break;
+ case POWERPC_EXCP_SYSCALL: /* System call exception */
{
- bool lpes0;
+ int lev = env->error_code;
- cs = CPU(cpu);
+ if (lev == 1 && cpu->vhyp) {
+ dump_hcall(env);
+ } else {
+ dump_syscall(env);
+ }
/*
- * Exception targeting modifiers
- *
- * LPES0 is supported on POWER7/8/9
- * LPES1 is not supported (old iSeries mode)
- *
- * On anything else, we behave as if LPES0 is 1
- * (externals don't alter MSR:HV)
+ * We need to correct the NIP which in this case is supposed
+ * to point to the next instruction
*/
-#if defined(TARGET_PPC64)
- if (excp_model == POWERPC_EXCP_POWER7 ||
- excp_model == POWERPC_EXCP_POWER8 ||
- excp_model == POWERPC_EXCP_POWER9 ||
- excp_model == POWERPC_EXCP_POWER10) {
- lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
- } else
-#endif /* defined(TARGET_PPC64) */
- {
- lpes0 = true;
- }
+ env->nip += 4;
- if (!lpes0) {
- new_msr |= (target_ulong)MSR_HVB;
- new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
- srr0 = SPR_HSRR0;
- srr1 = SPR_HSRR1;
+ /*
+ * The Virtual Open Firmware (VOF) relies on the 'sc 1'
+ * instruction to communicate with QEMU. The pegasos2 machine
+ * uses VOF and the 7xx CPUs, so although the 7xx don't have
+ * HV mode, we need to keep hypercall support.
+ */
+ if (lev == 1 && cpu->vhyp) {
+ PPCVirtualHypervisorClass *vhc =
+ PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+ vhc->hypercall(cpu->vhyp, cpu);
+ powerpc_reset_excp_state(cpu);
+ return;
}
- if (env->mpic_proxy) {
- /* IACK the IRQ on delivery */
- env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
+
+ break;
+ }
+ case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
+ case POWERPC_EXCP_DECR: /* Decrementer exception */
+ break;
+ case POWERPC_EXCP_RESET: /* System reset exception */
+ if (FIELD_EX64(env->msr, MSR, POW)) {
+ cpu_abort(env_cpu(env),
+ "Trying to deliver power-saving system reset exception "
+ "%d with no HV support\n", excp);
}
break;
+ case POWERPC_EXCP_TRACE: /* Trace exception */
+ break;
+ case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
+ case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
+ case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
+ ppc_excp_debug_sw_tlb(env, excp);
+ msr |= env->crf[0] << 28;
+ msr |= env->error_code; /* key, D/I, S/L bits */
+ /* Set way using a LRU mechanism */
+ msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
+ break;
+ case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
+ case POWERPC_EXCP_SMI: /* System management interrupt */
+ case POWERPC_EXCP_THERM: /* Thermal interrupt */
+ case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
+ cpu_abort(env_cpu(env), "%s exception not implemented\n",
+ powerpc_excp_name(excp));
+ break;
+ default:
+ cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
+ excp);
+ break;
+ }
+
+ if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
+ new_msr |= (target_ulong)1 << MSR_LE;
}
+ env->spr[SPR_SRR0] = env->nip;
+ env->spr[SPR_SRR1] = msr;
+ powerpc_set_excp_state(cpu, vector, new_msr);
+}
+
+static void powerpc_excp_74xx(PowerPCCPU *cpu, int excp)
+{
+ CPUPPCState *env = &cpu->env;
+ target_ulong msr, new_msr, vector;
+
+ /* new srr1 value excluding must-be-zero bits */
+ msr = env->msr & ~0x783f0000ULL;
+
+ /* new interrupt handler msr preserves ME unless explicitly overridden */
+ new_msr = env->msr & ((target_ulong)1 << MSR_ME);
+
+ /* HV emu assistance interrupt only exists on server arch 2.05 or later */
+ if (excp == POWERPC_EXCP_HV_EMU) {
+ excp = POWERPC_EXCP_PROGRAM;
+ }
+
+ vector = env->excp_vectors[excp];
+ if (vector == (target_ulong)-1ULL) {
+ cpu_abort(env_cpu(env),
+ "Raised an exception without defined vector %d\n", excp);
+ }
+ vector |= env->excp_prefix;
+
+ switch (excp) {
+ case POWERPC_EXCP_MCHECK: /* Machine check exception */
+ powerpc_mcheck_checkstop(env);
+ /* machine check exceptions don't have ME set */
+ new_msr &= ~((target_ulong)1 << MSR_ME);
+ break;
+ case POWERPC_EXCP_DSI: /* Data storage exception */
+ trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
+ break;
+ case POWERPC_EXCP_ISI: /* Instruction storage exception */
+ trace_ppc_excp_isi(msr, env->nip);
+ msr |= env->error_code;
+ break;
+ case POWERPC_EXCP_EXTERNAL: /* External input */
+ break;
case POWERPC_EXCP_ALIGN: /* Alignment exception */
/* Get rS/rD and rA from faulting opcode */
/*
@@ -473,45 +895,39 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
case POWERPC_EXCP_PROGRAM: /* Program exception */
switch (env->error_code & ~0xF) {
case POWERPC_EXCP_FP:
- if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
- LOG_EXCP("Ignore floating point exception\n");
- cs->exception_index = POWERPC_EXCP_NONE;
- env->error_code = 0;
+ if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
+ trace_ppc_excp_fp_ignore();
+ powerpc_reset_excp_state(cpu);
return;
}
-
/*
- * FP exceptions always have NIP pointing to the faulting
- * instruction, so always use store_next and claim we are
- * precise in the MSR.
+ * NIP always points to the faulting instruction for FP exceptions,
+ * so always use store_next and claim we are precise in the MSR.
*/
msr |= 0x00100000;
- env->spr[SPR_BOOKE_ESR] = ESR_FP;
break;
case POWERPC_EXCP_INVAL:
- LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
+ trace_ppc_excp_inval(env->nip);
msr |= 0x00080000;
- env->spr[SPR_BOOKE_ESR] = ESR_PIL;
break;
case POWERPC_EXCP_PRIV:
msr |= 0x00040000;
- env->spr[SPR_BOOKE_ESR] = ESR_PPR;
break;
case POWERPC_EXCP_TRAP:
msr |= 0x00020000;
- env->spr[SPR_BOOKE_ESR] = ESR_PTR;
break;
default:
/* Should never occur */
- cpu_abort(cs, "Invalid program exception %d. Aborting\n",
+ cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
env->error_code);
break;
}
break;
case POWERPC_EXCP_SYSCALL: /* System call exception */
- lev = env->error_code;
+ {
+ int lev = env->error_code;
- if ((lev == 1) && cpu->vhyp) {
+ if (lev == 1 && cpu->vhyp) {
dump_hcall(env);
} else {
dump_syscall(env);
@@ -523,23 +939,171 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
*/
env->nip += 4;
- /* "PAPR mode" built-in hypercall emulation */
- if ((lev == 1) && cpu->vhyp) {
+ /*
+ * The Virtual Open Firmware (VOF) relies on the 'sc 1'
+ * instruction to communicate with QEMU. The pegasos2 machine
+ * uses VOF and the 74xx CPUs, so although the 74xx don't have
+ * HV mode, we need to keep hypercall support.
+ */
+ if (lev == 1 && cpu->vhyp) {
PPCVirtualHypervisorClass *vhc =
PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
vhc->hypercall(cpu->vhyp, cpu);
+ powerpc_reset_excp_state(cpu);
return;
}
- if (lev == 1) {
- new_msr |= (target_ulong)MSR_HVB;
+
+ break;
+ }
+ case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
+ case POWERPC_EXCP_DECR: /* Decrementer exception */
+ break;
+ case POWERPC_EXCP_RESET: /* System reset exception */
+ if (FIELD_EX64(env->msr, MSR, POW)) {
+ cpu_abort(env_cpu(env),
+ "Trying to deliver power-saving system reset "
+ "exception %d with no HV support\n", excp);
}
break;
- case POWERPC_EXCP_SYSCALL_VECTORED: /* scv exception */
- lev = env->error_code;
+ case POWERPC_EXCP_TRACE: /* Trace exception */
+ break;
+ case POWERPC_EXCP_VPU: /* Vector unavailable exception */
+ break;
+ case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
+ case POWERPC_EXCP_SMI: /* System management interrupt */
+ case POWERPC_EXCP_THERM: /* Thermal interrupt */
+ case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
+ case POWERPC_EXCP_VPUA: /* Vector assist exception */
+ cpu_abort(env_cpu(env), "%s exception not implemented\n",
+ powerpc_excp_name(excp));
+ break;
+ default:
+ cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
+ excp);
+ break;
+ }
+
+ if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
+ new_msr |= (target_ulong)1 << MSR_LE;
+ }
+ env->spr[SPR_SRR0] = env->nip;
+ env->spr[SPR_SRR1] = msr;
+ powerpc_set_excp_state(cpu, vector, new_msr);
+}
+
+static void powerpc_excp_booke(PowerPCCPU *cpu, int excp)
+{
+ CPUPPCState *env = &cpu->env;
+ target_ulong msr, new_msr, vector;
+ int srr0 = SPR_SRR0, srr1 = SPR_SRR1;
+
+ /*
+ * Book E does not play games with certain bits of xSRR1 being MSR save
+ * bits and others being error status. xSRR1 is the old MSR, period.
+ */
+ msr = env->msr;
+
+ /* new interrupt handler msr preserves ME unless explicitly overridden */
+ new_msr = env->msr & ((target_ulong)1 << MSR_ME);
+
+ /* HV emu assistance interrupt only exists on server arch 2.05 or later */
+ if (excp == POWERPC_EXCP_HV_EMU) {
+ excp = POWERPC_EXCP_PROGRAM;
+ }
+
+#ifdef TARGET_PPC64
+ /*
+ * SPEU and VPU share the same IVOR but they exist in different
+ * processors. SPEU is e500v1/2 only and VPU is e6500 only.
+ */
+ if (excp == POWERPC_EXCP_VPU) {
+ excp = POWERPC_EXCP_SPEU;
+ }
+#endif
+
+ vector = env->excp_vectors[excp];
+ if (vector == (target_ulong)-1ULL) {
+ cpu_abort(env_cpu(env),
+ "Raised an exception without defined vector %d\n", excp);
+ }
+ vector |= env->excp_prefix;
+
+ switch (excp) {
+ case POWERPC_EXCP_CRITICAL: /* Critical input */
+ srr0 = SPR_BOOKE_CSRR0;
+ srr1 = SPR_BOOKE_CSRR1;
+ break;
+ case POWERPC_EXCP_MCHECK: /* Machine check exception */
+ powerpc_mcheck_checkstop(env);
+ /* machine check exceptions don't have ME set */
+ new_msr &= ~((target_ulong)1 << MSR_ME);
+
+ /* FIXME: choose one or the other based on CPU type */
+ srr0 = SPR_BOOKE_MCSRR0;
+ srr1 = SPR_BOOKE_MCSRR1;
+
+ env->spr[SPR_BOOKE_CSRR0] = env->nip;
+ env->spr[SPR_BOOKE_CSRR1] = msr;
+
+ break;
+ case POWERPC_EXCP_DSI: /* Data storage exception */
+ trace_ppc_excp_dsi(env->spr[SPR_BOOKE_ESR], env->spr[SPR_BOOKE_DEAR]);
+ break;
+ case POWERPC_EXCP_ISI: /* Instruction storage exception */
+ trace_ppc_excp_isi(msr, env->nip);
+ break;
+ case POWERPC_EXCP_EXTERNAL: /* External input */
+ if (env->mpic_proxy) {
+ CPUState *cs = env_cpu(env);
+ /* IACK the IRQ on delivery */
+ env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
+ }
+ break;
+ case POWERPC_EXCP_ALIGN: /* Alignment exception */
+ break;
+ case POWERPC_EXCP_PROGRAM: /* Program exception */
+ switch (env->error_code & ~0xF) {
+ case POWERPC_EXCP_FP:
+ if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
+ trace_ppc_excp_fp_ignore();
+ powerpc_reset_excp_state(cpu);
+ return;
+ }
+ /*
+ * NIP always points to the faulting instruction for FP exceptions,
+ * so always use store_next and claim we are precise in the MSR.
+ */
+ msr |= 0x00100000;
+ env->spr[SPR_BOOKE_ESR] = ESR_FP;
+ break;
+ case POWERPC_EXCP_INVAL:
+ trace_ppc_excp_inval(env->nip);
+ msr |= 0x00080000;
+ env->spr[SPR_BOOKE_ESR] = ESR_PIL;
+ break;
+ case POWERPC_EXCP_PRIV:
+ msr |= 0x00040000;
+ env->spr[SPR_BOOKE_ESR] = ESR_PPR;
+ break;
+ case POWERPC_EXCP_TRAP:
+ msr |= 0x00020000;
+ env->spr[SPR_BOOKE_ESR] = ESR_PTR;
+ break;
+ default:
+ /* Should never occur */
+ cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
+ env->error_code);
+ break;
+ }
+ break;
+ case POWERPC_EXCP_SYSCALL: /* System call exception */
dump_syscall(env);
+
+ /*
+ * We need to correct the NIP which in this case is supposed
+ * to point to the next instruction
+ */
env->nip += 4;
- new_msr |= env->msr & ((target_ulong)1 << MSR_EE);
- new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
break;
case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
@@ -547,18 +1111,12 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
break;
case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
/* FIT on 4xx */
- LOG_EXCP("FIT exception\n");
+ trace_ppc_excp_print("FIT");
break;
case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
- LOG_EXCP("WDT exception\n");
- switch (excp_model) {
- case POWERPC_EXCP_BOOKE:
- srr0 = SPR_BOOKE_CSRR0;
- srr1 = SPR_BOOKE_CSRR1;
- break;
- default:
- break;
- }
+ trace_ppc_excp_print("WDT");
+ srr0 = SPR_BOOKE_CSRR0;
+ srr1 = SPR_BOOKE_CSRR1;
break;
case POWERPC_EXCP_DTLB: /* Data TLB error */
case POWERPC_EXCP_ITLB: /* Instruction TLB error */
@@ -568,33 +1126,19 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
/* FIXME: choose one or the other based on CPU type */
srr0 = SPR_BOOKE_DSRR0;
srr1 = SPR_BOOKE_DSRR1;
- asrr0 = SPR_BOOKE_CSRR0;
- asrr1 = SPR_BOOKE_CSRR1;
+
+ env->spr[SPR_BOOKE_CSRR0] = env->nip;
+ env->spr[SPR_BOOKE_CSRR1] = msr;
+
/* DBSR already modified by caller */
} else {
- cpu_abort(cs, "Debug exception triggered on unsupported model\n");
+ cpu_abort(env_cpu(env),
+ "Debug exception triggered on unsupported model\n");
}
break;
- case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable */
+ case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable/VPU */
env->spr[SPR_BOOKE_ESR] = ESR_SPV;
break;
- case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */
- /* XXX: TODO */
- cpu_abort(cs, "Embedded floating point data exception "
- "is not implemented yet !\n");
- env->spr[SPR_BOOKE_ESR] = ESR_SPV;
- break;
- case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */
- /* XXX: TODO */
- cpu_abort(cs, "Embedded floating point round exception "
- "is not implemented yet !\n");
- env->spr[SPR_BOOKE_ESR] = ESR_SPV;
- break;
- case POWERPC_EXCP_EPERFM: /* Embedded performance monitor interrupt */
- /* XXX: TODO */
- cpu_abort(cs,
- "Performance counter exception is not implemented yet !\n");
- break;
case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
break;
case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
@@ -602,8 +1146,329 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
srr1 = SPR_BOOKE_CSRR1;
break;
case POWERPC_EXCP_RESET: /* System reset exception */
+ if (FIELD_EX64(env->msr, MSR, POW)) {
+ cpu_abort(env_cpu(env),
+ "Trying to deliver power-saving system reset "
+ "exception %d with no HV support\n", excp);
+ }
+ break;
+ case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */
+ case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */
+ cpu_abort(env_cpu(env), "%s exception not implemented\n",
+ powerpc_excp_name(excp));
+ break;
+ default:
+ cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
+ excp);
+ break;
+ }
+
+#ifdef TARGET_PPC64
+ if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
+ /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
+ new_msr |= (target_ulong)1 << MSR_CM;
+ } else {
+ vector = (uint32_t)vector;
+ }
+#endif
+
+ env->spr[srr0] = env->nip;
+ env->spr[srr1] = msr;
+ powerpc_set_excp_state(cpu, vector, new_msr);
+}
+
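When EPCR[ICM] is clear, the BookE interrupt is taken in 32-bit mode and the computed vector is truncated as above. A trivial sketch of that truncation (the example vector value is hypothetical):

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t vector = 0xffffffff00000700ULL;   /* hypothetical prefix|IVOR */
        vector = (uint32_t)vector;                 /* 32-bit computation mode  */
        printf("0x%016" PRIx64 "\n", vector);      /* 0x0000000000000700       */
        return 0;
    }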
+/*
+ * When running a nested HV guest under vhyp, external interrupts are
+ * delivered as HVIRT.
+ */
+static bool books_vhyp_promotes_external_to_hvirt(PowerPCCPU *cpu)
+{
+ if (cpu->vhyp) {
+ return vhyp_cpu_in_nested(cpu);
+ }
+ return false;
+}
+
+#ifdef TARGET_PPC64
+/*
+ * When running under vhyp, hcalls are always intercepted and sent to the
+ * vhc->hypercall handler.
+ */
+static bool books_vhyp_handles_hcall(PowerPCCPU *cpu)
+{
+ if (cpu->vhyp) {
+ return !vhyp_cpu_in_nested(cpu);
+ }
+ return false;
+}
+
+/*
+ * When running a nested KVM HV guest under vhyp, HV exceptions are not
+ * delivered to the guest (because there is no concept of HV support), but
+ * rather they are sent to the vhyp to exit from the L2 back to the L1 and
+ * return from the H_ENTER_NESTED hypercall.
+ */
+static bool books_vhyp_handles_hv_excp(PowerPCCPU *cpu)
+{
+ if (cpu->vhyp) {
+ return vhyp_cpu_in_nested(cpu);
+ }
+ return false;
+}
+
+#ifdef CONFIG_TCG
+static bool is_prefix_insn(CPUPPCState *env, uint32_t insn)
+{
+ if (!(env->insns_flags2 & PPC2_ISA310)) {
+ return false;
+ }
+ return ((insn & 0xfc000000) == 0x04000000);
+}
+
+static bool is_prefix_insn_excp(PowerPCCPU *cpu, int excp)
+{
+ CPUPPCState *env = &cpu->env;
+
+ if (!(env->insns_flags2 & PPC2_ISA310)) {
+ return false;
+ }
+
+ if (!tcg_enabled()) {
+ /*
+ * This does not load instructions and set the prefix bit correctly
+ * for injected interrupts with KVM. That may have to be discovered
+ * and set by the KVM layer before injecting.
+ */
+ return false;
+ }
+
+ switch (excp) {
+ case POWERPC_EXCP_MCHECK:
+ if (!(env->error_code & PPC_BIT(42))) {
+ /*
+ * Fetch attempt caused a machine check, so attempting to fetch
+ * again would cause a recursive machine check.
+ */
+ return false;
+ }
+ break;
+ case POWERPC_EXCP_HDSI:
+ /* HDSI PRTABLE_FAULT has the originating access type in error_code */
+ if ((env->spr[SPR_HDSISR] & DSISR_PRTABLE_FAULT) &&
+ (env->error_code == MMU_INST_FETCH)) {
+ /*
+ * Fetch failed due to partition scope translation, so prefix
+ * indication is not relevant (and attempting to load the
+ * instruction at NIP would cause recursive faults with the same
+ * translation).
+ */
+ return false;
+ }
+ break;
+
+ case POWERPC_EXCP_DSI:
+ case POWERPC_EXCP_DSEG:
+ case POWERPC_EXCP_ALIGN:
+ case POWERPC_EXCP_PROGRAM:
+ case POWERPC_EXCP_FPU:
+ case POWERPC_EXCP_TRACE:
+ case POWERPC_EXCP_HV_EMU:
+ case POWERPC_EXCP_VPU:
+ case POWERPC_EXCP_VSXU:
+ case POWERPC_EXCP_FU:
+ case POWERPC_EXCP_HV_FU:
+ break;
+ default:
+ return false;
+ }
+
+ return is_prefix_insn(env, ppc_ldl_code(env, env->nip));
+}
+#else
+static bool is_prefix_insn_excp(PowerPCCPU *cpu, int excp)
+{
+ return false;
+}
+#endif
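ISA v3.1 prefixed instructions are identified by primary opcode 1, which is exactly what the mask/compare in is_prefix_insn() tests. A standalone version for illustration:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool is_prefix_word(uint32_t insn)
    {
        return (insn & 0xfc000000) == 0x04000000;   /* insn >> 26 == 1 */
    }

    int main(void)
    {
        printf("%d\n", is_prefix_word(0x04000000)); /* 1: a prefix word     */
        printf("%d\n", is_prefix_word(0x60000000)); /* 0: ori r0,r0,0 (nop) */
        return 0;
    }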
+
+static void powerpc_excp_books(PowerPCCPU *cpu, int excp)
+{
+ CPUPPCState *env = &cpu->env;
+ target_ulong msr, new_msr, vector;
+ int srr0 = SPR_SRR0, srr1 = SPR_SRR1, lev = -1;
+
+ /* new srr1 value excluding must-be-zero bits */
+ msr = env->msr & ~0x783f0000ULL;
+
+ /*
+ * new interrupt handler msr preserves HV and ME unless explicitly
+ * overridden
+ */
+ new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);
+
+ /*
+ * check for special resume at 0x100 from doze/nap/sleep/winkle on
+ * P7/P8/P9
+ */
+ if (env->resume_as_sreset) {
+ excp = powerpc_reset_wakeup(env, excp, &msr);
+ }
+
+ /*
+ * We don't want to generate a Hypervisor Emulation Assistance
+ * Interrupt if we don't have HVB in msr_mask (PAPR mode),
+ * unless running a nested-hv guest, in which case the L1
+ * kernel wants the interrupt.
+ */
+ if (excp == POWERPC_EXCP_HV_EMU && !(env->msr_mask & MSR_HVB) &&
+ !books_vhyp_handles_hv_excp(cpu)) {
+ excp = POWERPC_EXCP_PROGRAM;
+ }
+
+ vector = env->excp_vectors[excp];
+ if (vector == (target_ulong)-1ULL) {
+ cpu_abort(env_cpu(env),
+ "Raised an exception without defined vector %d\n", excp);
+ }
+ vector |= env->excp_prefix;
+
+ if (is_prefix_insn_excp(cpu, excp)) {
+ msr |= PPC_BIT(34);
+ }
+
+ switch (excp) {
+ case POWERPC_EXCP_MCHECK: /* Machine check exception */
+ powerpc_mcheck_checkstop(env);
+ if (env->msr_mask & MSR_HVB) {
+ /*
+ * ISA specifies HV, but can be delivered to guest with HV
+ * clear (e.g., see FWNMI in PAPR).
+ */
+ new_msr |= (target_ulong)MSR_HVB;
+
+ /* HV machine check exceptions don't have ME set */
+ new_msr &= ~((target_ulong)1 << MSR_ME);
+ }
+
+ msr |= env->error_code;
+ break;
+
+ case POWERPC_EXCP_DSI: /* Data storage exception */
+ trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);
+ break;
+ case POWERPC_EXCP_ISI: /* Instruction storage exception */
+ trace_ppc_excp_isi(msr, env->nip);
+ msr |= env->error_code;
+ break;
+ case POWERPC_EXCP_EXTERNAL: /* External input */
+ {
+ bool lpes0;
+
+ /* LPES0 is only taken into consideration if we support HV mode */
+ if (!env->has_hv_mode) {
+ break;
+ }
+ lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
+ if (!lpes0) {
+ new_msr |= (target_ulong)MSR_HVB;
+ new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
+ srr0 = SPR_HSRR0;
+ srr1 = SPR_HSRR1;
+ }
+ break;
+ }
+ case POWERPC_EXCP_ALIGN: /* Alignment exception */
+ /* Optional DSISR update was removed from ISA v3.0 */
+ if (!(env->insns_flags2 & PPC2_ISA300)) {
+ /* Get rS/rD and rA from faulting opcode */
+ /*
+ * Note: the opcode fields will not be set properly for a
+ * direct store load/store, but nobody cares as nobody
+ * actually uses direct store segments.
+ */
+ env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
+ }
+ break;
+ case POWERPC_EXCP_PROGRAM: /* Program exception */
+ switch (env->error_code & ~0xF) {
+ case POWERPC_EXCP_FP:
+ if (!FIELD_EX64_FE(env->msr) || !FIELD_EX64(env->msr, MSR, FP)) {
+ trace_ppc_excp_fp_ignore();
+ powerpc_reset_excp_state(cpu);
+ return;
+ }
+ /*
+ * NIP always points to the faulting instruction for FP exceptions,
+ * so always use store_next and claim we are precise in the MSR.
+ */
+ msr |= 0x00100000;
+ break;
+ case POWERPC_EXCP_INVAL:
+ trace_ppc_excp_inval(env->nip);
+ msr |= 0x00080000;
+ break;
+ case POWERPC_EXCP_PRIV:
+ msr |= 0x00040000;
+ break;
+ case POWERPC_EXCP_TRAP:
+ msr |= 0x00020000;
+ break;
+ default:
+ /* Should never occur */
+ cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n",
+ env->error_code);
+ break;
+ }
+ break;
+ case POWERPC_EXCP_SYSCALL: /* System call exception */
+ lev = env->error_code;
+
+ if (lev == 1 && cpu->vhyp) {
+ dump_hcall(env);
+ } else {
+ dump_syscall(env);
+ }
+
+ /*
+ * We need to correct the NIP which in this case is supposed
+ * to point to the next instruction
+ */
+ env->nip += 4;
+
+ /* "PAPR mode" built-in hypercall emulation */
+ if (lev == 1 && books_vhyp_handles_hcall(cpu)) {
+ PPCVirtualHypervisorClass *vhc =
+ PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+ vhc->hypercall(cpu->vhyp, cpu);
+ powerpc_reset_excp_state(cpu);
+ return;
+ }
+ if (env->insns_flags2 & PPC2_ISA310) {
+ /* ISAv3.1 puts LEV into SRR1 */
+ msr |= lev << 20;
+ }
+ if (lev == 1) {
+ new_msr |= (target_ulong)MSR_HVB;
+ }
+ break;
+ case POWERPC_EXCP_SYSCALL_VECTORED: /* scv exception */
+ lev = env->error_code;
+ dump_syscall(env);
+ env->nip += 4;
+ new_msr |= env->msr & ((target_ulong)1 << MSR_EE);
+ new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
+
+ vector += lev * 0x20;
+
+ env->lr = env->nip;
+ env->ctr = msr;
+ break;
+ case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
+ case POWERPC_EXCP_DECR: /* Decrementer exception */
+ break;
+ case POWERPC_EXCP_RESET: /* System reset exception */
/* A power-saving exception sets ME, otherwise it is unchanged */
- if (msr_pow) {
+ if (FIELD_EX64(env->msr, MSR, POW)) {
/* indicate that we resumed from power save mode */
msr |= 0x10000;
new_msr |= ((target_ulong)1 << MSR_ME);
@@ -615,335 +1480,534 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
*/
new_msr |= (target_ulong)MSR_HVB;
} else {
- if (msr_pow) {
- cpu_abort(cs, "Trying to deliver power-saving system reset "
+ if (FIELD_EX64(env->msr, MSR, POW)) {
+ cpu_abort(env_cpu(env),
+ "Trying to deliver power-saving system reset "
"exception %d with no HV support\n", excp);
}
}
break;
+ case POWERPC_EXCP_TRACE: /* Trace exception */
+ msr |= env->error_code;
+ /* fall through */
case POWERPC_EXCP_DSEG: /* Data segment exception */
case POWERPC_EXCP_ISEG: /* Instruction segment exception */
- case POWERPC_EXCP_TRACE: /* Trace exception */
+ case POWERPC_EXCP_SDOOR: /* Doorbell interrupt */
+ case POWERPC_EXCP_PERFM: /* Performance monitor interrupt */
break;
case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */
msr |= env->error_code;
/* fall through */
case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
- case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */
- case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment exception */
case POWERPC_EXCP_SDOOR_HV: /* Hypervisor Doorbell interrupt */
- case POWERPC_EXCP_HV_EMU:
case POWERPC_EXCP_HVIRT: /* Hypervisor virtualization */
srr0 = SPR_HSRR0;
srr1 = SPR_HSRR1;
new_msr |= (target_ulong)MSR_HVB;
new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
break;
+#ifdef CONFIG_TCG
+ case POWERPC_EXCP_HV_EMU: {
+ uint32_t insn = ppc_ldl_code(env, env->nip);
+ env->spr[SPR_HEIR] = insn;
+ if (is_prefix_insn(env, insn)) {
+ uint32_t insn2 = ppc_ldl_code(env, env->nip + 4);
+ env->spr[SPR_HEIR] <<= 32;
+ env->spr[SPR_HEIR] |= insn2;
+ }
+ srr0 = SPR_HSRR0;
+ srr1 = SPR_HSRR1;
+ new_msr |= (target_ulong)MSR_HVB;
+ new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
+ break;
+ }
+#endif
case POWERPC_EXCP_VPU: /* Vector unavailable exception */
case POWERPC_EXCP_VSXU: /* VSX unavailable exception */
case POWERPC_EXCP_FU: /* Facility unavailable exception */
-#ifdef TARGET_PPC64
env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
-#endif
break;
case POWERPC_EXCP_HV_FU: /* Hypervisor Facility Unavailable Exception */
-#ifdef TARGET_PPC64
env->spr[SPR_HFSCR] |= ((target_ulong)env->error_code << FSCR_IC_POS);
srr0 = SPR_HSRR0;
srr1 = SPR_HSRR1;
new_msr |= (target_ulong)MSR_HVB;
new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
-#endif
break;
- case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */
- LOG_EXCP("PIT exception\n");
- break;
- case POWERPC_EXCP_IO: /* IO error exception */
- /* XXX: TODO */
- cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
- break;
- case POWERPC_EXCP_RUNM: /* Run mode exception */
- /* XXX: TODO */
- cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
+ case POWERPC_EXCP_PERFM_EBB: /* Performance Monitor EBB Exception */
+ case POWERPC_EXCP_EXTERNAL_EBB: /* External EBB Exception */
+ env->spr[SPR_BESCR] &= ~BESCR_GE;
+
+ /*
+ * Save NIP for rfebb insn in SPR_EBBRR. Next nip is
+ * stored in the EBB Handler SPR_EBBHR.
+ */
+ env->spr[SPR_EBBRR] = env->nip;
+ powerpc_set_excp_state(cpu, env->spr[SPR_EBBHR], env->msr);
+
+ /*
+ * This exception is handled in userspace. No need to proceed.
+ */
+ return;
+ case POWERPC_EXCP_THERM: /* Thermal interrupt */
+ case POWERPC_EXCP_VPUA: /* Vector assist exception */
+ case POWERPC_EXCP_MAINT: /* Maintenance exception */
+ case POWERPC_EXCP_HV_MAINT: /* Hypervisor Maintenance exception */
+ cpu_abort(env_cpu(env), "%s exception not implemented\n",
+ powerpc_excp_name(excp));
break;
- case POWERPC_EXCP_EMUL: /* Emulation trap exception */
- /* XXX: TODO */
- cpu_abort(cs, "602 emulation trap exception "
- "is not implemented yet !\n");
+ default:
+ cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
+ excp);
break;
- case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
- case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
- case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
- switch (excp_model) {
- case POWERPC_EXCP_602:
- case POWERPC_EXCP_603:
- case POWERPC_EXCP_603E:
- case POWERPC_EXCP_G2:
- /* Swap temporary saved registers with GPRs */
- if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
- new_msr |= (target_ulong)1 << MSR_TGPR;
- hreg_swap_gpr_tgpr(env);
- }
- /* fall through */
- case POWERPC_EXCP_7x5:
-#if defined(DEBUG_SOFTWARE_TLB)
- if (qemu_log_enabled()) {
- const char *es;
- target_ulong *miss, *cmp;
- int en;
-
- if (excp == POWERPC_EXCP_IFTLB) {
- es = "I";
- en = 'I';
- miss = &env->spr[SPR_IMISS];
- cmp = &env->spr[SPR_ICMP];
- } else {
- if (excp == POWERPC_EXCP_DLTLB) {
- es = "DL";
- } else {
- es = "DS";
- }
- en = 'D';
- miss = &env->spr[SPR_DMISS];
- cmp = &env->spr[SPR_DCMP];
- }
- qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
- TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
- TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
- env->spr[SPR_HASH1], env->spr[SPR_HASH2],
- env->error_code);
- }
-#endif
- msr |= env->crf[0] << 28;
- msr |= env->error_code; /* key, D/I, S/L bits */
- /* Set way using a LRU mechanism */
- msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
- break;
- case POWERPC_EXCP_74xx:
-#if defined(DEBUG_SOFTWARE_TLB)
- if (qemu_log_enabled()) {
- const char *es;
- target_ulong *miss, *cmp;
- int en;
-
- if (excp == POWERPC_EXCP_IFTLB) {
- es = "I";
- en = 'I';
- miss = &env->spr[SPR_TLBMISS];
- cmp = &env->spr[SPR_PTEHI];
- } else {
- if (excp == POWERPC_EXCP_DLTLB) {
- es = "DL";
- } else {
- es = "DS";
- }
- en = 'D';
- miss = &env->spr[SPR_TLBMISS];
- cmp = &env->spr[SPR_PTEHI];
- }
- qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
- TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
- env->error_code);
- }
-#endif
- msr |= env->error_code; /* key bit */
- break;
- default:
- cpu_abort(cs, "Invalid TLB miss exception\n");
- break;
+ }
+
+ if (ppc_interrupts_little_endian(cpu, !!(new_msr & MSR_HVB))) {
+ new_msr |= (target_ulong)1 << MSR_LE;
+ }
+ new_msr |= (target_ulong)1 << MSR_SF;
+
+ if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
+ env->spr[srr0] = env->nip;
+ env->spr[srr1] = msr;
+ }
+
+ if ((new_msr & MSR_HVB) && books_vhyp_handles_hv_excp(cpu)) {
+ PPCVirtualHypervisorClass *vhc =
+ PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+ /* Deliver interrupt to L1 by returning from the H_ENTER_NESTED call */
+ vhc->deliver_hv_excp(cpu, excp);
+ powerpc_reset_excp_state(cpu);
+ } else {
+ /* Sanity check */
+ if (!(env->msr_mask & MSR_HVB) && srr0 == SPR_HSRR0) {
+ cpu_abort(env_cpu(env), "Trying to deliver HV exception (HSRR) %d "
+ "with no HV support\n", excp);
}
+ /* This can update new_msr and vector if AIL applies */
+ ppc_excp_apply_ail(cpu, excp, msr, &new_msr, &vector);
+ powerpc_set_excp_state(cpu, vector, new_msr);
+ }
+}
+#else
+static inline void powerpc_excp_books(PowerPCCPU *cpu, int excp)
+{
+ g_assert_not_reached();
+}
+#endif /* TARGET_PPC64 */
+
+static void powerpc_excp(PowerPCCPU *cpu, int excp)
+{
+ CPUPPCState *env = &cpu->env;
+
+ if (excp <= POWERPC_EXCP_NONE || excp >= POWERPC_EXCP_NB) {
+ cpu_abort(env_cpu(env), "Invalid PowerPC exception %d. Aborting\n",
+ excp);
+ }
+
+ qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
+ " => %s (%d) error=%02x\n", env->nip, powerpc_excp_name(excp),
+ excp, env->error_code);
+ env->excp_stats[excp]++;
+
+ switch (env->excp_model) {
+ case POWERPC_EXCP_40x:
+ powerpc_excp_40x(cpu, excp);
break;
- case POWERPC_EXCP_FPA: /* Floating-point assist exception */
- /* XXX: TODO */
- cpu_abort(cs, "Floating point assist exception "
- "is not implemented yet !\n");
- break;
- case POWERPC_EXCP_DABR: /* Data address breakpoint */
- /* XXX: TODO */
- cpu_abort(cs, "DABR exception is not implemented yet !\n");
- break;
- case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
- /* XXX: TODO */
- cpu_abort(cs, "IABR exception is not implemented yet !\n");
- break;
- case POWERPC_EXCP_SMI: /* System management interrupt */
- /* XXX: TODO */
- cpu_abort(cs, "SMI exception is not implemented yet !\n");
- break;
- case POWERPC_EXCP_THERM: /* Thermal interrupt */
- /* XXX: TODO */
- cpu_abort(cs, "Thermal management exception "
- "is not implemented yet !\n");
- break;
- case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
- /* XXX: TODO */
- cpu_abort(cs,
- "Performance counter exception is not implemented yet !\n");
- break;
- case POWERPC_EXCP_VPUA: /* Vector assist exception */
- /* XXX: TODO */
- cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
+ case POWERPC_EXCP_6xx:
+ powerpc_excp_6xx(cpu, excp);
break;
- case POWERPC_EXCP_SOFTP: /* Soft patch exception */
- /* XXX: TODO */
- cpu_abort(cs,
- "970 soft-patch exception is not implemented yet !\n");
+ case POWERPC_EXCP_7xx:
+ powerpc_excp_7xx(cpu, excp);
break;
- case POWERPC_EXCP_MAINT: /* Maintenance exception */
- /* XXX: TODO */
- cpu_abort(cs,
- "970 maintenance exception is not implemented yet !\n");
+ case POWERPC_EXCP_74xx:
+ powerpc_excp_74xx(cpu, excp);
break;
- case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */
- /* XXX: TODO */
- cpu_abort(cs, "Maskable external exception "
- "is not implemented yet !\n");
+ case POWERPC_EXCP_BOOKE:
+ powerpc_excp_booke(cpu, excp);
break;
- case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */
- /* XXX: TODO */
- cpu_abort(cs, "Non maskable external exception "
- "is not implemented yet !\n");
+ case POWERPC_EXCP_970:
+ case POWERPC_EXCP_POWER7:
+ case POWERPC_EXCP_POWER8:
+ case POWERPC_EXCP_POWER9:
+ case POWERPC_EXCP_POWER10:
+ powerpc_excp_books(cpu, excp);
break;
default:
- excp_invalid:
- cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
- break;
+ g_assert_not_reached();
}
+}
- /* Sanity check */
- if (!(env->msr_mask & MSR_HVB)) {
- if (new_msr & MSR_HVB) {
- cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
- "no HV support\n", excp);
- }
- if (srr0 == SPR_HSRR0) {
- cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
- "no HV support\n", excp);
+void ppc_cpu_do_interrupt(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+
+ powerpc_excp(cpu, cs->exception_index);
+}
+
+#ifdef TARGET_PPC64
+#define P7_UNUSED_INTERRUPTS \
+ (PPC_INTERRUPT_RESET | PPC_INTERRUPT_HVIRT | PPC_INTERRUPT_CEXT | \
+ PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \
+ PPC_INTERRUPT_PIT | PPC_INTERRUPT_DOORBELL | PPC_INTERRUPT_HDOORBELL | \
+ PPC_INTERRUPT_THERM | PPC_INTERRUPT_EBB)
+
+static int p7_interrupt_powersave(CPUPPCState *env)
+{
+ if ((env->pending_interrupts & PPC_INTERRUPT_EXT) &&
+ (env->spr[SPR_LPCR] & LPCR_P7_PECE0)) {
+ return PPC_INTERRUPT_EXT;
+ }
+ if ((env->pending_interrupts & PPC_INTERRUPT_DECR) &&
+ (env->spr[SPR_LPCR] & LPCR_P7_PECE1)) {
+ return PPC_INTERRUPT_DECR;
+ }
+ if ((env->pending_interrupts & PPC_INTERRUPT_MCK) &&
+ (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) {
+ return PPC_INTERRUPT_MCK;
+ }
+ if ((env->pending_interrupts & PPC_INTERRUPT_HMI) &&
+ (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) {
+ return PPC_INTERRUPT_HMI;
+ }
+ if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
+ return PPC_INTERRUPT_RESET;
+ }
+ return 0;
+}
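(Illustration, not part of the patch: a minimal sketch of how the LPCR[PECE] gating above behaves for a halted POWER7 core, assuming only the decrementer is pending.)

    /* Hypothetical sketch: DECR can only wake the core when PECE1 is set */
    env->pending_interrupts = PPC_INTERRUPT_DECR;
    env->spr[SPR_LPCR] &= ~LPCR_P7_PECE1;
    assert(p7_interrupt_powersave(env) == 0);                   /* stays halted */
    env->spr[SPR_LPCR] |= LPCR_P7_PECE1;
    assert(p7_interrupt_powersave(env) == PPC_INTERRUPT_DECR);  /* wakes up */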
+
+static int p7_next_unmasked_interrupt(CPUPPCState *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ /* Ignore MSR[EE] when coming out of some power management states */
+ bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
+
+ assert((env->pending_interrupts & P7_UNUSED_INTERRUPTS) == 0);
+
+ if (cs->halted) {
+ /* LPCR[PECE] controls which interrupts can exit power-saving mode */
+ return p7_interrupt_powersave(env);
+ }
+
+ /* Machine check exception */
+ if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
+ return PPC_INTERRUPT_MCK;
+ }
+
+ /* Hypervisor decrementer exception */
+ if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
+ /* LPCR will be clear when not supported so this will work */
+ bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
+ if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) {
+ /* HDEC clears on delivery */
+ return PPC_INTERRUPT_HDECR;
}
}
- /*
- * Sort out endianness of interrupt, this differs depending on the
- * CPU, the HV mode, etc...
- */
-#ifdef TARGET_PPC64
- if (excp_model == POWERPC_EXCP_POWER7) {
- if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
- new_msr |= (target_ulong)1 << MSR_LE;
- }
- } else if (excp_model == POWERPC_EXCP_POWER8) {
- if (new_msr & MSR_HVB) {
- if (env->spr[SPR_HID0] & HID0_HILE) {
- new_msr |= (target_ulong)1 << MSR_LE;
- }
- } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
- new_msr |= (target_ulong)1 << MSR_LE;
- }
- } else if (excp_model == POWERPC_EXCP_POWER9 ||
- excp_model == POWERPC_EXCP_POWER10) {
- if (new_msr & MSR_HVB) {
- if (env->spr[SPR_HID0] & HID0_POWER9_HILE) {
- new_msr |= (target_ulong)1 << MSR_LE;
- }
- } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
- new_msr |= (target_ulong)1 << MSR_LE;
+ /* External interrupt can ignore MSR:EE under some circumstances */
+ if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
+ bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
+ bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
+ /* HEIC blocks delivery to the hypervisor */
+ if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) &&
+ !FIELD_EX64(env->msr, MSR, PR))) ||
+ (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) {
+ return PPC_INTERRUPT_EXT;
}
- } else if (msr_ile) {
- new_msr |= (target_ulong)1 << MSR_LE;
}
-#else
- if (msr_ile) {
- new_msr |= (target_ulong)1 << MSR_LE;
+ if (msr_ee != 0) {
+ /* Decrementer exception */
+ if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
+ return PPC_INTERRUPT_DECR;
+ }
+ if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
+ return PPC_INTERRUPT_PERFM;
+ }
}
-#endif
- vector = env->excp_vectors[excp];
- if (vector == (target_ulong)-1ULL) {
- cpu_abort(cs, "Raised an exception without defined vector %d\n",
- excp);
+ return 0;
+}
+
+#define P8_UNUSED_INTERRUPTS \
+ (PPC_INTERRUPT_RESET | PPC_INTERRUPT_DEBUG | PPC_INTERRUPT_HVIRT | \
+ PPC_INTERRUPT_CEXT | PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | \
+ PPC_INTERRUPT_FIT | PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM)
+
+static int p8_interrupt_powersave(CPUPPCState *env)
+{
+ if ((env->pending_interrupts & PPC_INTERRUPT_EXT) &&
+ (env->spr[SPR_LPCR] & LPCR_P8_PECE2)) {
+ return PPC_INTERRUPT_EXT;
+ }
+ if ((env->pending_interrupts & PPC_INTERRUPT_DECR) &&
+ (env->spr[SPR_LPCR] & LPCR_P8_PECE3)) {
+ return PPC_INTERRUPT_DECR;
+ }
+ if ((env->pending_interrupts & PPC_INTERRUPT_MCK) &&
+ (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) {
+ return PPC_INTERRUPT_MCK;
+ }
+ if ((env->pending_interrupts & PPC_INTERRUPT_HMI) &&
+ (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) {
+ return PPC_INTERRUPT_HMI;
+ }
+ if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) &&
+ (env->spr[SPR_LPCR] & LPCR_P8_PECE0)) {
+ return PPC_INTERRUPT_DOORBELL;
+ }
+ if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) &&
+ (env->spr[SPR_LPCR] & LPCR_P8_PECE1)) {
+ return PPC_INTERRUPT_HDOORBELL;
}
+ if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
+ return PPC_INTERRUPT_RESET;
+ }
+ return 0;
+}
- vector |= env->excp_prefix;
+static int p8_next_unmasked_interrupt(CPUPPCState *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ /* Ignore MSR[EE] when coming out of some power management states */
+ bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
- /* If any alternate SRR register are defined, duplicate saved values */
- if (asrr0 != -1) {
- env->spr[asrr0] = env->nip;
+ assert((env->pending_interrupts & P8_UNUSED_INTERRUPTS) == 0);
+
+ if (cs->halted) {
+ /* LPCR[PECE] controls which interrupts can exit power-saving mode */
+ return p8_interrupt_powersave(env);
}
- if (asrr1 != -1) {
- env->spr[asrr1] = msr;
+
+ /* Machine check exception */
+ if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
+ return PPC_INTERRUPT_MCK;
}
-#if defined(TARGET_PPC64)
- if (excp_model == POWERPC_EXCP_BOOKE) {
- if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
- /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
- new_msr |= (target_ulong)1 << MSR_CM;
- } else {
- vector = (uint32_t)vector;
+ /* Hypervisor decrementer exception */
+ if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
+ /* LPCR will be clear when not supported so this will work */
+ bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
+ if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) {
+ /* HDEC clears on delivery */
+ return PPC_INTERRUPT_HDECR;
}
- } else {
- if (!msr_isf && !mmu_is_64bit(env->mmu_model)) {
- vector = (uint32_t)vector;
- } else {
- new_msr |= (target_ulong)1 << MSR_SF;
+ }
+
+ /* External interrupt can ignore MSR:EE under some circumstances */
+ if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
+ bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
+ bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
+ /* HEIC blocks delivery to the hypervisor */
+ if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) &&
+ !FIELD_EX64(env->msr, MSR, PR))) ||
+ (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) {
+ return PPC_INTERRUPT_EXT;
+ }
+ }
+ if (msr_ee != 0) {
+ /* Decrementer exception */
+ if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
+ return PPC_INTERRUPT_DECR;
+ }
+ if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
+ return PPC_INTERRUPT_DOORBELL;
+ }
+ if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
+ return PPC_INTERRUPT_HDOORBELL;
+ }
+ if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
+ return PPC_INTERRUPT_PERFM;
+ }
+ /* EBB exception */
+ if (env->pending_interrupts & PPC_INTERRUPT_EBB) {
+ /*
+ * EBB exception must be taken in problem state and
+ * with BESCR_GE set.
+ */
+ if (FIELD_EX64(env->msr, MSR, PR) &&
+ (env->spr[SPR_BESCR] & BESCR_GE)) {
+ return PPC_INTERRUPT_EBB;
+ }
}
}
-#endif
- if (excp != POWERPC_EXCP_SYSCALL_VECTORED) {
- /* Save PC */
- env->spr[srr0] = env->nip;
+ return 0;
+}
- /* Save MSR */
- env->spr[srr1] = msr;
+#define P9_UNUSED_INTERRUPTS \
+ (PPC_INTERRUPT_RESET | PPC_INTERRUPT_DEBUG | PPC_INTERRUPT_CEXT | \
+ PPC_INTERRUPT_WDT | PPC_INTERRUPT_CDOORBELL | PPC_INTERRUPT_FIT | \
+ PPC_INTERRUPT_PIT | PPC_INTERRUPT_THERM)
-#if defined(TARGET_PPC64)
- } else {
- vector += lev * 0x20;
+static int p9_interrupt_powersave(CPUPPCState *env)
+{
+ /* External Exception */
+ if ((env->pending_interrupts & PPC_INTERRUPT_EXT) &&
+ (env->spr[SPR_LPCR] & LPCR_EEE)) {
+ bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
+ if (!heic || !FIELD_EX64_HV(env->msr) ||
+ FIELD_EX64(env->msr, MSR, PR)) {
+ return PPC_INTERRUPT_EXT;
+ }
+ }
+ /* Decrementer Exception */
+ if ((env->pending_interrupts & PPC_INTERRUPT_DECR) &&
+ (env->spr[SPR_LPCR] & LPCR_DEE)) {
+ return PPC_INTERRUPT_DECR;
+ }
+ /* Machine Check or Hypervisor Maintenance Exception */
+ if (env->spr[SPR_LPCR] & LPCR_OEE) {
+ if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
+ return PPC_INTERRUPT_MCK;
+ }
+ if (env->pending_interrupts & PPC_INTERRUPT_HMI) {
+ return PPC_INTERRUPT_HMI;
+ }
+ }
+ /* Privileged Doorbell Exception */
+ if ((env->pending_interrupts & PPC_INTERRUPT_DOORBELL) &&
+ (env->spr[SPR_LPCR] & LPCR_PDEE)) {
+ return PPC_INTERRUPT_DOORBELL;
+ }
+ /* Hypervisor Doorbell Exception */
+ if ((env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) &&
+ (env->spr[SPR_LPCR] & LPCR_HDEE)) {
+ return PPC_INTERRUPT_HDOORBELL;
+ }
+ /* Hypervisor virtualization exception */
+ if ((env->pending_interrupts & PPC_INTERRUPT_HVIRT) &&
+ (env->spr[SPR_LPCR] & LPCR_HVEE)) {
+ return PPC_INTERRUPT_HVIRT;
+ }
+ if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
+ return PPC_INTERRUPT_RESET;
+ }
+ return 0;
+}
- env->lr = env->nip;
- env->ctr = msr;
-#endif
+static int p9_next_unmasked_interrupt(CPUPPCState *env)
+{
+ CPUState *cs = env_cpu(env);
+
+ /* Ignore MSR[EE] when coming out of some power management states */
+ bool msr_ee = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
+
+ assert((env->pending_interrupts & P9_UNUSED_INTERRUPTS) == 0);
+
+ if (cs->halted) {
+ if (env->spr[SPR_PSSCR] & PSSCR_EC) {
+ /*
+ * When PSSCR[EC] is set, LPCR[PECE] controls which interrupts can
+ * wakeup the processor
+ */
+ return p9_interrupt_powersave(env);
+ } else {
+ /*
+ * When it's clear, any system-caused exception exits power-saving
+ * mode, even the ones that gate on MSR[EE].
+ */
+ msr_ee = true;
+ }
}
- /* This can update new_msr and vector if AIL applies */
- ppc_excp_apply_ail(cpu, excp_model, excp, msr, &new_msr, &vector);
+ /* Machine check exception */
+ if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
+ return PPC_INTERRUPT_MCK;
+ }
- powerpc_set_excp_state(cpu, vector, new_msr);
-}
+ /* Hypervisor decrementer exception */
+ if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
+ /* LPCR will be clear when not supported so this will work */
+ bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
+ if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hdice) {
+ /* HDEC clears on delivery */
+ return PPC_INTERRUPT_HDECR;
+ }
+ }
-void ppc_cpu_do_interrupt(CPUState *cs)
-{
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
+ /* Hypervisor virtualization interrupt */
+ if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) {
+ /* LPCR will be clear when not supported so this will work */
+ bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
+ if ((msr_ee || !FIELD_EX64_HV(env->msr)) && hvice) {
+ return PPC_INTERRUPT_HVIRT;
+ }
+ }
+
+ /* External interrupt can ignore MSR:EE under some circumstances */
+ if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
+ bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
+ bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
+ /* HEIC blocks delivery to the hypervisor */
+ if ((msr_ee && !(heic && FIELD_EX64_HV(env->msr) &&
+ !FIELD_EX64(env->msr, MSR, PR))) ||
+ (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) {
+ return PPC_INTERRUPT_EXT;
+ }
+ }
+ if (msr_ee != 0) {
+ /* Decrementer exception */
+ if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
+ return PPC_INTERRUPT_DECR;
+ }
+ if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
+ return PPC_INTERRUPT_DOORBELL;
+ }
+ if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
+ return PPC_INTERRUPT_HDOORBELL;
+ }
+ if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
+ return PPC_INTERRUPT_PERFM;
+ }
+ /* EBB exception */
+ if (env->pending_interrupts & PPC_INTERRUPT_EBB) {
+ /*
+ * EBB exception must be taken in problem state and
+ * with BESCR_GE set.
+ */
+ if (FIELD_EX64(env->msr, MSR, PR) &&
+ (env->spr[SPR_BESCR] & BESCR_GE)) {
+ return PPC_INTERRUPT_EBB;
+ }
+ }
+ }
- powerpc_excp(cpu, env->excp_model, cs->exception_index);
+ return 0;
}
+#endif /* TARGET_PPC64 */
-static void ppc_hw_interrupt(CPUPPCState *env)
+static int ppc_next_unmasked_interrupt(CPUPPCState *env)
{
- PowerPCCPU *cpu = env_archcpu(env);
+#ifdef TARGET_PPC64
+ switch (env->excp_model) {
+ case POWERPC_EXCP_POWER7:
+ return p7_next_unmasked_interrupt(env);
+ case POWERPC_EXCP_POWER8:
+ return p8_next_unmasked_interrupt(env);
+ case POWERPC_EXCP_POWER9:
+ case POWERPC_EXCP_POWER10:
+ return p9_next_unmasked_interrupt(env);
+ default:
+ break;
+ }
+#endif
bool async_deliver;
/* External reset */
- if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
- env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
- return;
+ if (env->pending_interrupts & PPC_INTERRUPT_RESET) {
+ return PPC_INTERRUPT_RESET;
}
/* Machine check exception */
- if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
- env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK);
- return;
+ if (env->pending_interrupts & PPC_INTERRUPT_MCK) {
+ return PPC_INTERRUPT_MCK;
}
#if 0 /* TODO */
/* External debug exception */
- if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
- env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG);
- return;
+ if (env->pending_interrupts & PPC_INTERRUPT_DEBUG) {
+ return PPC_INTERRUPT_DEBUG;
}
#endif
@@ -953,108 +2017,151 @@ static void ppc_hw_interrupt(CPUPPCState *env)
* clear when coming out of some power management states (in order
* for them to become a 0x100).
*/
- async_deliver = (msr_ee != 0) || env->resume_as_sreset;
+ async_deliver = FIELD_EX64(env->msr, MSR, EE) || env->resume_as_sreset;
/* Hypervisor decrementer exception */
- if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
+ if (env->pending_interrupts & PPC_INTERRUPT_HDECR) {
/* LPCR will be clear when not supported so this will work */
bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
- if ((async_deliver || msr_hv == 0) && hdice) {
+ if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hdice) {
/* HDEC clears on delivery */
- env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
- return;
+ return PPC_INTERRUPT_HDECR;
}
}
/* Hypervisor virtualization interrupt */
- if (env->pending_interrupts & (1 << PPC_INTERRUPT_HVIRT)) {
+ if (env->pending_interrupts & PPC_INTERRUPT_HVIRT) {
/* LPCR will be clear when not supported so this will work */
bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
- if ((async_deliver || msr_hv == 0) && hvice) {
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT);
- return;
+ if ((async_deliver || !FIELD_EX64_HV(env->msr)) && hvice) {
+ return PPC_INTERRUPT_HVIRT;
}
}
/* External interrupt can ignore MSR:EE under some circumstances */
- if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
+ if (env->pending_interrupts & PPC_INTERRUPT_EXT) {
bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC);
/* HEIC blocks delivery to the hypervisor */
- if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
- (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
- return;
+ if ((async_deliver && !(heic && FIELD_EX64_HV(env->msr) &&
+ !FIELD_EX64(env->msr, MSR, PR))) ||
+ (env->has_hv_mode && !FIELD_EX64_HV(env->msr) && !lpes0)) {
+ return PPC_INTERRUPT_EXT;
}
}
- if (msr_ce != 0) {
+ if (FIELD_EX64(env->msr, MSR, CE)) {
/* External critical interrupt */
- if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL);
- return;
+ if (env->pending_interrupts & PPC_INTERRUPT_CEXT) {
+ return PPC_INTERRUPT_CEXT;
}
}
if (async_deliver != 0) {
/* Watchdog timer on embedded PowerPC */
- if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
- env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT);
- return;
+ if (env->pending_interrupts & PPC_INTERRUPT_WDT) {
+ return PPC_INTERRUPT_WDT;
}
- if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
- env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI);
- return;
+ if (env->pending_interrupts & PPC_INTERRUPT_CDOORBELL) {
+ return PPC_INTERRUPT_CDOORBELL;
}
/* Fixed interval timer on embedded PowerPC */
- if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
- env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT);
- return;
+ if (env->pending_interrupts & PPC_INTERRUPT_FIT) {
+ return PPC_INTERRUPT_FIT;
}
/* Programmable interval timer on embedded PowerPC */
- if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
- env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT);
- return;
+ if (env->pending_interrupts & PPC_INTERRUPT_PIT) {
+ return PPC_INTERRUPT_PIT;
}
/* Decrementer exception */
- if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
- if (ppc_decr_clear_on_delivery(env)) {
- env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
- }
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
- return;
+ if (env->pending_interrupts & PPC_INTERRUPT_DECR) {
+ return PPC_INTERRUPT_DECR;
}
- if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
- env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
- if (is_book3s_arch2x(env)) {
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR);
- } else {
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
- }
- return;
+ if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
+ return PPC_INTERRUPT_DOORBELL;
}
- if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
- env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV);
- return;
+ if (env->pending_interrupts & PPC_INTERRUPT_HDOORBELL) {
+ return PPC_INTERRUPT_HDOORBELL;
}
- if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
- env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
- return;
+ if (env->pending_interrupts & PPC_INTERRUPT_PERFM) {
+ return PPC_INTERRUPT_PERFM;
}
/* Thermal interrupt */
- if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
- env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
- return;
+ if (env->pending_interrupts & PPC_INTERRUPT_THERM) {
+ return PPC_INTERRUPT_THERM;
+ }
+ /* EBB exception */
+ if (env->pending_interrupts & PPC_INTERRUPT_EBB) {
+ /*
+ * EBB exception must be taken in problem state and
+ * with BESCR_GE set.
+ */
+ if (FIELD_EX64(env->msr, MSR, PR) &&
+ (env->spr[SPR_BESCR] & BESCR_GE)) {
+ return PPC_INTERRUPT_EBB;
+ }
}
}
- if (env->resume_as_sreset) {
+ return 0;
+}
+
+/*
+ * Sets CPU_INTERRUPT_HARD if there is at least one unmasked interrupt to be
+ * delivered and clears CPU_INTERRUPT_HARD otherwise.
+ *
+ * This method is called by ppc_set_interrupt when an interrupt is raised or
+ * lowered, and should also be called whenever an interrupt masking condition
+ * is changed, e.g.:
+ * - When relevant bits of MSR are altered, like EE, HV, PR, etc.;
+ * - When relevant bits of LPCR are altered, like PECE, HDICE, HVICE, etc.;
+ * - When PSSCR[EC] or env->resume_as_sreset are changed;
+ * - When cs->halted is changed and the CPU has a different interrupt masking
+ * logic in power-saving mode (e.g., POWER7/8/9/10);
+ */
+void ppc_maybe_interrupt(CPUPPCState *env)
+{
+ CPUState *cs = env_cpu(env);
+ BQL_LOCK_GUARD();
+
+ if (ppc_next_unmasked_interrupt(env)) {
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ } else {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+}
+
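(Illustration, not part of the patch: per the comment above, code that changes a masking condition is expected to re-run this evaluation; a hypothetical LPCR store helper would look like this.)

    /* Hypothetical sketch: LPCR writes can change PECE/HDICE/HVICE masking */
    static void example_store_lpcr(CPUPPCState *env, target_ulong val)
    {
        env->spr[SPR_LPCR] = val;
        ppc_maybe_interrupt(env);  /* raises or clears CPU_INTERRUPT_HARD */
    }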
+#ifdef TARGET_PPC64
+static void p7_deliver_interrupt(CPUPPCState *env, int interrupt)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+
+ switch (interrupt) {
+ case PPC_INTERRUPT_MCK: /* Machine check exception */
+ env->pending_interrupts &= ~PPC_INTERRUPT_MCK;
+ powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
+ break;
+
+ case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
+ /* HDEC clears on delivery */
+ env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
+ powerpc_excp(cpu, POWERPC_EXCP_HDECR);
+ break;
+
+ case PPC_INTERRUPT_EXT:
+ if (books_vhyp_promotes_external_to_hvirt(cpu)) {
+ powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
+ } else {
+ powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
+ }
+ break;
+
+ case PPC_INTERRUPT_DECR: /* Decrementer exception */
+ powerpc_excp(cpu, POWERPC_EXCP_DECR);
+ break;
+ case PPC_INTERRUPT_PERFM:
+ env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
+ powerpc_excp(cpu, POWERPC_EXCP_PERFM);
+ break;
+ case 0:
/*
* This is a bug ! It means that has_work took us out of halt without
* anything to deliver while in a PM state that requires getting
@@ -1066,17 +2173,288 @@ static void ppc_hw_interrupt(CPUPPCState *env)
* It generally means a discrepancy between the wakeup conditions in the
* processor has_work implementation and the logic in this function.
*/
- cpu_abort(env_cpu(env),
- "Wakeup from PM state but interrupt Undelivered");
+ assert(!env->resume_as_sreset);
+ break;
+ default:
+ cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n",
+ interrupt);
+ }
+}
+
+static void p8_deliver_interrupt(CPUPPCState *env, int interrupt)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+
+ switch (interrupt) {
+ case PPC_INTERRUPT_MCK: /* Machine check exception */
+ env->pending_interrupts &= ~PPC_INTERRUPT_MCK;
+ powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
+ break;
+
+ case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
+ /* HDEC clears on delivery */
+ env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
+ powerpc_excp(cpu, POWERPC_EXCP_HDECR);
+ break;
+
+ case PPC_INTERRUPT_EXT:
+ if (books_vhyp_promotes_external_to_hvirt(cpu)) {
+ powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
+ } else {
+ powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
+ }
+ break;
+
+ case PPC_INTERRUPT_DECR: /* Decrementer exception */
+ powerpc_excp(cpu, POWERPC_EXCP_DECR);
+ break;
+ case PPC_INTERRUPT_DOORBELL:
+ env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
+ if (is_book3s_arch2x(env)) {
+ powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
+ } else {
+ powerpc_excp(cpu, POWERPC_EXCP_DOORI);
+ }
+ break;
+ case PPC_INTERRUPT_HDOORBELL:
+ env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
+ powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
+ break;
+ case PPC_INTERRUPT_PERFM:
+ env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
+ powerpc_excp(cpu, POWERPC_EXCP_PERFM);
+ break;
+ case PPC_INTERRUPT_EBB: /* EBB exception */
+ env->pending_interrupts &= ~PPC_INTERRUPT_EBB;
+ if (env->spr[SPR_BESCR] & BESCR_PMEO) {
+ powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB);
+ } else if (env->spr[SPR_BESCR] & BESCR_EEO) {
+ powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB);
+ }
+ break;
+ case 0:
+ /*
+ * This is a bug ! It means that has_work took us out of halt without
+ * anything to deliver while in a PM state that requires getting
+ * out via a 0x100
+ *
+ * This means we will incorrectly execute past the power management
+ * instruction instead of triggering a reset.
+ *
+ * It generally means a discrepancy between the wakeup conditions in the
+ * processor has_work implementation and the logic in this function.
+ */
+ assert(!env->resume_as_sreset);
+ break;
+ default:
+ cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n",
+ interrupt);
+ }
+}
+
+static void p9_deliver_interrupt(CPUPPCState *env, int interrupt)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+ CPUState *cs = env_cpu(env);
+
+ if (cs->halted && !(env->spr[SPR_PSSCR] & PSSCR_EC) &&
+ !FIELD_EX64(env->msr, MSR, EE)) {
+ /*
+ * A pending interrupt took us out of power-saving, but MSR[EE] says
+ * that we should return to NIP+4 instead of delivering it.
+ */
+ return;
+ }
+
+ switch (interrupt) {
+ case PPC_INTERRUPT_MCK: /* Machine check exception */
+ env->pending_interrupts &= ~PPC_INTERRUPT_MCK;
+ powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
+ break;
+
+ case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
+ /* HDEC clears on delivery */
+ env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
+ powerpc_excp(cpu, POWERPC_EXCP_HDECR);
+ break;
+ case PPC_INTERRUPT_HVIRT: /* Hypervisor virtualization interrupt */
+ powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
+ break;
+
+ case PPC_INTERRUPT_EXT:
+ if (books_vhyp_promotes_external_to_hvirt(cpu)) {
+ powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
+ } else {
+ powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
+ }
+ break;
+
+ case PPC_INTERRUPT_DECR: /* Decrementer exception */
+ powerpc_excp(cpu, POWERPC_EXCP_DECR);
+ break;
+ case PPC_INTERRUPT_DOORBELL:
+ env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
+ powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
+ break;
+ case PPC_INTERRUPT_HDOORBELL:
+ env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
+ powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
+ break;
+ case PPC_INTERRUPT_PERFM:
+ env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
+ powerpc_excp(cpu, POWERPC_EXCP_PERFM);
+ break;
+ case PPC_INTERRUPT_EBB: /* EBB exception */
+ env->pending_interrupts &= ~PPC_INTERRUPT_EBB;
+ if (env->spr[SPR_BESCR] & BESCR_PMEO) {
+ powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB);
+ } else if (env->spr[SPR_BESCR] & BESCR_EEO) {
+ powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB);
+ }
+ break;
+ case 0:
+ /*
+ * This is a bug ! It means that has_work took us out of halt without
+ * anything to deliver while in a PM state that requires getting
+ * out via a 0x100
+ *
+ * This means we will incorrectly execute past the power management
+ * instruction instead of triggering a reset.
+ *
+ * It generally means a discrepancy between the wakeup conditions in the
+ * processor has_work implementation and the logic in this function.
+ */
+ assert(!env->resume_as_sreset);
+ break;
+ default:
+ cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n",
+ interrupt);
+ }
+}
+#endif /* TARGET_PPC64 */
+
+static void ppc_deliver_interrupt(CPUPPCState *env, int interrupt)
+{
+#ifdef TARGET_PPC64
+ switch (env->excp_model) {
+ case POWERPC_EXCP_POWER7:
+ return p7_deliver_interrupt(env, interrupt);
+ case POWERPC_EXCP_POWER8:
+ return p8_deliver_interrupt(env, interrupt);
+ case POWERPC_EXCP_POWER9:
+ case POWERPC_EXCP_POWER10:
+ return p9_deliver_interrupt(env, interrupt);
+ default:
+ break;
+ }
+#endif
+ PowerPCCPU *cpu = env_archcpu(env);
+
+ switch (interrupt) {
+ case PPC_INTERRUPT_RESET: /* External reset */
+ env->pending_interrupts &= ~PPC_INTERRUPT_RESET;
+ powerpc_excp(cpu, POWERPC_EXCP_RESET);
+ break;
+ case PPC_INTERRUPT_MCK: /* Machine check exception */
+ env->pending_interrupts &= ~PPC_INTERRUPT_MCK;
+ powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
+ break;
+
+ case PPC_INTERRUPT_HDECR: /* Hypervisor decrementer exception */
+ /* HDEC clears on delivery */
+ env->pending_interrupts &= ~PPC_INTERRUPT_HDECR;
+ powerpc_excp(cpu, POWERPC_EXCP_HDECR);
+ break;
+ case PPC_INTERRUPT_HVIRT: /* Hypervisor virtualization interrupt */
+ powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
+ break;
+
+ case PPC_INTERRUPT_EXT:
+ if (books_vhyp_promotes_external_to_hvirt(cpu)) {
+ powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
+ } else {
+ powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
+ }
+ break;
+ case PPC_INTERRUPT_CEXT: /* External critical interrupt */
+ powerpc_excp(cpu, POWERPC_EXCP_CRITICAL);
+ break;
+
+ case PPC_INTERRUPT_WDT: /* Watchdog timer on embedded PowerPC */
+ env->pending_interrupts &= ~PPC_INTERRUPT_WDT;
+ powerpc_excp(cpu, POWERPC_EXCP_WDT);
+ break;
+ case PPC_INTERRUPT_CDOORBELL:
+ env->pending_interrupts &= ~PPC_INTERRUPT_CDOORBELL;
+ powerpc_excp(cpu, POWERPC_EXCP_DOORCI);
+ break;
+ case PPC_INTERRUPT_FIT: /* Fixed interval timer on embedded PowerPC */
+ env->pending_interrupts &= ~PPC_INTERRUPT_FIT;
+ powerpc_excp(cpu, POWERPC_EXCP_FIT);
+ break;
+ case PPC_INTERRUPT_PIT: /* Programmable interval timer on embedded ppc */
+ env->pending_interrupts &= ~PPC_INTERRUPT_PIT;
+ powerpc_excp(cpu, POWERPC_EXCP_PIT);
+ break;
+ case PPC_INTERRUPT_DECR: /* Decrementer exception */
+ if (ppc_decr_clear_on_delivery(env)) {
+ env->pending_interrupts &= ~PPC_INTERRUPT_DECR;
+ }
+ powerpc_excp(cpu, POWERPC_EXCP_DECR);
+ break;
+ case PPC_INTERRUPT_DOORBELL:
+ env->pending_interrupts &= ~PPC_INTERRUPT_DOORBELL;
+ if (is_book3s_arch2x(env)) {
+ powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
+ } else {
+ powerpc_excp(cpu, POWERPC_EXCP_DOORI);
+ }
+ break;
+ case PPC_INTERRUPT_HDOORBELL:
+ env->pending_interrupts &= ~PPC_INTERRUPT_HDOORBELL;
+ powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
+ break;
+ case PPC_INTERRUPT_PERFM:
+ env->pending_interrupts &= ~PPC_INTERRUPT_PERFM;
+ powerpc_excp(cpu, POWERPC_EXCP_PERFM);
+ break;
+ case PPC_INTERRUPT_THERM: /* Thermal interrupt */
+ env->pending_interrupts &= ~PPC_INTERRUPT_THERM;
+ powerpc_excp(cpu, POWERPC_EXCP_THERM);
+ break;
+ case PPC_INTERRUPT_EBB: /* EBB exception */
+ env->pending_interrupts &= ~PPC_INTERRUPT_EBB;
+ if (env->spr[SPR_BESCR] & BESCR_PMEO) {
+ powerpc_excp(cpu, POWERPC_EXCP_PERFM_EBB);
+ } else if (env->spr[SPR_BESCR] & BESCR_EEO) {
+ powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL_EBB);
+ }
+ break;
+ case 0:
+ /*
+ * This is a bug ! It means that has_work took us out of halt without
+ * anything to deliver while in a PM state that requires getting
+ * out via a 0x100
+ *
+ * This means we will incorrectly execute past the power management
+ * instruction instead of triggering a reset.
+ *
+ * It generally means a discrepancy between the wakeup conditions in the
+ * processor has_work implementation and the logic in this function.
+ */
+ assert(!env->resume_as_sreset);
+ break;
+ default:
+ cpu_abort(env_cpu(env), "Invalid PowerPC interrupt %d. Aborting\n",
+ interrupt);
}
}
void ppc_cpu_do_system_reset(CPUState *cs)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
+ powerpc_excp(cpu, POWERPC_EXCP_RESET);
}
void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
@@ -1091,37 +2469,37 @@ void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
*/
msr = (1ULL << MSR_ME);
msr |= env->msr & (1ULL << MSR_SF);
- if (ppc_interrupts_little_endian(cpu)) {
+ if (ppc_interrupts_little_endian(cpu, false)) {
msr |= (1ULL << MSR_LE);
}
+ /* Anything for nested required here? MSR[HV] bit? */
+
powerpc_set_excp_state(cpu, vector, msr);
}
bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
+ CPUPPCState *env = cpu_env(cs);
+ int interrupt;
- if (interrupt_request & CPU_INTERRUPT_HARD) {
- ppc_hw_interrupt(env);
- if (env->pending_interrupts == 0) {
- cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
- }
- return true;
+ if ((interrupt_request & CPU_INTERRUPT_HARD) == 0) {
+ return false;
}
- return false;
-}
-#endif /* !CONFIG_USER_ONLY */
+ interrupt = ppc_next_unmasked_interrupt(env);
+ if (interrupt == 0) {
+ return false;
+ }
-#if defined(DEBUG_OP)
-static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
-{
- qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
- TARGET_FMT_lx "\n", RA, msr);
+ ppc_deliver_interrupt(env, interrupt);
+ if (env->pending_interrupts == 0) {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+ return true;
}
-#endif
+
+#endif /* !CONFIG_USER_ONLY */
/*****************************************************************************/
/* Exceptions processing helpers */
@@ -1164,22 +2542,24 @@ void helper_raise_exception(CPUPPCState *env, uint32_t exception)
{
raise_exception_err_ra(env, exception, 0, 0);
}
-#endif
-#if !defined(CONFIG_USER_ONLY)
-#ifdef CONFIG_TCG
+#ifndef CONFIG_USER_ONLY
void helper_store_msr(CPUPPCState *env, target_ulong val)
{
uint32_t excp = hreg_store_msr(env, val, 0);
if (excp != 0) {
- CPUState *cs = env_cpu(env);
- cpu_interrupt_exittb(cs);
+ cpu_interrupt_exittb(env_cpu(env));
raise_exception(env, excp);
}
}
-#if defined(TARGET_PPC64)
+void helper_ppc_maybe_interrupt(CPUPPCState *env)
+{
+ ppc_maybe_interrupt(env);
+}
+
+#ifdef TARGET_PPC64
void helper_scv(CPUPPCState *env, uint32_t lev)
{
if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
@@ -1189,28 +2569,36 @@ void helper_scv(CPUPPCState *env, uint32_t lev)
}
}
-void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
+void helper_pminsn(CPUPPCState *env, uint32_t insn)
{
- CPUState *cs;
+ CPUState *cs = env_cpu(env);
- cs = env_cpu(env);
cs->halted = 1;
/* Condition for waking up at 0x100 */
env->resume_as_sreset = (insn != PPC_PM_STOP) ||
(env->spr[SPR_PSSCR] & PSSCR_EC);
+
+ /* HDECR is not to wake from PM state, it may have already fired */
+ if (env->resume_as_sreset) {
+ PowerPCCPU *cpu = env_archcpu(env);
+ ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
+ }
+
+ ppc_maybe_interrupt(env);
}
-#endif /* defined(TARGET_PPC64) */
-#endif /* CONFIG_TCG */
+#endif /* TARGET_PPC64 */
-static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
+static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
- CPUState *cs = env_cpu(env);
-
/* MSR:POW cannot be set by any form of rfi */
msr &= ~(1ULL << MSR_POW);
-#if defined(TARGET_PPC64)
+ /* MSR:TGPR cannot be set by any form of rfi */
+ if (env->flags & POWERPC_FLAG_TGPR)
+ msr &= ~(1ULL << MSR_TGPR);
+
+#ifdef TARGET_PPC64
/* Switching to 32-bit ? Crop the nip */
if (!msr_is_64bit(env, msr)) {
nip = (uint32_t)nip;
@@ -1221,14 +2609,12 @@ static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
/* XXX: beware: this is false if VLE is supported */
env->nip = nip & ~((target_ulong)0x00000003);
hreg_store_msr(env, msr, 1);
-#if defined(DEBUG_OP)
- cpu_dump_rfi(env->nip, env->msr);
-#endif
+ trace_ppc_excp_rfi(env->nip, env->msr);
/*
* No need to raise an exception here, as rfi is always the last
* insn of a TB
*/
- cpu_interrupt_exittb(cs);
+ cpu_interrupt_exittb(env_cpu(env));
/* Reset the reservation */
env->reserve_addr = -1;
@@ -1236,14 +2622,12 @@ static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
check_tlb_flush(env, false);
}
-#ifdef CONFIG_TCG
void helper_rfi(CPUPPCState *env)
{
do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
}
-#define MSR_BOOK3S_MASK
-#if defined(TARGET_PPC64)
+#ifdef TARGET_PPC64
void helper_rfid(CPUPPCState *env)
{
/*
@@ -1264,7 +2648,82 @@ void helper_hrfid(CPUPPCState *env)
{
do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
}
-#endif
+
+void helper_rfebb(CPUPPCState *env, target_ulong s)
+{
+ target_ulong msr = env->msr;
+
+ /*
+ * Handling of BESCR bits 32:33 according to PowerISA v3.1:
+ *
+ * "If BESCR 32:33 != 0b00 the instruction is treated as if
+ * the instruction form were invalid."
+ */
+ if (env->spr[SPR_BESCR] & BESCR_INVALID) {
+ raise_exception_err(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
+ }
+
+ env->nip = env->spr[SPR_EBBRR];
+
+ /* Switching to 32-bit ? Crop the nip */
+ if (!msr_is_64bit(env, msr)) {
+ env->nip = (uint32_t)env->spr[SPR_EBBRR];
+ }
+
+ if (s) {
+ env->spr[SPR_BESCR] |= BESCR_GE;
+ } else {
+ env->spr[SPR_BESCR] &= ~BESCR_GE;
+ }
+}
+
+/*
+ * Triggers or queues an 'ebb_excp' EBB exception. All checks
+ * but FSCR, HFSCR and msr_pr must be done beforehand.
+ *
+ * PowerISA v3.1 isn't clear about whether an EBB should be
+ * postponed or cancelled if the EBB facility is unavailable.
+ * Our assumption here is that the EBB is cancelled if both
+ * FSCR and HFSCR EBB facilities aren't available.
+ */
+static void do_ebb(CPUPPCState *env, int ebb_excp)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+
+ /*
+ * FSCR_EBB and FSCR_IC_EBB are the same bits used with
+ * HFSCR.
+ */
+ helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB);
+ helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB);
+
+ if (ebb_excp == POWERPC_EXCP_PERFM_EBB) {
+ env->spr[SPR_BESCR] |= BESCR_PMEO;
+ } else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) {
+ env->spr[SPR_BESCR] |= BESCR_EEO;
+ }
+
+ if (FIELD_EX64(env->msr, MSR, PR)) {
+ powerpc_excp(cpu, ebb_excp);
+ } else {
+ ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1);
+ }
+}
+
+void raise_ebb_perfm_exception(CPUPPCState *env)
+{
+ bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE &&
+ env->spr[SPR_BESCR] & BESCR_PME &&
+ env->spr[SPR_BESCR] & BESCR_GE;
+
+ if (!perfm_ebb_enabled) {
+ return;
+ }
+
+ do_ebb(env, POWERPC_EXCP_PERFM_EBB);
+}
+#endif /* TARGET_PPC64 */
/*****************************************************************************/
/* Embedded PowerPC specific helpers */
@@ -1289,10 +2748,8 @@ void helper_rfmci(CPUPPCState *env)
/* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
}
-#endif /* CONFIG_TCG */
-#endif /* !defined(CONFIG_USER_ONLY) */
+#endif /* !CONFIG_USER_ONLY */
-#ifdef CONFIG_TCG
void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
uint32_t flags)
{
@@ -1306,7 +2763,7 @@ void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
}
}
-#if defined(TARGET_PPC64)
+#ifdef TARGET_PPC64
void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
uint32_t flags)
{
@@ -1319,19 +2776,120 @@ void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
POWERPC_EXCP_TRAP, GETPC());
}
}
-#endif
-#endif
+#endif /* TARGET_PPC64 */
-#if !defined(CONFIG_USER_ONLY)
-/*****************************************************************************/
-/* PowerPC 601 specific instructions (POWER bridge) */
+static uint32_t helper_SIMON_LIKE_32_64(uint32_t x, uint64_t key, uint32_t lane)
+{
+ const uint16_t c = 0xfffc;
+ const uint64_t z0 = 0xfa2561cdf44ac398ULL;
+ uint16_t z = 0, temp;
+ uint16_t k[32], eff_k[32], xleft[33], xright[33], fxleft[32];
+
+ for (int i = 3; i >= 0; i--) {
+ k[i] = key & 0xffff;
+ key >>= 16;
+ }
+ xleft[0] = x & 0xffff;
+ xright[0] = (x >> 16) & 0xffff;
-#ifdef CONFIG_TCG
-void helper_rfsvc(CPUPPCState *env)
+ for (int i = 0; i < 28; i++) {
+ z = (z0 >> (63 - i)) & 1;
+ temp = ror16(k[i + 3], 3) ^ k[i + 1];
+ k[i + 4] = c ^ z ^ k[i] ^ temp ^ ror16(temp, 1);
+ }
+
+ for (int i = 0; i < 8; i++) {
+ eff_k[4 * i + 0] = k[4 * i + ((0 + lane) % 4)];
+ eff_k[4 * i + 1] = k[4 * i + ((1 + lane) % 4)];
+ eff_k[4 * i + 2] = k[4 * i + ((2 + lane) % 4)];
+ eff_k[4 * i + 3] = k[4 * i + ((3 + lane) % 4)];
+ }
+
+ for (int i = 0; i < 32; i++) {
+ fxleft[i] = (rol16(xleft[i], 1) &
+ rol16(xleft[i], 8)) ^ rol16(xleft[i], 2);
+ xleft[i + 1] = xright[i] ^ fxleft[i] ^ eff_k[i];
+ xright[i + 1] = xleft[i];
+ }
+
+ return (((uint32_t)xright[32]) << 16) | xleft[32];
+}
+
+static uint64_t hash_digest(uint64_t ra, uint64_t rb, uint64_t key)
{
- do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
+ uint64_t stage0_h = 0ULL, stage0_l = 0ULL;
+ uint64_t stage1_h, stage1_l;
+
+ for (int i = 0; i < 4; i++) {
+ stage0_h |= ror64(rb & 0xff, 8 * (2 * i + 1));
+ stage0_h |= ((ra >> 32) & 0xff) << (8 * 2 * i);
+ stage0_l |= ror64((rb >> 32) & 0xff, 8 * (2 * i + 1));
+ stage0_l |= (ra & 0xff) << (8 * 2 * i);
+ rb >>= 8;
+ ra >>= 8;
+ }
+
+ stage1_h = (uint64_t)helper_SIMON_LIKE_32_64(stage0_h >> 32, key, 0) << 32;
+ stage1_h |= helper_SIMON_LIKE_32_64(stage0_h, key, 1);
+ stage1_l = (uint64_t)helper_SIMON_LIKE_32_64(stage0_l >> 32, key, 2) << 32;
+ stage1_l |= helper_SIMON_LIKE_32_64(stage0_l, key, 3);
+
+ return stage1_h ^ stage1_l;
}
+static void do_hash(CPUPPCState *env, target_ulong ea, target_ulong ra,
+ target_ulong rb, uint64_t key, bool store)
+{
+ uint64_t calculated_hash = hash_digest(ra, rb, key), loaded_hash;
+
+ if (store) {
+ cpu_stq_data_ra(env, ea, calculated_hash, GETPC());
+ } else {
+ loaded_hash = cpu_ldq_data_ra(env, ea, GETPC());
+ if (loaded_hash != calculated_hash) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_TRAP, GETPC());
+ }
+ }
+}
+
+#ifdef TARGET_PPC64
+#define HELPER_HASH(op, key, store, dexcr_aspect) \
+void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \
+ target_ulong rb) \
+{ \
+ if (env->msr & R_MSR_PR_MASK) { \
+ if (!(env->spr[SPR_DEXCR] & R_DEXCR_PRO_##dexcr_aspect##_MASK || \
+ env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \
+ return; \
+ } else if (!(env->msr & R_MSR_HV_MASK)) { \
+ if (!(env->spr[SPR_DEXCR] & R_DEXCR_PNH_##dexcr_aspect##_MASK || \
+ env->spr[SPR_HDEXCR] & R_HDEXCR_ENF_##dexcr_aspect##_MASK)) \
+ return; \
+ } else if (!(env->msr & R_MSR_S_MASK)) { \
+ if (!(env->spr[SPR_HDEXCR] & R_HDEXCR_HNU_##dexcr_aspect##_MASK)) \
+ return; \
+ } \
+ \
+ do_hash(env, ea, ra, rb, key, store); \
+}
+#else
+#define HELPER_HASH(op, key, store, dexcr_aspect) \
+void helper_##op(CPUPPCState *env, target_ulong ea, target_ulong ra, \
+ target_ulong rb) \
+{ \
+ do_hash(env, ea, ra, rb, key, store); \
+}
+#endif /* TARGET_PPC64 */
+
+HELPER_HASH(HASHST, env->spr[SPR_HASHKEYR], true, NPHIE)
+HELPER_HASH(HASHCHK, env->spr[SPR_HASHKEYR], false, NPHIE)
+HELPER_HASH(HASHSTP, env->spr[SPR_HASHPKEYR], true, PHIE)
+HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false, PHIE)
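(Illustration, not part of the patch: the intended pairing is a hash store followed later by a check over the same EA/RA/RB triple, and a mismatching check raises a trap-type program interrupt via do_hash(). A minimal sketch at helper level, values hypothetical, assuming the NPHIE aspect is enabled in DEXCR:)

    /* Hypothetical sketch: protect the saved link register on the stack */
    target_ulong lr = env->lr, sp = env->gpr[1];
    helper_HASHST(env, sp - 8, lr, sp);   /* prologue: store hash of (LR, SP) */
    helper_HASHCHK(env, sp - 8, lr, sp);  /* epilogue: recompute and compare */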
+
+#ifndef CONFIG_USER_ONLY
/* Embedded.Processor Control */
static int dbell2irq(target_ulong rb)
{
@@ -1364,7 +2922,7 @@ void helper_msgclr(CPUPPCState *env, target_ulong rb)
return;
}
- env->pending_interrupts &= ~(1 << irq);
+ ppc_set_irq(env_archcpu(env), irq, 0);
}
void helper_msgsnd(target_ulong rb)
@@ -1377,17 +2935,16 @@ void helper_msgsnd(target_ulong rb)
return;
}
- qemu_mutex_lock_iothread();
+ bql_lock();
CPU_FOREACH(cs) {
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *cenv = &cpu->env;
if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
- cenv->pending_interrupts |= 1 << irq;
- cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ ppc_set_irq(cpu, irq, 1);
}
}
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}
/* Server Processor Control */
@@ -1408,25 +2965,24 @@ void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
return;
}
- env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
+ ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0);
}
static void book3s_msgsnd_common(int pir, int irq)
{
CPUState *cs;
- qemu_mutex_lock_iothread();
+ bql_lock();
CPU_FOREACH(cs) {
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *cenv = &cpu->env;
/* TODO: broadcast message to all threads of the same processor */
if (cenv->spr_cb[SPR_PIR].default_value == pir) {
- cenv->pending_interrupts |= 1 << irq;
- cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ ppc_set_irq(cpu, irq, 1);
}
}
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}
void helper_book3s_msgsnd(target_ulong rb)
@@ -1440,7 +2996,7 @@ void helper_book3s_msgsnd(target_ulong rb)
book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL);
}
-#if defined(TARGET_PPC64)
+#ifdef TARGET_PPC64
void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
{
helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);
@@ -1449,45 +3005,236 @@ void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
return;
}
- env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
+ ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0);
}
/*
- * sends a message to other threads that are on the same
+ * sends a message to another thread on the same
* multi-threaded processor
*/
void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
{
- int pir = env->spr_cb[SPR_PIR].default_value;
+ CPUState *cs = env_cpu(env);
+ PowerPCCPU *cpu = env_archcpu(env);
+ CPUState *ccs;
+ uint32_t nr_threads = cs->nr_threads;
+ int ttir = rb & PPC_BITMASK(57, 63);
helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);
- if (!dbell_type_server(rb)) {
+ if (!(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+ nr_threads = 1; /* msgsndp behaves as 1-thread in LPAR-per-thread mode*/
+ }
+
+ if (!dbell_type_server(rb) || ttir >= nr_threads) {
return;
}
- /* TODO: TCG supports only one thread */
+ if (nr_threads == 1) {
+ ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, 1);
+ return;
+ }
+
+ /* Does iothread need to be locked for walking CPU list? */
+ bql_lock();
+ THREAD_SIBLING_FOREACH(cs, ccs) {
+ PowerPCCPU *ccpu = POWERPC_CPU(ccs);
+ uint32_t thread_id = ppc_cpu_tir(ccpu);
+
+ if (ttir == thread_id) {
+ ppc_set_irq(ccpu, PPC_INTERRUPT_DOORBELL, 1);
+ bql_unlock();
+ return;
+ }
+ }
+
+ g_assert_not_reached();
+}
+#endif /* TARGET_PPC64 */
- book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL);
+/* Single-step tracing */
+void helper_book3s_trace(CPUPPCState *env, target_ulong prev_ip)
+{
+ uint32_t error_code = 0;
+ if (env->insns_flags2 & PPC2_ISA207S) {
+ /* Load/store reporting, SRR1[35, 36] and SDAR, are not implemented. */
+ env->spr[SPR_POWER_SIAR] = prev_ip;
+ error_code = PPC_BIT(33);
+ }
+ raise_exception_err(env, POWERPC_EXCP_TRACE, error_code);
}
-#endif
-#endif /* CONFIG_TCG */
-#endif
-#ifdef CONFIG_TCG
void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
{
- CPUPPCState *env = cs->env_ptr;
+ CPUPPCState *env = cpu_env(cs);
uint32_t insn;
/* Restore state and reload the insn we executed, for filling in DSISR. */
- cpu_restore_state(cs, retaddr, true);
- insn = cpu_ldl_code(env, env->nip);
+ cpu_restore_state(cs, retaddr);
+ insn = ppc_ldl_code(env, env->nip);
+
+ switch (env->mmu_model) {
+ case POWERPC_MMU_SOFT_4xx:
+ env->spr[SPR_40x_DEAR] = vaddr;
+ break;
+ case POWERPC_MMU_BOOKE:
+ case POWERPC_MMU_BOOKE206:
+ env->spr[SPR_BOOKE_DEAR] = vaddr;
+ break;
+ default:
+ env->spr[SPR_DAR] = vaddr;
+ break;
+ }
cs->exception_index = POWERPC_EXCP_ALIGN;
env->error_code = insn & 0x03FF0000;
cpu_loop_exit(cs);
}
+
+void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+ vaddr vaddr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr)
+{
+ CPUPPCState *env = cpu_env(cs);
+
+ switch (env->excp_model) {
+#if defined(TARGET_PPC64)
+ case POWERPC_EXCP_POWER8:
+ case POWERPC_EXCP_POWER9:
+ case POWERPC_EXCP_POWER10:
+ /*
+ * Machine check codes can be found in processor User Manual or
+ * Linux or skiboot source.
+ */
+ if (access_type == MMU_DATA_LOAD) {
+ env->spr[SPR_DAR] = vaddr;
+ env->spr[SPR_DSISR] = PPC_BIT(57);
+ env->error_code = PPC_BIT(42);
+
+ } else if (access_type == MMU_DATA_STORE) {
+ /*
+ * MCE for stores in POWER is asynchronous so hardware does
+ * not set DAR, but QEMU can do better.
+ */
+ env->spr[SPR_DAR] = vaddr;
+ env->error_code = PPC_BIT(36) | PPC_BIT(43) | PPC_BIT(45);
+ env->error_code |= PPC_BIT(42);
+
+ } else { /* Fetch */
+ /*
+ * is_prefix_insn_excp() tests !PPC_BIT(42) to avoid fetching
+ * the instruction, so that must always be clear for fetches.
+ */
+ env->error_code = PPC_BIT(36) | PPC_BIT(44) | PPC_BIT(45);
+ }
+ break;
+#endif
+ default:
+ /*
+ * TODO: Check behaviour for other CPUs, for now do nothing.
+ * Could add a basic MCE even if real hardware ignores.
+ */
+ return;
+ }
+
+ cs->exception_index = POWERPC_EXCP_MCHECK;
+ cpu_loop_exit_restore(cs, retaddr);
+}
+
+void ppc_cpu_debug_excp_handler(CPUState *cs)
+{
+#if defined(TARGET_PPC64)
+ CPUPPCState *env = cpu_env(cs);
+
+ if (env->insns_flags2 & PPC2_ISA207S) {
+ if (cs->watchpoint_hit) {
+ if (cs->watchpoint_hit->flags & BP_CPU) {
+ env->spr[SPR_DAR] = cs->watchpoint_hit->hitaddr;
+ env->spr[SPR_DSISR] = PPC_BIT(41);
+ cs->watchpoint_hit = NULL;
+ raise_exception(env, POWERPC_EXCP_DSI);
+ }
+ cs->watchpoint_hit = NULL;
+ } else if (cpu_breakpoint_test(cs, env->nip, BP_CPU)) {
+ raise_exception_err(env, POWERPC_EXCP_TRACE,
+ PPC_BIT(33) | PPC_BIT(43));
+ }
+ }
+#endif
+}
+
+bool ppc_cpu_debug_check_breakpoint(CPUState *cs)
+{
+#if defined(TARGET_PPC64)
+ CPUPPCState *env = cpu_env(cs);
+
+ if (env->insns_flags2 & PPC2_ISA207S) {
+ target_ulong priv;
+
+ priv = env->spr[SPR_CIABR] & PPC_BITMASK(62, 63);
+ switch (priv) {
+ case 0x1: /* problem */
+ return env->msr & ((target_ulong)1 << MSR_PR);
+ case 0x2: /* supervisor */
+ return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
+ !(env->msr & ((target_ulong)1 << MSR_HV)));
+ case 0x3: /* hypervisor */
+ return (!(env->msr & ((target_ulong)1 << MSR_PR)) &&
+ (env->msr & ((target_ulong)1 << MSR_HV)));
+ default:
+ g_assert_not_reached();
+ }
+ }
#endif
+
+ return false;
+}
+
+bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
+{
+#if defined(TARGET_PPC64)
+ CPUPPCState *env = cpu_env(cs);
+
+ if (env->insns_flags2 & PPC2_ISA207S) {
+ if (wp == env->dawr0_watchpoint) {
+ uint32_t dawrx = env->spr[SPR_DAWRX0];
+ bool wt = extract32(dawrx, PPC_BIT_NR(59), 1);
+ bool wti = extract32(dawrx, PPC_BIT_NR(60), 1);
+ bool hv = extract32(dawrx, PPC_BIT_NR(61), 1);
+ bool sv = extract32(dawrx, PPC_BIT_NR(62), 1);
+            bool pr = extract32(dawrx, PPC_BIT_NR(63), 1);
+
+ if ((env->msr & ((target_ulong)1 << MSR_PR)) && !pr) {
+ return false;
+ } else if ((env->msr & ((target_ulong)1 << MSR_HV)) && !hv) {
+ return false;
+ } else if (!sv) {
+ return false;
+ }
+
+ if (!wti) {
+ if (env->msr & ((target_ulong)1 << MSR_DR)) {
+ if (!wt) {
+ return false;
+ }
+ } else {
+ if (wt) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+ }
+ }
+#endif
+
+ return false;
+}
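(Worked example, editorial: with DAWRX0 having SV and PR set and WT = WTI = 0, the checks above match problem-state and non-hypervisor supervisor accesses only while translation is off, MSR[DR] = 0; setting WTI = 1 makes the match independent of MSR[DR].)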
+
+#endif /* !CONFIG_USER_ONLY */
+#endif /* CONFIG_TCG */
diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
index c4896cecc8..4b3dcad5d1 100644
--- a/target/ppc/fpu_helper.c
+++ b/target/ppc/fpu_helper.c
@@ -36,6 +36,15 @@ static inline float128 float128_snan_to_qnan(float128 x)
#define float32_snan_to_qnan(x) ((x) | 0x00400000)
#define float16_snan_to_qnan(x) ((x) | 0x0200)
+static inline float32 bfp32_neg(float32 a)
+{
+ if (unlikely(float32_is_any_nan(a))) {
+ return a;
+ } else {
+ return float32_chs(a);
+ }
+}
+
static inline bool fp_exceptions_enabled(CPUPPCState *env)
{
#ifdef CONFIG_USER_ONLY
@@ -132,62 +141,28 @@ static inline int ppc_float64_get_unbiased_exp(float64 f)
return ((f >> 52) & 0x7FF) - 1023;
}
-/* Classify a floating-point number. */
-enum {
- is_normal = 1,
- is_zero = 2,
- is_denormal = 4,
- is_inf = 8,
- is_qnan = 16,
- is_snan = 32,
- is_neg = 64,
-};
-
-#define COMPUTE_CLASS(tp) \
-static int tp##_classify(tp arg) \
-{ \
- int ret = tp##_is_neg(arg) * is_neg; \
- if (unlikely(tp##_is_any_nan(arg))) { \
- float_status dummy = { }; /* snan_bit_is_one = 0 */ \
- ret |= (tp##_is_signaling_nan(arg, &dummy) \
- ? is_snan : is_qnan); \
- } else if (unlikely(tp##_is_infinity(arg))) { \
- ret |= is_inf; \
- } else if (tp##_is_zero(arg)) { \
- ret |= is_zero; \
- } else if (tp##_is_zero_or_denormal(arg)) { \
- ret |= is_denormal; \
- } else { \
- ret |= is_normal; \
- } \
- return ret; \
-}
-
-COMPUTE_CLASS(float16)
-COMPUTE_CLASS(float32)
-COMPUTE_CLASS(float64)
-COMPUTE_CLASS(float128)
-
-static void set_fprf_from_class(CPUPPCState *env, int class)
-{
- static const uint8_t fprf[6][2] = {
- { 0x04, 0x08 }, /* normalized */
- { 0x02, 0x12 }, /* zero */
- { 0x14, 0x18 }, /* denormalized */
- { 0x05, 0x09 }, /* infinity */
- { 0x11, 0x11 }, /* qnan */
- { 0x00, 0x00 }, /* snan -- flags are undefined */
- };
- bool isneg = class & is_neg;
-
- env->fpscr &= ~FP_FPRF;
- env->fpscr |= fprf[ctz32(class)][isneg] << FPSCR_FPRF;
-}
-
-#define COMPUTE_FPRF(tp) \
-void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \
-{ \
- set_fprf_from_class(env, tp##_classify(arg)); \
+#define COMPUTE_FPRF(tp) \
+void helper_compute_fprf_##tp(CPUPPCState *env, tp arg) \
+{ \
+ bool neg = tp##_is_neg(arg); \
+ target_ulong fprf; \
+ if (likely(tp##_is_normal(arg))) { \
+ fprf = neg ? 0x08 << FPSCR_FPRF : 0x04 << FPSCR_FPRF; \
+ } else if (tp##_is_zero(arg)) { \
+ fprf = neg ? 0x12 << FPSCR_FPRF : 0x02 << FPSCR_FPRF; \
+ } else if (tp##_is_zero_or_denormal(arg)) { \
+ fprf = neg ? 0x18 << FPSCR_FPRF : 0x14 << FPSCR_FPRF; \
+ } else if (tp##_is_infinity(arg)) { \
+ fprf = neg ? 0x09 << FPSCR_FPRF : 0x05 << FPSCR_FPRF; \
+ } else { \
+ float_status dummy = { }; /* snan_bit_is_one = 0 */ \
+ if (tp##_is_signaling_nan(arg, &dummy)) { \
+ fprf = 0x00 << FPSCR_FPRF; \
+ } else { \
+ fprf = 0x11 << FPSCR_FPRF; \
+ } \
+ } \
+ env->fpscr = (env->fpscr & ~FP_FPRF) | fprf; \
}
COMPUTE_FPRF(float16)
@@ -202,7 +177,7 @@ static void finish_invalid_op_excp(CPUPPCState *env, int op, uintptr_t retaddr)
env->fpscr |= FP_VX;
/* Update the floating-point exception summary */
env->fpscr |= FP_FX;
- if (fpscr_ve != 0) {
+ if (env->fpscr & FP_VE) {
/* Update the floating-point enabled exception summary */
env->fpscr |= FP_FEX;
if (fp_exceptions_enabled(env)) {
@@ -216,7 +191,7 @@ static void finish_invalid_op_arith(CPUPPCState *env, int op,
bool set_fpcc, uintptr_t retaddr)
{
env->fpscr &= ~(FP_FR | FP_FI);
- if (fpscr_ve == 0) {
+ if (!(env->fpscr & FP_VE)) {
if (set_fpcc) {
env->fpscr &= ~FP_FPCC;
env->fpscr |= (FP_C | FP_FU);
@@ -286,7 +261,7 @@ static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc,
/* Update the floating-point exception summary */
env->fpscr |= FP_FX;
/* We must update the target FPR before raising the exception */
- if (fpscr_ve != 0) {
+ if (env->fpscr & FP_VE) {
CPUState *cs = env_cpu(env);
cs->exception_index = POWERPC_EXCP_PROGRAM;
@@ -303,7 +278,7 @@ static void float_invalid_op_vxcvi(CPUPPCState *env, bool set_fpcc,
{
env->fpscr |= FP_VXCVI;
env->fpscr &= ~(FP_FR | FP_FI);
- if (fpscr_ve == 0) {
+ if (!(env->fpscr & FP_VE)) {
if (set_fpcc) {
env->fpscr &= ~FP_FPCC;
env->fpscr |= (FP_C | FP_FU);
@@ -318,7 +293,7 @@ static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
env->fpscr &= ~(FP_FR | FP_FI);
/* Update the floating-point exception summary */
env->fpscr |= FP_FX;
- if (fpscr_ze != 0) {
+ if (env->fpscr & FP_ZE) {
/* Update the floating-point enabled exception summary */
env->fpscr |= FP_FEX;
if (fp_exceptions_enabled(env)) {
@@ -329,24 +304,24 @@ static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
}
}
-static inline void float_overflow_excp(CPUPPCState *env)
+static inline int float_overflow_excp(CPUPPCState *env)
{
CPUState *cs = env_cpu(env);
env->fpscr |= FP_OX;
/* Update the floating-point exception summary */
env->fpscr |= FP_FX;
- if (fpscr_oe != 0) {
- /* XXX: should adjust the result */
+
+ bool overflow_enabled = !!(env->fpscr & FP_OE);
+ if (overflow_enabled) {
/* Update the floating-point enabled exception summary */
env->fpscr |= FP_FEX;
/* We must update the target FPR before raising the exception */
cs->exception_index = POWERPC_EXCP_PROGRAM;
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
- } else {
- env->fpscr |= FP_XX;
- env->fpscr |= FP_FI;
}
+
+ return overflow_enabled ? 0 : float_flag_inexact;
}
static inline void float_underflow_excp(CPUPPCState *env)
@@ -356,8 +331,7 @@ static inline void float_underflow_excp(CPUPPCState *env)
env->fpscr |= FP_UX;
/* Update the floating-point exception summary */
env->fpscr |= FP_FX;
- if (fpscr_ue != 0) {
- /* XXX: should adjust the result */
+ if (env->fpscr & FP_UE) {
/* Update the floating-point enabled exception summary */
env->fpscr |= FP_FEX;
/* We must update the target FPR before raising the exception */
@@ -370,11 +344,10 @@ static inline void float_inexact_excp(CPUPPCState *env)
{
CPUState *cs = env_cpu(env);
- env->fpscr |= FP_FI;
env->fpscr |= FP_XX;
/* Update the floating-point exception summary */
env->fpscr |= FP_FX;
- if (fpscr_xe != 0) {
+ if (env->fpscr & FP_XE) {
/* Update the floating-point enabled exception summary */
env->fpscr |= FP_FEX;
/* We must update the target FPR before raising the exception */
@@ -414,20 +387,77 @@ void helper_store_fpscr(CPUPPCState *env, uint64_t val, uint32_t nibbles)
ppc_store_fpscr(env, val);
}
-static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
+static void do_fpscr_check_status(CPUPPCState *env, uintptr_t raddr)
+{
+ CPUState *cs = env_cpu(env);
+ target_ulong fpscr = env->fpscr;
+ int error = 0;
+
+ if ((fpscr & FP_OX) && (fpscr & FP_OE)) {
+ error = POWERPC_EXCP_FP_OX;
+ } else if ((fpscr & FP_UX) && (fpscr & FP_UE)) {
+ error = POWERPC_EXCP_FP_UX;
+ } else if ((fpscr & FP_XX) && (fpscr & FP_XE)) {
+ error = POWERPC_EXCP_FP_XX;
+ } else if ((fpscr & FP_ZX) && (fpscr & FP_ZE)) {
+ error = POWERPC_EXCP_FP_ZX;
+ } else if (fpscr & FP_VE) {
+ if (fpscr & FP_VXSOFT) {
+ error = POWERPC_EXCP_FP_VXSOFT;
+ } else if (fpscr & FP_VXSNAN) {
+ error = POWERPC_EXCP_FP_VXSNAN;
+ } else if (fpscr & FP_VXISI) {
+ error = POWERPC_EXCP_FP_VXISI;
+ } else if (fpscr & FP_VXIDI) {
+ error = POWERPC_EXCP_FP_VXIDI;
+ } else if (fpscr & FP_VXZDZ) {
+ error = POWERPC_EXCP_FP_VXZDZ;
+ } else if (fpscr & FP_VXIMZ) {
+ error = POWERPC_EXCP_FP_VXIMZ;
+ } else if (fpscr & FP_VXVC) {
+ error = POWERPC_EXCP_FP_VXVC;
+ } else if (fpscr & FP_VXSQRT) {
+ error = POWERPC_EXCP_FP_VXSQRT;
+ } else if (fpscr & FP_VXCVI) {
+ error = POWERPC_EXCP_FP_VXCVI;
+ } else {
+ return;
+ }
+ } else {
+ return;
+ }
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
+ env->error_code = error | POWERPC_EXCP_FP;
+ env->fpscr |= FP_FEX;
+ /* Deferred floating-point exception after target FPSCR update */
+ if (fp_exceptions_enabled(env)) {
+ raise_exception_err_ra(env, cs->exception_index,
+ env->error_code, raddr);
+ }
+}
+
+void helper_fpscr_check_status(CPUPPCState *env)
+{
+ do_fpscr_check_status(env, GETPC());
+}
+
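do_fpscr_check_status() re-checks FPSCR after an explicit FPSCR update and raises the first exception class that is both pending and enabled, in a fixed priority order. A small stand-alone sketch of that ladder (the bit names are placeholders, not the FP_* masks from cpu.h):

#include <stdio.h>

/* Placeholder (status, enable) bit pairs, loosely modeled on FPSCR. */
enum { OX = 1 << 0, OE = 1 << 1, UX = 1 << 2, UE = 1 << 3,
       XX = 1 << 4, XE = 1 << 5, ZX = 1 << 6, ZE = 1 << 7 };

/* Return a code for the first pending-and-enabled class, mirroring the
 * priority order used above; 0 means nothing is raised. */
static int first_enabled_exception(unsigned fpscr)
{
    if ((fpscr & OX) && (fpscr & OE)) return 1;   /* overflow */
    if ((fpscr & UX) && (fpscr & UE)) return 2;   /* underflow */
    if ((fpscr & XX) && (fpscr & XE)) return 3;   /* inexact */
    if ((fpscr & ZX) && (fpscr & ZE)) return 4;   /* zero divide */
    return 0;
}

int main(void)
{
    printf("%d\n", first_enabled_exception(XX | UE));            /* 0 */
    printf("%d\n", first_enabled_exception(OX | OE | XX | XE));  /* 1 */
    return 0;
}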
+static void do_float_check_status(CPUPPCState *env, bool change_fi,
+ uintptr_t raddr)
{
CPUState *cs = env_cpu(env);
int status = get_float_exception_flags(&env->fp_status);
if (status & float_flag_overflow) {
- float_overflow_excp(env);
+ status |= float_overflow_excp(env);
} else if (status & float_flag_underflow) {
float_underflow_excp(env);
}
if (status & float_flag_inexact) {
float_inexact_excp(env);
- } else {
- env->fpscr &= ~FP_FI; /* clear the FPSCR[FI] bit */
+ }
+ if (change_fi) {
+ env->fpscr = FIELD_DP64(env->fpscr, FPSCR, FI,
+ !!(status & float_flag_inexact));
}
if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
@@ -442,7 +472,7 @@ static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
void helper_float_check_status(CPUPPCState *env)
{
- do_float_check_status(env, GETPC());
+ do_float_check_status(env, true, GETPC());
}
void helper_reset_fpstatus(CPUPPCState *env)
@@ -450,13 +480,12 @@ void helper_reset_fpstatus(CPUPPCState *env)
set_float_exception_flags(0, &env->fp_status);
}
-static void float_invalid_op_addsub(CPUPPCState *env, bool set_fpcc,
- uintptr_t retaddr, int classes)
+static void float_invalid_op_addsub(CPUPPCState *env, int flags,
+ bool set_fpcc, uintptr_t retaddr)
{
- if ((classes & ~is_neg) == is_inf) {
- /* Magnitude subtraction of infinities */
+ if (flags & float_flag_invalid_isi) {
float_invalid_op_vxisi(env, set_fpcc, retaddr);
- } else if (classes & is_snan) {
+ } else if (flags & float_flag_invalid_snan) {
float_invalid_op_vxsnan(env, retaddr);
}
}
@@ -465,39 +494,58 @@ static void float_invalid_op_addsub(CPUPPCState *env, bool set_fpcc,
float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2)
{
float64 ret = float64_add(arg1, arg2, &env->fp_status);
- int status = get_float_exception_flags(&env->fp_status);
+ int flags = get_float_exception_flags(&env->fp_status);
- if (unlikely(status & float_flag_invalid)) {
- float_invalid_op_addsub(env, 1, GETPC(),
- float64_classify(arg1) |
- float64_classify(arg2));
+ if (unlikely(flags & float_flag_invalid)) {
+ float_invalid_op_addsub(env, flags, 1, GETPC());
}
return ret;
}
+/* fadds - fadds. */
+float64 helper_fadds(CPUPPCState *env, float64 arg1, float64 arg2)
+{
+ float64 ret = float64r32_add(arg1, arg2, &env->fp_status);
+ int flags = get_float_exception_flags(&env->fp_status);
+
+ if (unlikely(flags & float_flag_invalid)) {
+ float_invalid_op_addsub(env, flags, 1, GETPC());
+ }
+ return ret;
+}
+
/* fsub - fsub. */
float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2)
{
float64 ret = float64_sub(arg1, arg2, &env->fp_status);
- int status = get_float_exception_flags(&env->fp_status);
+ int flags = get_float_exception_flags(&env->fp_status);
- if (unlikely(status & float_flag_invalid)) {
- float_invalid_op_addsub(env, 1, GETPC(),
- float64_classify(arg1) |
- float64_classify(arg2));
+ if (unlikely(flags & float_flag_invalid)) {
+ float_invalid_op_addsub(env, flags, 1, GETPC());
}
return ret;
}
-static void float_invalid_op_mul(CPUPPCState *env, bool set_fprc,
- uintptr_t retaddr, int classes)
+/* fsubs - fsubs. */
+float64 helper_fsubs(CPUPPCState *env, float64 arg1, float64 arg2)
+{
+ float64 ret = float64r32_sub(arg1, arg2, &env->fp_status);
+ int flags = get_float_exception_flags(&env->fp_status);
+
+ if (unlikely(flags & float_flag_invalid)) {
+ float_invalid_op_addsub(env, flags, 1, GETPC());
+ }
+ return ret;
+}
+
+static void float_invalid_op_mul(CPUPPCState *env, int flags,
+ bool set_fprc, uintptr_t retaddr)
{
- if ((classes & (is_zero | is_inf)) == (is_zero | is_inf)) {
- /* Multiplication of zero by infinity */
+ if (flags & float_flag_invalid_imz) {
float_invalid_op_vximz(env, set_fprc, retaddr);
- } else if (classes & is_snan) {
+ } else if (flags & float_flag_invalid_snan) {
float_invalid_op_vxsnan(env, retaddr);
}
}
@@ -506,28 +554,35 @@ static void float_invalid_op_mul(CPUPPCState *env, bool set_fprc,
float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2)
{
float64 ret = float64_mul(arg1, arg2, &env->fp_status);
- int status = get_float_exception_flags(&env->fp_status);
+ int flags = get_float_exception_flags(&env->fp_status);
- if (unlikely(status & float_flag_invalid)) {
- float_invalid_op_mul(env, 1, GETPC(),
- float64_classify(arg1) |
- float64_classify(arg2));
+ if (unlikely(flags & float_flag_invalid)) {
+ float_invalid_op_mul(env, flags, 1, GETPC());
}
return ret;
}
-static void float_invalid_op_div(CPUPPCState *env, bool set_fprc,
- uintptr_t retaddr, int classes)
+/* fmuls - fmuls. */
+float64 helper_fmuls(CPUPPCState *env, float64 arg1, float64 arg2)
+{
+ float64 ret = float64r32_mul(arg1, arg2, &env->fp_status);
+ int flags = get_float_exception_flags(&env->fp_status);
+
+ if (unlikely(flags & float_flag_invalid)) {
+ float_invalid_op_mul(env, flags, 1, GETPC());
+ }
+ return ret;
+}
+
+static void float_invalid_op_div(CPUPPCState *env, int flags,
+ bool set_fprc, uintptr_t retaddr)
{
- classes &= ~is_neg;
- if (classes == is_inf) {
- /* Division of infinity by infinity */
+ if (flags & float_flag_invalid_idi) {
float_invalid_op_vxidi(env, set_fprc, retaddr);
- } else if (classes == is_zero) {
- /* Division of zero by zero */
+ } else if (flags & float_flag_invalid_zdz) {
float_invalid_op_vxzdz(env, set_fprc, retaddr);
- } else if (classes & is_snan) {
+ } else if (flags & float_flag_invalid_snan) {
float_invalid_op_vxsnan(env, retaddr);
}
}
@@ -536,43 +591,57 @@ static void float_invalid_op_div(CPUPPCState *env, bool set_fprc,
float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2)
{
float64 ret = float64_div(arg1, arg2, &env->fp_status);
- int status = get_float_exception_flags(&env->fp_status);
+ int flags = get_float_exception_flags(&env->fp_status);
- if (unlikely(status)) {
- if (status & float_flag_invalid) {
- float_invalid_op_div(env, 1, GETPC(),
- float64_classify(arg1) |
- float64_classify(arg2));
- }
- if (status & float_flag_divbyzero) {
- float_zero_divide_excp(env, GETPC());
- }
+ if (unlikely(flags & float_flag_invalid)) {
+ float_invalid_op_div(env, flags, 1, GETPC());
+ }
+ if (unlikely(flags & float_flag_divbyzero)) {
+ float_zero_divide_excp(env, GETPC());
}
return ret;
}
-static void float_invalid_cvt(CPUPPCState *env, bool set_fprc,
- uintptr_t retaddr, int class1)
+/* fdivs - fdivs. */
+float64 helper_fdivs(CPUPPCState *env, float64 arg1, float64 arg2)
{
- float_invalid_op_vxcvi(env, set_fprc, retaddr);
- if (class1 & is_snan) {
- float_invalid_op_vxsnan(env, retaddr);
+ float64 ret = float64r32_div(arg1, arg2, &env->fp_status);
+ int flags = get_float_exception_flags(&env->fp_status);
+
+ if (unlikely(flags & float_flag_invalid)) {
+ float_invalid_op_div(env, flags, 1, GETPC());
}
+ if (unlikely(flags & float_flag_divbyzero)) {
+ float_zero_divide_excp(env, GETPC());
+ }
+
+ return ret;
+}
+
+static uint64_t float_invalid_cvt(CPUPPCState *env, int flags,
+ uint64_t ret, uint64_t ret_nan,
+ bool set_fprc, uintptr_t retaddr)
+{
+ /*
+ * VXCVI is different from most in that it sets two exception bits,
+ * VXCVI and VXSNAN for an SNaN input.
+ */
+ if (flags & float_flag_invalid_snan) {
+ env->fpscr |= FP_VXSNAN;
+ }
+ float_invalid_op_vxcvi(env, set_fprc, retaddr);
+
+ return flags & float_flag_invalid_cvti ? ret : ret_nan;
}
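float_invalid_cvt() now chooses between the saturated conversion result and the architected NaN value from the softfloat flags instead of re-classifying the input. A host-FP sketch of that choice (cvt_f64_to_i64() is hypothetical, not QEMU code):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Keep the saturated result for an out-of-range finite input, substitute the
 * architected value (rnan) for a NaN input. */
static int64_t cvt_f64_to_i64(double d, int64_t rnan)
{
    if (isnan(d)) {
        return rnan;
    }
    if (d >= 0x1p63) {
        return INT64_MAX;
    }
    if (d < -0x1p63) {
        return INT64_MIN;
    }
    return (int64_t)d;
}

int main(void)
{
    printf("%lld\n", (long long)cvt_f64_to_i64(1e30, INT64_MIN)); /* INT64_MAX */
    printf("%lld\n", (long long)cvt_f64_to_i64(NAN, INT64_MIN));  /* INT64_MIN */
    return 0;
}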
#define FPU_FCTI(op, cvt, nanval) \
uint64_t helper_##op(CPUPPCState *env, float64 arg) \
{ \
uint64_t ret = float64_to_##cvt(arg, &env->fp_status); \
- int status = get_float_exception_flags(&env->fp_status); \
- \
- if (unlikely(status)) { \
- if (status & float_flag_invalid) { \
- float_invalid_cvt(env, 1, GETPC(), float64_classify(arg)); \
- ret = nanval; \
- } \
- do_float_check_status(env, GETPC()); \
+ int flags = get_float_exception_flags(&env->fp_status); \
+ if (unlikely(flags & float_flag_invalid)) { \
+ ret = float_invalid_cvt(env, flags, ret, nanval, 1, GETPC()); \
} \
return ret; \
}
@@ -597,7 +666,7 @@ uint64_t helper_##op(CPUPPCState *env, uint64_t arg) \
} else { \
farg.d = cvtr(arg, &env->fp_status); \
} \
- do_float_check_status(env, GETPC()); \
+ do_float_check_status(env, true, GETPC()); \
return farg.ll; \
}
@@ -606,32 +675,26 @@ FPU_FCFI(fcfids, int64_to_float32, 1)
FPU_FCFI(fcfidu, uint64_to_float64, 0)
FPU_FCFI(fcfidus, uint64_to_float32, 1)
-static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
- int rounding_mode)
+static uint64_t do_fri(CPUPPCState *env, uint64_t arg,
+ FloatRoundMode rounding_mode)
{
- CPU_DoubleU farg;
FloatRoundMode old_rounding_mode = get_float_rounding_mode(&env->fp_status);
+ int flags;
- farg.ll = arg;
+ set_float_rounding_mode(rounding_mode, &env->fp_status);
+ arg = float64_round_to_int(arg, &env->fp_status);
+ set_float_rounding_mode(old_rounding_mode, &env->fp_status);
- if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
- /* sNaN round */
+ flags = get_float_exception_flags(&env->fp_status);
+ if (flags & float_flag_invalid_snan) {
float_invalid_op_vxsnan(env, GETPC());
- farg.ll = arg | 0x0008000000000000ULL;
- } else {
- int inexact = get_float_exception_flags(&env->fp_status) &
- float_flag_inexact;
- set_float_rounding_mode(rounding_mode, &env->fp_status);
- farg.ll = float64_round_to_int(farg.d, &env->fp_status);
- set_float_rounding_mode(old_rounding_mode, &env->fp_status);
-
- /* fri* does not set FPSCR[XX] */
- if (!inexact) {
- env->fp_status.float_exception_flags &= ~float_flag_inexact;
- }
}
- do_float_check_status(env, GETPC());
- return farg.ll;
+
+ /* fri* does not set FPSCR[XX] */
+ set_float_exception_flags(flags & ~float_flag_inexact, &env->fp_status);
+ do_float_check_status(env, true, GETPC());
+
+ return arg;
}
uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
@@ -654,57 +717,48 @@ uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
return do_fri(env, arg, float_round_down);
}
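The reworked do_fri() simply switches the rounding mode around float64_round_to_int() and then drops the inexact flag, since fri* must not set FPSCR[XX]. A host-C analogue using <fenv.h> (nearbyint() never raises inexact; frin's ties-away mode has no direct C99 counterpart, so only the friz/frip/frim modes are shown):

#include <fenv.h>
#include <math.h>
#include <stdio.h>

/* Round to an integral value in the given mode without leaving the inexact
 * exception raised. */
static double round_in_mode(double x, int mode)
{
    int old = fegetround();
    double r;

    fesetround(mode);
    r = nearbyint(x);
    fesetround(old);
    return r;
}

int main(void)
{
    printf("%g %g %g\n",
           round_in_mode(2.5, FE_TOWARDZERO),   /* 2 (friz) */
           round_in_mode(2.5, FE_UPWARD),       /* 3 (frip) */
           round_in_mode(2.5, FE_DOWNWARD));    /* 2 (frim) */
    return 0;
}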
-#define FPU_MADDSUB_UPDATE(NAME, TP) \
-static void NAME(CPUPPCState *env, TP arg1, TP arg2, TP arg3, \
- unsigned int madd_flags, uintptr_t retaddr) \
-{ \
- if (TP##_is_signaling_nan(arg1, &env->fp_status) || \
- TP##_is_signaling_nan(arg2, &env->fp_status) || \
- TP##_is_signaling_nan(arg3, &env->fp_status)) { \
- /* sNaN operation */ \
- float_invalid_op_vxsnan(env, retaddr); \
- } \
- if ((TP##_is_infinity(arg1) && TP##_is_zero(arg2)) || \
- (TP##_is_zero(arg1) && TP##_is_infinity(arg2))) { \
- /* Multiplication of zero by infinity */ \
- float_invalid_op_vximz(env, 1, retaddr); \
- } \
- if ((TP##_is_infinity(arg1) || TP##_is_infinity(arg2)) && \
- TP##_is_infinity(arg3)) { \
- uint8_t aSign, bSign, cSign; \
- \
- aSign = TP##_is_neg(arg1); \
- bSign = TP##_is_neg(arg2); \
- cSign = TP##_is_neg(arg3); \
- if (madd_flags & float_muladd_negate_c) { \
- cSign ^= 1; \
- } \
- if (aSign ^ bSign ^ cSign) { \
- float_invalid_op_vxisi(env, 1, retaddr); \
- } \
- } \
+static void float_invalid_op_madd(CPUPPCState *env, int flags,
+ bool set_fpcc, uintptr_t retaddr)
+{
+ if (flags & float_flag_invalid_imz) {
+ float_invalid_op_vximz(env, set_fpcc, retaddr);
+ } else {
+ float_invalid_op_addsub(env, flags, set_fpcc, retaddr);
+ }
}
-FPU_MADDSUB_UPDATE(float32_maddsub_update_excp, float32)
-FPU_MADDSUB_UPDATE(float64_maddsub_update_excp, float64)
-#define FPU_FMADD(op, madd_flags) \
-uint64_t helper_##op(CPUPPCState *env, uint64_t arg1, \
- uint64_t arg2, uint64_t arg3) \
-{ \
- uint32_t flags; \
- float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags, \
- &env->fp_status); \
- flags = get_float_exception_flags(&env->fp_status); \
- if (flags) { \
- if (flags & float_flag_invalid) { \
- float64_maddsub_update_excp(env, arg1, arg2, arg3, \
- madd_flags, GETPC()); \
- } \
- do_float_check_status(env, GETPC()); \
- } \
- return ret; \
+static float64 do_fmadd(CPUPPCState *env, float64 a, float64 b,
+ float64 c, int madd_flags, uintptr_t retaddr)
+{
+ float64 ret = float64_muladd(a, b, c, madd_flags, &env->fp_status);
+ int flags = get_float_exception_flags(&env->fp_status);
+
+ if (unlikely(flags & float_flag_invalid)) {
+ float_invalid_op_madd(env, flags, 1, retaddr);
+ }
+ return ret;
+}
+
+static uint64_t do_fmadds(CPUPPCState *env, float64 a, float64 b,
+ float64 c, int madd_flags, uintptr_t retaddr)
+{
+ float64 ret = float64r32_muladd(a, b, c, madd_flags, &env->fp_status);
+ int flags = get_float_exception_flags(&env->fp_status);
+
+ if (unlikely(flags & float_flag_invalid)) {
+ float_invalid_op_madd(env, flags, 1, retaddr);
+ }
+ return ret;
}
+#define FPU_FMADD(op, madd_flags) \
+ uint64_t helper_##op(CPUPPCState *env, uint64_t arg1, \
+ uint64_t arg2, uint64_t arg3) \
+ { return do_fmadd(env, arg1, arg2, arg3, madd_flags, GETPC()); } \
+ uint64_t helper_##op##s(CPUPPCState *env, uint64_t arg1, \
+ uint64_t arg2, uint64_t arg3) \
+ { return do_fmadds(env, arg1, arg2, arg3, madd_flags, GETPC()); }
+
#define MADD_FLGS 0
#define MSUB_FLGS float_muladd_negate_c
#define NMADD_FLGS float_muladd_negate_result
@@ -716,62 +770,62 @@ FPU_FMADD(fmsub, MSUB_FLGS)
FPU_FMADD(fnmsub, NMSUB_FLGS)
/* frsp - frsp. */
-uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
+static uint64_t do_frsp(CPUPPCState *env, uint64_t arg, uintptr_t retaddr)
{
- CPU_DoubleU farg;
- float32 f32;
-
- farg.ll = arg;
+ float32 f32 = float64_to_float32(arg, &env->fp_status);
+ int flags = get_float_exception_flags(&env->fp_status);
- if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
- float_invalid_op_vxsnan(env, GETPC());
+ if (unlikely(flags & float_flag_invalid_snan)) {
+ float_invalid_op_vxsnan(env, retaddr);
}
- f32 = float64_to_float32(farg.d, &env->fp_status);
- farg.d = float32_to_float64(f32, &env->fp_status);
-
- return farg.ll;
+ return helper_todouble(f32);
}
-/* fsqrt - fsqrt. */
-float64 helper_fsqrt(CPUPPCState *env, float64 arg)
+uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
{
- float64 ret = float64_sqrt(arg, &env->fp_status);
- int status = get_float_exception_flags(&env->fp_status);
+ return do_frsp(env, arg, GETPC());
+}
- if (unlikely(status & float_flag_invalid)) {
- if (unlikely(float64_is_any_nan(arg))) {
- if (unlikely(float64_is_signaling_nan(arg, &env->fp_status))) {
- /* sNaN square root */
- float_invalid_op_vxsnan(env, GETPC());
- }
- } else {
- /* Square root of a negative nonzero number */
- float_invalid_op_vxsqrt(env, 1, GETPC());
- }
+static void float_invalid_op_sqrt(CPUPPCState *env, int flags,
+ bool set_fpcc, uintptr_t retaddr)
+{
+ if (unlikely(flags & float_flag_invalid_sqrt)) {
+ float_invalid_op_vxsqrt(env, set_fpcc, retaddr);
+ } else if (unlikely(flags & float_flag_invalid_snan)) {
+ float_invalid_op_vxsnan(env, retaddr);
}
+}
- return ret;
+#define FPU_FSQRT(name, op) \
+float64 helper_##name(CPUPPCState *env, float64 arg) \
+{ \
+ float64 ret = op(arg, &env->fp_status); \
+ int flags = get_float_exception_flags(&env->fp_status); \
+ \
+ if (unlikely(flags & float_flag_invalid)) { \
+ float_invalid_op_sqrt(env, flags, 1, GETPC()); \
+ } \
+ \
+ return ret; \
}
+FPU_FSQRT(FSQRT, float64_sqrt)
+FPU_FSQRT(FSQRTS, float64r32_sqrt)
+
/* fre - fre. */
float64 helper_fre(CPUPPCState *env, float64 arg)
{
/* "Estimate" the reciprocal with actual division. */
float64 ret = float64_div(float64_one, arg, &env->fp_status);
- int status = get_float_exception_flags(&env->fp_status);
+ int flags = get_float_exception_flags(&env->fp_status);
- if (unlikely(status)) {
- if (status & float_flag_invalid) {
- if (float64_is_signaling_nan(arg, &env->fp_status)) {
- /* sNaN reciprocal */
- float_invalid_op_vxsnan(env, GETPC());
- }
- }
- if (status & float_flag_divbyzero) {
- float_zero_divide_excp(env, GETPC());
- /* For FPSCR.ZE == 0, the result is 1/2. */
- ret = float64_set_sign(float64_half, float64_is_neg(arg));
- }
+ if (unlikely(flags & float_flag_invalid_snan)) {
+ float_invalid_op_vxsnan(env, GETPC());
+ }
+ if (unlikely(flags & float_flag_divbyzero)) {
+ float_zero_divide_excp(env, GETPC());
+ /* For FPSCR.ZE == 0, the result is 1/2. */
+ ret = float64_set_sign(float64_half, float64_is_neg(arg));
}
return ret;
@@ -780,20 +834,20 @@ float64 helper_fre(CPUPPCState *env, float64 arg)
/* fres - fres. */
uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
{
- CPU_DoubleU farg;
- float32 f32;
-
- farg.ll = arg;
+ /* "Estimate" the reciprocal with actual division. */
+ float64 ret = float64r32_div(float64_one, arg, &env->fp_status);
+ int flags = get_float_exception_flags(&env->fp_status);
- if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
- /* sNaN reciprocal */
+ if (unlikely(flags & float_flag_invalid_snan)) {
float_invalid_op_vxsnan(env, GETPC());
}
- farg.d = float64_div(float64_one, farg.d, &env->fp_status);
- f32 = float64_to_float32(farg.d, &env->fp_status);
- farg.d = float32_to_float64(f32, &env->fp_status);
+ if (unlikely(flags & float_flag_divbyzero)) {
+ float_zero_divide_excp(env, GETPC());
+ /* For FPSCR.ZE == 0, the result is 1/2. */
+ ret = float64_set_sign(float64_half, float64_is_neg(arg));
+ }
- return farg.ll;
+ return ret;
}
/* frsqrte - frsqrte. */
@@ -802,40 +856,50 @@ float64 helper_frsqrte(CPUPPCState *env, float64 arg)
/* "Estimate" the reciprocal with actual division. */
float64 rets = float64_sqrt(arg, &env->fp_status);
float64 retd = float64_div(float64_one, rets, &env->fp_status);
- int status = get_float_exception_flags(&env->fp_status);
+ int flags = get_float_exception_flags(&env->fp_status);
- if (unlikely(status)) {
- if (status & float_flag_invalid) {
- if (float64_is_signaling_nan(arg, &env->fp_status)) {
- /* sNaN reciprocal */
- float_invalid_op_vxsnan(env, GETPC());
- } else {
- /* Square root of a negative nonzero number */
- float_invalid_op_vxsqrt(env, 1, GETPC());
- }
- }
- if (status & float_flag_divbyzero) {
- /* Reciprocal of (square root of) zero. */
- float_zero_divide_excp(env, GETPC());
- }
+ if (unlikely(flags & float_flag_invalid)) {
+ float_invalid_op_sqrt(env, flags, 1, GETPC());
+ }
+ if (unlikely(flags & float_flag_divbyzero)) {
+ /* Reciprocal of (square root of) zero. */
+ float_zero_divide_excp(env, GETPC());
+ }
+
+ return retd;
+}
+
+/* frsqrtes - frsqrtes. */
+float64 helper_frsqrtes(CPUPPCState *env, float64 arg)
+{
+ /* "Estimate" the reciprocal with actual division. */
+ float64 rets = float64_sqrt(arg, &env->fp_status);
+ float64 retd = float64r32_div(float64_one, rets, &env->fp_status);
+ int flags = get_float_exception_flags(&env->fp_status);
+
+ if (unlikely(flags & float_flag_invalid)) {
+ float_invalid_op_sqrt(env, flags, 1, GETPC());
+ }
+ if (unlikely(flags & float_flag_divbyzero)) {
+ /* Reciprocal of (square root of) zero. */
+ float_zero_divide_excp(env, GETPC());
}
return retd;
}
/* fsel - fsel. */
-uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
- uint64_t arg3)
+uint64_t helper_FSEL(uint64_t a, uint64_t b, uint64_t c)
{
- CPU_DoubleU farg1;
+ CPU_DoubleU fa;
- farg1.ll = arg1;
+ fa.ll = a;
- if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
- !float64_is_any_nan(farg1.d)) {
- return arg2;
+ if ((!float64_is_neg(fa.d) || float64_is_zero(fa.d)) &&
+ !float64_is_any_nan(fa.d)) {
+ return c;
} else {
- return arg3;
+ return b;
}
}
@@ -1598,13 +1662,13 @@ uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
* fld - vsr_t field (VsrD(*) or VsrW(*))
- * sfprf - set FPRF
+ * sfifprf - set FI and FPRF
*/
-#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp) \
+#define VSX_ADD_SUB(name, op, nels, tp, fld, sfifprf, r2sp) \
void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \
ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t t = *xt; \
+ ppc_vsr_t t = { }; \
int i; \
\
helper_reset_fpstatus(env); \
@@ -1616,21 +1680,20 @@ void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- float_invalid_op_addsub(env, sfprf, GETPC(), \
- tp##_classify(xa->fld) | \
- tp##_classify(xb->fld)); \
+ float_invalid_op_addsub(env, tstat.float_exception_flags, \
+ sfifprf, GETPC()); \
} \
\
if (r2sp) { \
- t.fld = helper_frsp(env, t.fld); \
+ t.fld = do_frsp(env, t.fld, GETPC()); \
} \
\
- if (sfprf) { \
+ if (sfifprf) { \
helper_compute_fprf_float64(env, t.fld); \
} \
} \
*xt = t; \
- do_float_check_status(env, GETPC()); \
+ do_float_check_status(env, sfifprf, GETPC()); \
}
VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
@@ -1660,15 +1723,13 @@ void helper_xsaddqp(CPUPPCState *env, uint32_t opcode,
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
- float_invalid_op_addsub(env, 1, GETPC(),
- float128_classify(xa->f128) |
- float128_classify(xb->f128));
+ float_invalid_op_addsub(env, tstat.float_exception_flags, 1, GETPC());
}
helper_compute_fprf_float128(env, t.f128);
*xt = t;
- do_float_check_status(env, GETPC());
+ do_float_check_status(env, true, GETPC());
}
/*
@@ -1677,13 +1738,13 @@ void helper_xsaddqp(CPUPPCState *env, uint32_t opcode,
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
* fld - vsr_t field (VsrD(*) or VsrW(*))
- * sfprf - set FPRF
+ * sfifprf - set FI and FPRF
*/
-#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp) \
+#define VSX_MUL(op, nels, tp, fld, sfifprf, r2sp) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t t = *xt; \
+ ppc_vsr_t t = { }; \
int i; \
\
helper_reset_fpstatus(env); \
@@ -1695,22 +1756,21 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- float_invalid_op_mul(env, sfprf, GETPC(), \
- tp##_classify(xa->fld) | \
- tp##_classify(xb->fld)); \
+ float_invalid_op_mul(env, tstat.float_exception_flags, \
+ sfifprf, GETPC()); \
} \
\
if (r2sp) { \
- t.fld = helper_frsp(env, t.fld); \
+ t.fld = do_frsp(env, t.fld, GETPC()); \
} \
\
- if (sfprf) { \
+ if (sfifprf) { \
helper_compute_fprf_float64(env, t.fld); \
} \
} \
\
*xt = t; \
- do_float_check_status(env, GETPC()); \
+ do_float_check_status(env, sfifprf, GETPC()); \
}
VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
@@ -1735,14 +1795,12 @@ void helper_xsmulqp(CPUPPCState *env, uint32_t opcode,
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
- float_invalid_op_mul(env, 1, GETPC(),
- float128_classify(xa->f128) |
- float128_classify(xb->f128));
+ float_invalid_op_mul(env, tstat.float_exception_flags, 1, GETPC());
}
helper_compute_fprf_float128(env, t.f128);
*xt = t;
- do_float_check_status(env, GETPC());
+ do_float_check_status(env, true, GETPC());
}
/*
@@ -1751,13 +1809,13 @@ void helper_xsmulqp(CPUPPCState *env, uint32_t opcode,
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
* fld - vsr_t field (VsrD(*) or VsrW(*))
- * sfprf - set FPRF
+ * sfifprf - set FI and FPRF
*/
-#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp) \
+#define VSX_DIV(op, nels, tp, fld, sfifprf, r2sp) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t t = *xt; \
+ ppc_vsr_t t = { }; \
int i; \
\
helper_reset_fpstatus(env); \
@@ -1769,25 +1827,24 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- float_invalid_op_div(env, sfprf, GETPC(), \
- tp##_classify(xa->fld) | \
- tp##_classify(xb->fld)); \
+ float_invalid_op_div(env, tstat.float_exception_flags, \
+ sfifprf, GETPC()); \
} \
if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) { \
float_zero_divide_excp(env, GETPC()); \
} \
\
if (r2sp) { \
- t.fld = helper_frsp(env, t.fld); \
+ t.fld = do_frsp(env, t.fld, GETPC()); \
} \
\
- if (sfprf) { \
+ if (sfifprf) { \
helper_compute_fprf_float64(env, t.fld); \
} \
} \
\
*xt = t; \
- do_float_check_status(env, GETPC()); \
+ do_float_check_status(env, sfifprf, GETPC()); \
}
VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
@@ -1812,9 +1869,7 @@ void helper_xsdivqp(CPUPPCState *env, uint32_t opcode,
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
- float_invalid_op_div(env, 1, GETPC(),
- float128_classify(xa->f128) |
- float128_classify(xb->f128));
+ float_invalid_op_div(env, tstat.float_exception_flags, 1, GETPC());
}
if (unlikely(tstat.float_exception_flags & float_flag_divbyzero)) {
float_zero_divide_excp(env, GETPC());
@@ -1822,7 +1877,7 @@ void helper_xsdivqp(CPUPPCState *env, uint32_t opcode,
helper_compute_fprf_float128(env, t.f128);
*xt = t;
- do_float_check_status(env, GETPC());
+ do_float_check_status(env, true, GETPC());
}
/*
@@ -1831,12 +1886,12 @@ void helper_xsdivqp(CPUPPCState *env, uint32_t opcode,
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
* fld - vsr_t field (VsrD(*) or VsrW(*))
- * sfprf - set FPRF
+ * sfifprf - set FI and FPRF
*/
-#define VSX_RE(op, nels, tp, fld, sfprf, r2sp) \
+#define VSX_RE(op, nels, tp, fld, sfifprf, r2sp) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t t = *xt; \
+ ppc_vsr_t t = { }; \
int i; \
\
helper_reset_fpstatus(env); \
@@ -1848,16 +1903,16 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
t.fld = tp##_div(tp##_one, xb->fld, &env->fp_status); \
\
if (r2sp) { \
- t.fld = helper_frsp(env, t.fld); \
+ t.fld = do_frsp(env, t.fld, GETPC()); \
} \
\
- if (sfprf) { \
+ if (sfifprf) { \
helper_compute_fprf_float64(env, t.fld); \
} \
} \
\
*xt = t; \
- do_float_check_status(env, GETPC()); \
+ do_float_check_status(env, sfifprf, GETPC()); \
}
VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
@@ -1871,12 +1926,12 @@ VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
* fld - vsr_t field (VsrD(*) or VsrW(*))
- * sfprf - set FPRF
+ * sfifprf - set FI and FPRF
*/
-#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp) \
+#define VSX_SQRT(op, nels, tp, fld, sfifprf, r2sp) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t t = *xt; \
+ ppc_vsr_t t = { }; \
int i; \
\
helper_reset_fpstatus(env); \
@@ -1888,24 +1943,21 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) { \
- float_invalid_op_vxsqrt(env, sfprf, GETPC()); \
- } else if (tp##_is_signaling_nan(xb->fld, &tstat)) { \
- float_invalid_op_vxsnan(env, GETPC()); \
- } \
+ float_invalid_op_sqrt(env, tstat.float_exception_flags, \
+ sfifprf, GETPC()); \
} \
\
if (r2sp) { \
- t.fld = helper_frsp(env, t.fld); \
+ t.fld = do_frsp(env, t.fld, GETPC()); \
} \
\
- if (sfprf) { \
+ if (sfifprf) { \
helper_compute_fprf_float64(env, t.fld); \
} \
} \
\
*xt = t; \
- do_float_check_status(env, GETPC()); \
+ do_float_check_status(env, sfifprf, GETPC()); \
}
VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
@@ -1919,12 +1971,12 @@ VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
* fld - vsr_t field (VsrD(*) or VsrW(*))
- * sfprf - set FPRF
+ * sfifprf - set FI and FPRF
*/
-#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp) \
+#define VSX_RSQRTE(op, nels, tp, fld, sfifprf, r2sp) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t t = *xt; \
+ ppc_vsr_t t = { }; \
int i; \
\
helper_reset_fpstatus(env); \
@@ -1935,26 +1987,21 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
t.fld = tp##_sqrt(xb->fld, &tstat); \
t.fld = tp##_div(tp##_one, t.fld, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
- \
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- if (tp##_is_neg(xb->fld) && !tp##_is_zero(xb->fld)) { \
- float_invalid_op_vxsqrt(env, sfprf, GETPC()); \
- } else if (tp##_is_signaling_nan(xb->fld, &tstat)) { \
- float_invalid_op_vxsnan(env, GETPC()); \
- } \
+ float_invalid_op_sqrt(env, tstat.float_exception_flags, \
+ sfifprf, GETPC()); \
} \
- \
if (r2sp) { \
- t.fld = helper_frsp(env, t.fld); \
+ t.fld = do_frsp(env, t.fld, GETPC()); \
} \
\
- if (sfprf) { \
+ if (sfifprf) { \
helper_compute_fprf_float64(env, t.fld); \
} \
} \
\
*xt = t; \
- do_float_check_status(env, GETPC()); \
+ do_float_check_status(env, sfifprf, GETPC()); \
}
VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
@@ -2080,13 +2127,13 @@ VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
* fld - vsr_t field (VsrD(*) or VsrW(*))
* maddflgs - flags for the float*muladd routine that control the
* various forms (madd, msub, nmadd, nmsub)
- * sfprf - set FPRF
+ * sfifprf - set FI and FPRF
*/
-#define VSX_MADD(op, nels, tp, fld, maddflgs, sfprf, r2sp) \
+#define VSX_MADD(op, nels, tp, fld, maddflgs, sfifprf) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
- ppc_vsr_t *xa, ppc_vsr_t *b, ppc_vsr_t *c) \
+ ppc_vsr_t *s1, ppc_vsr_t *s2, ppc_vsr_t *s3) \
{ \
- ppc_vsr_t t = *xt; \
+ ppc_vsr_t t = { }; \
int i; \
\
helper_reset_fpstatus(env); \
@@ -2094,108 +2141,129 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
- /* \
- * Avoid double rounding errors by rounding the intermediate \
- * result to odd. \
- */ \
- set_float_rounding_mode(float_round_to_zero, &tstat); \
- t.fld = tp##_muladd(xa->fld, b->fld, c->fld, \
- maddflgs, &tstat); \
- t.fld |= (get_float_exception_flags(&tstat) & \
- float_flag_inexact) != 0; \
- } else { \
- t.fld = tp##_muladd(xa->fld, b->fld, c->fld, \
- maddflgs, &tstat); \
- } \
+ t.fld = tp##_muladd(s1->fld, s3->fld, s2->fld, maddflgs, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- tp##_maddsub_update_excp(env, xa->fld, b->fld, \
- c->fld, maddflgs, GETPC()); \
+ float_invalid_op_madd(env, tstat.float_exception_flags, \
+ sfifprf, GETPC()); \
} \
\
- if (r2sp) { \
- t.fld = helper_frsp(env, t.fld); \
- } \
- \
- if (sfprf) { \
+ if (sfifprf) { \
helper_compute_fprf_float64(env, t.fld); \
} \
} \
*xt = t; \
- do_float_check_status(env, GETPC()); \
+ do_float_check_status(env, sfifprf, GETPC()); \
}
-VSX_MADD(xsmadddp, 1, float64, VsrD(0), MADD_FLGS, 1, 0)
-VSX_MADD(xsmsubdp, 1, float64, VsrD(0), MSUB_FLGS, 1, 0)
-VSX_MADD(xsnmadddp, 1, float64, VsrD(0), NMADD_FLGS, 1, 0)
-VSX_MADD(xsnmsubdp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 0)
-VSX_MADD(xsmaddsp, 1, float64, VsrD(0), MADD_FLGS, 1, 1)
-VSX_MADD(xsmsubsp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1)
-VSX_MADD(xsnmaddsp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1)
-VSX_MADD(xsnmsubsp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1)
+VSX_MADD(XSMADDDP, 1, float64, VsrD(0), MADD_FLGS, 1)
+VSX_MADD(XSMSUBDP, 1, float64, VsrD(0), MSUB_FLGS, 1)
+VSX_MADD(XSNMADDDP, 1, float64, VsrD(0), NMADD_FLGS, 1)
+VSX_MADD(XSNMSUBDP, 1, float64, VsrD(0), NMSUB_FLGS, 1)
+VSX_MADD(XSMADDSP, 1, float64r32, VsrD(0), MADD_FLGS, 1)
+VSX_MADD(XSMSUBSP, 1, float64r32, VsrD(0), MSUB_FLGS, 1)
+VSX_MADD(XSNMADDSP, 1, float64r32, VsrD(0), NMADD_FLGS, 1)
+VSX_MADD(XSNMSUBSP, 1, float64r32, VsrD(0), NMSUB_FLGS, 1)
-VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0, 0)
-VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0)
-VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0)
-VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0)
+VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0)
+VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0)
+VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0)
+VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0)
-VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0)
-VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0)
-VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0)
-VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0)
+VSX_MADD(xvmaddsp, 4, float32, VsrW(i), MADD_FLGS, 0)
+VSX_MADD(xvmsubsp, 4, float32, VsrW(i), MSUB_FLGS, 0)
+VSX_MADD(xvnmaddsp, 4, float32, VsrW(i), NMADD_FLGS, 0)
+VSX_MADD(xvnmsubsp, 4, float32, VsrW(i), NMSUB_FLGS, 0)
/*
- * VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
+ * VSX_MADDQ - VSX floating point quad-precision multiply/add
* op - instruction mnemonic
+ * maddflgs - flags for the float*muladd routine that control the
+ * various forms (madd, msub, nmadd, nmsub)
+ * ro - round to odd
+ */
+#define VSX_MADDQ(op, maddflgs, ro) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *s1, ppc_vsr_t *s2,\
+ ppc_vsr_t *s3) \
+{ \
+ ppc_vsr_t t = *xt; \
+ \
+ helper_reset_fpstatus(env); \
+ \
+ float_status tstat = env->fp_status; \
+ set_float_exception_flags(0, &tstat); \
+ if (ro) { \
+ tstat.float_rounding_mode = float_round_to_odd; \
+ } \
+ t.f128 = float128_muladd(s1->f128, s3->f128, s2->f128, maddflgs, &tstat); \
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
+ \
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
+ float_invalid_op_madd(env, tstat.float_exception_flags, \
+ false, GETPC()); \
+ } \
+ \
+ helper_compute_fprf_float128(env, t.f128); \
+ *xt = t; \
+ do_float_check_status(env, true, GETPC()); \
+}
+
+VSX_MADDQ(XSMADDQP, MADD_FLGS, 0)
+VSX_MADDQ(XSMADDQPO, MADD_FLGS, 1)
+VSX_MADDQ(XSMSUBQP, MSUB_FLGS, 0)
+VSX_MADDQ(XSMSUBQPO, MSUB_FLGS, 1)
+VSX_MADDQ(XSNMADDQP, NMADD_FLGS, 0)
+VSX_MADDQ(XSNMADDQPO, NMADD_FLGS, 1)
+VSX_MADDQ(XSNMSUBQP, NMSUB_FLGS, 0)
+VSX_MADDQ(XSNMSUBQPO, NMSUB_FLGS, 1)
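The "O" forms force float_round_to_odd so that a later re-rounding of the 128-bit result cannot pick up a double-rounding error. A tiny sketch of round-to-odd ("jamming") on an integer significand, hypothetical helper only:

#include <stdint.h>
#include <stdio.h>

/* Narrow a 64-bit significand to 32 bits, forcing the low kept bit to 1
 * whenever any discarded bit is non-zero. */
static uint32_t round_to_odd32(uint64_t sig64)
{
    uint32_t hi = sig64 >> 32;
    return hi | (uint32_t)((sig64 & 0xffffffffu) != 0);
}

int main(void)
{
    printf("0x%08x\n", round_to_odd32(0x0000000200000001ULL)); /* 0x00000003 */
    printf("0x%08x\n", round_to_odd32(0x0000000200000000ULL)); /* 0x00000002 */
    return 0;
}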
+
+/*
+ * VSX_SCALAR_CMP - VSX scalar floating point compare
+ * op - instruction mnemonic
+ * tp - type
* cmp - comparison operation
- * exp - expected result of comparison
+ * fld - vsr_t field
* svxvc - set VXVC bit
*/
-#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc) \
-void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
- ppc_vsr_t *xa, ppc_vsr_t *xb) \
+#define VSX_SCALAR_CMP(op, tp, cmp, fld, svxvc) \
+ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
+ ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t t = *xt; \
- bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false; \
+ int flags; \
+ bool r, vxvc; \
\
- if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \
- float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
- vxsnan_flag = true; \
- if (fpscr_ve == 0 && svxvc) { \
- vxvc_flag = true; \
- } \
- } else if (svxvc) { \
- vxvc_flag = float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) || \
- float64_is_quiet_nan(xb->VsrD(0), &env->fp_status); \
- } \
- if (vxsnan_flag) { \
- float_invalid_op_vxsnan(env, GETPC()); \
- } \
- if (vxvc_flag) { \
- float_invalid_op_vxvc(env, 0, GETPC()); \
+ helper_reset_fpstatus(env); \
+ \
+ if (svxvc) { \
+ r = tp##_##cmp(xb->fld, xa->fld, &env->fp_status); \
+ } else { \
+ r = tp##_##cmp##_quiet(xb->fld, xa->fld, &env->fp_status); \
} \
- vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag); \
\
- if (!vex_flag) { \
- if (float64_##cmp(xb->VsrD(0), xa->VsrD(0), \
- &env->fp_status) == exp) { \
- t.VsrD(0) = -1; \
- t.VsrD(1) = 0; \
- } else { \
- t.VsrD(0) = 0; \
- t.VsrD(1) = 0; \
+ flags = get_float_exception_flags(&env->fp_status); \
+ if (unlikely(flags & float_flag_invalid)) { \
+ vxvc = svxvc; \
+ if (flags & float_flag_invalid_snan) { \
+ float_invalid_op_vxsnan(env, GETPC()); \
+ vxvc &= !(env->fpscr & FP_VE); \
+ } \
+ if (vxvc) { \
+ float_invalid_op_vxvc(env, 0, GETPC()); \
} \
} \
- *xt = t; \
- do_float_check_status(env, GETPC()); \
+ \
+ memset(xt, 0, sizeof(*xt)); \
+ memset(&xt->fld, -r, sizeof(xt->fld)); \
+ do_float_check_status(env, false, GETPC()); \
}
-VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
-VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
-VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
-VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
+VSX_SCALAR_CMP(XSCMPEQDP, float64, eq, VsrD(0), 0)
+VSX_SCALAR_CMP(XSCMPGEDP, float64, le, VsrD(0), 1)
+VSX_SCALAR_CMP(XSCMPGTDP, float64, lt, VsrD(0), 1)
+VSX_SCALAR_CMP(XSCMPEQQP, float128, eq, f128, 0)
+VSX_SCALAR_CMP(XSCMPGEQP, float128, le, f128, 1)
+VSX_SCALAR_CMP(XSCMPGTQP, float128, lt, f128, 1)
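The rewritten compare writes its boolean result into the target field as an all-ones/all-zeros mask via memset(&xt->fld, -r, sizeof(xt->fld)). A stand-alone illustration of that trick:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint64_t field;
    int r = 1;                              /* comparison result, 0 or 1 */

    /* memset's fill byte is taken modulo 256: -1 fills with 0xff, 0 with 0x00. */
    memset(&field, -r, sizeof(field));
    printf("%016llx\n", (unsigned long long)field);   /* ffffffffffffffff */
    return 0;
}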
void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode,
ppc_vsr_t *xa, ppc_vsr_t *xb)
@@ -2223,7 +2291,7 @@ void helper_xscmpexpdp(CPUPPCState *env, uint32_t opcode,
env->fpscr |= cc << FPSCR_FPCC;
env->crf[BF(opcode)] = cc;
- do_float_check_status(env, GETPC());
+ do_float_check_status(env, false, GETPC());
}
void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode,
@@ -2252,7 +2320,7 @@ void helper_xscmpexpqp(CPUPPCState *env, uint32_t opcode,
env->fpscr |= cc << FPSCR_FPCC;
env->crf[BF(opcode)] = cc;
- do_float_check_status(env, GETPC());
+ do_float_check_status(env, false, GETPC());
}
static inline void do_scalar_cmp(CPUPPCState *env, ppc_vsr_t *xa, ppc_vsr_t *xb,
@@ -2279,7 +2347,7 @@ static inline void do_scalar_cmp(CPUPPCState *env, ppc_vsr_t *xa, ppc_vsr_t *xb,
if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) ||
float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) {
vxsnan_flag = true;
- if (fpscr_ve == 0 && ordered) {
+ if (!(env->fpscr & FP_VE) && ordered) {
vxvc_flag = true;
}
} else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) ||
@@ -2305,7 +2373,7 @@ static inline void do_scalar_cmp(CPUPPCState *env, ppc_vsr_t *xa, ppc_vsr_t *xb,
float_invalid_op_vxvc(env, 0, GETPC());
}
- do_float_check_status(env, GETPC());
+ do_float_check_status(env, false, GETPC());
}
void helper_xscmpodp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
@@ -2344,7 +2412,7 @@ static inline void do_scalar_cmpq(CPUPPCState *env, ppc_vsr_t *xa,
if (float128_is_signaling_nan(xa->f128, &env->fp_status) ||
float128_is_signaling_nan(xb->f128, &env->fp_status)) {
vxsnan_flag = true;
- if (fpscr_ve == 0 && ordered) {
+ if (!(env->fpscr & FP_VE) && ordered) {
vxvc_flag = true;
}
} else if (float128_is_quiet_nan(xa->f128, &env->fp_status) ||
@@ -2370,7 +2438,7 @@ static inline void do_scalar_cmpq(CPUPPCState *env, ppc_vsr_t *xa,
float_invalid_op_vxvc(env, 0, GETPC());
}
- do_float_check_status(env, GETPC());
+ do_float_check_status(env, false, GETPC());
}
void helper_xscmpoqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
@@ -2397,7 +2465,7 @@ void helper_xscmpuqp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xa,
void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \
ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t t = *xt; \
+ ppc_vsr_t t = { }; \
int i; \
\
for (i = 0; i < nels; i++) { \
@@ -2409,7 +2477,7 @@ void helper_##name(CPUPPCState *env, ppc_vsr_t *xt, \
} \
\
*xt = t; \
- do_float_check_status(env, GETPC()); \
+ do_float_check_status(env, false, GETPC()); \
}
VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
@@ -2419,46 +2487,43 @@ VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
-#define VSX_MAX_MINC(name, max) \
-void helper_##name(CPUPPCState *env, uint32_t opcode, \
+#define VSX_MAX_MINC(name, max, tp, fld) \
+void helper_##name(CPUPPCState *env, \
ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t t = *xt; \
- bool vxsnan_flag = false, vex_flag = false; \
+ ppc_vsr_t t = { }; \
+ bool first; \
\
- if (unlikely(float64_is_any_nan(xa->VsrD(0)) || \
- float64_is_any_nan(xb->VsrD(0)))) { \
- if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || \
- float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
- vxsnan_flag = true; \
- } \
- t.VsrD(0) = xb->VsrD(0); \
- } else if ((max && \
- !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) || \
- (!max && \
- float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) { \
- t.VsrD(0) = xa->VsrD(0); \
+ helper_reset_fpstatus(env); \
+ \
+ if (max) { \
+ first = tp##_le_quiet(xb->fld, xa->fld, &env->fp_status); \
} else { \
- t.VsrD(0) = xb->VsrD(0); \
+ first = tp##_lt_quiet(xa->fld, xb->fld, &env->fp_status); \
} \
\
- vex_flag = fpscr_ve & vxsnan_flag; \
- if (vxsnan_flag) { \
- float_invalid_op_vxsnan(env, GETPC()); \
- } \
- if (!vex_flag) { \
- *xt = t; \
+ if (first) { \
+ t.fld = xa->fld; \
+ } else { \
+ t.fld = xb->fld; \
+ if (env->fp_status.float_exception_flags & float_flag_invalid_snan) { \
+ float_invalid_op_vxsnan(env, GETPC()); \
+ } \
} \
-} \
+ \
+ *xt = t; \
+}
-VSX_MAX_MINC(xsmaxcdp, 1);
-VSX_MAX_MINC(xsmincdp, 0);
+VSX_MAX_MINC(XSMAXCDP, true, float64, VsrD(0));
+VSX_MAX_MINC(XSMINCDP, false, float64, VsrD(0));
+VSX_MAX_MINC(XSMAXCQP, true, float128, f128);
+VSX_MAX_MINC(XSMINCQP, false, float128, f128);
#define VSX_MAX_MINJ(name, max) \
-void helper_##name(CPUPPCState *env, uint32_t opcode, \
+void helper_##name(CPUPPCState *env, \
ppc_vsr_t *xt, ppc_vsr_t *xa, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t t = *xt; \
+ ppc_vsr_t t = { }; \
bool vxsnan_flag = false, vex_flag = false; \
\
if (unlikely(float64_is_any_nan(xa->VsrD(0)))) { \
@@ -2497,7 +2562,7 @@ void helper_##name(CPUPPCState *env, uint32_t opcode, \
t.VsrD(0) = xb->VsrD(0); \
} \
\
- vex_flag = fpscr_ve & vxsnan_flag; \
+ vex_flag = (env->fpscr & FP_VE) && vxsnan_flag; \
if (vxsnan_flag) { \
float_invalid_op_vxsnan(env, GETPC()); \
} \
@@ -2506,8 +2571,8 @@ void helper_##name(CPUPPCState *env, uint32_t opcode, \
} \
} \
-VSX_MAX_MINJ(xsmaxjdp, 1);
-VSX_MAX_MINJ(xsminjdp, 0);
+VSX_MAX_MINJ(XSMAXJDP, 1);
+VSX_MAX_MINJ(XSMINJDP, 0);
/*
* VSX_CMP - VSX floating point compare
@@ -2529,6 +2594,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
int all_true = 1; \
int all_false = 1; \
\
+ helper_reset_fpstatus(env); \
+ \
for (i = 0; i < nels; i++) { \
if (unlikely(tp##_is_any_nan(xa->fld) || \
tp##_is_any_nan(xb->fld))) { \
@@ -2574,14 +2641,16 @@ VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
* ttp - target type (float32 or float64)
* sfld - source vsr_t field
* tfld - target vsr_t field (f32 or f64)
- * sfprf - set FPRF
+ * sfifprf - set FI and FPRF
*/
-#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf) \
+#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfifprf) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t t = *xt; \
+ ppc_vsr_t t = { }; \
int i; \
\
+ helper_reset_fpstatus(env); \
+ \
for (i = 0; i < nels; i++) { \
t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
if (unlikely(stp##_is_signaling_nan(xb->sfld, \
@@ -2589,20 +2658,46 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
float_invalid_op_vxsnan(env, GETPC()); \
t.tfld = ttp##_snan_to_qnan(t.tfld); \
} \
- if (sfprf) { \
+ if (sfifprf) { \
helper_compute_fprf_##ttp(env, t.tfld); \
} \
} \
\
*xt = t; \
- do_float_check_status(env, GETPC()); \
+ do_float_check_status(env, sfifprf, GETPC()); \
}
-VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
-VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2 * i), 0)
VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
+#define VSX_CVT_FP_TO_FP2(op, nels, stp, ttp, sfifprf) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
+{ \
+ ppc_vsr_t t = { }; \
+ int i; \
+ \
+ helper_reset_fpstatus(env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status); \
+ if (unlikely(stp##_is_signaling_nan(xb->VsrD(i), \
+ &env->fp_status))) { \
+ float_invalid_op_vxsnan(env, GETPC()); \
+ t.VsrW(2 * i) = ttp##_snan_to_qnan(t.VsrW(2 * i)); \
+ } \
+ if (sfifprf) { \
+ helper_compute_fprf_##ttp(env, t.VsrW(2 * i)); \
+ } \
+ t.VsrW(2 * i + 1) = t.VsrW(2 * i); \
+ } \
+ \
+ *xt = t; \
+ do_float_check_status(env, sfifprf, GETPC()); \
+}
+
+VSX_CVT_FP_TO_FP2(xvcvdpsp, 2, float64, float32, 0)
+VSX_CVT_FP_TO_FP2(xscvdpsp, 1, float64, float32, 1)
+
/*
* VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion
* op - instruction mnemonic
@@ -2613,13 +2708,15 @@ VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
* tfld - target vsr_t field (f32 or f64)
* sfprf - set FPRF
*/
-#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf) \
-void helper_##op(CPUPPCState *env, uint32_t opcode, \
- ppc_vsr_t *xt, ppc_vsr_t *xb) \
+#define VSX_CVT_FP_TO_FP_VECTOR(op, nels, stp, ttp, sfld, tfld, sfprf) \
+void helper_##op(CPUPPCState *env, uint32_t opcode, \
+ ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
ppc_vsr_t t = *xt; \
int i; \
\
+ helper_reset_fpstatus(env); \
+ \
for (i = 0; i < nels; i++) { \
t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
if (unlikely(stp##_is_signaling_nan(xb->sfld, \
@@ -2633,7 +2730,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode, \
} \
\
*xt = t; \
- do_float_check_status(env, GETPC()); \
+ do_float_check_status(env, true, GETPC()); \
}
VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
@@ -2647,14 +2744,16 @@ VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
* ttp - target type
* sfld - source vsr_t field
* tfld - target vsr_t field
- * sfprf - set FPRF
+ * sfifprf - set FI and FPRF
*/
-#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfprf) \
+#define VSX_CVT_FP_TO_FP_HP(op, nels, stp, ttp, sfld, tfld, sfifprf) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
ppc_vsr_t t = { }; \
int i; \
\
+ helper_reset_fpstatus(env); \
+ \
for (i = 0; i < nels; i++) { \
t.tfld = stp##_to_##ttp(xb->sfld, 1, &env->fp_status); \
if (unlikely(stp##_is_signaling_nan(xb->sfld, \
@@ -2662,13 +2761,13 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
float_invalid_op_vxsnan(env, GETPC()); \
t.tfld = ttp##_snan_to_qnan(t.tfld); \
} \
- if (sfprf) { \
+ if (sfifprf) { \
helper_compute_fprf_##ttp(env, t.tfld); \
} \
} \
\
*xt = t; \
- do_float_check_status(env, GETPC()); \
+ do_float_check_status(env, sfifprf, GETPC()); \
}
VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
@@ -2676,18 +2775,36 @@ VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
VSX_CVT_FP_TO_FP_HP(xvcvsphp, 4, float32, float16, VsrW(i), VsrH(2 * i + 1), 0)
VSX_CVT_FP_TO_FP_HP(xvcvhpsp, 4, float16, float32, VsrH(2 * i + 1), VsrW(i), 0)
-/*
- * xscvqpdp isn't using VSX_CVT_FP_TO_FP() because xscvqpdpo will be
- * added to this later.
- */
-void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode,
- ppc_vsr_t *xt, ppc_vsr_t *xb)
+void helper_XVCVSPBF16(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
+{
+ ppc_vsr_t t = { };
+ int i, status;
+
+ helper_reset_fpstatus(env);
+
+ for (i = 0; i < 4; i++) {
+ t.VsrH(2 * i + 1) = float32_to_bfloat16(xb->VsrW(i), &env->fp_status);
+ }
+
+ status = get_float_exception_flags(&env->fp_status);
+ if (unlikely(status & float_flag_invalid_snan)) {
+ float_invalid_op_vxsnan(env, GETPC());
+ }
+
+ *xt = t;
+ do_float_check_status(env, false, GETPC());
+}
+
+void helper_XSCVQPDP(CPUPPCState *env, uint32_t ro, ppc_vsr_t *xt,
+ ppc_vsr_t *xb)
{
ppc_vsr_t t = { };
float_status tstat;
+ helper_reset_fpstatus(env);
+
tstat = env->fp_status;
- if (unlikely(Rc(opcode) != 0)) {
+ if (ro != 0) {
tstat.float_rounding_mode = float_round_to_odd;
}
@@ -2700,13 +2817,14 @@ void helper_xscvqpdp(CPUPPCState *env, uint32_t opcode,
helper_compute_fprf_float64(env, t.VsrD(0));
*xt = t;
- do_float_check_status(env, GETPC());
+ do_float_check_status(env, true, GETPC());
}
uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
{
uint64_t result, sign, exp, frac;
+ helper_reset_fpstatus(env);
float_status tstat = env->fp_status;
set_float_exception_flags(0, &tstat);
@@ -2743,12 +2861,9 @@ uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
return (result << 32) | result;
}
-uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
+uint64_t helper_XSCVSPDPN(uint64_t xb)
{
- float_status tstat = env->fp_status;
- set_float_exception_flags(0, &tstat);
-
- return float32_to_float64(xb >> 32, &tstat);
+ return helper_todouble(xb >> 32);
}
/*
@@ -2759,48 +2874,105 @@ uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
* ttp - target type (int32, uint32, int64 or uint64)
* sfld - source vsr_t field
* tfld - target vsr_t field
+ * sfi - set FI
* rnan - resulting NaN
*/
-#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan) \
+#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, sfi, rnan) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- int all_flags = env->fp_status.float_exception_flags, flags; \
- ppc_vsr_t t = *xt; \
- int i; \
+ int all_flags = 0; \
+ ppc_vsr_t t = { }; \
+ int i, flags; \
\
for (i = 0; i < nels; i++) { \
- env->fp_status.float_exception_flags = 0; \
+ helper_reset_fpstatus(env); \
t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \
flags = env->fp_status.float_exception_flags; \
+ all_flags |= flags; \
if (unlikely(flags & float_flag_invalid)) { \
- float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld)); \
- t.tfld = rnan; \
+ t.tfld = float_invalid_cvt(env, flags, t.tfld, rnan, 0, GETPC());\
} \
- all_flags |= flags; \
} \
\
*xt = t; \
env->fp_status.float_exception_flags = all_flags; \
- do_float_check_status(env, GETPC()); \
+ do_float_check_status(env, sfi, GETPC()); \
}
-VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
+VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), true, \
0x8000000000000000ULL)
-VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
- 0x80000000U)
-VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
-VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
-VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
+VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), true, 0ULL)
+VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), false, \
0x8000000000000000ULL)
-VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2 * i), \
- 0x80000000U)
-VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
-VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2 * i), 0U)
-VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), \
+VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), false, \
+ 0ULL)
+VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), false, \
0x8000000000000000ULL)
-VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
-VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), 0ULL)
-VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
+VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), false, \
+                  0x80000000U)
+VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), \
+ false, 0ULL)
+VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), false, 0U)
+
+#define VSX_CVT_FP_TO_INT128(op, tp, rnan) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
+{ \
+ ppc_vsr_t t; \
+ int flags; \
+ \
+ helper_reset_fpstatus(env); \
+ t.s128 = float128_to_##tp##_round_to_zero(xb->f128, &env->fp_status); \
+ flags = get_float_exception_flags(&env->fp_status); \
+ if (unlikely(flags & float_flag_invalid)) { \
+ t.VsrD(0) = float_invalid_cvt(env, flags, t.VsrD(0), rnan, 0, GETPC());\
+ t.VsrD(1) = -(t.VsrD(0) & 1); \
+ } \
+ \
+ *xt = t; \
+ do_float_check_status(env, true, GETPC()); \
+}
+
+VSX_CVT_FP_TO_INT128(XSCVQPUQZ, uint128, 0)
+VSX_CVT_FP_TO_INT128(XSCVQPSQZ, int128, 0x8000000000000000ULL);
+
+/*
+ * Likewise, except that the result is duplicated into both subwords.
+ * Power ISA v3.1 has Programming Notes for these insns:
+ * Previous versions of the architecture allowed the contents of
+ * word 0 of the result register to be undefined. However, all
+ * processors that support this instruction write the result into
+ * words 0 and 1 (and words 2 and 3) of the result register, as
+ * is required by this version of the architecture.
+ */
+#define VSX_CVT_FP_TO_INT2(op, nels, stp, ttp, sfi, rnan) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
+{ \
+ int all_flags = 0; \
+ ppc_vsr_t t = { }; \
+ int i, flags; \
+ \
+ for (i = 0; i < nels; i++) { \
+ helper_reset_fpstatus(env); \
+ t.VsrW(2 * i) = stp##_to_##ttp##_round_to_zero(xb->VsrD(i), \
+ &env->fp_status); \
+ flags = env->fp_status.float_exception_flags; \
+ all_flags |= flags; \
+ if (unlikely(flags & float_flag_invalid)) { \
+ t.VsrW(2 * i) = float_invalid_cvt(env, flags, t.VsrW(2 * i), \
+ rnan, 0, GETPC()); \
+ } \
+ t.VsrW(2 * i + 1) = t.VsrW(2 * i); \
+ } \
+ \
+ *xt = t; \
+ env->fp_status.float_exception_flags = all_flags; \
+ do_float_check_status(env, sfi, GETPC()); \
+}
+
+VSX_CVT_FP_TO_INT2(xscvdpsxws, 1, float64, int32, true, 0x80000000U)
+VSX_CVT_FP_TO_INT2(xscvdpuxws, 1, float64, uint32, true, 0U)
+VSX_CVT_FP_TO_INT2(xvcvdpsxws, 2, float64, int32, false, 0x80000000U)
+VSX_CVT_FP_TO_INT2(xvcvdpuxws, 2, float64, uint32, false, 0U)
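As the Programming Note above requires, each 32-bit result is written to word 2*i and duplicated into word 2*i+1. A plain-array stand-in for the ppc_vsr_t word layout:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t vsr[4] = { 0 };
    uint32_t res[2] = { 0x11111111, 0x22222222 };   /* two converted doublewords */

    for (int i = 0; i < 2; i++) {
        vsr[2 * i]     = res[i];
        vsr[2 * i + 1] = res[i];                     /* duplicate into the odd word */
    }
    printf("%08x %08x %08x %08x\n", vsr[0], vsr[1], vsr[2], vsr[3]);
    return 0;
}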
/*
* VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion
@@ -2816,20 +2988,22 @@ void helper_##op(CPUPPCState *env, uint32_t opcode, \
ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
ppc_vsr_t t = { }; \
+ int flags; \
+ \
+ helper_reset_fpstatus(env); \
\
t.tfld = stp##_to_##ttp##_round_to_zero(xb->sfld, &env->fp_status); \
- if (env->fp_status.float_exception_flags & float_flag_invalid) { \
- float_invalid_cvt(env, 0, GETPC(), stp##_classify(xb->sfld)); \
- t.tfld = rnan; \
+ flags = get_float_exception_flags(&env->fp_status); \
+ if (flags & float_flag_invalid) { \
+ t.tfld = float_invalid_cvt(env, flags, t.tfld, rnan, 0, GETPC()); \
} \
\
*xt = t; \
- do_float_check_status(env, GETPC()); \
+ do_float_check_status(env, true, GETPC()); \
}
VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \
0x8000000000000000ULL)
-
VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
0xffffffff80000000ULL)
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
@@ -2844,26 +3018,28 @@ VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
* sfld - source vsr_t field
* tfld - target vsr_t field
* jdef - definition of the j index (i or 2*i)
- * sfprf - set FPRF
+ * sfifprf - set FI and FPRF
*/
-#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp) \
+#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfifprf, r2sp)\
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t t = *xt; \
+ ppc_vsr_t t = { }; \
int i; \
\
+ helper_reset_fpstatus(env); \
+ \
for (i = 0; i < nels; i++) { \
t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
if (r2sp) { \
- t.tfld = helper_frsp(env, t.tfld); \
+ t.tfld = do_frsp(env, t.tfld, GETPC()); \
} \
- if (sfprf) { \
+ if (sfifprf) { \
helper_compute_fprf_float64(env, t.tfld); \
} \
} \
\
*xt = t; \
- do_float_check_status(env, GETPC()); \
+ do_float_check_status(env, sfifprf, GETPC()); \
}
VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
@@ -2874,11 +3050,39 @@ VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
-VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2 * i), 0, 0)
-VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2 * i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
+#define VSX_CVT_INT_TO_FP2(op, stp, ttp) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
+{ \
+ ppc_vsr_t t = { }; \
+ int i; \
+ \
+ for (i = 0; i < 2; i++) { \
+ t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status); \
+ t.VsrW(2 * i + 1) = t.VsrW(2 * i); \
+ } \
+ \
+ *xt = t; \
+ do_float_check_status(env, false, GETPC()); \
+}
+
+VSX_CVT_INT_TO_FP2(xvcvsxdsp, int64, float32)
+VSX_CVT_INT_TO_FP2(xvcvuxdsp, uint64, float32)
+
+#define VSX_CVT_INT128_TO_FP(op, tp) \
+void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)\
+{ \
+ helper_reset_fpstatus(env); \
+ xt->f128 = tp##_to_float128(xb->s128, &env->fp_status); \
+ helper_compute_fprf_float128(env, xt->f128); \
+ do_float_check_status(env, true, GETPC()); \
+}
+
+VSX_CVT_INT128_TO_FP(XSCVUQQP, uint128);
+VSX_CVT_INT128_TO_FP(XSCVSQQP, int128);
+
/*
* VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion
* op - instruction mnemonic
@@ -2893,11 +3097,12 @@ void helper_##op(CPUPPCState *env, uint32_t opcode, \
{ \
ppc_vsr_t t = *xt; \
\
+ helper_reset_fpstatus(env); \
t.tfld = stp##_to_##ttp(xb->sfld, &env->fp_status); \
helper_compute_fprf_##ttp(env, t.tfld); \
\
*xt = t; \
- do_float_check_status(env, GETPC()); \
+ do_float_check_status(env, true, GETPC()); \
}
VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
@@ -2917,15 +3122,17 @@ VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
* tp - type (float32 or float64)
* fld - vsr_t field (VsrD(*) or VsrW(*))
* rmode - rounding mode
- * sfprf - set FPRF
+ * sfifprf - set FI and FPRF
*/
-#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf) \
+#define VSX_ROUND(op, nels, tp, fld, rmode, sfifprf) \
void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
{ \
- ppc_vsr_t t = *xt; \
+ ppc_vsr_t t = { }; \
int i; \
FloatRoundMode curr_rounding_mode; \
\
+ helper_reset_fpstatus(env); \
+ \
if (rmode != FLOAT_ROUND_CURRENT) { \
curr_rounding_mode = get_float_rounding_mode(&env->fp_status); \
set_float_rounding_mode(rmode, &env->fp_status); \
@@ -2939,7 +3146,7 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
} else { \
t.fld = tp##_round_to_int(xb->fld, &env->fp_status); \
} \
- if (sfprf) { \
+ if (sfifprf) { \
helper_compute_fprf_float64(env, t.fld); \
} \
} \
@@ -2955,7 +3162,7 @@ void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb) \
} \
\
*xt = t; \
- do_float_check_status(env, GETPC()); \
+ do_float_check_status(env, sfifprf, GETPC()); \
}
VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
@@ -2980,35 +3187,14 @@ uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
{
helper_reset_fpstatus(env);
- uint64_t xt = helper_frsp(env, xb);
+ uint64_t xt = do_frsp(env, xb, GETPC());
helper_compute_fprf_float64(env, xt);
- do_float_check_status(env, GETPC());
+ do_float_check_status(env, true, GETPC());
return xt;
}
-#define VSX_XXPERM(op, indexed) \
-void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \
- ppc_vsr_t *xa, ppc_vsr_t *pcv) \
-{ \
- ppc_vsr_t t = *xt; \
- int i, idx; \
- \
- for (i = 0; i < 16; i++) { \
- idx = pcv->VsrB(i) & 0x1F; \
- if (indexed) { \
- idx = 31 - idx; \
- } \
- t.VsrB(i) = (idx <= 15) ? xa->VsrB(idx) \
- : xt->VsrB(idx - 16); \
- } \
- *xt = t; \
-}
-
-VSX_XXPERM(xxperm, 0)
-VSX_XXPERM(xxpermr, 1)
-
-void helper_xvxsigsp(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
+void helper_XVXSIGSP(ppc_vsr_t *xt, ppc_vsr_t *xb)
{
ppc_vsr_t t = { };
uint32_t exp, i, fraction;
@@ -3025,94 +3211,82 @@ void helper_xvxsigsp(CPUPPCState *env, ppc_vsr_t *xt, ppc_vsr_t *xb)
*xt = t;
}
-/*
- * VSX_TEST_DC - VSX floating point test data class
- * op - instruction mnemonic
- * nels - number of elements (1, 2 or 4)
- * xbn - VSR register number
- * tp - type (float32 or float64)
- * fld - vsr_t field (VsrD(*) or VsrW(*))
- * tfld - target vsr_t field (VsrD(*) or VsrW(*))
- * fld_max - target field max
- * scrf - set result in CR and FPCC
- */
-#define VSX_TEST_DC(op, nels, xbn, tp, fld, tfld, fld_max, scrf) \
-void helper_##op(CPUPPCState *env, uint32_t opcode) \
+#define VSX_TSTDC(tp) \
+static int32_t tp##_tstdc(tp b, uint32_t dcmx) \
{ \
- ppc_vsr_t *xt = &env->vsr[xT(opcode)]; \
- ppc_vsr_t *xb = &env->vsr[xbn]; \
- ppc_vsr_t t = { }; \
- uint32_t i, sign, dcmx; \
- uint32_t cc, match = 0; \
- \
- if (!scrf) { \
- dcmx = DCMX_XV(opcode); \
- } else { \
- t = *xt; \
- dcmx = DCMX(opcode); \
- } \
- \
- for (i = 0; i < nels; i++) { \
- sign = tp##_is_neg(xb->fld); \
- if (tp##_is_any_nan(xb->fld)) { \
- match = extract32(dcmx, 6, 1); \
- } else if (tp##_is_infinity(xb->fld)) { \
- match = extract32(dcmx, 4 + !sign, 1); \
- } else if (tp##_is_zero(xb->fld)) { \
- match = extract32(dcmx, 2 + !sign, 1); \
- } else if (tp##_is_zero_or_denormal(xb->fld)) { \
- match = extract32(dcmx, 0 + !sign, 1); \
- } \
- \
- if (scrf) { \
- cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT; \
- env->fpscr &= ~FP_FPCC; \
- env->fpscr |= cc << FPSCR_FPCC; \
- env->crf[BF(opcode)] = cc; \
- } else { \
- t.tfld = match ? fld_max : 0; \
- } \
- match = 0; \
- } \
- if (!scrf) { \
- *xt = t; \
+ uint32_t match = 0; \
+ uint32_t sign = tp##_is_neg(b); \
+ if (tp##_is_any_nan(b)) { \
+ match = extract32(dcmx, 6, 1); \
+ } else if (tp##_is_infinity(b)) { \
+ match = extract32(dcmx, 4 + !sign, 1); \
+ } else if (tp##_is_zero(b)) { \
+ match = extract32(dcmx, 2 + !sign, 1); \
+ } else if (tp##_is_zero_or_denormal(b)) { \
+ match = extract32(dcmx, 0 + !sign, 1); \
} \
+ return (match != 0); \
}
-VSX_TEST_DC(xvtstdcdp, 2, xB(opcode), float64, VsrD(i), VsrD(i), UINT64_MAX, 0)
-VSX_TEST_DC(xvtstdcsp, 4, xB(opcode), float32, VsrW(i), VsrW(i), UINT32_MAX, 0)
-VSX_TEST_DC(xststdcdp, 1, xB(opcode), float64, VsrD(0), VsrD(0), 0, 1)
-VSX_TEST_DC(xststdcqp, 1, (rB(opcode) + 32), float128, f128, VsrD(0), 0, 1)
+VSX_TSTDC(float32)
+VSX_TSTDC(float64)
+VSX_TSTDC(float128)
+#undef VSX_TSTDC
-void helper_xststdcsp(CPUPPCState *env, uint32_t opcode, ppc_vsr_t *xb)
+void helper_XVTSTDCDP(ppc_vsr_t *t, ppc_vsr_t *b, uint64_t dcmx, uint32_t v)
{
- uint32_t dcmx, sign, exp;
- uint32_t cc, match = 0, not_sp = 0;
-
- dcmx = DCMX(opcode);
- exp = (xb->VsrD(0) >> 52) & 0x7FF;
+ int i;
+ for (i = 0; i < 2; i++) {
+ t->s64[i] = (int64_t)-float64_tstdc(b->f64[i], dcmx);
+ }
+}
- sign = float64_is_neg(xb->VsrD(0));
- if (float64_is_any_nan(xb->VsrD(0))) {
- match = extract32(dcmx, 6, 1);
- } else if (float64_is_infinity(xb->VsrD(0))) {
- match = extract32(dcmx, 4 + !sign, 1);
- } else if (float64_is_zero(xb->VsrD(0))) {
- match = extract32(dcmx, 2 + !sign, 1);
- } else if (float64_is_zero_or_denormal(xb->VsrD(0)) ||
- (exp > 0 && exp < 0x381)) {
- match = extract32(dcmx, 0 + !sign, 1);
+void helper_XVTSTDCSP(ppc_vsr_t *t, ppc_vsr_t *b, uint64_t dcmx, uint32_t v)
+{
+ int i;
+ for (i = 0; i < 4; i++) {
+ t->s32[i] = (int32_t)-float32_tstdc(b->f32[i], dcmx);
}
+}
- not_sp = !float64_eq(xb->VsrD(0),
- float32_to_float64(
- float64_to_float32(xb->VsrD(0), &env->fp_status),
- &env->fp_status), &env->fp_status);
+static bool not_SP_value(float64 val)
+{
+ return val != helper_todouble(helper_tosingle(val));
+}
+/*
+ * VSX_XS_TSTDC - VSX Scalar Test Data Class
+ * NAME - instruction name
+ * FLD - vsr_t field (VsrD(0) or f128)
+ * TP - type (float64 or float128)
+ */
+#define VSX_XS_TSTDC(NAME, FLD, TP) \
+ void helper_##NAME(CPUPPCState *env, uint32_t bf, \
+ uint32_t dcmx, ppc_vsr_t *b) \
+ { \
+ uint32_t cc, match, sign = TP##_is_neg(b->FLD); \
+ match = TP##_tstdc(b->FLD, dcmx); \
+ cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT; \
+ env->fpscr &= ~FP_FPCC; \
+ env->fpscr |= cc << FPSCR_FPCC; \
+ env->crf[bf] = cc; \
+ }
+
+VSX_XS_TSTDC(XSTSTDCDP, VsrD(0), float64)
+VSX_XS_TSTDC(XSTSTDCQP, f128, float128)
+#undef VSX_XS_TSTDC
+
+void helper_XSTSTDCSP(CPUPPCState *env, uint32_t bf,
+ uint32_t dcmx, ppc_vsr_t *b)
+{
+ uint32_t cc, match, sign = float64_is_neg(b->VsrD(0));
+ uint32_t exp = (b->VsrD(0) >> 52) & 0x7FF;
+ int not_sp = (int)not_SP_value(b->VsrD(0));
+ match = float64_tstdc(b->VsrD(0), dcmx) || (exp > 0 && exp < 0x381);
cc = sign << CRF_LT_BIT | match << CRF_EQ_BIT | not_sp << CRF_SO_BIT;
env->fpscr &= ~FP_FPCC;
env->fpscr |= cc << FPSCR_FPCC;
- env->crf[BF(opcode)] = cc;
+ env->crf[bf] = cc;
}
void helper_xsrqpi(CPUPPCState *env, uint32_t opcode,
@@ -3130,7 +3304,7 @@ void helper_xsrqpi(CPUPPCState *env, uint32_t opcode,
if (r == 0 && rmc == 0) {
rmode = float_round_ties_away;
} else if (r == 0 && rmc == 0x3) {
- rmode = fpscr_rn;
+ rmode = env->fpscr & FP_RN;
} else if (r == 1) {
switch (rmc) {
case 0:
@@ -3156,11 +3330,8 @@ void helper_xsrqpi(CPUPPCState *env, uint32_t opcode,
t.f128 = float128_round_to_int(xb->f128, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
- if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
- if (float128_is_signaling_nan(xb->f128, &tstat)) {
- float_invalid_op_vxsnan(env, GETPC());
- t.f128 = float128_snan_to_qnan(t.f128);
- }
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid_snan)) {
+ float_invalid_op_vxsnan(env, GETPC());
}
if (ex == 0 && (tstat.float_exception_flags & float_flag_inexact)) {
@@ -3168,7 +3339,7 @@ void helper_xsrqpi(CPUPPCState *env, uint32_t opcode,
}
helper_compute_fprf_float128(env, t.f128);
- do_float_check_status(env, GETPC());
+ do_float_check_status(env, true, GETPC());
*xt = t;
}
@@ -3187,7 +3358,7 @@ void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode,
if (r == 0 && rmc == 0) {
rmode = float_round_ties_away;
} else if (r == 0 && rmc == 0x3) {
- rmode = fpscr_rn;
+ rmode = env->fpscr & FP_RN;
} else if (r == 1) {
switch (rmc) {
case 0:
@@ -3214,16 +3385,14 @@ void helper_xsrqpxp(CPUPPCState *env, uint32_t opcode,
t.f128 = floatx80_to_float128(round_res, &tstat);
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
- if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
- if (float128_is_signaling_nan(xb->f128, &tstat)) {
- float_invalid_op_vxsnan(env, GETPC());
- t.f128 = float128_snan_to_qnan(t.f128);
- }
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid_snan)) {
+ float_invalid_op_vxsnan(env, GETPC());
+ t.f128 = float128_snan_to_qnan(t.f128);
}
helper_compute_fprf_float128(env, t.f128);
*xt = t;
- do_float_check_status(env, GETPC());
+ do_float_check_status(env, true, GETPC());
}
void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode,
@@ -3244,20 +3413,12 @@ void helper_xssqrtqp(CPUPPCState *env, uint32_t opcode,
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
- if (float128_is_signaling_nan(xb->f128, &tstat)) {
- float_invalid_op_vxsnan(env, GETPC());
- t.f128 = float128_snan_to_qnan(xb->f128);
- } else if (float128_is_quiet_nan(xb->f128, &tstat)) {
- t.f128 = xb->f128;
- } else if (float128_is_neg(xb->f128) && !float128_is_zero(xb->f128)) {
- float_invalid_op_vxsqrt(env, 1, GETPC());
- t.f128 = float128_default_nan(&env->fp_status);
- }
+ float_invalid_op_sqrt(env, tstat.float_exception_flags, 1, GETPC());
}
helper_compute_fprf_float128(env, t.f128);
*xt = t;
- do_float_check_status(env, GETPC());
+ do_float_check_status(env, true, GETPC());
}
void helper_xssubqp(CPUPPCState *env, uint32_t opcode,
@@ -3278,12 +3439,320 @@ void helper_xssubqp(CPUPPCState *env, uint32_t opcode,
env->fp_status.float_exception_flags |= tstat.float_exception_flags;
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {
- float_invalid_op_addsub(env, 1, GETPC(),
- float128_classify(xa->f128) |
- float128_classify(xb->f128));
+ float_invalid_op_addsub(env, tstat.float_exception_flags, 1, GETPC());
}
helper_compute_fprf_float128(env, t.f128);
*xt = t;
- do_float_check_status(env, GETPC());
+ do_float_check_status(env, true, GETPC());
+}
+
+static inline void vsxger_excp(CPUPPCState *env, uintptr_t retaddr)
+{
+ /*
+ * XV*GER instructions execute and set the FPSCR as if exceptions
+ * are disabled and only at the end throw an exception
+ */
+ target_ulong enable;
+ enable = env->fpscr & (FP_ENABLES | FP_FI | FP_FR);
+ env->fpscr &= ~(FP_ENABLES | FP_FI | FP_FR);
+ int status = get_float_exception_flags(&env->fp_status);
+ if (unlikely(status & float_flag_invalid)) {
+ if (status & float_flag_invalid_snan) {
+ float_invalid_op_vxsnan(env, 0);
+ }
+ if (status & float_flag_invalid_imz) {
+ float_invalid_op_vximz(env, false, 0);
+ }
+ if (status & float_flag_invalid_isi) {
+ float_invalid_op_vxisi(env, false, 0);
+ }
+ }
+ do_float_check_status(env, false, retaddr);
+ env->fpscr |= enable;
+ do_fpscr_check_status(env, retaddr);
+}
+
+typedef float64 extract_f16(float16, float_status *);
+
+static float64 extract_hf16(float16 in, float_status *fp_status)
+{
+ return float16_to_float64(in, true, fp_status);
+}
+
+static float64 extract_bf16(bfloat16 in, float_status *fp_status)
+{
+ return bfloat16_to_float64(in, fp_status);
+}
+
+static void vsxger16(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask, bool acc,
+ bool neg_mul, bool neg_acc, extract_f16 extract)
+{
+ float32 r, aux_acc;
+ float64 psum, va, vb, vc, vd;
+ int i, j, xmsk_bit, ymsk_bit;
+ uint8_t pmsk = FIELD_EX32(mask, GER_MSK, PMSK),
+ xmsk = FIELD_EX32(mask, GER_MSK, XMSK),
+ ymsk = FIELD_EX32(mask, GER_MSK, YMSK);
+ float_status *excp_ptr = &env->fp_status;
+ for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) {
+ for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) {
+ if ((xmsk_bit & xmsk) && (ymsk_bit & ymsk)) {
+ va = !(pmsk & 2) ? float64_zero :
+ extract(a->VsrHF(2 * i), excp_ptr);
+ vb = !(pmsk & 2) ? float64_zero :
+ extract(b->VsrHF(2 * j), excp_ptr);
+ vc = !(pmsk & 1) ? float64_zero :
+ extract(a->VsrHF(2 * i + 1), excp_ptr);
+ vd = !(pmsk & 1) ? float64_zero :
+ extract(b->VsrHF(2 * j + 1), excp_ptr);
+ psum = float64_mul(va, vb, excp_ptr);
+ psum = float64r32_muladd(vc, vd, psum, 0, excp_ptr);
+ r = float64_to_float32(psum, excp_ptr);
+ if (acc) {
+ aux_acc = at[i].VsrSF(j);
+ if (neg_mul) {
+ r = bfp32_neg(r);
+ }
+ if (neg_acc) {
+ aux_acc = bfp32_neg(aux_acc);
+ }
+ r = float32_add(r, aux_acc, excp_ptr);
+ }
+ at[i].VsrSF(j) = r;
+ } else {
+ at[i].VsrSF(j) = float32_zero;
+ }
+ }
+ }
+ vsxger_excp(env, GETPC());
+}
+
+typedef void vsxger_zero(ppc_vsr_t *at, int, int);
+
+typedef void vsxger_muladd_f(ppc_vsr_t *, ppc_vsr_t *, ppc_vsr_t *, int, int,
+ int flags, float_status *s);
+
+static void vsxger_muladd32(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
+ int j, int flags, float_status *s)
+{
+ at[i].VsrSF(j) = float32_muladd(a->VsrSF(i), b->VsrSF(j),
+ at[i].VsrSF(j), flags, s);
+}
+
+static void vsxger_mul32(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
+ int j, int flags, float_status *s)
+{
+ at[i].VsrSF(j) = float32_mul(a->VsrSF(i), b->VsrSF(j), s);
+}
+
+static void vsxger_zero32(ppc_vsr_t *at, int i, int j)
+{
+ at[i].VsrSF(j) = float32_zero;
+}
+
+static void vsxger_muladd64(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
+ int j, int flags, float_status *s)
+{
+ if (j >= 2) {
+ j -= 2;
+ at[i].VsrDF(j) = float64_muladd(a[i / 2].VsrDF(i % 2), b->VsrDF(j),
+ at[i].VsrDF(j), flags, s);
+ }
+}
+
+static void vsxger_mul64(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
+ int j, int flags, float_status *s)
+{
+ if (j >= 2) {
+ j -= 2;
+ at[i].VsrDF(j) = float64_mul(a[i / 2].VsrDF(i % 2), b->VsrDF(j), s);
+ }
+}
+
+static void vsxger_zero64(ppc_vsr_t *at, int i, int j)
+{
+ if (j >= 2) {
+ j -= 2;
+ at[i].VsrDF(j) = float64_zero;
+ }
+}
+
+static void vsxger(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask, bool acc, bool neg_mul,
+ bool neg_acc, vsxger_muladd_f mul, vsxger_muladd_f muladd,
+ vsxger_zero zero)
+{
+ int i, j, xmsk_bit, ymsk_bit, op_flags;
+ uint8_t xmsk = mask & 0x0F;
+ uint8_t ymsk = (mask >> 4) & 0x0F;
+ float_status *excp_ptr = &env->fp_status;
+ op_flags = (neg_acc ^ neg_mul) ? float_muladd_negate_c : 0;
+ op_flags |= (neg_mul) ? float_muladd_negate_result : 0;
+ helper_reset_fpstatus(env);
+ for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) {
+ for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) {
+ if ((xmsk_bit & xmsk) && (ymsk_bit & ymsk)) {
+ if (acc) {
+ muladd(at, a, b, i, j, op_flags, excp_ptr);
+ } else {
+ mul(at, a, b, i, j, op_flags, excp_ptr);
+ }
+ } else {
+ zero(at, i, j);
+ }
+ }
+ }
+ vsxger_excp(env, GETPC());
+}
+
+QEMU_FLATTEN
+void helper_XVBF16GER2(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger16(env, a, b, at, mask, false, false, false, extract_bf16);
+}
+
+QEMU_FLATTEN
+void helper_XVBF16GER2PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger16(env, a, b, at, mask, true, false, false, extract_bf16);
+}
+
+QEMU_FLATTEN
+void helper_XVBF16GER2PN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger16(env, a, b, at, mask, true, false, true, extract_bf16);
+}
+
+QEMU_FLATTEN
+void helper_XVBF16GER2NP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger16(env, a, b, at, mask, true, true, false, extract_bf16);
+}
+
+QEMU_FLATTEN
+void helper_XVBF16GER2NN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger16(env, a, b, at, mask, true, true, true, extract_bf16);
+}
+
+QEMU_FLATTEN
+void helper_XVF16GER2(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger16(env, a, b, at, mask, false, false, false, extract_hf16);
+}
+
+QEMU_FLATTEN
+void helper_XVF16GER2PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger16(env, a, b, at, mask, true, false, false, extract_hf16);
+}
+
+QEMU_FLATTEN
+void helper_XVF16GER2PN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger16(env, a, b, at, mask, true, false, true, extract_hf16);
+}
+
+QEMU_FLATTEN
+void helper_XVF16GER2NP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger16(env, a, b, at, mask, true, true, false, extract_hf16);
+}
+
+QEMU_FLATTEN
+void helper_XVF16GER2NN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger16(env, a, b, at, mask, true, true, true, extract_hf16);
+}
+
+QEMU_FLATTEN
+void helper_XVF32GER(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger(env, a, b, at, mask, false, false, false, vsxger_mul32,
+ vsxger_muladd32, vsxger_zero32);
+}
+
+QEMU_FLATTEN
+void helper_XVF32GERPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger(env, a, b, at, mask, true, false, false, vsxger_mul32,
+ vsxger_muladd32, vsxger_zero32);
+}
+
+QEMU_FLATTEN
+void helper_XVF32GERPN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger(env, a, b, at, mask, true, false, true, vsxger_mul32,
+ vsxger_muladd32, vsxger_zero32);
+}
+
+QEMU_FLATTEN
+void helper_XVF32GERNP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger(env, a, b, at, mask, true, true, false, vsxger_mul32,
+ vsxger_muladd32, vsxger_zero32);
+}
+
+QEMU_FLATTEN
+void helper_XVF32GERNN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger(env, a, b, at, mask, true, true, true, vsxger_mul32,
+ vsxger_muladd32, vsxger_zero32);
+}
+
+QEMU_FLATTEN
+void helper_XVF64GER(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger(env, a, b, at, mask, false, false, false, vsxger_mul64,
+ vsxger_muladd64, vsxger_zero64);
+}
+
+QEMU_FLATTEN
+void helper_XVF64GERPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger(env, a, b, at, mask, true, false, false, vsxger_mul64,
+ vsxger_muladd64, vsxger_zero64);
+}
+
+QEMU_FLATTEN
+void helper_XVF64GERPN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger(env, a, b, at, mask, true, false, true, vsxger_mul64,
+ vsxger_muladd64, vsxger_zero64);
+}
+
+QEMU_FLATTEN
+void helper_XVF64GERNP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger(env, a, b, at, mask, true, true, false, vsxger_mul64,
+ vsxger_muladd64, vsxger_zero64);
+}
+
+QEMU_FLATTEN
+void helper_XVF64GERNN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ vsxger(env, a, b, at, mask, true, true, true, vsxger_mul64,
+ vsxger_muladd64, vsxger_zero64);
}
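
Illustrative note (not part of the patch above): the XV*GER helpers in the fpu_helper.c hunks all share one gating pattern, walking a 4x4 grid of accumulator elements and updating only the positions whose row bit in xmsk and column bit in ymsk are both set, zeroing the rest. The standalone sketch below reproduces just that xmsk/ymsk selection loop in plain C, with no QEMU types; ger_mask_demo, xbit and ybit are illustrative names, not identifiers from the patch.

#include <stdio.h>

/* Print a 4x4 grid: '*' where the GER loop would compute an element,
 * '.' where it would store zero, using the same bit-walk as vsxger(). */
static void ger_mask_demo(unsigned xmsk, unsigned ymsk)
{
    for (int i = 0, xbit = 1 << 3; i < 4; i++, xbit >>= 1) {
        for (int j = 0, ybit = 1 << 3; j < 4; j++, ybit >>= 1) {
            putchar((xbit & xmsk) && (ybit & ymsk) ? '*' : '.');
        }
        putchar('\n');
    }
}

int main(void)
{
    ger_mask_demo(0xF, 0xF); /* full 4x4 update */
    ger_mask_demo(0xC, 0x3); /* rows 0-1, columns 2-3 only */
    return 0;
}

Compiled and run on its own, the second call prints "..**" for the first two rows and dots elsewhere, which is the element pattern the masked helpers produce before any arithmetic is applied.
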
diff --git a/target/ppc/gdbstub.c b/target/ppc/gdbstub.c
index 1808a150e4..3b28d4e21c 100644
--- a/target/ppc/gdbstub.c
+++ b/target/ppc/gdbstub.c
@@ -20,6 +20,7 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/gdbstub.h"
+#include "gdbstub/helpers.h"
#include "internal.h"
static int ppc_gdb_register_len_apple(int n)
@@ -53,12 +54,6 @@ static int ppc_gdb_register_len(int n)
case 0 ... 31:
/* gprs */
return sizeof(target_ulong);
- case 32 ... 63:
- /* fprs */
- if (gdb_has_xml) {
- return 0;
- }
- return 8;
case 66:
/* cr */
case 69:
@@ -73,12 +68,6 @@ static int ppc_gdb_register_len(int n)
case 68:
/* ctr */
return sizeof(target_ulong);
- case 70:
- /* fpscr */
- if (gdb_has_xml) {
- return 0;
- }
- return sizeof(target_ulong);
default:
return 0;
}
@@ -87,15 +76,15 @@ static int ppc_gdb_register_len(int n)
/*
* We need to present the registers to gdb in the "current" memory
* ordering. For user-only mode we get this for free;
- * TARGET_WORDS_BIGENDIAN is set to the proper ordering for the
+ * TARGET_BIG_ENDIAN is set to the proper ordering for the
* binary, and cannot be changed. For system mode,
- * TARGET_WORDS_BIGENDIAN is always set, and we must check the current
+ * TARGET_BIG_ENDIAN is always set, and we must check the current
* mode of the chip to see if we're running in little-endian.
*/
void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len)
{
#ifndef CONFIG_USER_ONLY
- if (!msr_le) {
+ if (!FIELD_EX64(env->msr, MSR, LE)) {
/* do nothing */
} else if (len == 4) {
bswap32s((uint32_t *)mem_buf);
@@ -119,8 +108,7 @@ void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len)
int ppc_cpu_gdb_read_register(CPUState *cs, GByteArray *buf, int n)
{
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
+ CPUPPCState *env = cpu_env(cs);
uint8_t *mem_buf;
int r = ppc_gdb_register_len(n);
@@ -131,9 +119,6 @@ int ppc_cpu_gdb_read_register(CPUState *cs, GByteArray *buf, int n)
if (n < 32) {
/* gprs */
gdb_get_regl(buf, env->gpr[n]);
- } else if (n < 64) {
- /* fprs */
- gdb_get_reg64(buf, *cpu_fpr_ptr(env, n - 32));
} else {
switch (n) {
case 64:
@@ -144,11 +129,7 @@ int ppc_cpu_gdb_read_register(CPUState *cs, GByteArray *buf, int n)
break;
case 66:
{
- uint32_t cr = 0;
- int i;
- for (i = 0; i < 8; i++) {
- cr |= env->crf[i] << (32 - ((i + 1) * 4));
- }
+ uint32_t cr = ppc_get_cr(env);
gdb_get_reg32(buf, cr);
break;
}
@@ -159,10 +140,7 @@ int ppc_cpu_gdb_read_register(CPUState *cs, GByteArray *buf, int n)
gdb_get_regl(buf, env->ctr);
break;
case 69:
- gdb_get_reg32(buf, env->xer);
- break;
- case 70:
- gdb_get_reg32(buf, env->fpscr);
+ gdb_get_reg32(buf, cpu_read_xer(env));
break;
}
}
@@ -173,8 +151,7 @@ int ppc_cpu_gdb_read_register(CPUState *cs, GByteArray *buf, int n)
int ppc_cpu_gdb_read_register_apple(CPUState *cs, GByteArray *buf, int n)
{
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
+ CPUPPCState *env = cpu_env(cs);
uint8_t *mem_buf;
int r = ppc_gdb_register_len_apple(n);
@@ -202,11 +179,7 @@ int ppc_cpu_gdb_read_register_apple(CPUState *cs, GByteArray *buf, int n)
break;
case 66 + 32:
{
- uint32_t cr = 0;
- int i;
- for (i = 0; i < 8; i++) {
- cr |= env->crf[i] << (32 - ((i + 1) * 4));
- }
+ uint32_t cr = ppc_get_cr(env);
gdb_get_reg32(buf, cr);
break;
}
@@ -217,7 +190,7 @@ int ppc_cpu_gdb_read_register_apple(CPUState *cs, GByteArray *buf, int n)
gdb_get_reg64(buf, env->ctr);
break;
case 69 + 32:
- gdb_get_reg32(buf, env->xer);
+ gdb_get_reg32(buf, cpu_read_xer(env));
break;
case 70 + 32:
gdb_get_reg64(buf, env->fpscr);
@@ -231,8 +204,7 @@ int ppc_cpu_gdb_read_register_apple(CPUState *cs, GByteArray *buf, int n)
int ppc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
+ CPUPPCState *env = cpu_env(cs);
int r = ppc_gdb_register_len(n);
if (!r) {
@@ -256,10 +228,7 @@ int ppc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
case 66:
{
uint32_t cr = ldl_p(mem_buf);
- int i;
- for (i = 0; i < 8; i++) {
- env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
- }
+ ppc_set_cr(env, cr);
break;
}
case 67:
@@ -269,7 +238,7 @@ int ppc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
env->ctr = ldtul_p(mem_buf);
break;
case 69:
- env->xer = ldl_p(mem_buf);
+ cpu_write_xer(env, ldl_p(mem_buf));
break;
case 70:
/* fpscr */
@@ -281,8 +250,7 @@ int ppc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
}
int ppc_cpu_gdb_write_register_apple(CPUState *cs, uint8_t *mem_buf, int n)
{
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
+ CPUPPCState *env = cpu_env(cs);
int r = ppc_gdb_register_len_apple(n);
if (!r) {
@@ -306,10 +274,7 @@ int ppc_cpu_gdb_write_register_apple(CPUState *cs, uint8_t *mem_buf, int n)
case 66 + 32:
{
uint32_t cr = ldl_p(mem_buf);
- int i;
- for (i = 0; i < 8; i++) {
- env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
- }
+ ppc_set_cr(env, cr);
break;
}
case 67 + 32:
@@ -319,7 +284,7 @@ int ppc_cpu_gdb_write_register_apple(CPUState *cs, uint8_t *mem_buf, int n)
env->ctr = ldq_p(mem_buf);
break;
case 69 + 32:
- env->xer = ldl_p(mem_buf);
+ cpu_write_xer(env, ldl_p(mem_buf));
break;
case 70 + 32:
/* fpscr */
@@ -331,23 +296,15 @@ int ppc_cpu_gdb_write_register_apple(CPUState *cs, uint8_t *mem_buf, int n)
}
#ifndef CONFIG_USER_ONLY
-void ppc_gdb_gen_spr_xml(PowerPCCPU *cpu)
+static void gdb_gen_spr_feature(CPUState *cs)
{
- PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
- GString *xml;
- char *spr_name;
+ GDBFeatureBuilder builder;
unsigned int num_regs = 0;
int i;
- if (pcc->gdb_spr_xml) {
- return;
- }
-
- xml = g_string_new("<?xml version=\"1.0\"?>");
- g_string_append(xml, "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">");
- g_string_append(xml, "<feature name=\"org.qemu.power.spr\">");
-
for (i = 0; i < ARRAY_SIZE(env->spr_cb); i++) {
ppc_spr_t *spr = &env->spr_cb[i];
@@ -355,13 +312,6 @@ void ppc_gdb_gen_spr_xml(PowerPCCPU *cpu)
continue;
}
- spr_name = g_ascii_strdown(spr->name, -1);
- g_string_append_printf(xml, "<reg name=\"%s\"", spr_name);
- g_free(spr_name);
-
- g_string_append_printf(xml, " bitsize=\"%d\"", TARGET_LONG_BITS);
- g_string_append(xml, " group=\"spr\"/>");
-
/*
* GDB identifies registers based on the order they are
* presented in the XML. These ids will not match QEMU's
@@ -374,20 +324,27 @@ void ppc_gdb_gen_spr_xml(PowerPCCPU *cpu)
num_regs++;
}
- g_string_append(xml, "</feature>");
+ if (pcc->gdb_spr.xml) {
+ return;
+ }
- pcc->gdb_num_sprs = num_regs;
- pcc->gdb_spr_xml = g_string_free(xml, false);
-}
+ gdb_feature_builder_init(&builder, &pcc->gdb_spr,
+ "org.qemu.power.spr", "power-spr.xml",
+ cs->gdb_num_regs);
-const char *ppc_gdb_get_dynamic_xml(CPUState *cs, const char *xml_name)
-{
- PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
+ for (i = 0; i < ARRAY_SIZE(env->spr_cb); i++) {
+ ppc_spr_t *spr = &env->spr_cb[i];
+
+ if (!spr->name) {
+ continue;
+ }
- if (strcmp(xml_name, "power-spr.xml") == 0) {
- return pcc->gdb_spr_xml;
+ gdb_feature_builder_append_reg(&builder, g_ascii_strdown(spr->name, -1),
+ TARGET_LONG_BITS, spr->gdb_id,
+ "int", "spr");
}
- return NULL;
+
+ gdb_feature_builder_end(&builder);
}
#endif
@@ -406,8 +363,10 @@ static int gdb_find_spr_idx(CPUPPCState *env, int n)
return -1;
}
-static int gdb_get_spr_reg(CPUPPCState *env, GByteArray *buf, int n)
+static int gdb_get_spr_reg(CPUState *cs, GByteArray *buf, int n)
{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
int reg;
int len;
@@ -417,13 +376,40 @@ static int gdb_get_spr_reg(CPUPPCState *env, GByteArray *buf, int n)
}
len = TARGET_LONG_SIZE;
- gdb_get_regl(buf, env->spr[reg]);
+
+ /* Handle those SPRs that are not part of the env->spr[] array */
+ target_ulong val;
+ switch (reg) {
+#if defined(TARGET_PPC64)
+ case SPR_CFAR:
+ val = env->cfar;
+ break;
+#endif
+ case SPR_HDEC:
+ val = cpu_ppc_load_hdecr(env);
+ break;
+ case SPR_TBL:
+ val = cpu_ppc_load_tbl(env);
+ break;
+ case SPR_TBU:
+ val = cpu_ppc_load_tbu(env);
+ break;
+ case SPR_DECR:
+ val = cpu_ppc_load_decr(env);
+ break;
+ default:
+ val = env->spr[reg];
+ }
+ gdb_get_regl(buf, val);
+
ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, len), len);
return len;
}
-static int gdb_set_spr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+static int gdb_set_spr_reg(CPUState *cs, uint8_t *mem_buf, int n)
{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
int reg;
int len;
@@ -434,14 +420,27 @@ static int gdb_set_spr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
len = TARGET_LONG_SIZE;
ppc_maybe_bswap_register(env, mem_buf, len);
- env->spr[reg] = ldn_p(mem_buf, len);
+
+ /* Handle those SPRs that are not part of the env->spr[] array */
+ target_ulong val = ldn_p(mem_buf, len);
+ switch (reg) {
+#if defined(TARGET_PPC64)
+ case SPR_CFAR:
+ env->cfar = val;
+ break;
+#endif
+ default:
+ env->spr[reg] = val;
+ }
return len;
}
#endif
-static int gdb_get_float_reg(CPUPPCState *env, GByteArray *buf, int n)
+static int gdb_get_float_reg(CPUState *cs, GByteArray *buf, int n)
{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
uint8_t *mem_buf;
if (n < 32) {
gdb_get_reg64(buf, *cpu_fpr_ptr(env, n));
@@ -458,8 +457,11 @@ static int gdb_get_float_reg(CPUPPCState *env, GByteArray *buf, int n)
return 0;
}
-static int gdb_set_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+static int gdb_set_float_reg(CPUState *cs, uint8_t *mem_buf, int n)
{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
if (n < 32) {
ppc_maybe_bswap_register(env, mem_buf, 8);
*cpu_fpr_ptr(env, n) = ldq_p(mem_buf);
@@ -473,8 +475,10 @@ static int gdb_set_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
return 0;
}
-static int gdb_get_avr_reg(CPUPPCState *env, GByteArray *buf, int n)
+static int gdb_get_avr_reg(CPUState *cs, GByteArray *buf, int n)
{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
uint8_t *mem_buf;
if (n < 32) {
@@ -499,8 +503,11 @@ static int gdb_get_avr_reg(CPUPPCState *env, GByteArray *buf, int n)
return 0;
}
-static int gdb_set_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+static int gdb_set_avr_reg(CPUState *cs, uint8_t *mem_buf, int n)
{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
if (n < 32) {
ppc_avr_t *avr = cpu_avr_ptr(env, n);
ppc_maybe_bswap_register(env, mem_buf, 16);
@@ -521,8 +528,11 @@ static int gdb_set_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
return 0;
}
-static int gdb_get_spe_reg(CPUPPCState *env, GByteArray *buf, int n)
+static int gdb_get_spe_reg(CPUState *cs, GByteArray *buf, int n)
{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
if (n < 32) {
#if defined(TARGET_PPC64)
gdb_get_reg32(buf, env->gpr[n] >> 32);
@@ -545,8 +555,11 @@ static int gdb_get_spe_reg(CPUPPCState *env, GByteArray *buf, int n)
return 0;
}
-static int gdb_set_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+static int gdb_set_spe_reg(CPUState *cs, uint8_t *mem_buf, int n)
{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
if (n < 32) {
#if defined(TARGET_PPC64)
target_ulong lo = (uint32_t)env->gpr[n];
@@ -574,8 +587,11 @@ static int gdb_set_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
return 0;
}
-static int gdb_get_vsx_reg(CPUPPCState *env, GByteArray *buf, int n)
+static int gdb_get_vsx_reg(CPUState *cs, GByteArray *buf, int n)
{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
if (n < 32) {
gdb_get_reg64(buf, *cpu_vsrl_ptr(env, n));
ppc_maybe_bswap_register(env, gdb_get_reg_ptr(buf, 8), 8);
@@ -584,8 +600,11 @@ static int gdb_get_vsx_reg(CPUPPCState *env, GByteArray *buf, int n)
return 0;
}
-static int gdb_set_vsx_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+static int gdb_set_vsx_reg(CPUState *cs, uint8_t *mem_buf, int n)
{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
if (n < 32) {
ppc_maybe_bswap_register(env, mem_buf, 8);
*cpu_vsrl_ptr(env, n) = ldq_p(mem_buf);
@@ -594,12 +613,12 @@ static int gdb_set_vsx_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
return 0;
}
-gchar *ppc_gdb_arch_name(CPUState *cs)
+const gchar *ppc_gdb_arch_name(CPUState *cs)
{
#if defined(TARGET_PPC64)
- return g_strdup("powerpc:common64");
+ return "powerpc:common64";
#else
- return g_strdup("powerpc:common");
+ return "powerpc:common";
#endif
}
@@ -607,22 +626,24 @@ void ppc_gdb_init(CPUState *cs, PowerPCCPUClass *pcc)
{
if (pcc->insns_flags & PPC_FLOAT) {
gdb_register_coprocessor(cs, gdb_get_float_reg, gdb_set_float_reg,
- 33, "power-fpu.xml", 0);
+ gdb_find_static_feature("power-fpu.xml"), 0);
}
if (pcc->insns_flags & PPC_ALTIVEC) {
gdb_register_coprocessor(cs, gdb_get_avr_reg, gdb_set_avr_reg,
- 34, "power-altivec.xml", 0);
+ gdb_find_static_feature("power-altivec.xml"),
+ 0);
}
if (pcc->insns_flags & PPC_SPE) {
gdb_register_coprocessor(cs, gdb_get_spe_reg, gdb_set_spe_reg,
- 34, "power-spe.xml", 0);
+ gdb_find_static_feature("power-spe.xml"), 0);
}
if (pcc->insns_flags2 & PPC2_VSX) {
gdb_register_coprocessor(cs, gdb_get_vsx_reg, gdb_set_vsx_reg,
- 32, "power-vsx.xml", 0);
+ gdb_find_static_feature("power-vsx.xml"), 0);
}
#ifndef CONFIG_USER_ONLY
+ gdb_gen_spr_feature(cs);
gdb_register_coprocessor(cs, gdb_get_spr_reg, gdb_set_spr_reg,
- pcc->gdb_num_sprs, "power-spr.xml", 0);
+ &pcc->gdb_spr, 0);
#endif
}
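
Illustrative note (not part of the patch above): several gdbstub.c hunks replace open-coded condition-register packing loops with ppc_get_cr()/ppc_set_cr(). For reference, the sketch below shows, as a standalone plain-C program outside QEMU, what those removed loops computed: eight 4-bit CR fields packed into one 32-bit word with CR0 in the most significant nibble, and the reverse. The names pack_cr and unpack_cr are illustrative only.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack crf[0..7] into a 32-bit CR image, CR0 in bits 31..28. */
static uint32_t pack_cr(const uint8_t crf[8])
{
    uint32_t cr = 0;
    for (int i = 0; i < 8; i++) {
        cr |= (uint32_t)(crf[i] & 0xF) << (32 - ((i + 1) * 4));
    }
    return cr;
}

/* Split a 32-bit CR image back into eight 4-bit fields. */
static void unpack_cr(uint32_t cr, uint8_t crf[8])
{
    for (int i = 0; i < 8; i++) {
        crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
    }
}

int main(void)
{
    uint8_t crf[8] = { 0x8, 0x4, 0x2, 0x1, 0x0, 0xF, 0x3, 0x5 };
    uint8_t out[8];
    uint32_t cr = pack_cr(crf);

    unpack_cr(cr, out);
    printf("cr = 0x%08" PRIx32 ", round-trip ok = %d\n",
           cr, memcmp(crf, out, sizeof(out)) == 0);
    return 0;
}

The round trip is lossless because each field occupies a disjoint nibble, which is why the gdb read and write paths can use the packed form interchangeably with the per-field crf[] array.
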
diff --git a/target/ppc/helper.h b/target/ppc/helper.h
index 4076aa281e..86f97ee1e7 100644
--- a/target/ppc/helper.h
+++ b/target/ppc/helper.h
@@ -4,10 +4,14 @@ DEF_HELPER_FLAGS_4(tw, TCG_CALL_NO_WG, void, env, tl, tl, i32)
#if defined(TARGET_PPC64)
DEF_HELPER_FLAGS_4(td, TCG_CALL_NO_WG, void, env, tl, tl, i32)
#endif
+DEF_HELPER_4(HASHST, void, env, tl, tl, tl)
+DEF_HELPER_4(HASHCHK, void, env, tl, tl, tl)
+DEF_HELPER_4(HASHSTP, void, env, tl, tl, tl)
+DEF_HELPER_4(HASHCHKP, void, env, tl, tl, tl)
#if !defined(CONFIG_USER_ONLY)
DEF_HELPER_2(store_msr, void, env, tl)
+DEF_HELPER_1(ppc_maybe_interrupt, void, env)
DEF_HELPER_1(rfi, void, env)
-DEF_HELPER_1(rfsvc, void, env)
DEF_HELPER_1(40x_rfci, void, env)
DEF_HELPER_1(rfci, void, env)
DEF_HELPER_1(rfdi, void, env)
@@ -18,9 +22,20 @@ DEF_HELPER_2(pminsn, void, env, i32)
DEF_HELPER_1(rfid, void, env)
DEF_HELPER_1(rfscv, void, env)
DEF_HELPER_1(hrfid, void, env)
+DEF_HELPER_2(rfebb, void, env, tl)
DEF_HELPER_2(store_lpcr, void, env, tl)
DEF_HELPER_2(store_pcr, void, env, tl)
+DEF_HELPER_2(store_ciabr, void, env, tl)
+DEF_HELPER_2(store_dawr0, void, env, tl)
+DEF_HELPER_2(store_dawrx0, void, env, tl)
+DEF_HELPER_2(store_mmcr0, void, env, tl)
+DEF_HELPER_2(store_mmcr1, void, env, tl)
+DEF_HELPER_3(store_pmc, void, env, i32, i64)
+DEF_HELPER_2(read_pmc, tl, env, i32)
+DEF_HELPER_2(insns_inc, void, env, i32)
+DEF_HELPER_1(handle_pmc5_overflow, void, env)
#endif
+DEF_HELPER_2(book3s_trace, void, env, tl)
DEF_HELPER_1(check_tlb_flush_local, void, env)
DEF_HELPER_1(check_tlb_flush_global, void, env)
#endif
@@ -46,14 +61,18 @@ DEF_HELPER_4(divwe, tl, env, tl, tl, i32)
DEF_HELPER_FLAGS_1(popcntb, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_2(cmpb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
DEF_HELPER_3(sraw, tl, env, tl, tl)
-DEF_HELPER_FLAGS_2(cfuged, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(CFUGED, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(PDEPD, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(PEXTD, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_1(CDTBCD, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(CBCDTD, TCG_CALL_NO_RWG_SE, tl, tl)
#if defined(TARGET_PPC64)
DEF_HELPER_FLAGS_2(cmpeqb, TCG_CALL_NO_RWG_SE, i32, tl, tl)
DEF_HELPER_FLAGS_1(popcntw, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_2(bpermd, TCG_CALL_NO_RWG_SE, i64, i64, i64)
DEF_HELPER_3(srad, tl, env, tl, tl)
-DEF_HELPER_0(darn32, tl)
-DEF_HELPER_0(darn64, tl)
+DEF_HELPER_FLAGS_0(darn32, TCG_CALL_NO_RWG, tl)
+DEF_HELPER_FLAGS_0(darn64, TCG_CALL_NO_RWG, tl)
#endif
DEF_HELPER_FLAGS_1(cntlsw32, TCG_CALL_NO_RWG_SE, i32, i32)
@@ -61,6 +80,7 @@ DEF_HELPER_FLAGS_1(cntlzw32, TCG_CALL_NO_RWG_SE, i32, i32)
DEF_HELPER_FLAGS_2(brinc, TCG_CALL_NO_RWG_SE, tl, tl, tl)
DEF_HELPER_1(float_check_status, void, env)
+DEF_HELPER_1(fpscr_check_status, void, env)
DEF_HELPER_1(reset_fpstatus, void, env)
DEF_HELPER_2(compute_fprf_float64, void, env, i64)
DEF_HELPER_3(store_fpscr, void, env, i64, i32)
@@ -91,112 +111,95 @@ DEF_HELPER_2(frip, i64, env, i64)
DEF_HELPER_2(frim, i64, env, i64)
DEF_HELPER_3(fadd, f64, env, f64, f64)
+DEF_HELPER_3(fadds, f64, env, f64, f64)
DEF_HELPER_3(fsub, f64, env, f64, f64)
+DEF_HELPER_3(fsubs, f64, env, f64, f64)
DEF_HELPER_3(fmul, f64, env, f64, f64)
+DEF_HELPER_3(fmuls, f64, env, f64, f64)
DEF_HELPER_3(fdiv, f64, env, f64, f64)
+DEF_HELPER_3(fdivs, f64, env, f64, f64)
DEF_HELPER_4(fmadd, i64, env, i64, i64, i64)
DEF_HELPER_4(fmsub, i64, env, i64, i64, i64)
DEF_HELPER_4(fnmadd, i64, env, i64, i64, i64)
DEF_HELPER_4(fnmsub, i64, env, i64, i64, i64)
-DEF_HELPER_2(fsqrt, f64, env, f64)
+DEF_HELPER_4(fmadds, i64, env, i64, i64, i64)
+DEF_HELPER_4(fmsubs, i64, env, i64, i64, i64)
+DEF_HELPER_4(fnmadds, i64, env, i64, i64, i64)
+DEF_HELPER_4(fnmsubs, i64, env, i64, i64, i64)
+DEF_HELPER_2(FSQRT, f64, env, f64)
+DEF_HELPER_2(FSQRTS, f64, env, f64)
DEF_HELPER_2(fre, i64, env, i64)
DEF_HELPER_2(fres, i64, env, i64)
DEF_HELPER_2(frsqrte, i64, env, i64)
-DEF_HELPER_4(fsel, i64, env, i64, i64, i64)
+DEF_HELPER_2(frsqrtes, i64, env, i64)
+DEF_HELPER_FLAGS_3(FSEL, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64)
DEF_HELPER_FLAGS_2(ftdiv, TCG_CALL_NO_RWG_SE, i32, i64, i64)
DEF_HELPER_FLAGS_1(ftsqrt, TCG_CALL_NO_RWG_SE, i32, i64)
#define dh_alias_avr ptr
#define dh_ctype_avr ppc_avr_t *
+#define dh_typecode_avr dh_typecode_ptr
#define dh_alias_vsr ptr
#define dh_ctype_vsr ppc_vsr_t *
+#define dh_typecode_vsr dh_typecode_ptr
-DEF_HELPER_3(vavgub, void, avr, avr, avr)
-DEF_HELPER_3(vavguh, void, avr, avr, avr)
-DEF_HELPER_3(vavguw, void, avr, avr, avr)
-DEF_HELPER_3(vabsdub, void, avr, avr, avr)
-DEF_HELPER_3(vabsduh, void, avr, avr, avr)
-DEF_HELPER_3(vabsduw, void, avr, avr, avr)
-DEF_HELPER_3(vavgsb, void, avr, avr, avr)
-DEF_HELPER_3(vavgsh, void, avr, avr, avr)
-DEF_HELPER_3(vavgsw, void, avr, avr, avr)
-DEF_HELPER_4(vcmpequb, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpequh, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpequw, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpequd, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpneb, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpneh, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpnew, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpnezb, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpnezh, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpnezw, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpgtub, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpgtuh, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpgtuw, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpgtud, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpgtsb, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpgtsh, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpgtsw, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpgtsd, void, env, avr, avr, avr)
+#define dh_alias_acc ptr
+#define dh_ctype_acc ppc_acc_t *
+#define dh_typecode_acc dh_typecode_ptr
+
+DEF_HELPER_FLAGS_4(VAVGUB, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_4(VAVGUH, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_4(VAVGUW, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_4(VABSDUB, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_4(VABSDUH, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_4(VABSDUW, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_4(VAVGSB, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_4(VAVGSH, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_4(VAVGSW, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
DEF_HELPER_4(vcmpeqfp, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgefp, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtfp, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpbfp, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpequb_dot, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpequh_dot, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpequw_dot, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpequd_dot, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpneb_dot, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpneh_dot, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpnew_dot, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpnezb_dot, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpnezh_dot, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpnezw_dot, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpgtub_dot, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpgtuh_dot, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpgtuw_dot, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpgtud_dot, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpgtsb_dot, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpgtsh_dot, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpgtsw_dot, void, env, avr, avr, avr)
-DEF_HELPER_4(vcmpgtsd_dot, void, env, avr, avr, avr)
+DEF_HELPER_FLAGS_4(VCMPNEZB, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_4(VCMPNEZH, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_4(VCMPNEZW, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
DEF_HELPER_4(vcmpeqfp_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgefp_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpgtfp_dot, void, env, avr, avr, avr)
DEF_HELPER_4(vcmpbfp_dot, void, env, avr, avr, avr)
-DEF_HELPER_3(vmrglb, void, avr, avr, avr)
-DEF_HELPER_3(vmrglh, void, avr, avr, avr)
-DEF_HELPER_3(vmrglw, void, avr, avr, avr)
-DEF_HELPER_3(vmrghb, void, avr, avr, avr)
-DEF_HELPER_3(vmrghh, void, avr, avr, avr)
-DEF_HELPER_3(vmrghw, void, avr, avr, avr)
-DEF_HELPER_3(vmulesb, void, avr, avr, avr)
-DEF_HELPER_3(vmulesh, void, avr, avr, avr)
-DEF_HELPER_3(vmulesw, void, avr, avr, avr)
-DEF_HELPER_3(vmuleub, void, avr, avr, avr)
-DEF_HELPER_3(vmuleuh, void, avr, avr, avr)
-DEF_HELPER_3(vmuleuw, void, avr, avr, avr)
-DEF_HELPER_3(vmulosb, void, avr, avr, avr)
-DEF_HELPER_3(vmulosh, void, avr, avr, avr)
-DEF_HELPER_3(vmulosw, void, avr, avr, avr)
-DEF_HELPER_3(vmuloub, void, avr, avr, avr)
-DEF_HELPER_3(vmulouh, void, avr, avr, avr)
-DEF_HELPER_3(vmulouw, void, avr, avr, avr)
-DEF_HELPER_3(vmulhsw, void, avr, avr, avr)
-DEF_HELPER_3(vmulhuw, void, avr, avr, avr)
-DEF_HELPER_3(vmulhsd, void, avr, avr, avr)
-DEF_HELPER_3(vmulhud, void, avr, avr, avr)
-DEF_HELPER_3(vslo, void, avr, avr, avr)
-DEF_HELPER_3(vsro, void, avr, avr, avr)
-DEF_HELPER_3(vsrv, void, avr, avr, avr)
-DEF_HELPER_3(vslv, void, avr, avr, avr)
-DEF_HELPER_3(vaddcuw, void, avr, avr, avr)
-DEF_HELPER_2(vprtybw, void, avr, avr)
-DEF_HELPER_2(vprtybd, void, avr, avr)
-DEF_HELPER_2(vprtybq, void, avr, avr)
-DEF_HELPER_3(vsubcuw, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(vmrglb, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(vmrglh, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(vmrglw, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(vmrghb, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(vmrghh, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(vmrghw, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VMULESB, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VMULESH, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VMULESW, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VMULEUB, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VMULEUH, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VMULEUW, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VMULOSB, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VMULOSH, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VMULOSW, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VMULOUB, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VMULOUH, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VMULOUW, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VDIVSQ, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VDIVUQ, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VDIVESD, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VDIVEUD, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VDIVESQ, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VDIVEUQ, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VMODSQ, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VMODUQ, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(vslo, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(vsro, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(vsrv, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(vslv, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VPRTYBQ, TCG_CALL_NO_RWG, void, avr, avr, i32)
DEF_HELPER_FLAGS_5(vaddsbs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
DEF_HELPER_FLAGS_5(vaddshs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
DEF_HELPER_FLAGS_5(vaddsws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
@@ -209,43 +212,39 @@ DEF_HELPER_FLAGS_5(vadduws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
DEF_HELPER_FLAGS_5(vsububs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
DEF_HELPER_FLAGS_5(vsubuhs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
DEF_HELPER_FLAGS_5(vsubuws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
-DEF_HELPER_3(vadduqm, void, avr, avr, avr)
-DEF_HELPER_4(vaddecuq, void, avr, avr, avr, avr)
-DEF_HELPER_4(vaddeuqm, void, avr, avr, avr, avr)
-DEF_HELPER_3(vaddcuq, void, avr, avr, avr)
-DEF_HELPER_3(vsubuqm, void, avr, avr, avr)
-DEF_HELPER_4(vsubecuq, void, avr, avr, avr, avr)
-DEF_HELPER_4(vsubeuqm, void, avr, avr, avr, avr)
-DEF_HELPER_3(vsubcuq, void, avr, avr, avr)
-DEF_HELPER_4(vsldoi, void, avr, avr, avr, i32)
-DEF_HELPER_3(vextractub, void, avr, avr, i32)
-DEF_HELPER_3(vextractuh, void, avr, avr, i32)
-DEF_HELPER_3(vextractuw, void, avr, avr, i32)
-DEF_HELPER_3(vextractd, void, avr, avr, i32)
-DEF_HELPER_3(vinsertb, void, avr, avr, i32)
-DEF_HELPER_3(vinserth, void, avr, avr, i32)
-DEF_HELPER_3(vinsertw, void, avr, avr, i32)
-DEF_HELPER_3(vinsertd, void, avr, avr, i32)
-DEF_HELPER_2(vextsb2w, void, avr, avr)
-DEF_HELPER_2(vextsh2w, void, avr, avr)
-DEF_HELPER_2(vextsb2d, void, avr, avr)
-DEF_HELPER_2(vextsh2d, void, avr, avr)
-DEF_HELPER_2(vextsw2d, void, avr, avr)
-DEF_HELPER_2(vnegw, void, avr, avr)
-DEF_HELPER_2(vnegd, void, avr, avr)
-DEF_HELPER_2(vupkhpx, void, avr, avr)
-DEF_HELPER_2(vupklpx, void, avr, avr)
-DEF_HELPER_2(vupkhsb, void, avr, avr)
-DEF_HELPER_2(vupkhsh, void, avr, avr)
-DEF_HELPER_2(vupkhsw, void, avr, avr)
-DEF_HELPER_2(vupklsb, void, avr, avr)
-DEF_HELPER_2(vupklsh, void, avr, avr)
-DEF_HELPER_2(vupklsw, void, avr, avr)
-DEF_HELPER_5(vmsumubm, void, env, avr, avr, avr, avr)
-DEF_HELPER_5(vmsummbm, void, env, avr, avr, avr, avr)
-DEF_HELPER_5(vsel, void, env, avr, avr, avr, avr)
-DEF_HELPER_5(vperm, void, env, avr, avr, avr, avr)
-DEF_HELPER_5(vpermr, void, env, avr, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VADDUQM, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_4(VADDECUQ, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
+DEF_HELPER_FLAGS_4(VADDEUQM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VADDCUQ, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VSUBUQM, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_4(VSUBECUQ, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
+DEF_HELPER_FLAGS_4(VSUBEUQM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VSUBCUQ, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_4(vsldoi, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_3(vextractub, TCG_CALL_NO_RWG, void, avr, avr, i32)
+DEF_HELPER_FLAGS_3(vextractuh, TCG_CALL_NO_RWG, void, avr, avr, i32)
+DEF_HELPER_FLAGS_3(vextractuw, TCG_CALL_NO_RWG, void, avr, avr, i32)
+DEF_HELPER_FLAGS_3(vextractd, TCG_CALL_NO_RWG, void, avr, avr, i32)
+DEF_HELPER_4(VINSBLX, void, env, avr, i64, tl)
+DEF_HELPER_4(VINSHLX, void, env, avr, i64, tl)
+DEF_HELPER_4(VINSWLX, void, env, avr, i64, tl)
+DEF_HELPER_4(VINSDLX, void, env, avr, i64, tl)
+DEF_HELPER_FLAGS_2(VSTRIBL, TCG_CALL_NO_RWG, i32, avr, avr)
+DEF_HELPER_FLAGS_2(VSTRIBR, TCG_CALL_NO_RWG, i32, avr, avr)
+DEF_HELPER_FLAGS_2(VSTRIHL, TCG_CALL_NO_RWG, i32, avr, avr)
+DEF_HELPER_FLAGS_2(VSTRIHR, TCG_CALL_NO_RWG, i32, avr, avr)
+DEF_HELPER_FLAGS_2(vupkhpx, TCG_CALL_NO_RWG, void, avr, avr)
+DEF_HELPER_FLAGS_2(vupklpx, TCG_CALL_NO_RWG, void, avr, avr)
+DEF_HELPER_FLAGS_2(vupkhsb, TCG_CALL_NO_RWG, void, avr, avr)
+DEF_HELPER_FLAGS_2(vupkhsh, TCG_CALL_NO_RWG, void, avr, avr)
+DEF_HELPER_FLAGS_2(vupkhsw, TCG_CALL_NO_RWG, void, avr, avr)
+DEF_HELPER_FLAGS_2(vupklsb, TCG_CALL_NO_RWG, void, avr, avr)
+DEF_HELPER_FLAGS_2(vupklsh, TCG_CALL_NO_RWG, void, avr, avr)
+DEF_HELPER_FLAGS_2(vupklsw, TCG_CALL_NO_RWG, void, avr, avr)
+DEF_HELPER_FLAGS_4(VMSUMUBM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
+DEF_HELPER_FLAGS_4(VMSUMMBM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
+DEF_HELPER_FLAGS_4(VPERM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
+DEF_HELPER_FLAGS_4(VPERMR, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
DEF_HELPER_4(vpkshss, void, env, avr, avr, avr)
DEF_HELPER_4(vpkshus, void, env, avr, avr, avr)
DEF_HELPER_4(vpkswss, void, env, avr, avr, avr)
@@ -258,14 +257,14 @@ DEF_HELPER_4(vpkudus, void, env, avr, avr, avr)
DEF_HELPER_4(vpkuhum, void, env, avr, avr, avr)
DEF_HELPER_4(vpkuwum, void, env, avr, avr, avr)
DEF_HELPER_4(vpkudum, void, env, avr, avr, avr)
-DEF_HELPER_3(vpkpx, void, avr, avr, avr)
-DEF_HELPER_5(vmhaddshs, void, env, avr, avr, avr, avr)
-DEF_HELPER_5(vmhraddshs, void, env, avr, avr, avr, avr)
-DEF_HELPER_5(vmsumuhm, void, env, avr, avr, avr, avr)
-DEF_HELPER_5(vmsumuhs, void, env, avr, avr, avr, avr)
-DEF_HELPER_5(vmsumshm, void, env, avr, avr, avr, avr)
-DEF_HELPER_5(vmsumshs, void, env, avr, avr, avr, avr)
-DEF_HELPER_4(vmladduhm, void, avr, avr, avr, avr)
+DEF_HELPER_FLAGS_3(vpkpx, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_5(VMHADDSHS, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(VMHRADDSHS, void, env, avr, avr, avr, avr)
+DEF_HELPER_FLAGS_4(VMSUMUHM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
+DEF_HELPER_5(VMSUMUHS, void, env, avr, avr, avr, avr)
+DEF_HELPER_FLAGS_4(VMSUMSHM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
+DEF_HELPER_5(VMSUMSHS, void, env, avr, avr, avr, avr)
+DEF_HELPER_FLAGS_5(VMLADDUHM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32)
DEF_HELPER_FLAGS_2(mtvscr, TCG_CALL_NO_RWG, void, env, i32)
DEF_HELPER_FLAGS_1(mfvscr, TCG_CALL_NO_RWG, i32, env)
DEF_HELPER_3(lvebx, void, env, avr, tl)
@@ -291,10 +290,10 @@ DEF_HELPER_4(vmaxfp, void, env, avr, avr, avr)
DEF_HELPER_4(vminfp, void, env, avr, avr, avr)
DEF_HELPER_3(vrefp, void, env, avr, avr)
DEF_HELPER_3(vrsqrtefp, void, env, avr, avr)
-DEF_HELPER_3(vrlwmi, void, avr, avr, avr)
-DEF_HELPER_3(vrldmi, void, avr, avr, avr)
-DEF_HELPER_3(vrldnm, void, avr, avr, avr)
-DEF_HELPER_3(vrlwnm, void, avr, avr, avr)
+DEF_HELPER_FLAGS_4(VRLWMI, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_4(VRLDMI, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_4(VRLDNM, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_4(VRLWNM, TCG_CALL_NO_RWG, void, avr, avr, avr, i32)
DEF_HELPER_5(vmaddfp, void, env, avr, avr, avr, avr)
DEF_HELPER_5(vnmsubfp, void, env, avr, avr, avr, avr)
DEF_HELPER_3(vexptefp, void, env, avr, avr)
@@ -308,55 +307,59 @@ DEF_HELPER_4(vcfsx, void, env, avr, avr, i32)
DEF_HELPER_4(vctuxs, void, env, avr, avr, i32)
DEF_HELPER_4(vctsxs, void, env, avr, avr, i32)
-DEF_HELPER_2(vclzb, void, avr, avr)
-DEF_HELPER_2(vclzh, void, avr, avr)
-DEF_HELPER_2(vctzb, void, avr, avr)
-DEF_HELPER_2(vctzh, void, avr, avr)
-DEF_HELPER_2(vctzw, void, avr, avr)
-DEF_HELPER_2(vctzd, void, avr, avr)
-DEF_HELPER_2(vpopcntb, void, avr, avr)
-DEF_HELPER_2(vpopcnth, void, avr, avr)
-DEF_HELPER_2(vpopcntw, void, avr, avr)
-DEF_HELPER_2(vpopcntd, void, avr, avr)
-DEF_HELPER_1(vclzlsbb, tl, avr)
-DEF_HELPER_1(vctzlsbb, tl, avr)
-DEF_HELPER_3(vbpermd, void, avr, avr, avr)
-DEF_HELPER_3(vbpermq, void, avr, avr, avr)
-DEF_HELPER_3(vpmsumb, void, avr, avr, avr)
-DEF_HELPER_3(vpmsumh, void, avr, avr, avr)
-DEF_HELPER_3(vpmsumw, void, avr, avr, avr)
-DEF_HELPER_3(vpmsumd, void, avr, avr, avr)
-DEF_HELPER_2(vextublx, tl, tl, avr)
-DEF_HELPER_2(vextuhlx, tl, tl, avr)
-DEF_HELPER_2(vextuwlx, tl, tl, avr)
-DEF_HELPER_2(vextubrx, tl, tl, avr)
-DEF_HELPER_2(vextuhrx, tl, tl, avr)
-DEF_HELPER_2(vextuwrx, tl, tl, avr)
+DEF_HELPER_FLAGS_2(vclzb, TCG_CALL_NO_RWG, void, avr, avr)
+DEF_HELPER_FLAGS_2(vclzh, TCG_CALL_NO_RWG, void, avr, avr)
+DEF_HELPER_FLAGS_2(vctzb, TCG_CALL_NO_RWG, void, avr, avr)
+DEF_HELPER_FLAGS_2(vctzh, TCG_CALL_NO_RWG, void, avr, avr)
+DEF_HELPER_FLAGS_2(vctzw, TCG_CALL_NO_RWG, void, avr, avr)
+DEF_HELPER_FLAGS_2(vctzd, TCG_CALL_NO_RWG, void, avr, avr)
+DEF_HELPER_FLAGS_2(vpopcntb, TCG_CALL_NO_RWG, void, avr, avr)
+DEF_HELPER_FLAGS_2(vpopcnth, TCG_CALL_NO_RWG, void, avr, avr)
+DEF_HELPER_FLAGS_2(vpopcntw, TCG_CALL_NO_RWG, void, avr, avr)
+DEF_HELPER_FLAGS_2(vpopcntd, TCG_CALL_NO_RWG, void, avr, avr)
+DEF_HELPER_FLAGS_1(vclzlsbb, TCG_CALL_NO_RWG, tl, avr)
+DEF_HELPER_FLAGS_1(vctzlsbb, TCG_CALL_NO_RWG, tl, avr)
+DEF_HELPER_FLAGS_3(vbpermd, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(vbpermq, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(vpmsumb, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(vpmsumh, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(vpmsumw, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(VPMSUMD, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_2(vextublx, TCG_CALL_NO_RWG, tl, tl, avr)
+DEF_HELPER_FLAGS_2(vextuhlx, TCG_CALL_NO_RWG, tl, tl, avr)
+DEF_HELPER_FLAGS_2(vextuwlx, TCG_CALL_NO_RWG, tl, tl, avr)
+DEF_HELPER_FLAGS_2(vextubrx, TCG_CALL_NO_RWG, tl, tl, avr)
+DEF_HELPER_FLAGS_2(vextuhrx, TCG_CALL_NO_RWG, tl, tl, avr)
+DEF_HELPER_FLAGS_2(vextuwrx, TCG_CALL_NO_RWG, tl, tl, avr)
+DEF_HELPER_5(VEXTDUBVLX, void, env, avr, avr, avr, tl)
+DEF_HELPER_5(VEXTDUHVLX, void, env, avr, avr, avr, tl)
+DEF_HELPER_5(VEXTDUWVLX, void, env, avr, avr, avr, tl)
+DEF_HELPER_5(VEXTDDVLX, void, env, avr, avr, avr, tl)
-DEF_HELPER_2(vsbox, void, avr, avr)
-DEF_HELPER_3(vcipher, void, avr, avr, avr)
-DEF_HELPER_3(vcipherlast, void, avr, avr, avr)
-DEF_HELPER_3(vncipher, void, avr, avr, avr)
-DEF_HELPER_3(vncipherlast, void, avr, avr, avr)
-DEF_HELPER_3(vshasigmaw, void, avr, avr, i32)
-DEF_HELPER_3(vshasigmad, void, avr, avr, i32)
-DEF_HELPER_4(vpermxor, void, avr, avr, avr, avr)
+DEF_HELPER_FLAGS_2(vsbox, TCG_CALL_NO_RWG, void, avr, avr)
+DEF_HELPER_FLAGS_3(vcipher, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(vcipherlast, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(vncipher, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(vncipherlast, TCG_CALL_NO_RWG, void, avr, avr, avr)
+DEF_HELPER_FLAGS_3(vshasigmaw, TCG_CALL_NO_RWG, void, avr, avr, i32)
+DEF_HELPER_FLAGS_3(vshasigmad, TCG_CALL_NO_RWG, void, avr, avr, i32)
+DEF_HELPER_FLAGS_4(vpermxor, TCG_CALL_NO_RWG, void, avr, avr, avr, avr)
-DEF_HELPER_4(bcdadd, i32, avr, avr, avr, i32)
-DEF_HELPER_4(bcdsub, i32, avr, avr, avr, i32)
-DEF_HELPER_3(bcdcfn, i32, avr, avr, i32)
-DEF_HELPER_3(bcdctn, i32, avr, avr, i32)
-DEF_HELPER_3(bcdcfz, i32, avr, avr, i32)
-DEF_HELPER_3(bcdctz, i32, avr, avr, i32)
-DEF_HELPER_3(bcdcfsq, i32, avr, avr, i32)
-DEF_HELPER_3(bcdctsq, i32, avr, avr, i32)
-DEF_HELPER_4(bcdcpsgn, i32, avr, avr, avr, i32)
-DEF_HELPER_3(bcdsetsgn, i32, avr, avr, i32)
-DEF_HELPER_4(bcds, i32, avr, avr, avr, i32)
-DEF_HELPER_4(bcdus, i32, avr, avr, avr, i32)
-DEF_HELPER_4(bcdsr, i32, avr, avr, avr, i32)
-DEF_HELPER_4(bcdtrunc, i32, avr, avr, avr, i32)
-DEF_HELPER_4(bcdutrunc, i32, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_4(bcdadd, TCG_CALL_NO_RWG, i32, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_4(bcdsub, TCG_CALL_NO_RWG, i32, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_3(bcdcfn, TCG_CALL_NO_RWG, i32, avr, avr, i32)
+DEF_HELPER_FLAGS_3(bcdctn, TCG_CALL_NO_RWG, i32, avr, avr, i32)
+DEF_HELPER_FLAGS_3(bcdcfz, TCG_CALL_NO_RWG, i32, avr, avr, i32)
+DEF_HELPER_FLAGS_3(bcdctz, TCG_CALL_NO_RWG, i32, avr, avr, i32)
+DEF_HELPER_FLAGS_3(bcdcfsq, TCG_CALL_NO_RWG, i32, avr, avr, i32)
+DEF_HELPER_FLAGS_3(bcdctsq, TCG_CALL_NO_RWG, i32, avr, avr, i32)
+DEF_HELPER_FLAGS_4(bcdcpsgn, TCG_CALL_NO_RWG, i32, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_3(bcdsetsgn, TCG_CALL_NO_RWG, i32, avr, avr, i32)
+DEF_HELPER_FLAGS_4(bcds, TCG_CALL_NO_RWG, i32, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_4(bcdus, TCG_CALL_NO_RWG, i32, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_4(bcdsr, TCG_CALL_NO_RWG, i32, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_4(bcdtrunc, TCG_CALL_NO_RWG, i32, avr, avr, avr, i32)
+DEF_HELPER_FLAGS_4(bcdutrunc, TCG_CALL_NO_RWG, i32, avr, avr, avr, i32)
DEF_HELPER_4(xsadddp, void, env, vsr, vsr, vsr)
DEF_HELPER_5(xsaddqp, void, env, i32, vsr, vsr, vsr)
@@ -370,14 +373,16 @@ DEF_HELPER_3(xssqrtdp, void, env, vsr, vsr)
DEF_HELPER_3(xsrsqrtedp, void, env, vsr, vsr)
DEF_HELPER_4(xstdivdp, void, env, i32, vsr, vsr)
DEF_HELPER_3(xstsqrtdp, void, env, i32, vsr)
-DEF_HELPER_5(xsmadddp, void, env, vsr, vsr, vsr, vsr)
-DEF_HELPER_5(xsmsubdp, void, env, vsr, vsr, vsr, vsr)
-DEF_HELPER_5(xsnmadddp, void, env, vsr, vsr, vsr, vsr)
-DEF_HELPER_5(xsnmsubdp, void, env, vsr, vsr, vsr, vsr)
-DEF_HELPER_4(xscmpeqdp, void, env, vsr, vsr, vsr)
-DEF_HELPER_4(xscmpgtdp, void, env, vsr, vsr, vsr)
-DEF_HELPER_4(xscmpgedp, void, env, vsr, vsr, vsr)
-DEF_HELPER_4(xscmpnedp, void, env, vsr, vsr, vsr)
+DEF_HELPER_5(XSMADDDP, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(XSMSUBDP, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(XSNMADDDP, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(XSNMSUBDP, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_4(XSCMPEQDP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XSCMPGTDP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XSCMPGEDP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XSCMPEQQP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XSCMPGTQP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XSCMPGEQP, void, env, vsr, vsr, vsr)
DEF_HELPER_4(xscmpexpdp, void, env, i32, vsr, vsr)
DEF_HELPER_4(xscmpexpqp, void, env, i32, vsr, vsr)
DEF_HELPER_4(xscmpodp, void, env, i32, vsr, vsr)
@@ -386,23 +391,29 @@ DEF_HELPER_4(xscmpoqp, void, env, i32, vsr, vsr)
DEF_HELPER_4(xscmpuqp, void, env, i32, vsr, vsr)
DEF_HELPER_4(xsmaxdp, void, env, vsr, vsr, vsr)
DEF_HELPER_4(xsmindp, void, env, vsr, vsr, vsr)
-DEF_HELPER_5(xsmaxcdp, void, env, i32, vsr, vsr, vsr)
-DEF_HELPER_5(xsmincdp, void, env, i32, vsr, vsr, vsr)
-DEF_HELPER_5(xsmaxjdp, void, env, i32, vsr, vsr, vsr)
-DEF_HELPER_5(xsminjdp, void, env, i32, vsr, vsr, vsr)
+DEF_HELPER_4(XSMAXCDP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XSMINCDP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XSMAXJDP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XSMINJDP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XSMAXCQP, void, env, vsr, vsr, vsr)
+DEF_HELPER_4(XSMINCQP, void, env, vsr, vsr, vsr)
DEF_HELPER_3(xscvdphp, void, env, vsr, vsr)
DEF_HELPER_4(xscvdpqp, void, env, i32, vsr, vsr)
DEF_HELPER_3(xscvdpsp, void, env, vsr, vsr)
DEF_HELPER_2(xscvdpspn, i64, env, i64)
-DEF_HELPER_4(xscvqpdp, void, env, i32, vsr, vsr)
+DEF_HELPER_4(XSCVQPDP, void, env, i32, vsr, vsr)
DEF_HELPER_4(xscvqpsdz, void, env, i32, vsr, vsr)
DEF_HELPER_4(xscvqpswz, void, env, i32, vsr, vsr)
DEF_HELPER_4(xscvqpudz, void, env, i32, vsr, vsr)
DEF_HELPER_4(xscvqpuwz, void, env, i32, vsr, vsr)
+DEF_HELPER_3(XSCVQPUQZ, void, env, vsr, vsr)
+DEF_HELPER_3(XSCVQPSQZ, void, env, vsr, vsr)
+DEF_HELPER_3(XSCVUQQP, void, env, vsr, vsr)
+DEF_HELPER_3(XSCVSQQP, void, env, vsr, vsr)
DEF_HELPER_3(xscvhpdp, void, env, vsr, vsr)
DEF_HELPER_4(xscvsdqp, void, env, i32, vsr, vsr)
DEF_HELPER_3(xscvspdp, void, env, vsr, vsr)
-DEF_HELPER_2(xscvspdpn, i64, env, i64)
+DEF_HELPER_FLAGS_1(XSCVSPDPN, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_3(xscvdpsxds, void, env, vsr, vsr)
DEF_HELPER_3(xscvdpsxws, void, env, vsr, vsr)
DEF_HELPER_3(xscvdpuxds, void, env, vsr, vsr)
@@ -412,9 +423,9 @@ DEF_HELPER_3(xscvuxdsp, void, env, vsr, vsr)
DEF_HELPER_3(xscvsxdsp, void, env, vsr, vsr)
DEF_HELPER_4(xscvudqp, void, env, i32, vsr, vsr)
DEF_HELPER_3(xscvuxddp, void, env, vsr, vsr)
-DEF_HELPER_3(xststdcsp, void, env, i32, vsr)
-DEF_HELPER_2(xststdcdp, void, env, i32)
-DEF_HELPER_2(xststdcqp, void, env, i32)
+DEF_HELPER_4(XSTSTDCSP, void, env, i32, i32, vsr)
+DEF_HELPER_4(XSTSTDCDP, void, env, i32, i32, vsr)
+DEF_HELPER_4(XSTSTDCQP, void, env, i32, i32, vsr)
DEF_HELPER_3(xsrdpi, void, env, vsr, vsr)
DEF_HELPER_3(xsrdpic, void, env, vsr, vsr)
DEF_HELPER_3(xsrdpim, void, env, vsr, vsr)
@@ -433,10 +444,19 @@ DEF_HELPER_3(xsresp, void, env, vsr, vsr)
DEF_HELPER_2(xsrsp, i64, env, i64)
DEF_HELPER_3(xssqrtsp, void, env, vsr, vsr)
DEF_HELPER_3(xsrsqrtesp, void, env, vsr, vsr)
-DEF_HELPER_5(xsmaddsp, void, env, vsr, vsr, vsr, vsr)
-DEF_HELPER_5(xsmsubsp, void, env, vsr, vsr, vsr, vsr)
-DEF_HELPER_5(xsnmaddsp, void, env, vsr, vsr, vsr, vsr)
-DEF_HELPER_5(xsnmsubsp, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(XSMADDSP, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(XSMSUBSP, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(XSNMADDSP, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(XSNMSUBSP, void, env, vsr, vsr, vsr, vsr)
+
+DEF_HELPER_5(XSMADDQP, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(XSMADDQPO, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(XSMSUBQP, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(XSMSUBQPO, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(XSNMADDQP, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(XSNMADDQPO, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(XSNMSUBQP, void, env, vsr, vsr, vsr, vsr)
+DEF_HELPER_5(XSNMSUBQPO, void, env, vsr, vsr, vsr, vsr)
DEF_HELPER_4(xvadddp, void, env, vsr, vsr, vsr)
DEF_HELPER_4(xvsubdp, void, env, vsr, vsr, vsr)
@@ -494,6 +514,7 @@ DEF_HELPER_FLAGS_4(xvcmpnesp, TCG_CALL_NO_RWG, i32, env, vsr, vsr, vsr)
DEF_HELPER_3(xvcvspdp, void, env, vsr, vsr)
DEF_HELPER_3(xvcvsphp, void, env, vsr, vsr)
DEF_HELPER_3(xvcvhpsp, void, env, vsr, vsr)
+DEF_HELPER_3(XVCVSPBF16, void, env, vsr, vsr)
DEF_HELPER_3(xvcvspsxds, void, env, vsr, vsr)
DEF_HELPER_3(xvcvspsxws, void, env, vsr, vsr)
DEF_HELPER_3(xvcvspuxds, void, env, vsr, vsr)
@@ -502,18 +523,67 @@ DEF_HELPER_3(xvcvsxdsp, void, env, vsr, vsr)
DEF_HELPER_3(xvcvuxdsp, void, env, vsr, vsr)
DEF_HELPER_3(xvcvsxwsp, void, env, vsr, vsr)
DEF_HELPER_3(xvcvuxwsp, void, env, vsr, vsr)
-DEF_HELPER_2(xvtstdcsp, void, env, i32)
-DEF_HELPER_2(xvtstdcdp, void, env, i32)
+DEF_HELPER_FLAGS_4(XVTSTDCSP, TCG_CALL_NO_RWG, void, vsr, vsr, i64, i32)
+DEF_HELPER_FLAGS_4(XVTSTDCDP, TCG_CALL_NO_RWG, void, vsr, vsr, i64, i32)
DEF_HELPER_3(xvrspi, void, env, vsr, vsr)
DEF_HELPER_3(xvrspic, void, env, vsr, vsr)
DEF_HELPER_3(xvrspim, void, env, vsr, vsr)
DEF_HELPER_3(xvrspip, void, env, vsr, vsr)
DEF_HELPER_3(xvrspiz, void, env, vsr, vsr)
-DEF_HELPER_4(xxperm, void, env, vsr, vsr, vsr)
-DEF_HELPER_4(xxpermr, void, env, vsr, vsr, vsr)
-DEF_HELPER_4(xxextractuw, void, env, vsr, vsr, i32)
-DEF_HELPER_4(xxinsertw, void, env, vsr, vsr, i32)
-DEF_HELPER_3(xvxsigsp, void, env, vsr, vsr)
+DEF_HELPER_FLAGS_2(XXGENPCVBM_be_exp, TCG_CALL_NO_RWG, void, vsr, avr)
+DEF_HELPER_FLAGS_2(XXGENPCVBM_be_comp, TCG_CALL_NO_RWG, void, vsr, avr)
+DEF_HELPER_FLAGS_2(XXGENPCVBM_le_exp, TCG_CALL_NO_RWG, void, vsr, avr)
+DEF_HELPER_FLAGS_2(XXGENPCVBM_le_comp, TCG_CALL_NO_RWG, void, vsr, avr)
+DEF_HELPER_FLAGS_2(XXGENPCVHM_be_exp, TCG_CALL_NO_RWG, void, vsr, avr)
+DEF_HELPER_FLAGS_2(XXGENPCVHM_be_comp, TCG_CALL_NO_RWG, void, vsr, avr)
+DEF_HELPER_FLAGS_2(XXGENPCVHM_le_exp, TCG_CALL_NO_RWG, void, vsr, avr)
+DEF_HELPER_FLAGS_2(XXGENPCVHM_le_comp, TCG_CALL_NO_RWG, void, vsr, avr)
+DEF_HELPER_FLAGS_2(XXGENPCVWM_be_exp, TCG_CALL_NO_RWG, void, vsr, avr)
+DEF_HELPER_FLAGS_2(XXGENPCVWM_be_comp, TCG_CALL_NO_RWG, void, vsr, avr)
+DEF_HELPER_FLAGS_2(XXGENPCVWM_le_exp, TCG_CALL_NO_RWG, void, vsr, avr)
+DEF_HELPER_FLAGS_2(XXGENPCVWM_le_comp, TCG_CALL_NO_RWG, void, vsr, avr)
+DEF_HELPER_FLAGS_2(XXGENPCVDM_be_exp, TCG_CALL_NO_RWG, void, vsr, avr)
+DEF_HELPER_FLAGS_2(XXGENPCVDM_be_comp, TCG_CALL_NO_RWG, void, vsr, avr)
+DEF_HELPER_FLAGS_2(XXGENPCVDM_le_exp, TCG_CALL_NO_RWG, void, vsr, avr)
+DEF_HELPER_FLAGS_2(XXGENPCVDM_le_comp, TCG_CALL_NO_RWG, void, vsr, avr)
+DEF_HELPER_FLAGS_3(XXEXTRACTUW, TCG_CALL_NO_RWG, void, vsr, vsr, i32)
+DEF_HELPER_FLAGS_5(XXPERMX, TCG_CALL_NO_RWG, void, vsr, vsr, vsr, vsr, tl)
+DEF_HELPER_FLAGS_3(XXINSERTW, TCG_CALL_NO_RWG, void, vsr, vsr, i32)
+DEF_HELPER_FLAGS_2(XVXSIGSP, TCG_CALL_NO_RWG, void, vsr, vsr)
+DEF_HELPER_FLAGS_5(XXEVAL, TCG_CALL_NO_RWG, void, vsr, vsr, vsr, vsr, i32)
+DEF_HELPER_FLAGS_5(XXBLENDVB, TCG_CALL_NO_RWG, void, vsr, vsr, vsr, vsr, i32)
+DEF_HELPER_FLAGS_5(XXBLENDVH, TCG_CALL_NO_RWG, void, vsr, vsr, vsr, vsr, i32)
+DEF_HELPER_FLAGS_5(XXBLENDVW, TCG_CALL_NO_RWG, void, vsr, vsr, vsr, vsr, i32)
+DEF_HELPER_FLAGS_5(XXBLENDVD, TCG_CALL_NO_RWG, void, vsr, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XVI4GER8, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVI4GER8PP, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVI8GER4, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVI8GER4PP, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVI8GER4SPP, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVI16GER2, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVI16GER2S, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVI16GER2PP, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVI16GER2SPP, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF16GER2, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF16GER2PP, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF16GER2PN, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF16GER2NP, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF16GER2NN, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVBF16GER2, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVBF16GER2PP, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVBF16GER2PN, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVBF16GER2NP, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVBF16GER2NN, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF32GER, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF32GERPP, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF32GERPN, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF32GERNP, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF32GERNN, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF64GER, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF64GERPP, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF64GERPN, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF64GERNP, void, env, vsr, vsr, acc, i32)
+DEF_HELPER_5(XVF64GERNN, void, env, vsr, vsr, acc, i32)
DEF_HELPER_2(efscfsi, i32, env, i32)
DEF_HELPER_2(efscfui, i32, env, i32)
@@ -604,24 +674,24 @@ DEF_HELPER_2(booke_set_eplc, void, env, tl)
DEF_HELPER_2(booke_set_epsc, void, env, tl)
DEF_HELPER_2(6xx_tlbd, void, env, tl)
DEF_HELPER_2(6xx_tlbi, void, env, tl)
-DEF_HELPER_2(74xx_tlbd, void, env, tl)
-DEF_HELPER_2(74xx_tlbi, void, env, tl)
DEF_HELPER_FLAGS_1(tlbia, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_FLAGS_2(tlbie, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_FLAGS_2(tlbiva, TCG_CALL_NO_RWG, void, env, tl)
#if defined(TARGET_PPC64)
-DEF_HELPER_FLAGS_3(store_slb, TCG_CALL_NO_RWG, void, env, tl, tl)
-DEF_HELPER_2(load_slb_esid, tl, env, tl)
-DEF_HELPER_2(load_slb_vsid, tl, env, tl)
-DEF_HELPER_2(find_slb_vsid, tl, env, tl)
-DEF_HELPER_FLAGS_2(slbia, TCG_CALL_NO_RWG, void, env, i32)
-DEF_HELPER_FLAGS_2(slbie, TCG_CALL_NO_RWG, void, env, tl)
-DEF_HELPER_FLAGS_2(slbieg, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_4(tlbie_isa300, TCG_CALL_NO_WG, void, \
+ env, tl, tl, i32)
+DEF_HELPER_FLAGS_3(SLBMTE, TCG_CALL_NO_RWG, void, env, tl, tl)
+DEF_HELPER_2(SLBMFEE, tl, env, tl)
+DEF_HELPER_2(SLBMFEV, tl, env, tl)
+DEF_HELPER_2(SLBFEE, tl, env, tl)
+DEF_HELPER_FLAGS_2(SLBIA, TCG_CALL_NO_RWG, void, env, i32)
+DEF_HELPER_FLAGS_3(SLBIAG, TCG_CALL_NO_RWG, void, env, tl, i32)
+DEF_HELPER_FLAGS_2(SLBIE, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(SLBIEG, TCG_CALL_NO_RWG, void, env, tl)
#endif
DEF_HELPER_FLAGS_2(load_sr, TCG_CALL_NO_RWG, tl, env, tl)
DEF_HELPER_FLAGS_3(store_sr, TCG_CALL_NO_RWG, void, env, tl, tl)
-DEF_HELPER_FLAGS_1(602_mfrom, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_1(msgsnd, void, tl)
DEF_HELPER_2(msgclr, void, env, tl)
DEF_HELPER_1(book3s_msgsnd, void, tl)
@@ -629,20 +699,18 @@ DEF_HELPER_2(book3s_msgclr, void, env, tl)
#endif
DEF_HELPER_4(dlmzb, tl, env, tl, tl, i32)
-DEF_HELPER_FLAGS_2(clcs, TCG_CALL_NO_RWG_SE, tl, env, i32)
#if !defined(CONFIG_USER_ONLY)
DEF_HELPER_2(rac, tl, env, tl)
-#endif
-DEF_HELPER_3(div, tl, env, tl, tl)
-DEF_HELPER_3(divo, tl, env, tl, tl)
-DEF_HELPER_3(divs, tl, env, tl, tl)
-DEF_HELPER_3(divso, tl, env, tl, tl)
DEF_HELPER_2(load_dcr, tl, env, tl)
DEF_HELPER_3(store_dcr, void, env, tl, tl)
+#endif
DEF_HELPER_2(load_dump_spr, void, env, i32)
DEF_HELPER_2(store_dump_spr, void, env, i32)
+DEF_HELPER_3(spr_core_write_generic, void, env, i32, tl)
+DEF_HELPER_3(spr_write_CTRL, void, env, i32, tl)
+
DEF_HELPER_4(fscr_facility_check, void, env, i32, i32, i32)
DEF_HELPER_4(msr_facility_check, void, env, i32, i32, i32)
DEF_HELPER_FLAGS_1(load_tbl, TCG_CALL_NO_RWG, tl, env)
@@ -650,8 +718,6 @@ DEF_HELPER_FLAGS_1(load_tbu, TCG_CALL_NO_RWG, tl, env)
DEF_HELPER_FLAGS_1(load_atbl, TCG_CALL_NO_RWG, tl, env)
DEF_HELPER_FLAGS_1(load_atbu, TCG_CALL_NO_RWG, tl, env)
DEF_HELPER_FLAGS_1(load_vtb, TCG_CALL_NO_RWG, tl, env)
-DEF_HELPER_FLAGS_1(load_601_rtcl, TCG_CALL_NO_RWG, tl, env)
-DEF_HELPER_FLAGS_1(load_601_rtcu, TCG_CALL_NO_RWG, tl, env)
#if !defined(CONFIG_USER_ONLY)
#if defined(TARGET_PPC64)
DEF_HELPER_FLAGS_1(load_purr, TCG_CALL_NO_RWG, tl, env)
@@ -661,6 +727,8 @@ DEF_HELPER_FLAGS_1(load_dpdes, TCG_CALL_NO_RWG, tl, env)
DEF_HELPER_FLAGS_2(store_dpdes, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_2(book3s_msgsndp, void, env, tl)
DEF_HELPER_2(book3s_msgclrp, void, env, tl)
+DEF_HELPER_1(load_tfmr, tl, env)
+DEF_HELPER_2(store_tfmr, void, env, tl)
#endif
DEF_HELPER_2(store_sdr1, void, env, tl)
DEF_HELPER_2(store_pidr, void, env, tl)
@@ -669,18 +737,17 @@ DEF_HELPER_FLAGS_2(store_tbl, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_FLAGS_2(store_tbu, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_FLAGS_2(store_atbl, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_FLAGS_2(store_atbu, TCG_CALL_NO_RWG, void, env, tl)
-DEF_HELPER_FLAGS_2(store_601_rtcl, TCG_CALL_NO_RWG, void, env, tl)
-DEF_HELPER_FLAGS_2(store_601_rtcu, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_FLAGS_1(load_decr, TCG_CALL_NO_RWG, tl, env)
DEF_HELPER_FLAGS_2(store_decr, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_FLAGS_1(load_hdecr, TCG_CALL_NO_RWG, tl, env)
DEF_HELPER_FLAGS_2(store_hdecr, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_FLAGS_2(store_vtb, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_FLAGS_2(store_tbu40, TCG_CALL_NO_RWG, void, env, tl)
-DEF_HELPER_2(store_hid0_601, void, env, tl)
-DEF_HELPER_3(store_403_pbr, void, env, i32, tl)
DEF_HELPER_FLAGS_1(load_40x_pit, TCG_CALL_NO_RWG, tl, env)
DEF_HELPER_FLAGS_2(store_40x_pit, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_40x_tcr, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_40x_tsr, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_2(store_40x_pid, void, env, tl)
DEF_HELPER_2(store_40x_dbcr0, void, env, tl)
DEF_HELPER_2(store_40x_sler, void, env, tl)
DEF_HELPER_FLAGS_2(store_booke_tcr, TCG_CALL_NO_RWG, void, env, tl)
@@ -689,76 +756,66 @@ DEF_HELPER_3(store_ibatl, void, env, i32, tl)
DEF_HELPER_3(store_ibatu, void, env, i32, tl)
DEF_HELPER_3(store_dbatl, void, env, i32, tl)
DEF_HELPER_3(store_dbatu, void, env, i32, tl)
-DEF_HELPER_3(store_601_batl, void, env, i32, tl)
-DEF_HELPER_3(store_601_batu, void, env, i32, tl)
#endif
#define dh_alias_fprp ptr
#define dh_ctype_fprp ppc_fprp_t *
+#define dh_typecode_fprp dh_typecode_ptr
-DEF_HELPER_4(dadd, void, env, fprp, fprp, fprp)
-DEF_HELPER_4(daddq, void, env, fprp, fprp, fprp)
-DEF_HELPER_4(dsub, void, env, fprp, fprp, fprp)
-DEF_HELPER_4(dsubq, void, env, fprp, fprp, fprp)
-DEF_HELPER_4(dmul, void, env, fprp, fprp, fprp)
-DEF_HELPER_4(dmulq, void, env, fprp, fprp, fprp)
-DEF_HELPER_4(ddiv, void, env, fprp, fprp, fprp)
-DEF_HELPER_4(ddivq, void, env, fprp, fprp, fprp)
-DEF_HELPER_3(dcmpo, i32, env, fprp, fprp)
-DEF_HELPER_3(dcmpoq, i32, env, fprp, fprp)
-DEF_HELPER_3(dcmpu, i32, env, fprp, fprp)
-DEF_HELPER_3(dcmpuq, i32, env, fprp, fprp)
-DEF_HELPER_3(dtstdc, i32, env, fprp, i32)
-DEF_HELPER_3(dtstdcq, i32, env, fprp, i32)
-DEF_HELPER_3(dtstdg, i32, env, fprp, i32)
-DEF_HELPER_3(dtstdgq, i32, env, fprp, i32)
-DEF_HELPER_3(dtstex, i32, env, fprp, fprp)
-DEF_HELPER_3(dtstexq, i32, env, fprp, fprp)
-DEF_HELPER_3(dtstsf, i32, env, fprp, fprp)
-DEF_HELPER_3(dtstsfq, i32, env, fprp, fprp)
-DEF_HELPER_3(dtstsfi, i32, env, i32, fprp)
-DEF_HELPER_3(dtstsfiq, i32, env, i32, fprp)
-DEF_HELPER_5(dquai, void, env, fprp, fprp, i32, i32)
-DEF_HELPER_5(dquaiq, void, env, fprp, fprp, i32, i32)
-DEF_HELPER_5(dqua, void, env, fprp, fprp, fprp, i32)
-DEF_HELPER_5(dquaq, void, env, fprp, fprp, fprp, i32)
-DEF_HELPER_5(drrnd, void, env, fprp, fprp, fprp, i32)
-DEF_HELPER_5(drrndq, void, env, fprp, fprp, fprp, i32)
-DEF_HELPER_5(drintx, void, env, fprp, fprp, i32, i32)
-DEF_HELPER_5(drintxq, void, env, fprp, fprp, i32, i32)
-DEF_HELPER_5(drintn, void, env, fprp, fprp, i32, i32)
-DEF_HELPER_5(drintnq, void, env, fprp, fprp, i32, i32)
-DEF_HELPER_3(dctdp, void, env, fprp, fprp)
-DEF_HELPER_3(dctqpq, void, env, fprp, fprp)
-DEF_HELPER_3(drsp, void, env, fprp, fprp)
-DEF_HELPER_3(drdpq, void, env, fprp, fprp)
-DEF_HELPER_3(dcffix, void, env, fprp, fprp)
-DEF_HELPER_3(dcffixq, void, env, fprp, fprp)
-DEF_HELPER_3(dctfix, void, env, fprp, fprp)
-DEF_HELPER_3(dctfixq, void, env, fprp, fprp)
-DEF_HELPER_4(ddedpd, void, env, fprp, fprp, i32)
-DEF_HELPER_4(ddedpdq, void, env, fprp, fprp, i32)
-DEF_HELPER_4(denbcd, void, env, fprp, fprp, i32)
-DEF_HELPER_4(denbcdq, void, env, fprp, fprp, i32)
-DEF_HELPER_3(dxex, void, env, fprp, fprp)
-DEF_HELPER_3(dxexq, void, env, fprp, fprp)
-DEF_HELPER_4(diex, void, env, fprp, fprp, fprp)
-DEF_HELPER_4(diexq, void, env, fprp, fprp, fprp)
-DEF_HELPER_4(dscri, void, env, fprp, fprp, i32)
-DEF_HELPER_4(dscriq, void, env, fprp, fprp, i32)
-DEF_HELPER_4(dscli, void, env, fprp, fprp, i32)
-DEF_HELPER_4(dscliq, void, env, fprp, fprp, i32)
+DEF_HELPER_4(DADD, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DADDQ, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DSUB, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DSUBQ, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DMUL, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DMULQ, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DDIV, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DDIVQ, void, env, fprp, fprp, fprp)
+DEF_HELPER_3(DCMPO, i32, env, fprp, fprp)
+DEF_HELPER_3(DCMPOQ, i32, env, fprp, fprp)
+DEF_HELPER_3(DCMPU, i32, env, fprp, fprp)
+DEF_HELPER_3(DCMPUQ, i32, env, fprp, fprp)
+DEF_HELPER_3(DTSTDC, i32, env, fprp, i32)
+DEF_HELPER_3(DTSTDCQ, i32, env, fprp, i32)
+DEF_HELPER_3(DTSTDG, i32, env, fprp, i32)
+DEF_HELPER_3(DTSTDGQ, i32, env, fprp, i32)
+DEF_HELPER_3(DTSTEX, i32, env, fprp, fprp)
+DEF_HELPER_3(DTSTEXQ, i32, env, fprp, fprp)
+DEF_HELPER_3(DTSTSF, i32, env, fprp, fprp)
+DEF_HELPER_3(DTSTSFQ, i32, env, fprp, fprp)
+DEF_HELPER_3(DTSTSFI, i32, env, i32, fprp)
+DEF_HELPER_3(DTSTSFIQ, i32, env, i32, fprp)
+DEF_HELPER_5(DQUAI, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_5(DQUAIQ, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_5(DQUA, void, env, fprp, fprp, fprp, i32)
+DEF_HELPER_5(DQUAQ, void, env, fprp, fprp, fprp, i32)
+DEF_HELPER_5(DRRND, void, env, fprp, fprp, fprp, i32)
+DEF_HELPER_5(DRRNDQ, void, env, fprp, fprp, fprp, i32)
+DEF_HELPER_5(DRINTX, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_5(DRINTXQ, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_5(DRINTN, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_5(DRINTNQ, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_3(DCTDP, void, env, fprp, fprp)
+DEF_HELPER_3(DCTQPQ, void, env, fprp, fprp)
+DEF_HELPER_3(DRSP, void, env, fprp, fprp)
+DEF_HELPER_3(DRDPQ, void, env, fprp, fprp)
+DEF_HELPER_3(DCFFIX, void, env, fprp, fprp)
+DEF_HELPER_3(DCFFIXQ, void, env, fprp, fprp)
+DEF_HELPER_3(DCFFIXQQ, void, env, fprp, avr)
+DEF_HELPER_3(DCTFIX, void, env, fprp, fprp)
+DEF_HELPER_3(DCTFIXQ, void, env, fprp, fprp)
+DEF_HELPER_3(DCTFIXQQ, void, env, avr, fprp)
+DEF_HELPER_4(DDEDPD, void, env, fprp, fprp, i32)
+DEF_HELPER_4(DDEDPDQ, void, env, fprp, fprp, i32)
+DEF_HELPER_4(DENBCD, void, env, fprp, fprp, i32)
+DEF_HELPER_4(DENBCDQ, void, env, fprp, fprp, i32)
+DEF_HELPER_3(DXEX, void, env, fprp, fprp)
+DEF_HELPER_3(DXEXQ, void, env, fprp, fprp)
+DEF_HELPER_4(DIEX, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DIEXQ, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(DSCRI, void, env, fprp, fprp, i32)
+DEF_HELPER_4(DSCRIQ, void, env, fprp, fprp, i32)
+DEF_HELPER_4(DSCLI, void, env, fprp, fprp, i32)
+DEF_HELPER_4(DSCLIQ, void, env, fprp, fprp, i32)
DEF_HELPER_1(tbegin, void, env)
DEF_HELPER_FLAGS_1(fixup_thrm, TCG_CALL_NO_RWG, void, env)
-
-#ifdef TARGET_PPC64
-DEF_HELPER_FLAGS_3(lq_le_parallel, TCG_CALL_NO_WG, i64, env, tl, i32)
-DEF_HELPER_FLAGS_3(lq_be_parallel, TCG_CALL_NO_WG, i64, env, tl, i32)
-DEF_HELPER_FLAGS_5(stq_le_parallel, TCG_CALL_NO_WG,
- void, env, tl, i64, i64, i32)
-DEF_HELPER_FLAGS_5(stq_be_parallel, TCG_CALL_NO_WG,
- void, env, tl, i64, i64, i32)
-DEF_HELPER_5(stqcx_le_parallel, i32, env, tl, i64, i64, i32)
-DEF_HELPER_5(stqcx_be_parallel, i32, env, tl, i64, i64, i32)
-#endif
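
(Aside, not part of the patch: the DEF_HELPER_* lines in helper.h above are consumed by including the file several times with the macro redefined each time, so the one list yields helper prototypes, TCG registration data, and so on; for example a line like DEF_HELPER_FLAGS_4(VRLWMI, TCG_CALL_NO_RWG, void, avr, avr, avr, i32) ends up declaring roughly void helper_VRLWMI(ppc_avr_t *, ppc_avr_t *, ppc_avr_t *, uint32_t). The toy program below, with invented names, shows the multiple-inclusion trick in isolation.)

/*
 * Minimal, self-contained sketch (not QEMU code) of the multiple-inclusion
 * trick helper.h relies on: the same DEF_HELPER-style list is expanded more
 * than once with the macro redefined each time, so one list produces both
 * the helper definitions and a table referencing them.  All names here are
 * invented for illustration only.
 */
#include <stdio.h>

#define HELPER_LIST \
    DEF_HELPER(add_i32) \
    DEF_HELPER(neg_i32)

/* First expansion: define one function per list entry. */
#define DEF_HELPER(name) static void helper_##name(void) { puts(#name); }
HELPER_LIST
#undef DEF_HELPER

/* Second expansion: build a table over the very same list. */
#define DEF_HELPER(name) helper_##name,
static void (*const helper_table[])(void) = { HELPER_LIST };
#undef DEF_HELPER

int main(void)
{
    for (size_t i = 0; i < sizeof(helper_table) / sizeof(helper_table[0]); i++) {
        helper_table[i]();
    }
    return 0;
}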
diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c
index 405450d863..25258986e3 100644
--- a/target/ppc/helper_regs.c
+++ b/target/ppc/helper_regs.c
@@ -22,7 +22,11 @@
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
+#include "sysemu/tcg.h"
#include "helper_regs.h"
+#include "power8-pmu.h"
+#include "cpu-models.h"
+#include "spr_common.h"
/* Swap temporary saved registers with GPRs */
void hreg_swap_gpr_tgpr(CPUPPCState *env)
@@ -43,6 +47,48 @@ void hreg_swap_gpr_tgpr(CPUPPCState *env)
env->tgpr[3] = tmp;
}
+static uint32_t hreg_compute_pmu_hflags_value(CPUPPCState *env)
+{
+ uint32_t hflags = 0;
+
+#if defined(TARGET_PPC64)
+ if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC0) {
+ hflags |= 1 << HFLAGS_PMCC0;
+ }
+ if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC1) {
+ hflags |= 1 << HFLAGS_PMCC1;
+ }
+ if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE) {
+ hflags |= 1 << HFLAGS_PMCJCE;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ if (env->pmc_ins_cnt) {
+ hflags |= 1 << HFLAGS_INSN_CNT;
+ }
+ if (env->pmc_ins_cnt & 0x1e) {
+ hflags |= 1 << HFLAGS_PMC_OTHER;
+ }
+#endif
+#endif
+
+ return hflags;
+}
+
+/* Mask of all PMU hflags */
+static uint32_t hreg_compute_pmu_hflags_mask(CPUPPCState *env)
+{
+ uint32_t hflags_mask = 0;
+#if defined(TARGET_PPC64)
+ hflags_mask |= 1 << HFLAGS_PMCC0;
+ hflags_mask |= 1 << HFLAGS_PMCC1;
+ hflags_mask |= 1 << HFLAGS_PMCJCE;
+ hflags_mask |= 1 << HFLAGS_INSN_CNT;
+ hflags_mask |= 1 << HFLAGS_PMC_OTHER;
+#endif
+ return hflags_mask;
+}
+
static uint32_t hreg_compute_hflags_value(CPUPPCState *env)
{
target_ulong msr = env->msr;
@@ -58,21 +104,12 @@ static uint32_t hreg_compute_hflags_value(CPUPPCState *env)
msr_mask = ((1 << MSR_LE) | (1 << MSR_PR) |
(1 << MSR_DR) | (1 << MSR_FP));
- if (ppc_flags & POWERPC_FLAG_HID0_LE) {
- /*
- * Note that MSR_LE is not set in env->msr_mask for this cpu,
- * and so will never be set in msr.
- */
- uint32_t le = extract32(env->spr[SPR_HID0], 3, 1);
- hflags |= le << MSR_LE;
- }
-
if (ppc_flags & POWERPC_FLAG_DE) {
target_ulong dbcr0 = env->spr[SPR_BOOKE_DBCR0];
- if (dbcr0 & DBCR0_ICMP) {
+ if ((dbcr0 & DBCR0_ICMP) && FIELD_EX64(env->msr, MSR, DE)) {
hflags |= 1 << HFLAGS_SE;
}
- if (dbcr0 & DBCR0_BRT) {
+ if ((dbcr0 & DBCR0_BRT) && FIELD_EX64(env->msr, MSR, DE)) {
hflags |= 1 << HFLAGS_BE;
}
} else {
@@ -106,6 +143,9 @@ static uint32_t hreg_compute_hflags_value(CPUPPCState *env)
if (env->spr[SPR_LPCR] & LPCR_GTSE) {
hflags |= 1 << HFLAGS_GTSE;
}
+ if (env->spr[SPR_LPCR] & LPCR_HR) {
+ hflags |= 1 << HFLAGS_HR;
+ }
#ifndef CONFIG_USER_ONLY
if (!env->has_hv_mode || (msr & (1ull << MSR_HV))) {
@@ -140,7 +180,8 @@ static uint32_t hreg_compute_hflags_value(CPUPPCState *env)
*/
unsigned immu_idx, dmmu_idx;
dmmu_idx = msr & (1 << MSR_PR) ? 0 : 1;
- if (env->mmu_model & POWERPC_MMU_BOOKE) {
+ if (env->mmu_model == POWERPC_MMU_BOOKE ||
+ env->mmu_model == POWERPC_MMU_BOOKE206) {
dmmu_idx |= msr & (1 << MSR_GS) ? 4 : 0;
immu_idx = dmmu_idx;
immu_idx |= msr & (1 << MSR_IS) ? 2 : 0;
@@ -155,6 +196,8 @@ static uint32_t hreg_compute_hflags_value(CPUPPCState *env)
hflags |= dmmu_idx << HFLAGS_DMMU_IDX;
#endif
+ hflags |= hreg_compute_pmu_hflags_value(env);
+
return hflags | (msr & msr_mask);
}
@@ -163,9 +206,20 @@ void hreg_compute_hflags(CPUPPCState *env)
env->hflags = hreg_compute_hflags_value(env);
}
+/*
+ * This can be used as a lighter-weight alternative to hreg_compute_hflags
+ * when PMU MMCR0 or pmc_ins_cnt changes. pmc_ins_cnt is changed by
+ * pmu_update_summaries.
+ */
+void hreg_update_pmu_hflags(CPUPPCState *env)
+{
+ env->hflags &= ~hreg_compute_pmu_hflags_mask(env);
+ env->hflags |= hreg_compute_pmu_hflags_value(env);
+}
+
#ifdef CONFIG_DEBUG_TCG
-void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
- target_ulong *cs_base, uint32_t *flags)
+void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
+ uint64_t *cs_base, uint32_t *flags)
{
uint32_t hflags_current = env->hflags;
uint32_t hflags_rebuilt;
@@ -185,15 +239,12 @@ void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
void cpu_interrupt_exittb(CPUState *cs)
{
- if (!kvm_enabled()) {
- return;
- }
-
- if (!qemu_mutex_iothread_locked()) {
- qemu_mutex_lock_iothread();
- cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
- qemu_mutex_unlock_iothread();
- } else {
+ /*
+ * We don't need to worry about translation blocks
+ * unless running with TCG.
+ */
+ if (tcg_enabled()) {
+ BQL_LOCK_GUARD();
cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
}
}
@@ -213,12 +264,17 @@ int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv)
value &= ~MSR_HVB;
value |= env->msr & MSR_HVB;
}
- if (((value >> MSR_IR) & 1) != msr_ir ||
- ((value >> MSR_DR) & 1) != msr_dr) {
+ /* Attempt to modify MSR[ME] in guest state is ignored */
+ if (is_book3s_arch2x(env) && !(env->msr & MSR_HVB)) {
+ value &= ~(1 << MSR_ME);
+ value |= env->msr & (1 << MSR_ME);
+ }
+ if ((value ^ env->msr) & (R_MSR_IR_MASK | R_MSR_DR_MASK)) {
cpu_interrupt_exittb(cs);
}
- if ((env->mmu_model & POWERPC_MMU_BOOKE) &&
- ((value >> MSR_GS) & 1) != msr_gs) {
+ if ((env->mmu_model == POWERPC_MMU_BOOKE ||
+ env->mmu_model == POWERPC_MMU_BOOKE206) &&
+ ((value ^ env->msr) & R_MSR_GS_MASK)) {
cpu_interrupt_exittb(cs);
}
if (unlikely((env->flags & POWERPC_FLAG_TGPR) &&
@@ -226,9 +282,8 @@ int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv)
/* Swap temporary saved registers with GPRs */
hreg_swap_gpr_tgpr(env);
}
- if (unlikely((value >> MSR_EP) & 1) != msr_ep) {
- /* Change the exception prefix on PowerPC 601 */
- env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000;
+ if (unlikely((value ^ env->msr) & R_MSR_EP_MASK)) {
+ env->excp_prefix = FIELD_EX64(value, MSR, EP) * 0xFFF00000;
}
/*
* If PR=1 then EE, IR and DR must be 1
@@ -247,7 +302,9 @@ int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv)
env->msr = value;
hreg_compute_hflags(env);
#if !defined(CONFIG_USER_ONLY)
- if (unlikely(msr_pow == 1)) {
+ ppc_maybe_interrupt(env);
+
+ if (unlikely(FIELD_EX64(env->msr, MSR, POW))) {
if (!env->pending_interrupts && (*env->check_pow)(env)) {
cs->halted = 1;
excp = EXCP_HALTED;
@@ -258,7 +315,7 @@ int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv)
return excp;
}
-#ifdef CONFIG_SOFTMMU
+#ifndef CONFIG_USER_ONLY
void store_40x_sler(CPUPPCState *env, uint32_t val)
{
/* XXX: TO BE FIXED */
@@ -268,9 +325,7 @@ void store_40x_sler(CPUPPCState *env, uint32_t val)
}
env->spr[SPR_405_SLER] = val;
}
-#endif /* CONFIG_SOFTMMU */
-#ifndef CONFIG_USER_ONLY
void check_tlb_flush(CPUPPCState *env, bool global)
{
CPUState *cs = env_cpu(env);
@@ -279,7 +334,7 @@ void check_tlb_flush(CPUPPCState *env, bool global)
if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) {
env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH;
env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
- tlb_flush_all_cpus_synced(cs);
+ tlb_flush_all_cpus(cs);
return;
}
@@ -289,4 +344,423 @@ void check_tlb_flush(CPUPPCState *env, bool global)
tlb_flush(cs);
}
}
+#endif /* !CONFIG_USER_ONLY */
+
+/**
+ * _spr_register
+ *
+ * Register an SPR with all the callbacks required for tcg,
+ * and the ID number for KVM.
+ *
+ * The reason for the conditional compilation is that the tcg functions
+ * may be compiled out, and the system kvm header may not be available
+ * for supplying the ID numbers. This is ugly, but the best we can do.
+ */
+void _spr_register(CPUPPCState *env, int num, const char *name,
+ USR_ARG(spr_callback *uea_read)
+ USR_ARG(spr_callback *uea_write)
+ SYS_ARG(spr_callback *oea_read)
+ SYS_ARG(spr_callback *oea_write)
+ SYS_ARG(spr_callback *hea_read)
+ SYS_ARG(spr_callback *hea_write)
+ KVM_ARG(uint64_t one_reg_id)
+ target_ulong initial_value)
+{
+ ppc_spr_t *spr = &env->spr_cb[num];
+
+ /* No SPR should be registered twice. */
+ assert(spr->name == NULL);
+ assert(name != NULL);
+
+ spr->name = name;
+ spr->default_value = initial_value;
+ env->spr[num] = initial_value;
+
+#ifdef CONFIG_TCG
+ spr->uea_read = uea_read;
+ spr->uea_write = uea_write;
+# ifndef CONFIG_USER_ONLY
+ spr->oea_read = oea_read;
+ spr->oea_write = oea_write;
+ spr->hea_read = hea_read;
+ spr->hea_write = hea_write;
+# endif
+#endif
+#ifdef CONFIG_KVM
+ spr->one_reg_id = one_reg_id;
#endif
+}
+
+/* Generic PowerPC SPRs */
+void register_generic_sprs(PowerPCCPU *cpu)
+{
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ CPUPPCState *env = &cpu->env;
+
+ /* Integer processing */
+ spr_register(env, SPR_XER, "XER",
+ &spr_read_xer, &spr_write_xer,
+ &spr_read_xer, &spr_write_xer,
+ 0x00000000);
+ /* Branch control */
+ spr_register(env, SPR_LR, "LR",
+ &spr_read_lr, &spr_write_lr,
+ &spr_read_lr, &spr_write_lr,
+ 0x00000000);
+ spr_register(env, SPR_CTR, "CTR",
+ &spr_read_ctr, &spr_write_ctr,
+ &spr_read_ctr, &spr_write_ctr,
+ 0x00000000);
+ /* Interrupt processing */
+ spr_register(env, SPR_SRR0, "SRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SRR1, "SRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Processor control */
+ spr_register(env, SPR_SPRG0, "SPRG0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG1, "SPRG1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG2, "SPRG2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG3, "SPRG3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+
+ spr_register(env, SPR_PVR, "PVR",
+ /* Linux permits userspace to read PVR */
+#if defined(CONFIG_LINUX_USER)
+ &spr_read_generic,
+#else
+ SPR_NOACCESS,
+#endif
+ SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ pcc->pvr);
+
+ /* Register SVR if it's defined to anything other than POWERPC_SVR_NONE */
+ if (pcc->svr != POWERPC_SVR_NONE) {
+ if (pcc->svr & POWERPC_SVR_E500) {
+ spr_register(env, SPR_E500_SVR, "SVR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ pcc->svr & ~POWERPC_SVR_E500);
+ } else {
+ spr_register(env, SPR_SVR, "SVR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ pcc->svr);
+ }
+ }
+
+ /* Time base */
+#if defined(TARGET_PPC64)
+ spr_register(env, SPR_TBL, "TB",
+#else
+ spr_register(env, SPR_TBL, "TBL",
+#endif
+ &spr_read_tbl, SPR_NOACCESS,
+ &spr_read_tbl, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_TBU, "TBU",
+ &spr_read_tbu, SPR_NOACCESS,
+ &spr_read_tbu, SPR_NOACCESS,
+ 0x00000000);
+#ifndef CONFIG_USER_ONLY
+ if (env->has_hv_mode) {
+ spr_register_hv(env, SPR_WR_TBL, "TBL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_tbl,
+ 0x00000000);
+ spr_register_hv(env, SPR_WR_TBU, "TBU",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_tbu,
+ 0x00000000);
+ } else {
+ spr_register(env, SPR_WR_TBL, "TBL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_tbl,
+ 0x00000000);
+ spr_register(env, SPR_WR_TBU, "TBU",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_tbu,
+ 0x00000000);
+ }
+#endif
+}
+
+void register_non_embedded_sprs(CPUPPCState *env)
+{
+ /* Exception processing */
+ spr_register_kvm(env, SPR_DSISR, "DSISR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic32,
+ KVM_REG_PPC_DSISR, 0x00000000);
+ spr_register_kvm(env, SPR_DAR, "DAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DAR, 0x00000000);
+ /* Timer */
+ spr_register(env, SPR_DECR, "DEC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_decr, &spr_write_decr,
+ 0x00000000);
+}
+
+/* Storage Description Register 1 */
+void register_sdr1_sprs(CPUPPCState *env)
+{
+#ifndef CONFIG_USER_ONLY
+ if (env->has_hv_mode) {
+ /*
+ * SDR1 is a hypervisor resource on CPUs which have a
+ * hypervisor mode
+ */
+ spr_register_hv(env, SPR_SDR1, "SDR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_sdr1,
+ 0x00000000);
+ } else {
+ spr_register(env, SPR_SDR1, "SDR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_sdr1,
+ 0x00000000);
+ }
+#endif
+}
+
+/* BATs 0-3 */
+void register_low_BATs(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ spr_register(env, SPR_IBAT0U, "IBAT0U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT0L, "IBAT0L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatl,
+ 0x00000000);
+ spr_register(env, SPR_IBAT1U, "IBAT1U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT1L, "IBAT1L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatl,
+ 0x00000000);
+ spr_register(env, SPR_IBAT2U, "IBAT2U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT2L, "IBAT2L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatl,
+ 0x00000000);
+ spr_register(env, SPR_IBAT3U, "IBAT3U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT3L, "IBAT3L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatl,
+ 0x00000000);
+ spr_register(env, SPR_DBAT0U, "DBAT0U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatu,
+ 0x00000000);
+ spr_register(env, SPR_DBAT0L, "DBAT0L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatl,
+ 0x00000000);
+ spr_register(env, SPR_DBAT1U, "DBAT1U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatu,
+ 0x00000000);
+ spr_register(env, SPR_DBAT1L, "DBAT1L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatl,
+ 0x00000000);
+ spr_register(env, SPR_DBAT2U, "DBAT2U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatu,
+ 0x00000000);
+ spr_register(env, SPR_DBAT2L, "DBAT2L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatl,
+ 0x00000000);
+ spr_register(env, SPR_DBAT3U, "DBAT3U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatu,
+ 0x00000000);
+ spr_register(env, SPR_DBAT3L, "DBAT3L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatl,
+ 0x00000000);
+ env->nb_BATs += 4;
+#endif
+}
+
+/* BATs 4-7 */
+void register_high_BATs(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ spr_register(env, SPR_IBAT4U, "IBAT4U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatu_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT4L, "IBAT4L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatl_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT5U, "IBAT5U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatu_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT5L, "IBAT5L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatl_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT6U, "IBAT6U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatu_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT6L, "IBAT6L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatl_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT7U, "IBAT7U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatu_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT7L, "IBAT7L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatl_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT4U, "DBAT4U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatu_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT4L, "DBAT4L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatl_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT5U, "DBAT5U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatu_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT5L, "DBAT5L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatl_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT6U, "DBAT6U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatu_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT6L, "DBAT6L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatl_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT7U, "DBAT7U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatu_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT7L, "DBAT7L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatl_h,
+ 0x00000000);
+ env->nb_BATs += 4;
+#endif
+}
+
+/* Software table search registers */
+void register_6xx_7xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = nb_tlbs;
+ env->nb_ways = nb_ways;
+ env->id_tlbs = 1;
+ env->tlb_type = TLB_6XX;
+ spr_register(env, SPR_DMISS, "DMISS",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_DCMP, "DCMP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_HASH1, "HASH1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_HASH2, "HASH2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_IMISS, "IMISS",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_ICMP, "ICMP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_RPA, "RPA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+#endif
+}
+
+void register_thrm_sprs(CPUPPCState *env)
+{
+ /* Thermal management */
+ spr_register(env, SPR_THRM1, "THRM1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_thrm, &spr_write_generic,
+ 0x00000000);
+
+ spr_register(env, SPR_THRM2, "THRM2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_thrm, &spr_write_generic,
+ 0x00000000);
+
+ spr_register(env, SPR_THRM3, "THRM3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_thrm, &spr_write_generic,
+ 0x00000000);
+}
+
+void register_usprgh_sprs(CPUPPCState *env)
+{
+ spr_register(env, SPR_USPRG4, "USPRG4",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_USPRG5, "USPRG5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_USPRG6, "USPRG6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_USPRG7, "USPRG7",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+}
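
(Aside, not part of the patch: hreg_update_pmu_hflags() above relies on a mask-then-recompute pattern so that only the PMU-owned hflags bits are touched while the rest of env->hflags is preserved. A standalone sketch of that pattern, with invented bit positions and inputs, follows.)

/*
 * Standalone sketch (not QEMU code) of the pattern hreg_update_pmu_hflags()
 * uses: clear only the bits a sub-computation owns, then OR the freshly
 * computed value back in, leaving every other hflags bit untouched.
 * The bit positions and the two "MMCR0" inputs are invented for illustration.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum { HF_PMCC0 = 0, HF_PMCC1 = 1, HF_OTHER = 7 };

static uint32_t compute_pmu_bits(int pmcc0, int pmcc1)
{
    uint32_t v = 0;
    v |= (pmcc0 ? 1u : 0u) << HF_PMCC0;
    v |= (pmcc1 ? 1u : 0u) << HF_PMCC1;
    return v;
}

static uint32_t pmu_bits_mask(void)
{
    return (1u << HF_PMCC0) | (1u << HF_PMCC1);
}

int main(void)
{
    uint32_t hflags = (1u << HF_OTHER) | (1u << HF_PMCC0); /* stale PMCC0 set */

    hflags &= ~pmu_bits_mask();       /* drop only the PMU-owned bits */
    hflags |= compute_pmu_bits(0, 1); /* re-derive them from the new inputs */

    printf("hflags = 0x%" PRIx32 "\n", hflags); /* 0x82: OTHER kept, PMCC1 set */
    return 0;
}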
diff --git a/target/ppc/helper_regs.h b/target/ppc/helper_regs.h
index 42f26870b9..8196c1346d 100644
--- a/target/ppc/helper_regs.h
+++ b/target/ppc/helper_regs.h
@@ -22,6 +22,7 @@
void hreg_swap_gpr_tgpr(CPUPPCState *env);
void hreg_compute_hflags(CPUPPCState *env);
+void hreg_update_pmu_hflags(CPUPPCState *env);
void cpu_interrupt_exittb(CPUState *cs);
int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv);
diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode
index 9fd8d6b817..eada59f59f 100644
--- a/target/ppc/insn32.decode
+++ b/target/ppc/insn32.decode
@@ -17,31 +17,250 @@
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
#
+&A frt fra frb frc rc:bool
+@A ...... frt:5 fra:5 frb:5 frc:5 ..... rc:1 &A
+
+&A_tb frt frb rc:bool
+@A_tb ...... frt:5 ..... frb:5 ..... ..... rc:1 &A_tb
+
&D rt ra si:int64_t
-@D ...... rt:5 ra:5 si:s16 &D
+@D ...... rt:5 ra:5 si:s16 &D
&D_bf bf l:bool ra imm
-@D_bfs ...... bf:3 - l:1 ra:5 imm:s16 &D_bf
-@D_bfu ...... bf:3 - l:1 ra:5 imm:16 &D_bf
+@D_bfs ...... bf:3 . l:1 ra:5 imm:s16 &D_bf
+@D_bfu ...... bf:3 . l:1 ra:5 imm:16 &D_bf
+
+%dq_si 4:s12 !function=times_16
+%dq_rtp 22:4 !function=times_2
+@DQ_rtp ...... ....0 ra:5 ............ .... &D rt=%dq_rtp si=%dq_si
+
+%dq_rt_tsx 3:1 21:5
+@DQ_TSX ...... ..... ra:5 ............ .... &D si=%dq_si rt=%dq_rt_tsx
+
+%rt_tsxp 21:1 22:4 !function=times_2
+@DQ_TSXP ...... ..... ra:5 ............ .... &D si=%dq_si rt=%rt_tsxp
%ds_si 2:s14 !function=times_4
-@DS ...... rt:5 ra:5 .............. .. &D si=%ds_si
+@DS ...... rt:5 ra:5 .............. .. &D si=%ds_si
+
+%ds_rtp 22:4 !function=times_2
+@DS_rtp ...... ....0 ra:5 .............. .. &D rt=%ds_rtp si=%ds_si
+
+&DX_b vrt b
+%dx_b 6:10 16:5 0:1
+@DX_b ...... vrt:5 ..... .......... ..... . &DX_b b=%dx_b
&DX rt d
%dx_d 6:s10 16:5 0:1
-@DX ...... rt:5 ..... .......... ..... . &DX d=%dx_d
+@DX ...... rt:5 ..... .......... ..... . &DX d=%dx_d
+
+&VA vrt vra vrb rc
+@VA ...... vrt:5 vra:5 vrb:5 rc:5 ...... &VA
+
+&VC vrt vra vrb rc:bool
+@VC ...... vrt:5 vra:5 vrb:5 rc:1 .......... &VC
+
+&VN vrt vra vrb sh
+@VN ...... vrt:5 vra:5 vrb:5 .. sh:3 ...... &VN
&VX vrt vra vrb
-@VX ...... vrt:5 vra:5 vrb:5 .......... . &VX
+@VX ...... vrt:5 vra:5 vrb:5 .......... . &VX
+
+&VX_bf bf vra vrb
+@VX_bf ...... bf:3 .. vra:5 vrb:5 ........... &VX_bf
+
+&VX_mp rt mp:bool vrb
+@VX_mp ...... rt:5 .... mp:1 vrb:5 ........... &VX_mp
+
+&VX_n rt vrb n
+@VX_n ...... rt:5 .. n:3 vrb:5 ........... &VX_n
+
+&VX_tb_rc vrt vrb rc:bool
+@VX_tb_rc ...... vrt:5 ..... vrb:5 rc:1 .......... &VX_tb_rc
+
+&VX_uim4 vrt uim vrb
+@VX_uim4 ...... vrt:5 . uim:4 vrb:5 ........... &VX_uim4
+
+&VX_tb vrt vrb
+@VX_tb ...... vrt:5 ..... vrb:5 ........... &VX_tb
&X rt ra rb
-@X ...... rt:5 ra:5 rb:5 .......... . &X
+@X ...... rt:5 ra:5 rb:5 .......... . &X
+
+&X_rc rt ra rb rc:bool
+@X_rc ...... rt:5 ra:5 rb:5 .......... rc:1 &X_rc
+
+&X_sa rs ra
+@X_sa ...... rs:5 ra:5 ..... .......... . &X_sa
+
+%x_frtp 22:4 !function=times_2
+%x_frap 17:4 !function=times_2
+%x_frbp 12:4 !function=times_2
+@X_tp_ap_bp_rc ...... ....0 ....0 ....0 .......... rc:1 &X_rc rt=%x_frtp ra=%x_frap rb=%x_frbp
+
+@X_tp_a_bp_rc ...... ....0 ra:5 ....0 .......... rc:1 &X_rc rt=%x_frtp rb=%x_frbp
+
+&X_t rt
+@X_t ...... rt:5 ..... ..... .......... . &X_t
+
+&X_tb rt rb
+@X_tb ...... rt:5 ..... rb:5 .......... . &X_tb
+
+&X_t_rc rt rc:bool
+@X_t_rc ...... rt:5 ..... ..... .......... rc:1 &X_t_rc
+
+&X_tb_rc rt rb rc:bool
+@X_tb_rc ...... rt:5 ..... rb:5 .......... rc:1 &X_tb_rc
+
+@X_tbp_rc ...... ....0 ..... ....0 .......... rc:1 &X_tb_rc rt=%x_frtp rb=%x_frbp
+
+@X_tp_b_rc ...... ....0 ..... rb:5 .......... rc:1 &X_tb_rc rt=%x_frtp
+
+@X_t_bp_rc ...... rt:5 ..... ....0 .......... rc:1 &X_tb_rc rb=%x_frbp
&X_bi rt bi
-@X_bi ...... rt:5 bi:5 ----- .......... - &X_bi
+@X_bi ...... rt:5 bi:5 ..... .......... . &X_bi
+
+&X_bf bf ra rb
+@X_bf ...... bf:3 .. ra:5 rb:5 .......... . &X_bf
+
+@X_bf_ap_bp ...... bf:3 .. ....0 ....0 .......... . &X_bf ra=%x_frap rb=%x_frbp
+
+@X_bf_a_bp ...... bf:3 .. ra:5 ....0 .......... . &X_bf rb=%x_frbp
+
+&X_bf_uim bf uim rb
+@X_bf_uim ...... bf:3 . uim:6 rb:5 .......... . &X_bf_uim
+
+@X_bf_uim_bp ...... bf:3 . uim:6 ....0 .......... . &X_bf_uim rb=%x_frbp
&X_bfl bf l:bool ra rb
-@X_bfl ...... bf:3 - l:1 ra:5 rb:5 ..........- &X_bfl
+@X_bfl ...... bf:3 . l:1 ra:5 rb:5 .......... . &X_bfl
+
+&X_imm2 rt imm
+@X_imm2 ...... rt:5 ..... ... imm:2 .......... . &X_imm2
+
+&X_imm3 rt imm
+@X_imm3 ...... rt:5 ..... .. imm:3 .......... . &X_imm3
+
+%x_xt 0:1 21:5
+&X_imm5 xt imm:uint8_t vrb
+@X_imm5 ...... ..... imm:5 vrb:5 .......... . &X_imm5 xt=%x_xt
+
+&X_imm8 xt imm:uint8_t
+@X_imm8 ...... ..... .. imm:8 .......... . &X_imm8 xt=%x_xt
+
+&X_ih ih:uint8_t
+@X_ih ...... .. ih:3 ..... ..... .......... . &X_ih
+
+&X_rb rb
+@X_rb ...... ..... ..... rb:5 .......... . &X_rb
+
+&X_rs_l rs l:bool
+@X_rs_l ...... rs:5 .... l:1 ..... .......... . &X_rs_l
+
+&X_uim5 xt uim:uint8_t
+@X_uim5 ...... ..... ..... uim:5 .......... . &X_uim5 xt=%x_xt
+
+&X_tb_sp_rc rt rb sp rc:bool
+@X_tb_sp_rc ...... rt:5 sp:2 ... rb:5 .......... rc:1 &X_tb_sp_rc
+
+@X_tbp_sp_rc ...... ....0 sp:2 ... ....0 .......... rc:1 &X_tb_sp_rc rt=%x_frtp rb=%x_frbp
+
+&X_tb_s_rc rt rb s:bool rc:bool
+@X_tb_s_rc ...... rt:5 s:1 .... rb:5 .......... rc:1 &X_tb_s_rc
+
+@X_tbp_s_rc ...... ....0 s:1 .... ....0 .......... rc:1 &X_tb_s_rc rt=%x_frtp rb=%x_frbp
+
+%x_rt_tsx 0:1 21:5
+@X_TSX ...... ..... ra:5 rb:5 .......... . &X rt=%x_rt_tsx
+@X_TSXP ...... ..... ra:5 rb:5 .......... . &X rt=%rt_tsxp
+
+%x_dw 0:1 21:5 !function=dw_compose_ea
+@X_DW ...... ..... ra:5 rb:5 .......... . &X rt=%x_dw
+
+&X_frtp_vrb frtp vrb
+@X_frtp_vrb ...... ....0 ..... vrb:5 .......... . &X_frtp_vrb frtp=%x_frtp
+
+&X_vrt_frbp vrt frbp
+@X_vrt_frbp ...... vrt:5 ..... ....0 .......... . &X_vrt_frbp frbp=%x_frbp
+
+&X_a ra
+@X_a ...... ra:3 .. ..... ..... .......... . &X_a
+
+&XO rt ra rb oe:bool rc:bool
+@XO ...... rt:5 ra:5 rb:5 oe:1 ......... rc:1 &XO
+
+&XO_ta rt ra oe:bool rc:bool
+@XO_ta ...... rt:5 ra:5 ..... oe:1 ......... rc:1 &XO_ta
+
+%xx_xt 0:1 21:5
+%xx_xb 1:1 11:5
+%xx_xa 2:1 16:5
+%xx_xc 3:1 6:5
+&XX2 xt xb
+@XX2 ...... ..... ..... ..... ......... .. &XX2 xt=%xx_xt xb=%xx_xb
+
+&XX2_uim xt xb uim:uint8_t
+@XX2_uim2 ...... ..... ... uim:2 ..... ......... .. &XX2_uim xt=%xx_xt xb=%xx_xb
+
+@XX2_uim4 ...... ..... . uim:4 ..... ......... .. &XX2_uim xt=%xx_xt xb=%xx_xb
+
+%xx_uim7 6:1 2:1 16:5
+@XX2_uim7 ...... ..... ..... ..... .... . ... . .. &XX2_uim xt=%xx_xt xb=%xx_xb uim=%xx_uim7
+
+&XX2_bf_uim bf xb uim
+@XX2_bf_uim ...... bf:3 uim:7 ..... ......... . . &XX2_bf_uim
+
+&XX2_bf_xb bf xb
+@XX2_bf_xb ...... bf:3 .. ..... ..... ......... . . &XX2_bf_xb xb=%xx_xb
+
+&XX3 xt xa xb
+@XX3 ...... ..... ..... ..... ........ ... &XX3 xt=%xx_xt xa=%xx_xa xb=%xx_xb
+
+# 32 bit GER instructions have all mask bits considered 1
+&MMIRR_XX3 xa xb xt pmsk xmsk ymsk
+%xx_at 23:3
+%xx_xa_pair 2:1 17:4 !function=times_2
+@XX3_at ...... ... .. ..... ..... ........ ... &MMIRR_XX3 xt=%xx_at xb=%xx_xb \
+ pmsk=255 xmsk=15 ymsk=15
+
+&XX3_dm xt xa xb dm
+@XX3_dm ...... ..... ..... ..... . dm:2 ..... ... &XX3_dm xt=%xx_xt xa=%xx_xa xb=%xx_xb
+
+&XX4 xt xa xb xc
+@XX4 ...... ..... ..... ..... ..... .. .... &XX4 xt=%xx_xt xa=%xx_xa xb=%xx_xb xc=%xx_xc
+
+&Z22_bf_fra bf fra dm
+@Z22_bf_fra ...... bf:3 .. fra:5 dm:6 ......... . &Z22_bf_fra
+
+%z22_frap 17:4 !function=times_2
+@Z22_bf_frap ...... bf:3 .. ....0 dm:6 ......... . &Z22_bf_fra fra=%z22_frap
+
+&Z22_ta_sh_rc rt ra sh rc:bool
+@Z22_ta_sh_rc ...... rt:5 ra:5 sh:6 ......... rc:1 &Z22_ta_sh_rc
+
+%z22_frtp 22:4 !function=times_2
+@Z22_tap_sh_rc ...... ....0 ....0 sh:6 ......... rc:1 &Z22_ta_sh_rc rt=%z22_frtp ra=%z22_frap
+
+&Z23_tab frt fra frb rmc rc:bool
+@Z23_tab ...... frt:5 fra:5 frb:5 rmc:2 ........ rc:1 &Z23_tab
+
+%z23_frtp 22:4 !function=times_2
+%z23_frap 17:4 !function=times_2
+%z23_frbp 12:4 !function=times_2
+@Z23_tabp ...... ....0 ....0 ....0 rmc:2 ........ rc:1 &Z23_tab frt=%z23_frtp fra=%z23_frap frb=%z23_frbp
+
+@Z23_tp_a_bp ...... ....0 fra:5 ....0 rmc:2 ........ rc:1 &Z23_tab frt=%z23_frtp frb=%z23_frbp
+
+&Z23_tb frt frb r:bool rmc rc:bool
+@Z23_tb ...... frt:5 .... r:1 frb:5 rmc:2 ........ rc:1 &Z23_tb
+
+@Z23_tbp ...... ....0 .... r:1 ....0 rmc:2 ........ rc:1 &Z23_tb frt=%z23_frtp frb=%z23_frbp
+
+&Z23_te_tb te frt frb rmc rc:bool
+@Z23_te_tb ...... frt:5 te:5 frb:5 rmc:2 ........ rc:1 &Z23_te_tb
+
+@Z23_te_tbp ...... ....0 te:5 ....0 rmc:2 ........ rc:1 &Z23_te_tb frt=%z23_frtp frb=%z23_frbp
### Fixed-Point Load Instructions
@@ -74,6 +293,8 @@ LDU 111010 ..... ..... ..............01 @DS
LDX 011111 ..... ..... ..... 0000010101 - @X
LDUX 011111 ..... ..... ..... 0000110101 - @X
+LQ 111000 ..... ..... ............ ---- @DQ_rtp
+
### Fixed-Point Store Instructions
STB 100110 ..... ..... ................ @D
@@ -96,6 +317,8 @@ STDU 111110 ..... ..... ..............01 @DS
STDX 011111 ..... ..... ..... 0010010101 - @X
STDUX 011111 ..... ..... ..... 0010110101 - @X
+STQ 111110 ..... ..... ..............10 @DS_rtp
+
### Fixed-Point Compare Instructions
CMP 011111 ... - . ..... ..... 0000000000 - @X_bfl
@@ -105,14 +328,84 @@ CMPLI 001010 ... - . ..... ................ @D_bfu
### Fixed-Point Arithmetic Instructions
+ADD 011111 ..... ..... ..... . 100001010 . @XO
+ADDC 011111 ..... ..... ..... . 000001010 . @XO
+ADDE 011111 ..... ..... ..... . 010001010 . @XO
+
+# ADDEX is Z23-form, with CY=0; all other values for CY are reserved.
+# This works out the same as X-form.
+ADDEX 011111 ..... ..... ..... 00 10101010 - @X
+
ADDI 001110 ..... ..... ................ @D
ADDIS 001111 ..... ..... ................ @D
+ADDIC 001100 ..... ..... ................ @D
+ADDIC_ 001101 ..... ..... ................ @D
ADDPCIS 010011 ..... ..... .......... 00010 . @DX
+ADDME 011111 ..... ..... ----- . 011101010 . @XO_ta
+ADDZE 011111 ..... ..... ----- . 011001010 . @XO_ta
+
+SUBF 011111 ..... ..... ..... . 000101000 . @XO
+SUBFIC 001000 ..... ..... ................ @D
+SUBFC 011111 ..... ..... ..... . 000001000 . @XO
+SUBFE 011111 ..... ..... ..... . 010001000 . @XO
+
+SUBFME 011111 ..... ..... ----- . 011101000 . @XO_ta
+SUBFZE 011111 ..... ..... ----- . 011001000 . @XO_ta
## Fixed-Point Logical Instructions
CFUGED 011111 ..... ..... ..... 0011011100 - @X
+CNTLZDM 011111 ..... ..... ..... 0000111011 - @X
+CNTTZDM 011111 ..... ..... ..... 1000111011 - @X
+PDEPD 011111 ..... ..... ..... 0010011100 - @X
+PEXTD 011111 ..... ..... ..... 0010111100 - @X
+
+# Fixed-Point Hash Instructions
+
+HASHST 011111 ..... ..... ..... 1011010010 . @X_DW
+HASHCHK 011111 ..... ..... ..... 1011110010 . @X_DW
+HASHSTP 011111 ..... ..... ..... 1010010010 . @X_DW
+HASHCHKP 011111 ..... ..... ..... 1010110010 . @X_DW
+
+## BCD Assist
+
+ADDG6S 011111 ..... ..... ..... - 001001010 - @X
+CDTBCD 011111 ..... ..... ----- 0100011010 - @X_sa
+CBCDTD 011111 ..... ..... ----- 0100111010 - @X_sa
+
+### Floating-Point Load Instructions
+
+LFS 110000 ..... ..... ................ @D
+LFSU 110001 ..... ..... ................ @D
+LFSX 011111 ..... ..... ..... 1000010111 - @X
+LFSUX 011111 ..... ..... ..... 1000110111 - @X
+
+LFD 110010 ..... ..... ................ @D
+LFDU 110011 ..... ..... ................ @D
+LFDX 011111 ..... ..... ..... 1001010111 - @X
+LFDUX 011111 ..... ..... ..... 1001110111 - @X
+
+### Floating-Point Store Instructions
+
+STFS 110100 ..... ..... ................ @D
+STFSU 110101 ..... ..... ................ @D
+STFSX 011111 ..... ..... ..... 1010010111 - @X
+STFSUX 011111 ..... ..... ..... 1010110111 - @X
+
+STFD 110110 ..... ..... ................ @D
+STFDU 110111 ..... ..... ................ @D
+STFDX 011111 ..... ..... ..... 1011010111 - @X
+STFDUX 011111 ..... ..... ..... 1011110111 - @X
+
+### Floating-Point Arithmetic Instructions
+
+FSQRT 111111 ..... ----- ..... ----- 10110 . @A_tb
+FSQRTS 111011 ..... ----- ..... ----- 10110 . @A_tb
+
+### Floating-Point Select Instruction
+
+FSEL 111111 ..... ..... ..... ..... 10111 . @A
### Move To/From System Register Instructions
@@ -121,6 +414,587 @@ SETBCR 011111 ..... ..... ----- 0110100000 - @X_bi
SETNBC 011111 ..... ..... ----- 0111000000 - @X_bi
SETNBCR 011111 ..... ..... ----- 0111100000 - @X_bi
+### Move To/From FPSCR
+
+{
+ # Before Power ISA v3.0, MFFS bits 11~15 were reserved and should be ignored
+ MFFS_ISA207 111111 ..... ----- ----- 1001000111 . @X_t_rc
+ [
+ MFFS 111111 ..... 00000 ----- 1001000111 . @X_t_rc
+ MFFSCE 111111 ..... 00001 ----- 1001000111 - @X_t
+ MFFSCRN 111111 ..... 10110 ..... 1001000111 - @X_tb
+ MFFSCDRN 111111 ..... 10100 ..... 1001000111 - @X_tb
+ MFFSCRNI 111111 ..... 10111 ---.. 1001000111 - @X_imm2
+ MFFSCDRNI 111111 ..... 10101 --... 1001000111 - @X_imm3
+ MFFSL 111111 ..... 11000 ----- 1001000111 - @X_t
+ ]
+}
+
+### Decimal Floating-Point Arithmetic Instructions
+
+DADD 111011 ..... ..... ..... 0000000010 . @X_rc
+DADDQ 111111 ..... ..... ..... 0000000010 . @X_tp_ap_bp_rc
+
+DSUB 111011 ..... ..... ..... 1000000010 . @X_rc
+DSUBQ 111111 ..... ..... ..... 1000000010 . @X_tp_ap_bp_rc
+
+DMUL 111011 ..... ..... ..... 0000100010 . @X_rc
+DMULQ 111111 ..... ..... ..... 0000100010 . @X_tp_ap_bp_rc
+
+DDIV 111011 ..... ..... ..... 1000100010 . @X_rc
+DDIVQ 111111 ..... ..... ..... 1000100010 . @X_tp_ap_bp_rc
+
+### Decimal Floating-Point Compare Instructions
+
+DCMPU 111011 ... -- ..... ..... 1010000010 - @X_bf
+DCMPUQ 111111 ... -- ..... ..... 1010000010 - @X_bf_ap_bp
+
+DCMPO 111011 ... -- ..... ..... 0010000010 - @X_bf
+DCMPOQ 111111 ... -- ..... ..... 0010000010 - @X_bf_ap_bp
+
+### Decimal Floating-Point Test Instructions
+
+DTSTDC 111011 ... -- ..... ...... 011000010 - @Z22_bf_fra
+DTSTDCQ 111111 ... -- ..... ...... 011000010 - @Z22_bf_frap
+
+DTSTDG 111011 ... -- ..... ...... 011100010 - @Z22_bf_fra
+DTSTDGQ 111111 ... -- ..... ...... 011100010 - @Z22_bf_frap
+
+DTSTEX 111011 ... -- ..... ..... 0010100010 - @X_bf
+DTSTEXQ 111111 ... -- ..... ..... 0010100010 - @X_bf_ap_bp
+
+DTSTSF 111011 ... -- ..... ..... 1010100010 - @X_bf
+DTSTSFQ 111111 ... -- ..... ..... 1010100010 - @X_bf_a_bp
+
+DTSTSFI 111011 ... - ...... ..... 1010100011 - @X_bf_uim
+DTSTSFIQ 111111 ... - ...... ..... 1010100011 - @X_bf_uim_bp
+
+### Decimal Floating-Point Quantum Adjustment Instructions
+
+DQUAI 111011 ..... ..... ..... .. 01000011 . @Z23_te_tb
+DQUAIQ 111111 ..... ..... ..... .. 01000011 . @Z23_te_tbp
+
+DQUA 111011 ..... ..... ..... .. 00000011 . @Z23_tab
+DQUAQ 111111 ..... ..... ..... .. 00000011 . @Z23_tabp
+
+DRRND 111011 ..... ..... ..... .. 00100011 . @Z23_tab
+DRRNDQ 111111 ..... ..... ..... .. 00100011 . @Z23_tp_a_bp
+
+DRINTX 111011 ..... ---- . ..... .. 01100011 . @Z23_tb
+DRINTXQ 111111 ..... ---- . ..... .. 01100011 . @Z23_tbp
+
+DRINTN 111011 ..... ---- . ..... .. 11100011 . @Z23_tb
+DRINTNQ 111111 ..... ---- . ..... .. 11100011 . @Z23_tbp
+
+### Decimal Floating-Point Conversion Instructions
+
+DCTDP 111011 ..... ----- ..... 0100000010 . @X_tb_rc
+DCTQPQ 111111 ..... ----- ..... 0100000010 . @X_tp_b_rc
+
+DRSP 111011 ..... ----- ..... 1100000010 . @X_tb_rc
+DRDPQ 111111 ..... ----- ..... 1100000010 . @X_tbp_rc
+
+DCFFIX 111011 ..... ----- ..... 1100100010 . @X_tb_rc
+DCFFIXQ 111111 ..... ----- ..... 1100100010 . @X_tp_b_rc
+DCFFIXQQ 111111 ..... 00000 ..... 1111100010 - @X_frtp_vrb
+
+DCTFIX 111011 ..... ----- ..... 0100100010 . @X_tb_rc
+DCTFIXQ 111111 ..... ----- ..... 0100100010 . @X_t_bp_rc
+DCTFIXQQ 111111 ..... 00001 ..... 1111100010 - @X_vrt_frbp
+
+### Decimal Floating-Point Format Instructions
+
+DDEDPD 111011 ..... .. --- ..... 0101000010 . @X_tb_sp_rc
+DDEDPDQ 111111 ..... .. --- ..... 0101000010 . @X_tbp_sp_rc
+
+DENBCD 111011 ..... . ---- ..... 1101000010 . @X_tb_s_rc
+DENBCDQ 111111 ..... . ---- ..... 1101000010 . @X_tbp_s_rc
+
+DXEX 111011 ..... ----- ..... 0101100010 . @X_tb_rc
+DXEXQ 111111 ..... ----- ..... 0101100010 . @X_t_bp_rc
+
+DIEX 111011 ..... ..... ..... 1101100010 . @X_rc
+DIEXQ 111111 ..... ..... ..... 1101100010 . @X_tp_a_bp_rc
+
+DSCLI 111011 ..... ..... ...... 001000010 . @Z22_ta_sh_rc
+DSCLIQ 111111 ..... ..... ...... 001000010 . @Z22_tap_sh_rc
+
+DSCRI 111011 ..... ..... ...... 001100010 . @Z22_ta_sh_rc
+DSCRIQ 111111 ..... ..... ...... 001100010 . @Z22_tap_sh_rc
+
+## Vector Exclusive-OR-based Instructions
+
+VPMSUMD 000100 ..... ..... ..... 10011001000 @VX
+
+## Vector Integer Instructions
+
+VCMPEQUB 000100 ..... ..... ..... . 0000000110 @VC
+VCMPEQUH 000100 ..... ..... ..... . 0001000110 @VC
+VCMPEQUW 000100 ..... ..... ..... . 0010000110 @VC
+VCMPEQUD 000100 ..... ..... ..... . 0011000111 @VC
+VCMPEQUQ 000100 ..... ..... ..... . 0111000111 @VC
+
+VCMPGTSB 000100 ..... ..... ..... . 1100000110 @VC
+VCMPGTSH 000100 ..... ..... ..... . 1101000110 @VC
+VCMPGTSW 000100 ..... ..... ..... . 1110000110 @VC
+VCMPGTSD 000100 ..... ..... ..... . 1111000111 @VC
+VCMPGTSQ 000100 ..... ..... ..... . 1110000111 @VC
+
+VCMPGTUB 000100 ..... ..... ..... . 1000000110 @VC
+VCMPGTUH 000100 ..... ..... ..... . 1001000110 @VC
+VCMPGTUW 000100 ..... ..... ..... . 1010000110 @VC
+VCMPGTUD 000100 ..... ..... ..... . 1011000111 @VC
+VCMPGTUQ 000100 ..... ..... ..... . 1010000111 @VC
+
+VCMPNEB 000100 ..... ..... ..... . 0000000111 @VC
+VCMPNEH 000100 ..... ..... ..... . 0001000111 @VC
+VCMPNEW 000100 ..... ..... ..... . 0010000111 @VC
+
+VCMPNEZB 000100 ..... ..... ..... . 0100000111 @VC
+VCMPNEZH 000100 ..... ..... ..... . 0101000111 @VC
+VCMPNEZW 000100 ..... ..... ..... . 0110000111 @VC
+
+VCMPSQ 000100 ... -- ..... ..... 00101000001 @VX_bf
+VCMPUQ 000100 ... -- ..... ..... 00100000001 @VX_bf
+
+## Vector Integer Average Instructions
+
+VAVGSB 000100 ..... ..... ..... 10100000010 @VX
+VAVGSH 000100 ..... ..... ..... 10101000010 @VX
+VAVGSW 000100 ..... ..... ..... 10110000010 @VX
+VAVGUB 000100 ..... ..... ..... 10000000010 @VX
+VAVGUH 000100 ..... ..... ..... 10001000010 @VX
+VAVGUW 000100 ..... ..... ..... 10010000010 @VX
+
+## Vector Integer Absolute Difference Instructions
+
+VABSDUB 000100 ..... ..... ..... 10000000011 @VX
+VABSDUH 000100 ..... ..... ..... 10001000011 @VX
+VABSDUW 000100 ..... ..... ..... 10010000011 @VX
+
## Vector Bit Manipulation Instruction
+VGNB 000100 ..... -- ... ..... 10011001100 @VX_n
+
VCFUGED 000100 ..... ..... ..... 10101001101 @VX
+VCLZDM 000100 ..... ..... ..... 11110000100 @VX
+VCTZDM 000100 ..... ..... ..... 11111000100 @VX
+VPDEPD 000100 ..... ..... ..... 10111001101 @VX
+VPEXTD 000100 ..... ..... ..... 10110001101 @VX
+
+VPRTYBD 000100 ..... 01001 ..... 11000000010 @VX_tb
+VPRTYBQ 000100 ..... 01010 ..... 11000000010 @VX_tb
+VPRTYBW 000100 ..... 01000 ..... 11000000010 @VX_tb
+
+## Vector Permute and Formatting Instructions
+
+VEXTDUBVLX 000100 ..... ..... ..... ..... 011000 @VA
+VEXTDUBVRX 000100 ..... ..... ..... ..... 011001 @VA
+VEXTDUHVLX 000100 ..... ..... ..... ..... 011010 @VA
+VEXTDUHVRX 000100 ..... ..... ..... ..... 011011 @VA
+VEXTDUWVLX 000100 ..... ..... ..... ..... 011100 @VA
+VEXTDUWVRX 000100 ..... ..... ..... ..... 011101 @VA
+VEXTDDVLX 000100 ..... ..... ..... ..... 011110 @VA
+VEXTDDVRX 000100 ..... ..... ..... ..... 011111 @VA
+
+VINSERTB 000100 ..... - .... ..... 01100001101 @VX_uim4
+VINSERTH 000100 ..... - .... ..... 01101001101 @VX_uim4
+VINSERTW 000100 ..... - .... ..... 01110001101 @VX_uim4
+VINSERTD 000100 ..... - .... ..... 01111001101 @VX_uim4
+
+VINSBLX 000100 ..... ..... ..... 01000001111 @VX
+VINSBRX 000100 ..... ..... ..... 01100001111 @VX
+VINSHLX 000100 ..... ..... ..... 01001001111 @VX
+VINSHRX 000100 ..... ..... ..... 01101001111 @VX
+VINSWLX 000100 ..... ..... ..... 01010001111 @VX
+VINSWRX 000100 ..... ..... ..... 01110001111 @VX
+VINSDLX 000100 ..... ..... ..... 01011001111 @VX
+VINSDRX 000100 ..... ..... ..... 01111001111 @VX
+
+VINSW 000100 ..... - .... ..... 00011001111 @VX_uim4
+VINSD 000100 ..... - .... ..... 00111001111 @VX_uim4
+
+VINSBVLX 000100 ..... ..... ..... 00000001111 @VX
+VINSBVRX 000100 ..... ..... ..... 00100001111 @VX
+VINSHVLX 000100 ..... ..... ..... 00001001111 @VX
+VINSHVRX 000100 ..... ..... ..... 00101001111 @VX
+VINSWVLX 000100 ..... ..... ..... 00010001111 @VX
+VINSWVRX 000100 ..... ..... ..... 00110001111 @VX
+
+VSLDBI 000100 ..... ..... ..... 00 ... 010110 @VN
+VSRDBI 000100 ..... ..... ..... 01 ... 010110 @VN
+
+VPERM 000100 ..... ..... ..... ..... 101011 @VA
+VPERMR 000100 ..... ..... ..... ..... 111011 @VA
+
+VSEL 000100 ..... ..... ..... ..... 101010 @VA
+
+## Vector Integer Shift Instructions
+
+VSLB 000100 ..... ..... ..... 00100000100 @VX
+VSLH 000100 ..... ..... ..... 00101000100 @VX
+VSLW 000100 ..... ..... ..... 00110000100 @VX
+VSLD 000100 ..... ..... ..... 10111000100 @VX
+VSLQ 000100 ..... ..... ..... 00100000101 @VX
+
+VSRB 000100 ..... ..... ..... 01000000100 @VX
+VSRH 000100 ..... ..... ..... 01001000100 @VX
+VSRW 000100 ..... ..... ..... 01010000100 @VX
+VSRD 000100 ..... ..... ..... 11011000100 @VX
+VSRQ 000100 ..... ..... ..... 01000000101 @VX
+
+VSRAB 000100 ..... ..... ..... 01100000100 @VX
+VSRAH 000100 ..... ..... ..... 01101000100 @VX
+VSRAW 000100 ..... ..... ..... 01110000100 @VX
+VSRAD 000100 ..... ..... ..... 01111000100 @VX
+VSRAQ 000100 ..... ..... ..... 01100000101 @VX
+
+VRLB 000100 ..... ..... ..... 00000000100 @VX
+VRLH 000100 ..... ..... ..... 00001000100 @VX
+VRLW 000100 ..... ..... ..... 00010000100 @VX
+VRLD 000100 ..... ..... ..... 00011000100 @VX
+VRLQ 000100 ..... ..... ..... 00000000101 @VX
+
+VRLWMI 000100 ..... ..... ..... 00010000101 @VX
+VRLDMI 000100 ..... ..... ..... 00011000101 @VX
+VRLQMI 000100 ..... ..... ..... 00001000101 @VX
+
+VRLWNM 000100 ..... ..... ..... 00110000101 @VX
+VRLDNM 000100 ..... ..... ..... 00111000101 @VX
+VRLQNM 000100 ..... ..... ..... 00101000101 @VX
+
+## Vector Integer Arithmetic Instructions
+
+VADDCUW 000100 ..... ..... ..... 00110000000 @VX
+VADDCUQ 000100 ..... ..... ..... 00101000000 @VX
+VADDUQM 000100 ..... ..... ..... 00100000000 @VX
+
+VADDEUQM 000100 ..... ..... ..... ..... 111100 @VA
+VADDECUQ 000100 ..... ..... ..... ..... 111101 @VA
+
+VSUBCUW 000100 ..... ..... ..... 10110000000 @VX
+VSUBCUQ 000100 ..... ..... ..... 10101000000 @VX
+VSUBUQM 000100 ..... ..... ..... 10100000000 @VX
+
+VSUBECUQ 000100 ..... ..... ..... ..... 111111 @VA
+VSUBEUQM 000100 ..... ..... ..... ..... 111110 @VA
+
+VEXTSB2W 000100 ..... 10000 ..... 11000000010 @VX_tb
+VEXTSH2W 000100 ..... 10001 ..... 11000000010 @VX_tb
+VEXTSB2D 000100 ..... 11000 ..... 11000000010 @VX_tb
+VEXTSH2D 000100 ..... 11001 ..... 11000000010 @VX_tb
+VEXTSW2D 000100 ..... 11010 ..... 11000000010 @VX_tb
+VEXTSD2Q 000100 ..... 11011 ..... 11000000010 @VX_tb
+
+VNEGD 000100 ..... 00111 ..... 11000000010 @VX_tb
+VNEGW 000100 ..... 00110 ..... 11000000010 @VX_tb
+
+## Vector Mask Manipulation Instructions
+
+MTVSRBM 000100 ..... 10000 ..... 11001000010 @VX_tb
+MTVSRHM 000100 ..... 10001 ..... 11001000010 @VX_tb
+MTVSRWM 000100 ..... 10010 ..... 11001000010 @VX_tb
+MTVSRDM 000100 ..... 10011 ..... 11001000010 @VX_tb
+MTVSRQM 000100 ..... 10100 ..... 11001000010 @VX_tb
+MTVSRBMI 000100 ..... ..... .......... 01010 . @DX_b
+
+VEXPANDBM 000100 ..... 00000 ..... 11001000010 @VX_tb
+VEXPANDHM 000100 ..... 00001 ..... 11001000010 @VX_tb
+VEXPANDWM 000100 ..... 00010 ..... 11001000010 @VX_tb
+VEXPANDDM 000100 ..... 00011 ..... 11001000010 @VX_tb
+VEXPANDQM 000100 ..... 00100 ..... 11001000010 @VX_tb
+
+VEXTRACTBM 000100 ..... 01000 ..... 11001000010 @VX_tb
+VEXTRACTHM 000100 ..... 01001 ..... 11001000010 @VX_tb
+VEXTRACTWM 000100 ..... 01010 ..... 11001000010 @VX_tb
+VEXTRACTDM 000100 ..... 01011 ..... 11001000010 @VX_tb
+VEXTRACTQM 000100 ..... 01100 ..... 11001000010 @VX_tb
+
+VCNTMBB 000100 ..... 1100 . ..... 11001000010 @VX_mp
+VCNTMBH 000100 ..... 1101 . ..... 11001000010 @VX_mp
+VCNTMBW 000100 ..... 1110 . ..... 11001000010 @VX_mp
+VCNTMBD 000100 ..... 1111 . ..... 11001000010 @VX_mp
+
+## Vector Multiply Instructions
+
+VMULESB 000100 ..... ..... ..... 01100001000 @VX
+VMULOSB 000100 ..... ..... ..... 00100001000 @VX
+VMULEUB 000100 ..... ..... ..... 01000001000 @VX
+VMULOUB 000100 ..... ..... ..... 00000001000 @VX
+
+VMULESH 000100 ..... ..... ..... 01101001000 @VX
+VMULOSH 000100 ..... ..... ..... 00101001000 @VX
+VMULEUH 000100 ..... ..... ..... 01001001000 @VX
+VMULOUH 000100 ..... ..... ..... 00001001000 @VX
+
+VMULESW 000100 ..... ..... ..... 01110001000 @VX
+VMULOSW 000100 ..... ..... ..... 00110001000 @VX
+VMULEUW 000100 ..... ..... ..... 01010001000 @VX
+VMULOUW 000100 ..... ..... ..... 00010001000 @VX
+
+VMULESD 000100 ..... ..... ..... 01111001000 @VX
+VMULOSD 000100 ..... ..... ..... 00111001000 @VX
+VMULEUD 000100 ..... ..... ..... 01011001000 @VX
+VMULOUD 000100 ..... ..... ..... 00011001000 @VX
+
+VMULHSW 000100 ..... ..... ..... 01110001001 @VX
+VMULHUW 000100 ..... ..... ..... 01010001001 @VX
+VMULHSD 000100 ..... ..... ..... 01111001001 @VX
+VMULHUD 000100 ..... ..... ..... 01011001001 @VX
+VMULLD 000100 ..... ..... ..... 00111001001 @VX
+
+## Vector Multiply-Sum Instructions
+
+VMSUMUBM 000100 ..... ..... ..... ..... 100100 @VA
+VMSUMMBM 000100 ..... ..... ..... ..... 100101 @VA
+VMSUMSHM 000100 ..... ..... ..... ..... 101000 @VA
+VMSUMSHS 000100 ..... ..... ..... ..... 101001 @VA
+VMSUMUHM 000100 ..... ..... ..... ..... 100110 @VA
+VMSUMUHS 000100 ..... ..... ..... ..... 100111 @VA
+
+VMSUMCUD 000100 ..... ..... ..... ..... 010111 @VA
+VMSUMUDM 000100 ..... ..... ..... ..... 100011 @VA
+
+VMLADDUHM 000100 ..... ..... ..... ..... 100010 @VA
+VMHADDSHS 000100 ..... ..... ..... ..... 100000 @VA
+VMHRADDSHS 000100 ..... ..... ..... ..... 100001 @VA
+
+## Vector String Instructions
+
+VSTRIBL 000100 ..... 00000 ..... . 0000001101 @VX_tb_rc
+VSTRIBR 000100 ..... 00001 ..... . 0000001101 @VX_tb_rc
+VSTRIHL 000100 ..... 00010 ..... . 0000001101 @VX_tb_rc
+VSTRIHR 000100 ..... 00011 ..... . 0000001101 @VX_tb_rc
+
+VCLRLB 000100 ..... ..... ..... 00110001101 @VX
+VCLRRB 000100 ..... ..... ..... 00111001101 @VX
+
+# VSX Load/Store Instructions
+
+LXSD 111001 ..... ..... .............. 10 @DS
+STXSD 111101 ..... ..... .............. 10 @DS
+LXSSP 111001 ..... ..... .............. 11 @DS
+STXSSP 111101 ..... ..... .............. 11 @DS
+LXV 111101 ..... ..... ............ . 001 @DQ_TSX
+STXV 111101 ..... ..... ............ . 101 @DQ_TSX
+LXVP 000110 ..... ..... ............ 0000 @DQ_TSXP
+STXVP 000110 ..... ..... ............ 0001 @DQ_TSXP
+LXVX 011111 ..... ..... ..... 0100 - 01100 . @X_TSX
+STXVX 011111 ..... ..... ..... 0110001100 . @X_TSX
+LXVPX 011111 ..... ..... ..... 0101001101 - @X_TSXP
+STXVPX 011111 ..... ..... ..... 0111001101 - @X_TSXP
+LXVRBX 011111 ..... ..... ..... 0000001101 . @X_TSX
+LXVRHX 011111 ..... ..... ..... 0000101101 . @X_TSX
+LXVRWX 011111 ..... ..... ..... 0001001101 . @X_TSX
+LXVRDX 011111 ..... ..... ..... 0001101101 . @X_TSX
+STXVRBX 011111 ..... ..... ..... 0010001101 . @X_TSX
+STXVRHX 011111 ..... ..... ..... 0010101101 . @X_TSX
+STXVRWX 011111 ..... ..... ..... 0011001101 . @X_TSX
+STXVRDX 011111 ..... ..... ..... 0011101101 . @X_TSX
+
+## VSX Vector Binary Floating-Point Sign Manipulation Instructions
+
+XVABSDP 111100 ..... 00000 ..... 111011001 .. @XX2
+XVABSSP 111100 ..... 00000 ..... 110011001 .. @XX2
+XVNABSDP 111100 ..... 00000 ..... 111101001 .. @XX2
+XVNABSSP 111100 ..... 00000 ..... 110101001 .. @XX2
+XVNEGDP 111100 ..... 00000 ..... 111111001 .. @XX2
+XVNEGSP 111100 ..... 00000 ..... 110111001 .. @XX2
+XVCPSGNDP 111100 ..... ..... ..... 11110000 ... @XX3
+XVCPSGNSP 111100 ..... ..... ..... 11010000 ... @XX3
+
+## VSX Scalar Multiply-Add Instructions
+
+XSMADDADP 111100 ..... ..... ..... 00100001 . . . @XX3
+XSMADDMDP 111100 ..... ..... ..... 00101001 . . . @XX3
+XSMADDASP 111100 ..... ..... ..... 00000001 . . . @XX3
+XSMADDMSP 111100 ..... ..... ..... 00001001 . . . @XX3
+XSMADDQP 111111 ..... ..... ..... 0110000100 . @X_rc
+
+XSMSUBADP 111100 ..... ..... ..... 00110001 . . . @XX3
+XSMSUBMDP 111100 ..... ..... ..... 00111001 . . . @XX3
+XSMSUBASP 111100 ..... ..... ..... 00010001 . . . @XX3
+XSMSUBMSP 111100 ..... ..... ..... 00011001 . . . @XX3
+XSMSUBQP 111111 ..... ..... ..... 0110100100 . @X_rc
+
+XSNMADDASP 111100 ..... ..... ..... 10000001 . . . @XX3
+XSNMADDMSP 111100 ..... ..... ..... 10001001 . . . @XX3
+XSNMADDADP 111100 ..... ..... ..... 10100001 . . . @XX3
+XSNMADDMDP 111100 ..... ..... ..... 10101001 . . . @XX3
+XSNMADDQP 111111 ..... ..... ..... 0111000100 . @X_rc
+
+XSNMSUBASP 111100 ..... ..... ..... 10010001 . . . @XX3
+XSNMSUBMSP 111100 ..... ..... ..... 10011001 . . . @XX3
+XSNMSUBADP 111100 ..... ..... ..... 10110001 . . . @XX3
+XSNMSUBMDP 111100 ..... ..... ..... 10111001 . . . @XX3
+XSNMSUBQP 111111 ..... ..... ..... 0111100100 . @X_rc
+
+## VSX splat instruction
+
+XXSPLTIB 111100 ..... 00 ........ 0101101000 . @X_imm8
+XXSPLTW 111100 ..... ---.. ..... 010100100 . . @XX2_uim2
+
+## VSX Permute Instructions
+
+XXEXTRACTUW 111100 ..... - .... ..... 010100101 .. @XX2_uim4
+XXINSERTW 111100 ..... - .... ..... 010110101 .. @XX2_uim4
+
+XXPERM 111100 ..... ..... ..... 00011010 ... @XX3
+XXPERMR 111100 ..... ..... ..... 00111010 ... @XX3
+XXPERMDI 111100 ..... ..... ..... 0 .. 01010 ... @XX3_dm
+
+XXSEL 111100 ..... ..... ..... ..... 11 .... @XX4
+
+## VSX Vector Generate PCV
+
+XXGENPCVBM 111100 ..... ..... ..... 1110010100 . @X_imm5
+XXGENPCVHM 111100 ..... ..... ..... 1110010101 . @X_imm5
+XXGENPCVWM 111100 ..... ..... ..... 1110110100 . @X_imm5
+XXGENPCVDM 111100 ..... ..... ..... 1110110101 . @X_imm5
+
+## VSX Vector Load Special Value Instruction
+
+LXVKQ 111100 ..... 11111 ..... 0101101000 . @X_uim5
+
+## VSX Comparison Instructions
+
+XSMAXCDP 111100 ..... ..... ..... 10000000 ... @XX3
+XSMINCDP 111100 ..... ..... ..... 10001000 ... @XX3
+XSMAXJDP 111100 ..... ..... ..... 10010000 ... @XX3
+XSMINJDP 111100 ..... ..... ..... 10011000 ... @XX3
+XSMAXCQP 111111 ..... ..... ..... 1010100100 - @X
+XSMINCQP 111111 ..... ..... ..... 1011100100 - @X
+
+XSCMPEQDP 111100 ..... ..... ..... 00000011 ... @XX3
+XSCMPGEDP 111100 ..... ..... ..... 00010011 ... @XX3
+XSCMPGTDP 111100 ..... ..... ..... 00001011 ... @XX3
+XSCMPEQQP 111111 ..... ..... ..... 0001000100 - @X
+XSCMPGEQP 111111 ..... ..... ..... 0011000100 - @X
+XSCMPGTQP 111111 ..... ..... ..... 0011100100 - @X
+
+## VSX Binary Floating-Point Convert Instructions
+
+XSCVQPDP 111111 ..... 10100 ..... 1101000100 . @X_tb_rc
+XSCVQPUQZ 111111 ..... 00000 ..... 1101000100 - @X_tb
+XSCVQPSQZ 111111 ..... 01000 ..... 1101000100 - @X_tb
+XSCVUQQP 111111 ..... 00011 ..... 1101000100 - @X_tb
+XSCVSQQP 111111 ..... 01011 ..... 1101000100 - @X_tb
+XVCVBF16SPN 111100 ..... 10000 ..... 111011011 .. @XX2
+XVCVSPBF16 111100 ..... 10001 ..... 111011011 .. @XX2
+XSCVSPDPN 111100 ..... ----- ..... 101001011 .. @XX2
+
+## VSX Binary Floating-Point Math Support Instructions
+
+XVXSIGSP 111100 ..... 01001 ..... 111011011 .. @XX2
+XVTSTDCDP 111100 ..... ..... ..... 1111 . 101 ... @XX2_uim7
+XVTSTDCSP 111100 ..... ..... ..... 1101 . 101 ... @XX2_uim7
+XSTSTDCSP 111100 ... ....... ..... 100101010 . - @XX2_bf_uim xb=%xx_xb
+XSTSTDCDP 111100 ... ....... ..... 101101010 . - @XX2_bf_uim xb=%xx_xb
+XSTSTDCQP 111111 ... ....... xb:5 1011000100 - @XX2_bf_uim
+
+## VSX Vector Test Least-Significant Bit by Byte Instruction
+
+XVTLSBB 111100 ... -- 00010 ..... 111011011 . - @XX2_bf_xb
+
+### rfebb
+&XL_s s:uint8_t
+@XL_s ......-------------- s:1 .......... - &XL_s
+RFEBB 010011-------------- . 0010010010 - @XL_s
+
+## Accumulator Instructions
+
+XXMFACC 011111 ... -- 00000 ----- 0010110001 - @X_a
+XXMTACC 011111 ... -- 00001 ----- 0010110001 - @X_a
+XXSETACCZ 011111 ... -- 00011 ----- 0010110001 - @X_a
+
+## VSX GER instruction
+
+XVI4GER8 111011 ... -- ..... ..... 00100011 ..- @XX3_at xa=%xx_xa
+XVI4GER8PP 111011 ... -- ..... ..... 00100010 ..- @XX3_at xa=%xx_xa
+XVI8GER4 111011 ... -- ..... ..... 00000011 ..- @XX3_at xa=%xx_xa
+XVI8GER4PP 111011 ... -- ..... ..... 00000010 ..- @XX3_at xa=%xx_xa
+XVI16GER2 111011 ... -- ..... ..... 01001011 ..- @XX3_at xa=%xx_xa
+XVI16GER2PP 111011 ... -- ..... ..... 01101011 ..- @XX3_at xa=%xx_xa
+XVI8GER4SPP 111011 ... -- ..... ..... 01100011 ..- @XX3_at xa=%xx_xa
+XVI16GER2S 111011 ... -- ..... ..... 00101011 ..- @XX3_at xa=%xx_xa
+XVI16GER2SPP 111011 ... -- ..... ..... 00101010 ..- @XX3_at xa=%xx_xa
+
+XVBF16GER2 111011 ... -- ..... ..... 00110011 ..- @XX3_at xa=%xx_xa
+XVBF16GER2PP 111011 ... -- ..... ..... 00110010 ..- @XX3_at xa=%xx_xa
+XVBF16GER2PN 111011 ... -- ..... ..... 10110010 ..- @XX3_at xa=%xx_xa
+XVBF16GER2NP 111011 ... -- ..... ..... 01110010 ..- @XX3_at xa=%xx_xa
+XVBF16GER2NN 111011 ... -- ..... ..... 11110010 ..- @XX3_at xa=%xx_xa
+
+XVF16GER2 111011 ... -- ..... ..... 00010011 ..- @XX3_at xa=%xx_xa
+XVF16GER2PP 111011 ... -- ..... ..... 00010010 ..- @XX3_at xa=%xx_xa
+XVF16GER2PN 111011 ... -- ..... ..... 10010010 ..- @XX3_at xa=%xx_xa
+XVF16GER2NP 111011 ... -- ..... ..... 01010010 ..- @XX3_at xa=%xx_xa
+XVF16GER2NN 111011 ... -- ..... ..... 11010010 ..- @XX3_at xa=%xx_xa
+
+XVF32GER 111011 ... -- ..... ..... 00011011 ..- @XX3_at xa=%xx_xa
+XVF32GERPP 111011 ... -- ..... ..... 00011010 ..- @XX3_at xa=%xx_xa
+XVF32GERPN 111011 ... -- ..... ..... 10011010 ..- @XX3_at xa=%xx_xa
+XVF32GERNP 111011 ... -- ..... ..... 01011010 ..- @XX3_at xa=%xx_xa
+XVF32GERNN 111011 ... -- ..... ..... 11011010 ..- @XX3_at xa=%xx_xa
+
+XVF64GER 111011 ... -- .... 0 ..... 00111011 ..- @XX3_at xa=%xx_xa_pair
+XVF64GERPP 111011 ... -- .... 0 ..... 00111010 ..- @XX3_at xa=%xx_xa_pair
+XVF64GERPN 111011 ... -- .... 0 ..... 10111010 ..- @XX3_at xa=%xx_xa_pair
+XVF64GERNP 111011 ... -- .... 0 ..... 01111010 ..- @XX3_at xa=%xx_xa_pair
+XVF64GERNN 111011 ... -- .... 0 ..... 11111010 ..- @XX3_at xa=%xx_xa_pair
+
+## Vector Division Instructions
+
+VDIVSW 000100 ..... ..... ..... 00110001011 @VX
+VDIVUW 000100 ..... ..... ..... 00010001011 @VX
+VDIVSD 000100 ..... ..... ..... 00111001011 @VX
+VDIVUD 000100 ..... ..... ..... 00011001011 @VX
+VDIVSQ 000100 ..... ..... ..... 00100001011 @VX
+VDIVUQ 000100 ..... ..... ..... 00000001011 @VX
+
+VDIVESW 000100 ..... ..... ..... 01110001011 @VX
+VDIVEUW 000100 ..... ..... ..... 01010001011 @VX
+VDIVESD 000100 ..... ..... ..... 01111001011 @VX
+VDIVEUD 000100 ..... ..... ..... 01011001011 @VX
+VDIVESQ 000100 ..... ..... ..... 01100001011 @VX
+VDIVEUQ 000100 ..... ..... ..... 01000001011 @VX
+
+VMODSW 000100 ..... ..... ..... 11110001011 @VX
+VMODUW 000100 ..... ..... ..... 11010001011 @VX
+VMODSD 000100 ..... ..... ..... 11111001011 @VX
+VMODUD 000100 ..... ..... ..... 11011001011 @VX
+VMODSQ 000100 ..... ..... ..... 11100001011 @VX
+VMODUQ 000100 ..... ..... ..... 11000001011 @VX
+
+## SLB Management Instructions
+
+SLBIE 011111 ----- ----- ..... 0110110010 - @X_rb
+SLBIEG 011111 ..... ----- ..... 0111010010 - @X_tb
+
+SLBIA 011111 --... ----- ----- 0111110010 - @X_ih
+SLBIAG 011111 ..... ----. ----- 1101010010 - @X_rs_l
+
+SLBMTE 011111 ..... ----- ..... 0110010010 - @X_tb
+
+SLBMFEV 011111 ..... ----- ..... 1101010011 - @X_tb
+SLBMFEE 011111 ..... ----- ..... 1110010011 - @X_tb
+
+SLBFEE 011111 ..... ----- ..... 1111010011 1 @X_tb
+
+SLBSYNC 011111 ----- ----- ----- 0101010010 -
+
+## TLB Management Instructions
+
+&X_tlbie rb rs ric prs:bool r:bool
+@X_tlbie ...... rs:5 - ric:2 prs:1 r:1 rb:5 .......... - &X_tlbie
+
+TLBIE 011111 ..... - .. . . ..... 0100110010 - @X_tlbie
+TLBIEL 011111 ..... - .. . . ..... 0100010010 - @X_tlbie
+
+# Processor Control Instructions
+
+MSGCLR 011111 ----- ----- ..... 0011101110 - @X_rb
+MSGSND 011111 ----- ----- ..... 0011001110 - @X_rb
+MSGCLRP 011111 ----- ----- ..... 0010101110 - @X_rb
+MSGSNDP 011111 ----- ----- ..... 0010001110 - @X_rb
+MSGSYNC 011111 ----- ----- ----- 1101110110 -
diff --git a/target/ppc/insn64.decode b/target/ppc/insn64.decode
index 72c5944a53..de115c1943 100644
--- a/target/ppc/insn64.decode
+++ b/target/ppc/insn64.decode
@@ -23,6 +23,64 @@
@PLS_D ...... .. ... r:1 .. .................. \
...... rt:5 ra:5 ................ \
&PLS_D si=%pls_si
+@8LS_D_TSX ...... .. . .. r:1 .. .................. \
+ ..... rt:6 ra:5 ................ \
+ &PLS_D si=%pls_si
+
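+# rt for the paired-VSR (TSXP) forms: the split TX and Tp fields are
+# concatenated and doubled by times_2, giving the even-numbered first
+# register of the VSR pair.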
+%rt_tsxp 21:1 22:4 !function=times_2
+@8LS_D_TSXP ...... .. . .. r:1 .. .................. \
+ ...... ..... ra:5 ................ \
+ &PLS_D si=%pls_si rt=%rt_tsxp
+
+@8LS_D ...... .. . .. r:1 .. .................. \
+ ...... rt:5 ra:5 ................ \
+ &PLS_D si=%pls_si
+
+# Format 8RR:D
+%8rr_si 32:s16 0:16
+%8rr_xt 16:1 21:5
+&8RR_D_IX xt ix si
+@8RR_D_IX ...... .. .... .. .. ................ \
+ ...... ..... ... ix:1 . ................ \
+ &8RR_D_IX si=%8rr_si xt=%8rr_xt
+&8RR_D xt si:int32_t
+@8RR_D ...... .. .... .. .. ................ \
+ ...... ..... .... . ................ \
+ &8RR_D si=%8rr_si xt=%8rr_xt
+
+# Format 8RR:XX4
+%8rr_xx_xt 0:1 21:5
+%8rr_xx_xa 2:1 16:5
+%8rr_xx_xb 1:1 11:5
+%8rr_xx_xc 3:1 6:5
+&8RR_XX4 xt xa xb xc
+@8RR_XX4 ........ ........ ........ ........ \
+ ...... ..... ..... ..... ..... .. .... \
+ &8RR_XX4 xt=%8rr_xx_xt xa=%8rr_xx_xa xb=%8rr_xx_xb xc=%8rr_xx_xc
+
+&8RR_XX4_imm xt xa xb xc imm
+@8RR_XX4_imm ........ ........ ........ imm:8 \
+ ...... ..... ..... ..... ..... .. .... \
+ &8RR_XX4_imm xt=%8rr_xx_xt xa=%8rr_xx_xa xb=%8rr_xx_xb xc=%8rr_xx_xc
+
+&8RR_XX4_uim3 xt xa xb xc uim3
+@8RR_XX4_uim3 ...... .. .... .. ............... uim3:3 \
+ ...... ..... ..... ..... ..... .. .... \
+ &8RR_XX4_uim3 xt=%8rr_xx_xt xa=%8rr_xx_xa xb=%8rr_xx_xb xc=%8rr_xx_xc
+
+# Format MMIRR:XX3
+&MMIRR_XX3 !extern xa xb xt pmsk xmsk ymsk
+%xx3_xa 2:1 16:5
+%xx3_xb 1:1 11:5
+%xx3_at 23:3
+%xx3_xa_pair 2:1 17:4 !function=times_2
+@MMIRR_XX3 ...... .. .... .. . . ........ xmsk:4 ymsk:4 \
+ ...... ... .. ..... ..... ........ ... \
+ &MMIRR_XX3 xa=%xx3_xa xb=%xx3_xb xt=%xx3_at
+
+@MMIRR_XX3_NO_P ...... .. .... .. . . ........ xmsk:4 .... \
+ ...... ... .. ..... ..... ........ ... \
+ &MMIRR_XX3 xb=%xx3_xb xt=%xx3_at pmsk=1
### Fixed-Point Load Instructions
@@ -38,6 +96,8 @@ PLWA 000001 00 0--.-- .................. \
101001 ..... ..... ................ @PLS_D
PLD 000001 00 0--.-- .................. \
111001 ..... ..... ................ @PLS_D
+PLQ 000001 00 0--.-- .................. \
+ 111000 ..... ..... ................ @PLS_D
### Fixed-Point Store Instructions
@@ -50,12 +110,90 @@ PSTH 000001 10 0--.-- .................. \
PSTD 000001 00 0--.-- .................. \
111101 ..... ..... ................ @PLS_D
+PSTQ 000001 00 0--.-- .................. \
+ 111100 ..... ..... ................ @PLS_D
### Fixed-Point Arithmetic Instructions
PADDI 000001 10 0--.-- .................. \
001110 ..... ..... ................ @PLS_D
+### Floating-Point Load and Store Instructions
+
+PLFS 000001 10 0--.-- .................. \
+ 110000 ..... ..... ................ @PLS_D
+PLFD 000001 10 0--.-- .................. \
+ 110010 ..... ..... ................ @PLS_D
+PSTFS 000001 10 0--.-- .................. \
+ 110100 ..... ..... ................ @PLS_D
+PSTFD 000001 10 0--.-- .................. \
+ 110110 ..... ..... ................ @PLS_D
+
+## VSX GER instruction
+
+PMXVI4GER8 000001 11 1001 -- - - pmsk:8 ........ \
+ 111011 ... -- ..... ..... 00100011 ..- @MMIRR_XX3
+PMXVI4GER8PP 000001 11 1001 -- - - pmsk:8 ........ \
+ 111011 ... -- ..... ..... 00100010 ..- @MMIRR_XX3
+PMXVI8GER4 000001 11 1001 -- - - pmsk:4 ---- ........ \
+ 111011 ... -- ..... ..... 00000011 ..- @MMIRR_XX3
+PMXVI8GER4PP 000001 11 1001 -- - - pmsk:4 ---- ........ \
+ 111011 ... -- ..... ..... 00000010 ..- @MMIRR_XX3
+PMXVI16GER2 000001 11 1001 -- - - pmsk:2 ------ ........ \
+ 111011 ... -- ..... ..... 01001011 ..- @MMIRR_XX3
+PMXVI16GER2PP 000001 11 1001 -- - - pmsk:2 ------ ........ \
+ 111011 ... -- ..... ..... 01101011 ..- @MMIRR_XX3
+PMXVI8GER4SPP 000001 11 1001 -- - - pmsk:4 ---- ........ \
+ 111011 ... -- ..... ..... 01100011 ..- @MMIRR_XX3
+PMXVI16GER2S 000001 11 1001 -- - - pmsk:2 ------ ........ \
+ 111011 ... -- ..... ..... 00101011 ..- @MMIRR_XX3
+PMXVI16GER2SPP 000001 11 1001 -- - - pmsk:2 ------ ........ \
+ 111011 ... -- ..... ..... 00101010 ..- @MMIRR_XX3
+
+PMXVBF16GER2 000001 11 1001 -- - - pmsk:2 ------ ........ \
+ 111011 ... -- ..... ..... 00110011 ..- @MMIRR_XX3
+PMXVBF16GER2PP 000001 11 1001 -- - - pmsk:2 ------ ........ \
+ 111011 ... -- ..... ..... 00110010 ..- @MMIRR_XX3
+PMXVBF16GER2PN 000001 11 1001 -- - - pmsk:2 ------ ........ \
+ 111011 ... -- ..... ..... 10110010 ..- @MMIRR_XX3
+PMXVBF16GER2NP 000001 11 1001 -- - - pmsk:2 ------ ........ \
+ 111011 ... -- ..... ..... 01110010 ..- @MMIRR_XX3
+PMXVBF16GER2NN 000001 11 1001 -- - - pmsk:2 ------ ........ \
+ 111011 ... -- ..... ..... 11110010 ..- @MMIRR_XX3
+
+PMXVF16GER2 000001 11 1001 -- - - pmsk:2 ------ ........ \
+ 111011 ... -- ..... ..... 00010011 ..- @MMIRR_XX3
+PMXVF16GER2PP 000001 11 1001 -- - - pmsk:2 ------ ........ \
+ 111011 ... -- ..... ..... 00010010 ..- @MMIRR_XX3
+PMXVF16GER2PN 000001 11 1001 -- - - pmsk:2 ------ ........ \
+ 111011 ... -- ..... ..... 10010010 ..- @MMIRR_XX3
+PMXVF16GER2NP 000001 11 1001 -- - - pmsk:2 ------ ........ \
+ 111011 ... -- ..... ..... 01010010 ..- @MMIRR_XX3
+PMXVF16GER2NN 000001 11 1001 -- - - pmsk:2 ------ ........ \
+ 111011 ... -- ..... ..... 11010010 ..- @MMIRR_XX3
+
+PMXVF32GER 000001 11 1001 -- - - -------- .... ymsk:4 \
+ 111011 ... -- ..... ..... 00011011 ..- @MMIRR_XX3_NO_P xa=%xx3_xa
+PMXVF32GERPP 000001 11 1001 -- - - -------- .... ymsk:4 \
+ 111011 ... -- ..... ..... 00011010 ..- @MMIRR_XX3_NO_P xa=%xx3_xa
+PMXVF32GERPN 000001 11 1001 -- - - -------- .... ymsk:4 \
+ 111011 ... -- ..... ..... 10011010 ..- @MMIRR_XX3_NO_P xa=%xx3_xa
+PMXVF32GERNP 000001 11 1001 -- - - -------- .... ymsk:4 \
+ 111011 ... -- ..... ..... 01011010 ..- @MMIRR_XX3_NO_P xa=%xx3_xa
+PMXVF32GERNN 000001 11 1001 -- - - -------- .... ymsk:4 \
+ 111011 ... -- ..... ..... 11011010 ..- @MMIRR_XX3_NO_P xa=%xx3_xa
+
+PMXVF64GER 000001 11 1001 -- - - -------- .... ymsk:2 -- \
+ 111011 ... -- ....0 ..... 00111011 ..- @MMIRR_XX3_NO_P xa=%xx3_xa_pair
+PMXVF64GERPP 000001 11 1001 -- - - -------- .... ymsk:2 -- \
+ 111011 ... -- ....0 ..... 00111010 ..- @MMIRR_XX3_NO_P xa=%xx3_xa_pair
+PMXVF64GERPN 000001 11 1001 -- - - -------- .... ymsk:2 -- \
+ 111011 ... -- ....0 ..... 10111010 ..- @MMIRR_XX3_NO_P xa=%xx3_xa_pair
+PMXVF64GERNP 000001 11 1001 -- - - -------- .... ymsk:2 -- \
+ 111011 ... -- ....0 ..... 01111010 ..- @MMIRR_XX3_NO_P xa=%xx3_xa_pair
+PMXVF64GERNN 000001 11 1001 -- - - -------- .... ymsk:2 -- \
+ 111011 ... -- ....0 ..... 11111010 ..- @MMIRR_XX3_NO_P xa=%xx3_xa_pair
+
### Prefixed No-operation Instruction
@PNOP 000001 11 0000-- 000000000000000000 \
@@ -122,3 +260,48 @@ PADDI 000001 10 0--.-- .................. \
PNOP ................................ \
-------------------------------- @PNOP
}
+
+### VSX instructions
+
+PLXSD 000001 00 0--.-- .................. \
+ 101010 ..... ..... ................ @8LS_D
+
+PSTXSD 000001 00 0--.-- .................. \
+ 101110 ..... ..... ................ @8LS_D
+
+PLXSSP 000001 00 0--.-- .................. \
+ 101011 ..... ..... ................ @8LS_D
+
+PSTXSSP 000001 00 0--.-- .................. \
+ 101111 ..... ..... ................ @8LS_D
+
+PLXV 000001 00 0--.-- .................. \
+ 11001 ...... ..... ................ @8LS_D_TSX
+PSTXV 000001 00 0--.-- .................. \
+ 11011 ...... ..... ................ @8LS_D_TSX
+PLXVP 000001 00 0--.-- .................. \
+ 111010 ..... ..... ................ @8LS_D_TSXP
+PSTXVP 000001 00 0--.-- .................. \
+ 111110 ..... ..... ................ @8LS_D_TSXP
+
+XXEVAL 000001 01 0000 -- ---------- ........ \
+ 100010 ..... ..... ..... ..... 01 .... @8RR_XX4_imm
+
+XXSPLTIDP 000001 01 0000 -- -- ................ \
+ 100000 ..... 0010 . ................ @8RR_D
+XXSPLTIW 000001 01 0000 -- -- ................ \
+ 100000 ..... 0011 . ................ @8RR_D
+XXSPLTI32DX 000001 01 0000 -- -- ................ \
+ 100000 ..... 000 .. ................ @8RR_D_IX
+
+XXBLENDVD 000001 01 0000 -- ------------------ \
+ 100001 ..... ..... ..... ..... 11 .... @8RR_XX4
+XXBLENDVW 000001 01 0000 -- ------------------ \
+ 100001 ..... ..... ..... ..... 10 .... @8RR_XX4
+XXBLENDVH 000001 01 0000 -- ------------------ \
+ 100001 ..... ..... ..... ..... 01 .... @8RR_XX4
+XXBLENDVB 000001 01 0000 -- ------------------ \
+ 100001 ..... ..... ..... ..... 00 .... @8RR_XX4
+
+XXPERMX 000001 01 0000 -- --------------- ... \
+ 100010 ..... ..... ..... ..... 00 .... @8RR_XX4_uim3
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index c2d3248d1e..0a5c3e78a4 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -21,13 +21,15 @@
#include "cpu.h"
#include "internal.h"
#include "qemu/host-utils.h"
-#include "qemu/main-loop.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "crypto/aes.h"
+#include "crypto/aes-round.h"
+#include "crypto/clmul.h"
#include "fpu/softfloat.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
+#include "tcg/tcg-gvec-desc.h"
#include "helper_regs.h"
/*****************************************************************************/
@@ -36,9 +38,9 @@
static inline void helper_update_ov_legacy(CPUPPCState *env, int ov)
{
if (unlikely(ov)) {
- env->so = env->ov = 1;
+ env->so = env->ov = env->ov32 = 1;
} else {
- env->ov = 0;
+ env->ov = env->ov32 = 0;
}
}
@@ -104,10 +106,11 @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)
uint64_t rt = 0;
int overflow = 0;
- overflow = divu128(&rt, &ra, rb);
-
- if (unlikely(overflow)) {
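+    /*
+     * divdeu divides (ra << 64) by rb; the quotient overflows the
+     * 64-bit result exactly when rb == 0 or ra >= rb.
+     */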
+ if (unlikely(rb == 0 || ra >= rb)) {
+ overflow = 1;
rt = 0; /* Undefined */
+ } else {
+ divu128(&rt, &ra, rb);
}
if (oe) {
@@ -119,13 +122,16 @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)
uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
{
- int64_t rt = 0;
+ uint64_t rt = 0;
int64_t ra = (int64_t)rau;
int64_t rb = (int64_t)rbu;
- int overflow = divs128(&rt, &ra, rb);
+ int overflow = 0;
- if (unlikely(overflow)) {
+ if (unlikely(rb == 0 || uabs64(ra) >= uabs64(rb))) {
+ overflow = 1;
rt = 0; /* Undefined */
+ } else {
+ divs128(&rt, &ra, rb);
}
if (oe) {
@@ -320,7 +326,7 @@ target_ulong helper_popcntb(target_ulong val)
}
#endif
-uint64_t helper_cfuged(uint64_t src, uint64_t mask)
+uint64_t helper_CFUGED(uint64_t src, uint64_t mask)
{
/*
* Instead of processing the mask bit-by-bit from the most significant to
@@ -382,96 +388,45 @@ uint64_t helper_cfuged(uint64_t src, uint64_t mask)
return left | (right >> n);
}
-/*****************************************************************************/
-/* PowerPC 601 specific instructions (POWER bridge) */
-target_ulong helper_div(CPUPPCState *env, target_ulong arg1, target_ulong arg2)
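+/*
+ * vpdepd: parallel bit deposit - scatter the low-order bits of src into
+ * the bit positions selected by mask, from least to most significant.
+ */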
+uint64_t helper_PDEPD(uint64_t src, uint64_t mask)
{
- uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
+ int i, o;
+ uint64_t result = 0;
- if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
- (int32_t)arg2 == 0) {
- env->spr[SPR_MQ] = 0;
- return INT32_MIN;
- } else {
- env->spr[SPR_MQ] = tmp % arg2;
- return tmp / (int32_t)arg2;
+ if (mask == -1) {
+ return src;
}
-}
-
-target_ulong helper_divo(CPUPPCState *env, target_ulong arg1,
- target_ulong arg2)
-{
- uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
- if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
- (int32_t)arg2 == 0) {
- env->so = env->ov = 1;
- env->spr[SPR_MQ] = 0;
- return INT32_MIN;
- } else {
- env->spr[SPR_MQ] = tmp % arg2;
- tmp /= (int32_t)arg2;
- if ((int32_t)tmp != tmp) {
- env->so = env->ov = 1;
- } else {
- env->ov = 0;
- }
- return tmp;
+ for (i = 0; mask != 0; i++) {
+ o = ctz64(mask);
+ mask &= mask - 1;
+ result |= ((src >> i) & 1) << o;
}
-}
-target_ulong helper_divs(CPUPPCState *env, target_ulong arg1,
- target_ulong arg2)
-{
- if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
- (int32_t)arg2 == 0) {
- env->spr[SPR_MQ] = 0;
- return INT32_MIN;
- } else {
- env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
- return (int32_t)arg1 / (int32_t)arg2;
- }
+ return result;
}
-target_ulong helper_divso(CPUPPCState *env, target_ulong arg1,
- target_ulong arg2)
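+/*
+ * vpextd: parallel bit extract - gather the bits of src selected by mask
+ * into the low-order bits of the result, preserving their order.
+ */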
+uint64_t helper_PEXTD(uint64_t src, uint64_t mask)
{
- if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
- (int32_t)arg2 == 0) {
- env->so = env->ov = 1;
- env->spr[SPR_MQ] = 0;
- return INT32_MIN;
- } else {
- env->ov = 0;
- env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
- return (int32_t)arg1 / (int32_t)arg2;
+ int i, o;
+ uint64_t result = 0;
+
+ if (mask == -1) {
+ return src;
}
-}
-/*****************************************************************************/
-/* 602 specific instructions */
-/* mfrom is the most crazy instruction ever seen, imho ! */
-/* Real implementation uses a ROM table. Do the same */
-/*
- * Extremely decomposed:
- * return 256 * log10(10 ** (-arg / 256) + 1.0) + 0.5
- */
-#if !defined(CONFIG_USER_ONLY)
-target_ulong helper_602_mfrom(target_ulong arg)
-{
- if (likely(arg < 602)) {
-#include "mfrom_table.c.inc"
- return mfrom_ROM_table[arg];
- } else {
- return 0;
+ for (o = 0; mask != 0; o++) {
+ i = ctz64(mask);
+ mask &= mask - 1;
+ result |= ((src >> i) & 1) << o;
}
+
+ return result;
}
-#endif
/*****************************************************************************/
/* Altivec extension helpers */
-#if defined(HOST_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN
#define VECTOR_FOR_INORDER_I(index, element) \
for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
@@ -538,40 +493,8 @@ static inline void set_vscr_sat(CPUPPCState *env)
env->vscr_sat.u32[0] = 1;
}
-void helper_vaddcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
- r->u32[i] = ~a->u32[i] < b->u32[i];
- }
-}
-
-/* vprtybw */
-void helper_vprtybw(ppc_avr_t *r, ppc_avr_t *b)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
- uint64_t res = b->u32[i] ^ (b->u32[i] >> 16);
- res ^= res >> 8;
- r->u32[i] = res & 1;
- }
-}
-
-/* vprtybd */
-void helper_vprtybd(ppc_avr_t *r, ppc_avr_t *b)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
- uint64_t res = b->u64[i] ^ (b->u64[i] >> 32);
- res ^= res >> 16;
- res ^= res >> 8;
- r->u64[i] = res & 1;
- }
-}
-
/* vprtybq */
-void helper_vprtybq(ppc_avr_t *r, ppc_avr_t *b)
+void helper_VPRTYBQ(ppc_avr_t *r, ppc_avr_t *b, uint32_t v)
{
uint64_t res = b->u64[0] ^ b->u64[1];
res ^= res >> 32;
@@ -648,29 +571,27 @@ VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED
-#define VAVG_DO(name, element, etype) \
- void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
- { \
- int i; \
- \
- for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
- etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \
- r->element[i] = x >> 1; \
- } \
- }
-
-#define VAVG(type, signed_element, signed_type, unsigned_element, \
- unsigned_type) \
- VAVG_DO(avgs##type, signed_element, signed_type) \
- VAVG_DO(avgu##type, unsigned_element, unsigned_type)
-VAVG(b, s8, int16_t, u8, uint16_t)
-VAVG(h, s16, int32_t, u16, uint32_t)
-VAVG(w, s32, int64_t, u32, uint64_t)
-#undef VAVG_DO
+#define VAVG(name, element, etype) \
+ void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t v)\
+ { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \
+ r->element[i] = x >> 1; \
+ } \
+ }
+
+VAVG(VAVGSB, s8, int16_t)
+VAVG(VAVGUB, u8, uint16_t)
+VAVG(VAVGSH, s16, int32_t)
+VAVG(VAVGUH, u16, uint32_t)
+VAVG(VAVGSW, s32, int64_t)
+VAVG(VAVGUW, u32, uint64_t)
#undef VAVG
-#define VABSDU_DO(name, element) \
-void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+#define VABSDU(name, element) \
+void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t v)\
{ \
int i; \
\
@@ -686,12 +607,9 @@ void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
* name - instruction mnemonic suffix (b: byte, h: halfword, w: word)
* element - element type to access from vector
*/
-#define VABSDU(type, element) \
- VABSDU_DO(absdu##type, element)
-VABSDU(b, u8)
-VABSDU(h, u16)
-VABSDU(w, u32)
-#undef VABSDU_DO
+VABSDU(VABSDUB, u8)
+VABSDU(VABSDUH, u16)
+VABSDU(VABSDUW, u32)
#undef VABSDU
#define VCF(suffix, cvt, element) \
@@ -709,100 +627,18 @@ VCF(ux, uint32_to_float32, u32)
VCF(sx, int32_to_float32, s32)
#undef VCF
-#define VCMP_DO(suffix, compare, element, record) \
- void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r, \
- ppc_avr_t *a, ppc_avr_t *b) \
- { \
- uint64_t ones = (uint64_t)-1; \
- uint64_t all = ones; \
- uint64_t none = 0; \
- int i; \
- \
- for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
- uint64_t result = (a->element[i] compare b->element[i] ? \
- ones : 0x0); \
- switch (sizeof(a->element[0])) { \
- case 8: \
- r->u64[i] = result; \
- break; \
- case 4: \
- r->u32[i] = result; \
- break; \
- case 2: \
- r->u16[i] = result; \
- break; \
- case 1: \
- r->u8[i] = result; \
- break; \
- } \
- all &= result; \
- none |= result; \
- } \
- if (record) { \
- env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
- } \
- }
-#define VCMP(suffix, compare, element) \
- VCMP_DO(suffix, compare, element, 0) \
- VCMP_DO(suffix##_dot, compare, element, 1)
-VCMP(equb, ==, u8)
-VCMP(equh, ==, u16)
-VCMP(equw, ==, u32)
-VCMP(equd, ==, u64)
-VCMP(gtub, >, u8)
-VCMP(gtuh, >, u16)
-VCMP(gtuw, >, u32)
-VCMP(gtud, >, u64)
-VCMP(gtsb, >, s8)
-VCMP(gtsh, >, s16)
-VCMP(gtsw, >, s32)
-VCMP(gtsd, >, s64)
-#undef VCMP_DO
-#undef VCMP
-
-#define VCMPNE_DO(suffix, element, etype, cmpzero, record) \
-void helper_vcmpne##suffix(CPUPPCState *env, ppc_avr_t *r, \
- ppc_avr_t *a, ppc_avr_t *b) \
-{ \
- etype ones = (etype)-1; \
- etype all = ones; \
- etype result, none = 0; \
- int i; \
- \
- for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
- if (cmpzero) { \
- result = ((a->element[i] == 0) \
- || (b->element[i] == 0) \
- || (a->element[i] != b->element[i]) ? \
- ones : 0x0); \
- } else { \
- result = (a->element[i] != b->element[i]) ? ones : 0x0; \
- } \
- r->element[i] = result; \
- all &= result; \
- none |= result; \
- } \
- if (record) { \
- env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
- } \
+#define VCMPNEZ(NAME, ELEM) \
+void helper_##NAME(ppc_vsr_t *t, ppc_vsr_t *a, ppc_vsr_t *b, uint32_t desc) \
+{ \
+ for (int i = 0; i < ARRAY_SIZE(t->ELEM); i++) { \
+ t->ELEM[i] = ((a->ELEM[i] == 0) || (b->ELEM[i] == 0) || \
+ (a->ELEM[i] != b->ELEM[i])) ? -1 : 0; \
+ } \
}
-
-/*
- * VCMPNEZ - Vector compare not equal to zero
- * suffix - instruction mnemonic suffix (b: byte, h: halfword, w: word)
- * element - element type to access from vector
- */
-#define VCMPNE(suffix, element, etype, cmpzero) \
- VCMPNE_DO(suffix, element, etype, cmpzero, 0) \
- VCMPNE_DO(suffix##_dot, element, etype, cmpzero, 1)
-VCMPNE(zb, u8, uint8_t, 1)
-VCMPNE(zh, u16, uint16_t, 1)
-VCMPNE(zw, u32, uint32_t, 1)
-VCMPNE(b, u8, uint8_t, 0)
-VCMPNE(h, u16, uint16_t, 0)
-VCMPNE(w, u32, uint32_t, 0)
-#undef VCMPNE_DO
-#undef VCMPNE
+VCMPNEZ(VCMPNEZB, u8)
+VCMPNEZ(VCMPNEZH, u16)
+VCMPNEZ(VCMPNEZW, u32)
+#undef VCMPNEZ
#define VCMPFP_DO(suffix, compare, order, record) \
void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r, \
@@ -910,6 +746,137 @@ VCT(uxs, cvtsduw, u32)
VCT(sxs, cvtsdsw, s32)
#undef VCT
+typedef int64_t do_ger(uint32_t, uint32_t, uint32_t);
+
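+/*
+ * ger_rank8/4/2 compute one 32-bit lane of a GER (general matrix
+ * rank-k update) product: the masked sum of products of the packed
+ * 4-bit, 8-bit or 16-bit sub-elements of a and b.
+ */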
+static int64_t ger_rank8(uint32_t a, uint32_t b, uint32_t mask)
+{
+ int64_t psum = 0;
+ for (int i = 0; i < 8; i++, mask >>= 1) {
+ if (mask & 1) {
+ psum += (int64_t)sextract32(a, 4 * i, 4) * sextract32(b, 4 * i, 4);
+ }
+ }
+ return psum;
+}
+
+static int64_t ger_rank4(uint32_t a, uint32_t b, uint32_t mask)
+{
+ int64_t psum = 0;
+ for (int i = 0; i < 4; i++, mask >>= 1) {
+ if (mask & 1) {
+ psum += sextract32(a, 8 * i, 8) * (int64_t)extract32(b, 8 * i, 8);
+ }
+ }
+ return psum;
+}
+
+static int64_t ger_rank2(uint32_t a, uint32_t b, uint32_t mask)
+{
+ int64_t psum = 0;
+ for (int i = 0; i < 2; i++, mask >>= 1) {
+ if (mask & 1) {
+ psum += (int64_t)sextract32(a, 16 * i, 16) *
+ sextract32(b, 16 * i, 16);
+ }
+ }
+ return psum;
+}
+
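+/*
+ * xviger: 4x4 outer-product accumulate.  For each (i, j) enabled by the
+ * XMSK/YMSK bits, form the rank-k product of word i of a and word j of b
+ * (masked by PMSK), optionally add the existing accumulator entry, and
+ * write it back with optional signed 32-bit saturation; disabled entries
+ * are zeroed.
+ */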
+static void xviger(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, ppc_acc_t *at,
+ uint32_t mask, bool sat, bool acc, do_ger ger)
+{
+ uint8_t pmsk = FIELD_EX32(mask, GER_MSK, PMSK),
+ xmsk = FIELD_EX32(mask, GER_MSK, XMSK),
+ ymsk = FIELD_EX32(mask, GER_MSK, YMSK);
+ uint8_t xmsk_bit, ymsk_bit;
+ int64_t psum;
+ int i, j;
+ for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) {
+ for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) {
+ if ((xmsk_bit & xmsk) && (ymsk_bit & ymsk)) {
+ psum = ger(a->VsrW(i), b->VsrW(j), pmsk);
+ if (acc) {
+ psum += at[i].VsrSW(j);
+ }
+ if (sat && psum > INT32_MAX) {
+ set_vscr_sat(env);
+ at[i].VsrSW(j) = INT32_MAX;
+ } else if (sat && psum < INT32_MIN) {
+ set_vscr_sat(env);
+ at[i].VsrSW(j) = INT32_MIN;
+ } else {
+ at[i].VsrSW(j) = (int32_t) psum;
+ }
+ } else {
+ at[i].VsrSW(j) = 0;
+ }
+ }
+ }
+}
+
+QEMU_FLATTEN
+void helper_XVI4GER8(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ xviger(env, a, b, at, mask, false, false, ger_rank8);
+}
+
+QEMU_FLATTEN
+void helper_XVI4GER8PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ xviger(env, a, b, at, mask, false, true, ger_rank8);
+}
+
+QEMU_FLATTEN
+void helper_XVI8GER4(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ xviger(env, a, b, at, mask, false, false, ger_rank4);
+}
+
+QEMU_FLATTEN
+void helper_XVI8GER4PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ xviger(env, a, b, at, mask, false, true, ger_rank4);
+}
+
+QEMU_FLATTEN
+void helper_XVI8GER4SPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ xviger(env, a, b, at, mask, true, true, ger_rank4);
+}
+
+QEMU_FLATTEN
+void helper_XVI16GER2(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ xviger(env, a, b, at, mask, false, false, ger_rank2);
+}
+
+QEMU_FLATTEN
+void helper_XVI16GER2S(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ xviger(env, a, b, at, mask, true, false, ger_rank2);
+}
+
+QEMU_FLATTEN
+void helper_XVI16GER2PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ xviger(env, a, b, at, mask, false, true, ger_rank2);
+}
+
+QEMU_FLATTEN
+void helper_XVI16GER2SPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ xviger(env, a, b, at, mask, true, true, ger_rank2);
+}
+
target_ulong helper_vclzlsbb(ppc_avr_t *r)
{
target_ulong count = 0;
@@ -936,7 +903,7 @@ target_ulong helper_vctzlsbb(ppc_avr_t *r)
return count;
}
-void helper_vmhaddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+void helper_VMHADDSHS(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
ppc_avr_t *b, ppc_avr_t *c)
{
int sat = 0;
@@ -954,7 +921,7 @@ void helper_vmhaddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
}
}
-void helper_vmhraddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+void helper_VMHRADDSHS(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
ppc_avr_t *b, ppc_avr_t *c)
{
int sat = 0;
@@ -971,7 +938,8 @@ void helper_vmhraddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
}
}
-void helper_vmladduhm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+void helper_VMLADDUHM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c,
+ uint32_t v)
{
int i;
@@ -1003,8 +971,7 @@ VMRG(w, u32, VsrW)
#undef VMRG_DO
#undef VMRG
-void helper_vmsummbm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
- ppc_avr_t *b, ppc_avr_t *c)
+void helper_VMSUMMBM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
int32_t prod[16];
int i;
@@ -1019,8 +986,7 @@ void helper_vmsummbm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
}
}
-void helper_vmsumshm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
- ppc_avr_t *b, ppc_avr_t *c)
+void helper_VMSUMSHM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
int32_t prod[8];
int i;
@@ -1034,7 +1000,7 @@ void helper_vmsumshm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
}
}
-void helper_vmsumshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+void helper_VMSUMSHS(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
ppc_avr_t *b, ppc_avr_t *c)
{
int32_t prod[8];
@@ -1056,8 +1022,7 @@ void helper_vmsumshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
}
}
-void helper_vmsumubm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
- ppc_avr_t *b, ppc_avr_t *c)
+void helper_VMSUMUBM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
uint16_t prod[16];
int i;
@@ -1072,8 +1037,7 @@ void helper_vmsumubm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
}
}
-void helper_vmsumuhm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
- ppc_avr_t *b, ppc_avr_t *c)
+void helper_VMSUMUHM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
uint32_t prod[8];
int i;
@@ -1087,7 +1051,7 @@ void helper_vmsumuhm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
}
}
-void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+void helper_VMSUMUHS(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
ppc_avr_t *b, ppc_avr_t *c)
{
uint32_t prod[8];
@@ -1110,7 +1074,7 @@ void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
}
#define VMUL_DO_EVN(name, mul_element, mul_access, prod_access, cast) \
- void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ void helper_V##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
int i; \
\
@@ -1121,7 +1085,7 @@ void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
}
#define VMUL_DO_ODD(name, mul_element, mul_access, prod_access, cast) \
- void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ void helper_V##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
int i; \
\
@@ -1132,55 +1096,145 @@ void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
}
#define VMUL(suffix, mul_element, mul_access, prod_access, cast) \
- VMUL_DO_EVN(mule##suffix, mul_element, mul_access, prod_access, cast) \
- VMUL_DO_ODD(mulo##suffix, mul_element, mul_access, prod_access, cast)
-VMUL(sb, s8, VsrSB, VsrSH, int16_t)
-VMUL(sh, s16, VsrSH, VsrSW, int32_t)
-VMUL(sw, s32, VsrSW, VsrSD, int64_t)
-VMUL(ub, u8, VsrB, VsrH, uint16_t)
-VMUL(uh, u16, VsrH, VsrW, uint32_t)
-VMUL(uw, u32, VsrW, VsrD, uint64_t)
+ VMUL_DO_EVN(MULE##suffix, mul_element, mul_access, prod_access, cast) \
+ VMUL_DO_ODD(MULO##suffix, mul_element, mul_access, prod_access, cast)
+VMUL(SB, s8, VsrSB, VsrSH, int16_t)
+VMUL(SH, s16, VsrSH, VsrSW, int32_t)
+VMUL(SW, s32, VsrSW, VsrSD, int64_t)
+VMUL(UB, u8, VsrB, VsrH, uint16_t)
+VMUL(UH, u16, VsrH, VsrW, uint32_t)
+VMUL(UW, u32, VsrW, VsrD, uint64_t)
#undef VMUL_DO_EVN
#undef VMUL_DO_ODD
#undef VMUL
-void helper_vmulhsw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
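+/*
+ * xxpermx: for each result byte, if the top three bits of the matching
+ * byte of the permute control vector equal UIM, select the byte indexed
+ * by its low five bits from the concatenation s0:s1; otherwise the
+ * result byte is zero.
+ */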
+void helper_XXPERMX(ppc_vsr_t *t, ppc_vsr_t *s0, ppc_vsr_t *s1, ppc_vsr_t *pcv,
+ target_ulong uim)
{
- int i;
+ int i, idx;
+ ppc_vsr_t tmp = { .u64 = {0, 0} };
+
+ for (i = 0; i < ARRAY_SIZE(t->u8); i++) {
+ if ((pcv->VsrB(i) >> 5) == uim) {
+ idx = pcv->VsrB(i) & 0x1f;
+ if (idx < ARRAY_SIZE(t->u8)) {
+ tmp.VsrB(i) = s0->VsrB(idx);
+ } else {
+ tmp.VsrB(i) = s1->VsrB(idx - ARRAY_SIZE(t->u8));
+ }
+ }
+ }
- for (i = 0; i < 4; i++) {
- r->s32[i] = (int32_t)(((int64_t)a->s32[i] * (int64_t)b->s32[i]) >> 32);
+ *t = tmp;
+}
+
+void helper_VDIVSQ(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b)
+{
+ Int128 neg1 = int128_makes64(-1);
+ Int128 int128_min = int128_make128(0, INT64_MIN);
+ if (likely(int128_nz(b->s128) &&
+ (int128_ne(a->s128, int128_min) || int128_ne(b->s128, neg1)))) {
+ t->s128 = int128_divs(a->s128, b->s128);
+ } else {
+ t->s128 = a->s128; /* Undefined behavior */
+ }
+}
+
+void helper_VDIVUQ(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b)
+{
+ if (int128_nz(b->s128)) {
+ t->s128 = int128_divu(a->s128, b->s128);
+ } else {
+ t->s128 = a->s128; /* Undefined behavior */
}
}
-void helper_vmulhuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
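+/*
+ * Vector divide extended: the dividend is the element of a extended with
+ * 64 zero bits (a << 64); only the low 64 bits of the quotient are kept.
+ */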
+void helper_VDIVESD(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b)
{
int i;
+ int64_t high;
+ uint64_t low;
+ for (i = 0; i < 2; i++) {
+ high = a->s64[i];
+ low = 0;
+ if (unlikely((high == INT64_MIN && b->s64[i] == -1) || !b->s64[i])) {
+ t->s64[i] = a->s64[i]; /* Undefined behavior */
+ } else {
+ divs128(&low, &high, b->s64[i]);
+ t->s64[i] = low;
+ }
+ }
+}
- for (i = 0; i < 4; i++) {
- r->u32[i] = (uint32_t)(((uint64_t)a->u32[i] *
- (uint64_t)b->u32[i]) >> 32);
+void helper_VDIVEUD(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i;
+ uint64_t high, low;
+ for (i = 0; i < 2; i++) {
+ high = a->u64[i];
+ low = 0;
+ if (unlikely(!b->u64[i])) {
+ t->u64[i] = a->u64[i]; /* Undefined behavior */
+ } else {
+ divu128(&low, &high, b->u64[i]);
+ t->u64[i] = low;
+ }
}
}
-void helper_vmulhsd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+void helper_VDIVESQ(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b)
{
- uint64_t discard;
+ Int128 high, low;
+ Int128 int128_min = int128_make128(0, INT64_MIN);
+ Int128 neg1 = int128_makes64(-1);
- muls64(&discard, &r->u64[0], a->s64[0], b->s64[0]);
- muls64(&discard, &r->u64[1], a->s64[1], b->s64[1]);
+ high = a->s128;
+ low = int128_zero();
+ if (unlikely(!int128_nz(b->s128) ||
+ (int128_eq(b->s128, neg1) && int128_eq(high, int128_min)))) {
+ t->s128 = a->s128; /* Undefined behavior */
+ } else {
+ divs256(&low, &high, b->s128);
+ t->s128 = low;
+ }
}
-void helper_vmulhud(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+void helper_VDIVEUQ(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b)
{
- uint64_t discard;
+ Int128 high, low;
- mulu64(&discard, &r->u64[0], a->u64[0], b->u64[0]);
- mulu64(&discard, &r->u64[1], a->u64[1], b->u64[1]);
+ high = a->s128;
+ low = int128_zero();
+ if (unlikely(!int128_nz(b->s128))) {
+ t->s128 = a->s128; /* Undefined behavior */
+ } else {
+ divu256(&low, &high, b->s128);
+ t->s128 = low;
+ }
}
-void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
- ppc_avr_t *c)
+void helper_VMODSQ(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b)
+{
+ Int128 neg1 = int128_makes64(-1);
+ Int128 int128_min = int128_make128(0, INT64_MIN);
+ if (likely(int128_nz(b->s128) &&
+ (int128_ne(a->s128, int128_min) || int128_ne(b->s128, neg1)))) {
+ t->s128 = int128_rems(a->s128, b->s128);
+ } else {
+ t->s128 = int128_zero(); /* Undefined behavior */
+ }
+}
+
+void helper_VMODUQ(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b)
+{
+ if (likely(int128_nz(b->s128))) {
+ t->s128 = int128_remu(a->s128, b->s128);
+ } else {
+ t->s128 = int128_zero(); /* Undefined behavior */
+ }
+}
+
+void helper_VPERM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
ppc_avr_t result;
int i;
@@ -1198,8 +1252,7 @@ void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
*r = result;
}
-void helper_vpermr(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
- ppc_avr_t *c)
+void helper_VPERMR(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
ppc_avr_t result;
int i;
@@ -1217,18 +1270,122 @@ void helper_vpermr(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
*r = result;
}
-#if defined(HOST_WORDS_BIGENDIAN)
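+/*
+ * xxgenpcv*: generate a permute control vector from the MSB of each
+ * element of b.  The _exp variants build an "expand" PCV (consecutive
+ * source indices are routed to the selected element positions, the other
+ * positions keep indices 0x10..0x1F), the _comp variants a "compress"
+ * PCV (the indices of the selected elements are packed together);
+ * _be/_le produce big- and little-endian byte numbering.
+ */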
+#define XXGENPCV_BE_EXP(NAME, SZ) \
+void glue(helper_, glue(NAME, _be_exp))(ppc_vsr_t *t, ppc_vsr_t *b) \
+{ \
+ ppc_vsr_t tmp; \
+ \
+ /* Initialize tmp with the result of an all-zeros mask */ \
+ tmp.VsrD(0) = 0x1011121314151617; \
+ tmp.VsrD(1) = 0x18191A1B1C1D1E1F; \
+ \
+ /* Iterate over the most significant byte of each element */ \
+ for (int i = 0, j = 0; i < ARRAY_SIZE(b->u8); i += SZ) { \
+ if (b->VsrB(i) & 0x80) { \
+ /* Update each byte of the element */ \
+ for (int k = 0; k < SZ; k++) { \
+ tmp.VsrB(i + k) = j + k; \
+ } \
+ j += SZ; \
+ } \
+ } \
+ \
+ *t = tmp; \
+}
+
+#define XXGENPCV_BE_COMP(NAME, SZ) \
+void glue(helper_, glue(NAME, _be_comp))(ppc_vsr_t *t, ppc_vsr_t *b)\
+{ \
+ ppc_vsr_t tmp = { .u64 = { 0, 0 } }; \
+ \
+ /* Iterate over the most significant byte of each element */ \
+ for (int i = 0, j = 0; i < ARRAY_SIZE(b->u8); i += SZ) { \
+ if (b->VsrB(i) & 0x80) { \
+ /* Update each byte of the element */ \
+ for (int k = 0; k < SZ; k++) { \
+ tmp.VsrB(j + k) = i + k; \
+ } \
+ j += SZ; \
+ } \
+ } \
+ \
+ *t = tmp; \
+}
+
+#define XXGENPCV_LE_EXP(NAME, SZ) \
+void glue(helper_, glue(NAME, _le_exp))(ppc_vsr_t *t, ppc_vsr_t *b) \
+{ \
+ ppc_vsr_t tmp; \
+ \
+ /* Initialize tmp with the result of an all-zeros mask */ \
+ tmp.VsrD(0) = 0x1F1E1D1C1B1A1918; \
+ tmp.VsrD(1) = 0x1716151413121110; \
+ \
+ /* Iterate over the most significant byte of each element */ \
+ for (int i = 0, j = 0; i < ARRAY_SIZE(b->u8); i += SZ) { \
+ /* Reverse indexing of "i" */ \
+ const int idx = ARRAY_SIZE(b->u8) - i - SZ; \
+ if (b->VsrB(idx) & 0x80) { \
+ /* Update each byte of the element */ \
+ for (int k = 0, rk = SZ - 1; k < SZ; k++, rk--) { \
+ tmp.VsrB(idx + rk) = j + k; \
+ } \
+ j += SZ; \
+ } \
+ } \
+ \
+ *t = tmp; \
+}
+
+#define XXGENPCV_LE_COMP(NAME, SZ) \
+void glue(helper_, glue(NAME, _le_comp))(ppc_vsr_t *t, ppc_vsr_t *b)\
+{ \
+ ppc_vsr_t tmp = { .u64 = { 0, 0 } }; \
+ \
+ /* Iterate over the most significant byte of each element */ \
+ for (int i = 0, j = 0; i < ARRAY_SIZE(b->u8); i += SZ) { \
+ if (b->VsrB(ARRAY_SIZE(b->u8) - i - SZ) & 0x80) { \
+ /* Update each byte of the element */ \
+ for (int k = 0, rk = SZ - 1; k < SZ; k++, rk--) { \
+ /* Reverse indexing of "j" */ \
+ const int idx = ARRAY_SIZE(b->u8) - j - SZ; \
+ tmp.VsrB(idx + rk) = i + k; \
+ } \
+ j += SZ; \
+ } \
+ } \
+ \
+ *t = tmp; \
+}
+
+#define XXGENPCV(NAME, SZ) \
+ XXGENPCV_BE_EXP(NAME, SZ) \
+ XXGENPCV_BE_COMP(NAME, SZ) \
+ XXGENPCV_LE_EXP(NAME, SZ) \
+ XXGENPCV_LE_COMP(NAME, SZ) \
+
+XXGENPCV(XXGENPCVBM, 1)
+XXGENPCV(XXGENPCVHM, 2)
+XXGENPCV(XXGENPCVWM, 4)
+XXGENPCV(XXGENPCVDM, 8)
+
+#undef XXGENPCV_BE_EXP
+#undef XXGENPCV_BE_COMP
+#undef XXGENPCV_LE_EXP
+#undef XXGENPCV_LE_COMP
+#undef XXGENPCV
+
+#if HOST_BIG_ENDIAN
#define VBPERMQ_INDEX(avr, i) ((avr)->u8[(i)])
#define VBPERMD_INDEX(i) (i)
#define VBPERMQ_DW(index) (((index) & 0x40) != 0)
-#define EXTRACT_BIT(avr, i, index) (extract64((avr)->u64[i], index, 1))
#else
#define VBPERMQ_INDEX(avr, i) ((avr)->u8[15 - (i)])
#define VBPERMD_INDEX(i) (1 - i)
#define VBPERMQ_DW(index) (((index) & 0x40) == 0)
-#define EXTRACT_BIT(avr, i, index) \
- (extract64((avr)->u64[1 - i], 63 - index, 1))
#endif
+#define EXTRACT_BIT(avr, i, index) \
+ (extract64((avr)->VsrD(i), 63 - index, 1))
void helper_vbpermd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
@@ -1268,77 +1425,42 @@ void helper_vbpermq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
#undef VBPERMQ_INDEX
#undef VBPERMQ_DW
-#define PMSUM(name, srcfld, trgfld, trgtyp) \
-void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
-{ \
- int i, j; \
- trgtyp prod[sizeof(ppc_avr_t) / sizeof(a->srcfld[0])]; \
- \
- VECTOR_FOR_INORDER_I(i, srcfld) { \
- prod[i] = 0; \
- for (j = 0; j < sizeof(a->srcfld[0]) * 8; j++) { \
- if (a->srcfld[i] & (1ull << j)) { \
- prod[i] ^= ((trgtyp)b->srcfld[i] << j); \
- } \
- } \
- } \
- \
- VECTOR_FOR_INORDER_I(i, trgfld) { \
- r->trgfld[i] = prod[2 * i] ^ prod[2 * i + 1]; \
- } \
-}
-
-PMSUM(vpmsumb, u8, u16, uint16_t)
-PMSUM(vpmsumh, u16, u32, uint32_t)
-PMSUM(vpmsumw, u32, u64, uint64_t)
-
-void helper_vpmsumd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
-{
-
-#ifdef CONFIG_INT128
- int i, j;
- __uint128_t prod[2];
-
- VECTOR_FOR_INORDER_I(i, u64) {
- prod[i] = 0;
- for (j = 0; j < 64; j++) {
- if (a->u64[i] & (1ull << j)) {
- prod[i] ^= (((__uint128_t)b->u64[i]) << j);
- }
- }
+/*
+ * There is no carry across the two doublewords, so their order does
+ * not matter. Nor is there partial overlap between registers.
+ */
+void helper_vpmsumb(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ for (int i = 0; i < 2; ++i) {
+ uint64_t aa = a->u64[i], bb = b->u64[i];
+ r->u64[i] = clmul_8x4_even(aa, bb) ^ clmul_8x4_odd(aa, bb);
}
+}
- r->u128 = prod[0] ^ prod[1];
-
-#else
- int i, j;
- ppc_avr_t prod[2];
-
- VECTOR_FOR_INORDER_I(i, u64) {
- prod[i].VsrD(1) = prod[i].VsrD(0) = 0;
- for (j = 0; j < 64; j++) {
- if (a->u64[i] & (1ull << j)) {
- ppc_avr_t bshift;
- if (j == 0) {
- bshift.VsrD(0) = 0;
- bshift.VsrD(1) = b->u64[i];
- } else {
- bshift.VsrD(0) = b->u64[i] >> (64 - j);
- bshift.VsrD(1) = b->u64[i] << j;
- }
- prod[i].VsrD(1) ^= bshift.VsrD(1);
- prod[i].VsrD(0) ^= bshift.VsrD(0);
- }
- }
+void helper_vpmsumh(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ for (int i = 0; i < 2; ++i) {
+ uint64_t aa = a->u64[i], bb = b->u64[i];
+ r->u64[i] = clmul_16x2_even(aa, bb) ^ clmul_16x2_odd(aa, bb);
}
+}
- r->VsrD(1) = prod[0].VsrD(1) ^ prod[1].VsrD(1);
- r->VsrD(0) = prod[0].VsrD(0) ^ prod[1].VsrD(0);
-#endif
+void helper_vpmsumw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ for (int i = 0; i < 2; ++i) {
+ uint64_t aa = a->u64[i], bb = b->u64[i];
+ r->u64[i] = clmul_32(aa, bb) ^ clmul_32(aa >> 32, bb >> 32);
+ }
}
+void helper_VPMSUMD(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ Int128 e = clmul_64(a->u64[0], b->u64[0]);
+ Int128 o = clmul_64(a->u64[1], b->u64[1]);
+ r->s128 = int128_xor(e, o);
+}
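The four helpers now delegate to carry-less multiply primitives instead of the open-coded PMSUM macro removed above. As a reference for what one 64-bit lane of vpmsumb computes (each pair of adjacent bytes contributes the XOR of two 8x8 to 16-bit GF(2) products, equivalent to the removed PMSUM expansion), a self-contained sketch:

#include <stdint.h>
#include <stdio.h>

/* Carry-less (GF(2)) multiply of two 8-bit values into 16 bits */
static uint16_t clmul8(uint8_t a, uint8_t b)
{
    uint16_t r = 0;

    for (int i = 0; i < 8; i++) {
        if (a & (1u << i)) {
            r ^= (uint16_t)(b << i);
        }
    }
    return r;
}

/*
 * Reference model of one 64-bit lane of vpmsumb: multiply each pair of
 * corresponding bytes carry-lessly and XOR the even/odd products into
 * 16-bit result fields, matching the even/odd split used above.
 */
static uint64_t vpmsumb_lane(uint64_t a, uint64_t b)
{
    uint64_t r = 0;

    for (int i = 0; i < 8; i += 2) {
        uint16_t even = clmul8(a >> (8 * i), b >> (8 * i));
        uint16_t odd  = clmul8(a >> (8 * (i + 1)), b >> (8 * (i + 1)));
        r |= (uint64_t)(even ^ odd) << (8 * i);
    }
    return r;
}

int main(void)
{
    printf("%016llx\n",
           (unsigned long long)vpmsumb_lane(0x0102030405060708ULL,
                                            0x1112131415161718ULL));
    return 0;
}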
-#if defined(HOST_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN
#define PKBIG 1
#else
#define PKBIG 0
@@ -1347,7 +1469,7 @@ void helper_vpkpx(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
int i, j;
ppc_avr_t result;
-#if defined(HOST_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN
const ppc_avr_t *x[2] = { a, b };
#else
const ppc_avr_t *x[2] = { b, a };
@@ -1439,40 +1561,33 @@ void helper_vrsqrtefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
}
}
-#define VRLMI(name, size, element, insert) \
-void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
-{ \
- int i; \
- for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
- uint##size##_t src1 = a->element[i]; \
- uint##size##_t src2 = b->element[i]; \
- uint##size##_t src3 = r->element[i]; \
- uint##size##_t begin, end, shift, mask, rot_val; \
- \
- shift = extract##size(src2, 0, 6); \
- end = extract##size(src2, 8, 6); \
- begin = extract##size(src2, 16, 6); \
- rot_val = rol##size(src1, shift); \
- mask = mask_u##size(begin, end); \
- if (insert) { \
- r->element[i] = (rot_val & mask) | (src3 & ~mask); \
- } else { \
- r->element[i] = (rot_val & mask); \
- } \
- } \
-}
-
-VRLMI(vrldmi, 64, u64, 1);
-VRLMI(vrlwmi, 32, u32, 1);
-VRLMI(vrldnm, 64, u64, 0);
-VRLMI(vrlwnm, 32, u32, 0);
-
-void helper_vsel(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
- ppc_avr_t *c)
-{
- r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
- r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
-}
+#define VRLMI(name, size, element, insert) \
+void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t desc) \
+{ \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ uint##size##_t src1 = a->element[i]; \
+ uint##size##_t src2 = b->element[i]; \
+ uint##size##_t src3 = r->element[i]; \
+ uint##size##_t begin, end, shift, mask, rot_val; \
+ \
+ shift = extract##size(src2, 0, 6); \
+ end = extract##size(src2, 8, 6); \
+ begin = extract##size(src2, 16, 6); \
+ rot_val = rol##size(src1, shift); \
+ mask = mask_u##size(begin, end); \
+ if (insert) { \
+ r->element[i] = (rot_val & mask) | (src3 & ~mask); \
+ } else { \
+ r->element[i] = (rot_val & mask); \
+ } \
+ } \
+}
+
+VRLMI(VRLDMI, 64, u64, 1);
+VRLMI(VRLWMI, 32, u32, 1);
+VRLMI(VRLDNM, 64, u64, 0);
+VRLMI(VRLWNM, 32, u32, 0);
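A standalone sketch of one 64-bit element of VRLDMI, restricted to the non-wrapping case (begin <= end); the helper's mask_u64() also produces inverted masks when begin > end:

#include <stdint.h>
#include <stdio.h>

static uint64_t rol64(uint64_t v, unsigned n)
{
    n &= 63;
    return n ? (v << n) | (v >> (64 - n)) : v;
}

/* MSB-0 mask of ones from bit 'begin' through bit 'end' (no wrap-around) */
static uint64_t mask_be(unsigned begin, unsigned end)
{
    return (~0ULL >> begin) & (~0ULL << (63 - end));
}

/* One element of VRLDMI: rotate the source, then insert under the mask */
static uint64_t vrldmi_elem(uint64_t rt, uint64_t ra, uint64_t rb)
{
    unsigned shift = rb & 0x3F;          /* bits 0:5  of rb */
    unsigned end   = (rb >> 8) & 0x3F;   /* bits 8:13       */
    unsigned begin = (rb >> 16) & 0x3F;  /* bits 16:21      */
    uint64_t rot   = rol64(ra, shift);
    uint64_t m     = mask_be(begin, end);

    return (rot & m) | (rt & ~m);
}

int main(void)
{
    /* Rotate left by 8, insert MSB-0 bits 0..15 into rt -> 2345ffffffffffff */
    printf("%016llx\n", (unsigned long long)
           vrldmi_elem(0xFFFFFFFFFFFFFFFFULL, 0x0123456789ABCDEFULL,
                       (0ULL << 16) | (15ULL << 8) | 8));
    return 0;
}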
void helper_vexptefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
@@ -1563,7 +1678,7 @@ void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
int sh = (b->VsrB(0xf) >> 3) & 0xf;
-#if defined(HOST_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN
memmove(&r->u8[0], &a->u8[sh], 16 - sh);
memset(&r->u8[16 - sh], 0, sh);
#else
@@ -1572,27 +1687,75 @@ void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
#endif
}
-#if defined(HOST_WORDS_BIGENDIAN)
-#define VINSERT(suffix, element) \
- void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
- { \
- memmove(&r->u8[index], &b->u8[8 - sizeof(r->element[0])], \
- sizeof(r->element[0])); \
- }
+#if HOST_BIG_ENDIAN
+#define ELEM_ADDR(VEC, IDX, SIZE) (&(VEC)->u8[IDX])
#else
-#define VINSERT(suffix, element) \
- void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
- { \
- uint32_t d = (16 - index) - sizeof(r->element[0]); \
- memmove(&r->u8[d], &b->u8[8], sizeof(r->element[0])); \
- }
+#define ELEM_ADDR(VEC, IDX, SIZE) (&(VEC)->u8[15 - (IDX)] - (SIZE) + 1)
+#endif
+
+#define VINSX(SUFFIX, TYPE) \
+void glue(glue(helper_VINS, SUFFIX), LX)(CPUPPCState *env, ppc_avr_t *t, \
+ uint64_t val, target_ulong index) \
+{ \
+ const int maxidx = ARRAY_SIZE(t->u8) - sizeof(TYPE); \
+ target_long idx = index; \
+ \
+ if (idx < 0 || idx > maxidx) { \
+ idx = idx < 0 ? sizeof(TYPE) - idx : idx; \
+ qemu_log_mask(LOG_GUEST_ERROR, \
+ "Invalid index for Vector Insert Element after 0x" TARGET_FMT_lx \
+ ", RA = " TARGET_FMT_ld " > %d\n", env->nip, idx, maxidx); \
+ } else { \
+ TYPE src = val; \
+ memcpy(ELEM_ADDR(t, idx, sizeof(TYPE)), &src, sizeof(TYPE)); \
+ } \
+}
+VINSX(B, uint8_t)
+VINSX(H, uint16_t)
+VINSX(W, uint32_t)
+VINSX(D, uint64_t)
+#undef ELEM_ADDR
+#undef VINSX
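ELEM_ADDR maps a guest (big-endian) byte index to the host address of the first byte of an element. A sketch of the little-endian-host case, storing a word the way the generated helper_VINSWLX would:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Little-endian host layout of a 16-byte vector register: guest byte 0
 * (the most significant) lives at host offset 15. This mirrors the
 * non-HOST_BIG_ENDIAN ELEM_ADDR above for an element of 'size' bytes.
 */
static void *elem_addr_le(uint8_t *vec, int idx, int size)
{
    return &vec[15 - idx] - size + 1;
}

int main(void)
{
    uint8_t vec[16] = { 0 };
    uint32_t val = 0x11223344;   /* value in host (LE) byte order */

    /* Insert a word at guest byte index 4 */
    memcpy(elem_addr_le(vec, 4, sizeof(val)), &val, sizeof(val));

    /* Host bytes 8..11 now hold 44 33 22 11, i.e. guest bytes 4..7
     * read back as the big-endian word 0x11223344. */
    for (int i = 0; i < 16; i++) {
        printf("%02x%c", vec[i], i == 15 ? '\n' : ' ');
    }
    return 0;
}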
+#if HOST_BIG_ENDIAN
+#define VEXTDVLX(NAME, SIZE) \
+void helper_##NAME(CPUPPCState *env, ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, \
+ target_ulong index) \
+{ \
+ const target_long idx = index; \
+ ppc_avr_t tmp[2] = { *a, *b }; \
+ memset(t, 0, sizeof(*t)); \
+ if (idx >= 0 && idx + SIZE <= sizeof(tmp)) { \
+ memcpy(&t->u8[ARRAY_SIZE(t->u8) / 2 - SIZE], (void *)tmp + idx, SIZE); \
+ } else { \
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for " #NAME " after 0x" \
+ TARGET_FMT_lx ", RC = " TARGET_FMT_ld " > %d\n", \
+ env->nip, idx < 0 ? SIZE - idx : idx, 32 - SIZE); \
+ } \
+}
+#else
+#define VEXTDVLX(NAME, SIZE) \
+void helper_##NAME(CPUPPCState *env, ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, \
+ target_ulong index) \
+{ \
+ const target_long idx = index; \
+ ppc_avr_t tmp[2] = { *b, *a }; \
+ memset(t, 0, sizeof(*t)); \
+ if (idx >= 0 && idx + SIZE <= sizeof(tmp)) { \
+ memcpy(&t->u8[ARRAY_SIZE(t->u8) / 2], \
+ (void *)tmp + sizeof(tmp) - SIZE - idx, SIZE); \
+ } else { \
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for " #NAME " after 0x" \
+ TARGET_FMT_lx ", RC = " TARGET_FMT_ld " > %d\n", \
+ env->nip, idx < 0 ? SIZE - idx : idx, 32 - SIZE); \
+ } \
+}
#endif
-VINSERT(b, u8)
-VINSERT(h, u16)
-VINSERT(w, u32)
-VINSERT(d, u64)
-#undef VINSERT
-#if defined(HOST_WORDS_BIGENDIAN)
+VEXTDVLX(VEXTDUBVLX, 1)
+VEXTDVLX(VEXTDUHVLX, 2)
+VEXTDVLX(VEXTDUWVLX, 4)
+VEXTDVLX(VEXTDDVLX, 8)
+#undef VEXTDVLX
+#if HOST_BIG_ENDIAN
#define VEXTRACT(suffix, element) \
void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
{ \
@@ -1618,8 +1781,35 @@ VEXTRACT(uw, u32)
VEXTRACT(d, u64)
#undef VEXTRACT
-void helper_xxextractuw(CPUPPCState *env, ppc_vsr_t *xt,
- ppc_vsr_t *xb, uint32_t index)
+#define VSTRI(NAME, ELEM, NUM_ELEMS, LEFT) \
+uint32_t helper_##NAME(ppc_avr_t *t, ppc_avr_t *b) \
+{ \
+ int i, idx, crf = 0; \
+ \
+ for (i = 0; i < NUM_ELEMS; i++) { \
+ idx = LEFT ? i : NUM_ELEMS - i - 1; \
+ if (b->Vsr##ELEM(idx)) { \
+ t->Vsr##ELEM(idx) = b->Vsr##ELEM(idx); \
+ } else { \
+ crf = 0b0010; \
+ break; \
+ } \
+ } \
+ \
+ for (; i < NUM_ELEMS; i++) { \
+ idx = LEFT ? i : NUM_ELEMS - i - 1; \
+ t->Vsr##ELEM(idx) = 0; \
+ } \
+ \
+ return crf; \
+}
+VSTRI(VSTRIBL, B, 16, true)
+VSTRI(VSTRIBR, B, 16, false)
+VSTRI(VSTRIHL, H, 8, true)
+VSTRI(VSTRIHR, H, 8, false)
+#undef VSTRI
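A scalar model of VSTRIBL (the left-to-right byte form): copy elements until the first NUL, zero the remainder, and report the NUL with the same 0b0010 CR encoding used above:

#include <stdint.h>
#include <stdio.h>

static uint32_t vstribl(uint8_t t[16], const uint8_t b[16])
{
    uint32_t crf = 0;
    int i;

    for (i = 0; i < 16; i++) {
        if (b[i] == 0) {
            crf = 0b0010;   /* a null element was found */
            break;
        }
        t[i] = b[i];
    }
    for (; i < 16; i++) {
        t[i] = 0;           /* zero-fill from the null element onwards */
    }
    return crf;
}

int main(void)
{
    uint8_t in[16] = "hello";   /* NUL-padded by the initializer */
    uint8_t out[16];

    printf("crf=%u out=%.16s\n", vstribl(out, in), (const char *)out);
    return 0;
}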
+
+void helper_XXEXTRACTUW(ppc_vsr_t *xt, ppc_vsr_t *xb, uint32_t index)
{
ppc_vsr_t t = { };
size_t es = sizeof(uint32_t);
@@ -1634,8 +1824,7 @@ void helper_xxextractuw(CPUPPCState *env, ppc_vsr_t *xt,
*xt = t;
}
-void helper_xxinsertw(CPUPPCState *env, ppc_vsr_t *xt,
- ppc_vsr_t *xb, uint32_t index)
+void helper_XXINSERTW(ppc_vsr_t *xt, ppc_vsr_t *xb, uint32_t index)
{
ppc_vsr_t t = *xt;
size_t es = sizeof(uint32_t);
@@ -1649,38 +1838,67 @@ void helper_xxinsertw(CPUPPCState *env, ppc_vsr_t *xt,
*xt = t;
}
-#define VEXT_SIGNED(name, element, cast) \
-void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \
-{ \
- int i; \
- for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
- r->element[i] = (cast)b->element[i]; \
- } \
+void helper_XXEVAL(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c,
+ uint32_t desc)
+{
+ /*
+ * Instead of processing imm bit-by-bit, we'll skip the computation of
+ * conjunctions whose corresponding bit is unset.
+ */
+ int bit, imm = simd_data(desc);
+ Int128 conj, disj = int128_zero();
+
+ /* Iterate over set bits from the least to the most significant bit */
+ while (imm) {
+ /*
+ * Get the next bit to be processed with ctz64. Invert the result of
+ * ctz64 to match the indexing used by PowerISA.
+ */
+ bit = 7 - ctzl(imm);
+ if (bit & 0x4) {
+ conj = a->s128;
+ } else {
+ conj = int128_not(a->s128);
+ }
+ if (bit & 0x2) {
+ conj = int128_and(conj, b->s128);
+ } else {
+ conj = int128_and(conj, int128_not(b->s128));
+ }
+ if (bit & 0x1) {
+ conj = int128_and(conj, c->s128);
+ } else {
+ conj = int128_and(conj, int128_not(c->s128));
+ }
+ disj = int128_or(disj, conj);
+
+ /* Unset the least significant bit that is set */
+ imm &= imm - 1;
+ }
+
+ t->s128 = disj;
}
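The loop above ORs together the (A, B, C) minterms selected by imm, with the least significant imm bit naming the A & B & C minterm. A scalar model over a single 64-bit word:

#include <stdint.h>
#include <stdio.h>

/*
 * Scalar model of XXEVAL on one 64-bit word: imm is an 8-entry truth
 * table, processed here as the eight possible minterms of (a, b, c).
 */
static uint64_t xxeval64(uint64_t a, uint64_t b, uint64_t c, unsigned imm)
{
    uint64_t r = 0;

    for (int bit = 7; bit >= 0; bit--) {
        if (imm & (1u << (7 - bit))) {
            uint64_t conj = (bit & 4 ? a : ~a)
                          & (bit & 2 ? b : ~b)
                          & (bit & 1 ? c : ~c);
            r |= conj;
        }
    }
    return r;
}

int main(void)
{
    uint64_t a = 0xF0F0F0F0F0F0F0F0ULL;
    uint64_t b = 0xCCCCCCCCCCCCCCCCULL;
    uint64_t c = 0xAAAAAAAAAAAAAAAAULL;

    /* imm = 0x01 selects only A & B & C      -> 8080808080808080 */
    printf("%016llx\n", (unsigned long long)xxeval64(a, b, c, 0x01));
    /* imm = 0x80 selects only ~A & ~B & ~C   -> 0101010101010101 */
    printf("%016llx\n", (unsigned long long)xxeval64(a, b, c, 0x80));
    return 0;
}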
-VEXT_SIGNED(vextsb2w, s32, int8_t)
-VEXT_SIGNED(vextsb2d, s64, int8_t)
-VEXT_SIGNED(vextsh2w, s32, int16_t)
-VEXT_SIGNED(vextsh2d, s64, int16_t)
-VEXT_SIGNED(vextsw2d, s64, int32_t)
-#undef VEXT_SIGNED
-#define VNEG(name, element) \
-void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \
-{ \
- int i; \
- for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
- r->element[i] = -b->element[i]; \
- } \
+#define XXBLEND(name, sz) \
+void glue(helper_XXBLENDV, name)(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, \
+ ppc_avr_t *c, uint32_t desc) \
+{ \
+ for (int i = 0; i < ARRAY_SIZE(t->glue(u, sz)); i++) { \
+ t->glue(u, sz)[i] = (c->glue(s, sz)[i] >> (sz - 1)) ? \
+ b->glue(u, sz)[i] : a->glue(u, sz)[i]; \
+ } \
}
-VNEG(vnegw, s32)
-VNEG(vnegd, s64)
-#undef VNEG
+XXBLEND(B, 8)
+XXBLEND(H, 16)
+XXBLEND(W, 32)
+XXBLEND(D, 64)
+#undef XXBLEND
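Each result element is chosen by the sign bit of the corresponding element of c (the `>> (sz - 1)` above is an arithmetic shift of the signed element). A one-element model of the word form:

#include <stdint.h>
#include <stdio.h>

/* One word of XXBLENDVW: the sign bit of c selects b over a */
static uint32_t xxblendvw_elem(uint32_t a, uint32_t b, uint32_t c)
{
    return ((int32_t)c < 0) ? b : a;
}

int main(void)
{
    printf("%08x %08x\n",
           xxblendvw_elem(0x11111111, 0x22222222, 0x00000000),   /* picks a */
           xxblendvw_elem(0x11111111, 0x22222222, 0x80000000));  /* picks b */
    return 0;
}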
void helper_vsro(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
int sh = (b->VsrB(0xf) >> 3) & 0xf;
-#if defined(HOST_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN
memmove(&r->u8[sh], &a->u8[0], 16 - sh);
memset(&r->u8[0], 0, sh);
#else
@@ -1689,15 +1907,6 @@ void helper_vsro(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
#endif
}
-void helper_vsubcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
- r->u32[i] = a->u32[i] >= b->u32[i];
- }
-}
-
void helper_vsumsws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
int64_t t;
@@ -1797,7 +2006,7 @@ void helper_vsum4ubs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
}
}
-#if defined(HOST_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN
#define UPKHI 1
#define UPKLO 0
#else
@@ -1811,13 +2020,13 @@ void helper_vsum4ubs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
ppc_avr_t result; \
\
for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
- uint16_t e = b->u16[hi ? i : i + 4]; \
- uint8_t a = (e >> 15) ? 0xff : 0; \
- uint8_t r = (e >> 10) & 0x1f; \
- uint8_t g = (e >> 5) & 0x1f; \
- uint8_t b = e & 0x1f; \
+ uint16_t _e = b->u16[hi ? i : i + 4]; \
+ uint8_t _a = (_e >> 15) ? 0xff : 0; \
+ uint8_t _r = (_e >> 10) & 0x1f; \
+ uint8_t _g = (_e >> 5) & 0x1f; \
+ uint8_t _b = _e & 0x1f; \
\
- result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
+ result.u32[i] = (_a << 24) | (_r << 16) | (_g << 8) | _b; \
} \
*r = result; \
}
@@ -1904,189 +2113,66 @@ VGENERIC_DO(popcntd, u64)
#undef VGENERIC_DO
-#if defined(HOST_WORDS_BIGENDIAN)
-#define QW_ONE { .u64 = { 0, 1 } }
-#else
-#define QW_ONE { .u64 = { 1, 0 } }
-#endif
-
-#ifndef CONFIG_INT128
-
-static inline void avr_qw_not(ppc_avr_t *t, ppc_avr_t a)
+void helper_VADDUQM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
- t->u64[0] = ~a.u64[0];
- t->u64[1] = ~a.u64[1];
+ r->s128 = int128_add(a->s128, b->s128);
}
-static int avr_qw_cmpu(ppc_avr_t a, ppc_avr_t b)
+void helper_VADDEUQM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
- if (a.VsrD(0) < b.VsrD(0)) {
- return -1;
- } else if (a.VsrD(0) > b.VsrD(0)) {
- return 1;
- } else if (a.VsrD(1) < b.VsrD(1)) {
- return -1;
- } else if (a.VsrD(1) > b.VsrD(1)) {
- return 1;
- } else {
- return 0;
- }
+ r->s128 = int128_add(int128_add(a->s128, b->s128),
+ int128_make64(int128_getlo(c->s128) & 1));
}
-static void avr_qw_add(ppc_avr_t *t, ppc_avr_t a, ppc_avr_t b)
+void helper_VADDCUQ(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
- t->VsrD(1) = a.VsrD(1) + b.VsrD(1);
- t->VsrD(0) = a.VsrD(0) + b.VsrD(0) +
- (~a.VsrD(1) < b.VsrD(1));
-}
-
-static int avr_qw_addc(ppc_avr_t *t, ppc_avr_t a, ppc_avr_t b)
-{
- ppc_avr_t not_a;
- t->VsrD(1) = a.VsrD(1) + b.VsrD(1);
- t->VsrD(0) = a.VsrD(0) + b.VsrD(0) +
- (~a.VsrD(1) < b.VsrD(1));
- avr_qw_not(&not_a, a);
- return avr_qw_cmpu(not_a, b) < 0;
-}
-
-#endif
-
-void helper_vadduqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
-{
-#ifdef CONFIG_INT128
- r->u128 = a->u128 + b->u128;
-#else
- avr_qw_add(r, *a, *b);
-#endif
-}
-
-void helper_vaddeuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
-{
-#ifdef CONFIG_INT128
- r->u128 = a->u128 + b->u128 + (c->u128 & 1);
-#else
-
- if (c->VsrD(1) & 1) {
- ppc_avr_t tmp;
-
- tmp.VsrD(0) = 0;
- tmp.VsrD(1) = c->VsrD(1) & 1;
- avr_qw_add(&tmp, *a, tmp);
- avr_qw_add(r, tmp, *b);
- } else {
- avr_qw_add(r, *a, *b);
- }
-#endif
-}
-
-void helper_vaddcuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
-{
-#ifdef CONFIG_INT128
- r->u128 = (~a->u128 < b->u128);
-#else
- ppc_avr_t not_a;
-
- avr_qw_not(&not_a, *a);
-
+ r->VsrD(1) = int128_ult(int128_not(a->s128), b->s128);
r->VsrD(0) = 0;
- r->VsrD(1) = (avr_qw_cmpu(not_a, *b) < 0);
-#endif
}
-void helper_vaddecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+void helper_VADDECUQ(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
-#ifdef CONFIG_INT128
- int carry_out = (~a->u128 < b->u128);
- if (!carry_out && (c->u128 & 1)) {
- carry_out = ((a->u128 + b->u128 + 1) == 0) &&
- ((a->u128 != 0) || (b->u128 != 0));
- }
- r->u128 = carry_out;
-#else
-
- int carry_in = c->VsrD(1) & 1;
- int carry_out = 0;
- ppc_avr_t tmp;
-
- carry_out = avr_qw_addc(&tmp, *a, *b);
+ bool carry_out = int128_ult(int128_not(a->s128), b->s128),
+ carry_in = int128_getlo(c->s128) & 1;
if (!carry_out && carry_in) {
- ppc_avr_t one = QW_ONE;
- carry_out = avr_qw_addc(&tmp, tmp, one);
+ carry_out = (int128_nz(a->s128) || int128_nz(b->s128)) &&
+ int128_eq(int128_add(a->s128, b->s128), int128_makes64(-1));
}
+
r->VsrD(0) = 0;
r->VsrD(1) = carry_out;
-#endif
}
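A scalar model of the VADDECUQ carry-out above, written with the compiler's __uint128_t where the helper uses the Int128 wrappers (assumes a compiler that provides __int128):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

static bool vaddecuq_carry(__uint128_t a, __uint128_t b, bool carry_in)
{
    bool carry_out = ~a < b;    /* a + b already overflows on its own */

    if (!carry_out && carry_in) {
        /* carry-in tips it over only if a + b is all-ones and non-trivial */
        carry_out = (a || b) && (a + b == ~(__uint128_t)0);
    }
    return carry_out;
}

int main(void)
{
    __uint128_t ones = ~(__uint128_t)0;

    printf("%d %d %d\n",
           vaddecuq_carry(ones, 1, false),   /* 1: plain overflow        */
           vaddecuq_carry(ones, 0, true),    /* 1: carry-in tips it over */
           vaddecuq_carry(0, 0, true));      /* 0: 0 + 0 + 1, no carry   */
    return 0;
}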
-void helper_vsubuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+void helper_VSUBUQM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
-#ifdef CONFIG_INT128
- r->u128 = a->u128 - b->u128;
-#else
- ppc_avr_t tmp;
- ppc_avr_t one = QW_ONE;
-
- avr_qw_not(&tmp, *b);
- avr_qw_add(&tmp, *a, tmp);
- avr_qw_add(r, tmp, one);
-#endif
+ r->s128 = int128_sub(a->s128, b->s128);
}
-void helper_vsubeuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+void helper_VSUBEUQM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
-#ifdef CONFIG_INT128
- r->u128 = a->u128 + ~b->u128 + (c->u128 & 1);
-#else
- ppc_avr_t tmp, sum;
-
- avr_qw_not(&tmp, *b);
- avr_qw_add(&sum, *a, tmp);
-
- tmp.VsrD(0) = 0;
- tmp.VsrD(1) = c->VsrD(1) & 1;
- avr_qw_add(r, sum, tmp);
-#endif
+ r->s128 = int128_add(int128_add(a->s128, int128_not(b->s128)),
+ int128_make64(int128_getlo(c->s128) & 1));
}
-void helper_vsubcuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+void helper_VSUBCUQ(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
-#ifdef CONFIG_INT128
- r->u128 = (~a->u128 < ~b->u128) ||
- (a->u128 + ~b->u128 == (__uint128_t)-1);
-#else
- int carry = (avr_qw_cmpu(*a, *b) > 0);
- if (!carry) {
- ppc_avr_t tmp;
- avr_qw_not(&tmp, *b);
- avr_qw_add(&tmp, *a, tmp);
- carry = ((tmp.VsrSD(0) == -1ull) && (tmp.VsrSD(1) == -1ull));
- }
+ Int128 tmp = int128_not(b->s128);
+
+ r->VsrD(1) = int128_ult(int128_not(a->s128), tmp) ||
+ int128_eq(int128_add(a->s128, tmp), int128_makes64(-1));
r->VsrD(0) = 0;
- r->VsrD(1) = carry;
-#endif
}
-void helper_vsubecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+void helper_VSUBECUQ(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
-#ifdef CONFIG_INT128
- r->u128 =
- (~a->u128 < ~b->u128) ||
- ((c->u128 & 1) && (a->u128 + ~b->u128 == (__uint128_t)-1));
-#else
- int carry_in = c->VsrD(1) & 1;
- int carry_out = (avr_qw_cmpu(*a, *b) > 0);
- if (!carry_out && carry_in) {
- ppc_avr_t tmp;
- avr_qw_not(&tmp, *b);
- avr_qw_add(&tmp, *a, tmp);
- carry_out = ((tmp.VsrD(0) == -1ull) && (tmp.VsrD(1) == -1ull));
- }
+ Int128 tmp = int128_not(b->s128);
+ bool carry_out = int128_ult(int128_not(a->s128), tmp),
+ carry_in = int128_getlo(c->s128) & 1;
+ r->VsrD(1) = carry_out || (carry_in && int128_eq(int128_add(a->s128, tmp),
+ int128_makes64(-1)));
r->VsrD(0) = 0;
- r->VsrD(1) = carry_out;
-#endif
}
#define BCD_PLUS_PREF_1 0xC
@@ -2480,40 +2566,76 @@ uint32_t helper_bcdctz(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
return cr;
}
+/**
+ * Compare 2 128-bit unsigned integers, passed in as unsigned 64-bit pairs
+ *
+ * Returns:
+ * > 0 if ahi|alo > bhi|blo,
+ * 0 if ahi|alo == bhi|blo,
+ * < 0 if ahi|alo < bhi|blo
+ */
+static inline int ucmp128(uint64_t alo, uint64_t ahi,
+ uint64_t blo, uint64_t bhi)
+{
+ return (ahi == bhi) ?
+ (alo > blo ? 1 : (alo == blo ? 0 : -1)) :
+ (ahi > bhi ? 1 : -1);
+}
+
uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
{
int i;
- int cr = 0;
+ int cr;
uint64_t lo_value;
uint64_t hi_value;
+ uint64_t rem;
ppc_avr_t ret = { .u64 = { 0, 0 } };
if (b->VsrSD(0) < 0) {
lo_value = -b->VsrSD(1);
hi_value = ~b->VsrD(0) + !lo_value;
bcd_put_digit(&ret, 0xD, 0);
+
+ cr = CRF_LT;
} else {
lo_value = b->VsrD(1);
hi_value = b->VsrD(0);
bcd_put_digit(&ret, bcd_preferred_sgn(0, ps), 0);
- }
- if (divu128(&lo_value, &hi_value, 1000000000000000ULL) ||
- lo_value > 9999999999999999ULL) {
- cr = CRF_SO;
+ if (hi_value == 0 && lo_value == 0) {
+ cr = CRF_EQ;
+ } else {
+ cr = CRF_GT;
+ }
}
- for (i = 1; i < 16; hi_value /= 10, i++) {
- bcd_put_digit(&ret, hi_value % 10, i);
- }
+ /*
+ * Check src limits: abs(src) <= 10^31 - 1
+ *
+ * 10^31 - 1 = 0x0000007e37be2022 c0914b267fffffff
+ */
+ if (ucmp128(lo_value, hi_value,
+ 0xc0914b267fffffffULL, 0x7e37be2022ULL) > 0) {
+ cr |= CRF_SO;
- for (; i < 32; lo_value /= 10, i++) {
- bcd_put_digit(&ret, lo_value % 10, i);
- }
+ /*
+ * According to the ISA, if src wouldn't fit in the destination
+ * register, the result is undefined.
+ * In that case, we leave r unchanged.
+ */
+ } else {
+ rem = divu128(&lo_value, &hi_value, 1000000000000000ULL);
- cr |= bcd_cmp_zero(&ret);
+ for (i = 1; i < 16; rem /= 10, i++) {
+ bcd_put_digit(&ret, rem % 10, i);
+ }
- *r = ret;
+ for (; i < 32; lo_value /= 10, i++) {
+ bcd_put_digit(&ret, lo_value % 10, i);
+ }
+
+ *r = ret;
+ }
return cr;
}
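After the range check, divu128() splits the magnitude into a quotient and a remainder modulo 10^15: digits 1-15 of the BCD result come from the remainder and digits 16-31 from the quotient. A standalone sketch of that digit extraction (assumes __int128 support):

#include <stdint.h>
#include <stdio.h>

static void to_bcd_digits(__uint128_t val, uint8_t digit[32])
{
    const uint64_t tenp15 = 1000000000000000ULL;
    uint64_t rem = (uint64_t)(val % tenp15);
    uint64_t quo = (uint64_t)(val / tenp15);   /* < 10^16 for in-range inputs */
    int i;

    digit[0] = 0xC;                            /* preferred plus sign */
    for (i = 1; i < 16; rem /= 10, i++) {
        digit[i] = rem % 10;                   /* low 15 decimal digits  */
    }
    for (; i < 32; quo /= 10, i++) {
        digit[i] = quo % 10;                   /* high 16 decimal digits */
    }
}

int main(void)
{
    uint8_t d[32];

    to_bcd_digits((__uint128_t)1234567890123456789ULL, d);
    for (int i = 31; i > 0; i--) {
        printf("%d", d[i]);                    /* most significant first */
    }
    printf(" sign=%X\n", d[0]);
    return 0;
}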
@@ -2804,59 +2926,30 @@ void helper_vsbox(ppc_avr_t *r, ppc_avr_t *a)
void helper_vcipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
- ppc_avr_t result;
- int i;
+ AESState *ad = (AESState *)r;
+ AESState *st = (AESState *)a;
+ AESState *rk = (AESState *)b;
- VECTOR_FOR_INORDER_I(i, u32) {
- result.VsrW(i) = b->VsrW(i) ^
- (AES_Te0[a->VsrB(AES_shifts[4 * i + 0])] ^
- AES_Te1[a->VsrB(AES_shifts[4 * i + 1])] ^
- AES_Te2[a->VsrB(AES_shifts[4 * i + 2])] ^
- AES_Te3[a->VsrB(AES_shifts[4 * i + 3])]);
- }
- *r = result;
+ aesenc_SB_SR_MC_AK(ad, st, rk, true);
}
void helper_vcipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
- ppc_avr_t result;
- int i;
-
- VECTOR_FOR_INORDER_I(i, u8) {
- result.VsrB(i) = b->VsrB(i) ^ (AES_sbox[a->VsrB(AES_shifts[i])]);
- }
- *r = result;
+ aesenc_SB_SR_AK((AESState *)r, (AESState *)a, (AESState *)b, true);
}
void helper_vncipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
- /* This differs from what is written in ISA V2.07. The RTL is */
- /* incorrect and will be fixed in V2.07B. */
- int i;
- ppc_avr_t tmp;
+ AESState *ad = (AESState *)r;
+ AESState *st = (AESState *)a;
+ AESState *rk = (AESState *)b;
- VECTOR_FOR_INORDER_I(i, u8) {
- tmp.VsrB(i) = b->VsrB(i) ^ AES_isbox[a->VsrB(AES_ishifts[i])];
- }
-
- VECTOR_FOR_INORDER_I(i, u32) {
- r->VsrW(i) =
- AES_imc[tmp.VsrB(4 * i + 0)][0] ^
- AES_imc[tmp.VsrB(4 * i + 1)][1] ^
- AES_imc[tmp.VsrB(4 * i + 2)][2] ^
- AES_imc[tmp.VsrB(4 * i + 3)][3];
- }
+ aesdec_ISB_ISR_AK_IMC(ad, st, rk, true);
}
void helper_vncipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
- ppc_avr_t result;
- int i;
-
- VECTOR_FOR_INORDER_I(i, u8) {
- result.VsrB(i) = b->VsrB(i) ^ (AES_isbox[a->VsrB(AES_ishifts[i])]);
- }
- *r = result;
+ aesdec_ISB_ISR_AK((AESState *)r, (AESState *)a, (AESState *)b, true);
}
void helper_vshasigmaw(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six)
diff --git a/target/ppc/internal.h b/target/ppc/internal.h
index 55284369f5..5b20ecbd33 100644
--- a/target/ppc/internal.h
+++ b/target/ppc/internal.h
@@ -18,6 +18,17 @@
#ifndef PPC_INTERNAL_H
#define PPC_INTERNAL_H
+#include "hw/registerfields.h"
+
+/* PM instructions */
+typedef enum {
+ PPC_PM_DOZE,
+ PPC_PM_NAP,
+ PPC_PM_SLEEP,
+ PPC_PM_RVWINKLE,
+ PPC_PM_STOP,
+} powerpc_pm_insn_t;
+
#define FUNC_MASK(name, ret_type, size, max_val) \
static inline ret_type name(uint##size##_t start, \
uint##size##_t end) \
@@ -157,15 +168,15 @@ EXTRACT_HELPER(FPL, 25, 1);
EXTRACT_HELPER(FPFLM, 17, 8);
EXTRACT_HELPER(FPW, 16, 1);
-/* mffscrni */
-EXTRACT_HELPER(RM, 11, 2);
-
/* addpcis */
EXTRACT_HELPER_SPLIT_3(DX, 10, 6, 6, 5, 16, 1, 1, 0, 0)
#if defined(TARGET_PPC64)
/* darn */
EXTRACT_HELPER(L, 16, 2);
#endif
+/* wait */
+EXTRACT_HELPER(WC, 21, 2);
+EXTRACT_HELPER(PL, 16, 2);
/*** Jump target decoding ***/
/* Immediate address */
@@ -211,11 +222,6 @@ void helper_compute_fprf_float16(CPUPPCState *env, float16 arg);
void helper_compute_fprf_float32(CPUPPCState *env, float32 arg);
void helper_compute_fprf_float128(CPUPPCState *env, float128 arg);
-/* Raise a data fault alignment exception for the specified virtual address */
-void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
- MMUAccessType access_type, int mmu_idx,
- uintptr_t retaddr) QEMU_NORETURN;
-
/* translate.c */
int ppc_fixup_cpu(PowerPCCPU *cpu);
@@ -224,7 +230,7 @@ void destroy_ppc_opcodes(PowerPCCPU *cpu);
/* gdbstub.c */
void ppc_gdb_init(CPUState *cs, PowerPCCPUClass *ppc);
-gchar *ppc_gdb_arch_name(CPUState *cs);
+const gchar *ppc_gdb_arch_name(CPUState *cs);
/**
* prot_for_access_type:
@@ -245,9 +251,12 @@ static inline int prot_for_access_type(MMUAccessType access_type)
g_assert_not_reached();
}
+#ifndef CONFIG_USER_ONLY
+
/* PowerPC MMU emulation */
typedef struct mmu_ctx_t mmu_ctx_t;
+
bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
hwaddr *raddrp, int *psizep, int *protp,
int mmu_idx, bool guest_visible);
@@ -269,6 +278,8 @@ struct mmu_ctx_t {
int nx; /* Non-execute area */
};
+#endif /* !CONFIG_USER_ONLY */
+
/* Common routines used by software and hardware TLBs emulation */
static inline int pte_is_valid(target_ulong pte0)
{
@@ -283,5 +294,38 @@ static inline void pte_invalidate(target_ulong *pte0)
#define PTE_PTEM_MASK 0x7FFFFFBF
#define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
+#ifdef CONFIG_USER_ONLY
+void ppc_cpu_record_sigsegv(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t ra);
+#else
+bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+G_NORETURN void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ uintptr_t retaddr);
+void ppc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+ vaddr addr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr);
+void ppc_cpu_debug_excp_handler(CPUState *cs);
+bool ppc_cpu_debug_check_breakpoint(CPUState *cs);
+bool ppc_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);
+#endif
+
+FIELD(GER_MSK, XMSK, 0, 4)
+FIELD(GER_MSK, YMSK, 4, 4)
+FIELD(GER_MSK, PMSK, 8, 8)
+
+static inline int ger_pack_masks(int pmsk, int ymsk, int xmsk)
+{
+ int msk = 0;
+ msk = FIELD_DP32(msk, GER_MSK, XMSK, xmsk);
+ msk = FIELD_DP32(msk, GER_MSK, YMSK, ymsk);
+ msk = FIELD_DP32(msk, GER_MSK, PMSK, pmsk);
+ return msk;
+}
#endif /* PPC_INTERNAL_H */
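The FIELD() declarations lay the GER masks out as XMSK in bits 0-3, YMSK in bits 4-7 and PMSK in bits 8-15; an equivalent sketch of ger_pack_masks() with plain shifts, plus a round-trip check:

#include <stdio.h>

/* Same bit layout as the FIELD_DP32() calls above, without registerfields.h */
static int ger_pack_masks_ref(int pmsk, int ymsk, int xmsk)
{
    return ((pmsk & 0xff) << 8) | ((ymsk & 0xf) << 4) | (xmsk & 0xf);
}

int main(void)
{
    int msk = ger_pack_masks_ref(0xff, 0x3, 0x5);

    /* prints: 0xff35 xmsk=5 ymsk=3 pmsk=0xff */
    printf("0x%04x xmsk=%d ymsk=%d pmsk=0x%02x\n",
           msk, msk & 0xf, (msk >> 4) & 0xf, (msk >> 8) & 0xff);
    return 0;
}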
diff --git a/target/ppc/kvm-stub.c b/target/ppc/kvm-stub.c
deleted file mode 100644
index b98e1d404f..0000000000
--- a/target/ppc/kvm-stub.c
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * QEMU KVM PPC specific function stubs
- *
- * Copyright Freescale Inc. 2013
- *
- * Author: Alexander Graf <agraf@suse.de>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "hw/ppc/openpic_kvm.h"
-
-int kvm_openpic_connect_vcpu(DeviceState *d, CPUState *cs)
-{
- return -EINVAL;
-}
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index dc93b99189..63930d4a77 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -21,7 +21,6 @@
#include <linux/kvm.h>
-#include "qemu-common.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "cpu.h"
@@ -33,7 +32,6 @@
#include "sysemu/device_tree.h"
#include "mmu-hash64.h"
-#include "hw/sysbus.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/hw.h"
@@ -90,6 +88,7 @@ static int cap_ppc_nested_kvm_hv;
static int cap_large_decr;
static int cap_fwnmi;
static int cap_rpt_invalidate;
+static int cap_ail_mode_3;
static uint32_t debug_inst_opcode;
@@ -109,6 +108,11 @@ static int kvm_ppc_register_host_cpu_type(void);
static void kvmppc_get_cpu_characteristics(KVMState *s);
static int kvmppc_get_dec_bits(void);
+int kvm_arch_get_default_type(MachineState *ms)
+{
+ return 0;
+}
+
int kvm_arch_init(MachineState *ms, KVMState *s)
{
cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
@@ -154,6 +158,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
}
cap_rpt_invalidate = kvm_vm_check_extension(s, KVM_CAP_PPC_RPT_INVALIDATE);
+ cap_ail_mode_3 = kvm_vm_check_extension(s, KVM_CAP_PPC_AIL_MODE_3);
kvm_ppc_register_host_cpu_type();
return 0;
@@ -263,11 +268,11 @@ static void kvm_get_smmu_info(struct kvm_ppc_smmu_info *info, Error **errp)
"KVM failed to provide the MMU features it supports");
}
-struct ppc_radix_page_info *kvm_get_radix_page_info(void)
+static struct ppc_radix_page_info *kvmppc_get_radix_page_info(void)
{
KVMState *s = KVM_STATE(current_accel());
struct ppc_radix_page_info *radix_page_info;
- struct kvm_ppc_rmmu_info rmmu_info;
+ struct kvm_ppc_rmmu_info rmmu_info = { };
int i;
if (!kvm_check_extension(s, KVM_CAP_PPC_MMU_RADIX)) {
@@ -418,7 +423,7 @@ void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
* will be a normal mapping, not a special hugepage one used
* for RAM.
*/
- if (qemu_real_host_page_size < 0x10000) {
+ if (qemu_real_host_page_size() < 0x10000) {
error_setg(errp,
"KVM can't supply 64kiB CI pages, which guest expects");
}
@@ -541,12 +546,12 @@ static void kvm_sw_tlb_put(PowerPCCPU *cpu)
static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
{
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
+ CPUPPCState *env = cpu_env(cs);
+ /* Init 'val' to avoid "uninitialised value" Valgrind warnings */
union {
uint32_t u32;
uint64_t u64;
- } val;
+ } val = { };
struct kvm_one_reg reg = {
.id = id,
.addr = (uintptr_t) &val,
@@ -575,8 +580,7 @@ static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
{
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
+ CPUPPCState *env = cpu_env(cs);
union {
uint32_t u32;
uint64_t u64;
@@ -609,8 +613,7 @@ static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
static int kvm_put_fp(CPUState *cs)
{
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
+ CPUPPCState *env = cpu_env(cs);
struct kvm_one_reg reg;
int i;
int ret;
@@ -629,10 +632,10 @@ static int kvm_put_fp(CPUState *cs)
for (i = 0; i < 32; i++) {
uint64_t vsr[2];
- uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i);
- uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i);
+ uint64_t *fpr = cpu_fpr_ptr(env, i);
+ uint64_t *vsrl = cpu_vsrl_ptr(env, i);
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
vsr[0] = float64_val(*fpr);
vsr[1] = *vsrl;
#else
@@ -676,8 +679,7 @@ static int kvm_put_fp(CPUState *cs)
static int kvm_get_fp(CPUState *cs)
{
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
+ CPUPPCState *env = cpu_env(cs);
struct kvm_one_reg reg;
int i;
int ret;
@@ -698,8 +700,8 @@ static int kvm_get_fp(CPUState *cs)
for (i = 0; i < 32; i++) {
uint64_t vsr[2];
- uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i);
- uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i);
+ uint64_t *fpr = cpu_fpr_ptr(env, i);
+ uint64_t *vsrl = cpu_vsrl_ptr(env, i);
reg.addr = (uintptr_t) &vsr;
reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
@@ -710,7 +712,7 @@ static int kvm_get_fp(CPUState *cs)
strerror(errno));
return ret;
} else {
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
*fpr = vsr[0];
if (vsx) {
*vsrl = vsr[1];
@@ -850,7 +852,7 @@ static int kvm_put_vpa(CPUState *cs)
int kvmppc_put_books_sregs(PowerPCCPU *cpu)
{
CPUPPCState *env = &cpu->env;
- struct kvm_sregs sregs;
+ struct kvm_sregs sregs = { };
int i;
sregs.pvr = env->spr[SPR_PVR];
@@ -928,10 +930,7 @@ int kvm_arch_put_registers(CPUState *cs, int level)
regs.gpr[i] = env->gpr[i];
}
- regs.cr = 0;
- for (i = 0; i < 8; i++) {
- regs.cr |= (env->crf[i] & 15) << (4 * (7 - i));
- }
+ regs.cr = ppc_get_cr(env);
ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
if (ret < 0) {
@@ -957,8 +956,6 @@ int kvm_arch_put_registers(CPUState *cs, int level)
}
if (cap_one_reg) {
- int i;
-
/*
* We deliberately ignore errors here, for kernels which have
* the ONE_REG calls, but don't support the specific
@@ -974,7 +971,7 @@ int kvm_arch_put_registers(CPUState *cs, int level)
}
#ifdef TARGET_PPC64
- if (msr_ts) {
+ if (FIELD_EX64(env->msr, MSR, TS)) {
for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
}
@@ -1206,7 +1203,6 @@ int kvm_arch_get_registers(CPUState *cs)
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
struct kvm_regs regs;
- uint32_t cr;
int i, ret;
ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
@@ -1214,12 +1210,7 @@ int kvm_arch_get_registers(CPUState *cs)
return ret;
}
- cr = regs.cr;
- for (i = 7; i >= 0; i--) {
- env->crf[i] = cr & 15;
- cr >>= 4;
- }
-
+ ppc_set_cr(env, regs.cr);
env->ctr = regs.ctr;
env->lr = regs.lr;
cpu_write_xer(env, regs.xer);
@@ -1265,8 +1256,6 @@ int kvm_arch_get_registers(CPUState *cs)
}
if (cap_one_reg) {
- int i;
-
/*
* We deliberately ignore errors here, for kernels which have
* the ONE_REG calls, but don't support the specific
@@ -1282,7 +1271,7 @@ int kvm_arch_get_registers(CPUState *cs)
}
#ifdef TARGET_PPC64
- if (msr_ts) {
+ if (FIELD_EX64(env->msr, MSR, TS)) {
for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
}
@@ -1323,7 +1312,7 @@ int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
return 0;
}
- if (!kvm_enabled() || !cap_interrupt_unset) {
+ if (!cap_interrupt_unset) {
return 0;
}
@@ -1352,7 +1341,8 @@ static int kvmppc_handle_halt(PowerPCCPU *cpu)
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
- if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
+ if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ FIELD_EX64(env->msr, MSR, EE)) {
cs->halted = 1;
cs->exception_index = EXCP_HLT;
}
@@ -1451,15 +1441,15 @@ static int find_hw_watchpoint(target_ulong addr, int *flag)
return -1;
}
-int kvm_arch_insert_hw_breakpoint(target_ulong addr,
- target_ulong len, int type)
+int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
- if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) {
+ const unsigned breakpoint_index = nb_hw_breakpoint + nb_hw_watchpoint;
+ if (breakpoint_index >= ARRAY_SIZE(hw_debug_points)) {
return -ENOBUFS;
}
- hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr;
- hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type;
+ hw_debug_points[breakpoint_index].addr = addr;
+ hw_debug_points[breakpoint_index].type = type;
switch (type) {
case GDB_BREAKPOINT_HW:
@@ -1495,8 +1485,7 @@ int kvm_arch_insert_hw_breakpoint(target_ulong addr,
return 0;
}
-int kvm_arch_remove_hw_breakpoint(target_ulong addr,
- target_ulong len, int type)
+int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
int n;
@@ -1663,7 +1652,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
CPUPPCState *env = &cpu->env;
int ret;
- qemu_mutex_lock_iothread();
+ bql_lock();
switch (run->exit_reason) {
case KVM_EXIT_DCR:
@@ -1681,7 +1670,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
break;
#if defined(TARGET_PPC64)
case KVM_EXIT_PAPR_HCALL:
- trace_kvm_handle_papr_hcall();
+ trace_kvm_handle_papr_hcall(run->papr_hcall.nr);
run->papr_hcall.ret = spapr_hypercall(cpu,
run->papr_hcall.nr,
run->papr_hcall.args);
@@ -1722,7 +1711,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
break;
}
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return ret;
}
@@ -1735,6 +1724,10 @@ int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
.addr = (uintptr_t) &bits,
};
+ if (!kvm_enabled()) {
+ return 0;
+ }
+
return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}
@@ -1748,6 +1741,10 @@ int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
.addr = (uintptr_t) &bits,
};
+ if (!kvm_enabled()) {
+ return 0;
+ }
+
return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}
@@ -1762,6 +1759,10 @@ int kvmppc_set_tcr(PowerPCCPU *cpu)
.addr = (uintptr_t) &tcr,
};
+ if (!kvm_enabled()) {
+ return 0;
+ }
+
return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}
@@ -1876,6 +1877,12 @@ static int kvmppc_find_cpu_dt(char *buf, int buf_len)
buf[0] = '\0';
while ((dirp = readdir(dp)) != NULL) {
FILE *f;
+
+ /* Don't accidentally read from the current and parent directories */
+ if (strcmp(dirp->d_name, ".") == 0 || strcmp(dirp->d_name, "..") == 0) {
+ continue;
+ }
+
snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
dirp->d_name);
f = fopen(buf, "r");
@@ -2357,18 +2364,7 @@ static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
}
#if defined(TARGET_PPC64)
- pcc->radix_page_info = kvm_get_radix_page_info();
-
- if ((pcc->pvr & 0xffffff00) == CPU_POWERPC_POWER9_DD1) {
- /*
- * POWER9 DD1 has some bugs which make it not really ISA 3.00
- * compliant. More importantly, advertising ISA 3.00
- * architected mode may prevent guests from activating
- * necessary DD1 workarounds.
- */
- pcc->pcr_supported &= ~(PCR_COMPAT_3_00 | PCR_COMPAT_2_07
- | PCR_COMPAT_2_06 | PCR_COMPAT_2_05);
- }
+ pcc->radix_page_info = kvmppc_get_radix_page_info();
#endif /* defined(TARGET_PPC64) */
}
@@ -2537,7 +2533,7 @@ int kvmppc_get_cap_large_decr(void)
int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable)
{
CPUState *cs = CPU(cpu);
- uint64_t lpcr;
+ uint64_t lpcr = 0;
kvm_get_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
/* Do we need to modify the LPCR? */
@@ -2563,6 +2559,11 @@ int kvmppc_has_cap_rpt_invalidate(void)
return cap_rpt_invalidate;
}
+bool kvmppc_supports_ail_3(void)
+{
+ return cap_ail_mode_3;
+}
+
PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
{
uint32_t host_pvr = mfpvr();
@@ -2672,7 +2673,7 @@ int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp)
int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
{
int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
- uint8_t buf[bufsize];
+ g_autofree uint8_t *buf = g_malloc(bufsize);
ssize_t rc;
do {
@@ -2754,9 +2755,9 @@ void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n)
while (i < n) {
struct kvm_get_htab_header *hdr;
int m = n < HPTES_PER_GROUP ? n : HPTES_PER_GROUP;
- char buf[sizeof(*hdr) + m * HASH_PTE_SIZE_64];
+ char buf[sizeof(*hdr) + HPTES_PER_GROUP * HASH_PTE_SIZE_64];
- rc = read(fd, buf, sizeof(buf));
+ rc = read(fd, buf, sizeof(*hdr) + m * HASH_PTE_SIZE_64);
if (rc < 0) {
hw_error("kvmppc_read_hptes: Unable to read HPTEs");
}
@@ -2955,7 +2956,6 @@ void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset)
}
}
-bool kvm_arch_cpu_check_are_resettable(void)
+void kvm_arch_accel_class_init(ObjectClass *oc)
{
- return true;
}
diff --git a/target/ppc/kvm_ppc.h b/target/ppc/kvm_ppc.h
index ee9325bf9a..1975fb5ee6 100644
--- a/target/ppc/kvm_ppc.h
+++ b/target/ppc/kvm_ppc.h
@@ -9,7 +9,13 @@
#ifndef KVM_PPC_H
#define KVM_PPC_H
-#define TYPE_HOST_POWERPC_CPU POWERPC_CPU_TYPE_NAME("host")
+#include "sysemu/kvm.h"
+#include "exec/hwaddr.h"
+#include "cpu.h"
+
+#ifdef CONFIG_USER_ONLY
+#error Cannot include kvm_ppc.h from user emulation
+#endif
#ifdef CONFIG_KVM
@@ -40,7 +46,6 @@ int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu);
target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
bool radix, bool gtse,
uint64_t proc_tbl);
-#ifndef CONFIG_USER_ONLY
bool kvmppc_spapr_use_multitce(void);
int kvmppc_spapr_enable_inkernel_multitce(void);
void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
@@ -50,7 +55,6 @@ int kvmppc_remove_spapr_tce(void *table, int pfd, uint32_t window_size);
int kvmppc_reset_htab(int shift_hint);
uint64_t kvmppc_vrma_limit(unsigned int hash_shift);
bool kvmppc_has_cap_spapr_vfio(void);
-#endif /* !CONFIG_USER_ONLY */
bool kvmppc_has_cap_epr(void);
int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function);
int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp);
@@ -73,6 +77,7 @@ int kvmppc_set_cap_nested_kvm_hv(int enable);
int kvmppc_get_cap_large_decr(void);
int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable);
int kvmppc_has_cap_rpt_invalidate(void);
+bool kvmppc_supports_ail_3(void);
int kvmppc_enable_hwrng(void);
int kvmppc_put_books_sregs(PowerPCCPU *cpu);
PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void);
@@ -88,7 +93,34 @@ void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset);
int kvm_handle_nmi(PowerPCCPU *cpu, struct kvm_run *run);
-#else
+#define kvmppc_eieio() \
+ do { \
+ if (kvm_enabled()) { \
+ asm volatile("eieio" : : : "memory"); \
+ } \
+ } while (0)
+
+/* Store data cache blocks back to memory */
+static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+ uint8_t *p;
+
+ for (p = addr; p < addr + len; p += cpu->env.dcache_line_size) {
+ asm volatile("dcbst 0,%0" : : "r"(p) : "memory");
+ }
+}
+
+/* Invalidate instruction cache blocks */
+static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+ uint8_t *p;
+
+ for (p = addr; p < addr + len; p += cpu->env.icache_line_size) {
+ asm volatile("icbi 0,%0" : : "r"(p));
+ }
+}
+
+#else /* !CONFIG_KVM */
static inline uint32_t kvmppc_get_tbfreq(void)
{
@@ -232,7 +264,6 @@ static inline void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset)
{
}
-#ifndef CONFIG_USER_ONLY
static inline bool kvmppc_spapr_use_multitce(void)
{
return false;
@@ -292,8 +323,6 @@ static inline void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
abort();
}
-#endif /* !CONFIG_USER_ONLY */
-
static inline bool kvmppc_has_cap_epr(void)
{
return false;
@@ -393,6 +422,11 @@ static inline int kvmppc_has_cap_rpt_invalidate(void)
return false;
}
+static inline bool kvmppc_supports_ail_3(void)
+{
+ return false;
+}
+
static inline int kvmppc_enable_hwrng(void)
{
return -1;
@@ -430,10 +464,6 @@ static inline bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
return false;
}
-#endif
-
-#ifndef CONFIG_KVM
-
#define kvmppc_eieio() do { } while (0)
static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
@@ -444,35 +474,6 @@ static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
{
}
-#else /* CONFIG_KVM */
-
-#define kvmppc_eieio() \
- do { \
- if (kvm_enabled()) { \
- asm volatile("eieio" : : : "memory"); \
- } \
- } while (0)
-
-/* Store data cache blocks back to memory */
-static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
-{
- uint8_t *p;
-
- for (p = addr; p < addr + len; p += cpu->env.dcache_line_size) {
- asm volatile("dcbst 0,%0" : : "r"(p) : "memory");
- }
-}
-
-/* Invalidate instruction cache blocks */
-static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
-{
- uint8_t *p;
-
- for (p = addr; p < addr + len; p += cpu->env.icache_line_size) {
- asm volatile("icbi 0,%0" : : "r"(p));
- }
-}
-
#endif /* CONFIG_KVM */
#endif /* KVM_PPC_H */
diff --git a/target/ppc/machine.c b/target/ppc/machine.c
index 93972df58e..203fe28e01 100644
--- a/target/ppc/machine.c
+++ b/target/ppc/machine.c
@@ -2,12 +2,14 @@
#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
+#include "sysemu/tcg.h"
#include "helper_regs.h"
#include "mmu-hash64.h"
#include "migration/cpu.h"
#include "qapi/error.h"
-#include "qemu/main-loop.h"
#include "kvm_ppc.h"
+#include "power8-pmu.h"
+#include "sysemu/replay.h"
static void post_load_update_msr(CPUPPCState *env)
{
@@ -21,117 +23,6 @@ static void post_load_update_msr(CPUPPCState *env)
ppc_store_msr(env, msr);
}
-static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
-{
- PowerPCCPU *cpu = opaque;
- CPUPPCState *env = &cpu->env;
- unsigned int i, j;
- target_ulong sdr1;
- uint32_t fpscr, vscr;
-#if defined(TARGET_PPC64)
- int32_t slb_nr;
-#endif
- target_ulong xer;
-
- for (i = 0; i < 32; i++) {
- qemu_get_betls(f, &env->gpr[i]);
- }
-#if !defined(TARGET_PPC64)
- for (i = 0; i < 32; i++) {
- qemu_get_betls(f, &env->gprh[i]);
- }
-#endif
- qemu_get_betls(f, &env->lr);
- qemu_get_betls(f, &env->ctr);
- for (i = 0; i < 8; i++) {
- qemu_get_be32s(f, &env->crf[i]);
- }
- qemu_get_betls(f, &xer);
- cpu_write_xer(env, xer);
- qemu_get_betls(f, &env->reserve_addr);
- qemu_get_betls(f, &env->msr);
- for (i = 0; i < 4; i++) {
- qemu_get_betls(f, &env->tgpr[i]);
- }
- for (i = 0; i < 32; i++) {
- union {
- float64 d;
- uint64_t l;
- } u;
- u.l = qemu_get_be64(f);
- *cpu_fpr_ptr(env, i) = u.d;
- }
- qemu_get_be32s(f, &fpscr);
- env->fpscr = fpscr;
- qemu_get_sbe32s(f, &env->access_type);
-#if defined(TARGET_PPC64)
- qemu_get_betls(f, &env->spr[SPR_ASR]);
- qemu_get_sbe32s(f, &slb_nr);
-#endif
- qemu_get_betls(f, &sdr1);
- for (i = 0; i < 32; i++) {
- qemu_get_betls(f, &env->sr[i]);
- }
- for (i = 0; i < 2; i++) {
- for (j = 0; j < 8; j++) {
- qemu_get_betls(f, &env->DBAT[i][j]);
- }
- }
- for (i = 0; i < 2; i++) {
- for (j = 0; j < 8; j++) {
- qemu_get_betls(f, &env->IBAT[i][j]);
- }
- }
- qemu_get_sbe32s(f, &env->nb_tlb);
- qemu_get_sbe32s(f, &env->tlb_per_way);
- qemu_get_sbe32s(f, &env->nb_ways);
- qemu_get_sbe32s(f, &env->last_way);
- qemu_get_sbe32s(f, &env->id_tlbs);
- qemu_get_sbe32s(f, &env->nb_pids);
- if (env->tlb.tlb6) {
- /* XXX assumes 6xx */
- for (i = 0; i < env->nb_tlb; i++) {
- qemu_get_betls(f, &env->tlb.tlb6[i].pte0);
- qemu_get_betls(f, &env->tlb.tlb6[i].pte1);
- qemu_get_betls(f, &env->tlb.tlb6[i].EPN);
- }
- }
- for (i = 0; i < 4; i++) {
- qemu_get_betls(f, &env->pb[i]);
- }
- for (i = 0; i < 1024; i++) {
- qemu_get_betls(f, &env->spr[i]);
- }
- if (!cpu->vhyp) {
- ppc_store_sdr1(env, sdr1);
- }
- qemu_get_be32s(f, &vscr);
- ppc_store_vscr(env, vscr);
- qemu_get_be64s(f, &env->spe_acc);
- qemu_get_be32s(f, &env->spe_fscr);
- qemu_get_betls(f, &env->msr_mask);
- qemu_get_be32s(f, &env->flags);
- qemu_get_sbe32s(f, &env->error_code);
- qemu_get_be32s(f, &env->pending_interrupts);
- qemu_get_be32s(f, &env->irq_input_state);
- for (i = 0; i < POWERPC_EXCP_NB; i++) {
- qemu_get_betls(f, &env->excp_vectors[i]);
- }
- qemu_get_betls(f, &env->excp_prefix);
- qemu_get_betls(f, &env->ivor_mask);
- qemu_get_betls(f, &env->ivpr_mask);
- qemu_get_betls(f, &env->hreset_vector);
- qemu_get_betls(f, &env->nip);
- qemu_get_sbetl(f); /* Discard unused hflags */
- qemu_get_sbetl(f); /* Discard unused hflags_nmsr */
- qemu_get_sbe32(f); /* Discard unused mmu_idx */
- qemu_get_sbe32(f); /* Discard unused power_mode */
-
- post_load_update_msr(env);
-
- return 0;
-}
-
static int get_avr(QEMUFile *f, void *pv, size_t size,
const VMStateField *field)
{
@@ -262,7 +153,8 @@ static int cpu_pre_save(void *opaque)
| PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206
| PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207
| PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207
- | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM;
+ | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM
+ | PPC2_MEM_LWSYNC;
env->spr[SPR_LR] = env->lr;
env->spr[SPR_CTR] = env->ctr;
@@ -314,9 +206,16 @@ static int cpu_pre_save(void *opaque)
}
}
- /* Retain migration compatibility for pre 6.0 for 601 machines. */
- env->hflags_compat_nmsr = (env->flags & POWERPC_FLAG_HID0_LE
- ? env->hflags & MSR_LE : 0);
+    /* Used to retain pre-6.0 migration compatibility for 601 machines. */
+ env->hflags_compat_nmsr = 0;
+
+ if (tcg_enabled()) {
+ /*
+ * TCG does not maintain the DECR spr (unlike KVM) so have to save
+         * TCG does not maintain the DECR spr (unlike KVM), so we have to
+         * save it here.
+ env->spr[SPR_DECR] = cpu_ppc_load_decr(env);
+ }
return 0;
}
@@ -339,7 +238,7 @@ static bool pvr_match(PowerPCCPU *cpu, uint32_t pvr)
if (pvr == pcc->pvr) {
return true;
}
- return pcc->pvr_match(pcc, pvr);
+ return pcc->pvr_match(pcc, pvr, true);
}
static int cpu_post_load(void *opaque, int version_id)
@@ -422,6 +321,21 @@ static int cpu_post_load(void *opaque, int version_id)
post_load_update_msr(env);
+ if (tcg_enabled()) {
+ /* Re-set breaks based on regs */
+#if defined(TARGET_PPC64)
+ ppc_update_ciabr(env);
+ ppc_update_daw0(env);
+#endif
+ /*
+ * TCG needs to re-start the decrementer timer and/or raise the
+ * interrupt. This works for level-triggered decrementer. Edge
+ * triggered types (including HDEC) would need to carry more state.
+ */
+ cpu_ppc_store_decr(env, env->spr[SPR_DECR]);
+ pmu_mmcr01_updated(env);
+ }
+
return 0;
}
@@ -437,7 +351,7 @@ static const VMStateDescription vmstate_fpu = {
.version_id = 1,
.minimum_version_id = 1,
.needed = fpu_needed,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_FPR_ARRAY(env.vsr, PowerPCCPU, 32),
VMSTATE_UINTTL(env.fpscr, PowerPCCPU),
VMSTATE_END_OF_LIST()
@@ -478,7 +392,7 @@ static const VMStateDescription vmstate_altivec = {
.version_id = 1,
.minimum_version_id = 1,
.needed = altivec_needed,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_AVR_ARRAY(env.vsr, PowerPCCPU, 32),
/*
* Save the architecture value of the vscr, not the internally
@@ -511,7 +425,7 @@ static const VMStateDescription vmstate_vsx = {
.version_id = 1,
.minimum_version_id = 1,
.needed = vsx_needed,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_VSR_ARRAY(env.vsr, PowerPCCPU, 32),
VMSTATE_END_OF_LIST()
},
@@ -523,16 +437,15 @@ static bool tm_needed(void *opaque)
{
PowerPCCPU *cpu = opaque;
CPUPPCState *env = &cpu->env;
- return msr_ts;
+ return FIELD_EX64(env->msr, MSR, TS);
}
static const VMStateDescription vmstate_tm = {
.name = "cpu/tm",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.needed = tm_needed,
- .fields = (VMStateField []) {
+ .fields = (const VMStateField []) {
VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32),
VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64),
VMSTATE_UINT64(env.tm_cr, PowerPCCPU),
@@ -566,7 +479,7 @@ static const VMStateDescription vmstate_sr = {
.version_id = 1,
.minimum_version_id = 1,
.needed = sr_needed,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32),
VMSTATE_END_OF_LIST()
},
@@ -640,7 +553,7 @@ static const VMStateDescription vmstate_slb = {
.minimum_version_id = 1,
.needed = slb_needed,
.post_load = slb_post_load,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_INT32_TEST(mig_slb_nr, PowerPCCPU, cpu_pre_3_0_migration),
VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
VMSTATE_END_OF_LIST()
@@ -652,7 +565,7 @@ static const VMStateDescription vmstate_tlb6xx_entry = {
.name = "cpu/tlb6xx_entry",
.version_id = 1,
.minimum_version_id = 1,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_UINTTL(pte0, ppc6xx_tlb_t),
VMSTATE_UINTTL(pte1, ppc6xx_tlb_t),
VMSTATE_UINTTL(EPN, ppc6xx_tlb_t),
@@ -673,7 +586,7 @@ static const VMStateDescription vmstate_tlb6xx = {
.version_id = 1,
.minimum_version_id = 1,
.needed = tlb6xx_needed,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU,
env.nb_tlb,
@@ -688,7 +601,7 @@ static const VMStateDescription vmstate_tlbemb_entry = {
.name = "cpu/tlbemb_entry",
.version_id = 1,
.minimum_version_id = 1,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_UINT64(RPN, ppcemb_tlb_t),
VMSTATE_UINTTL(EPN, ppcemb_tlb_t),
VMSTATE_UINTTL(PID, ppcemb_tlb_t),
@@ -707,50 +620,26 @@ static bool tlbemb_needed(void *opaque)
return env->nb_tlb && (env->tlb_type == TLB_EMB);
}
-static bool pbr403_needed(void *opaque)
-{
- PowerPCCPU *cpu = opaque;
- uint32_t pvr = cpu->env.spr[SPR_PVR];
-
- return (pvr & 0xffff0000) == 0x00200000;
-}
-
-static const VMStateDescription vmstate_pbr403 = {
- .name = "cpu/pbr403",
- .version_id = 1,
- .minimum_version_id = 1,
- .needed = pbr403_needed,
- .fields = (VMStateField[]) {
- VMSTATE_UINTTL_ARRAY(env.pb, PowerPCCPU, 4),
- VMSTATE_END_OF_LIST()
- },
-};
-
static const VMStateDescription vmstate_tlbemb = {
.name = "cpu/tlb6xx",
.version_id = 1,
.minimum_version_id = 1,
.needed = tlbemb_needed,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU,
env.nb_tlb,
vmstate_tlbemb_entry,
ppcemb_tlb_t),
- /* 403 protection registers */
VMSTATE_END_OF_LIST()
},
- .subsections = (const VMStateDescription*[]) {
- &vmstate_pbr403,
- NULL
- }
};
static const VMStateDescription vmstate_tlbmas_entry = {
.name = "cpu/tlbmas_entry",
.version_id = 1,
.minimum_version_id = 1,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_UINT32(mas8, ppcmas_tlb_t),
VMSTATE_UINT32(mas1, ppcmas_tlb_t),
VMSTATE_UINT64(mas2, ppcmas_tlb_t),
@@ -772,7 +661,7 @@ static const VMStateDescription vmstate_tlbmas = {
.version_id = 1,
.minimum_version_id = 1,
.needed = tlbmas_needed,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU,
env.nb_tlb,
@@ -795,21 +684,40 @@ static const VMStateDescription vmstate_compat = {
.version_id = 1,
.minimum_version_id = 1,
.needed = compat_needed,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_UINT32(compat_pvr, PowerPCCPU),
VMSTATE_END_OF_LIST()
}
};
+static bool reservation_needed(void *opaque)
+{
+ return (replay_mode != REPLAY_MODE_NONE);
+}
+
+static const VMStateDescription vmstate_reservation = {
+ .name = "cpu/reservation",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = reservation_needed,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU),
+ VMSTATE_UINTTL(env.reserve_length, PowerPCCPU),
+ VMSTATE_UINTTL(env.reserve_val, PowerPCCPU),
+#if defined(TARGET_PPC64)
+ VMSTATE_UINTTL(env.reserve_val2, PowerPCCPU),
+#endif
+ VMSTATE_END_OF_LIST()
+ }
+};
+
const VMStateDescription vmstate_ppc_cpu = {
.name = "cpu",
.version_id = 5,
.minimum_version_id = 5,
- .minimum_version_id_old = 4,
- .load_state_old = cpu_load_old,
.pre_save = cpu_pre_save,
.post_load = cpu_post_load,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_UNUSED(sizeof(target_ulong)), /* was _EQUAL(env.spr[SPR_PVR]) */
/* User mode architected state */
@@ -824,8 +732,7 @@ const VMStateDescription vmstate_ppc_cpu = {
VMSTATE_UINTTL_ARRAY(env.spr, PowerPCCPU, 1024),
VMSTATE_UINT64(env.spe_acc, PowerPCCPU),
- /* Reservation */
- VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU),
+ VMSTATE_UNUSED(sizeof(target_ulong)), /* was env.reserve_addr */
/* Supervisor mode architected state */
VMSTATE_UINTTL(env.msr, PowerPCCPU),
@@ -841,7 +748,7 @@ const VMStateDescription vmstate_ppc_cpu = {
VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration),
VMSTATE_END_OF_LIST()
},
- .subsections = (const VMStateDescription*[]) {
+ .subsections = (const VMStateDescription * const []) {
&vmstate_fpu,
&vmstate_altivec,
&vmstate_vsx,
@@ -854,6 +761,7 @@ const VMStateDescription vmstate_ppc_cpu = {
&vmstate_tlbemb,
&vmstate_tlbmas,
&vmstate_compat,
+ &vmstate_reservation,
NULL
}
};
diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c
index e2282baa8d..ea7e8443a8 100644
--- a/target/ppc/mem_helper.c
+++ b/target/ppc/mem_helper.c
@@ -21,11 +21,9 @@
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
-#include "qemu/main-loop.h"
#include "exec/helper-proto.h"
#include "helper_regs.h"
#include "exec/cpu_ldst.h"
-#include "tcg/tcg.h"
#include "internal.h"
#include "qemu/atomic128.h"
@@ -33,10 +31,10 @@
static inline bool needs_byteswap(const CPUPPCState *env)
{
-#if defined(TARGET_WORDS_BIGENDIAN)
- return msr_le;
+#if TARGET_BIG_ENDIAN
+ return FIELD_EX64(env->msr, MSR, LE);
#else
- return !msr_le;
+ return !FIELD_EX64(env->msr, MSR, LE);
#endif
}
@@ -85,7 +83,7 @@ static void *probe_contiguous(CPUPPCState *env, target_ulong addr, uint32_t nb,
void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
uintptr_t raddr = GETPC();
- int mmu_idx = cpu_mmu_index(env, false);
+ int mmu_idx = ppc_env_mmu_index(env, false);
void *host = probe_contiguous(env, addr, (32 - reg) * 4,
MMU_DATA_LOAD, mmu_idx, raddr);
@@ -107,7 +105,7 @@ void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
uintptr_t raddr = GETPC();
- int mmu_idx = cpu_mmu_index(env, false);
+ int mmu_idx = ppc_env_mmu_index(env, false);
void *host = probe_contiguous(env, addr, (32 - reg) * 4,
MMU_DATA_STORE, mmu_idx, raddr);
@@ -137,7 +135,7 @@ static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
return;
}
- mmu_idx = cpu_mmu_index(env, false);
+ mmu_idx = ppc_env_mmu_index(env, false);
host = probe_contiguous(env, addr, nb, MMU_DATA_LOAD, mmu_idx, raddr);
if (likely(host)) {
@@ -226,7 +224,7 @@ void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
return;
}
- mmu_idx = cpu_mmu_index(env, false);
+ mmu_idx = ppc_env_mmu_index(env, false);
host = probe_contiguous(env, addr, nb, MMU_DATA_STORE, mmu_idx, raddr);
if (likely(host)) {
@@ -278,7 +276,7 @@ static void dcbz_common(CPUPPCState *env, target_ulong addr,
target_ulong mask, dcbz_size = env->dcache_line_size;
uint32_t i;
void *haddr;
- int mmu_idx = epid ? PPC_TLB_EPID_STORE : cpu_mmu_index(env, false);
+ int mmu_idx = epid ? PPC_TLB_EPID_STORE : ppc_env_mmu_index(env, false);
#if defined(TARGET_PPC64)
/* Check for dcbz vs dcbzl on 970 */
@@ -368,101 +366,9 @@ target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
return i;
}
-#ifdef TARGET_PPC64
-uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
- uint32_t opidx)
-{
- Int128 ret;
-
- /* We will have raised EXCP_ATOMIC from the translator. */
- assert(HAVE_ATOMIC128);
- ret = cpu_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
- env->retxh = int128_gethi(ret);
- return int128_getlo(ret);
-}
-
-uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr,
- uint32_t opidx)
-{
- Int128 ret;
-
- /* We will have raised EXCP_ATOMIC from the translator. */
- assert(HAVE_ATOMIC128);
- ret = cpu_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
- env->retxh = int128_gethi(ret);
- return int128_getlo(ret);
-}
-
-void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr,
- uint64_t lo, uint64_t hi, uint32_t opidx)
-{
- Int128 val;
-
- /* We will have raised EXCP_ATOMIC from the translator. */
- assert(HAVE_ATOMIC128);
- val = int128_make128(lo, hi);
- cpu_atomic_sto_le_mmu(env, addr, val, opidx, GETPC());
-}
-
-void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
- uint64_t lo, uint64_t hi, uint32_t opidx)
-{
- Int128 val;
-
- /* We will have raised EXCP_ATOMIC from the translator. */
- assert(HAVE_ATOMIC128);
- val = int128_make128(lo, hi);
- cpu_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
-}
-
-uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
- uint64_t new_lo, uint64_t new_hi,
- uint32_t opidx)
-{
- bool success = false;
-
- /* We will have raised EXCP_ATOMIC from the translator. */
- assert(HAVE_CMPXCHG128);
-
- if (likely(addr == env->reserve_addr)) {
- Int128 oldv, cmpv, newv;
-
- cmpv = int128_make128(env->reserve_val2, env->reserve_val);
- newv = int128_make128(new_lo, new_hi);
- oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv,
- opidx, GETPC());
- success = int128_eq(oldv, cmpv);
- }
- env->reserve_addr = -1;
- return env->so + success * CRF_EQ_BIT;
-}
-
-uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
- uint64_t new_lo, uint64_t new_hi,
- uint32_t opidx)
-{
- bool success = false;
-
- /* We will have raised EXCP_ATOMIC from the translator. */
- assert(HAVE_CMPXCHG128);
-
- if (likely(addr == env->reserve_addr)) {
- Int128 oldv, cmpv, newv;
-
- cmpv = int128_make128(env->reserve_val2, env->reserve_val);
- newv = int128_make128(new_lo, new_hi);
- oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv,
- opidx, GETPC());
- success = int128_eq(oldv, cmpv);
- }
- env->reserve_addr = -1;
- return env->so + success * CRF_EQ_BIT;
-}
-#endif
-
/*****************************************************************************/
/* Altivec extension helpers */
-#if defined(HOST_WORDS_BIGENDIAN)
+#if HOST_BIG_ENDIAN
#define HI_IDX 0
#define LO_IDX 1
#else
@@ -471,8 +377,8 @@ uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
#endif
/*
- * We use msr_le to determine index ordering in a vector. However,
- * byteswapping is not simply controlled by msr_le. We also need to
+ * We use MSR_LE to determine index ordering in a vector. However,
+ * byteswapping is not simply controlled by MSR_LE. We also need to
* take into account endianness of the target. This is done for the
* little-endian PPC64 user-mode target.
*/
@@ -485,7 +391,7 @@ uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
int adjust = HI_IDX * (n_elems - 1); \
int sh = sizeof(r->element[0]) >> 1; \
int index = (addr & 0xf) >> sh; \
- if (msr_le) { \
+ if (FIELD_EX64(env->msr, MSR, LE)) { \
index = n_elems - index - 1; \
} \
\
@@ -512,7 +418,7 @@ LVE(lvewx, cpu_ldl_data_ra, bswap32, u32)
int adjust = HI_IDX * (n_elems - 1); \
int sh = sizeof(r->element[0]) >> 1; \
int index = (addr & 0xf) >> sh; \
- if (msr_le) { \
+ if (FIELD_EX64(env->msr, MSR, LE)) { \
index = n_elems - index - 1; \
} \
\
@@ -546,7 +452,7 @@ void helper_##name(CPUPPCState *env, target_ulong addr, \
t.s128 = int128_zero(); \
if (nb) { \
nb = (nb >= 16) ? 16 : nb; \
- if (msr_le && !lj) { \
+ if (FIELD_EX64(env->msr, MSR, LE) && !lj) { \
for (i = 16; i > 16 - nb; i--) { \
t.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC()); \
addr = addr_add(env, addr, 1); \
@@ -577,7 +483,7 @@ void helper_##name(CPUPPCState *env, target_ulong addr, \
} \
\
nb = (nb >= 16) ? 16 : nb; \
- if (msr_le && !lj) { \
+ if (FIELD_EX64(env->msr, MSR, LE) && !lj) { \
for (i = 16; i > 16 - nb; i--) { \
cpu_stb_data_ra(env, addr, xt->VsrB(i - 1), GETPC()); \
addr = addr_add(env, addr, 1); \
@@ -613,11 +519,12 @@ void helper_tbegin(CPUPPCState *env)
env->spr[SPR_TEXASR] =
(1ULL << TEXASR_FAILURE_PERSISTENT) |
(1ULL << TEXASR_NESTING_OVERFLOW) |
- (msr_hv << TEXASR_PRIVILEGE_HV) |
- (msr_pr << TEXASR_PRIVILEGE_PR) |
+ (FIELD_EX64_HV(env->msr) << TEXASR_PRIVILEGE_HV) |
+ (FIELD_EX64(env->msr, MSR, PR) << TEXASR_PRIVILEGE_PR) |
(1ULL << TEXASR_FAILURE_SUMMARY) |
(1ULL << TEXASR_TFIAR_EXACT);
- env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr;
+ env->spr[SPR_TFIAR] = env->nip | (FIELD_EX64_HV(env->msr) << 1) |
+ FIELD_EX64(env->msr, MSR, PR);
env->spr[SPR_TFHAR] = env->nip + 4;
env->crf[0] = 0xB; /* 0b1010 = transaction failure */
}
diff --git a/target/ppc/meson.build b/target/ppc/meson.build
index b85f295703..0b89f9b89f 100644
--- a/target/ppc/meson.build
+++ b/target/ppc/meson.build
@@ -16,6 +16,7 @@ ppc_ss.add(when: 'CONFIG_TCG', if_true: files(
'misc_helper.c',
'timebase_helper.c',
'translate.c',
+ 'power8-pmu.c',
))
ppc_ss.add(libdecnumber)
@@ -27,26 +28,26 @@ gen = [
extra_args: ['--static-decode=decode_insn64',
'--insnwidth=64']),
]
-ppc_ss.add(gen)
+ppc_ss.add(when: 'CONFIG_TCG', if_true: gen)
-ppc_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c'), if_false: files('kvm-stub.c'))
ppc_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user_only_helper.c'))
-ppc_softmmu_ss = ss.source_set()
-ppc_softmmu_ss.add(files(
+ppc_system_ss = ss.source_set()
+ppc_system_ss.add(files(
'arch_dump.c',
'machine.c',
'mmu-hash32.c',
'mmu_common.c',
- 'monitor.c',
+ 'ppc-qmp-cmds.c',
))
-ppc_softmmu_ss.add(when: 'CONFIG_TCG', if_true: files(
+ppc_system_ss.add(when: 'CONFIG_TCG', if_true: files(
'mmu_helper.c',
), if_false: files(
'tcg-stub.c',
))
+ppc_system_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c'))
-ppc_softmmu_ss.add(when: 'TARGET_PPC64', if_true: files(
+ppc_system_ss.add(when: 'TARGET_PPC64', if_true: files(
'compat.c',
'mmu-book3s-v3.c',
'mmu-hash64.c',
@@ -54,4 +55,4 @@ ppc_softmmu_ss.add(when: 'TARGET_PPC64', if_true: files(
))
target_arch += {'ppc': ppc_ss}
-target_softmmu_arch += {'ppc': ppc_softmmu_ss}
+target_system_arch += {'ppc': ppc_system_ss}
diff --git a/target/ppc/mfrom_table.c.inc b/target/ppc/mfrom_table.c.inc
deleted file mode 100644
index 1653b974a4..0000000000
--- a/target/ppc/mfrom_table.c.inc
+++ /dev/null
@@ -1,78 +0,0 @@
-static const uint8_t mfrom_ROM_table[602] = {
- 77, 77, 76, 76, 75, 75, 74, 74,
- 73, 73, 72, 72, 71, 71, 70, 70,
- 69, 69, 68, 68, 68, 67, 67, 66,
- 66, 65, 65, 64, 64, 64, 63, 63,
- 62, 62, 61, 61, 61, 60, 60, 59,
- 59, 58, 58, 58, 57, 57, 56, 56,
- 56, 55, 55, 54, 54, 54, 53, 53,
- 53, 52, 52, 51, 51, 51, 50, 50,
- 50, 49, 49, 49, 48, 48, 47, 47,
- 47, 46, 46, 46, 45, 45, 45, 44,
- 44, 44, 43, 43, 43, 42, 42, 42,
- 42, 41, 41, 41, 40, 40, 40, 39,
- 39, 39, 39, 38, 38, 38, 37, 37,
- 37, 37, 36, 36, 36, 35, 35, 35,
- 35, 34, 34, 34, 34, 33, 33, 33,
- 33, 32, 32, 32, 32, 31, 31, 31,
- 31, 30, 30, 30, 30, 29, 29, 29,
- 29, 28, 28, 28, 28, 28, 27, 27,
- 27, 27, 26, 26, 26, 26, 26, 25,
- 25, 25, 25, 25, 24, 24, 24, 24,
- 24, 23, 23, 23, 23, 23, 23, 22,
- 22, 22, 22, 22, 21, 21, 21, 21,
- 21, 21, 20, 20, 20, 20, 20, 20,
- 19, 19, 19, 19, 19, 19, 19, 18,
- 18, 18, 18, 18, 18, 17, 17, 17,
- 17, 17, 17, 17, 16, 16, 16, 16,
- 16, 16, 16, 16, 15, 15, 15, 15,
- 15, 15, 15, 15, 14, 14, 14, 14,
- 14, 14, 14, 14, 13, 13, 13, 13,
- 13, 13, 13, 13, 13, 12, 12, 12,
- 12, 12, 12, 12, 12, 12, 12, 11,
- 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 10, 10, 10, 10, 10, 10,
- 10, 10, 10, 10, 10, 9, 9, 9,
- 9, 9, 9, 9, 9, 9, 9, 9,
- 9, 9, 8, 8, 8, 8, 8, 8,
- 8, 8, 8, 8, 8, 8, 8, 8,
- 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 6, 6, 6, 6, 6, 6, 6,
- 6, 6, 6, 6, 6, 6, 6, 6,
- 6, 6, 6, 6, 5, 5, 5, 5,
- 5, 5, 5, 5, 5, 5, 5, 5,
- 5, 5, 5, 5, 5, 5, 5, 5,
- 5, 5, 5, 4, 4, 4, 4, 4,
- 4, 4, 4, 4, 4, 4, 4, 4,
- 4, 4, 4, 4, 4, 4, 4, 4,
- 4, 4, 4, 4, 4, 4, 4, 3,
- 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 0,
-};
diff --git a/target/ppc/mfrom_table_gen.c b/target/ppc/mfrom_table_gen.c
deleted file mode 100644
index f96c4268ba..0000000000
--- a/target/ppc/mfrom_table_gen.c
+++ /dev/null
@@ -1,34 +0,0 @@
-#define _GNU_SOURCE
-#include "qemu/osdep.h"
-#include <math.h>
-
-int main(void)
-{
- double d;
- uint8_t n;
- int i;
-
- printf("static const uint8_t mfrom_ROM_table[602] =\n{\n ");
- for (i = 0; i < 602; i++) {
- /*
- * Extremely decomposed:
- * -T0 / 256
- * T0 = 256 * log10(10 + 1.0) + 0.5
- */
- d = -i;
- d /= 256.0;
- d = exp10(d);
- d += 1.0;
- d = log10(d);
- d *= 256;
- d += 0.5;
- n = d;
- printf("%3d, ", n);
- if ((i & 7) == 7) {
- printf("\n ");
- }
- }
- printf("\n};\n");
-
- return 0;
-}
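
For reference, the deleted generator filled the 602-entry ROM table with round(256 * log10(1 + 10^(-i/256))) for each index i. A portable standalone sketch of the same computation, using pow() instead of the GNU-specific exp10() (link with -lm):

#include <stdio.h>
#include <stdint.h>
#include <math.h>

int main(void)
{
    for (int i = 0; i < 602; i++) {
        /* Same arithmetic as the deleted generator, rounded to nearest. */
        double d = 256.0 * log10(1.0 + pow(10.0, -i / 256.0)) + 0.5;
        uint8_t n = (uint8_t)d;
        printf("%3u,%s", n, (i & 7) == 7 ? "\n " : " ");
    }
    printf("\n");
    return 0;
}
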
diff --git a/target/ppc/misc_helper.c b/target/ppc/misc_helper.c
index c33f5f39b9..58e808dc96 100644
--- a/target/ppc/misc_helper.c
+++ b/target/ppc/misc_helper.c
@@ -18,12 +18,14 @@
*/
#include "qemu/osdep.h"
+#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "mmu-book3s-v3.h"
+#include "hw/ppc/ppc.h"
#include "helper_regs.h"
@@ -41,6 +43,49 @@ void helper_store_dump_spr(CPUPPCState *env, uint32_t sprn)
env->spr[sprn]);
}
+void helper_spr_core_write_generic(CPUPPCState *env, uint32_t sprn,
+ target_ulong val)
+{
+ CPUState *cs = env_cpu(env);
+ CPUState *ccs;
+ uint32_t nr_threads = cs->nr_threads;
+
+ if (nr_threads == 1) {
+ env->spr[sprn] = val;
+ return;
+ }
+
+ THREAD_SIBLING_FOREACH(cs, ccs) {
+ CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
+ cenv->spr[sprn] = val;
+ }
+}
+
+void helper_spr_write_CTRL(CPUPPCState *env, uint32_t sprn,
+ target_ulong val)
+{
+ CPUState *cs = env_cpu(env);
+ CPUState *ccs;
+ uint32_t run = val & 1;
+ uint32_t ts, ts_mask;
+
+ assert(sprn == SPR_CTRL);
+
+ env->spr[sprn] &= ~1U;
+ env->spr[sprn] |= run;
+
+ ts_mask = ~(1U << (8 + env->spr[SPR_TIR]));
+ ts = run << (8 + env->spr[SPR_TIR]);
+
+ THREAD_SIBLING_FOREACH(cs, ccs) {
+ CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
+
+ cenv->spr[sprn] &= ts_mask;
+ cenv->spr[sprn] |= ts;
+ }
+}
+
+
#ifdef TARGET_PPC64
static void raise_hv_fu_exception(CPUPPCState *env, uint32_t bit,
const char *caller, uint32_t cause,
@@ -72,7 +117,7 @@ void helper_hfscr_facility_check(CPUPPCState *env, uint32_t bit,
const char *caller, uint32_t cause)
{
#ifdef TARGET_PPC64
- if ((env->msr_mask & MSR_HVB) && !msr_hv &&
+ if ((env->msr_mask & MSR_HVB) && !FIELD_EX64(env->msr, MSR, HV) &&
!(env->spr[SPR_HFSCR] & (1UL << bit))) {
raise_hv_fu_exception(env, bit, caller, cause, GETPC());
}
@@ -151,56 +196,105 @@ void helper_store_pcr(CPUPPCState *env, target_ulong value)
env->spr[SPR_PCR] = value & pcc->pcr_mask;
}
+void helper_store_ciabr(CPUPPCState *env, target_ulong value)
+{
+ ppc_store_ciabr(env, value);
+}
+
+void helper_store_dawr0(CPUPPCState *env, target_ulong value)
+{
+ ppc_store_dawr0(env, value);
+}
+
+void helper_store_dawrx0(CPUPPCState *env, target_ulong value)
+{
+ ppc_store_dawrx0(env, value);
+}
+
/*
* DPDES register is shared. Each bit reflects the state of the
* doorbell interrupt of a thread of the same core.
*/
target_ulong helper_load_dpdes(CPUPPCState *env)
{
+ CPUState *cs = env_cpu(env);
+ CPUState *ccs;
+ uint32_t nr_threads = cs->nr_threads;
target_ulong dpdes = 0;
helper_hfscr_facility_check(env, HFSCR_MSGP, "load DPDES", HFSCR_IC_MSGP);
- /* TODO: TCG supports only one thread */
- if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
- dpdes = 1;
+ if (!(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+ nr_threads = 1; /* DPDES behaves as 1-thread in LPAR-per-thread mode */
}
+ if (nr_threads == 1) {
+ if (env->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
+ dpdes = 1;
+ }
+ return dpdes;
+ }
+
+ bql_lock();
+ THREAD_SIBLING_FOREACH(cs, ccs) {
+ PowerPCCPU *ccpu = POWERPC_CPU(ccs);
+ CPUPPCState *cenv = &ccpu->env;
+ uint32_t thread_id = ppc_cpu_tir(ccpu);
+
+ if (cenv->pending_interrupts & PPC_INTERRUPT_DOORBELL) {
+ dpdes |= (0x1 << thread_id);
+ }
+ }
+ bql_unlock();
+
return dpdes;
}
void helper_store_dpdes(CPUPPCState *env, target_ulong val)
{
PowerPCCPU *cpu = env_archcpu(env);
- CPUState *cs = CPU(cpu);
+ CPUState *cs = env_cpu(env);
+ CPUState *ccs;
+ uint32_t nr_threads = cs->nr_threads;
helper_hfscr_facility_check(env, HFSCR_MSGP, "store DPDES", HFSCR_IC_MSGP);
- /* TODO: TCG supports only one thread */
- if (val & ~0x1) {
+ if (!(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+ nr_threads = 1; /* DPDES behaves as 1-thread in LPAR-per-thread mode */
+ }
+
+ if (val & ~(nr_threads - 1)) {
qemu_log_mask(LOG_GUEST_ERROR, "Invalid DPDES register value "
TARGET_FMT_lx"\n", val);
+ val &= (nr_threads - 1); /* Ignore the invalid bits */
+ }
+
+ if (nr_threads == 1) {
+ ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, val & 0x1);
return;
}
- if (val & 0x1) {
- env->pending_interrupts |= 1 << PPC_INTERRUPT_DOORBELL;
- cpu_interrupt(cs, CPU_INTERRUPT_HARD);
- } else {
- env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
+ /* Does iothread need to be locked for walking CPU list? */
+ bql_lock();
+ THREAD_SIBLING_FOREACH(cs, ccs) {
+ PowerPCCPU *ccpu = POWERPC_CPU(ccs);
+ uint32_t thread_id = ppc_cpu_tir(ccpu);
+
+ ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, val & (0x1 << thread_id));
}
+ bql_unlock();
}
#endif /* defined(TARGET_PPC64) */
void helper_store_pidr(CPUPPCState *env, target_ulong val)
{
- env->spr[SPR_BOOKS_PID] = val;
+ env->spr[SPR_BOOKS_PID] = (uint32_t)val;
tlb_flush(env_cpu(env));
}
void helper_store_lpidr(CPUPPCState *env, target_ulong val)
{
- env->spr[SPR_LPIDR] = val;
+ env->spr[SPR_LPIDR] = (uint32_t)val;
/*
* We need to flush the TLB on LPID changes as we only tag HV vs
@@ -211,30 +305,6 @@ void helper_store_lpidr(CPUPPCState *env, target_ulong val)
tlb_flush(env_cpu(env));
}
-void helper_store_hid0_601(CPUPPCState *env, target_ulong val)
-{
- target_ulong hid0;
-
- hid0 = env->spr[SPR_HID0];
- env->spr[SPR_HID0] = (uint32_t)val;
-
- if ((val ^ hid0) & 0x00000008) {
- /* Change current endianness */
- hreg_compute_hflags(env);
- qemu_log("%s: set endianness to %c => %08x\n", __func__,
- val & 0x8 ? 'l' : 'b', env->hflags);
- }
-}
-
-void helper_store_403_pbr(CPUPPCState *env, uint32_t num, target_ulong value)
-{
- if (likely(env->pb[num] != value)) {
- env->pb[num] = value;
- /* Should be optimized */
- tlb_flush(env_cpu(env));
- }
-}
-
void helper_store_40x_dbcr0(CPUPPCState *env, target_ulong val)
{
/* Bits 26 & 27 affect single-stepping. */
@@ -248,31 +318,6 @@ void helper_store_40x_sler(CPUPPCState *env, target_ulong val)
store_40x_sler(env, val);
}
#endif
-/*****************************************************************************/
-/* PowerPC 601 specific instructions (POWER bridge) */
-
-target_ulong helper_clcs(CPUPPCState *env, uint32_t arg)
-{
- switch (arg) {
- case 0x0CUL:
- /* Instruction cache line size */
- return env->icache_line_size;
- case 0x0DUL:
- /* Data cache line size */
- return env->dcache_line_size;
- case 0x0EUL:
- /* Minimum cache line size */
- return (env->icache_line_size < env->dcache_line_size) ?
- env->icache_line_size : env->dcache_line_size;
- case 0x0FUL:
- /* Maximum cache line size */
- return (env->icache_line_size > env->dcache_line_size) ?
- env->icache_line_size : env->dcache_line_size;
- default:
- /* Undefined */
- return 0;
- }
-}
/*****************************************************************************/
/* Special registers manipulation */
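
The reworked DPDES helpers above assemble one doorbell-pending bit per SMT sibling, indexed by the thread's TIR. A minimal sketch of that mapping, decoupled from the CPU list walk; thread_doorbell_pending() and tid stand in for the pending_interrupts check and ppc_cpu_tir() used in the patch:

static uint64_t build_dpdes(unsigned nr_threads,
                            bool (*thread_doorbell_pending)(unsigned tid))
{
    uint64_t dpdes = 0;

    for (unsigned tid = 0; tid < nr_threads; tid++) {
        if (thread_doorbell_pending(tid)) {
            dpdes |= 1ull << tid;   /* one bit per sibling thread */
        }
    }
    return dpdes;
}
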
diff --git a/target/ppc/mmu-book3s-v3.c b/target/ppc/mmu-book3s-v3.c
index f4985bae78..c8f69b3df9 100644
--- a/target/ppc/mmu-book3s-v3.c
+++ b/target/ppc/mmu-book3s-v3.c
@@ -28,6 +28,11 @@ bool ppc64_v3_get_pate(PowerPCCPU *cpu, target_ulong lpid, ppc_v3_pate_t *entry)
uint64_t patb = cpu->env.spr[SPR_PTCR] & PTCR_PATB;
uint64_t pats = cpu->env.spr[SPR_PTCR] & PTCR_PATS;
+ /* Check if partition table is properly aligned */
+ if (patb & MAKE_64BIT_MASK(0, pats + 12)) {
+ return false;
+ }
+
/* Calculate number of entries */
pats = 1ull << (pats + 12 - 4);
if (pats <= lpid) {
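
The alignment check added above rejects a partition table whose base is not naturally aligned to its size: PATS encodes a table of 2^(PATS+12) bytes, so the low PATS+12 bits of the base must be zero. An equivalent formulation as a sketch:

static bool patb_is_aligned(uint64_t patb, uint64_t pats)
{
    uint64_t size = 1ull << (pats + 12);   /* table size in bytes */

    /* MAKE_64BIT_MASK(0, pats + 12) in the patch is exactly size - 1. */
    return (patb & (size - 1)) == 0;
}
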
diff --git a/target/ppc/mmu-book3s-v3.h b/target/ppc/mmu-book3s-v3.h
index d6d5ed8f8e..674377a19e 100644
--- a/target/ppc/mmu-book3s-v3.h
+++ b/target/ppc/mmu-book3s-v3.h
@@ -50,6 +50,21 @@ struct prtb_entry {
#ifdef TARGET_PPC64
+/*
+ * tlbie[l] helper flags
+ *
+ * RIC, PRS, R and local are passed as flags in the last argument.
+ */
+#define TLBIE_F_RIC_SHIFT 0
+#define TLBIE_F_PRS_SHIFT 2
+#define TLBIE_F_R_SHIFT 3
+#define TLBIE_F_LOCAL_SHIFT 4
+
+#define TLBIE_F_RIC_MASK (3 << TLBIE_F_RIC_SHIFT)
+#define TLBIE_F_PRS (1 << TLBIE_F_PRS_SHIFT)
+#define TLBIE_F_R (1 << TLBIE_F_R_SHIFT)
+#define TLBIE_F_LOCAL (1 << TLBIE_F_LOCAL_SHIFT)
+
static inline bool ppc64_use_proc_tbl(PowerPCCPU *cpu)
{
return !!(cpu->env.spr[SPR_LPCR] & LPCR_UPRT);
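
The new TLBIE_F_* constants pack the RIC, PRS, R and local attributes of tlbie[l] into the helper's last argument. A sketch of how a caller could encode them (the function name is illustrative):

static uint32_t tlbie_flags(unsigned ric, bool prs, bool r, bool local)
{
    uint32_t flags = (ric << TLBIE_F_RIC_SHIFT) & TLBIE_F_RIC_MASK;

    if (prs) {
        flags |= TLBIE_F_PRS;
    }
    if (r) {
        flags |= TLBIE_F_R;
    }
    if (local) {
        flags |= TLBIE_F_LOCAL;
    }
    return flags;
}
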
diff --git a/target/ppc/mmu-hash32.c b/target/ppc/mmu-hash32.c
index 3957aab2dc..3976416840 100644
--- a/target/ppc/mmu-hash32.c
+++ b/target/ppc/mmu-hash32.c
@@ -125,30 +125,6 @@ static int hash32_bat_prot(PowerPCCPU *cpu,
return prot;
}
-static target_ulong hash32_bat_601_size(PowerPCCPU *cpu,
- target_ulong batu, target_ulong batl)
-{
- if (!(batl & BATL32_601_V)) {
- return 0;
- }
-
- return BATU32_BEPI & ~((batl & BATL32_601_BL) << 17);
-}
-
-static int hash32_bat_601_prot(int mmu_idx,
- target_ulong batu, target_ulong batl)
-{
- int key, pp;
-
- pp = batu & BATU32_601_PP;
- if (mmuidx_pr(mmu_idx) == 0) {
- key = !!(batu & BATU32_601_KS);
- } else {
- key = !!(batu & BATU32_601_KP);
- }
- return ppc_hash32_pp_prot(key, pp, 0);
-}
-
static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea,
MMUAccessType access_type, int *prot,
int mmu_idx)
@@ -172,11 +148,7 @@ static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea,
target_ulong batl = BATlt[i];
target_ulong mask;
- if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
- mask = hash32_bat_601_size(cpu, batu, batl);
- } else {
- mask = hash32_bat_size(mmu_idx, batu, batl);
- }
+ mask = hash32_bat_size(mmu_idx, batu, batl);
LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
" BATl " TARGET_FMT_lx "\n", __func__,
ifetch ? 'I' : 'D', i, ea, batu, batl);
@@ -184,11 +156,7 @@ static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea,
if (mask && ((ea & mask) == (batu & BATU32_BEPI))) {
hwaddr raddr = (batl & mask) | (ea & ~mask);
- if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
- *prot = hash32_bat_601_prot(mmu_idx, batu, batl);
- } else {
- *prot = hash32_bat_prot(cpu, batu, batl);
- }
+ *prot = hash32_bat_prot(cpu, batu, batl);
return raddr & TARGET_PAGE_MASK;
}
@@ -231,18 +199,6 @@ static bool ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
- if ((sr & 0x1FF00000) >> 20 == 0x07f) {
- /*
- * Memory-forced I/O controller interface access
- *
- * If T=1 and BUID=x'07F', the 601 performs a memory access
- * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
- */
- *raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
- *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- return true;
- }
-
if (access_type == MMU_INST_FETCH) {
/* No code fetch is allowed in direct-store areas */
if (guest_visible) {
@@ -390,24 +346,24 @@ static hwaddr ppc_hash32_htab_lookup(PowerPCCPU *cpu,
ptem = (vsid << 7) | (pgidx >> 10);
/* Page address translation */
- qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
- " htab_mask " TARGET_FMT_plx
- " hash " TARGET_FMT_plx "\n",
+ qemu_log_mask(CPU_LOG_MMU, "htab_base " HWADDR_FMT_plx
+ " htab_mask " HWADDR_FMT_plx
+ " hash " HWADDR_FMT_plx "\n",
ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);
/* Primary PTEG lookup */
- qemu_log_mask(CPU_LOG_MMU, "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
+ qemu_log_mask(CPU_LOG_MMU, "0 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx
" vsid=%" PRIx32 " ptem=%" PRIx32
- " hash=" TARGET_FMT_plx "\n",
+ " hash=" HWADDR_FMT_plx "\n",
ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu),
vsid, ptem, hash);
pteg_off = get_pteg_offset32(cpu, hash);
pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 0, ptem, pte);
if (pte_offset == -1) {
/* Secondary PTEG lookup */
- qemu_log_mask(CPU_LOG_MMU, "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
+ qemu_log_mask(CPU_LOG_MMU, "1 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx
" vsid=%" PRIx32 " api=%" PRIx32
- " hash=" TARGET_FMT_plx "\n", ppc_hash32_hpt_base(cpu),
+ " hash=" HWADDR_FMT_plx "\n", ppc_hash32_hpt_base(cpu),
ppc_hash32_hpt_mask(cpu), vsid, ptem, ~hash);
pteg_off = get_pteg_offset32(cpu, ~hash);
pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 1, ptem, pte);
diff --git a/target/ppc/mmu-hash32.h b/target/ppc/mmu-hash32.h
index 3892b693d6..7119a63d97 100644
--- a/target/ppc/mmu-hash32.h
+++ b/target/ppc/mmu-hash32.h
@@ -34,15 +34,6 @@ bool ppc_hash32_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
#define BATL32_WIMG 0x00000078
#define BATL32_PP 0x00000003
-/* PowerPC 601 has slightly different BAT registers */
-
-#define BATU32_601_KS 0x00000008
-#define BATU32_601_KP 0x00000004
-#define BATU32_601_PP 0x00000003
-
-#define BATL32_601_V 0x00000040
-#define BATL32_601_BL 0x0000003f
-
/*
* Hash page table definitions
*/
diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
index 19832c4b46..d645c0bb94 100644
--- a/target/ppc/mmu-hash64.c
+++ b/target/ppc/mmu-hash64.c
@@ -101,7 +101,7 @@ void dump_slb(PowerPCCPU *cpu)
}
#ifdef CONFIG_TCG
-void helper_slbia(CPUPPCState *env, uint32_t ih)
+void helper_SLBIA(CPUPPCState *env, uint32_t ih)
{
PowerPCCPU *cpu = env_archcpu(env);
int starting_entry;
@@ -173,6 +173,33 @@ void helper_slbia(CPUPPCState *env, uint32_t ih)
}
}
+#if defined(TARGET_PPC64)
+void helper_SLBIAG(CPUPPCState *env, target_ulong rs, uint32_t l)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+ int n;
+
+ /*
+ * slbiag must always flush all TLB (which is equivalent to ERAT in ppc
+ * architecture). Matching on SLB_ESID_V is not good enough, because slbmte
+ * can overwrite a valid SLB without flushing its lookaside information.
+ *
+ * It would be possible to keep the TLB in synch with the SLB by flushing
+ * when a valid entry is overwritten by slbmte, and therefore slbiag would
+ * not have to flush unless it evicts a valid SLB entry. However it is
+ * expected that slbmte is more common than slbiag, and slbiag is usually
+ * going to evict valid SLB entries, so that tradeoff is unlikely to be a
+ * good one.
+ */
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
+
+ for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
+ ppc_slb_t *slb = &env->slb[n];
+ slb->esid &= ~SLB_ESID_V;
+ }
+}
+#endif
+
static void __helper_slbie(CPUPPCState *env, target_ulong addr,
target_ulong global)
{
@@ -197,12 +224,12 @@ static void __helper_slbie(CPUPPCState *env, target_ulong addr,
}
}
-void helper_slbie(CPUPPCState *env, target_ulong addr)
+void helper_SLBIE(CPUPPCState *env, target_ulong addr)
{
__helper_slbie(env, addr, false);
}
-void helper_slbieg(CPUPPCState *env, target_ulong addr)
+void helper_SLBIEG(CPUPPCState *env, target_ulong addr)
{
__helper_slbie(env, addr, true);
}
@@ -309,7 +336,7 @@ static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
return 0;
}
-void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
+void helper_SLBMTE(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
PowerPCCPU *cpu = env_archcpu(env);
@@ -319,7 +346,7 @@ void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
}
}
-target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
+target_ulong helper_SLBMFEE(CPUPPCState *env, target_ulong rb)
{
PowerPCCPU *cpu = env_archcpu(env);
target_ulong rt = 0;
@@ -331,7 +358,7 @@ target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
return rt;
}
-target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb)
+target_ulong helper_SLBFEE(CPUPPCState *env, target_ulong rb)
{
PowerPCCPU *cpu = env_archcpu(env);
target_ulong rt = 0;
@@ -343,7 +370,7 @@ target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb)
return rt;
}
-target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
+target_ulong helper_SLBMFEV(CPUPPCState *env, target_ulong rb)
{
PowerPCCPU *cpu = env_archcpu(env);
target_ulong rt = 0;
@@ -670,15 +697,15 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
/* Page address translation */
qemu_log_mask(CPU_LOG_MMU,
- "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
- " hash " TARGET_FMT_plx "\n",
+ "htab_base " HWADDR_FMT_plx " htab_mask " HWADDR_FMT_plx
+ " hash " HWADDR_FMT_plx "\n",
ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);
/* Primary PTEG lookup */
qemu_log_mask(CPU_LOG_MMU,
- "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
+ "0 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx
" vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
- " hash=" TARGET_FMT_plx "\n",
+ " hash=" HWADDR_FMT_plx "\n",
ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
vsid, ptem, hash);
ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);
@@ -687,9 +714,9 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
/* Secondary PTEG lookup */
ptem |= HPTE64_V_SECONDARY;
qemu_log_mask(CPU_LOG_MMU,
- "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
+ "1 htab=" HWADDR_FMT_plx "/" HWADDR_FMT_plx
" vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
- " hash=" TARGET_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
+ " hash=" HWADDR_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);
ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
@@ -743,7 +770,8 @@ static bool ppc_hash64_use_vrma(CPUPPCState *env)
}
}
-static void ppc_hash64_set_isi(CPUState *cs, int mmu_idx, uint64_t error_code)
+static void ppc_hash64_set_isi(CPUState *cs, int mmu_idx, uint64_t slb_vsid,
+ uint64_t error_code)
{
CPUPPCState *env = &POWERPC_CPU(cs)->env;
bool vpm;
@@ -755,13 +783,15 @@ static void ppc_hash64_set_isi(CPUState *cs, int mmu_idx, uint64_t error_code)
}
if (vpm && !mmuidx_hv(mmu_idx)) {
cs->exception_index = POWERPC_EXCP_HISI;
+ env->spr[SPR_ASDR] = slb_vsid;
} else {
cs->exception_index = POWERPC_EXCP_ISI;
}
env->error_code = error_code;
}
-static void ppc_hash64_set_dsi(CPUState *cs, int mmu_idx, uint64_t dar, uint64_t dsisr)
+static void ppc_hash64_set_dsi(CPUState *cs, int mmu_idx, uint64_t slb_vsid,
+ uint64_t dar, uint64_t dsisr)
{
CPUPPCState *env = &POWERPC_CPU(cs)->env;
bool vpm;
@@ -775,6 +805,7 @@ static void ppc_hash64_set_dsi(CPUState *cs, int mmu_idx, uint64_t dar, uint64_t
cs->exception_index = POWERPC_EXCP_HDSI;
env->spr[SPR_HDAR] = dar;
env->spr[SPR_HDSISR] = dsisr;
+ env->spr[SPR_ASDR] = slb_vsid;
} else {
cs->exception_index = POWERPC_EXCP_DSI;
env->spr[SPR_DAR] = dar;
@@ -786,7 +817,7 @@ static void ppc_hash64_set_dsi(CPUState *cs, int mmu_idx, uint64_t dar, uint64_t
static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
{
- hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 16;
+ hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_R;
if (cpu->vhyp) {
PPCVirtualHypervisorClass *vhc =
@@ -803,7 +834,7 @@ static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
{
- hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 15;
+ hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_C;
if (cpu->vhyp) {
PPCVirtualHypervisorClass *vhc =
@@ -843,12 +874,46 @@ static target_ulong rmls_limit(PowerPCCPU *cpu)
return rma_sizes[rmls];
}
-static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
+/* Return the LLP in SLB_VSID format */
+static uint64_t get_vrma_llp(PowerPCCPU *cpu)
{
CPUPPCState *env = &cpu->env;
- target_ulong lpcr = env->spr[SPR_LPCR];
- uint32_t vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
- target_ulong vsid = SLB_VSID_VRMA | ((vrmasd << 4) & SLB_VSID_LLP_MASK);
+ uint64_t llp;
+
+ if (env->mmu_model == POWERPC_MMU_3_00) {
+ ppc_v3_pate_t pate;
+ uint64_t ps, l, lp;
+
+ /*
+ * ISA v3.0 removes the LPCR[VRMASD] field and puts the VRMA base
+ * page size (L||LP equivalent) in the PS field in the HPT partition
+ * table entry.
+ */
+ if (!ppc64_v3_get_pate(cpu, cpu->env.spr[SPR_LPIDR], &pate)) {
+ error_report("Bad VRMA with no partition table entry");
+ return 0;
+ }
+ ps = PATE0_GET_PS(pate.dw0);
+ /* PS has L||LP in 3 consecutive bits, put them into SLB LLP format */
+ l = (ps >> 2) & 0x1;
+ lp = ps & 0x3;
+ llp = (l << SLB_VSID_L_SHIFT) | (lp << SLB_VSID_LP_SHIFT);
+
+ } else {
+ uint64_t lpcr = env->spr[SPR_LPCR];
+ target_ulong vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
+
+ /* VRMASD LLP matches SLB format, just shift and mask it */
+ llp = (vrmasd << SLB_VSID_LP_SHIFT) & SLB_VSID_LLP_MASK;
+ }
+
+ return llp;
+}
+
+static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
+{
+ uint64_t llp = get_vrma_llp(cpu);
+ target_ulong vsid = SLB_VSID_VRMA | llp;
int i;
for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
@@ -866,8 +931,7 @@ static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
}
}
- error_report("Bad page size encoding in LPCR[VRMASD]; LPCR=0x"
- TARGET_FMT_lx, lpcr);
+ error_report("Bad VRMA page size encoding 0x" TARGET_FMT_lx, llp);
return -1;
}
@@ -936,13 +1000,13 @@ bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
}
switch (access_type) {
case MMU_INST_FETCH:
- ppc_hash64_set_isi(cs, mmu_idx, SRR1_PROTFAULT);
+ ppc_hash64_set_isi(cs, mmu_idx, 0, SRR1_PROTFAULT);
break;
case MMU_DATA_LOAD:
- ppc_hash64_set_dsi(cs, mmu_idx, eaddr, DSISR_PROTFAULT);
+ ppc_hash64_set_dsi(cs, mmu_idx, 0, eaddr, DSISR_PROTFAULT);
break;
case MMU_DATA_STORE:
- ppc_hash64_set_dsi(cs, mmu_idx, eaddr,
+ ppc_hash64_set_dsi(cs, mmu_idx, 0, eaddr,
DSISR_PROTFAULT | DSISR_ISSTORE);
break;
default:
@@ -995,7 +1059,7 @@ bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
/* 3. Check for segment level no-execute violation */
if (access_type == MMU_INST_FETCH && (slb->vsid & SLB_VSID_N)) {
if (guest_visible) {
- ppc_hash64_set_isi(cs, mmu_idx, SRR1_NOEXEC_GUARD);
+ ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, SRR1_NOEXEC_GUARD);
}
return false;
}
@@ -1008,13 +1072,14 @@ bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
}
switch (access_type) {
case MMU_INST_FETCH:
- ppc_hash64_set_isi(cs, mmu_idx, SRR1_NOPTE);
+ ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, SRR1_NOPTE);
break;
case MMU_DATA_LOAD:
- ppc_hash64_set_dsi(cs, mmu_idx, eaddr, DSISR_NOPTE);
+ ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr, DSISR_NOPTE);
break;
case MMU_DATA_STORE:
- ppc_hash64_set_dsi(cs, mmu_idx, eaddr, DSISR_NOPTE | DSISR_ISSTORE);
+ ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr,
+ DSISR_NOPTE | DSISR_ISSTORE);
break;
default:
g_assert_not_reached();
@@ -1048,7 +1113,7 @@ bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
if (PAGE_EXEC & ~amr_prot) {
srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */
}
- ppc_hash64_set_isi(cs, mmu_idx, srr1);
+ ppc_hash64_set_isi(cs, mmu_idx, slb->vsid, srr1);
} else {
int dsisr = 0;
if (need_prot & ~pp_prot) {
@@ -1060,7 +1125,7 @@ bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
if (need_prot & ~amr_prot) {
dsisr |= DSISR_AMR;
}
- ppc_hash64_set_dsi(cs, mmu_idx, eaddr, dsisr);
+ ppc_hash64_set_dsi(cs, mmu_idx, slb->vsid, eaddr, dsisr);
}
return false;
}
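
get_vrma_llp() above derives the VRMA page-size encoding from two places depending on the MMU model: LPCR[VRMASD] before ISA v3.00, and the PS field of the HPT partition table entry from v3.00 on. The PS-to-SLB conversion reduces to the following sketch:

static uint64_t ps_to_slb_llp(uint64_t ps)
{
    uint64_t l  = (ps >> 2) & 0x1;   /* top bit of PS         -> SLB L  */
    uint64_t lp = ps & 0x3;          /* bottom two bits of PS -> SLB LP */

    return (l << SLB_VSID_L_SHIFT) | (lp << SLB_VSID_LP_SHIFT);
}
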
diff --git a/target/ppc/mmu-hash64.h b/target/ppc/mmu-hash64.h
index c5b2f97ff7..de653fcae5 100644
--- a/target/ppc/mmu-hash64.h
+++ b/target/ppc/mmu-hash64.h
@@ -41,8 +41,10 @@ void ppc_hash64_finalize(PowerPCCPU *cpu);
#define SLB_VSID_KP 0x0000000000000400ULL
#define SLB_VSID_N 0x0000000000000200ULL /* no-execute */
#define SLB_VSID_L 0x0000000000000100ULL
+#define SLB_VSID_L_SHIFT PPC_BIT_NR(55)
#define SLB_VSID_C 0x0000000000000080ULL /* class */
#define SLB_VSID_LP 0x0000000000000030ULL
+#define SLB_VSID_LP_SHIFT PPC_BIT_NR(59)
#define SLB_VSID_ATTR 0x0000000000000FFFULL
#define SLB_VSID_LLP_MASK (SLB_VSID_L | SLB_VSID_LP)
#define SLB_VSID_4K 0x0000000000000000ULL
@@ -58,6 +60,9 @@ void ppc_hash64_finalize(PowerPCCPU *cpu);
#define SDR_64_HTABSIZE 0x000000000000001FULL
#define PATE0_HTABORG 0x0FFFFFFFFFFC0000ULL
+#define PATE0_PS PPC_BITMASK(56, 58)
+#define PATE0_GET_PS(dw0) (((dw0) & PATE0_PS) >> PPC_BIT_NR(58))
+
#define HPTES_PER_GROUP 8
#define HASH_PTE_SIZE_64 16
#define HASH_PTEG_SIZE_64 (HASH_PTE_SIZE_64 * HPTES_PER_GROUP)
@@ -97,6 +102,11 @@ void ppc_hash64_finalize(PowerPCCPU *cpu);
#define HPTE64_V_1TB_SEG 0x4000000000000000ULL
#define HPTE64_V_VRMA_MASK 0x4001ffffff000000ULL
+/* PTE offsets */
+#define HPTE64_DW1 (HASH_PTE_SIZE_64 / 2)
+#define HPTE64_DW1_R (HPTE64_DW1 + 6)
+#define HPTE64_DW1_C (HPTE64_DW1 + 7)
+
/* Format changes for ARCH v3 */
#define HPTE64_V_COMMON_BITS 0x000fffffffffffffULL
#define HPTE64_R_3_0_SSIZE_SHIFT 58
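
The new HPTE64_DW1_* offsets name the bytes of a 16-byte HPTE that the R/C update helpers store to: the second doubleword starts at byte 8, and with the HPTE held big-endian in memory its R and C bits fall in bytes 14 and 15. A sketch of the resulting address arithmetic, with base and ptex as in ppc_hash64_set_r()/_c():

/* Sketch: byte addresses touched when setting the Reference/Change bits. */
hwaddr r_byte = base + ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_R;  /* byte 14 */
hwaddr c_byte = base + ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_C;  /* byte 15 */
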
diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
index 5b0e62e676..690dff7a49 100644
--- a/target/ppc/mmu-radix64.c
+++ b/target/ppc/mmu-radix64.c
@@ -32,7 +32,12 @@ static bool ppc_radix64_get_fully_qualified_addr(const CPUPPCState *env,
vaddr eaddr,
uint64_t *lpid, uint64_t *pid)
{
- if (msr_hv) { /* MSR[HV] -> Hypervisor/bare metal */
+ /* When EA(2:11) are nonzero, raise a segment interrupt */
+ if (eaddr & ~R_EADDR_VALID_MASK) {
+ return false;
+ }
+
+ if (FIELD_EX64(env->msr, MSR, HV)) { /* MSR[HV] -> Hypervisor/bare metal */
switch (eaddr & R_EADDR_QUADRANT) {
case R_EADDR_QUADRANT0:
*lpid = 0;
@@ -97,12 +102,22 @@ static void ppc_radix64_raise_segi(PowerPCCPU *cpu, MMUAccessType access_type,
env->error_code = 0;
}
+static inline const char *access_str(MMUAccessType access_type)
+{
+ return access_type == MMU_DATA_LOAD ? "reading" :
+ (access_type == MMU_DATA_STORE ? "writing" : "execute");
+}
+
static void ppc_radix64_raise_si(PowerPCCPU *cpu, MMUAccessType access_type,
vaddr eaddr, uint32_t cause)
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
+ qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" cause %08x\n",
+ __func__, access_str(access_type),
+ eaddr, cause);
+
switch (access_type) {
case MMU_INST_FETCH:
/* Instruction Storage Interrupt */
@@ -130,6 +145,18 @@ static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, MMUAccessType access_type,
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
+ env->error_code = 0;
+ if (cause & DSISR_PRTABLE_FAULT) {
+ /* HDSI PRTABLE_FAULT gets the originating access type in error_code */
+ env->error_code = access_type;
+ access_type = MMU_DATA_LOAD;
+ }
+
+ qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" 0x%"
+ HWADDR_PRIx" cause %08x\n",
+ __func__, access_str(access_type),
+ eaddr, g_raddr, cause);
+
switch (access_type) {
case MMU_INST_FETCH:
/* H Instruction Storage Interrupt */
@@ -146,7 +173,6 @@ static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, MMUAccessType access_type,
env->spr[SPR_HDSISR] = cause;
env->spr[SPR_HDAR] = eaddr;
env->spr[SPR_ASDR] = g_raddr;
- env->error_code = 0;
break;
default:
g_assert_not_reached();
@@ -171,12 +197,13 @@ static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type,
}
/* Determine permissions allowed by Encoded Access Authority */
- if (!partition_scoped && (pte & R_PTE_EAA_PRIV) && msr_pr) {
+ if (!partition_scoped && (pte & R_PTE_EAA_PRIV) &&
+ FIELD_EX64(env->msr, MSR, PR)) {
*prot = 0;
} else if (mmuidx_pr(mmu_idx) || (pte & R_PTE_EAA_PRIV) ||
partition_scoped) {
*prot = ppc_radix64_get_prot_eaa(pte);
- } else { /* !msr_pr && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */
+ } else { /* !MSR_PR && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */
*prot = ppc_radix64_get_prot_eaa(pte);
*prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */
}
@@ -184,46 +211,76 @@ static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type,
/* Check if requested access type is allowed */
need_prot = prot_for_access_type(access_type);
if (need_prot & ~*prot) { /* Page Protected for that Access */
- *fault_cause |= DSISR_PROTFAULT;
+ *fault_cause |= access_type == MMU_INST_FETCH ? SRR1_NOEXEC_GUARD :
+ DSISR_PROTFAULT;
return true;
}
return false;
}
-static void ppc_radix64_set_rc(PowerPCCPU *cpu, MMUAccessType access_type,
- uint64_t pte, hwaddr pte_addr, int *prot)
+static int ppc_radix64_check_rc(MMUAccessType access_type, uint64_t pte)
{
- CPUState *cs = CPU(cpu);
- uint64_t npte;
+ switch (access_type) {
+ case MMU_DATA_STORE:
+ if (!(pte & R_PTE_C)) {
+ break;
+ }
+ /* fall through */
+ case MMU_INST_FETCH:
+ case MMU_DATA_LOAD:
+ if (!(pte & R_PTE_R)) {
+ break;
+ }
- npte = pte | R_PTE_R; /* Always set reference bit */
+ /* R/C bits are already set appropriately for this access */
+ return 0;
+ }
- if (access_type == MMU_DATA_STORE) { /* Store/Write */
- npte |= R_PTE_C; /* Set change bit */
- } else {
- /*
- * Treat the page as read-only for now, so that a later write
- * will pass through this function again to set the C bit.
- */
- *prot &= ~PAGE_WRITE;
+ return 1;
+}
+
+static bool ppc_radix64_is_valid_level(int level, int psize, uint64_t nls)
+{
+ bool ret;
+
+ /*
+ * Check if this is a valid level, according to POWER9 and POWER10
+ * Processor User's Manuals, sections 4.10.4.1 and 5.10.6.1, respectively:
+ * Supported Radix Tree Configurations and Resulting Page Sizes.
+ *
+ * Note: these checks are specific to POWER9 and POWER10 CPUs. Any future
+ * CPUs that supports a different Radix MMU configuration will need their
+ * own implementation.
+ */
+ switch (level) {
+ case 0: /* Root Page Dir */
+ ret = psize == 52 && nls == 13;
+ break;
+ case 1:
+ case 2:
+ ret = nls == 9;
+ break;
+ case 3:
+ ret = nls == 9 || nls == 5;
+ break;
+ default:
+ ret = false;
}
- if (pte ^ npte) { /* If pte has changed then write it back */
- stq_phys(cs->as, pte_addr, npte);
+ if (unlikely(!ret)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "invalid radix configuration: "
+ "level %d size %d nls %"PRIu64"\n",
+ level, psize, nls);
}
+ return ret;
}
static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr,
uint64_t *pte_addr, uint64_t *nls,
int *psize, uint64_t *pte, int *fault_cause)
{
- uint64_t index, pde;
-
- if (*nls < 5) { /* Directory maps less than 2**5 entries */
- *fault_cause |= DSISR_R_BADCONFIG;
- return 1;
- }
+ uint64_t index, mask, nlb, pde;
/* Read page <directory/table> entry from guest address space */
pde = ldq_phys(as, *pte_addr);
@@ -238,7 +295,17 @@ static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr,
*nls = pde & R_PDE_NLS;
index = eaddr >> (*psize - *nls); /* Shift */
index &= ((1UL << *nls) - 1); /* Mask */
- *pte_addr = (pde & R_PDE_NLB) + (index * sizeof(pde));
+ nlb = pde & R_PDE_NLB;
+ mask = MAKE_64BIT_MASK(0, *nls + 3);
+
+ if (nlb & mask) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: misaligned page dir/table base: 0x%" PRIx64
+ " page dir size: 0x%" PRIx64 "\n",
+ __func__, nlb, mask + 1);
+ nlb &= ~mask;
+ }
+ *pte_addr = nlb + index * sizeof(pde);
}
return 0;
}
@@ -248,19 +315,30 @@ static int ppc_radix64_walk_tree(AddressSpace *as, vaddr eaddr,
hwaddr *raddr, int *psize, uint64_t *pte,
int *fault_cause, hwaddr *pte_addr)
{
- uint64_t index, pde, rpn , mask;
+ uint64_t index, pde, rpn, mask;
+ int level = 0;
- if (nls < 5) { /* Directory maps less than 2**5 entries */
- *fault_cause |= DSISR_R_BADCONFIG;
- return 1;
+ index = eaddr >> (*psize - nls); /* Shift */
+ index &= ((1UL << nls) - 1); /* Mask */
+ mask = MAKE_64BIT_MASK(0, nls + 3);
+
+ if (base_addr & mask) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: misaligned page dir base: 0x%" PRIx64
+ " page dir size: 0x%" PRIx64 "\n",
+ __func__, base_addr, mask + 1);
+ base_addr &= ~mask;
}
+ *pte_addr = base_addr + index * sizeof(pde);
- index = eaddr >> (*psize - nls); /* Shift */
- index &= ((1UL << nls) - 1); /* Mask */
- *pte_addr = base_addr + (index * sizeof(pde));
do {
int ret;
+ if (!ppc_radix64_is_valid_level(level++, *psize, nls)) {
+ *fault_cause |= DSISR_R_BADCONFIG;
+ return 1;
+ }
+
ret = ppc_radix64_next_level(as, eaddr, pte_addr, &nls, psize, &pde,
fault_cause);
if (ret) {
@@ -284,7 +362,7 @@ static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
if (!(pate->dw0 & PATE0_HR)) {
return false;
}
- if (lpid == 0 && !msr_hv) {
+ if (lpid == 0 && !FIELD_EX64(env->msr, MSR, HV)) {
return false;
}
if ((pate->dw0 & PATE1_R_PRTS) < 5) {
@@ -295,17 +373,32 @@ static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
}
static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu,
- MMUAccessType access_type,
+ MMUAccessType orig_access_type,
vaddr eaddr, hwaddr g_raddr,
ppc_v3_pate_t pate,
hwaddr *h_raddr, int *h_prot,
int *h_page_size, bool pde_addr,
- int mmu_idx, bool guest_visible)
+ int mmu_idx, uint64_t lpid,
+ bool guest_visible)
{
+ MMUAccessType access_type = orig_access_type;
int fault_cause = 0;
hwaddr pte_addr;
uint64_t pte;
+ if (pde_addr) {
+ /*
+ * Translation of process-scoped tables/directories is performed as
+ * a read-access.
+ */
+ access_type = MMU_DATA_LOAD;
+ }
+
+ qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
+ " mmu_idx %u 0x%"HWADDR_PRIx"\n",
+ __func__, access_str(access_type),
+ eaddr, mmu_idx, g_raddr);
+
*h_page_size = PRTBE_R_GET_RTS(pate.dw0);
/* No valid pte or access denied due to protection */
if (ppc_radix64_walk_tree(CPU(cpu)->as, g_raddr, pate.dw0 & PRTBE_R_RPDB,
@@ -317,35 +410,86 @@ static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu,
fault_cause |= DSISR_PRTABLE_FAULT;
}
if (guest_visible) {
- ppc_radix64_raise_hsi(cpu, access_type, eaddr, g_raddr, fault_cause);
+ ppc_radix64_raise_hsi(cpu, orig_access_type,
+ eaddr, g_raddr, fault_cause);
}
return 1;
}
if (guest_visible) {
- ppc_radix64_set_rc(cpu, access_type, pte, pte_addr, h_prot);
+ if (ppc_radix64_check_rc(access_type, pte)) {
+ /*
+ * Per ISA 3.1 Book III, 7.5.3 and 7.5.5, failure to set R/C during
+ * partition-scoped translation when effLPID = 0 results in normal
+ * (non-Hypervisor) Data and Instruction Storage Interrupts
+ * respectively.
+ *
+ * ISA 3.0 is ambiguous about this, but tests on POWER9 hardware
+ * seem to exhibit the same behavior.
+ */
+ if (lpid > 0) {
+ ppc_radix64_raise_hsi(cpu, access_type, eaddr, g_raddr,
+ DSISR_ATOMIC_RC);
+ } else {
+ ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_ATOMIC_RC);
+ }
+ return 1;
+ }
}
return 0;
}
+/*
+ * The spapr vhc has a flat partition scope provided by qemu memory when
+ * not nested.
+ *
+ * When running a nested guest, the addressing is 2-level radix on top of the
+ * vhc memory, so it works practically identically to the bare metal 2-level
+ * radix. So that code is selected directly. A cleaner and more flexible nested
+ * hypervisor implementation would allow the vhc to provide a ->nested_xlate()
+ * function but that is not required for the moment.
+ */
+static bool vhyp_flat_addressing(PowerPCCPU *cpu)
+{
+ if (cpu->vhyp) {
+ return !vhyp_cpu_in_nested(cpu);
+ }
+ return false;
+}
+
static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
MMUAccessType access_type,
vaddr eaddr, uint64_t pid,
ppc_v3_pate_t pate, hwaddr *g_raddr,
int *g_prot, int *g_page_size,
- int mmu_idx, bool guest_visible)
+ int mmu_idx, uint64_t lpid,
+ bool guest_visible)
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
- uint64_t offset, size, prtbe_addr, prtbe0, base_addr, nls, index, pte;
+ uint64_t offset, size, prtb, prtbe_addr, prtbe0, base_addr, nls, index, pte;
int fault_cause = 0, h_page_size, h_prot;
hwaddr h_raddr, pte_addr;
int ret;
+ qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
+ " mmu_idx %u pid %"PRIu64"\n",
+ __func__, access_str(access_type),
+ eaddr, mmu_idx, pid);
+
+ prtb = (pate.dw1 & PATE1_R_PRTB);
+ size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
+ if (prtb & (size - 1)) {
+ /* Process Table not properly aligned */
+ if (guest_visible) {
+ ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_R_BADCONFIG);
+ }
+ return 1;
+ }
+
/* Index Process Table by PID to Find Corresponding Process Table Entry */
offset = pid * sizeof(struct prtb_entry);
- size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
if (offset >= size) {
/* offset exceeds size of the process table */
if (guest_visible) {
@@ -353,9 +497,9 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
}
return 1;
}
- prtbe_addr = (pate.dw1 & PATE1_R_PRTB) + offset;
+ prtbe_addr = prtb + offset;
- if (cpu->vhyp) {
+ if (vhyp_flat_addressing(cpu)) {
prtbe0 = ldq_phys(cs->as, prtbe_addr);
} else {
/*
@@ -366,11 +510,11 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
* is only used to translate the effective addresses of the
* process table entries.
*/
- ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, prtbe_addr,
- pate, &h_raddr, &h_prot,
- &h_page_size, true,
- /* mmu_idx is 5 because we're translating from hypervisor scope */
- 5, guest_visible);
+ /* mmu_idx is 5 because we're translating from hypervisor scope */
+ ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
+ prtbe_addr, pate, &h_raddr,
+ &h_prot, &h_page_size, true,
+ 5, lpid, guest_visible);
if (ret) {
return ret;
}
@@ -381,7 +525,7 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
*g_page_size = PRTBE_R_GET_RTS(prtbe0);
base_addr = prtbe0 & PRTBE_R_RPDB;
nls = prtbe0 & PRTBE_R_RPDS;
- if (msr_hv || cpu->vhyp) {
+ if (FIELD_EX64(env->msr, MSR, HV) || vhyp_flat_addressing(cpu)) {
/*
* Can treat process table addresses as real addresses
*/
@@ -397,6 +541,7 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
}
} else {
uint64_t rpn, mask;
+ int level = 0;
index = (eaddr & R_EADDR_MASK) >> (*g_page_size - nls); /* Shift */
index &= ((1UL << nls) - 1); /* Mask */
@@ -407,17 +552,25 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
* translation
*/
do {
- ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, pte_addr,
- pate, &h_raddr, &h_prot,
- &h_page_size, true,
/* mmu_idx is 5 because we're translating from hypervisor scope */
- 5, guest_visible);
+ ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
+ pte_addr, pate, &h_raddr,
+ &h_prot, &h_page_size,
+ true, 5, lpid,
+ guest_visible);
if (ret) {
return ret;
}
- ret = ppc_radix64_next_level(cs->as, eaddr & R_EADDR_MASK, &h_raddr,
- &nls, g_page_size, &pte, &fault_cause);
+ if (!ppc_radix64_is_valid_level(level++, *g_page_size, nls)) {
+ fault_cause |= DSISR_R_BADCONFIG;
+ ret = 1;
+ } else {
+ ret = ppc_radix64_next_level(cs->as, eaddr & R_EADDR_MASK,
+ &h_raddr, &nls, g_page_size,
+ &pte, &fault_cause);
+ }
+
if (ret) {
/* No valid pte */
if (guest_visible) {
@@ -445,7 +598,11 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
}
if (guest_visible) {
- ppc_radix64_set_rc(cpu, access_type, pte, pte_addr, g_prot);
+ /* R/C bits not appropriately set for access */
+ if (ppc_radix64_check_rc(access_type, pte)) {
+ ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_ATOMIC_RC);
+ return 1;
+ }
}
return 0;
@@ -468,9 +625,10 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
* | = On | Process Scoped | Scoped |
* +-------------+----------------+---------------+
*/
-bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
- hwaddr *raddr, int *psizep, int *protp, int mmu_idx,
- bool guest_visible)
+static bool ppc_radix64_xlate_impl(PowerPCCPU *cpu, vaddr eaddr,
+ MMUAccessType access_type, hwaddr *raddr,
+ int *psizep, int *protp, int mmu_idx,
+ bool guest_visible)
{
CPUPPCState *env = &cpu->env;
uint64_t lpid, pid;
@@ -484,7 +642,7 @@ bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
relocation = !mmuidx_real(mmu_idx);
/* HV or virtual hypervisor Real Mode Access */
- if (!relocation && (mmuidx_hv(mmu_idx) || cpu->vhyp)) {
+ if (!relocation && (mmuidx_hv(mmu_idx) || vhyp_flat_addressing(cpu))) {
/* In real mode top 4 effective addr bits (mostly) ignored */
*raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
@@ -517,21 +675,29 @@ bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
return false;
}
- /* Get Process Table */
+ /* Get Partition Table */
if (cpu->vhyp) {
PPCVirtualHypervisorClass *vhc;
vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
- vhc->get_pate(cpu->vhyp, &pate);
+ if (!vhc->get_pate(cpu->vhyp, cpu, lpid, &pate)) {
+ if (guest_visible) {
+ ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
+ DSISR_R_BADCONFIG);
+ }
+ return false;
+ }
} else {
if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
if (guest_visible) {
- ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_NOPTE);
+ ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
+ DSISR_R_BADCONFIG);
}
return false;
}
if (!validate_pate(cpu, lpid, &pate)) {
if (guest_visible) {
- ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_R_BADCONFIG);
+ ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
+ DSISR_R_BADCONFIG);
}
return false;
}
@@ -551,7 +717,8 @@ bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
if (relocation) {
int ret = ppc_radix64_process_scoped_xlate(cpu, access_type, eaddr, pid,
pate, &g_raddr, &prot,
- &psize, mmu_idx, guest_visible);
+ &psize, mmu_idx, lpid,
+ guest_visible);
if (ret) {
return false;
}
@@ -561,7 +728,7 @@ bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
g_raddr = eaddr & R_EADDR_MASK;
}
- if (cpu->vhyp) {
+ if (vhyp_flat_addressing(cpu)) {
*raddr = g_raddr;
} else {
/*
@@ -575,7 +742,8 @@ bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
g_raddr, pate, raddr,
&prot, &psize, false,
- mmu_idx, guest_visible);
+ mmu_idx, lpid,
+ guest_visible);
if (ret) {
return false;
}
@@ -588,3 +756,22 @@ bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
return true;
}
+
+bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
+ hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
+ bool guest_visible)
+{
+ bool ret = ppc_radix64_xlate_impl(cpu, eaddr, access_type, raddrp,
+ psizep, protp, mmu_idx, guest_visible);
+
+ qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
+ " mmu_idx %u (prot %c%c%c) -> 0x%"HWADDR_PRIx"\n",
+ __func__, access_str(access_type),
+ eaddr, mmu_idx,
+ *protp & PAGE_READ ? 'r' : '-',
+ *protp & PAGE_WRITE ? 'w' : '-',
+ *protp & PAGE_EXEC ? 'x' : '-',
+ *raddrp);
+
+ return ret;
+}
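
ppc_radix64_check_rc() above replaces the old ppc_radix64_set_rc(): rather than QEMU updating the R/C bits itself, translation now fails with DSISR_ATOMIC_RC when they are not already set for the access. The requirement it encodes reduces to this sketch (the function name is illustrative):

static bool rc_bits_ok(MMUAccessType access_type, uint64_t pte)
{
    if (!(pte & R_PTE_R)) {
        return false;               /* every access needs R set */
    }
    if (access_type == MMU_DATA_STORE && !(pte & R_PTE_C)) {
        return false;               /* stores additionally need C set */
    }
    return true;
}
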
diff --git a/target/ppc/mmu-radix64.h b/target/ppc/mmu-radix64.h
index b70357cf34..4c768aa5cc 100644
--- a/target/ppc/mmu-radix64.h
+++ b/target/ppc/mmu-radix64.h
@@ -5,6 +5,7 @@
/* Radix Quadrants */
#define R_EADDR_MASK 0x3FFFFFFFFFFFFFFF
+#define R_EADDR_VALID_MASK 0xC00FFFFFFFFFFFFF
#define R_EADDR_QUADRANT 0xC000000000000000
#define R_EADDR_QUADRANT0 0x0000000000000000
#define R_EADDR_QUADRANT1 0x4000000000000000
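
R_EADDR_VALID_MASK keeps the two quadrant bits and the low 52 address bits; anything set outside it corresponds to EA bits 2:11 and makes the effective address invalid, which is what the new check in ppc_radix64_get_fully_qualified_addr() tests. As a sketch:

static bool radix_ea_is_valid(uint64_t eaddr)
{
    /* True when EA(2:11) are all zero. */
    return (eaddr & ~R_EADDR_VALID_MASK) == 0;
}
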
diff --git a/target/ppc/mmu_common.c b/target/ppc/mmu_common.c
index 754509e556..751403f1c8 100644
--- a/target/ppc/mmu_common.c
+++ b/target/ppc/mmu_common.c
@@ -28,35 +28,12 @@
#include "exec/log.h"
#include "helper_regs.h"
#include "qemu/error-report.h"
-#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "mmu-radix64.h"
-/* #define DEBUG_MMU */
-/* #define DEBUG_BATS */
-/* #define DEBUG_SOFTWARE_TLB */
/* #define DUMP_PAGE_TABLES */
-/* #define FLUSH_ALL_TLBS */
-
-#ifdef DEBUG_MMU
-# define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0)
-#else
-# define LOG_MMU_STATE(cpu) do { } while (0)
-#endif
-
-#ifdef DEBUG_SOFTWARE_TLB
-# define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
-#else
-# define LOG_SWTLB(...) do { } while (0)
-#endif
-
-#ifdef DEBUG_BATS
-# define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
-#else
-# define LOG_BATS(...) do { } while (0)
-#endif
void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
{
@@ -231,18 +208,20 @@ static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
tlb = &env->tlb.tlb6[nr];
/* This test "emulates" the PTE index match for hardware TLBs */
if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
- LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx
- "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb,
- pte_is_valid(tlb->pte0) ? "valid" : "inval",
- tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
+ qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s [" TARGET_FMT_lx
+ " " TARGET_FMT_lx "] <> " TARGET_FMT_lx "\n",
+ nr, env->nb_tlb,
+ pte_is_valid(tlb->pte0) ? "valid" : "inval",
+ tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
continue;
}
- LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " "
- TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb,
- pte_is_valid(tlb->pte0) ? "valid" : "inval",
- tlb->EPN, eaddr, tlb->pte1,
- access_type == MMU_DATA_STORE ? 'S' : 'L',
- access_type == MMU_INST_FETCH ? 'I' : 'D');
+ qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s " TARGET_FMT_lx " <> "
+ TARGET_FMT_lx " " TARGET_FMT_lx " %c %c\n",
+ nr, env->nb_tlb,
+ pte_is_valid(tlb->pte0) ? "valid" : "inval",
+ tlb->EPN, eaddr, tlb->pte1,
+ access_type == MMU_DATA_STORE ? 'S' : 'L',
+ access_type == MMU_INST_FETCH ? 'I' : 'D');
switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
0, access_type)) {
case -3:
@@ -272,8 +251,9 @@ static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
}
if (best != -1) {
done:
- LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n",
- ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
+ qemu_log_mask(CPU_LOG_MMU, "found TLB at addr " HWADDR_FMT_plx
+ " prot=%01x ret=%d\n",
+ ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
/* Update page flags */
pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type);
}
@@ -292,8 +272,8 @@ static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp,
bl = (*BATu & 0x00001FFC) << 15;
valid = 0;
prot = 0;
- if (((msr_pr == 0) && (*BATu & 0x00000002)) ||
- ((msr_pr != 0) && (*BATu & 0x00000001))) {
+ if ((!FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000002)) ||
+ (FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000001))) {
valid = 1;
pp = *BATl & 0x00000003;
if (pp != 0) {
@@ -317,7 +297,7 @@ static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
int ret = -1;
bool ifetch = access_type == MMU_INST_FETCH;
- LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
+ qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
ifetch ? 'I' : 'D', virtual);
if (ifetch) {
BATlt = env->IBAT[1];
@@ -332,9 +312,9 @@ static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
BEPIu = *BATu & 0xF0000000;
BEPIl = *BATu & 0x0FFE0000;
bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
- LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
- " BATl " TARGET_FMT_lx "\n", __func__,
- ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl);
+ qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " TARGET_FMT_lx " BATu "
+ TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n", __func__,
+ ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl);
if ((virtual & 0xF0000000) == BEPIu &&
((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
/* BAT matches */
@@ -347,32 +327,33 @@ static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
ctx->prot = prot;
ret = check_prot(ctx->prot, access_type);
if (ret == 0) {
- LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n",
- i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-',
- ctx->prot & PAGE_WRITE ? 'W' : '-');
+ qemu_log_mask(CPU_LOG_MMU, "BAT %d match: r " HWADDR_FMT_plx
+ " prot=%c%c\n", i, ctx->raddr,
+ ctx->prot & PAGE_READ ? 'R' : '-',
+ ctx->prot & PAGE_WRITE ? 'W' : '-');
}
break;
}
}
}
if (ret < 0) {
-#if defined(DEBUG_BATS)
if (qemu_log_enabled()) {
- LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual);
+ qemu_log_mask(CPU_LOG_MMU, "no BAT match for "
+ TARGET_FMT_lx ":\n", virtual);
for (i = 0; i < 4; i++) {
BATu = &BATut[i];
BATl = &BATlt[i];
BEPIu = *BATu & 0xF0000000;
BEPIl = *BATu & 0x0FFE0000;
bl = (*BATu & 0x00001FFC) << 15;
- LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
- " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
- TARGET_FMT_lx " " TARGET_FMT_lx "\n",
- __func__, ifetch ? 'I' : 'D', i, virtual,
- *BATu, *BATl, BEPIu, BEPIl, bl);
+ qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v "
+ TARGET_FMT_lx " BATu " TARGET_FMT_lx
+ " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
+ TARGET_FMT_lx " " TARGET_FMT_lx "\n",
+ __func__, ifetch ? 'I' : 'D', i, virtual,
+ *BATu, *BATl, BEPIu, BEPIl, bl);
}
}
-#endif
}
/* No hit */
return ret;
@@ -386,26 +367,29 @@ static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
PowerPCCPU *cpu = env_archcpu(env);
hwaddr hash;
target_ulong vsid;
- int ds, pr, target_page_bits;
+ int ds, target_page_bits;
+ bool pr;
int ret;
target_ulong sr, pgidx;
- pr = msr_pr;
+ pr = FIELD_EX64(env->msr, MSR, PR);
ctx->eaddr = eaddr;
sr = env->sr[eaddr >> 28];
- ctx->key = (((sr & 0x20000000) && (pr != 0)) ||
- ((sr & 0x40000000) && (pr == 0))) ? 1 : 0;
+ ctx->key = (((sr & 0x20000000) && pr) ||
+ ((sr & 0x40000000) && !pr)) ? 1 : 0;
ds = sr & 0x80000000 ? 1 : 0;
ctx->nx = sr & 0x10000000 ? 1 : 0;
vsid = sr & 0x00FFFFFF;
target_page_bits = TARGET_PAGE_BITS;
qemu_log_mask(CPU_LOG_MMU,
- "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx
- " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
- " ir=%d dr=%d pr=%d %d t=%d\n",
- eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir,
- (int)msr_dr, pr != 0 ? 1 : 0, access_type == MMU_DATA_STORE, type);
+ "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx
+ " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
+ " ir=%d dr=%d pr=%d %d t=%d\n",
+ eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr,
+ (int)FIELD_EX64(env->msr, MSR, IR),
+ (int)FIELD_EX64(env->msr, MSR, DR), pr ? 1 : 0,
+ access_type == MMU_DATA_STORE, type);
pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
hash = vsid ^ pgidx;
ctx->ptem = (vsid << 7) | (pgidx >> 10);
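
A note on the FIELD_EX64() conversions in this hunk: the old msr_pr/msr_ir/msr_dr
macros are replaced by a named bit-field extraction from env->msr. As a rough,
self-contained sketch of what such an extraction computes (the helper name, shift
and length below are illustrative, not the real QEMU registerfields macros or the
real MSR layout):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative 1-bit "PR" field; the real MSR bit position may differ. */
    #define EX_MSR_PR_SHIFT   14
    #define EX_MSR_PR_LENGTH  1

    /* Simplified version of a FIELD_EX64-style extraction. */
    static uint64_t field_ex64(uint64_t reg, unsigned shift, unsigned length)
    {
        return (reg >> shift) & ((1ULL << length) - 1);
    }

    int main(void)
    {
        uint64_t msr = 1ULL << EX_MSR_PR_SHIFT; /* problem state set */
        printf("PR=%llu\n", (unsigned long long)
               field_ex64(msr, EX_MSR_PR_SHIFT, EX_MSR_PR_LENGTH));
        return 0;
    }
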
@@ -418,9 +402,9 @@ static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
/* Check if instruction fetch is allowed, if needed */
if (type != ACCESS_CODE || ctx->nx == 0) {
/* Page address translation */
- qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
- " htab_mask " TARGET_FMT_plx
- " hash " TARGET_FMT_plx "\n",
+ qemu_log_mask(CPU_LOG_MMU, "htab_base " HWADDR_FMT_plx
+ " htab_mask " HWADDR_FMT_plx
+ " hash " HWADDR_FMT_plx "\n",
ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);
ctx->hash[0] = hash;
ctx->hash[1] = ~hash;
@@ -435,7 +419,7 @@ static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
hwaddr curaddr;
uint32_t a0, a1, a2, a3;
- qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx
+ qemu_log("Page table: " HWADDR_FMT_plx " len " HWADDR_FMT_plx
"\n", ppc_hash32_hpt_base(cpu),
ppc_hash32_hpt_mask(cpu) + 0x80);
for (curaddr = ppc_hash32_hpt_base(cpu);
@@ -447,7 +431,7 @@ static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
a2 = ldl_phys(cs->as, curaddr + 8);
a3 = ldl_phys(cs->as, curaddr + 12);
if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
- qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n",
+ qemu_log(HWADDR_FMT_plx ": %08x %08x %08x %08x\n",
curaddr, a0, a1, a2, a3);
}
}
@@ -458,29 +442,9 @@ static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
ret = -3;
}
} else {
- target_ulong sr;
-
qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
/* Direct-store segment : absolutely *BUGGY* for now */
- /*
- * Direct-store implies a 32-bit MMU.
- * Check the Segment Register's bus unit ID (BUID).
- */
- sr = env->sr[eaddr >> 28];
- if ((sr & 0x1FF00000) >> 20 == 0x07f) {
- /*
- * Memory-forced I/O controller interface access
- *
- * If T=1 and BUID=x'07F', the 601 performs a memory
- * access to SR[28-31] LA[4-31], bypassing all protection
- * mechanisms.
- */
- ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
- ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- return 0;
- }
-
switch (type) {
case ACCESS_INT:
/* Integer load/store : only access allowed */
@@ -524,36 +488,47 @@ static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
}
/* Generic TLB check function for embedded PowerPC implementations */
-int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
- hwaddr *raddrp,
- target_ulong address, uint32_t pid, int ext,
- int i)
+static bool ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
+ hwaddr *raddrp,
+ target_ulong address, uint32_t pid, int i)
{
target_ulong mask;
/* Check valid flag */
if (!(tlb->prot & PAGE_VALID)) {
- return -1;
+ return false;
}
mask = ~(tlb->size - 1);
- LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx
- " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN,
- mask, (uint32_t)tlb->PID, tlb->prot);
+ qemu_log_mask(CPU_LOG_MMU, "%s: TLB %d address " TARGET_FMT_lx
+ " PID %u <=> " TARGET_FMT_lx " " TARGET_FMT_lx " %u %x\n",
+ __func__, i, address, pid, tlb->EPN,
+ mask, (uint32_t)tlb->PID, tlb->prot);
/* Check PID */
if (tlb->PID != 0 && tlb->PID != pid) {
- return -1;
+ return false;
}
/* Check effective address */
if ((address & mask) != tlb->EPN) {
- return -1;
+ return false;
}
*raddrp = (tlb->RPN & mask) | (address & ~mask);
- if (ext) {
- /* Extend the physical address to 36 bits */
- *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32;
- }
+ return true;
+}
- return 0;
+/* Generic TLB search function for PowerPC embedded implementations */
+int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, uint32_t pid)
+{
+ ppcemb_tlb_t *tlb;
+ hwaddr raddr;
+ int i;
+
+ for (i = 0; i < env->nb_tlb; i++) {
+ tlb = &env->tlb.tlbe[i];
+ if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, i)) {
+ return i;
+ }
+ }
+ return -1;
}
static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
@@ -566,17 +541,18 @@ static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
ret = -1;
raddr = (hwaddr)-1ULL;
- pr = msr_pr;
+ pr = FIELD_EX64(env->msr, MSR, PR);
for (i = 0; i < env->nb_tlb; i++) {
tlb = &env->tlb.tlbe[i];
- if (ppcemb_tlb_check(env, tlb, &raddr, address,
- env->spr[SPR_40x_PID], 0, i) < 0) {
+ if (!ppcemb_tlb_check(env, tlb, &raddr, address,
+ env->spr[SPR_40x_PID], i)) {
continue;
}
zsel = (tlb->attr >> 4) & 0xF;
zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
- LOG_SWTLB("%s: TLB %d zsel %d zpr %d ty %d attr %08x\n",
- __func__, i, zsel, zpr, access_type, tlb->attr);
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s: TLB %d zsel %d zpr %d ty %d attr %08x\n",
+ __func__, i, zsel, zpr, access_type, tlb->attr);
/* Check execute enable bit */
switch (zpr) {
case 0x2:
@@ -610,66 +586,75 @@ static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
}
if (ret >= 0) {
ctx->raddr = raddr;
- LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
- " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
- ret);
+ qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
+ " => " HWADDR_FMT_plx
+ " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
+ ret);
return 0;
}
}
- LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
- " %d %d\n", __func__, address, raddr, ctx->prot, ret);
+ qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
+ " => " HWADDR_FMT_plx
+ " %d %d\n", __func__, address, raddr, ctx->prot, ret);
return ret;
}
-static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
- hwaddr *raddr, int *prot, target_ulong address,
- MMUAccessType access_type, int i)
+static bool mmubooke_check_pid(CPUPPCState *env, ppcemb_tlb_t *tlb,
+ hwaddr *raddr, target_ulong addr, int i)
{
- int prot2;
-
- if (ppcemb_tlb_check(env, tlb, raddr, address,
- env->spr[SPR_BOOKE_PID],
- !env->nb_pids, i) >= 0) {
- goto found_tlb;
+ if (ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID], i)) {
+ if (!env->nb_pids) {
+ /* Extend the physical address to 36 bits */
+ *raddr |= (uint64_t)(tlb->RPN & 0xF) << 32;
+ }
+ return true;
+ } else if (!env->nb_pids) {
+ return false;
}
-
if (env->spr[SPR_BOOKE_PID1] &&
- ppcemb_tlb_check(env, tlb, raddr, address,
- env->spr[SPR_BOOKE_PID1], 0, i) >= 0) {
- goto found_tlb;
+ ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID1], i)) {
+ return true;
}
-
if (env->spr[SPR_BOOKE_PID2] &&
- ppcemb_tlb_check(env, tlb, raddr, address,
- env->spr[SPR_BOOKE_PID2], 0, i) >= 0) {
- goto found_tlb;
+ ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID2], i)) {
+ return true;
}
+ return false;
+}
- LOG_SWTLB("%s: TLB entry not found\n", __func__);
- return -1;
+static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
+ hwaddr *raddr, int *prot, target_ulong address,
+ MMUAccessType access_type, int i)
+{
+ int prot2;
-found_tlb:
+ if (!mmubooke_check_pid(env, tlb, raddr, address, i)) {
+ qemu_log_mask(CPU_LOG_MMU, "%s: TLB entry not found\n", __func__);
+ return -1;
+ }
- if (msr_pr != 0) {
+ if (FIELD_EX64(env->msr, MSR, PR)) {
prot2 = tlb->prot & 0xF;
} else {
prot2 = (tlb->prot >> 4) & 0xF;
}
/* Check the address space */
- if ((access_type == MMU_INST_FETCH ? msr_ir : msr_dr) != (tlb->attr & 1)) {
- LOG_SWTLB("%s: AS doesn't match\n", __func__);
+ if ((access_type == MMU_INST_FETCH ?
+ FIELD_EX64(env->msr, MSR, IR) :
+ FIELD_EX64(env->msr, MSR, DR)) != (tlb->attr & 1)) {
+ qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__);
return -1;
}
*prot = prot2;
if (prot2 & prot_for_access_type(access_type)) {
- LOG_SWTLB("%s: good TLB!\n", __func__);
+ qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__);
return 0;
}
- LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2);
+ qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, prot2);
return access_type == MMU_INST_FETCH ? -3 : -2;
}
@@ -694,19 +679,19 @@ static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
if (ret >= 0) {
ctx->raddr = raddr;
- LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
- " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
- ret);
+ qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
+ " => " HWADDR_FMT_plx " %d %d\n", __func__,
+ address, ctx->raddr, ctx->prot, ret);
} else {
- LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
- " %d %d\n", __func__, address, raddr, ctx->prot, ret);
+ qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
+ " => " HWADDR_FMT_plx " %d %d\n", __func__,
+ address, raddr, ctx->prot, ret);
}
return ret;
}
-hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
- ppcmas_tlb_t *tlb)
+hwaddr booke206_tlb_to_page_size(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
int tlbm_size;
@@ -716,14 +701,13 @@ hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
}
/* TLB check function for MAS based SoftTLBs */
-int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb,
- hwaddr *raddrp, target_ulong address,
- uint32_t pid)
+int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, hwaddr *raddrp,
+ target_ulong address, uint32_t pid)
{
hwaddr mask;
uint32_t tlb_pid;
- if (!msr_cm) {
+ if (!FIELD_EX64(env->msr, MSR, CM)) {
/* In 32bit mode we can only address 32bit EAs */
address = (uint32_t)address;
}
@@ -734,10 +718,11 @@ int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb,
}
mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
- LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%"
- PRIx64 " mask=0x%" HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%"
- PRIx32 "\n", __func__, address, pid, tlb->mas1, tlb->mas2, mask,
- tlb->mas7_3, tlb->mas8);
+ qemu_log_mask(CPU_LOG_MMU, "%s: TLB ADDR=0x" TARGET_FMT_lx
+ " PID=0x%x MAS1=0x%x MAS2=0x%" PRIx64 " mask=0x%"
+ HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%" PRIx32 "\n",
+ __func__, address, pid, tlb->mas1, tlb->mas2, mask,
+ tlb->mas7_3, tlb->mas8);
/* Check PID */
tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
@@ -798,8 +783,8 @@ static bool mmubooke206_get_as(CPUPPCState *env,
*pr_out = !!(epidr & EPID_EPR);
return true;
} else {
- *as_out = msr_ds;
- *pr_out = msr_pr;
+ *as_out = FIELD_EX64(env->msr, MSR, DS);
+ *pr_out = FIELD_EX64(env->msr, MSR, PR);
return false;
}
}
@@ -838,7 +823,8 @@ static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb,
}
}
- LOG_SWTLB("%s: TLB entry not found\n", __func__);
+ qemu_log_mask(CPU_LOG_MMU, "%s: No TLB entry found for effective address "
+ "0x" TARGET_FMT_lx "\n", __func__, address);
return -1;
found_tlb:
@@ -869,21 +855,21 @@ found_tlb:
if (access_type == MMU_INST_FETCH) {
/* There is no way to fetch code using epid load */
assert(!use_epid);
- as = msr_ir;
+ as = FIELD_EX64(env->msr, MSR, IR);
}
if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
- LOG_SWTLB("%s: AS doesn't match\n", __func__);
+ qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__);
return -1;
}
*prot = prot2;
if (prot2 & prot_for_access_type(access_type)) {
- LOG_SWTLB("%s: good TLB!\n", __func__);
+ qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__);
return 0;
}
- LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2);
+ qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, prot2);
return access_type == MMU_INST_FETCH ? -3 : -2;
}
@@ -919,12 +905,13 @@ found_tlb:
if (ret >= 0) {
ctx->raddr = raddr;
- LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
- " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
- ret);
+ qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
+ " => " HWADDR_FMT_plx " %d %d\n", __func__, address,
+ ctx->raddr, ctx->prot, ret);
} else {
- LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
- " %d %d\n", __func__, address, raddr, ctx->prot, ret);
+ qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
+ " => " HWADDR_FMT_plx " %d %d\n", __func__, address,
+ raddr, ctx->prot, ret);
}
return ret;
@@ -942,10 +929,12 @@ static void mmubooke_dump_mmu(CPUPPCState *env)
ppcemb_tlb_t *entry;
int i;
+#ifdef CONFIG_KVM
if (kvm_enabled() && !env->kvm_sw_tlb) {
qemu_printf("Cannot access KVM TLB\n");
return;
}
+#endif
qemu_printf("\nTLB:\n");
qemu_printf("Effective Physical Size PID Prot "
@@ -1005,7 +994,7 @@ static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset,
pa = entry->mas7_3 & ~(size - 1);
qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c"
- "U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
+ " U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
(uint64_t)ea, (uint64_t)pa,
book3e_tsize_to_str[tsize],
(entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT,
@@ -1033,10 +1022,12 @@ static void mmubooke206_dump_mmu(CPUPPCState *env)
int offset = 0;
int i;
+#ifdef CONFIG_KVM
if (kvm_enabled() && !env->kvm_sw_tlb) {
qemu_printf("Cannot access KVM TLB\n");
return;
}
+#endif
for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
int size = booke206_tlb_size(env, i);
@@ -1147,7 +1138,6 @@ void dump_mmu(CPUPPCState *env)
mmubooke206_dump_mmu(env);
break;
case POWERPC_MMU_SOFT_6xx:
- case POWERPC_MMU_SOFT_74xx:
mmu6xx_dump_mmu(env);
break;
#if defined(TARGET_PPC64)
@@ -1174,53 +1164,23 @@ void dump_mmu(CPUPPCState *env)
static int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr,
MMUAccessType access_type)
{
- int in_plb, ret;
-
ctx->raddr = eaddr;
ctx->prot = PAGE_READ | PAGE_EXEC;
- ret = 0;
+
switch (env->mmu_model) {
case POWERPC_MMU_SOFT_6xx:
- case POWERPC_MMU_SOFT_74xx:
case POWERPC_MMU_SOFT_4xx:
case POWERPC_MMU_REAL:
case POWERPC_MMU_BOOKE:
ctx->prot |= PAGE_WRITE;
break;
- case POWERPC_MMU_SOFT_4xx_Z:
- if (unlikely(msr_pe != 0)) {
- /*
- * 403 family add some particular protections, using
- * PBL/PBU registers for accesses with no translation.
- */
- in_plb =
- /* Check PLB validity */
- (env->pb[0] < env->pb[1] &&
- /* and address in plb area */
- eaddr >= env->pb[0] && eaddr < env->pb[1]) ||
- (env->pb[2] < env->pb[3] &&
- eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0;
- if (in_plb ^ msr_px) {
- /* Access in protected area */
- if (access_type == MMU_DATA_STORE) {
- /* Access is not allowed */
- ret = -2;
- }
- } else {
- /* Read-write access is allowed */
- ctx->prot |= PAGE_WRITE;
- }
- }
- break;
-
default:
/* Caller's checks mean we should never get here for other models */
- abort();
- return -1;
+ g_assert_not_reached();
}
- return ret;
+ return 0;
}
int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx,
@@ -1229,12 +1189,11 @@ int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx,
int mmu_idx)
{
int ret = -1;
- bool real_mode = (type == ACCESS_CODE && msr_ir == 0)
- || (type != ACCESS_CODE && msr_dr == 0);
+ bool real_mode = (type == ACCESS_CODE && !FIELD_EX64(env->msr, MSR, IR)) ||
+ (type != ACCESS_CODE && !FIELD_EX64(env->msr, MSR, DR));
switch (env->mmu_model) {
case POWERPC_MMU_SOFT_6xx:
- case POWERPC_MMU_SOFT_74xx:
if (real_mode) {
ret = check_physical(env, ctx, eaddr, access_type);
} else {
@@ -1250,7 +1209,6 @@ int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx,
break;
case POWERPC_MMU_SOFT_4xx:
- case POWERPC_MMU_SOFT_4xx_Z:
if (real_mode) {
ret = check_physical(env, ctx, eaddr, access_type);
} else {
@@ -1293,7 +1251,7 @@ static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address,
bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);
if (access_type == MMU_INST_FETCH) {
- as = msr_ir;
+ as = FIELD_EX64(env->msr, MSR, IR);
}
env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
@@ -1371,7 +1329,7 @@ static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr,
}
if (guest_visible) {
- LOG_MMU_STATE(cs);
+ log_cpu_state_mask(CPU_LOG_MMU, cs, 0);
if (type == ACCESS_CODE) {
switch (ret) {
case -1:
@@ -1383,11 +1341,7 @@ static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr,
env->spr[SPR_IMISS] = eaddr;
env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
goto tlb_miss;
- case POWERPC_MMU_SOFT_74xx:
- cs->exception_index = POWERPC_EXCP_IFTLB;
- goto tlb_miss_74xx;
case POWERPC_MMU_SOFT_4xx:
- case POWERPC_MMU_SOFT_4xx_Z:
cs->exception_index = POWERPC_EXCP_ITLB;
env->error_code = 0;
env->spr[SPR_40x_DEAR] = eaddr;
@@ -1414,22 +1368,34 @@ static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr,
case -2:
/* Access rights violation */
cs->exception_index = POWERPC_EXCP_ISI;
- env->error_code = 0x08000000;
+ if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
+ (env->mmu_model == POWERPC_MMU_BOOKE206)) {
+ env->error_code = 0;
+ } else {
+ env->error_code = 0x08000000;
+ }
break;
case -3:
/* No execute protection violation */
if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
(env->mmu_model == POWERPC_MMU_BOOKE206)) {
env->spr[SPR_BOOKE_ESR] = 0x00000000;
+ env->error_code = 0;
+ } else {
+ env->error_code = 0x10000000;
}
cs->exception_index = POWERPC_EXCP_ISI;
- env->error_code = 0x10000000;
break;
case -4:
/* Direct store exception */
/* No code fetch is allowed in direct-store areas */
cs->exception_index = POWERPC_EXCP_ISI;
- env->error_code = 0x10000000;
+ if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
+ (env->mmu_model == POWERPC_MMU_BOOKE206)) {
+ env->error_code = 0;
+ } else {
+ env->error_code = 0x10000000;
+ }
break;
}
} else {
@@ -1454,21 +1420,7 @@ static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr,
env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) +
get_pteg_offset32(cpu, ctx.hash[1]);
break;
- case POWERPC_MMU_SOFT_74xx:
- if (access_type == MMU_DATA_STORE) {
- cs->exception_index = POWERPC_EXCP_DSTLB;
- } else {
- cs->exception_index = POWERPC_EXCP_DLTLB;
- }
- tlb_miss_74xx:
- /* Implement LRU algorithm */
- env->error_code = ctx.key << 19;
- env->spr[SPR_TLBMISS] = (eaddr & ~((target_ulong)0x3)) |
- ((env->last_way + 1) & (env->nb_ways - 1));
- env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem;
- break;
case POWERPC_MMU_SOFT_4xx:
- case POWERPC_MMU_SOFT_4xx_Z:
cs->exception_index = POWERPC_EXCP_DTLB;
env->error_code = 0;
env->spr[SPR_40x_DEAR] = eaddr;
@@ -1501,8 +1453,7 @@ static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr,
/* Access rights violation */
cs->exception_index = POWERPC_EXCP_DSI;
env->error_code = 0;
- if (env->mmu_model == POWERPC_MMU_SOFT_4xx
- || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) {
+ if (env->mmu_model == POWERPC_MMU_SOFT_4xx) {
env->spr[SPR_40x_DEAR] = eaddr;
if (access_type == MMU_DATA_STORE) {
env->spr[SPR_40x_ESR] |= 0x00800000;
@@ -1589,7 +1540,6 @@ bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
#endif
case POWERPC_MMU_32B:
- case POWERPC_MMU_601:
return ppc_hash32_xlate(cpu, eaddr, access_type, raddrp,
psizep, protp, mmu_idx, guest_visible);
@@ -1611,9 +1561,9 @@ hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
* mapped by code TLBs, so we also try a MMU_INST_FETCH.
*/
if (ppc_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p,
- cpu_mmu_index(&cpu->env, false), false) ||
+ ppc_env_mmu_index(&cpu->env, false), false) ||
ppc_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p,
- cpu_mmu_index(&cpu->env, true), false)) {
+ ppc_env_mmu_index(&cpu->env, true), false)) {
return raddr & TARGET_PAGE_MASK;
}
return -1;
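
For reference, the match done by ppcemb_tlb_check() in the hunks above reduces to
a mask derived from the entry size, a PID compare (PID 0 acts as a wildcard) and an
EPN compare, with the real address built from the RPN plus the page offset;
ppcemb_tlb_search() then just walks env->nb_tlb entries and returns the first
matching index, or -1. A minimal stand-alone sketch of that match, with a simplified
entry type (the field names follow the diff, everything else is illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint64_t EPN;   /* effective page number (page-aligned address) */
        uint64_t RPN;   /* real page number */
        uint64_t size;  /* page size in bytes, power of two */
        uint32_t PID;   /* 0 matches any PID */
        bool     valid;
    } soft_tlb_entry;

    /* Simplified equivalent of the check: valid flag, PID, then EPN under mask. */
    static bool soft_tlb_match(const soft_tlb_entry *e, uint64_t addr,
                               uint32_t pid, uint64_t *raddr)
    {
        uint64_t mask = ~(e->size - 1);

        if (!e->valid) {
            return false;
        }
        if (e->PID != 0 && e->PID != pid) {
            return false;
        }
        if ((addr & mask) != e->EPN) {
            return false;
        }
        *raddr = (e->RPN & mask) | (addr & ~mask);
        return true;
    }

    int main(void)
    {
        soft_tlb_entry e = { 0x10000000, 0x20000000, 0x1000, 5, true };
        uint64_t raddr;

        if (soft_tlb_match(&e, 0x10000abc, 5, &raddr)) {
            printf("raddr=0x%llx\n", (unsigned long long)raddr); /* 0x20000abc */
        }
        return 0;
    }
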
diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
index 2cb98c5169..c071b4d5e2 100644
--- a/target/ppc/mmu_helper.c
+++ b/target/ppc/mmu_helper.c
@@ -28,7 +28,6 @@
#include "exec/log.h"
#include "helper_regs.h"
#include "qemu/error-report.h"
-#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
@@ -36,23 +35,8 @@
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
-/* #define DEBUG_BATS */
-/* #define DEBUG_SOFTWARE_TLB */
-/* #define DUMP_PAGE_TABLES */
/* #define FLUSH_ALL_TLBS */
-#ifdef DEBUG_SOFTWARE_TLB
-# define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
-#else
-# define LOG_SWTLB(...) do { } while (0)
-#endif
-
-#ifdef DEBUG_BATS
-# define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
-#else
-# define LOG_BATS(...) do { } while (0)
-#endif
-
/*****************************************************************************/
/* PowerPC MMU emulation */
@@ -89,8 +73,8 @@ static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
tlb = &env->tlb.tlb6[nr];
if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
- LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr,
- env->nb_tlb, eaddr);
+ qemu_log_mask(CPU_LOG_MMU, "TLB invalidate %d/%d "
+ TARGET_FMT_lx "\n", nr, env->nb_tlb, eaddr);
pte_invalidate(&tlb->pte0);
tlb_flush_page(cs, tlb->EPN);
}
@@ -115,8 +99,9 @@ static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
tlb = &env->tlb.tlb6[nr];
- LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
- " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1);
+ qemu_log_mask(CPU_LOG_MMU, "Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 "
+ TARGET_FMT_lx " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb,
+ EPN, pte0, pte1);
/* Invalidate any pending reference in QEMU for this virtual address */
ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1);
tlb->pte0 = pte0;
@@ -126,27 +111,6 @@ static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
env->last_way = way;
}
-/* Generic TLB search function for PowerPC embedded implementations */
-static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address,
- uint32_t pid)
-{
- ppcemb_tlb_t *tlb;
- hwaddr raddr;
- int i, ret;
-
- /* Default return value is no match */
- ret = -1;
- for (i = 0; i < env->nb_tlb; i++) {
- tlb = &env->tlb.tlbe[i];
- if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) {
- ret = i;
- break;
- }
- }
-
- return ret;
-}
-
/* Helpers specific to PowerPC 40x implementations */
static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
{
@@ -182,15 +146,6 @@ static void booke206_flush_tlb(CPUPPCState *env, int flags,
tlb_flush(env_cpu(env));
}
-static int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
- target_ulong eaddr, MMUAccessType access_type,
- int type)
-{
- return get_physical_address_wtlb(env, ctx, eaddr, access_type, type, 0);
-}
-
-
-
/*****************************************************************************/
/* BATs management */
#if !defined(FLUSH_ALL_TLBS)
@@ -204,25 +159,27 @@ static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
end = base + mask + 0x00020000;
if (((end - base) >> TARGET_PAGE_BITS) > 1024) {
/* Flushing 1024 4K pages is slower than a complete flush */
- LOG_BATS("Flush all BATs\n");
+ qemu_log_mask(CPU_LOG_MMU, "Flush all BATs\n");
tlb_flush(cs);
- LOG_BATS("Flush done\n");
+ qemu_log_mask(CPU_LOG_MMU, "Flush done\n");
return;
}
- LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " ("
- TARGET_FMT_lx ")\n", base, end, mask);
+ qemu_log_mask(CPU_LOG_MMU, "Flush BAT from " TARGET_FMT_lx
+ " to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
+ base, end, mask);
for (page = base; page != end; page += TARGET_PAGE_SIZE) {
tlb_flush_page(cs, page);
}
- LOG_BATS("Flush done\n");
+ qemu_log_mask(CPU_LOG_MMU, "Flush done\n");
}
#endif
static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
target_ulong value)
{
- LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID,
- nr, ul == 0 ? 'u' : 'l', value, env->nip);
+ qemu_log_mask(CPU_LOG_MMU, "Set %cBAT%d%c to " TARGET_FMT_lx " ("
+ TARGET_FMT_lx ")\n", ID, nr, ul == 0 ? 'u' : 'l',
+ value, env->nip);
}
void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
@@ -291,88 +248,6 @@ void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value)
env->DBAT[1][nr] = value;
}
-void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value)
-{
- target_ulong mask;
-#if defined(FLUSH_ALL_TLBS)
- int do_inval;
-#endif
-
- dump_store_bat(env, 'I', 0, nr, value);
- if (env->IBAT[0][nr] != value) {
-#if defined(FLUSH_ALL_TLBS)
- do_inval = 0;
-#endif
- mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
- if (env->IBAT[1][nr] & 0x40) {
- /* Invalidate BAT only if it is valid */
-#if !defined(FLUSH_ALL_TLBS)
- do_invalidate_BAT(env, env->IBAT[0][nr], mask);
-#else
- do_inval = 1;
-#endif
- }
- /*
- * When storing valid upper BAT, mask BEPI and BRPN and
- * invalidate all TLBs covered by this BAT
- */
- env->IBAT[0][nr] = (value & 0x00001FFFUL) |
- (value & ~0x0001FFFFUL & ~mask);
- env->DBAT[0][nr] = env->IBAT[0][nr];
- if (env->IBAT[1][nr] & 0x40) {
-#if !defined(FLUSH_ALL_TLBS)
- do_invalidate_BAT(env, env->IBAT[0][nr], mask);
-#else
- do_inval = 1;
-#endif
- }
-#if defined(FLUSH_ALL_TLBS)
- if (do_inval) {
- tlb_flush(env_cpu(env));
- }
-#endif
- }
-}
-
-void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value)
-{
-#if !defined(FLUSH_ALL_TLBS)
- target_ulong mask;
-#else
- int do_inval;
-#endif
-
- dump_store_bat(env, 'I', 1, nr, value);
- if (env->IBAT[1][nr] != value) {
-#if defined(FLUSH_ALL_TLBS)
- do_inval = 0;
-#endif
- if (env->IBAT[1][nr] & 0x40) {
-#if !defined(FLUSH_ALL_TLBS)
- mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
- do_invalidate_BAT(env, env->IBAT[0][nr], mask);
-#else
- do_inval = 1;
-#endif
- }
- if (value & 0x40) {
-#if !defined(FLUSH_ALL_TLBS)
- mask = (value << 17) & 0x0FFE0000UL;
- do_invalidate_BAT(env, env->IBAT[0][nr], mask);
-#else
- do_inval = 1;
-#endif
- }
- env->IBAT[1][nr] = value;
- env->DBAT[1][nr] = value;
-#if defined(FLUSH_ALL_TLBS)
- if (do_inval) {
- tlb_flush(env_cpu(env));
- }
-#endif
- }
-}
-
/*****************************************************************************/
/* TLB management */
void ppc_tlb_invalidate_all(CPUPPCState *env)
@@ -385,11 +260,9 @@ void ppc_tlb_invalidate_all(CPUPPCState *env)
#endif /* defined(TARGET_PPC64) */
switch (env->mmu_model) {
case POWERPC_MMU_SOFT_6xx:
- case POWERPC_MMU_SOFT_74xx:
ppc6xx_tlb_invalidate_all(env);
break;
case POWERPC_MMU_SOFT_4xx:
- case POWERPC_MMU_SOFT_4xx_Z:
ppc4xx_tlb_invalidate_all(env);
break;
case POWERPC_MMU_REAL:
@@ -406,7 +279,6 @@ void ppc_tlb_invalidate_all(CPUPPCState *env)
booke206_flush_tlb(env, -1, 0);
break;
case POWERPC_MMU_32B:
- case POWERPC_MMU_601:
env->tlb_need_flush = 0;
tlb_flush(env_cpu(env));
break;
@@ -434,14 +306,12 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
#endif /* defined(TARGET_PPC64) */
switch (env->mmu_model) {
case POWERPC_MMU_SOFT_6xx:
- case POWERPC_MMU_SOFT_74xx:
ppc6xx_tlb_invalidate_virt(env, addr, 0);
if (env->id_tlbs == 1) {
ppc6xx_tlb_invalidate_virt(env, addr, 1);
}
break;
case POWERPC_MMU_32B:
- case POWERPC_MMU_601:
/*
* Actual CPUs invalidate entire congruence classes based on
* the geometry of their TLBs and some OSes take that into
@@ -528,6 +398,160 @@ void helper_tlbie(CPUPPCState *env, target_ulong addr)
ppc_tlb_invalidate_one(env, addr);
}
+#if defined(TARGET_PPC64)
+
+/* Invalidation Selector */
+#define TLBIE_IS_VA 0
+#define TLBIE_IS_PID 1
+#define TLBIE_IS_LPID 2
+#define TLBIE_IS_ALL 3
+
+/* Radix Invalidation Control */
+#define TLBIE_RIC_TLB 0
+#define TLBIE_RIC_PWC 1
+#define TLBIE_RIC_ALL 2
+#define TLBIE_RIC_GRP 3
+
+/* Radix Actual Page sizes */
+#define TLBIE_R_AP_4K 0
+#define TLBIE_R_AP_64K 5
+#define TLBIE_R_AP_2M 1
+#define TLBIE_R_AP_1G 2
+
+/* RB field masks */
+#define TLBIE_RB_EPN_MASK PPC_BITMASK(0, 51)
+#define TLBIE_RB_IS_MASK PPC_BITMASK(52, 53)
+#define TLBIE_RB_AP_MASK PPC_BITMASK(56, 58)
+
+void helper_tlbie_isa300(CPUPPCState *env, target_ulong rb, target_ulong rs,
+ uint32_t flags)
+{
+ unsigned ric = (flags & TLBIE_F_RIC_MASK) >> TLBIE_F_RIC_SHIFT;
+ /*
+ * With the exception of the checks for invalid instruction forms,
+ * PRS is currently ignored, because we don't know if a given TLB entry
+ * is process or partition scoped.
+ */
+ bool prs = flags & TLBIE_F_PRS;
+ bool r = flags & TLBIE_F_R;
+ bool local = flags & TLBIE_F_LOCAL;
+ bool effR;
+ unsigned is = extract64(rb, PPC_BIT_NR(53), 2);
+ unsigned ap; /* actual page size */
+ target_ulong addr, pgoffs_mask;
+
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s: local=%d addr=" TARGET_FMT_lx " ric=%u prs=%d r=%d is=%u\n",
+ __func__, local, rb & TARGET_PAGE_MASK, ric, prs, r, is);
+
+ effR = FIELD_EX64(env->msr, MSR, HV) ? r : env->spr[SPR_LPCR] & LPCR_HR;
+
+ /* Partial TLB invalidation is supported for Radix only for now. */
+ if (!effR) {
+ goto inval_all;
+ }
+
+ /* Check for invalid instruction forms (effR=1). */
+ if (unlikely(ric == TLBIE_RIC_GRP ||
+ ((ric == TLBIE_RIC_PWC || ric == TLBIE_RIC_ALL) &&
+ is == TLBIE_IS_VA) ||
+ (!prs && is == TLBIE_IS_PID))) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid instruction form: ric=%u prs=%d r=%d is=%u\n",
+ __func__, ric, prs, r, is);
+ goto invalid;
+ }
+
+ /* We don't cache Page Walks. */
+ if (ric == TLBIE_RIC_PWC) {
+ if (local) {
+ unsigned set = extract64(rb, PPC_BIT_NR(51), 12);
+ if (set != 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid set: %d\n",
+ __func__, set);
+ goto invalid;
+ }
+ }
+ return;
+ }
+
+ /*
+ * Invalidation by LPID or PID is not supported, so fallback
+ * to full TLB flush in these cases.
+ */
+ if (is != TLBIE_IS_VA) {
+ goto inval_all;
+ }
+
+ /*
+ * The results of an attempt to invalidate a translation outside of
+ * quadrant 0 for Radix Tree translation (effR=1, RIC=0, PRS=1, IS=0,
+ * and EA 0:1 != 0b00) are boundedly undefined.
+ */
+ if (unlikely(ric == TLBIE_RIC_TLB && prs && is == TLBIE_IS_VA &&
+ (rb & R_EADDR_QUADRANT) != R_EADDR_QUADRANT0)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: attempt to invalidate a translation outside of quadrant 0\n",
+ __func__);
+ goto inval_all;
+ }
+
+ assert(is == TLBIE_IS_VA);
+ assert(ric == TLBIE_RIC_TLB || ric == TLBIE_RIC_ALL);
+
+ ap = extract64(rb, PPC_BIT_NR(58), 3);
+ switch (ap) {
+ case TLBIE_R_AP_4K:
+ pgoffs_mask = 0xfffull;
+ break;
+
+ case TLBIE_R_AP_64K:
+ pgoffs_mask = 0xffffull;
+ break;
+
+ case TLBIE_R_AP_2M:
+ pgoffs_mask = 0x1fffffull;
+ break;
+
+ case TLBIE_R_AP_1G:
+ pgoffs_mask = 0x3fffffffull;
+ break;
+
+ default:
+ /*
+ * If the value specified in RS 0:31, RS 32:63, RB 54:55, RB 56:58,
+ * RB 44:51, or RB 56:63, when it is needed to perform the specified
+ * operation, is not supported by the implementation, the instruction
+ * is treated as if the instruction form were invalid.
+ */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid AP: %d\n", __func__, ap);
+ goto invalid;
+ }
+
+ addr = rb & TLBIE_RB_EPN_MASK & ~pgoffs_mask;
+
+ if (local) {
+ tlb_flush_page(env_cpu(env), addr);
+ } else {
+ tlb_flush_page_all_cpus(env_cpu(env), addr);
+ }
+ return;
+
+inval_all:
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
+ if (!local) {
+ env->tlb_need_flush |= TLB_NEED_GLOBAL_FLUSH;
+ }
+ return;
+
+invalid:
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_INVAL_INVAL, GETPC());
+}
+
+#endif
+
void helper_tlbiva(CPUPPCState *env, target_ulong addr)
{
/* tlbiva instruction only exists on BookE */
@@ -553,9 +577,9 @@ static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
}
way = (env->spr[SPR_SRR1] >> 17) & 1;
(void)EPN; /* avoid a compiler warning */
- LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
- " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
- RPN, way);
+ qemu_log_mask(CPU_LOG_MMU, "%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx
+ " PTE0 " TARGET_FMT_lx " PTE1 " TARGET_FMT_lx " way %d\n",
+ __func__, new_EPN, EPN, CMP, RPN, way);
/* Store this TLB */
ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
way, is_code, CMP, RPN);
@@ -571,35 +595,6 @@ void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
do_6xx_tlb(env, EPN, 1);
}
-/* PowerPC 74xx software TLB load instructions helpers */
-static void do_74xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
-{
- target_ulong RPN, CMP, EPN;
- int way;
-
- RPN = env->spr[SPR_PTELO];
- CMP = env->spr[SPR_PTEHI];
- EPN = env->spr[SPR_TLBMISS] & ~0x3;
- way = env->spr[SPR_TLBMISS] & 0x3;
- (void)EPN; /* avoid a compiler warning */
- LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
- " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
- RPN, way);
- /* Store this TLB */
- ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
- way, is_code, CMP, RPN);
-}
-
-void helper_74xx_tlbd(CPUPPCState *env, target_ulong EPN)
-{
- do_74xx_tlb(env, EPN, 0);
-}
-
-void helper_74xx_tlbi(CPUPPCState *env, target_ulong EPN)
-{
- do_74xx_tlb(env, EPN, 1);
-}
-
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */
@@ -617,7 +612,7 @@ target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
*/
nb_BATs = env->nb_BATs;
env->nb_BATs = 0;
- if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
+ if (get_physical_address_wtlb(env, &ctx, addr, 0, ACCESS_INT, 0) == 0) {
ret = ctx.raddr;
}
env->nb_BATs = nb_BATs;
@@ -708,6 +703,14 @@ static inline int booke_page_size_to_tlb(target_ulong page_size)
#define PPC4XX_TLBLO_ATTR_MASK 0x000000FF
#define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00
+void helper_store_40x_pid(CPUPPCState *env, target_ulong val)
+{
+ if (env->spr[SPR_40x_PID] != val) {
+ env->spr[SPR_40x_PID] = val;
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
+ }
+}
+
target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
{
ppcemb_tlb_t *tlb;
@@ -725,7 +728,7 @@ target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
size = PPC4XX_TLBHI_SIZE_DEFAULT;
}
ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
- env->spr[SPR_40x_PID] = tlb->PID;
+ helper_store_40x_pid(env, tlb->PID);
return ret;
}
@@ -746,25 +749,41 @@ target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
return ret;
}
+static void ppcemb_tlb_flush(CPUState *cs, ppcemb_tlb_t *tlb)
+{
+ unsigned mmu_idx = 0;
+
+ if (tlb->prot & 0xf) {
+ mmu_idx |= 0x1;
+ }
+ if ((tlb->prot >> 4) & 0xf) {
+ mmu_idx |= 0x2;
+ }
+ if (tlb->attr & 1) {
+ mmu_idx <<= 2;
+ }
+
+ tlb_flush_range_by_mmuidx(cs, tlb->EPN, tlb->size, mmu_idx,
+ TARGET_LONG_BITS);
+}
+
void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
target_ulong val)
{
CPUState *cs = env_cpu(env);
ppcemb_tlb_t *tlb;
- target_ulong page, end;
- LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
+ qemu_log_mask(CPU_LOG_MMU, "%s entry %d val " TARGET_FMT_lx "\n",
+ __func__, (int)entry,
val);
entry &= PPC4XX_TLB_ENTRY_MASK;
tlb = &env->tlb.tlbe[entry];
/* Invalidate previous TLB (if it's valid) */
- if (tlb->prot & PAGE_VALID) {
- end = tlb->EPN + tlb->size;
- LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
- TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
- for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
- tlb_flush_page(cs, page);
- }
+ if ((tlb->prot & PAGE_VALID) && tlb->PID == env->spr[SPR_40x_PID]) {
+ qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
+ TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
+ (int)entry, tlb->EPN, tlb->EPN + tlb->size);
+ ppcemb_tlb_flush(cs, tlb);
}
tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
& PPC4XX_TLBHI_SIZE_MASK);
@@ -790,33 +809,33 @@ void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
tlb->prot &= ~PAGE_VALID;
}
tlb->PID = env->spr[SPR_40x_PID]; /* PID */
- LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
- " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
- (int)entry, tlb->RPN, tlb->EPN, tlb->size,
- tlb->prot & PAGE_READ ? 'r' : '-',
- tlb->prot & PAGE_WRITE ? 'w' : '-',
- tlb->prot & PAGE_EXEC ? 'x' : '-',
- tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
- /* Invalidate new TLB (if valid) */
- if (tlb->prot & PAGE_VALID) {
- end = tlb->EPN + tlb->size;
- LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
- TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
- for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
- tlb_flush_page(cs, page);
- }
- }
+ qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " HWADDR_FMT_plx
+ " EPN " TARGET_FMT_lx " size " TARGET_FMT_lx
+ " prot %c%c%c%c PID %d\n", __func__,
+ (int)entry, tlb->RPN, tlb->EPN, tlb->size,
+ tlb->prot & PAGE_READ ? 'r' : '-',
+ tlb->prot & PAGE_WRITE ? 'w' : '-',
+ tlb->prot & PAGE_EXEC ? 'x' : '-',
+ tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}
void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
target_ulong val)
{
+ CPUState *cs = env_cpu(env);
ppcemb_tlb_t *tlb;
- LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
- val);
+ qemu_log_mask(CPU_LOG_MMU, "%s entry %i val " TARGET_FMT_lx "\n",
+ __func__, (int)entry, val);
entry &= PPC4XX_TLB_ENTRY_MASK;
tlb = &env->tlb.tlbe[entry];
+ /* Invalidate previous TLB (if it's valid) */
+ if ((tlb->prot & PAGE_VALID) && tlb->PID == env->spr[SPR_40x_PID]) {
+ qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
+ TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
+ (int)entry, tlb->EPN, tlb->EPN + tlb->size);
+ ppcemb_tlb_flush(cs, tlb);
+ }
tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
tlb->prot = PAGE_READ;
@@ -826,13 +845,14 @@ void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
if (val & PPC4XX_TLBLO_WR) {
tlb->prot |= PAGE_WRITE;
}
- LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
- " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
- (int)entry, tlb->RPN, tlb->EPN, tlb->size,
- tlb->prot & PAGE_READ ? 'r' : '-',
- tlb->prot & PAGE_WRITE ? 'w' : '-',
- tlb->prot & PAGE_EXEC ? 'x' : '-',
- tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
+ qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " HWADDR_FMT_plx
+ " EPN " TARGET_FMT_lx
+ " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
+ (int)entry, tlb->RPN, tlb->EPN, tlb->size,
+ tlb->prot & PAGE_READ ? 'r' : '-',
+ tlb->prot & PAGE_WRITE ? 'w' : '-',
+ tlb->prot & PAGE_EXEC ? 'x' : '-',
+ tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}
target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
@@ -840,54 +860,61 @@ target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}
+static bool mmubooke_pid_match(CPUPPCState *env, ppcemb_tlb_t *tlb)
+{
+ if (tlb->PID == env->spr[SPR_BOOKE_PID]) {
+ return true;
+ }
+ if (!env->nb_pids) {
+ return false;
+ }
+
+ if (env->spr[SPR_BOOKE_PID1] && tlb->PID == env->spr[SPR_BOOKE_PID1]) {
+ return true;
+ }
+ if (env->spr[SPR_BOOKE_PID2] && tlb->PID == env->spr[SPR_BOOKE_PID2]) {
+ return true;
+ }
+
+ return false;
+}
+
/* PowerPC 440 TLB management */
void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
target_ulong value)
{
ppcemb_tlb_t *tlb;
- target_ulong EPN, RPN, size;
- int do_flush_tlbs;
- LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
- __func__, word, (int)entry, value);
- do_flush_tlbs = 0;
+ qemu_log_mask(CPU_LOG_MMU, "%s word %d entry %d value " TARGET_FMT_lx "\n",
+ __func__, word, (int)entry, value);
entry &= 0x3F;
tlb = &env->tlb.tlbe[entry];
+
+ /* Invalidate previous TLB (if it's valid) */
+ if ((tlb->prot & PAGE_VALID) && mmubooke_pid_match(env, tlb)) {
+ qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
+ TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
+ (int)entry, tlb->EPN, tlb->EPN + tlb->size);
+ ppcemb_tlb_flush(env_cpu(env), tlb);
+ }
+
switch (word) {
default:
/* Just here to please gcc */
case 0:
- EPN = value & 0xFFFFFC00;
- if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) {
- do_flush_tlbs = 1;
- }
- tlb->EPN = EPN;
- size = booke_tlb_to_page_size((value >> 4) & 0xF);
- if ((tlb->prot & PAGE_VALID) && tlb->size < size) {
- do_flush_tlbs = 1;
- }
- tlb->size = size;
+ tlb->EPN = value & 0xFFFFFC00;
+ tlb->size = booke_tlb_to_page_size((value >> 4) & 0xF);
tlb->attr &= ~0x1;
tlb->attr |= (value >> 8) & 1;
if (value & 0x200) {
tlb->prot |= PAGE_VALID;
} else {
- if (tlb->prot & PAGE_VALID) {
- tlb->prot &= ~PAGE_VALID;
- do_flush_tlbs = 1;
- }
+ tlb->prot &= ~PAGE_VALID;
}
tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
- if (do_flush_tlbs) {
- tlb_flush(env_cpu(env));
- }
break;
case 1:
- RPN = value & 0xFFFFFC0F;
- if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) {
- tlb_flush(env_cpu(env));
- }
- tlb->RPN = RPN;
+ tlb->RPN = value & 0xFFFFFC0F;
break;
case 2:
tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
@@ -1048,7 +1075,7 @@ void helper_booke206_tlbwe(CPUPPCState *env)
}
if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
- !msr_gs) {
+ !FIELD_EX64(env->msr, MSR, GS)) {
/* XXX we don't support direct LRAT setting yet */
fprintf(stderr, "cpu: don't support LRAT setting yet\n");
return;
@@ -1075,7 +1102,7 @@ void helper_booke206_tlbwe(CPUPPCState *env)
POWERPC_EXCP_INVAL_INVAL, GETPC());
}
- if (msr_gs) {
+ if (FIELD_EX64(env->msr, MSR, GS)) {
cpu_abort(env_cpu(env), "missing HV implementation\n");
}
@@ -1116,7 +1143,7 @@ void helper_booke206_tlbwe(CPUPPCState *env)
/* Add a mask for page attributes */
mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;
- if (!msr_cm) {
+ if (!FIELD_EX64(env->msr, MSR, CM)) {
/*
* Executing a tlbwe instruction in 32-bit mode will set bits
* 0:31 of the TLB EPN field to zero.
@@ -1216,7 +1243,7 @@ void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
}
static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
- uint32_t ea)
+ vaddr ea)
{
int i;
int ways = booke206_tlb_ways(env, tlbn);
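
The new helper_tlbie_isa300() above decodes the invalidation selector (IS), the
actual page size (AP) and the effective page number from the RB operand using
big-endian bit numbering (PPC_BIT_NR), then flushes a single page when possible.
A rough illustration of the AP-to-page-offset-mask mapping and the resulting flush
address, using plain shifts instead of the QEMU bit macros (the EPN mask constant
below is a stand-in for RB[0:51]; the AP encodings follow the diff):

    #include <stdint.h>
    #include <stdio.h>

    /* Radix actual page size encodings, as in the new helper. */
    enum { AP_4K = 0, AP_2M = 1, AP_1G = 2, AP_64K = 5 };

    /* Map AP to the page-offset mask; 0 means an unsupported encoding. */
    static uint64_t ap_to_pgoffs_mask(unsigned ap)
    {
        switch (ap) {
        case AP_4K:  return 0xfffULL;
        case AP_64K: return 0xffffULL;
        case AP_2M:  return 0x1fffffULL;
        case AP_1G:  return 0x3fffffffULL;
        default:     return 0; /* treated as an invalid form in the real helper */
        }
    }

    /* Illustrative flush address: EPN bits of RB with the page offset cleared. */
    static uint64_t tlbie_flush_addr(uint64_t rb, unsigned ap)
    {
        const uint64_t epn_mask = 0xfffffffffffff000ULL; /* stand-in for RB[0:51] */
        return rb & epn_mask & ~ap_to_pgoffs_mask(ap);
    }

    int main(void)
    {
        /* A 2M invalidation for EA 0x40200000 flushes starting at 0x40200000. */
        printf("0x%llx\n",
               (unsigned long long)tlbie_flush_addr(0x40200000ULL, AP_2M));
        return 0;
    }
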
diff --git a/target/ppc/power8-pmu-regs.c.inc b/target/ppc/power8-pmu-regs.c.inc
new file mode 100644
index 0000000000..4956a8b350
--- /dev/null
+++ b/target/ppc/power8-pmu-regs.c.inc
@@ -0,0 +1,309 @@
+/*
+ * PMU register read/write functions for TCG IBM POWER chips
+ *
+ * Copyright IBM Corp. 2021
+ *
+ * Authors:
+ * Daniel Henrique Barboza <danielhb413@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+
+/*
+ * Checks whether the Group A SPRs (MMCR0, MMCR2, MMCRA, and the
+ * PMCs) have problem state read access.
+ *
+ * Read access is granted for all PMCC values but 0b01, where a
+ * Facility Unavailable Interrupt will occur.
+ */
+static bool spr_groupA_read_allowed(DisasContext *ctx)
+{
+ if (!ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) {
+ gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_PMU);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Checks whether the Group A SPRs (MMCR0, MMCR2, MMCRA, and the
+ * PMCs) have problem state write access.
+ *
+ * Write access is granted for PMCC values 0b10 and 0b11. Userspace
+ * writing with PMCC 0b00 will generate a Hypervisor Emulation
+ * Assistance Interrupt. Userspace writing with PMCC 0b01 will
+ * generate a Facility Unavailable Interrupt.
+ */
+static bool spr_groupA_write_allowed(DisasContext *ctx)
+{
+ if (ctx->mmcr0_pmcc0) {
+ return true;
+ }
+
+ if (ctx->mmcr0_pmcc1) {
+ /* PMCC = 0b01 */
+ gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_PMU);
+ } else {
+ /* PMCC = 0b00 */
+ gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ }
+
+ return false;
+}
+
+/*
+ * Helper function to avoid code repetition between MMCR0 and
+ * MMCR2 problem state write functions.
+ */
+static TCGv masked_gprn_for_spr_write(int gprn, int sprn,
+ uint64_t spr_mask)
+{
+ TCGv ret = tcg_temp_new();
+ TCGv t0 = tcg_temp_new();
+
+ /* 'ret' starts with all mask bits cleared */
+ gen_load_spr(ret, sprn);
+ tcg_gen_andi_tl(ret, ret, ~(spr_mask));
+
+ /* Apply the mask into 'gprn' in a temp var */
+ tcg_gen_andi_tl(t0, cpu_gpr[gprn], spr_mask);
+
+ /* Add the masked gprn bits into 'ret' */
+ tcg_gen_or_tl(ret, ret, t0);
+
+ return ret;
+}
+
+void spr_read_MMCR0_ureg(DisasContext *ctx, int gprn, int sprn)
+{
+ TCGv t0;
+
+ if (!spr_groupA_read_allowed(ctx)) {
+ return;
+ }
+
+ t0 = tcg_temp_new();
+
+ /*
+ * Filter out all bits but FC, PMAO, and PMAE, according
+ * to ISA v3.1, in 10.4.4 Monitor Mode Control Register 0,
+ * fourth paragraph.
+ */
+ gen_load_spr(t0, SPR_POWER_MMCR0);
+ tcg_gen_andi_tl(t0, t0, MMCR0_UREG_MASK);
+ tcg_gen_mov_tl(cpu_gpr[gprn], t0);
+}
+
+static void write_MMCR0_common(DisasContext *ctx, TCGv val)
+{
+ /*
+ * helper_store_mmcr0 will make clock based operations that
+ * will cause 'bad icount read' errors if we do not execute
+ * translator_io_start() beforehand.
+ */
+ translator_io_start(&ctx->base);
+ gen_helper_store_mmcr0(tcg_env, val);
+
+ /*
+ * End the translation block because MMCR0 writes can change
+ * ctx->pmu_insn_cnt.
+ */
+ ctx->base.is_jmp = DISAS_EXIT_UPDATE;
+}
+
+void spr_write_MMCR0_ureg(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv masked_gprn;
+
+ if (!spr_groupA_write_allowed(ctx)) {
+ return;
+ }
+
+ /*
+ * Filter out all bits but FC, PMAO, and PMAE, according
+ * to ISA v3.1, in 10.4.4 Monitor Mode Control Register 0,
+ * fourth paragraph.
+ */
+ masked_gprn = masked_gprn_for_spr_write(gprn, SPR_POWER_MMCR0,
+ MMCR0_UREG_MASK);
+ write_MMCR0_common(ctx, masked_gprn);
+}
+
+void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn)
+{
+ TCGv t0;
+
+ if (!spr_groupA_read_allowed(ctx)) {
+ return;
+ }
+
+ t0 = tcg_temp_new();
+
+ /*
+ * On read, filter out all bits that are not FCnP0 bits.
+ * When MMCR0[PMCC] is set to 0b10 or 0b11, providing
+ * problem state programs read/write access to MMCR2,
+ * only the FCnP0 bits can be accessed. All other bits are
+ * not changed when mtspr is executed in problem state, and
+ * all other bits return 0s when mfspr is executed in problem
+ * state, according to ISA v3.1, section 10.4.6 Monitor Mode
+ * Control Register 2, p. 1316, third paragraph.
+ */
+ gen_load_spr(t0, SPR_POWER_MMCR2);
+ tcg_gen_andi_tl(t0, t0, MMCR2_UREG_MASK);
+ tcg_gen_mov_tl(cpu_gpr[gprn], t0);
+}
+
+void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv masked_gprn;
+
+ if (!spr_groupA_write_allowed(ctx)) {
+ return;
+ }
+
+ /*
+ * Filter the bits that can be written using MMCR2_UREG_MASK,
+ * similar to what is done in spr_write_MMCR0_ureg().
+ */
+ masked_gprn = masked_gprn_for_spr_write(gprn, SPR_POWER_MMCR2,
+ MMCR2_UREG_MASK);
+ gen_store_spr(SPR_POWER_MMCR2, masked_gprn);
+}
+
+void spr_read_PMC(DisasContext *ctx, int gprn, int sprn)
+{
+ TCGv_i32 t_sprn = tcg_constant_i32(sprn);
+
+ translator_io_start(&ctx->base);
+ gen_helper_read_pmc(cpu_gpr[gprn], tcg_env, t_sprn);
+}
+
+void spr_read_PMC14_ureg(DisasContext *ctx, int gprn, int sprn)
+{
+ if (!spr_groupA_read_allowed(ctx)) {
+ return;
+ }
+
+ spr_read_PMC(ctx, gprn, sprn + 0x10);
+}
+
+void spr_read_PMC56_ureg(DisasContext *ctx, int gprn, int sprn)
+{
+ /*
+ * If PMCC = 0b11, PMC5 and PMC6 aren't included in the Performance
+ * Monitor, and a read attempt results in a Facility Unavailable
+ * Interrupt.
+ */
+ if (ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) {
+ gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_PMU);
+ return;
+ }
+
+ /* The remaining steps are similar to PMCs 1-4 userspace read */
+ spr_read_PMC14_ureg(ctx, gprn, sprn);
+}
+
+void spr_write_PMC(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t_sprn = tcg_constant_i32(sprn);
+
+ translator_io_start(&ctx->base);
+ gen_helper_store_pmc(tcg_env, t_sprn, cpu_gpr[gprn]);
+}
+
+void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn)
+{
+ if (!spr_groupA_write_allowed(ctx)) {
+ return;
+ }
+
+ spr_write_PMC(ctx, sprn + 0x10, gprn);
+}
+
+void spr_write_PMC56_ureg(DisasContext *ctx, int sprn, int gprn)
+{
+ /*
+ * If PMCC = 0b11, PMC5 and PMC6 aren't included in the Performance
+ * Monitor, and a write attempt results in a Facility Unavailable
+ * Interrupt.
+ */
+ if (ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) {
+ gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_PMU);
+ return;
+ }
+
+ /* The remaining steps are similar to PMCs 1-4 userspace write */
+ spr_write_PMC14_ureg(ctx, sprn, gprn);
+}
+
+void spr_write_MMCR0(DisasContext *ctx, int sprn, int gprn)
+{
+ write_MMCR0_common(ctx, cpu_gpr[gprn]);
+}
+
+void spr_write_MMCR1(DisasContext *ctx, int sprn, int gprn)
+{
+ translator_io_start(&ctx->base);
+ gen_helper_store_mmcr1(tcg_env, cpu_gpr[gprn]);
+}
+#else
+void spr_read_MMCR0_ureg(DisasContext *ctx, int gprn, int sprn)
+{
+ spr_read_ureg(ctx, gprn, sprn);
+}
+
+void spr_write_MMCR0_ureg(DisasContext *ctx, int sprn, int gprn)
+{
+ spr_noaccess(ctx, gprn, sprn);
+}
+
+void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn)
+{
+ spr_read_ureg(ctx, gprn, sprn);
+}
+
+void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn)
+{
+ spr_noaccess(ctx, gprn, sprn);
+}
+
+void spr_read_PMC14_ureg(DisasContext *ctx, int gprn, int sprn)
+{
+ spr_read_ureg(ctx, gprn, sprn);
+}
+
+void spr_read_PMC56_ureg(DisasContext *ctx, int gprn, int sprn)
+{
+ spr_read_ureg(ctx, gprn, sprn);
+}
+
+void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn)
+{
+ spr_noaccess(ctx, gprn, sprn);
+}
+
+void spr_write_PMC56_ureg(DisasContext *ctx, int sprn, int gprn)
+{
+ spr_noaccess(ctx, gprn, sprn);
+}
+
+void spr_write_MMCR0(DisasContext *ctx, int sprn, int gprn)
+{
+ spr_write_generic(ctx, sprn, gprn);
+}
+
+void spr_write_MMCR1(DisasContext *ctx, int sprn, int gprn)
+{
+ spr_write_generic(ctx, sprn, gprn);
+}
+
+void spr_write_PMC(DisasContext *ctx, int sprn, int gprn)
+{
+ spr_write_generic(ctx, sprn, gprn);
+}
+#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
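
The masked_gprn_for_spr_write() helper above is a read-modify-write generated at
translation time: bits covered by the mask come from the guest GPR, all other bits
keep their current SPR value. The same dataflow on plain integers, as a hedged
sketch (the mask value here is made up for the example, not the real
MMCR0_UREG_MASK):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative user-writable mask; the real MMCR0_UREG_MASK differs. */
    #define EXAMPLE_UREG_MASK 0x0000000080000003ULL

    /* new_spr = (old_spr & ~mask) | (gpr & mask) -- same as the TCG sequence. */
    static uint64_t masked_spr_write(uint64_t old_spr, uint64_t gpr, uint64_t mask)
    {
        return (old_spr & ~mask) | (gpr & mask);
    }

    int main(void)
    {
        uint64_t spr = 0x1234567800000000ULL;
        uint64_t gpr = 0xffffffffffffffffULL;

        /* Only the bits selected by the mask are taken from the GPR. */
        printf("%016llx\n",
               (unsigned long long)masked_spr_write(spr, gpr, EXAMPLE_UREG_MASK));
        return 0;
    }
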
diff --git a/target/ppc/power8-pmu.c b/target/ppc/power8-pmu.c
new file mode 100644
index 0000000000..cbc5889d91
--- /dev/null
+++ b/target/ppc/power8-pmu.c
@@ -0,0 +1,365 @@
+/*
+ * PMU emulation helpers for TCG IBM POWER chips
+ *
+ * Copyright IBM Corp. 2021
+ *
+ * Authors:
+ * Daniel Henrique Barboza <danielhb413@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "helper_regs.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "qemu/error-report.h"
+#include "qemu/timer.h"
+#include "hw/ppc/ppc.h"
+#include "power8-pmu.h"
+
+#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+
+static bool pmc_has_overflow_enabled(CPUPPCState *env, int sprn)
+{
+ if (sprn == SPR_POWER_PMC1) {
+ return env->spr[SPR_POWER_MMCR0] & MMCR0_PMC1CE;
+ }
+
+ return env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE;
+}
+
+/*
+ * Called after MMCR0 or MMCR1 changes to update pmc_ins_cnt and pmc_cyc_cnt.
+ * hflags must subsequently be updated.
+ */
+static void pmu_update_summaries(CPUPPCState *env)
+{
+ target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
+ target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
+ int ins_cnt = 0;
+ int cyc_cnt = 0;
+
+ if (mmcr0 & MMCR0_FC) {
+ goto out;
+ }
+
+ if (!(mmcr0 & MMCR0_FC14) && mmcr1 != 0) {
+ target_ulong sel;
+
+ sel = extract64(mmcr1, MMCR1_PMC1EVT_EXTR, MMCR1_EVT_SIZE);
+ switch (sel) {
+ case 0x02:
+ case 0xfe:
+ ins_cnt |= 1 << 1;
+ break;
+ case 0x1e:
+ case 0xf0:
+ cyc_cnt |= 1 << 1;
+ break;
+ }
+
+ sel = extract64(mmcr1, MMCR1_PMC2EVT_EXTR, MMCR1_EVT_SIZE);
+ ins_cnt |= (sel == 0x02) << 2;
+ cyc_cnt |= (sel == 0x1e) << 2;
+
+ sel = extract64(mmcr1, MMCR1_PMC3EVT_EXTR, MMCR1_EVT_SIZE);
+ ins_cnt |= (sel == 0x02) << 3;
+ cyc_cnt |= (sel == 0x1e) << 3;
+
+ sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
+ ins_cnt |= ((sel == 0xfa) || (sel == 0x2)) << 4;
+ cyc_cnt |= (sel == 0x1e) << 4;
+ }
+
+ ins_cnt |= !(mmcr0 & MMCR0_FC56) << 5;
+ cyc_cnt |= !(mmcr0 & MMCR0_FC56) << 6;
+
+ out:
+ env->pmc_ins_cnt = ins_cnt;
+ env->pmc_cyc_cnt = cyc_cnt;
+}
+
+void pmu_mmcr01_updated(CPUPPCState *env)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+
+ pmu_update_summaries(env);
+ hreg_update_pmu_hflags(env);
+
+ if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAO) {
+ ppc_set_irq(cpu, PPC_INTERRUPT_PERFM, 1);
+ } else {
+ ppc_set_irq(cpu, PPC_INTERRUPT_PERFM, 0);
+ }
+
+ /*
+ * Should this update overflow timers (if mmcr0 is updated) so they
+ * get set in cpu_post_load?
+ */
+}
+
+static bool pmu_increment_insns(CPUPPCState *env, uint32_t num_insns)
+{
+ target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
+ unsigned ins_cnt = env->pmc_ins_cnt;
+ bool overflow_triggered = false;
+ target_ulong tmp;
+
+ if (ins_cnt & (1 << 1)) {
+ tmp = env->spr[SPR_POWER_PMC1];
+ tmp += num_insns;
+ if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMC1CE)) {
+ tmp = PMC_COUNTER_NEGATIVE_VAL;
+ overflow_triggered = true;
+ }
+ env->spr[SPR_POWER_PMC1] = tmp;
+ }
+
+ if (ins_cnt & (1 << 2)) {
+ tmp = env->spr[SPR_POWER_PMC2];
+ tmp += num_insns;
+ if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
+ tmp = PMC_COUNTER_NEGATIVE_VAL;
+ overflow_triggered = true;
+ }
+ env->spr[SPR_POWER_PMC2] = tmp;
+ }
+
+ if (ins_cnt & (1 << 3)) {
+ tmp = env->spr[SPR_POWER_PMC3];
+ tmp += num_insns;
+ if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
+ tmp = PMC_COUNTER_NEGATIVE_VAL;
+ overflow_triggered = true;
+ }
+ env->spr[SPR_POWER_PMC3] = tmp;
+ }
+
+ if (ins_cnt & (1 << 4)) {
+ target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
+ int sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
+ if (sel == 0x02 || (env->spr[SPR_CTRL] & CTRL_RUN)) {
+ tmp = env->spr[SPR_POWER_PMC4];
+ tmp += num_insns;
+ if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
+ tmp = PMC_COUNTER_NEGATIVE_VAL;
+ overflow_triggered = true;
+ }
+ env->spr[SPR_POWER_PMC4] = tmp;
+ }
+ }
+
+ if (ins_cnt & (1 << 5)) {
+ tmp = env->spr[SPR_POWER_PMC5];
+ tmp += num_insns;
+ if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
+ tmp = PMC_COUNTER_NEGATIVE_VAL;
+ overflow_triggered = true;
+ }
+ env->spr[SPR_POWER_PMC5] = tmp;
+ }
+
+ return overflow_triggered;
+}
+
+static void pmu_update_cycles(CPUPPCState *env)
+{
+ uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ uint64_t time_delta = now - env->pmu_base_time;
+ int sprn, cyc_cnt = env->pmc_cyc_cnt;
+
+ for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
+ if (cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) {
+ /*
+             * The pseries and powernv clock runs at 1 GHz, meaning
+             * that 1 nanosecond equals 1 cycle.
+ */
+ env->spr[sprn] += time_delta;
+ }
+ }
+
+ /* Update base_time for future calculations */
+ env->pmu_base_time = now;
+}
+
+/*
+ * Helper function to retrieve the cycle overflow timer of the
+ * 'sprn' counter.
+ */
+static QEMUTimer *get_cyc_overflow_timer(CPUPPCState *env, int sprn)
+{
+ return env->pmu_cyc_overflow_timers[sprn - SPR_POWER_PMC1];
+}
+
+static void pmc_update_overflow_timer(CPUPPCState *env, int sprn)
+{
+ QEMUTimer *pmc_overflow_timer = get_cyc_overflow_timer(env, sprn);
+ int64_t timeout;
+
+ /*
+ * PMC5 does not have an overflow timer and this pointer
+ * will be NULL.
+ */
+ if (!pmc_overflow_timer) {
+ return;
+ }
+
+ if (!(env->pmc_cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) ||
+ !pmc_has_overflow_enabled(env, sprn)) {
+ /* Overflow timer is not needed for this counter */
+ timer_del(pmc_overflow_timer);
+ return;
+ }
+
+ if (env->spr[sprn] >= PMC_COUNTER_NEGATIVE_VAL) {
+ timeout = 0;
+ } else {
+ timeout = PMC_COUNTER_NEGATIVE_VAL - env->spr[sprn];
+ }
+
+ /*
+     * Use timer_mod_anticipate() because an overflow timer might
+     * already be running for this PMC.
+ */
+ timer_mod_anticipate(pmc_overflow_timer, env->pmu_base_time + timeout);
+}
+
+static void pmu_update_overflow_timers(CPUPPCState *env)
+{
+ int sprn;
+
+ /*
+ * Scroll through all PMCs and start counter overflow timers for
+ * PM_CYC events, if needed.
+ */
+ for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
+ pmc_update_overflow_timer(env, sprn);
+ }
+}
+
+static void pmu_delete_timers(CPUPPCState *env)
+{
+ QEMUTimer *pmc_overflow_timer;
+ int sprn;
+
+ for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
+ pmc_overflow_timer = get_cyc_overflow_timer(env, sprn);
+
+ if (pmc_overflow_timer) {
+ timer_del(pmc_overflow_timer);
+ }
+ }
+}
+
+void helper_store_mmcr0(CPUPPCState *env, target_ulong value)
+{
+ pmu_update_cycles(env);
+
+ env->spr[SPR_POWER_MMCR0] = value;
+
+ pmu_mmcr01_updated(env);
+
+ /* Update cycle overflow timers with the current MMCR0 state */
+ pmu_update_overflow_timers(env);
+}
+
+void helper_store_mmcr1(CPUPPCState *env, uint64_t value)
+{
+ pmu_update_cycles(env);
+
+ env->spr[SPR_POWER_MMCR1] = value;
+
+ pmu_mmcr01_updated(env);
+}
+
+target_ulong helper_read_pmc(CPUPPCState *env, uint32_t sprn)
+{
+ pmu_update_cycles(env);
+
+ return env->spr[sprn];
+}
+
+void helper_store_pmc(CPUPPCState *env, uint32_t sprn, uint64_t value)
+{
+ pmu_update_cycles(env);
+
+ env->spr[sprn] = (uint32_t)value;
+
+ pmc_update_overflow_timer(env, sprn);
+}
+
+static void perfm_alert(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+
+ pmu_update_cycles(env);
+
+ if (env->spr[SPR_POWER_MMCR0] & MMCR0_FCECE) {
+ env->spr[SPR_POWER_MMCR0] |= MMCR0_FC;
+
+ /* Changing MMCR0_FC requires summaries and hflags update */
+ pmu_mmcr01_updated(env);
+
+ /*
+ * Delete all pending timers if we need to freeze
+ * the PMC. We'll restart them when the PMC starts
+ * running again.
+ */
+ pmu_delete_timers(env);
+ }
+
+ if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAE) {
+ /* These MMCR0 bits do not require summaries or hflags update. */
+ env->spr[SPR_POWER_MMCR0] &= ~MMCR0_PMAE;
+ env->spr[SPR_POWER_MMCR0] |= MMCR0_PMAO;
+ ppc_set_irq(cpu, PPC_INTERRUPT_PERFM, 1);
+ }
+
+ raise_ebb_perfm_exception(env);
+}
+
+void helper_handle_pmc5_overflow(CPUPPCState *env)
+{
+ env->spr[SPR_POWER_PMC5] = PMC_COUNTER_NEGATIVE_VAL;
+ perfm_alert(env_archcpu(env));
+}
+
+/* This helper assumes that the PMC is running. */
+void helper_insns_inc(CPUPPCState *env, uint32_t num_insns)
+{
+ bool overflow_triggered;
+
+ overflow_triggered = pmu_increment_insns(env, num_insns);
+ if (overflow_triggered) {
+ perfm_alert(env_archcpu(env));
+ }
+}
+
+static void cpu_ppc_pmu_timer_cb(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+
+ perfm_alert(cpu);
+}
+
+void cpu_ppc_pmu_init(CPUPPCState *env)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+ int i, sprn;
+
+ for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
+ if (sprn == SPR_POWER_PMC5) {
+ continue;
+ }
+
+ i = sprn - SPR_POWER_PMC1;
+
+ env->pmu_cyc_overflow_timers[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ &cpu_ppc_pmu_timer_cb,
+ cpu);
+ }
+}
+#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
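A note on the arithmetic the PMU model above depends on: the pseries/powernv timebase is modelled at 1 GHz, so the virtual-clock delta in nanoseconds is added to a cycle-counting PMC directly, and a counter is treated as "negative" (overflow pending) once it reaches PMC_COUNTER_NEGATIVE_VAL. The sketch below restates that arithmetic outside of QEMU; the helper names are illustrative only.

    #include <stdint.h>
    #include <stdbool.h>

    #define PMC_COUNTER_NEGATIVE_VAL 0x80000000UL

    /* 1 GHz assumption: one virtual-clock nanosecond is one cycle. */
    static uint64_t pmc_add_cycles(uint64_t pmc, uint64_t ns_delta)
    {
        return pmc + ns_delta;
    }

    /*
     * Relative timeout until the counter becomes "negative" (MSB of the
     * 32-bit counter set); the QEMU code arms the timer at
     * pmu_base_time + timeout.
     */
    static int64_t pmc_overflow_timeout_ns(uint64_t pmc)
    {
        if (pmc >= PMC_COUNTER_NEGATIVE_VAL) {
            return 0;                           /* overflow already pending */
        }
        return PMC_COUNTER_NEGATIVE_VAL - pmc;
    }

    static bool pmc_is_negative(uint64_t pmc)
    {
        return pmc >= PMC_COUNTER_NEGATIVE_VAL;
    }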
diff --git a/target/ppc/power8-pmu.h b/target/ppc/power8-pmu.h
new file mode 100644
index 0000000000..775e640053
--- /dev/null
+++ b/target/ppc/power8-pmu.h
@@ -0,0 +1,27 @@
+/*
+ * PMU emulation helpers for TCG IBM POWER chips
+ *
+ * Copyright IBM Corp. 2021
+ *
+ * Authors:
+ * Daniel Henrique Barboza <danielhb413@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef POWER8_PMU_H
+#define POWER8_PMU_H
+
+#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+
+#define PMC_COUNTER_NEGATIVE_VAL 0x80000000UL
+
+void cpu_ppc_pmu_init(CPUPPCState *env);
+void pmu_mmcr01_updated(CPUPPCState *env);
+#else
+static inline void cpu_ppc_pmu_init(CPUPPCState *env) { }
+static inline void pmu_mmcr01_updated(CPUPPCState *env) { }
+#endif
+
+#endif
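The header above follows the usual QEMU pattern of pairing guarded prototypes with empty static-inline stubs so that callers never need their own #ifdefs. A minimal illustration of the same pattern, using a hypothetical feature name:

    /* Hypothetical example of the guard/stub split used by power8-pmu.h. */
    #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
    void my_feature_update(CPUPPCState *env);                   /* real implementation linked in */
    #else
    static inline void my_feature_update(CPUPPCState *env) { }  /* compiles to nothing */
    #endif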
diff --git a/target/ppc/monitor.c b/target/ppc/ppc-qmp-cmds.c
index a475108b2d..a25d86a8d1 100644
--- a/target/ppc/monitor.c
+++ b/target/ppc/ppc-qmp-cmds.c
@@ -1,5 +1,5 @@
/*
- * QEMU monitor
+ * QEMU PPC (monitor definitions)
*
* Copyright (c) 2003-2004 Fabrice Bellard
*
@@ -28,26 +28,35 @@
#include "qemu/ctype.h"
#include "monitor/hmp-target.h"
#include "monitor/hmp.h"
+#include "qapi/qapi-commands-machine-target.h"
+#include "cpu-models.h"
+#include "cpu-qom.h"
static target_long monitor_get_ccr(Monitor *mon, const struct MonitorDef *md,
int val)
{
CPUArchState *env = mon_get_cpu_env(mon);
unsigned int u;
- int i;
- u = 0;
- for (i = 0; i < 8; i++) {
- u |= env->crf[i] << (32 - (4 * (i + 1)));
- }
+ u = ppc_get_cr(env);
return u;
}
+static target_long monitor_get_xer(Monitor *mon, const struct MonitorDef *md,
+ int val)
+{
+ CPUArchState *env = mon_get_cpu_env(mon);
+ return cpu_read_xer(env);
+}
+
static target_long monitor_get_decr(Monitor *mon, const struct MonitorDef *md,
int val)
{
CPUArchState *env = mon_get_cpu_env(mon);
+ if (!env->tb_env) {
+ return 0;
+ }
return cpu_ppc_load_decr(env);
}
@@ -55,6 +64,9 @@ static target_long monitor_get_tbu(Monitor *mon, const struct MonitorDef *md,
int val)
{
CPUArchState *env = mon_get_cpu_env(mon);
+ if (!env->tb_env) {
+ return 0;
+ }
return cpu_ppc_load_tbu(env);
}
@@ -62,6 +74,9 @@ static target_long monitor_get_tbl(Monitor *mon, const struct MonitorDef *md,
int val)
{
CPUArchState *env = mon_get_cpu_env(mon);
+ if (!env->tb_env) {
+ return 0;
+ }
return cpu_ppc_load_tbl(env);
}
@@ -85,10 +100,14 @@ const MonitorDef monitor_defs[] = {
{ "decr", 0, &monitor_get_decr, },
{ "ccr|cr", 0, &monitor_get_ccr, },
/* Machine state register */
- { "xer", offsetof(CPUPPCState, xer) },
+ { "xer", 0, &monitor_get_xer },
{ "msr", offsetof(CPUPPCState, msr) },
{ "tbu", 0, &monitor_get_tbu, },
+#if defined(TARGET_PPC64)
+ { "tb", 0, &monitor_get_tbl, },
+#else
{ "tbl", 0, &monitor_get_tbl, },
+#endif
{ NULL },
};
@@ -118,8 +137,7 @@ static int ppc_cpu_get_reg_num(const char *numstr, int maxnum, int *pregnum)
int target_get_monitor_def(CPUState *cs, const char *name, uint64_t *pval)
{
int i, regnum;
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
+ CPUPPCState *env = cpu_env(cs);
/* General purpose registers */
if ((qemu_tolower(name[0]) == 'r') &&
@@ -156,3 +174,47 @@ int target_get_monitor_def(CPUState *cs, const char *name, uint64_t *pval)
return -EINVAL;
}
+
+static void ppc_cpu_defs_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ CpuDefinitionInfoList **first = user_data;
+ const char *typename;
+ CpuDefinitionInfo *info;
+
+ typename = object_class_get_name(oc);
+ info = g_malloc0(sizeof(*info));
+ info->name = cpu_model_from_type(typename);
+
+ QAPI_LIST_PREPEND(*first, info);
+}
+
+CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
+{
+ CpuDefinitionInfoList *cpu_list = NULL;
+ GSList *list;
+ int i;
+
+ list = object_class_get_list(TYPE_POWERPC_CPU, false);
+ g_slist_foreach(list, ppc_cpu_defs_entry, &cpu_list);
+ g_slist_free(list);
+
+ for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
+ PowerPCCPUAlias *alias = &ppc_cpu_aliases[i];
+ ObjectClass *oc;
+ CpuDefinitionInfo *info;
+
+ oc = ppc_cpu_class_by_name(alias->model);
+ if (oc == NULL) {
+ continue;
+ }
+
+ info = g_malloc0(sizeof(*info));
+ info->name = g_strdup(alias->alias);
+ info->q_typename = g_strdup(object_class_get_name(oc));
+
+ QAPI_LIST_PREPEND(cpu_list, info);
+ }
+
+ return cpu_list;
+}
diff --git a/target/ppc/spr_tcg.h b/target/ppc/spr_common.h
index 0be5f347d5..8a9d6cd994 100644
--- a/target/ppc/spr_tcg.h
+++ b/target/ppc/spr_common.h
@@ -16,15 +16,77 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef SPR_TCG_H
-#define SPR_TCG_H
+#ifndef SPR_COMMON_H
+#define SPR_COMMON_H
#define SPR_NOACCESS (&spr_noaccess)
+#ifdef CONFIG_TCG
+# define USR_ARG(X) X,
+# ifdef CONFIG_USER_ONLY
+# define SYS_ARG(X)
+# else
+# define SYS_ARG(X) X,
+# endif
+#else
+# define USR_ARG(X)
+# define SYS_ARG(X)
+#endif
+#ifdef CONFIG_KVM
+# define KVM_ARG(X) X,
+#else
+# define KVM_ARG(X)
+#endif
+
+typedef void spr_callback(DisasContext *, int, int);
+
+void _spr_register(CPUPPCState *env, int num, const char *name,
+ USR_ARG(spr_callback *uea_read)
+ USR_ARG(spr_callback *uea_write)
+ SYS_ARG(spr_callback *oea_read)
+ SYS_ARG(spr_callback *oea_write)
+ SYS_ARG(spr_callback *hea_read)
+ SYS_ARG(spr_callback *hea_write)
+ KVM_ARG(uint64_t one_reg_id)
+ target_ulong initial_value);
+
+/* spr_register_kvm_hv passes all required arguments. */
+#define spr_register_kvm_hv(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, hea_read, hea_write, \
+ one_reg_id, initial_value) \
+ _spr_register(env, num, name, \
+ USR_ARG(uea_read) USR_ARG(uea_write) \
+ SYS_ARG(oea_read) SYS_ARG(oea_write) \
+ SYS_ARG(hea_read) SYS_ARG(hea_write) \
+ KVM_ARG(one_reg_id) initial_value)
+
+/* spr_register_kvm duplicates the oea callbacks to the hea callbacks. */
+#define spr_register_kvm(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, one_reg_id, ival) \
+ spr_register_kvm_hv(env, num, name, uea_read, uea_write, oea_read, \
+ oea_write, oea_read, oea_write, one_reg_id, ival)
+
+/* spr_register_hv and spr_register are similar, except there is no kvm id. */
+#define spr_register_hv(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, hea_read, hea_write, ival) \
+ spr_register_kvm_hv(env, num, name, uea_read, uea_write, oea_read, \
+ oea_write, hea_read, hea_write, 0, ival)
+
+#define spr_register(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, ival) \
+ spr_register_kvm(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, 0, ival)
+
/* prototypes for readers and writers for SPRs */
void spr_noaccess(DisasContext *ctx, int gprn, int sprn);
void spr_read_generic(DisasContext *ctx, int gprn, int sprn);
void spr_write_generic(DisasContext *ctx, int sprn, int gprn);
+void spr_write_generic32(DisasContext *ctx, int sprn, int gprn);
+void spr_core_write_generic(DisasContext *ctx, int sprn, int gprn);
+void spr_write_MMCR0(DisasContext *ctx, int sprn, int gprn);
+void spr_write_MMCR1(DisasContext *ctx, int sprn, int gprn);
+void spr_write_PMC(DisasContext *ctx, int sprn, int gprn);
+void spr_write_CTRL(DisasContext *ctx, int sprn, int gprn);
void spr_read_xer(DisasContext *ctx, int gprn, int sprn);
void spr_write_xer(DisasContext *ctx, int sprn, int gprn);
void spr_read_lr(DisasContext *ctx, int gprn, int sprn);
@@ -32,17 +94,23 @@ void spr_write_lr(DisasContext *ctx, int sprn, int gprn);
void spr_read_ctr(DisasContext *ctx, int gprn, int sprn);
void spr_write_ctr(DisasContext *ctx, int sprn, int gprn);
void spr_read_ureg(DisasContext *ctx, int gprn, int sprn);
+void spr_read_MMCR0_ureg(DisasContext *ctx, int gprn, int sprn);
+void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn);
+void spr_read_PMC(DisasContext *ctx, int gprn, int sprn);
+void spr_read_PMC14_ureg(DisasContext *ctx, int gprn, int sprn);
+void spr_read_PMC56_ureg(DisasContext *ctx, int gprn, int sprn);
void spr_read_tbl(DisasContext *ctx, int gprn, int sprn);
void spr_read_tbu(DisasContext *ctx, int gprn, int sprn);
void spr_read_atbl(DisasContext *ctx, int gprn, int sprn);
void spr_read_atbu(DisasContext *ctx, int gprn, int sprn);
-void spr_read_601_rtcl(DisasContext *ctx, int gprn, int sprn);
-void spr_read_601_rtcu(DisasContext *ctx, int gprn, int sprn);
void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn);
void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn);
+void spr_write_MMCR0_ureg(DisasContext *ctx, int sprn, int gprn);
+void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn);
+void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn);
+void spr_write_PMC56_ureg(DisasContext *ctx, int sprn, int gprn);
#ifndef CONFIG_USER_ONLY
-void spr_write_generic32(DisasContext *ctx, int sprn, int gprn);
void spr_write_clear(DisasContext *ctx, int sprn, int gprn);
void spr_access_nop(DisasContext *ctx, int sprn, int gprn);
void spr_read_decr(DisasContext *ctx, int gprn, int sprn);
@@ -64,16 +132,13 @@ void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn);
void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn);
void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn);
void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn);
-void spr_write_601_rtcu(DisasContext *ctx, int sprn, int gprn);
-void spr_write_601_rtcl(DisasContext *ctx, int sprn, int gprn);
-void spr_write_hid0_601(DisasContext *ctx, int sprn, int gprn);
-void spr_read_601_ubat(DisasContext *ctx, int gprn, int sprn);
-void spr_write_601_ubatu(DisasContext *ctx, int sprn, int gprn);
-void spr_write_601_ubatl(DisasContext *ctx, int sprn, int gprn);
void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn);
void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn);
void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn);
void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn);
+void spr_write_40x_tcr(DisasContext *ctx, int sprn, int gprn);
+void spr_write_40x_tsr(DisasContext *ctx, int sprn, int gprn);
+void spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn);
void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn);
void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn);
void spr_read_403_pbr(DisasContext *ctx, int gprn, int sprn);
@@ -94,6 +159,9 @@ void spr_read_mas73(DisasContext *ctx, int gprn, int sprn);
#ifdef TARGET_PPC64
void spr_read_cfar(DisasContext *ctx, int gprn, int sprn);
void spr_write_cfar(DisasContext *ctx, int sprn, int gprn);
+void spr_write_ciabr(DisasContext *ctx, int sprn, int gprn);
+void spr_write_dawr0(DisasContext *ctx, int sprn, int gprn);
+void spr_write_dawrx0(DisasContext *ctx, int sprn, int gprn);
void spr_write_ureg(DisasContext *ctx, int sprn, int gprn);
void spr_read_purr(DisasContext *ctx, int gprn, int sprn);
void spr_write_purr(DisasContext *ctx, int sprn, int gprn);
@@ -130,7 +198,19 @@ void spr_write_ebb(DisasContext *ctx, int sprn, int gprn);
void spr_read_ebb_upper32(DisasContext *ctx, int gprn, int sprn);
void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn);
void spr_write_hmer(DisasContext *ctx, int sprn, int gprn);
+void spr_read_tfmr(DisasContext *ctx, int gprn, int sprn);
+void spr_write_tfmr(DisasContext *ctx, int sprn, int gprn);
void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn);
+void spr_read_dexcr_ureg(DisasContext *ctx, int gprn, int sprn);
#endif
+void register_low_BATs(CPUPPCState *env);
+void register_high_BATs(CPUPPCState *env);
+void register_sdr1_sprs(CPUPPCState *env);
+void register_thrm_sprs(CPUPPCState *env);
+void register_usprgh_sprs(CPUPPCState *env);
+void register_non_embedded_sprs(CPUPPCState *env);
+void register_6xx_7xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways);
+void register_generic_sprs(PowerPCCPU *cpu);
+
#endif
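For context, the spr_register* macros declared above are invoked from the init_proc_*() routines in cpu_init.c. The sketch below shows a typical registration; the SPR choices, callbacks and KVM one-reg id are illustrative and vary per CPU family.

    /* Sketch of a call site; assumes it sits inside CPU init code in cpu_init.c. */
    static void init_example_sprs(CPUPPCState *env)
    {
        /* Plain SPR: user and supervisor accesses share the generic callbacks. */
        spr_register(env, SPR_VRSAVE, "VRSAVE",
                     &spr_read_generic, &spr_write_generic,
                     &spr_read_generic, &spr_write_generic,
                     0x00000000);

        /* KVM-backed SPR: no user access, generic supervisor access,
         * synchronised with KVM through a one-reg id. */
        spr_register_kvm(env, SPR_DSISR, "DSISR",
                         SPR_NOACCESS, SPR_NOACCESS,
                         &spr_read_generic, &spr_write_generic,
                         KVM_REG_PPC_DSISR, 0x00000000);
    }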
diff --git a/target/ppc/tcg-stub.c b/target/ppc/tcg-stub.c
index aadcf59d26..740d796b98 100644
--- a/target/ppc/tcg-stub.c
+++ b/target/ppc/tcg-stub.c
@@ -28,18 +28,3 @@ void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp)
void destroy_ppc_opcodes(PowerPCCPU *cpu)
{
}
-
-target_ulong softmmu_resize_hpt_prepare(PowerPCCPU *cpu,
- SpaprMachineState *spapr,
- target_ulong shift)
-{
- g_assert_not_reached();
-}
-
-target_ulong softmmu_resize_hpt_commit(PowerPCCPU *cpu,
- SpaprMachineState *spapr,
- target_ulong flags,
- target_ulong shift)
-{
- g_assert_not_reached();
-}
diff --git a/target/ppc/timebase_helper.c b/target/ppc/timebase_helper.c
index 8ff4080eb9..39d397416e 100644
--- a/target/ppc/timebase_helper.c
+++ b/target/ppc/timebase_helper.c
@@ -18,6 +18,7 @@
*/
#include "qemu/osdep.h"
#include "cpu.h"
+#include "hw/ppc/ppc.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "qemu/log.h"
@@ -59,29 +60,55 @@ target_ulong helper_load_purr(CPUPPCState *env)
void helper_store_purr(CPUPPCState *env, target_ulong val)
{
- cpu_ppc_store_purr(env, val);
-}
-#endif
+ CPUState *cs = env_cpu(env);
+ CPUState *ccs;
+ uint32_t nr_threads = cs->nr_threads;
-target_ulong helper_load_601_rtcl(CPUPPCState *env)
-{
- return cpu_ppc601_load_rtcl(env);
-}
+ if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+ cpu_ppc_store_purr(env, val);
+ return;
+ }
-target_ulong helper_load_601_rtcu(CPUPPCState *env)
-{
- return cpu_ppc601_load_rtcu(env);
+ THREAD_SIBLING_FOREACH(cs, ccs) {
+ CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
+ cpu_ppc_store_purr(cenv, val);
+ }
}
+#endif
#if !defined(CONFIG_USER_ONLY)
void helper_store_tbl(CPUPPCState *env, target_ulong val)
{
- cpu_ppc_store_tbl(env, val);
+ CPUState *cs = env_cpu(env);
+ CPUState *ccs;
+ uint32_t nr_threads = cs->nr_threads;
+
+ if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+ cpu_ppc_store_tbl(env, val);
+ return;
+ }
+
+ THREAD_SIBLING_FOREACH(cs, ccs) {
+ CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
+ cpu_ppc_store_tbl(cenv, val);
+ }
}
void helper_store_tbu(CPUPPCState *env, target_ulong val)
{
- cpu_ppc_store_tbu(env, val);
+ CPUState *cs = env_cpu(env);
+ CPUState *ccs;
+ uint32_t nr_threads = cs->nr_threads;
+
+ if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+ cpu_ppc_store_tbu(env, val);
+ return;
+ }
+
+ THREAD_SIBLING_FOREACH(cs, ccs) {
+ CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
+ cpu_ppc_store_tbu(cenv, val);
+ }
}
void helper_store_atbl(CPUPPCState *env, target_ulong val)
@@ -94,16 +121,6 @@ void helper_store_atbu(CPUPPCState *env, target_ulong val)
cpu_ppc_store_atbu(env, val);
}
-void helper_store_601_rtcl(CPUPPCState *env, target_ulong val)
-{
- cpu_ppc601_store_rtcl(env, val);
-}
-
-void helper_store_601_rtcu(CPUPPCState *env, target_ulong val)
-{
- cpu_ppc601_store_rtcu(env, val);
-}
-
target_ulong helper_load_decr(CPUPPCState *env)
{
return cpu_ppc_load_decr(env);
@@ -121,17 +138,53 @@ target_ulong helper_load_hdecr(CPUPPCState *env)
void helper_store_hdecr(CPUPPCState *env, target_ulong val)
{
- cpu_ppc_store_hdecr(env, val);
+ CPUState *cs = env_cpu(env);
+ CPUState *ccs;
+ uint32_t nr_threads = cs->nr_threads;
+
+ if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+ cpu_ppc_store_hdecr(env, val);
+ return;
+ }
+
+ THREAD_SIBLING_FOREACH(cs, ccs) {
+ CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
+ cpu_ppc_store_hdecr(cenv, val);
+ }
}
void helper_store_vtb(CPUPPCState *env, target_ulong val)
{
- cpu_ppc_store_vtb(env, val);
+ CPUState *cs = env_cpu(env);
+ CPUState *ccs;
+ uint32_t nr_threads = cs->nr_threads;
+
+ if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+ cpu_ppc_store_vtb(env, val);
+ return;
+ }
+
+ THREAD_SIBLING_FOREACH(cs, ccs) {
+ CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
+ cpu_ppc_store_vtb(cenv, val);
+ }
}
void helper_store_tbu40(CPUPPCState *env, target_ulong val)
{
- cpu_ppc_store_tbu40(env, val);
+ CPUState *cs = env_cpu(env);
+ CPUState *ccs;
+ uint32_t nr_threads = cs->nr_threads;
+
+ if (nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) {
+ cpu_ppc_store_tbu40(env, val);
+ return;
+ }
+
+ THREAD_SIBLING_FOREACH(cs, ccs) {
+ CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
+ cpu_ppc_store_tbu40(cenv, val);
+ }
}
target_ulong helper_load_40x_pit(CPUPPCState *env)
@@ -144,6 +197,16 @@ void helper_store_40x_pit(CPUPPCState *env, target_ulong val)
store_40x_pit(env, val);
}
+void helper_store_40x_tcr(CPUPPCState *env, target_ulong val)
+{
+ store_40x_tcr(env, val);
+}
+
+void helper_store_40x_tsr(CPUPPCState *env, target_ulong val)
+{
+ store_40x_tsr(env, val);
+}
+
void helper_store_booke_tcr(CPUPPCState *env, target_ulong val)
{
store_booke_tcr(env, val);
@@ -153,6 +216,236 @@ void helper_store_booke_tsr(CPUPPCState *env, target_ulong val)
{
store_booke_tsr(env, val);
}
+
+#if defined(TARGET_PPC64)
+/*
+ * POWER processor Timebase Facility
+ */
+
+/*
+ * The TBST is the timebase state machine, which is a per-core machine that
+ * is used to synchronize the core TB with the ChipTOD. States 3,4,5 are
+ * not used in POWER8/9/10.
+ *
+ * The state machine gets driven by writes to TFMR SPR from the core, and
+ * by signals from the ChipTOD. The state machine table for common
+ * transitions is as follows (according to hardware specs, not necessarily
+ * this implementation):
+ *
+ * | Cur | Event | New |
+ * +----------------+----------------------------------+-----+
+ * | 0 RESET | TFMR |= LOAD_TOD_MOD | 1 |
+ * | 1 SEND_TOD_MOD | "immediate transition" | 2 |
+ * | 2 NOT_SET | mttbu/mttbu40/mttbl | 2 |
+ * | 2 NOT_SET | TFMR |= MOVE_CHIP_TOD_TO_TB | 6 |
+ * | 6 SYNC_WAIT | "sync pulse from ChipTOD" | 7 |
+ * | 7 GET_TOD | ChipTOD xscom MOVE_TOD_TO_TB_REG | 8 |
+ * | 8 TB_RUNNING | mttbu/mttbu40 | 8 |
+ * | 8 TB_RUNNING | TFMR |= LOAD_TOD_MOD | 1 |
+ * | 8 TB_RUNNING | mttbl | 9 |
+ * | 9 TB_ERROR | TFMR |= CLEAR_TB_ERRORS | 0 |
+ *
+ * - LOAD_TOD_MOD will also move states 2,6 to state 1, omitted from table
+ * because it's not a typical init flow.
+ *
+ * - The ERROR state can be entered from most/all other states on invalid
+ *   control/state combinations (e.g., a TFMR control bit set in a state
+ *   where it is not listed as causing a transition); omitted to avoid clutter.
+ *
+ * Note: mttbl causes a timebase error because this inevitably causes
+ * ticks to be lost and TB to become unsynchronized, whereas TB can be
+ * adjusted using mttbu* without losing ticks. mttbl behaviour is not
+ * modelled.
+ *
+ * Note: the TB state machine does not actually cause any real TB adjustment!
+ * TB starts out synchronized across all vCPUs (hardware threads) in
+ *       QEMU, so for now the purpose of the TBST and ChipTOD model is simply
+ * to step through firmware initialisation sequences.
+ */
+static unsigned int tfmr_get_tb_state(uint64_t tfmr)
+{
+ return (tfmr & TFMR_TBST_ENCODED) >> (63 - 31);
+}
+
+static uint64_t tfmr_new_tb_state(uint64_t tfmr, unsigned int tbst)
+{
+ tfmr &= ~TFMR_TBST_LAST;
+ tfmr |= (tfmr & TFMR_TBST_ENCODED) >> 4; /* move state to last state */
+ tfmr &= ~TFMR_TBST_ENCODED;
+ tfmr |= (uint64_t)tbst << (63 - 31); /* move new state to state */
+
+ if (tbst == TBST_TB_RUNNING) {
+ tfmr |= TFMR_TB_VALID;
+ } else {
+ tfmr &= ~TFMR_TB_VALID;
+ }
+
+ return tfmr;
+}
+
+static void write_tfmr(CPUPPCState *env, target_ulong val)
+{
+ CPUState *cs = env_cpu(env);
+
+ if (cs->nr_threads == 1) {
+ env->spr[SPR_TFMR] = val;
+ } else {
+ CPUState *ccs;
+ THREAD_SIBLING_FOREACH(cs, ccs) {
+ CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
+ cenv->spr[SPR_TFMR] = val;
+ }
+ }
+}
+
+static void tb_state_machine_step(CPUPPCState *env)
+{
+ uint64_t tfmr = env->spr[SPR_TFMR];
+ unsigned int tbst = tfmr_get_tb_state(tfmr);
+
+ if (!(tfmr & TFMR_TB_ECLIPZ) || tbst == TBST_TB_ERROR) {
+ return;
+ }
+
+ if (env->pnv_tod_tbst.tb_sync_pulse_timer) {
+ env->pnv_tod_tbst.tb_sync_pulse_timer--;
+ } else {
+ tfmr |= TFMR_TB_SYNC_OCCURED;
+ write_tfmr(env, tfmr);
+ }
+
+ if (env->pnv_tod_tbst.tb_state_timer) {
+ env->pnv_tod_tbst.tb_state_timer--;
+ return;
+ }
+
+ if (tfmr & TFMR_LOAD_TOD_MOD) {
+ tfmr &= ~TFMR_LOAD_TOD_MOD;
+ if (tbst == TBST_GET_TOD) {
+ tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR);
+ tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
+ } else {
+ tfmr = tfmr_new_tb_state(tfmr, TBST_SEND_TOD_MOD);
+ /* State seems to transition immediately */
+ tfmr = tfmr_new_tb_state(tfmr, TBST_NOT_SET);
+ }
+ } else if (tfmr & TFMR_MOVE_CHIP_TOD_TO_TB) {
+ if (tbst == TBST_SYNC_WAIT) {
+ tfmr = tfmr_new_tb_state(tfmr, TBST_GET_TOD);
+ env->pnv_tod_tbst.tb_state_timer = 3;
+ } else if (tbst == TBST_GET_TOD) {
+ if (env->pnv_tod_tbst.tod_sent_to_tb) {
+ tfmr = tfmr_new_tb_state(tfmr, TBST_TB_RUNNING);
+ tfmr &= ~TFMR_MOVE_CHIP_TOD_TO_TB;
+ env->pnv_tod_tbst.tb_ready_for_tod = 0;
+ env->pnv_tod_tbst.tod_sent_to_tb = 0;
+ }
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: MOVE_CHIP_TOD_TO_TB "
+ "state machine in invalid state 0x%x\n", tbst);
+ tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR);
+ tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
+ env->pnv_tod_tbst.tb_ready_for_tod = 0;
+ }
+ }
+
+ write_tfmr(env, tfmr);
+}
+
+target_ulong helper_load_tfmr(CPUPPCState *env)
+{
+ tb_state_machine_step(env);
+
+ return env->spr[SPR_TFMR] | TFMR_TB_ECLIPZ;
+}
+
+void helper_store_tfmr(CPUPPCState *env, target_ulong val)
+{
+ uint64_t tfmr = env->spr[SPR_TFMR];
+ uint64_t clear_on_write;
+ unsigned int tbst = tfmr_get_tb_state(tfmr);
+
+ if (!(val & TFMR_TB_ECLIPZ)) {
+ qemu_log_mask(LOG_UNIMP, "TFMR non-ECLIPZ mode not implemented\n");
+ tfmr &= ~TFMR_TBST_ENCODED;
+ tfmr &= ~TFMR_TBST_LAST;
+ goto out;
+ }
+
+ /* Update control bits */
+ tfmr = (tfmr & ~TFMR_CONTROL_MASK) | (val & TFMR_CONTROL_MASK);
+
+ /* Several bits are clear-on-write, only one is implemented so far */
+ clear_on_write = val & TFMR_FIRMWARE_CONTROL_ERROR;
+ tfmr &= ~clear_on_write;
+
+ /*
+ * mtspr always clears this. The sync pulse timer makes it come back
+ * after the second mfspr.
+ */
+ tfmr &= ~TFMR_TB_SYNC_OCCURED;
+ env->pnv_tod_tbst.tb_sync_pulse_timer = 1;
+
+ if (ppc_cpu_tir(env_archcpu(env)) != 0 &&
+ (val & (TFMR_LOAD_TOD_MOD | TFMR_MOVE_CHIP_TOD_TO_TB))) {
+ qemu_log_mask(LOG_UNIMP, "TFMR timebase state machine can only be "
+ "driven by thread 0\n");
+ goto out;
+ }
+
+ if (((tfmr | val) & (TFMR_LOAD_TOD_MOD | TFMR_MOVE_CHIP_TOD_TO_TB)) ==
+ (TFMR_LOAD_TOD_MOD | TFMR_MOVE_CHIP_TOD_TO_TB)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: LOAD_TOD_MOD and "
+ "MOVE_CHIP_TOD_TO_TB both set\n");
+ tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR);
+ tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
+ env->pnv_tod_tbst.tb_ready_for_tod = 0;
+ goto out;
+ }
+
+ if (tfmr & TFMR_CLEAR_TB_ERRORS) {
+ /*
+ * Workbook says TFMR_CLEAR_TB_ERRORS should be written twice.
+ * This is not simulated/required here.
+ */
+ tfmr = tfmr_new_tb_state(tfmr, TBST_RESET);
+ tfmr &= ~TFMR_CLEAR_TB_ERRORS;
+ tfmr &= ~TFMR_LOAD_TOD_MOD;
+ tfmr &= ~TFMR_MOVE_CHIP_TOD_TO_TB;
+ tfmr &= ~TFMR_FIRMWARE_CONTROL_ERROR; /* XXX: should this be cleared? */
+ env->pnv_tod_tbst.tb_ready_for_tod = 0;
+ env->pnv_tod_tbst.tod_sent_to_tb = 0;
+ goto out;
+ }
+
+ if (tbst == TBST_TB_ERROR) {
+ qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: mtspr TFMR in TB_ERROR"
+ " state\n");
+ tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
+ return;
+ }
+
+ if (tfmr & TFMR_LOAD_TOD_MOD) {
+        /* Wait an arbitrary three mfspr reads before the next state transition. */
+ env->pnv_tod_tbst.tb_state_timer = 3;
+ } else if (tfmr & TFMR_MOVE_CHIP_TOD_TO_TB) {
+ if (tbst == TBST_NOT_SET) {
+ tfmr = tfmr_new_tb_state(tfmr, TBST_SYNC_WAIT);
+ env->pnv_tod_tbst.tb_ready_for_tod = 1;
+ env->pnv_tod_tbst.tb_state_timer = 3; /* arbitrary */
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "TFMR error: MOVE_CHIP_TOD_TO_TB "
+ "not in TB not set state 0x%x\n",
+ tbst);
+ tfmr = tfmr_new_tb_state(tfmr, TBST_TB_ERROR);
+ tfmr |= TFMR_FIRMWARE_CONTROL_ERROR;
+ env->pnv_tod_tbst.tb_ready_for_tod = 0;
+ }
+ }
+
+out:
+ write_tfmr(env, tfmr);
+}
#endif
/*****************************************************************************/
@@ -171,15 +464,15 @@ target_ulong helper_load_dcr(CPUPPCState *env, target_ulong dcrn)
} else {
int ret;
- qemu_mutex_lock_iothread();
+ bql_lock();
ret = ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
if (unlikely(ret != 0)) {
qemu_log_mask(LOG_GUEST_ERROR, "DCR read error %d %03x\n",
(uint32_t)dcrn, (uint32_t)dcrn);
raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
POWERPC_EXCP_INVAL |
- POWERPC_EXCP_PRIV_REG, GETPC());
+ POWERPC_EXCP_INVAL_INVAL, GETPC());
}
}
return val;
@@ -194,15 +487,16 @@ void helper_store_dcr(CPUPPCState *env, target_ulong dcrn, target_ulong val)
POWERPC_EXCP_INVAL_INVAL, GETPC());
} else {
int ret;
- qemu_mutex_lock_iothread();
+ bql_lock();
ret = ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
if (unlikely(ret != 0)) {
qemu_log_mask(LOG_GUEST_ERROR, "DCR write error %d %03x\n",
(uint32_t)dcrn, (uint32_t)dcrn);
raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
POWERPC_EXCP_INVAL |
- POWERPC_EXCP_PRIV_REG, GETPC());
+ POWERPC_EXCP_INVAL_INVAL, GETPC());
}
}
}
+#endif
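To make the TBST comment above concrete, here is a rough guest/firmware-side view of the init flow the model is meant to support. Each mfspr of TFMR steps the emulated state machine; the ChipTOD xscom access that delivers the TOD is handled by the board model and is not shown. mtspr/mfspr/tbst() are illustrative shorthand here, not QEMU functions.

    uint64_t tfmr;

    /* RESET -> SEND_TOD_MOD -> NOT_SET */
    mtspr(SPR_TFMR, mfspr(SPR_TFMR) | TFMR_LOAD_TOD_MOD);
    do {
        tfmr = mfspr(SPR_TFMR);              /* each read advances tb_state_timer */
    } while (tbst(tfmr) != TBST_NOT_SET);

    /* NOT_SET -> SYNC_WAIT; the ChipTOD MOVE_TOD_TO_TB_REG xscom then
     * drives SYNC_WAIT -> GET_TOD -> TB_RUNNING. */
    mtspr(SPR_TFMR, tfmr | TFMR_MOVE_CHIP_TOD_TO_TB);
    do {
        tfmr = mfspr(SPR_TFMR);
    } while (tbst(tfmr) != TBST_TB_RUNNING); /* TFMR_TB_VALID is now set */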
diff --git a/target/ppc/trace-events b/target/ppc/trace-events
index c88cfccf8d..a79f1b4370 100644
--- a/target/ppc/trace-events
+++ b/target/ppc/trace-events
@@ -23,8 +23,16 @@ kvm_failed_get_vpa(void) "Warning: Unable to get VPA information from KVM"
kvm_handle_dcr_write(void) "handle dcr write"
kvm_handle_dcr_read(void) "handle dcr read"
kvm_handle_halt(void) "handle halt"
-kvm_handle_papr_hcall(void) "handle PAPR hypercall"
+kvm_handle_papr_hcall(uint64_t hcall) "0x%" PRIx64
kvm_handle_epr(void) "handle epr"
kvm_handle_watchdog_expiry(void) "handle watchdog expiry"
kvm_handle_debug_exception(void) "handle debug exception"
kvm_handle_nmi_exception(void) "handle NMI exception"
+
+# excp_helper.c
+ppc_excp_rfi(uint64_t nip, uint64_t msr) "Return from exception at 0x%" PRIx64 " with flags 0x%016" PRIx64
+ppc_excp_dsi(uint64_t dsisr, uint64_t dar) "DSI exception: DSISR=0x%" PRIx64 " DAR=0x%" PRIx64
+ppc_excp_isi(uint64_t msr, uint64_t nip) "ISI exception: msr=0x%016" PRIx64 " nip=0x%" PRIx64
+ppc_excp_fp_ignore(void) "Ignore floating point exception"
+ppc_excp_inval(uint64_t nip) "Invalid instruction at 0x%" PRIx64
+ppc_excp_print(const char *excp) "%s exception"
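The new excp_helper.c events above are emitted through the trace_* wrappers that tracetool generates from this file; a call site looks roughly like this (arguments illustrative):

    /* Inside the ISI/DSI paths of the exception delivery code: */
    trace_ppc_excp_isi(msr, env->nip);
    trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]);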
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 5d8b06bd80..93ffec787c 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -26,8 +26,6 @@
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/host-utils.h"
-#include "qemu/main-loop.h"
-#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
@@ -35,14 +33,18 @@
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/atomic128.h"
-#include "spr_tcg.h"
+#include "spr_common.h"
+#include "power8-pmu.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
+#define HELPER_H "helper.h"
+#include "exec/helper-info.c.inc"
+#undef HELPER_H
+
#define CPU_SINGLE_STEP 0x1
#define CPU_BRANCH_STEP 0x2
-#define GDBSTUB_SINGLE_STEP 0x4
/* Include definitions for instructions classes and implementations flags */
/* #define PPC_DEBUG_DISAS */
@@ -71,12 +73,14 @@ static TCGv cpu_cfar;
#endif
static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32;
static TCGv cpu_reserve;
+static TCGv cpu_reserve_length;
static TCGv cpu_reserve_val;
+#if defined(TARGET_PPC64)
+static TCGv cpu_reserve_val2;
+#endif
static TCGv cpu_fpscr;
static TCGv_i32 cpu_access_type;
-#include "exec/gen-icount.h"
-
void ppc_translate_init(void)
{
int i;
@@ -88,7 +92,7 @@ void ppc_translate_init(void)
for (i = 0; i < 8; i++) {
snprintf(p, cpu_reg_names_size, "crf%d", i);
- cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
+ cpu_crf[i] = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUPPCState, crf[i]), p);
p += 5;
cpu_reg_names_size -= 5;
@@ -96,58 +100,67 @@ void ppc_translate_init(void)
for (i = 0; i < 32; i++) {
snprintf(p, cpu_reg_names_size, "r%d", i);
- cpu_gpr[i] = tcg_global_mem_new(cpu_env,
+ cpu_gpr[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, gpr[i]), p);
p += (i < 10) ? 3 : 4;
cpu_reg_names_size -= (i < 10) ? 3 : 4;
snprintf(p, cpu_reg_names_size, "r%dH", i);
- cpu_gprh[i] = tcg_global_mem_new(cpu_env,
+ cpu_gprh[i] = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, gprh[i]), p);
p += (i < 10) ? 4 : 5;
cpu_reg_names_size -= (i < 10) ? 4 : 5;
}
- cpu_nip = tcg_global_mem_new(cpu_env,
+ cpu_nip = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, nip), "nip");
- cpu_msr = tcg_global_mem_new(cpu_env,
+ cpu_msr = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, msr), "msr");
- cpu_ctr = tcg_global_mem_new(cpu_env,
+ cpu_ctr = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, ctr), "ctr");
- cpu_lr = tcg_global_mem_new(cpu_env,
+ cpu_lr = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, lr), "lr");
#if defined(TARGET_PPC64)
- cpu_cfar = tcg_global_mem_new(cpu_env,
+ cpu_cfar = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, cfar), "cfar");
#endif
- cpu_xer = tcg_global_mem_new(cpu_env,
+ cpu_xer = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, xer), "xer");
- cpu_so = tcg_global_mem_new(cpu_env,
+ cpu_so = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, so), "SO");
- cpu_ov = tcg_global_mem_new(cpu_env,
+ cpu_ov = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, ov), "OV");
- cpu_ca = tcg_global_mem_new(cpu_env,
+ cpu_ca = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, ca), "CA");
- cpu_ov32 = tcg_global_mem_new(cpu_env,
+ cpu_ov32 = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, ov32), "OV32");
- cpu_ca32 = tcg_global_mem_new(cpu_env,
+ cpu_ca32 = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, ca32), "CA32");
- cpu_reserve = tcg_global_mem_new(cpu_env,
+ cpu_reserve = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, reserve_addr),
"reserve_addr");
- cpu_reserve_val = tcg_global_mem_new(cpu_env,
- offsetof(CPUPPCState, reserve_val),
- "reserve_val");
+ cpu_reserve_length = tcg_global_mem_new(tcg_env,
+ offsetof(CPUPPCState,
+ reserve_length),
+ "reserve_length");
+ cpu_reserve_val = tcg_global_mem_new(tcg_env,
+ offsetof(CPUPPCState, reserve_val),
+ "reserve_val");
+#if defined(TARGET_PPC64)
+ cpu_reserve_val2 = tcg_global_mem_new(tcg_env,
+ offsetof(CPUPPCState, reserve_val2),
+ "reserve_val2");
+#endif
- cpu_fpscr = tcg_global_mem_new(cpu_env,
+ cpu_fpscr = tcg_global_mem_new(tcg_env,
offsetof(CPUPPCState, fpscr), "fpscr");
- cpu_access_type = tcg_global_mem_new_i32(cpu_env,
+ cpu_access_type = tcg_global_mem_new_i32(tcg_env,
offsetof(CPUPPCState, access_type),
"access_type");
}
@@ -175,6 +188,12 @@ struct DisasContext {
bool spe_enabled;
bool tm_enabled;
bool gtse;
+ bool hr;
+ bool mmcr0_pmcc0;
+ bool mmcr0_pmcc1;
+ bool mmcr0_pmcjce;
+ bool pmc_other;
+ bool pmu_insn_cnt;
ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
int singlestep_enabled;
uint32_t flags;
@@ -190,7 +209,7 @@ struct DisasContext {
/* Return true iff byteswap is needed in a scalar memop */
static inline bool need_byteswap(const DisasContext *ctx)
{
-#if defined(TARGET_WORDS_BIGENDIAN)
+#if TARGET_BIG_ENDIAN
return ctx->le_mode;
#else
return !ctx->le_mode;
@@ -217,15 +236,48 @@ struct opc_handler_t {
void (*handler)(DisasContext *ctx);
};
+static inline bool gen_serialize(DisasContext *ctx)
+{
+ if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+ /* Restart with exclusive lock. */
+ gen_helper_exit_atomic(tcg_env);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return false;
+ }
+ return true;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+#if defined(TARGET_PPC64)
+static inline bool gen_serialize_core(DisasContext *ctx)
+{
+ if (ctx->flags & POWERPC_FLAG_SMT) {
+ return gen_serialize(ctx);
+ }
+ return true;
+}
+#endif
+
+static inline bool gen_serialize_core_lpar(DisasContext *ctx)
+{
+#if defined(TARGET_PPC64)
+ if (ctx->flags & POWERPC_FLAG_SMT_1LPAR) {
+ return gen_serialize(ctx);
+ }
+#endif
+ return true;
+}
+#endif
+
/* SPR load/store helpers */
static inline void gen_load_spr(TCGv t, int reg)
{
- tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
+ tcg_gen_ld_tl(t, tcg_env, offsetof(CPUPPCState, spr[reg]));
}
static inline void gen_store_spr(int reg, TCGv t)
{
- tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
+ tcg_gen_st_tl(t, tcg_env, offsetof(CPUPPCState, spr[reg]));
}
static inline void gen_set_access_type(DisasContext *ctx, int access_type)
@@ -244,36 +296,26 @@ static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
tcg_gen_movi_tl(cpu_nip, nip);
}
-static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
+static void gen_exception_err_nip(DisasContext *ctx, uint32_t excp,
+ uint32_t error, target_ulong nip)
{
TCGv_i32 t0, t1;
- /*
- * These are all synchronous exceptions, we set the PC back to the
- * faulting instruction
- */
- gen_update_nip(ctx, ctx->cia);
- t0 = tcg_const_i32(excp);
- t1 = tcg_const_i32(error);
- gen_helper_raise_exception_err(cpu_env, t0, t1);
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
+ gen_update_nip(ctx, nip);
+ t0 = tcg_constant_i32(excp);
+ t1 = tcg_constant_i32(error);
+ gen_helper_raise_exception_err(tcg_env, t0, t1);
ctx->base.is_jmp = DISAS_NORETURN;
}
-static void gen_exception(DisasContext *ctx, uint32_t excp)
+static inline void gen_exception_err(DisasContext *ctx, uint32_t excp,
+ uint32_t error)
{
- TCGv_i32 t0;
-
/*
* These are all synchronous exceptions, we set the PC back to the
* faulting instruction
*/
- gen_update_nip(ctx, ctx->cia);
- t0 = tcg_const_i32(excp);
- gen_helper_raise_exception(cpu_env, t0);
- tcg_temp_free_i32(t0);
- ctx->base.is_jmp = DISAS_NORETURN;
+ gen_exception_err_nip(ctx, excp, error, ctx->cia);
}
static void gen_exception_nip(DisasContext *ctx, uint32_t excp,
@@ -282,26 +324,28 @@ static void gen_exception_nip(DisasContext *ctx, uint32_t excp,
TCGv_i32 t0;
gen_update_nip(ctx, nip);
- t0 = tcg_const_i32(excp);
- gen_helper_raise_exception(cpu_env, t0);
- tcg_temp_free_i32(t0);
+ t0 = tcg_constant_i32(excp);
+ gen_helper_raise_exception(tcg_env, t0);
ctx->base.is_jmp = DISAS_NORETURN;
}
-static void gen_icount_io_start(DisasContext *ctx)
+static inline void gen_exception(DisasContext *ctx, uint32_t excp)
{
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
- gen_io_start();
- /*
- * An I/O instruction must be last in the TB.
- * Chain to the next TB, and let the code from gen_tb_start
- * decide if we need to return to the main loop.
- * Doing this first also allows this value to be overridden.
- */
- ctx->base.is_jmp = DISAS_TOO_MANY;
- }
+ /*
+ * These are all synchronous exceptions, we set the PC back to the
+ * faulting instruction
+ */
+ gen_exception_nip(ctx, excp, ctx->cia);
}
+#if !defined(CONFIG_USER_ONLY)
+static void gen_ppc_maybe_interrupt(DisasContext *ctx)
+{
+ translator_io_start(&ctx->base);
+ gen_helper_ppc_maybe_interrupt(tcg_env);
+}
+#endif
+
/*
* Tells the caller what is the appropriate exception to generate and prepares
* SPR registers for this exception.
@@ -309,8 +353,9 @@ static void gen_icount_io_start(DisasContext *ctx)
* The exception can be either POWERPC_EXCP_TRACE (on most PowerPCs) or
* POWERPC_EXCP_DEBUG (on BookE).
*/
-static uint32_t gen_prep_dbgex(DisasContext *ctx)
+static void gen_debug_exception(DisasContext *ctx, bool rfi_type)
{
+#if !defined(CONFIG_USER_ONLY)
if (ctx->flags & POWERPC_FLAG_DE) {
target_ulong dbsr = 0;
if (ctx->singlestep_enabled & CPU_SINGLE_STEP) {
@@ -323,17 +368,18 @@ static uint32_t gen_prep_dbgex(DisasContext *ctx)
gen_load_spr(t0, SPR_BOOKE_DBSR);
tcg_gen_ori_tl(t0, t0, dbsr);
gen_store_spr(SPR_BOOKE_DBSR, t0);
- tcg_temp_free(t0);
- return POWERPC_EXCP_DEBUG;
+ gen_helper_raise_exception(tcg_env,
+ tcg_constant_i32(POWERPC_EXCP_DEBUG));
+ ctx->base.is_jmp = DISAS_NORETURN;
} else {
- return POWERPC_EXCP_TRACE;
+ if (!rfi_type) { /* BookS does not single step rfi type instructions */
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_movi_tl(t0, ctx->cia);
+ gen_helper_book3s_trace(tcg_env, t0);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ }
}
-}
-
-static void gen_debug_exception(DisasContext *ctx)
-{
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(EXCP_DEBUG));
- ctx->base.is_jmp = DISAS_NORETURN;
+#endif
}
static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
@@ -373,9 +419,8 @@ void spr_noaccess(DisasContext *ctx, int gprn, int sprn)
static void spr_load_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
- TCGv_i32 t0 = tcg_const_i32(sprn);
- gen_helper_load_dump_spr(cpu_env, t0);
- tcg_temp_free_i32(t0);
+ TCGv_i32 t0 = tcg_constant_i32(sprn);
+ gen_helper_load_dump_spr(tcg_env, t0);
#endif
}
@@ -388,9 +433,8 @@ void spr_read_generic(DisasContext *ctx, int gprn, int sprn)
static void spr_store_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
- TCGv_i32 t0 = tcg_const_i32(sprn);
- gen_helper_store_dump_spr(cpu_env, t0);
- tcg_temp_free_i32(t0);
+ TCGv_i32 t0 = tcg_constant_i32(sprn);
+ gen_helper_store_dump_spr(tcg_env, t0);
#endif
}
@@ -400,20 +444,71 @@ void spr_write_generic(DisasContext *ctx, int sprn, int gprn)
spr_store_dump_spr(sprn);
}
-#if !defined(CONFIG_USER_ONLY)
void spr_write_generic32(DisasContext *ctx, int sprn, int gprn)
{
#ifdef TARGET_PPC64
TCGv t0 = tcg_temp_new();
tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]);
gen_store_spr(sprn, t0);
- tcg_temp_free(t0);
spr_store_dump_spr(sprn);
#else
spr_write_generic(ctx, sprn, gprn);
#endif
}
+void spr_core_write_generic(DisasContext *ctx, int sprn, int gprn)
+{
+ if (!(ctx->flags & POWERPC_FLAG_SMT)) {
+ spr_write_generic(ctx, sprn, gprn);
+ return;
+ }
+
+ if (!gen_serialize(ctx)) {
+ return;
+ }
+
+ gen_helper_spr_core_write_generic(tcg_env, tcg_constant_i32(sprn),
+ cpu_gpr[gprn]);
+ spr_store_dump_spr(sprn);
+}
+
+static void spr_write_CTRL_ST(DisasContext *ctx, int sprn, int gprn)
+{
+ /* This does not implement >1 thread */
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_extract_tl(t0, cpu_gpr[gprn], 0, 1); /* Extract RUN field */
+ tcg_gen_shli_tl(t1, t0, 8); /* Duplicate the bit in TS */
+ tcg_gen_or_tl(t1, t1, t0);
+ gen_store_spr(sprn, t1);
+}
+
+void spr_write_CTRL(DisasContext *ctx, int sprn, int gprn)
+{
+ if (!(ctx->flags & POWERPC_FLAG_SMT_1LPAR)) {
+ /* CTRL behaves as 1-thread in LPAR-per-thread mode */
+ spr_write_CTRL_ST(ctx, sprn, gprn);
+ goto out;
+ }
+
+ if (!gen_serialize(ctx)) {
+ return;
+ }
+
+ gen_helper_spr_write_CTRL(tcg_env, tcg_constant_i32(sprn),
+ cpu_gpr[gprn]);
+out:
+ spr_store_dump_spr(sprn);
+
+ /*
+ * SPR_CTRL writes must force a new translation block,
+ * allowing the PMU to calculate the run latch events with
+ * more accuracy.
+ */
+ ctx->base.is_jmp = DISAS_EXIT_UPDATE;
+}
+
+#if !defined(CONFIG_USER_ONLY)
void spr_write_clear(DisasContext *ctx, int sprn, int gprn)
{
TCGv t0 = tcg_temp_new();
@@ -422,8 +517,6 @@ void spr_write_clear(DisasContext *ctx, int sprn, int gprn)
tcg_gen_neg_tl(t1, cpu_gpr[gprn]);
tcg_gen_and_tl(t0, t0, t1);
gen_store_spr(sprn, t0);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
void spr_access_nop(DisasContext *ctx, int sprn, int gprn)
@@ -453,9 +546,6 @@ void spr_read_xer(DisasContext *ctx, int gprn, int sprn)
tcg_gen_shli_tl(t0, cpu_ca32, XER_CA32);
tcg_gen_or_tl(dst, dst, t0);
}
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
}
void spr_write_xer(DisasContext *ctx, int sprn, int gprn)
@@ -484,8 +574,9 @@ void spr_write_lr(DisasContext *ctx, int sprn, int gprn)
tcg_gen_mov_tl(cpu_lr, cpu_gpr[gprn]);
}
-/* CFAR */
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+/* Debug facilities */
+/* CFAR */
void spr_read_cfar(DisasContext *ctx, int gprn, int sprn)
{
tcg_gen_mov_tl(cpu_gpr[gprn], cpu_cfar);
@@ -495,6 +586,26 @@ void spr_write_cfar(DisasContext *ctx, int sprn, int gprn)
{
tcg_gen_mov_tl(cpu_cfar, cpu_gpr[gprn]);
}
+
+/* Breakpoint */
+void spr_write_ciabr(DisasContext *ctx, int sprn, int gprn)
+{
+ translator_io_start(&ctx->base);
+ gen_helper_store_ciabr(tcg_env, cpu_gpr[gprn]);
+}
+
+/* Watchpoint */
+void spr_write_dawr0(DisasContext *ctx, int sprn, int gprn)
+{
+ translator_io_start(&ctx->base);
+ gen_helper_store_dawr0(tcg_env, cpu_gpr[gprn]);
+}
+
+void spr_write_dawrx0(DisasContext *ctx, int sprn, int gprn)
+{
+ translator_io_start(&ctx->base);
+ gen_helper_store_dawrx0(tcg_env, cpu_gpr[gprn]);
+}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
/* CTR */
@@ -531,14 +642,14 @@ void spr_write_ureg(DisasContext *ctx, int sprn, int gprn)
#if !defined(CONFIG_USER_ONLY)
void spr_read_decr(DisasContext *ctx, int gprn, int sprn)
{
- gen_icount_io_start(ctx);
- gen_helper_load_decr(cpu_gpr[gprn], cpu_env);
+ translator_io_start(&ctx->base);
+ gen_helper_load_decr(cpu_gpr[gprn], tcg_env);
}
void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
{
- gen_icount_io_start(ctx);
- gen_helper_store_decr(cpu_env, cpu_gpr[gprn]);
+ translator_io_start(&ctx->base);
+ gen_helper_store_decr(tcg_env, cpu_gpr[gprn]);
}
#endif
@@ -546,91 +657,111 @@ void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
/* Time base */
void spr_read_tbl(DisasContext *ctx, int gprn, int sprn)
{
- gen_icount_io_start(ctx);
- gen_helper_load_tbl(cpu_gpr[gprn], cpu_env);
+ translator_io_start(&ctx->base);
+ gen_helper_load_tbl(cpu_gpr[gprn], tcg_env);
}
void spr_read_tbu(DisasContext *ctx, int gprn, int sprn)
{
- gen_icount_io_start(ctx);
- gen_helper_load_tbu(cpu_gpr[gprn], cpu_env);
+ translator_io_start(&ctx->base);
+ gen_helper_load_tbu(cpu_gpr[gprn], tcg_env);
}
void spr_read_atbl(DisasContext *ctx, int gprn, int sprn)
{
- gen_helper_load_atbl(cpu_gpr[gprn], cpu_env);
+ gen_helper_load_atbl(cpu_gpr[gprn], tcg_env);
}
void spr_read_atbu(DisasContext *ctx, int gprn, int sprn)
{
- gen_helper_load_atbu(cpu_gpr[gprn], cpu_env);
+ gen_helper_load_atbu(cpu_gpr[gprn], tcg_env);
}
#if !defined(CONFIG_USER_ONLY)
void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)
{
- gen_icount_io_start(ctx);
- gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]);
+ if (!gen_serialize_core_lpar(ctx)) {
+ return;
+ }
+
+ translator_io_start(&ctx->base);
+ gen_helper_store_tbl(tcg_env, cpu_gpr[gprn]);
}
void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
{
- gen_icount_io_start(ctx);
- gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]);
+ if (!gen_serialize_core_lpar(ctx)) {
+ return;
+ }
+
+ translator_io_start(&ctx->base);
+ gen_helper_store_tbu(tcg_env, cpu_gpr[gprn]);
}
void spr_write_atbl(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_store_atbl(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_atbl(tcg_env, cpu_gpr[gprn]);
}
void spr_write_atbu(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_store_atbu(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_atbu(tcg_env, cpu_gpr[gprn]);
}
#if defined(TARGET_PPC64)
void spr_read_purr(DisasContext *ctx, int gprn, int sprn)
{
- gen_icount_io_start(ctx);
- gen_helper_load_purr(cpu_gpr[gprn], cpu_env);
+ translator_io_start(&ctx->base);
+ gen_helper_load_purr(cpu_gpr[gprn], tcg_env);
}
void spr_write_purr(DisasContext *ctx, int sprn, int gprn)
{
- gen_icount_io_start(ctx);
- gen_helper_store_purr(cpu_env, cpu_gpr[gprn]);
+ if (!gen_serialize_core_lpar(ctx)) {
+ return;
+ }
+ translator_io_start(&ctx->base);
+ gen_helper_store_purr(tcg_env, cpu_gpr[gprn]);
}
/* HDECR */
void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
{
- gen_icount_io_start(ctx);
- gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env);
+ translator_io_start(&ctx->base);
+ gen_helper_load_hdecr(cpu_gpr[gprn], tcg_env);
}
void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
{
- gen_icount_io_start(ctx);
- gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]);
+ if (!gen_serialize_core_lpar(ctx)) {
+ return;
+ }
+ translator_io_start(&ctx->base);
+ gen_helper_store_hdecr(tcg_env, cpu_gpr[gprn]);
}
void spr_read_vtb(DisasContext *ctx, int gprn, int sprn)
{
- gen_icount_io_start(ctx);
- gen_helper_load_vtb(cpu_gpr[gprn], cpu_env);
+ translator_io_start(&ctx->base);
+ gen_helper_load_vtb(cpu_gpr[gprn], tcg_env);
}
void spr_write_vtb(DisasContext *ctx, int sprn, int gprn)
{
- gen_icount_io_start(ctx);
- gen_helper_store_vtb(cpu_env, cpu_gpr[gprn]);
+ if (!gen_serialize_core_lpar(ctx)) {
+ return;
+ }
+ translator_io_start(&ctx->base);
+ gen_helper_store_vtb(tcg_env, cpu_gpr[gprn]);
}
void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn)
{
- gen_icount_io_start(ctx);
- gen_helper_store_tbu40(cpu_env, cpu_gpr[gprn]);
+ if (!gen_serialize_core_lpar(ctx)) {
+ return;
+ }
+ translator_io_start(&ctx->base);
+ gen_helper_store_tbu40(tcg_env, cpu_gpr[gprn]);
}
#endif
@@ -641,94 +772,86 @@ void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn)
/* IBAT0L...IBAT7L */
void spr_read_ibat(DisasContext *ctx, int gprn, int sprn)
{
- tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+ tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env,
offsetof(CPUPPCState,
IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
}
void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn)
{
- tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+ tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env,
offsetof(CPUPPCState,
IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
}
void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2);
- gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
- tcg_temp_free_i32(t0);
+ TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0U) / 2);
+ gen_helper_store_ibatu(tcg_env, t0, cpu_gpr[gprn]);
}
void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4U) / 2) + 4);
- gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
- tcg_temp_free_i32(t0);
+ TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4U) / 2) + 4);
+ gen_helper_store_ibatu(tcg_env, t0, cpu_gpr[gprn]);
}
void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0L) / 2);
- gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
- tcg_temp_free_i32(t0);
+ TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0L) / 2);
+ gen_helper_store_ibatl(tcg_env, t0, cpu_gpr[gprn]);
}
void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4L) / 2) + 4);
- gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
- tcg_temp_free_i32(t0);
+ TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4L) / 2) + 4);
+ gen_helper_store_ibatl(tcg_env, t0, cpu_gpr[gprn]);
}
/* DBAT0U...DBAT7U */
/* DBAT0L...DBAT7L */
void spr_read_dbat(DisasContext *ctx, int gprn, int sprn)
{
- tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+ tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env,
offsetof(CPUPPCState,
DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
}
void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn)
{
- tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
+ tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env,
offsetof(CPUPPCState,
DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
}
void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0U) / 2);
- gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
- tcg_temp_free_i32(t0);
+ TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0U) / 2);
+ gen_helper_store_dbatu(tcg_env, t0, cpu_gpr[gprn]);
}
void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4U) / 2) + 4);
- gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
- tcg_temp_free_i32(t0);
+ TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4U) / 2) + 4);
+ gen_helper_store_dbatu(tcg_env, t0, cpu_gpr[gprn]);
}
void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0L) / 2);
- gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
- tcg_temp_free_i32(t0);
+ TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0L) / 2);
+ gen_helper_store_dbatl(tcg_env, t0, cpu_gpr[gprn]);
}
void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4L) / 2) + 4);
- gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
- tcg_temp_free_i32(t0);
+ TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4L) / 2) + 4);
+ gen_helper_store_dbatl(tcg_env, t0, cpu_gpr[gprn]);
}
/* SDR1 */
void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_store_sdr1(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_sdr1(tcg_env, cpu_gpr[gprn]);
}
#if defined(TARGET_PPC64)
@@ -736,168 +859,124 @@ void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn)
/* PIDR */
void spr_write_pidr(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_store_pidr(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_pidr(tcg_env, cpu_gpr[gprn]);
}
void spr_write_lpidr(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_store_lpidr(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_lpidr(tcg_env, cpu_gpr[gprn]);
}
void spr_read_hior(DisasContext *ctx, int gprn, int sprn)
{
- tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, excp_prefix));
+ tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env, offsetof(CPUPPCState, excp_prefix));
}
void spr_write_hior(DisasContext *ctx, int sprn, int gprn)
{
TCGv t0 = tcg_temp_new();
tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0x3FFFFF00000ULL);
- tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
- tcg_temp_free(t0);
+ tcg_gen_st_tl(t0, tcg_env, offsetof(CPUPPCState, excp_prefix));
}
void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_store_ptcr(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_ptcr(tcg_env, cpu_gpr[gprn]);
}
void spr_write_pcr(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_store_pcr(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_pcr(tcg_env, cpu_gpr[gprn]);
}
/* DPDES */
void spr_read_dpdes(DisasContext *ctx, int gprn, int sprn)
{
- gen_helper_load_dpdes(cpu_gpr[gprn], cpu_env);
-}
-
-void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn)
-{
- gen_helper_store_dpdes(cpu_env, cpu_gpr[gprn]);
-}
-#endif
-#endif
-
-/* PowerPC 601 specific registers */
-/* RTC */
-void spr_read_601_rtcl(DisasContext *ctx, int gprn, int sprn)
-{
- gen_helper_load_601_rtcl(cpu_gpr[gprn], cpu_env);
-}
-
-void spr_read_601_rtcu(DisasContext *ctx, int gprn, int sprn)
-{
- gen_helper_load_601_rtcu(cpu_gpr[gprn], cpu_env);
-}
+ if (!gen_serialize_core_lpar(ctx)) {
+ return;
+ }
-#if !defined(CONFIG_USER_ONLY)
-void spr_write_601_rtcu(DisasContext *ctx, int sprn, int gprn)
-{
- gen_helper_store_601_rtcu(cpu_env, cpu_gpr[gprn]);
+ gen_helper_load_dpdes(cpu_gpr[gprn], tcg_env);
}
-void spr_write_601_rtcl(DisasContext *ctx, int sprn, int gprn)
+void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_store_601_rtcl(cpu_env, cpu_gpr[gprn]);
-}
+ if (!gen_serialize_core_lpar(ctx)) {
+ return;
+ }
-void spr_write_hid0_601(DisasContext *ctx, int sprn, int gprn)
-{
- gen_helper_store_hid0_601(cpu_env, cpu_gpr[gprn]);
- /* Must stop the translation as endianness may have changed */
- ctx->base.is_jmp = DISAS_EXIT_UPDATE;
+ gen_helper_store_dpdes(tcg_env, cpu_gpr[gprn]);
}
#endif
-
-/* Unified bats */
-#if !defined(CONFIG_USER_ONLY)
-void spr_read_601_ubat(DisasContext *ctx, int gprn, int sprn)
-{
- tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
- offsetof(CPUPPCState,
- IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
-}
-
-void spr_write_601_ubatu(DisasContext *ctx, int sprn, int gprn)
-{
- TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2);
- gen_helper_store_601_batl(cpu_env, t0, cpu_gpr[gprn]);
- tcg_temp_free_i32(t0);
-}
-
-void spr_write_601_ubatl(DisasContext *ctx, int sprn, int gprn)
-{
- TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2);
- gen_helper_store_601_batu(cpu_env, t0, cpu_gpr[gprn]);
- tcg_temp_free_i32(t0);
-}
#endif
/* PowerPC 40x specific registers */
#if !defined(CONFIG_USER_ONLY)
void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn)
{
- gen_icount_io_start(ctx);
- gen_helper_load_40x_pit(cpu_gpr[gprn], cpu_env);
+ translator_io_start(&ctx->base);
+ gen_helper_load_40x_pit(cpu_gpr[gprn], tcg_env);
}
void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn)
{
- gen_icount_io_start(ctx);
- gen_helper_store_40x_pit(cpu_env, cpu_gpr[gprn]);
+ translator_io_start(&ctx->base);
+ gen_helper_store_40x_pit(tcg_env, cpu_gpr[gprn]);
}
void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn)
{
- gen_icount_io_start(ctx);
+ translator_io_start(&ctx->base);
gen_store_spr(sprn, cpu_gpr[gprn]);
- gen_helper_store_40x_dbcr0(cpu_env, cpu_gpr[gprn]);
+ gen_helper_store_40x_dbcr0(tcg_env, cpu_gpr[gprn]);
/* We must stop translation as we may have rebooted */
ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}
void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn)
{
- gen_icount_io_start(ctx);
- gen_helper_store_40x_sler(cpu_env, cpu_gpr[gprn]);
+ translator_io_start(&ctx->base);
+ gen_helper_store_40x_sler(tcg_env, cpu_gpr[gprn]);
}
-void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn)
+void spr_write_40x_tcr(DisasContext *ctx, int sprn, int gprn)
{
- gen_icount_io_start(ctx);
- gen_helper_store_booke_tcr(cpu_env, cpu_gpr[gprn]);
+ translator_io_start(&ctx->base);
+ gen_helper_store_40x_tcr(tcg_env, cpu_gpr[gprn]);
}
-void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn)
+void spr_write_40x_tsr(DisasContext *ctx, int sprn, int gprn)
{
- gen_icount_io_start(ctx);
- gen_helper_store_booke_tsr(cpu_env, cpu_gpr[gprn]);
+ translator_io_start(&ctx->base);
+ gen_helper_store_40x_tsr(tcg_env, cpu_gpr[gprn]);
}
-#endif
-/* PowerPC 403 specific registers */
-/* PBL1 / PBU1 / PBL2 / PBU2 */
-#if !defined(CONFIG_USER_ONLY)
-void spr_read_403_pbr(DisasContext *ctx, int gprn, int sprn)
+void spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn)
{
- tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
- offsetof(CPUPPCState, pb[sprn - SPR_403_PBL1]));
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xFF);
+ gen_helper_store_40x_pid(tcg_env, t0);
+}
+
+void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn)
+{
+ translator_io_start(&ctx->base);
+ gen_helper_store_booke_tcr(tcg_env, cpu_gpr[gprn]);
}
-void spr_write_403_pbr(DisasContext *ctx, int sprn, int gprn)
+void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32(sprn - SPR_403_PBL1);
- gen_helper_store_403_pbr(cpu_env, t0, cpu_gpr[gprn]);
- tcg_temp_free_i32(t0);
+ translator_io_start(&ctx->base);
+ gen_helper_store_booke_tsr(tcg_env, cpu_gpr[gprn]);
}
+#endif
+/* PIR */
+#if !defined(CONFIG_USER_ONLY)
void spr_write_pir(DisasContext *ctx, int sprn, int gprn)
{
TCGv t0 = tcg_temp_new();
tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xF);
gen_store_spr(SPR_PIR, t0);
- tcg_temp_free(t0);
}
#endif
@@ -905,17 +984,15 @@ void spr_write_pir(DisasContext *ctx, int sprn, int gprn)
void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn)
{
TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_ld_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
+ tcg_gen_ld_i32(t0, tcg_env, offsetof(CPUPPCState, spe_fscr));
tcg_gen_extu_i32_tl(cpu_gpr[gprn], t0);
- tcg_temp_free_i32(t0);
}
void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn)
{
TCGv_i32 t0 = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(t0, cpu_gpr[gprn]);
- tcg_gen_st_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
- tcg_temp_free_i32(t0);
+ tcg_gen_st_i32(t0, tcg_env, offsetof(CPUPPCState, spe_fscr));
}
#if !defined(CONFIG_USER_ONLY)
@@ -923,11 +1000,10 @@ void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn)
void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn)
{
TCGv t0 = tcg_temp_new();
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivpr_mask));
+ tcg_gen_ld_tl(t0, tcg_env, offsetof(CPUPPCState, ivpr_mask));
tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
- tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
+ tcg_gen_st_tl(t0, tcg_env, offsetof(CPUPPCState, excp_prefix));
gen_store_spr(sprn, t0);
- tcg_temp_free(t0);
}
void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn)
@@ -941,18 +1017,17 @@ void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn)
} else if (sprn >= SPR_BOOKE_IVOR38 && sprn <= SPR_BOOKE_IVOR42) {
sprn_offs = sprn - SPR_BOOKE_IVOR38 + 38;
} else {
- printf("Trying to write an unknown exception vector %d %03x\n",
- sprn, sprn);
- gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ qemu_log_mask(LOG_GUEST_ERROR, "Trying to write an unknown exception"
+ " vector 0x%03x\n", sprn);
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
TCGv t0 = tcg_temp_new();
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivor_mask));
+ tcg_gen_ld_tl(t0, tcg_env, offsetof(CPUPPCState, ivor_mask));
tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
- tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_vectors[sprn_offs]));
+ tcg_gen_st_tl(t0, tcg_env, offsetof(CPUPPCState, excp_vectors[sprn_offs]));
gen_store_spr(sprn, t0);
- tcg_temp_free(t0);
}
#endif
@@ -987,10 +1062,6 @@ void spr_write_amr(DisasContext *ctx, int sprn, int gprn)
tcg_gen_or_tl(t0, t0, t2);
gen_store_spr(SPR_AMR, t0);
spr_store_dump_spr(SPR_AMR);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
}
void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
@@ -1018,10 +1089,6 @@ void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
tcg_gen_or_tl(t0, t0, t2);
gen_store_spr(SPR_UAMOR, t0);
spr_store_dump_spr(SPR_UAMOR);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
}
void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
@@ -1049,10 +1116,6 @@ void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
tcg_gen_or_tl(t0, t0, t2);
gen_store_spr(SPR_IAMR, t0);
spr_store_dump_spr(SPR_IAMR);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
}
#endif
#endif
@@ -1060,7 +1123,7 @@ void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
#ifndef CONFIG_USER_ONLY
void spr_read_thrm(DisasContext *ctx, int gprn, int sprn)
{
- gen_helper_fixup_thrm(cpu_env);
+ gen_helper_fixup_thrm(tcg_env);
gen_load_spr(cpu_gpr[gprn], sprn);
spr_load_dump_spr(sprn);
}
@@ -1073,7 +1136,6 @@ void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn)
tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR0_DCE | L1CSR0_CPE);
gen_store_spr(sprn, t0);
- tcg_temp_free(t0);
}
void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn)
@@ -1082,7 +1144,6 @@ void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn)
tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR1_ICE | L1CSR1_CPE);
gen_store_spr(sprn, t0);
- tcg_temp_free(t0);
}
void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn)
@@ -1092,27 +1153,27 @@ void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn)
tcg_gen_andi_tl(t0, cpu_gpr[gprn],
~(E500_L2CSR0_L2FI | E500_L2CSR0_L2FL | E500_L2CSR0_L2LFC));
gen_store_spr(sprn, t0);
- tcg_temp_free(t0);
}
void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_booke206_tlbflush(cpu_env, cpu_gpr[gprn]);
+ gen_helper_booke206_tlbflush(tcg_env, cpu_gpr[gprn]);
}
void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn)
{
- TCGv_i32 t0 = tcg_const_i32(sprn);
- gen_helper_booke_setpid(cpu_env, t0, cpu_gpr[gprn]);
- tcg_temp_free_i32(t0);
+ TCGv_i32 t0 = tcg_constant_i32(sprn);
+ gen_helper_booke_setpid(tcg_env, t0, cpu_gpr[gprn]);
}
+
void spr_write_eplc(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_booke_set_eplc(cpu_env, cpu_gpr[gprn]);
+ gen_helper_booke_set_eplc(tcg_env, cpu_gpr[gprn]);
}
+
void spr_write_epsc(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_booke_set_epsc(cpu_env, cpu_gpr[gprn]);
+ gen_helper_booke_set_epsc(tcg_env, cpu_gpr[gprn]);
}
#endif
@@ -1125,7 +1186,6 @@ void spr_write_mas73(DisasContext *ctx, int sprn, int gprn)
gen_store_spr(SPR_BOOKE_MAS3, val);
tcg_gen_shri_tl(val, cpu_gpr[gprn], 32);
gen_store_spr(SPR_BOOKE_MAS7, val);
- tcg_temp_free(val);
}
void spr_read_mas73(DisasContext *ctx, int gprn, int sprn)
@@ -1136,8 +1196,6 @@ void spr_read_mas73(DisasContext *ctx, int gprn, int sprn)
tcg_gen_shli_tl(mas7, mas7, 32);
gen_load_spr(mas3, SPR_BOOKE_MAS3);
tcg_gen_or_tl(cpu_gpr[gprn], mas3, mas7);
- tcg_temp_free(mas3);
- tcg_temp_free(mas7);
}
#endif
@@ -1146,29 +1204,21 @@ void spr_read_mas73(DisasContext *ctx, int gprn, int sprn)
static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn,
int bit, int sprn, int cause)
{
- TCGv_i32 t1 = tcg_const_i32(bit);
- TCGv_i32 t2 = tcg_const_i32(sprn);
- TCGv_i32 t3 = tcg_const_i32(cause);
-
- gen_helper_fscr_facility_check(cpu_env, t1, t2, t3);
+ TCGv_i32 t1 = tcg_constant_i32(bit);
+ TCGv_i32 t2 = tcg_constant_i32(sprn);
+ TCGv_i32 t3 = tcg_constant_i32(cause);
- tcg_temp_free_i32(t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t1);
+ gen_helper_fscr_facility_check(tcg_env, t1, t2, t3);
}
static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn,
int bit, int sprn, int cause)
{
- TCGv_i32 t1 = tcg_const_i32(bit);
- TCGv_i32 t2 = tcg_const_i32(sprn);
- TCGv_i32 t3 = tcg_const_i32(cause);
+ TCGv_i32 t1 = tcg_constant_i32(bit);
+ TCGv_i32 t2 = tcg_constant_i32(sprn);
+ TCGv_i32 t3 = tcg_constant_i32(cause);
- gen_helper_msr_facility_check(cpu_env, t1, t2, t3);
-
- tcg_temp_free_i32(t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t1);
+ gen_helper_msr_facility_check(tcg_env, t1, t2, t3);
}
void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn)
@@ -1179,9 +1229,6 @@ void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn)
gen_load_spr(spr, sprn - 1);
tcg_gen_shri_tl(spr_up, spr, 32);
tcg_gen_ext32u_tl(cpu_gpr[gprn], spr_up);
-
- tcg_temp_free(spr);
- tcg_temp_free(spr_up);
}
void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn)
@@ -1191,8 +1238,6 @@ void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn)
gen_load_spr(spr, sprn - 1);
tcg_gen_deposit_tl(spr, spr, cpu_gpr[gprn], 32, 32);
gen_store_spr(sprn - 1, spr);
-
- tcg_temp_free(spr);
}
#if !defined(CONFIG_USER_ONLY)
@@ -1204,12 +1249,29 @@ void spr_write_hmer(DisasContext *ctx, int sprn, int gprn)
tcg_gen_and_tl(hmer, cpu_gpr[gprn], hmer);
gen_store_spr(sprn, hmer);
spr_store_dump_spr(sprn);
- tcg_temp_free(hmer);
+}
+
+void spr_read_tfmr(DisasContext *ctx, int gprn, int sprn)
+{
+ /* Reading TFMR can cause it to be updated, so serialize threads here too */
+ if (!gen_serialize_core(ctx)) {
+ return;
+ }
+ gen_helper_load_tfmr(cpu_gpr[gprn], tcg_env);
+}
+
+void spr_write_tfmr(DisasContext *ctx, int sprn, int gprn)
+{
+ if (!gen_serialize_core(ctx)) {
+ return;
+ }
+ gen_helper_store_tfmr(tcg_env, cpu_gpr[gprn]);
}
void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn)
{
- gen_helper_store_lpcr(cpu_env, cpu_gpr[gprn]);
+ translator_io_start(&ctx->base);
+ gen_helper_store_lpcr(tcg_env, cpu_gpr[gprn]);
}
#endif /* !defined(CONFIG_USER_ONLY) */
@@ -1272,6 +1334,23 @@ void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn)
gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
spr_write_prev_upper32(ctx, sprn, gprn);
}
+
+void spr_read_dexcr_ureg(DisasContext *ctx, int gprn, int sprn)
+{
+ TCGv t0 = tcg_temp_new();
+
+ /*
+     * Access to the (H)DEXCR in problem state is done using separate
+     * SPR indexes, 16 below the indexes that give full access to the
+     * (H)DEXCR in privileged state. Problem state can
+ * only read bits 32:63, bits 0:31 return 0.
+ *
+ * See section 9.3.1-9.3.2 of PowerISA v3.1B
+ */
+
+ gen_load_spr(t0, sprn + 16);
+ tcg_gen_ext32u_tl(cpu_gpr[gprn], t0);
+}
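A minimal sketch of the SPR-index relationship described in the comment above; the helper name is hypothetical and not part of this patch:

    /* Assumed mapping: the problem-state (H)DEXCR alias sits 16 SPR indexes
     * below the privileged register, so the privileged index is recovered by
     * adding 16 before the SPR is loaded. */
    static inline int dexcr_priv_sprn(int ureg_sprn)
    {
        return ureg_sprn + 16;
    }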
#endif
#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \
@@ -1301,38 +1380,43 @@ typedef struct opcode_t {
const char *oname;
} opcode_t;
+static void gen_priv_opc(DisasContext *ctx)
+{
+ gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+}
+
/* Helpers for priv. check */
-#define GEN_PRIV \
- do { \
- gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; \
+#define GEN_PRIV(CTX) \
+ do { \
+ gen_priv_opc(CTX); return; \
} while (0)
#if defined(CONFIG_USER_ONLY)
-#define CHK_HV GEN_PRIV
-#define CHK_SV GEN_PRIV
-#define CHK_HVRM GEN_PRIV
+#define CHK_HV(CTX) GEN_PRIV(CTX)
+#define CHK_SV(CTX) GEN_PRIV(CTX)
+#define CHK_HVRM(CTX) GEN_PRIV(CTX)
#else
-#define CHK_HV \
- do { \
- if (unlikely(ctx->pr || !ctx->hv)) { \
- GEN_PRIV; \
- } \
+#define CHK_HV(CTX) \
+ do { \
+ if (unlikely(ctx->pr || !ctx->hv)) {\
+ GEN_PRIV(CTX); \
+ } \
} while (0)
-#define CHK_SV \
+#define CHK_SV(CTX) \
do { \
if (unlikely(ctx->pr)) { \
- GEN_PRIV; \
+ GEN_PRIV(CTX); \
} \
} while (0)
-#define CHK_HVRM \
- do { \
- if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \
- GEN_PRIV; \
- } \
+#define CHK_HVRM(CTX) \
+ do { \
+ if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \
+ GEN_PRIV(CTX); \
+ } \
} while (0)
#endif
-#define CHK_NONE
+#define CHK_NONE(CTX)
/*****************************************************************************/
/* PowerPC instructions table */
@@ -1442,17 +1526,12 @@ static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
tcg_gen_trunc_tl_i32(t, t0);
tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free_i32(t);
}
static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
{
- TCGv t0 = tcg_const_tl(arg1);
+ TCGv t0 = tcg_constant_tl(arg1);
gen_op_cmp(arg0, t0, s, crf);
- tcg_temp_free(t0);
}
static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
@@ -1468,15 +1547,12 @@ static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
tcg_gen_ext32u_tl(t1, arg1);
}
gen_op_cmp(t0, t1, s, crf);
- tcg_temp_free(t1);
- tcg_temp_free(t0);
}
static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
{
- TCGv t0 = tcg_const_tl(arg1);
+ TCGv t0 = tcg_constant_tl(arg1);
gen_op_cmp32(arg0, t0, s, crf);
- tcg_temp_free(t0);
}
static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
@@ -1520,10 +1596,6 @@ static void gen_cmprb(DisasContext *ctx)
tcg_gen_or_i32(crf, crf, src2lo);
}
tcg_gen_shli_i32(crf, crf, CRF_GT_BIT);
- tcg_temp_free_i32(src1);
- tcg_temp_free_i32(src2);
- tcg_temp_free_i32(src2lo);
- tcg_temp_free_i32(src2hi);
}
#if defined(TARGET_PPC64)
@@ -1546,12 +1618,10 @@ static void gen_isel(DisasContext *ctx)
tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
tcg_gen_andi_tl(t0, t0, mask);
- zr = tcg_const_tl(0);
+ zr = tcg_constant_tl(0);
tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
cpu_gpr[rB(ctx->opcode)]);
- tcg_temp_free(zr);
- tcg_temp_free(t0);
}
/* cmpb: PowerPC 2.05 specification */
@@ -1575,7 +1645,6 @@ static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
} else {
tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
}
- tcg_temp_free(t0);
if (NARROW_MODE(ctx)) {
tcg_gen_extract_tl(cpu_ov, cpu_ov, 31, 1);
if (is_isa300(ctx)) {
@@ -1608,7 +1677,6 @@ static inline void gen_op_arith_compute_ca32(DisasContext *ctx,
}
tcg_gen_xor_tl(t0, t0, res);
tcg_gen_extract_tl(ca32, t0, 32, 1);
- tcg_temp_free(t0);
}
/* Common add function */
@@ -1637,13 +1705,12 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
tcg_gen_add_tl(t0, t0, ca);
}
tcg_gen_xor_tl(ca, t0, t1); /* bits changed w/ carry */
- tcg_temp_free(t1);
tcg_gen_extract_tl(ca, ca, 32, 1);
if (is_isa300(ctx)) {
tcg_gen_mov_tl(ca32, ca);
}
} else {
- TCGv zero = tcg_const_tl(0);
+ TCGv zero = tcg_constant_tl(0);
if (add_ca) {
tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero);
tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero);
@@ -1651,7 +1718,6 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero);
}
gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0);
- tcg_temp_free(zero);
}
} else {
tcg_gen_add_tl(t0, arg1, arg2);
@@ -1669,66 +1735,8 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
if (t0 != ret) {
tcg_gen_mov_tl(ret, t0);
- tcg_temp_free(t0);
}
}
-/* Add functions with two operands */
-#define GEN_INT_ARITH_ADD(name, opc3, ca, add_ca, compute_ca, compute_ov) \
-static void glue(gen_, name)(DisasContext *ctx) \
-{ \
- gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
- cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
- ca, glue(ca, 32), \
- add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
-}
-/* Add functions with one operand and one immediate */
-#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, ca, \
- add_ca, compute_ca, compute_ov) \
-static void glue(gen_, name)(DisasContext *ctx) \
-{ \
- TCGv t0 = tcg_const_tl(const_val); \
- gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
- cpu_gpr[rA(ctx->opcode)], t0, \
- ca, glue(ca, 32), \
- add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
- tcg_temp_free(t0); \
-}
-
-/* add add. addo addo. */
-GEN_INT_ARITH_ADD(add, 0x08, cpu_ca, 0, 0, 0)
-GEN_INT_ARITH_ADD(addo, 0x18, cpu_ca, 0, 0, 1)
-/* addc addc. addco addco. */
-GEN_INT_ARITH_ADD(addc, 0x00, cpu_ca, 0, 1, 0)
-GEN_INT_ARITH_ADD(addco, 0x10, cpu_ca, 0, 1, 1)
-/* adde adde. addeo addeo. */
-GEN_INT_ARITH_ADD(adde, 0x04, cpu_ca, 1, 1, 0)
-GEN_INT_ARITH_ADD(addeo, 0x14, cpu_ca, 1, 1, 1)
-/* addme addme. addmeo addmeo. */
-GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, cpu_ca, 1, 1, 0)
-GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, cpu_ca, 1, 1, 1)
-/* addex */
-GEN_INT_ARITH_ADD(addex, 0x05, cpu_ov, 1, 1, 0);
-/* addze addze. addzeo addzeo.*/
-GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, cpu_ca, 1, 1, 0)
-GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, cpu_ca, 1, 1, 1)
-/* addic addic.*/
-static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
-{
- TCGv c = tcg_const_tl(SIMM(ctx->opcode));
- gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
- c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0);
- tcg_temp_free(c);
-}
-
-static void gen_addic(DisasContext *ctx)
-{
- gen_op_addic(ctx, 0);
-}
-
-static void gen_addic_(DisasContext *ctx)
-{
- gen_op_addic(ctx, 1);
-}
static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
TCGv arg2, int sign, int compute_ov)
@@ -1764,10 +1772,6 @@ static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
}
tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, ret);
@@ -1792,10 +1796,9 @@ GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);
#define GEN_DIVE(name, hlpr, compute_ov) \
static void gen_##name(DisasContext *ctx) \
{ \
- TCGv_i32 t0 = tcg_const_i32(compute_ov); \
- gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env, \
+ TCGv_i32 t0 = tcg_constant_i32(compute_ov); \
+ gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], tcg_env, \
cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
- tcg_temp_free_i32(t0); \
if (unlikely(Rc(ctx->opcode) != 0)) { \
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); \
} \
@@ -1839,10 +1842,6 @@ static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
}
tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
- tcg_temp_free_i64(t3);
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, ret);
@@ -1889,19 +1888,13 @@ static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1,
tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_rem_i32(t3, t0, t1);
tcg_gen_ext_i32_tl(ret, t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
} else {
- TCGv_i32 t2 = tcg_const_i32(1);
- TCGv_i32 t3 = tcg_const_i32(0);
+ TCGv_i32 t2 = tcg_constant_i32(1);
+ TCGv_i32 t3 = tcg_constant_i32(0);
tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1);
- tcg_gen_remu_i32(t3, t0, t1);
- tcg_gen_extu_i32_tl(ret, t3);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
+ tcg_gen_remu_i32(t0, t0, t1);
+ tcg_gen_extu_i32_tl(ret, t0);
}
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
}
#define GEN_INT_ARITH_MODW(name, opc3, sign) \
@@ -1935,18 +1928,12 @@ static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1,
tcg_gen_movi_i64(t3, 0);
tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
tcg_gen_rem_i64(ret, t0, t1);
- tcg_temp_free_i64(t2);
- tcg_temp_free_i64(t3);
} else {
- TCGv_i64 t2 = tcg_const_i64(1);
- TCGv_i64 t3 = tcg_const_i64(0);
+ TCGv_i64 t2 = tcg_constant_i64(1);
+ TCGv_i64 t3 = tcg_constant_i64(0);
tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1);
tcg_gen_remu_i64(ret, t0, t1);
- tcg_temp_free_i64(t2);
- tcg_temp_free_i64(t3);
}
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
}
#define GEN_INT_ARITH_MODD(name, opc3, sign) \
@@ -1971,8 +1958,6 @@ static void gen_mulhw(DisasContext *ctx)
tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
tcg_gen_muls2_i32(t0, t1, t0, t1);
tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
@@ -1988,8 +1973,6 @@ static void gen_mulhwu(DisasContext *ctx)
tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
tcg_gen_mulu2_i32(t0, t1, t0, t1);
tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
@@ -2005,8 +1988,6 @@ static void gen_mullw(DisasContext *ctx)
tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
#else
tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
cpu_gpr[rB(ctx->opcode)]);
@@ -2039,8 +2020,6 @@ static void gen_mullwo(DisasContext *ctx)
}
tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
@@ -2060,7 +2039,6 @@ static void gen_mulhd(DisasContext *ctx)
TCGv lo = tcg_temp_new();
tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
- tcg_temp_free(lo);
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
@@ -2072,7 +2050,6 @@ static void gen_mulhdu(DisasContext *ctx)
TCGv lo = tcg_temp_new();
tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
- tcg_temp_free(lo);
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
@@ -2105,9 +2082,6 @@ static void gen_mulldo(DisasContext *ctx)
}
tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
-
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
}
@@ -2143,9 +2117,7 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
}
tcg_gen_xor_tl(t1, arg2, inv1); /* add without carry */
tcg_gen_add_tl(t0, t0, inv1);
- tcg_temp_free(inv1);
tcg_gen_xor_tl(cpu_ca, t0, t1); /* bits changes w/ carry */
- tcg_temp_free(t1);
tcg_gen_extract_tl(cpu_ca, cpu_ca, 32, 1);
if (is_isa300(ctx)) {
tcg_gen_mov_tl(cpu_ca32, cpu_ca);
@@ -2153,12 +2125,10 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
} else if (add_ca) {
TCGv zero, inv1 = tcg_temp_new();
tcg_gen_not_tl(inv1, arg1);
- zero = tcg_const_tl(0);
+ zero = tcg_constant_tl(0);
tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0);
- tcg_temp_free(zero);
- tcg_temp_free(inv1);
} else {
tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
tcg_gen_sub_tl(t0, arg2, arg1);
@@ -2185,60 +2155,15 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
if (t0 != ret) {
tcg_gen_mov_tl(ret, t0);
- tcg_temp_free(t0);
}
}
-/* Sub functions with Two operands functions */
-#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \
-static void glue(gen_, name)(DisasContext *ctx) \
-{ \
- gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
- cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
- add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
-}
-/* Sub functions with one operand and one immediate */
-#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \
- add_ca, compute_ca, compute_ov) \
-static void glue(gen_, name)(DisasContext *ctx) \
-{ \
- TCGv t0 = tcg_const_tl(const_val); \
- gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
- cpu_gpr[rA(ctx->opcode)], t0, \
- add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
- tcg_temp_free(t0); \
-}
-/* subf subf. subfo subfo. */
-GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
-GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
-/* subfc subfc. subfco subfco. */
-GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
-GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
-/* subfe subfe. subfeo subfo. */
-GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
-GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
-/* subfme subfme. subfmeo subfmeo. */
-GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
-GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
-/* subfze subfze. subfzeo subfzeo.*/
-GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
-GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
-
-/* subfic */
-static void gen_subfic(DisasContext *ctx)
-{
- TCGv c = tcg_const_tl(SIMM(ctx->opcode));
- gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
- c, 0, 1, 0, 0);
- tcg_temp_free(c);
-}
/* neg neg. nego nego. */
static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
{
- TCGv zero = tcg_const_tl(0);
+ TCGv zero = tcg_constant_tl(0);
gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
zero, 0, 0, compute_ov, Rc(ctx->opcode));
- tcg_temp_free(zero);
}
static void gen_neg(DisasContext *ctx)
@@ -2301,7 +2226,6 @@ static void gen_cntlzw(DisasContext *ctx)
tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
tcg_gen_clzi_i32(t, t, 32);
tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
- tcg_temp_free_i32(t);
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
@@ -2316,7 +2240,6 @@ static void gen_cnttzw(DisasContext *ctx)
tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
tcg_gen_ctzi_i32(t, t, 32);
tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
- tcg_temp_free_i32(t);
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
@@ -2337,10 +2260,9 @@ GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
static void gen_pause(DisasContext *ctx)
{
- TCGv_i32 t0 = tcg_const_i32(0);
- tcg_gen_st_i32(t0, cpu_env,
+ TCGv_i32 t0 = tcg_constant_i32(0);
+ tcg_gen_st_i32(t0, tcg_env,
-offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
- tcg_temp_free_i32(t0);
/* Stop translation, this gives other CPUs a chance to run */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
@@ -2419,7 +2341,6 @@ static void gen_or(DisasContext *ctx)
tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
gen_store_spr(SPR_PPR, t0);
- tcg_temp_free(t0);
}
#if !defined(CONFIG_USER_ONLY)
/*
@@ -2534,7 +2455,6 @@ static void gen_prtyw(DisasContext *ctx)
tcg_gen_shri_tl(t0, ra, 8);
tcg_gen_xor_tl(ra, ra, t0);
tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
- tcg_temp_free(t0);
}
#if defined(TARGET_PPC64)
@@ -2551,7 +2471,6 @@ static void gen_prtyd(DisasContext *ctx)
tcg_gen_shri_tl(t0, ra, 8);
tcg_gen_xor_tl(ra, ra, t0);
tcg_gen_andi_tl(ra, ra, 1);
- tcg_temp_free(t0);
}
#endif
@@ -2594,7 +2513,7 @@ static void gen_darn(DisasContext *ctx)
if (l > 2) {
tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1);
} else {
- gen_icount_io_start(ctx);
+ translator_io_start(&ctx->base);
if (l == 0) {
gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]);
} else {
@@ -2640,7 +2559,6 @@ static void gen_rlwimi(DisasContext *ctx)
tcg_gen_trunc_tl_i32(t0, t_rs);
tcg_gen_rotli_i32(t0, t0, sh);
tcg_gen_extu_i32_tl(t1, t0);
- tcg_temp_free_i32(t0);
} else {
#if defined(TARGET_PPC64)
tcg_gen_deposit_i64(t1, t_rs, t_rs, 32, 32);
@@ -2653,7 +2571,6 @@ static void gen_rlwimi(DisasContext *ctx)
tcg_gen_andi_tl(t1, t1, mask);
tcg_gen_andi_tl(t_ra, t_ra, ~mask);
tcg_gen_or_tl(t_ra, t_ra, t1);
- tcg_temp_free(t1);
}
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, t_ra);
@@ -2697,7 +2614,6 @@ static void gen_rlwinm(DisasContext *ctx)
tcg_gen_rotli_i32(t0, t0, sh);
tcg_gen_andi_i32(t0, t0, mask);
tcg_gen_extu_i32_tl(t_ra, t0);
- tcg_temp_free_i32(t0);
}
} else {
#if defined(TARGET_PPC64)
@@ -2744,15 +2660,12 @@ static void gen_rlwnm(DisasContext *ctx)
tcg_gen_andi_i32(t0, t0, 0x1f);
tcg_gen_rotl_i32(t1, t1, t0);
tcg_gen_extu_i32_tl(t_ra, t1);
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
} else {
#if defined(TARGET_PPC64)
TCGv_i64 t0 = tcg_temp_new_i64();
tcg_gen_andi_i64(t0, t_rb, 0x1f);
tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
tcg_gen_rotl_i64(t_ra, t_ra, t0);
- tcg_temp_free_i64(t0);
#else
g_assert_not_reached();
#endif
@@ -2860,7 +2773,6 @@ static void gen_rldnm(DisasContext *ctx, int mb, int me)
t0 = tcg_temp_new();
tcg_gen_andi_tl(t0, t_rb, 0x3f);
tcg_gen_rotl_tl(t_ra, t_rs, t0);
- tcg_temp_free(t0);
tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
if (unlikely(Rc(ctx->opcode) != 0)) {
@@ -2907,7 +2819,6 @@ static void gen_rldimi(DisasContext *ctx, int mbn, int shn)
tcg_gen_andi_tl(t1, t1, mask);
tcg_gen_andi_tl(t_ra, t_ra, ~mask);
tcg_gen_or_tl(t_ra, t_ra, t1);
- tcg_temp_free(t1);
}
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, t_ra);
@@ -2936,8 +2847,6 @@ static void gen_slw(DisasContext *ctx)
t1 = tcg_temp_new();
tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
- tcg_temp_free(t1);
- tcg_temp_free(t0);
tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
@@ -2947,7 +2856,7 @@ static void gen_slw(DisasContext *ctx)
/* sraw & sraw. */
static void gen_sraw(DisasContext *ctx)
{
- gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
+ gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], tcg_env,
cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
@@ -2973,7 +2882,6 @@ static void gen_srawi(DisasContext *ctx)
t0 = tcg_temp_new();
tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
- tcg_temp_free(t0);
tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
if (is_isa300(ctx)) {
tcg_gen_mov_tl(cpu_ca32, cpu_ca);
@@ -3004,8 +2912,6 @@ static void gen_srw(DisasContext *ctx)
t1 = tcg_temp_new();
tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
- tcg_temp_free(t1);
- tcg_temp_free(t0);
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
@@ -3025,8 +2931,6 @@ static void gen_sld(DisasContext *ctx)
t1 = tcg_temp_new();
tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
- tcg_temp_free(t1);
- tcg_temp_free(t0);
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
@@ -3035,7 +2939,7 @@ static void gen_sld(DisasContext *ctx)
/* srad & srad. */
static void gen_srad(DisasContext *ctx)
{
- gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
+ gen_helper_srad(cpu_gpr[rA(ctx->opcode)], tcg_env,
cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
@@ -3059,7 +2963,6 @@ static inline void gen_sradi(DisasContext *ctx, int n)
t0 = tcg_temp_new();
tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
- tcg_temp_free(t0);
tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
if (is_isa300(ctx)) {
tcg_gen_mov_tl(cpu_ca32, cpu_ca);
@@ -3118,8 +3021,6 @@ static void gen_srd(DisasContext *ctx)
t1 = tcg_temp_new();
tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
- tcg_temp_free(t1);
- tcg_temp_free(t0);
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
@@ -3195,6 +3096,20 @@ static inline void gen_align_no_le(DisasContext *ctx)
(ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE);
}
+static TCGv do_ea_calc(DisasContext *ctx, int ra, TCGv displ)
+{
+ TCGv ea = tcg_temp_new();
+ if (ra) {
+ tcg_gen_add_tl(ea, cpu_gpr[ra], displ);
+ } else {
+ tcg_gen_mov_tl(ea, displ);
+ }
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32u_tl(ea, ea);
+ }
+ return ea;
+}
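A hedged sketch of how a caller is expected to use do_ea_calc() for a D-form load; the rt/ra/displ names are placeholders rather than fields taken from this patch:

    /* Hypothetical caller: EA = (ra ? GPR[ra] : 0) + displ, zero-extended to
     * 32 bits in narrow mode, then the word at EA is loaded into GPR[rt]. */
    TCGv ea = do_ea_calc(ctx, ra, tcg_constant_tl(displ));
    tcg_gen_qemu_ld_tl(cpu_gpr[rt], ea, ctx->mem_idx, DEF_MEMOP(MO_UL));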
+
/*** Integer load ***/
#define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
#define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))
@@ -3228,10 +3143,10 @@ GEN_QEMU_LOAD_64(ld8u, DEF_MEMOP(MO_UB))
GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
-GEN_QEMU_LOAD_64(ld64, DEF_MEMOP(MO_Q))
+GEN_QEMU_LOAD_64(ld64, DEF_MEMOP(MO_UQ))
#if defined(TARGET_PPC64)
-GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_Q))
+GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_UQ))
#endif
#define GEN_QEMU_STORE_TL(stop, op) \
@@ -3262,22 +3177,21 @@ static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx, \
GEN_QEMU_STORE_64(st8, DEF_MEMOP(MO_UB))
GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
-GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_Q))
+GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_UQ))
#if defined(TARGET_PPC64)
-GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_Q))
+GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_UQ))
#endif
#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk) \
static void glue(gen_, name##x)(DisasContext *ctx) \
{ \
TCGv EA; \
- chk; \
+ chk(ctx); \
gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
gen_addr_reg_index(ctx, EA); \
gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
- tcg_temp_free(EA); \
}
#define GEN_LDX(name, ldop, opc2, opc3, type) \
@@ -3290,19 +3204,18 @@ static void glue(gen_, name##x)(DisasContext *ctx) \
static void glue(gen_, name##epx)(DisasContext *ctx) \
{ \
TCGv EA; \
- CHK_SV; \
+ CHK_SV(ctx); \
gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
gen_addr_reg_index(ctx, EA); \
tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_LOAD, ldop);\
- tcg_temp_free(EA); \
}
GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
#if defined(TARGET_PPC64)
-GEN_LDEPX(ld, DEF_MEMOP(MO_Q), 0x1D, 0x00)
+GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
#endif
#if defined(TARGET_PPC64)
@@ -3311,69 +3224,6 @@ GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
-
-/* lq */
-static void gen_lq(DisasContext *ctx)
-{
- int ra, rd;
- TCGv EA, hi, lo;
-
- /* lq is a legal user mode instruction starting in ISA 2.07 */
- bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
- bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
-
- if (!legal_in_user_mode && ctx->pr) {
- gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
- return;
- }
-
- if (!le_is_supported && ctx->le_mode) {
- gen_align_no_le(ctx);
- return;
- }
- ra = rA(ctx->opcode);
- rd = rD(ctx->opcode);
- if (unlikely((rd & 1) || rd == ra)) {
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
- return;
- }
-
- gen_set_access_type(ctx, ACCESS_INT);
- EA = tcg_temp_new();
- gen_addr_imm_index(ctx, EA, 0x0F);
-
- /* Note that the low part is always in RD+1, even in LE mode. */
- lo = cpu_gpr[rd + 1];
- hi = cpu_gpr[rd];
-
- if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
- if (HAVE_ATOMIC128) {
- TCGv_i32 oi = tcg_temp_new_i32();
- if (ctx->le_mode) {
- tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
- gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
- } else {
- tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
- gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
- }
- tcg_temp_free_i32(oi);
- tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
- } else {
- /* Restart with exclusive lock. */
- gen_helper_exit_atomic(cpu_env);
- ctx->base.is_jmp = DISAS_NORETURN;
- }
- } else if (ctx->le_mode) {
- tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ);
- gen_addr_add(ctx, EA, EA, 8);
- tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEQ);
- } else {
- tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEQ);
- gen_addr_add(ctx, EA, EA, 8);
- tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEQ);
- }
- tcg_temp_free(EA);
-}
#endif
/*** Integer store ***/
@@ -3381,12 +3231,11 @@ static void gen_lq(DisasContext *ctx)
static void glue(gen_, name##x)(DisasContext *ctx) \
{ \
TCGv EA; \
- chk; \
+ chk(ctx); \
gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
gen_addr_reg_index(ctx, EA); \
gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
- tcg_temp_free(EA); \
}
#define GEN_STX(name, stop, opc2, opc3, type) \
GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_NONE)
@@ -3398,20 +3247,19 @@ static void glue(gen_, name##x)(DisasContext *ctx) \
static void glue(gen_, name##epx)(DisasContext *ctx) \
{ \
TCGv EA; \
- CHK_SV; \
+ CHK_SV(ctx); \
gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
gen_addr_reg_index(ctx, EA); \
tcg_gen_qemu_st_tl( \
cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_STORE, stop); \
- tcg_temp_free(EA); \
}
GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
#if defined(TARGET_PPC64)
-GEN_STEPX(std, DEF_MEMOP(MO_Q), 0x1d, 0x04)
+GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1d, 0x04)
#endif
#if defined(TARGET_PPC64)
@@ -3419,88 +3267,6 @@ GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
-
-static void gen_std(DisasContext *ctx)
-{
- int rs;
- TCGv EA;
-
- rs = rS(ctx->opcode);
- if ((ctx->opcode & 0x3) == 0x2) { /* stq */
- bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
- bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
- TCGv hi, lo;
-
- if (!(ctx->insns_flags & PPC_64BX)) {
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
- }
-
- if (!legal_in_user_mode && ctx->pr) {
- gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
- return;
- }
-
- if (!le_is_supported && ctx->le_mode) {
- gen_align_no_le(ctx);
- return;
- }
-
- if (unlikely(rs & 1)) {
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
- return;
- }
- gen_set_access_type(ctx, ACCESS_INT);
- EA = tcg_temp_new();
- gen_addr_imm_index(ctx, EA, 0x03);
-
- /* Note that the low part is always in RS+1, even in LE mode. */
- lo = cpu_gpr[rs + 1];
- hi = cpu_gpr[rs];
-
- if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
- if (HAVE_ATOMIC128) {
- TCGv_i32 oi = tcg_temp_new_i32();
- if (ctx->le_mode) {
- tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
- gen_helper_stq_le_parallel(cpu_env, EA, lo, hi, oi);
- } else {
- tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
- gen_helper_stq_be_parallel(cpu_env, EA, lo, hi, oi);
- }
- tcg_temp_free_i32(oi);
- } else {
- /* Restart with exclusive lock. */
- gen_helper_exit_atomic(cpu_env);
- ctx->base.is_jmp = DISAS_NORETURN;
- }
- } else if (ctx->le_mode) {
- tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_LEQ);
- gen_addr_add(ctx, EA, EA, 8);
- tcg_gen_qemu_st_i64(hi, EA, ctx->mem_idx, MO_LEQ);
- } else {
- tcg_gen_qemu_st_i64(hi, EA, ctx->mem_idx, MO_BEQ);
- gen_addr_add(ctx, EA, EA, 8);
- tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_BEQ);
- }
- tcg_temp_free(EA);
- } else {
- /* std / stdu */
- if (Rc(ctx->opcode)) {
- if (unlikely(rA(ctx->opcode) == 0)) {
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
- return;
- }
- }
- gen_set_access_type(ctx, ACCESS_INT);
- EA = tcg_temp_new();
- gen_addr_imm_index(ctx, EA, 0x03);
- gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
- if (Rc(ctx->opcode)) {
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
- }
- tcg_temp_free(EA);
- }
-}
#endif
/*** Integer load and store with byte reverse ***/
@@ -3536,11 +3302,9 @@ static void gen_lmw(DisasContext *ctx)
}
gen_set_access_type(ctx, ACCESS_INT);
t0 = tcg_temp_new();
- t1 = tcg_const_i32(rD(ctx->opcode));
+ t1 = tcg_constant_i32(rD(ctx->opcode));
gen_addr_imm_index(ctx, t0, 0);
- gen_helper_lmw(cpu_env, t0, t1);
- tcg_temp_free(t0);
- tcg_temp_free_i32(t1);
+ gen_helper_lmw(tcg_env, t0, t1);
}
/* stmw */
@@ -3555,11 +3319,9 @@ static void gen_stmw(DisasContext *ctx)
}
gen_set_access_type(ctx, ACCESS_INT);
t0 = tcg_temp_new();
- t1 = tcg_const_i32(rS(ctx->opcode));
+ t1 = tcg_constant_i32(rS(ctx->opcode));
gen_addr_imm_index(ctx, t0, 0);
- gen_helper_stmw(cpu_env, t0, t1);
- tcg_temp_free(t0);
- tcg_temp_free_i32(t1);
+ gen_helper_stmw(tcg_env, t0, t1);
}
/*** Integer load and store strings ***/
@@ -3595,12 +3357,9 @@ static void gen_lswi(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_INT);
t0 = tcg_temp_new();
gen_addr_register(ctx, t0);
- t1 = tcg_const_i32(nb);
- t2 = tcg_const_i32(start);
- gen_helper_lsw(cpu_env, t0, t1, t2);
- tcg_temp_free(t0);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
+ t1 = tcg_constant_i32(nb);
+ t2 = tcg_constant_i32(start);
+ gen_helper_lsw(tcg_env, t0, t1, t2);
}
/* lswx */
@@ -3616,14 +3375,10 @@ static void gen_lswx(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_INT);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- t1 = tcg_const_i32(rD(ctx->opcode));
- t2 = tcg_const_i32(rA(ctx->opcode));
- t3 = tcg_const_i32(rB(ctx->opcode));
- gen_helper_lswx(cpu_env, t0, t1, t2, t3);
- tcg_temp_free(t0);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
+ t1 = tcg_constant_i32(rD(ctx->opcode));
+ t2 = tcg_constant_i32(rA(ctx->opcode));
+ t3 = tcg_constant_i32(rB(ctx->opcode));
+ gen_helper_lswx(tcg_env, t0, t1, t2, t3);
}
/* stswi */
@@ -3643,12 +3398,9 @@ static void gen_stswi(DisasContext *ctx)
if (nb == 0) {
nb = 32;
}
- t1 = tcg_const_i32(nb);
- t2 = tcg_const_i32(rS(ctx->opcode));
- gen_helper_stsw(cpu_env, t0, t1, t2);
- tcg_temp_free(t0);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
+ t1 = tcg_constant_i32(nb);
+ t2 = tcg_constant_i32(rS(ctx->opcode));
+ gen_helper_stsw(tcg_env, t0, t1, t2);
}
/* stswx */
@@ -3667,18 +3419,40 @@ static void gen_stswx(DisasContext *ctx)
t1 = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(t1, cpu_xer);
tcg_gen_andi_i32(t1, t1, 0x7F);
- t2 = tcg_const_i32(rS(ctx->opcode));
- gen_helper_stsw(cpu_env, t0, t1, t2);
- tcg_temp_free(t0);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
+ t2 = tcg_constant_i32(rS(ctx->opcode));
+ gen_helper_stsw(tcg_env, t0, t1, t2);
}
/*** Memory synchronisation ***/
/* eieio */
static void gen_eieio(DisasContext *ctx)
{
- TCGBar bar = TCG_MO_LD_ST;
+ TCGBar bar = TCG_MO_ALL;
+
+ /*
+     * eieio has complex semantics. It provides memory ordering between
+ * operations in the set:
+ * - loads from CI memory.
+ * - stores to CI memory.
+ * - stores to WT memory.
+ *
+ * It separately also orders memory for operations in the set:
+     * - stores to cacheable memory.
+ *
+ * It also serializes instructions:
+ * - dcbt and dcbst.
+ *
+ * It separately serializes:
+ * - tlbie and tlbsync.
+ *
+ * And separately serializes:
+ * - slbieg, slbiag, and slbsync.
+ *
+ * The end result is that CI memory ordering requires TCG_MO_ALL
+ * and it is not possible to special-case more relaxed ordering for
+ * cacheable accesses. TCG_BAR_SC is required to provide this
+ * serialization.
+ */
/*
      * POWER9 has an eieio instruction variant using bit 6 as a hint to
@@ -3713,15 +3487,14 @@ static inline void gen_check_tlb_flush(DisasContext *ctx, bool global)
}
l = gen_new_label();
t = tcg_temp_new_i32();
- tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
+ tcg_gen_ld_i32(t, tcg_env, offsetof(CPUPPCState, tlb_need_flush));
tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
if (global) {
- gen_helper_check_tlb_flush_global(cpu_env);
+ gen_helper_check_tlb_flush_global(tcg_env);
} else {
- gen_helper_check_tlb_flush_local(cpu_env);
+ gen_helper_check_tlb_flush_local(tcg_env);
}
gen_set_label(l);
- tcg_temp_free_i32(t);
}
#else
static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { }
@@ -3752,9 +3525,8 @@ static void gen_load_locked(DisasContext *ctx, MemOp memop)
gen_addr_reg_index(ctx, t0);
tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop | MO_ALIGN);
tcg_gen_mov_tl(cpu_reserve, t0);
+ tcg_gen_movi_tl(cpu_reserve_length, memop_size(memop));
tcg_gen_mov_tl(cpu_reserve_val, gpr);
- tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
- tcg_temp_free(t0);
}
#define LARX(name, memop) \
@@ -3788,10 +3560,6 @@ static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop,
/* RT = (t != t2 ? t : u = 1<<(s*8-1)) */
tcg_gen_movi_tl(u, 1 << (MEMOP_GET_SIZE(memop) * 8 - 1));
tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u);
-
- tcg_temp_free(t);
- tcg_temp_free(t2);
- tcg_temp_free(u);
}
static void gen_ld_atomic(DisasContext *ctx, MemOp memop)
@@ -3854,9 +3622,6 @@ static void gen_ld_atomic(DisasContext *ctx, MemOp memop)
cpu_gpr[(rt + 2) & 31], t0);
tcg_gen_qemu_st_tl(t1, EA, ctx->mem_idx, memop);
tcg_gen_mov_tl(dst, t0);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
break;
@@ -3886,11 +3651,10 @@ static void gen_ld_atomic(DisasContext *ctx, MemOp memop)
/* invoke data storage error handler */
gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
}
- tcg_temp_free(EA);
if (need_serial) {
/* Restart with exclusive lock. */
- gen_helper_exit_atomic(cpu_env);
+ gen_helper_exit_atomic(tcg_env);
ctx->base.is_jmp = DISAS_NORETURN;
}
}
@@ -3903,7 +3667,7 @@ static void gen_lwat(DisasContext *ctx)
#ifdef TARGET_PPC64
static void gen_ldat(DisasContext *ctx)
{
- gen_ld_atomic(ctx, DEF_MEMOP(MO_Q));
+ gen_ld_atomic(ctx, DEF_MEMOP(MO_UQ));
}
#endif
@@ -3946,7 +3710,7 @@ static void gen_st_atomic(DisasContext *ctx, MemOp memop)
case 24: /* Store twin */
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
/* Restart with exclusive lock. */
- gen_helper_exit_atomic(cpu_env);
+ gen_helper_exit_atomic(tcg_env);
ctx->base.is_jmp = DISAS_NORETURN;
} else {
TCGv t = tcg_temp_new();
@@ -3962,20 +3726,12 @@ static void gen_st_atomic(DisasContext *ctx, MemOp memop)
tcg_gen_movcond_tl(TCG_COND_EQ, s2, t, t2, src, t2);
tcg_gen_qemu_st_tl(s, EA, ctx->mem_idx, memop);
tcg_gen_qemu_st_tl(s2, ea_plus_s, ctx->mem_idx, memop);
-
- tcg_temp_free(ea_plus_s);
- tcg_temp_free(s2);
- tcg_temp_free(s);
- tcg_temp_free(t2);
- tcg_temp_free(t);
}
break;
default:
/* invoke data storage error handler */
gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
}
- tcg_temp_free(discard);
- tcg_temp_free(EA);
}
static void gen_stwat(DisasContext *ctx)
@@ -3986,43 +3742,38 @@ static void gen_stwat(DisasContext *ctx)
#ifdef TARGET_PPC64
static void gen_stdat(DisasContext *ctx)
{
- gen_st_atomic(ctx, DEF_MEMOP(MO_Q));
+ gen_st_atomic(ctx, DEF_MEMOP(MO_UQ));
}
#endif
static void gen_conditional_store(DisasContext *ctx, MemOp memop)
{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- TCGv t0 = tcg_temp_new();
- int reg = rS(ctx->opcode);
+ TCGLabel *lfail;
+ TCGv EA;
+ TCGv cr0;
+ TCGv t0;
+ int rs = rS(ctx->opcode);
+ lfail = gen_new_label();
+ EA = tcg_temp_new();
+ cr0 = tcg_temp_new();
+ t0 = tcg_temp_new();
+
+ tcg_gen_mov_tl(cr0, cpu_so);
gen_set_access_type(ctx, ACCESS_RES);
- gen_addr_reg_index(ctx, t0);
- tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1);
- tcg_temp_free(t0);
+ gen_addr_reg_index(ctx, EA);
+ tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lfail);
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_length, memop_size(memop), lfail);
- t0 = tcg_temp_new();
tcg_gen_atomic_cmpxchg_tl(t0, cpu_reserve, cpu_reserve_val,
- cpu_gpr[reg], ctx->mem_idx,
+ cpu_gpr[rs], ctx->mem_idx,
DEF_MEMOP(memop) | MO_ALIGN);
tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_reserve_val);
tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
- tcg_gen_or_tl(t0, t0, cpu_so);
- tcg_gen_trunc_tl_i32(cpu_crf[0], t0);
- tcg_temp_free(t0);
- tcg_gen_br(l2);
-
- gen_set_label(l1);
-
- /*
- * Address mismatch implies failure. But we still need to provide
- * the memory barrier semantics of the instruction.
- */
- tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
- tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+ tcg_gen_or_tl(cr0, cr0, t0);
- gen_set_label(l2);
+ gen_set_label(lfail);
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cr0);
tcg_gen_movi_tl(cpu_reserve, -1);
}
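The CR0 value produced by the rewritten store-conditional path can be modelled as below; this is a simplified restatement, not code from this patch, and it assumes SO occupies bit 0 of the CR field:

    /* CR0 after stwcx./stdcx.: EQ is set iff the cmpxchg stored the new
     * value, SO is copied from XER[SO], LT and GT are cleared. */
    static inline uint32_t stcx_cr0(bool store_succeeded, bool so)
    {
        return ((uint32_t)store_succeeded << CRF_EQ_BIT) | (so ? 1 : 0);
    }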
@@ -4038,15 +3789,16 @@ STCX(stwcx_, DEF_MEMOP(MO_UL))
#if defined(TARGET_PPC64)
/* ldarx */
-LARX(ldarx, DEF_MEMOP(MO_Q))
+LARX(ldarx, DEF_MEMOP(MO_UQ))
/* stdcx. */
-STCX(stdcx_, DEF_MEMOP(MO_Q))
+STCX(stdcx_, DEF_MEMOP(MO_UQ))
/* lqarx */
static void gen_lqarx(DisasContext *ctx)
{
int rd = rD(ctx->opcode);
TCGv EA, hi, lo;
+ TCGv_i128 t16;
if (unlikely((rd & 1) || (rd == rA(ctx->opcode)) ||
(rd == rB(ctx->opcode)))) {
@@ -4062,127 +3814,79 @@ static void gen_lqarx(DisasContext *ctx)
lo = cpu_gpr[rd + 1];
hi = cpu_gpr[rd];
- if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
- if (HAVE_ATOMIC128) {
- TCGv_i32 oi = tcg_temp_new_i32();
- if (ctx->le_mode) {
- tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ | MO_ALIGN_16,
- ctx->mem_idx));
- gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
- } else {
- tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ | MO_ALIGN_16,
- ctx->mem_idx));
- gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
- }
- tcg_temp_free_i32(oi);
- tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
- } else {
- /* Restart with exclusive lock. */
- gen_helper_exit_atomic(cpu_env);
- ctx->base.is_jmp = DISAS_NORETURN;
- tcg_temp_free(EA);
- return;
- }
- } else if (ctx->le_mode) {
- tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ | MO_ALIGN_16);
- tcg_gen_mov_tl(cpu_reserve, EA);
- gen_addr_add(ctx, EA, EA, 8);
- tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEQ);
- } else {
- tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEQ | MO_ALIGN_16);
- tcg_gen_mov_tl(cpu_reserve, EA);
- gen_addr_add(ctx, EA, EA, 8);
- tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEQ);
- }
- tcg_temp_free(EA);
+ t16 = tcg_temp_new_i128();
+ tcg_gen_qemu_ld_i128(t16, EA, ctx->mem_idx, DEF_MEMOP(MO_128 | MO_ALIGN));
+ tcg_gen_extr_i128_i64(lo, hi, t16);
- tcg_gen_st_tl(hi, cpu_env, offsetof(CPUPPCState, reserve_val));
- tcg_gen_st_tl(lo, cpu_env, offsetof(CPUPPCState, reserve_val2));
+ tcg_gen_mov_tl(cpu_reserve, EA);
+ tcg_gen_movi_tl(cpu_reserve_length, 16);
+ tcg_gen_st_tl(hi, tcg_env, offsetof(CPUPPCState, reserve_val));
+ tcg_gen_st_tl(lo, tcg_env, offsetof(CPUPPCState, reserve_val2));
}
/* stqcx. */
static void gen_stqcx_(DisasContext *ctx)
{
+ TCGLabel *lfail;
+ TCGv EA, t0, t1;
+ TCGv cr0;
+ TCGv_i128 cmp, val;
int rs = rS(ctx->opcode);
- TCGv EA, hi, lo;
if (unlikely(rs & 1)) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
- gen_set_access_type(ctx, ACCESS_RES);
+ lfail = gen_new_label();
EA = tcg_temp_new();
+ cr0 = tcg_temp_new();
+
+ tcg_gen_mov_tl(cr0, cpu_so);
+ gen_set_access_type(ctx, ACCESS_RES);
gen_addr_reg_index(ctx, EA);
+ tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lfail);
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_length, 16, lfail);
- /* Note that the low part is always in RS+1, even in LE mode. */
- lo = cpu_gpr[rs + 1];
- hi = cpu_gpr[rs];
+ cmp = tcg_temp_new_i128();
+ val = tcg_temp_new_i128();
- if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
- if (HAVE_CMPXCHG128) {
- TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_Q) | MO_ALIGN_16);
- if (ctx->le_mode) {
- gen_helper_stqcx_le_parallel(cpu_crf[0], cpu_env,
- EA, lo, hi, oi);
- } else {
- gen_helper_stqcx_be_parallel(cpu_crf[0], cpu_env,
- EA, lo, hi, oi);
- }
- tcg_temp_free_i32(oi);
- } else {
- /* Restart with exclusive lock. */
- gen_helper_exit_atomic(cpu_env);
- ctx->base.is_jmp = DISAS_NORETURN;
- }
- tcg_temp_free(EA);
- } else {
- TCGLabel *lab_fail = gen_new_label();
- TCGLabel *lab_over = gen_new_label();
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
-
- tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lab_fail);
- tcg_temp_free(EA);
+ tcg_gen_concat_i64_i128(cmp, cpu_reserve_val2, cpu_reserve_val);
- gen_qemu_ld64_i64(ctx, t0, cpu_reserve);
- tcg_gen_ld_i64(t1, cpu_env, (ctx->le_mode
- ? offsetof(CPUPPCState, reserve_val2)
- : offsetof(CPUPPCState, reserve_val)));
- tcg_gen_brcond_i64(TCG_COND_NE, t0, t1, lab_fail);
+ /* Note that the low part is always in RS+1, even in LE mode. */
+ tcg_gen_concat_i64_i128(val, cpu_gpr[rs + 1], cpu_gpr[rs]);
- tcg_gen_addi_i64(t0, cpu_reserve, 8);
- gen_qemu_ld64_i64(ctx, t0, t0);
- tcg_gen_ld_i64(t1, cpu_env, (ctx->le_mode
- ? offsetof(CPUPPCState, reserve_val)
- : offsetof(CPUPPCState, reserve_val2)));
- tcg_gen_brcond_i64(TCG_COND_NE, t0, t1, lab_fail);
+ tcg_gen_atomic_cmpxchg_i128(val, cpu_reserve, cmp, val, ctx->mem_idx,
+ DEF_MEMOP(MO_128 | MO_ALIGN));
- /* Success */
- gen_qemu_st64_i64(ctx, ctx->le_mode ? lo : hi, cpu_reserve);
- tcg_gen_addi_i64(t0, cpu_reserve, 8);
- gen_qemu_st64_i64(ctx, ctx->le_mode ? hi : lo, t0);
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ tcg_gen_extr_i128_i64(t1, t0, val);
- tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
- tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], CRF_EQ);
- tcg_gen_br(lab_over);
+ tcg_gen_xor_tl(t1, t1, cpu_reserve_val2);
+ tcg_gen_xor_tl(t0, t0, cpu_reserve_val);
+ tcg_gen_or_tl(t0, t0, t1);
- gen_set_label(lab_fail);
- tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t0, t0, 0);
+ tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
+ tcg_gen_or_tl(cr0, cr0, t0);
- gen_set_label(lab_over);
- tcg_gen_movi_tl(cpu_reserve, -1);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- }
+ gen_set_label(lfail);
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cr0);
+ tcg_gen_movi_tl(cpu_reserve, -1);
}
#endif /* defined(TARGET_PPC64) */
/* sync */
static void gen_sync(DisasContext *ctx)
{
+ TCGBar bar = TCG_MO_ALL;
uint32_t l = (ctx->opcode >> 21) & 3;
+ if ((l == 1) && (ctx->insns_flags2 & PPC2_MEM_LWSYNC)) {
+ bar = TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST;
+ }
+
/*
* We may need to check for a pending TLB flush.
*
@@ -4194,32 +3898,111 @@ static void gen_sync(DisasContext *ctx)
if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) {
gen_check_tlb_flush(ctx, true);
}
- tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
+
+ tcg_gen_mb(bar | TCG_BAR_SC);
}
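The barrier selection above can be restated compactly; an illustrative sketch only, since the real code keeps the choice inline:

    /* L=1 (lwsync) orders everything except store->load; any other encoding
     * (sync, ptesync) gets the full TCG_MO_ALL barrier. */
    static TCGBar sync_barrier(uint32_t l, bool have_lwsync)
    {
        if (l == 1 && have_lwsync) {
            return TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST;
        }
        return TCG_MO_ALL;
    }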
/* wait */
static void gen_wait(DisasContext *ctx)
{
- TCGv_i32 t0 = tcg_const_i32(1);
- tcg_gen_st_i32(t0, cpu_env,
- -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
- tcg_temp_free_i32(t0);
- /* Stop translation, as the CPU is supposed to sleep from now */
- gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
+ uint32_t wc;
+
+ if (ctx->insns_flags & PPC_WAIT) {
+ /* v2.03-v2.07 define an older incompatible 'wait' encoding. */
+
+ if (ctx->insns_flags2 & PPC2_PM_ISA206) {
+ /* v2.06 introduced the WC field. WC > 0 may be treated as no-op. */
+ wc = WC(ctx->opcode);
+ } else {
+ wc = 0;
+ }
+
+ } else if (ctx->insns_flags2 & PPC2_ISA300) {
+ /* v3.0 defines a new 'wait' encoding. */
+ wc = WC(ctx->opcode);
+ if (ctx->insns_flags2 & PPC2_ISA310) {
+ uint32_t pl = PL(ctx->opcode);
+
+ /* WC 1,2 may be treated as no-op. WC 3 is reserved. */
+ if (wc == 3) {
+ gen_invalid(ctx);
+ return;
+ }
+
+            /* PL 1-3 are reserved. If WC=2 then the insn is treated as a no-op. */
+ if (pl > 0 && wc != 2) {
+ gen_invalid(ctx);
+ return;
+ }
+
+ } else { /* ISA300 */
+ /* WC 1-3 are reserved */
+ if (wc > 0) {
+ gen_invalid(ctx);
+ return;
+ }
+ }
+
+ } else {
+ warn_report("wait instruction decoded with wrong ISA flags.");
+ gen_invalid(ctx);
+ return;
+ }
+
+ /*
+ * wait without WC field or with WC=0 waits for an exception / interrupt
+ * to occur.
+ */
+ if (wc == 0) {
+ TCGv_i32 t0 = tcg_constant_i32(1);
+ tcg_gen_st_i32(t0, tcg_env,
+ -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
+ /* Stop translation, as the CPU is supposed to sleep from now */
+ gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
+ }
+
+ /*
+ * Other wait types must not just wait until an exception occurs because
+ * ignoring their other wake-up conditions could cause a hang.
+ *
+ * For v2.06 and 2.07, wc=1,2,3 are architected but may be implemented as
+ * no-ops.
+ *
+ * wc=1 and wc=3 explicitly allow the instruction to be treated as a no-op.
+ *
+     * wc=2 waits for an implementation-specific condition, which could
+     * always be true, so it can be implemented as a no-op.
+ *
+ * For v3.1, wc=1,2 are architected but may be implemented as no-ops.
+ *
+ * wc=1 (waitrsv) waits for an exception or a reservation to be lost.
+ * Reservation-loss may have implementation-specific conditions, so it
+ * can be implemented as a no-op.
+ *
+ * wc=2 waits for an exception or an amount of time to pass. This
+ * amount is implementation-specific so it can be implemented as a
+ * no-op.
+ *
+ * ISA v3.1 allows for execution to resume "in the rare case of
+     * an implementation-dependent event", so software must not depend
+     * on the architected resumption condition becoming true; no-op
+     * implementations should be architecturally correct
+ * (if suboptimal).
+ */
}
#if defined(TARGET_PPC64)
static void gen_doze(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv_i32 t;
- CHK_HV;
- t = tcg_const_i32(PPC_PM_DOZE);
- gen_helper_pminsn(cpu_env, t);
- tcg_temp_free_i32(t);
+ CHK_HV(ctx);
+ translator_io_start(&ctx->base);
+ t = tcg_constant_i32(PPC_PM_DOZE);
+ gen_helper_pminsn(tcg_env, t);
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
@@ -4228,14 +4011,14 @@ static void gen_doze(DisasContext *ctx)
static void gen_nap(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv_i32 t;
- CHK_HV;
- t = tcg_const_i32(PPC_PM_NAP);
- gen_helper_pminsn(cpu_env, t);
- tcg_temp_free_i32(t);
+ CHK_HV(ctx);
+ translator_io_start(&ctx->base);
+ t = tcg_constant_i32(PPC_PM_NAP);
+ gen_helper_pminsn(tcg_env, t);
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
@@ -4244,14 +4027,14 @@ static void gen_nap(DisasContext *ctx)
static void gen_stop(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv_i32 t;
- CHK_HV;
- t = tcg_const_i32(PPC_PM_STOP);
- gen_helper_pminsn(cpu_env, t);
- tcg_temp_free_i32(t);
+ CHK_HV(ctx);
+ translator_io_start(&ctx->base);
+ t = tcg_constant_i32(PPC_PM_STOP);
+ gen_helper_pminsn(tcg_env, t);
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
@@ -4260,14 +4043,14 @@ static void gen_stop(DisasContext *ctx)
static void gen_sleep(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv_i32 t;
- CHK_HV;
- t = tcg_const_i32(PPC_PM_SLEEP);
- gen_helper_pminsn(cpu_env, t);
- tcg_temp_free_i32(t);
+ CHK_HV(ctx);
+ translator_io_start(&ctx->base);
+ t = tcg_constant_i32(PPC_PM_SLEEP);
+ gen_helper_pminsn(tcg_env, t);
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
@@ -4276,14 +4059,14 @@ static void gen_sleep(DisasContext *ctx)
static void gen_rvwinkle(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv_i32 t;
- CHK_HV;
- t = tcg_const_i32(PPC_PM_RVWINKLE);
- gen_helper_pminsn(cpu_env, t);
- tcg_temp_free_i32(t);
+ CHK_HV(ctx);
+ translator_io_start(&ctx->base);
+ t = tcg_constant_i32(PPC_PM_RVWINKLE);
+ gen_helper_pminsn(tcg_env, t);
/* Stop translation, as the CPU is supposed to sleep from now */
gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
@@ -4299,23 +4082,88 @@ static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip)
#endif
}
+#if defined(TARGET_PPC64)
+static void pmu_count_insns(DisasContext *ctx)
+{
+ /*
+ * Do not bother calling the helper if the PMU isn't counting
+ * instructions.
+ */
+ if (!ctx->pmu_insn_cnt) {
+ return;
+ }
+
+ #if !defined(CONFIG_USER_ONLY)
+ TCGLabel *l;
+ TCGv t0;
+
+ /*
+ * The PMU insns_inc() helper stops the internal PMU timer if a
+ * counter overflows happens. In that case, if the guest is
+     * counter overflow happens. In that case, if the guest is
+ * the helper can trigger a 'bad icount read'.
+ */
+ translator_io_start(&ctx->base);
+
+ /* Avoid helper calls when only PMC5-6 are enabled. */
+ if (!ctx->pmc_other) {
+ l = gen_new_label();
+ t0 = tcg_temp_new();
+
+ gen_load_spr(t0, SPR_POWER_PMC5);
+ tcg_gen_addi_tl(t0, t0, ctx->base.num_insns);
+ gen_store_spr(SPR_POWER_PMC5, t0);
+ /* Check for overflow, if it's enabled */
+ if (ctx->mmcr0_pmcjce) {
+ tcg_gen_brcondi_tl(TCG_COND_LT, t0, PMC_COUNTER_NEGATIVE_VAL, l);
+ gen_helper_handle_pmc5_overflow(tcg_env);
+ }
+
+ gen_set_label(l);
+ } else {
+ gen_helper_insns_inc(tcg_env, tcg_constant_i32(ctx->base.num_insns));
+ }
+ #else
+ /*
+ * User mode can read (but not write) PMC5 and start/stop
+ * the PMU via MMCR0_FC. In this case just increment
+ * PMC5 by base.num_insns.
+ */
+ TCGv t0 = tcg_temp_new();
+
+ gen_load_spr(t0, SPR_POWER_PMC5);
+ tcg_gen_addi_tl(t0, t0, ctx->base.num_insns);
+ gen_store_spr(SPR_POWER_PMC5, t0);
+ #endif /* #if !defined(CONFIG_USER_ONLY) */
+}
+#else
+static void pmu_count_insns(DisasContext *ctx)
+{
+ return;
+}
+#endif /* #if defined(TARGET_PPC64) */
+
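The fast path above keeps PMC5 in sync without a helper call. A minimal host-side sketch of that counting logic follows (not part of the patch); it assumes PMC_COUNTER_NEGATIVE_VAL is the usual 0x80000000 threshold from power8-pmu.h:

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch only: add the number of translated instructions to PMC5 and
 * report whether the overflow path (the handle_pmc5_overflow helper
 * above) would need to run.  The 0x80000000 threshold is an assumption
 * standing in for PMC_COUNTER_NEGATIVE_VAL.
 */
static bool pmc5_add_insns(uint64_t *pmc5, uint32_t num_insns, bool pmcjce)
{
    *pmc5 += num_insns;
    return pmcjce && *pmc5 >= 0x80000000u;
}

When MMCR0[PMCjCE] is clear the overflow check is skipped entirely, which is why the generated code only emits the brcond/helper pair under ctx->mmcr0_pmcjce.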
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
+ if (unlikely(ctx->singlestep_enabled)) {
+ return false;
+ }
return translator_use_goto_tb(&ctx->base, dest);
}
static void gen_lookup_and_goto_ptr(DisasContext *ctx)
{
- int sse = ctx->singlestep_enabled;
- if (unlikely(sse)) {
- if (sse & GDBSTUB_SINGLE_STEP) {
- gen_debug_exception(ctx);
- } else if (sse & (CPU_SINGLE_STEP | CPU_BRANCH_STEP)) {
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx)));
- } else {
- tcg_gen_exit_tb(NULL, 0);
- }
+ if (unlikely(ctx->singlestep_enabled)) {
+ gen_debug_exception(ctx, false);
} else {
+ /*
+ * tcg_gen_lookup_and_goto_ptr will exit the TB if
+ * CF_NO_GOTO_PTR is set. Count insns now.
+ */
+ if (ctx->base.tb->flags & CF_NO_GOTO_PTR) {
+ pmu_count_insns(ctx);
+ }
+
tcg_gen_lookup_and_goto_ptr();
}
}
@@ -4327,6 +4175,7 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
dest = (uint32_t) dest;
}
if (use_goto_tb(ctx, dest)) {
+ pmu_count_insns(ctx);
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(cpu_nip, dest & ~3);
tcg_gen_exit_tb(ctx->base.tb, n);
@@ -4377,7 +4226,7 @@ static void gen_bcond(DisasContext *ctx, int type)
TCGv target;
if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
- target = tcg_temp_local_new();
+ target = tcg_temp_new();
if (type == BCOND_CTR) {
tcg_gen_mov_tl(target, cpu_ctr);
} else if (type == BCOND_TAR) {
@@ -4413,8 +4262,6 @@ static void gen_bcond(DisasContext *ctx, int type)
*/
if (unlikely(!is_book3s_arch2x(ctx))) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
- tcg_temp_free(temp);
- tcg_temp_free(target);
return;
}
@@ -4442,7 +4289,6 @@ static void gen_bcond(DisasContext *ctx, int type)
tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
}
}
- tcg_temp_free(temp);
}
if ((bo & 0x10) == 0) {
/* Test CR */
@@ -4457,7 +4303,6 @@ static void gen_bcond(DisasContext *ctx, int type)
tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1);
}
- tcg_temp_free_i32(temp);
}
gen_update_cfar(ctx, ctx->cia);
if (type == BCOND_IM) {
@@ -4474,7 +4319,6 @@ static void gen_bcond(DisasContext *ctx, int type)
tcg_gen_andi_tl(cpu_nip, target, ~3);
}
gen_lookup_and_goto_ptr(ctx);
- tcg_temp_free(target);
}
if ((bo & 0x14) != 0x14) {
/* fallthrough case */
@@ -4532,8 +4376,6 @@ static void glue(gen_, name)(DisasContext *ctx) \
tcg_gen_andi_i32(t0, t0, bitmask); \
tcg_gen_andi_i32(t1, cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask); \
tcg_gen_or_i32(cpu_crf[crbD(ctx->opcode) >> 2], t0, t1); \
- tcg_temp_free_i32(t0); \
- tcg_temp_free_i32(t1); \
}
/* crand */
@@ -4565,7 +4407,7 @@ static void gen_mcrf(DisasContext *ctx)
static void gen_rfi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
/*
* This instruction doesn't exist anymore on 64-bit server
@@ -4576,10 +4418,10 @@ static void gen_rfi(DisasContext *ctx)
return;
}
/* Restore CPU state */
- CHK_SV;
- gen_icount_io_start(ctx);
+ CHK_SV(ctx);
+ translator_io_start(&ctx->base);
gen_update_cfar(ctx, ctx->cia);
- gen_helper_rfi(cpu_env);
+ gen_helper_rfi(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
#endif
}
@@ -4588,13 +4430,13 @@ static void gen_rfi(DisasContext *ctx)
static void gen_rfid(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
/* Restore CPU state */
- CHK_SV;
- gen_icount_io_start(ctx);
+ CHK_SV(ctx);
+ translator_io_start(&ctx->base);
gen_update_cfar(ctx, ctx->cia);
- gen_helper_rfid(cpu_env);
+ gen_helper_rfid(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
#endif
}
@@ -4603,13 +4445,13 @@ static void gen_rfid(DisasContext *ctx)
static void gen_rfscv(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
/* Restore CPU state */
- CHK_SV;
- gen_icount_io_start(ctx);
+ CHK_SV(ctx);
+ translator_io_start(&ctx->base);
gen_update_cfar(ctx, ctx->cia);
- gen_helper_rfscv(cpu_env);
+ gen_helper_rfscv(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
#endif
}
@@ -4618,11 +4460,12 @@ static void gen_rfscv(DisasContext *ctx)
static void gen_hrfid(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
/* Restore CPU state */
- CHK_HV;
- gen_helper_hrfid(cpu_env);
+ CHK_HV(ctx);
+ translator_io_start(&ctx->base);
+ gen_helper_hrfid(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
#endif
}
@@ -4633,13 +4476,17 @@ static void gen_hrfid(DisasContext *ctx)
#define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL_USER
#else
#define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL
-#define POWERPC_SYSCALL_VECTORED POWERPC_EXCP_SYSCALL_VECTORED
#endif
static void gen_sc(DisasContext *ctx)
{
uint32_t lev;
- lev = (ctx->opcode >> 5) & 0x7F;
+ /*
+ * LEV is a 7-bit field, but the top 6 bits are treated as a reserved
+ * field (i.e., ignored). ISA v3.1 changes that to 5 bits, but that is
+ * for the Ultravisor, which TCG does not support, so just ignore the top 6.
+ */
+ lev = (ctx->opcode >> 5) & 0x1;
gen_exception_err(ctx, POWERPC_SYSCALL, lev);
}
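A minimal sketch (not part of the patch) of how the LEV operand is now decoded from the sc opcode word; only the low bit of the 7-bit field survives:

#include <stdint.h>

/* Sketch only: extract the single LEV bit honoured by this translator. */
static inline unsigned sc_lev(uint32_t insn)
{
    return (insn >> 5) & 0x1;
}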
@@ -4651,7 +4498,7 @@ static void gen_scv(DisasContext *ctx)
/* Set the PC back to the faulting instruction. */
gen_update_nip(ctx, ctx->cia);
- gen_helper_scv(cpu_env, tcg_constant_i32(lev));
+ gen_helper_scv(tcg_env, tcg_constant_i32(lev));
ctx->base.is_jmp = DISAS_NORETURN;
}
@@ -4683,10 +4530,9 @@ static void gen_tw(DisasContext *ctx)
if (check_unconditional_trap(ctx)) {
return;
}
- t0 = tcg_const_i32(TO(ctx->opcode));
- gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+ t0 = tcg_constant_i32(TO(ctx->opcode));
+ gen_helper_tw(tcg_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
t0);
- tcg_temp_free_i32(t0);
}
/* twi */
@@ -4698,11 +4544,9 @@ static void gen_twi(DisasContext *ctx)
if (check_unconditional_trap(ctx)) {
return;
}
- t0 = tcg_const_tl(SIMM(ctx->opcode));
- t1 = tcg_const_i32(TO(ctx->opcode));
- gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
- tcg_temp_free(t0);
- tcg_temp_free_i32(t1);
+ t0 = tcg_constant_tl(SIMM(ctx->opcode));
+ t1 = tcg_constant_i32(TO(ctx->opcode));
+ gen_helper_tw(tcg_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
}
#if defined(TARGET_PPC64)
@@ -4714,10 +4558,9 @@ static void gen_td(DisasContext *ctx)
if (check_unconditional_trap(ctx)) {
return;
}
- t0 = tcg_const_i32(TO(ctx->opcode));
- gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+ t0 = tcg_constant_i32(TO(ctx->opcode));
+ gen_helper_td(tcg_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
t0);
- tcg_temp_free_i32(t0);
}
/* tdi */
@@ -4729,11 +4572,9 @@ static void gen_tdi(DisasContext *ctx)
if (check_unconditional_trap(ctx)) {
return;
}
- t0 = tcg_const_tl(SIMM(ctx->opcode));
- t1 = tcg_const_i32(TO(ctx->opcode));
- gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
- tcg_temp_free(t0);
- tcg_temp_free_i32(t1);
+ t0 = tcg_constant_tl(SIMM(ctx->opcode));
+ t1 = tcg_constant_i32(TO(ctx->opcode));
+ gen_helper_td(tcg_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
}
#endif
@@ -4754,8 +4595,6 @@ static void gen_mcrxr(DisasContext *ctx)
tcg_gen_shli_i32(dst, dst, 1);
tcg_gen_or_i32(dst, dst, t0);
tcg_gen_or_i32(dst, dst, t1);
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t1);
tcg_gen_movi_tl(cpu_so, 0);
tcg_gen_movi_tl(cpu_ov, 0);
@@ -4779,8 +4618,6 @@ static void gen_mcrxrx(DisasContext *ctx)
tcg_gen_or_tl(t1, t1, cpu_ca32);
tcg_gen_or_tl(t0, t0, t1);
tcg_gen_trunc_tl_i32(dst, t0);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
#endif
@@ -4815,14 +4652,13 @@ static void gen_mfcr(DisasContext *ctx)
tcg_gen_shli_i32(t0, t0, 4);
tcg_gen_or_i32(t0, t0, cpu_crf[7]);
tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
- tcg_temp_free_i32(t0);
}
}
/* mfmsr */
static void gen_mfmsr(DisasContext *ctx)
{
- CHK_SV;
+ CHK_SV(ctx);
tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_msr);
}
@@ -4878,11 +4714,11 @@ static inline void gen_op_mfspr(DisasContext *ctx)
*/
if (sprn & 0x10) {
if (ctx->pr) {
- gen_priv_exception(ctx, POWERPC_EXCP_INVAL_SPR);
+ gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
}
} else {
if (ctx->pr || sprn == 0 || sprn == 4 || sprn == 5 || sprn == 6) {
- gen_hvpriv_exception(ctx, POWERPC_EXCP_INVAL_SPR);
+ gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
}
}
}
@@ -4912,7 +4748,6 @@ static void gen_mtcrf(DisasContext *ctx)
tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
tcg_gen_shri_i32(temp, temp, crn * 4);
tcg_gen_andi_i32(cpu_crf[7 - crn], temp, 0xf);
- tcg_temp_free_i32(temp);
}
} else {
TCGv_i32 temp = tcg_temp_new_i32();
@@ -4923,7 +4758,6 @@ static void gen_mtcrf(DisasContext *ctx)
tcg_gen_andi_i32(cpu_crf[7 - crn], cpu_crf[7 - crn], 0xf);
}
}
- tcg_temp_free_i32(temp);
}
}
@@ -4936,33 +4770,38 @@ static void gen_mtmsrd(DisasContext *ctx)
return;
}
- CHK_SV;
+ CHK_SV(ctx);
#if !defined(CONFIG_USER_ONLY)
- gen_icount_io_start(ctx);
- if (ctx->opcode & 0x00010000) {
- /* L=1 form only updates EE and RI */
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)],
- (1 << MSR_RI) | (1 << MSR_EE));
- tcg_gen_andi_tl(t1, cpu_msr,
- ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE)));
- tcg_gen_or_tl(t1, t1, t0);
+ TCGv t0, t1;
+ target_ulong mask;
- gen_helper_store_msr(cpu_env, t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ translator_io_start(&ctx->base);
+ if (ctx->opcode & 0x00010000) {
+ /* L=1 form only updates EE and RI */
+ mask = (1ULL << MSR_RI) | (1ULL << MSR_EE);
} else {
+ /* mtmsrd does not alter HV, S, ME, or LE */
+ mask = ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S) |
+ (1ULL << MSR_HV));
/*
* XXX: we need to update nip before the store if we enter
 * power saving mode, since we will exit the loop directly from
* ppc_store_msr
*/
gen_update_nip(ctx, ctx->base.pc_next);
- gen_helper_store_msr(cpu_env, cpu_gpr[rS(ctx->opcode)]);
}
+
+ tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
+ tcg_gen_andi_tl(t1, cpu_msr, ~mask);
+ tcg_gen_or_tl(t0, t0, t1);
+
+ gen_helper_store_msr(tcg_env, t0);
+
/* Must stop the translation as machine state (may have) changed */
ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif /* !defined(CONFIG_USER_ONLY) */
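Both forms of mtmsrd now share one read-modify-write, differing only in the mask. A minimal host-side sketch of the merge (not part of the patch), with the MSR_* bit positions taken on trust from the surrounding code:

#include <stdint.h>

/*
 * Sketch only: bits selected by the mask come from the source GPR,
 * everything else is preserved from the old MSR.  For L=1 the mask is
 * just EE|RI; for L=0 it is everything except LE, ME, S and HV.
 */
static uint64_t msr_merge(uint64_t old_msr, uint64_t gpr, uint64_t mask)
{
    return (gpr & mask) | (old_msr & ~mask);
}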
@@ -4971,26 +4810,22 @@ static void gen_mtmsrd(DisasContext *ctx)
static void gen_mtmsr(DisasContext *ctx)
{
- CHK_SV;
+ CHK_SV(ctx);
#if !defined(CONFIG_USER_ONLY)
- gen_icount_io_start(ctx);
- if (ctx->opcode & 0x00010000) {
- /* L=1 form only updates EE and RI */
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)],
- (1 << MSR_RI) | (1 << MSR_EE));
- tcg_gen_andi_tl(t1, cpu_msr,
- ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE)));
- tcg_gen_or_tl(t1, t1, t0);
+ TCGv t0, t1;
+ target_ulong mask = 0xFFFFFFFF;
- gen_helper_store_msr(cpu_env, t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ translator_io_start(&ctx->base);
+ if (ctx->opcode & 0x00010000) {
+ /* L=1 form only updates EE and RI */
+ mask &= (1ULL << MSR_RI) | (1ULL << MSR_EE);
} else {
- TCGv msr = tcg_temp_new();
+ /* mtmsr does not alter S, ME, or LE */
+ mask &= ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S));
/*
* XXX: we need to update nip before the store if we enter
@@ -4998,14 +4833,14 @@ static void gen_mtmsr(DisasContext *ctx)
* ppc_store_msr
*/
gen_update_nip(ctx, ctx->base.pc_next);
-#if defined(TARGET_PPC64)
- tcg_gen_deposit_tl(msr, cpu_msr, cpu_gpr[rS(ctx->opcode)], 0, 32);
-#else
- tcg_gen_mov_tl(msr, cpu_gpr[rS(ctx->opcode)]);
-#endif
- gen_helper_store_msr(cpu_env, msr);
- tcg_temp_free(msr);
}
+
+ tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
+ tcg_gen_andi_tl(t1, cpu_msr, ~mask);
+ tcg_gen_or_tl(t0, t0, t1);
+
+ gen_helper_store_msr(tcg_env, t0);
+
/* Must stop the translation as machine state (may have) changed */
ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif
@@ -5058,11 +4893,11 @@ static void gen_mtspr(DisasContext *ctx)
*/
if (sprn & 0x10) {
if (ctx->pr) {
- gen_priv_exception(ctx, POWERPC_EXCP_INVAL_SPR);
+ gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
}
} else {
if (ctx->pr || sprn == 0) {
- gen_hvpriv_exception(ctx, POWERPC_EXCP_INVAL_SPR);
+ gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
}
}
}
@@ -5073,19 +4908,13 @@ static void gen_mtspr(DisasContext *ctx)
static void gen_setb(DisasContext *ctx)
{
TCGv_i32 t0 = tcg_temp_new_i32();
- TCGv_i32 t8 = tcg_temp_new_i32();
- TCGv_i32 tm1 = tcg_temp_new_i32();
+ TCGv_i32 t8 = tcg_constant_i32(8);
+ TCGv_i32 tm1 = tcg_constant_i32(-1);
int crf = crfS(ctx->opcode);
tcg_gen_setcondi_i32(TCG_COND_GEU, t0, cpu_crf[crf], 4);
- tcg_gen_movi_i32(t8, 8);
- tcg_gen_movi_i32(tm1, -1);
tcg_gen_movcond_i32(TCG_COND_GEU, t0, cpu_crf[crf], t8, tm1, t0);
tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
-
- tcg_temp_free_i32(t0);
- tcg_temp_free_i32(t8);
- tcg_temp_free_i32(tm1);
}
#endif
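The setcond/movcond pair in gen_setb implements the usual setb mapping of a CR field to -1, 1 or 0. A sketch of the same mapping in plain C (not part of the patch), assuming the bit-3-is-LT / bit-2-is-GT layout used for cpu_crf:

#include <stdint.h>

/* Sketch only. */
static int64_t setb_result(uint32_t crf)
{
    if (crf & 8) {              /* LT bit set */
        return -1;
    }
    return (crf & 4) ? 1 : 0;   /* GT bit set -> 1, otherwise 0 */
}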
@@ -5100,7 +4929,6 @@ static void gen_dcbf(DisasContext *ctx)
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
gen_qemu_ld8u(ctx, t0, t0);
- tcg_temp_free(t0);
}
/* dcbfep (external PID dcbf) */
@@ -5108,23 +4936,22 @@ static void gen_dcbfep(DisasContext *ctx)
{
/* XXX: specification says this is treated as a load by the MMU */
TCGv t0;
- CHK_SV;
+ CHK_SV(ctx);
gen_set_access_type(ctx, ACCESS_CACHE);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
- tcg_temp_free(t0);
}
/* dcbi (Supervisor only) */
static void gen_dcbi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv EA, val;
- CHK_SV;
+ CHK_SV(ctx);
EA = tcg_temp_new();
gen_set_access_type(ctx, ACCESS_CACHE);
gen_addr_reg_index(ctx, EA);
@@ -5132,8 +4959,6 @@ static void gen_dcbi(DisasContext *ctx)
/* XXX: specification says this should be treated as a store by the MMU */
gen_qemu_ld8u(ctx, val, EA);
gen_qemu_st8(ctx, val, EA);
- tcg_temp_free(val);
- tcg_temp_free(EA);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5146,7 +4971,6 @@ static void gen_dcbst(DisasContext *ctx)
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
gen_qemu_ld8u(ctx, t0, t0);
- tcg_temp_free(t0);
}
/* dcbstep (dcbstep External PID version) */
@@ -5158,7 +4982,6 @@ static void gen_dcbstep(DisasContext *ctx)
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
- tcg_temp_free(t0);
}
/* dcbt */
@@ -5209,7 +5032,14 @@ static void gen_dcbtls(DisasContext *ctx)
gen_load_spr(t0, SPR_Exxx_L1CSR0);
tcg_gen_ori_tl(t0, t0, L1CSR0_CUL);
gen_store_spr(SPR_Exxx_L1CSR0, t0);
- tcg_temp_free(t0);
+}
+
+/* dcblc */
+static void gen_dcblc(DisasContext *ctx)
+{
+ /*
+ * interpreted as no-op
+ */
}
/* dcbz */
@@ -5220,11 +5050,9 @@ static void gen_dcbz(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_CACHE);
tcgv_addr = tcg_temp_new();
- tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000);
+ tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
gen_addr_reg_index(ctx, tcgv_addr);
- gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_op);
- tcg_temp_free(tcgv_addr);
- tcg_temp_free_i32(tcgv_op);
+ gen_helper_dcbz(tcg_env, tcgv_addr, tcgv_op);
}
/* dcbzep */
@@ -5235,11 +5063,9 @@ static void gen_dcbzep(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_CACHE);
tcgv_addr = tcg_temp_new();
- tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000);
+ tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
gen_addr_reg_index(ctx, tcgv_addr);
- gen_helper_dcbzep(cpu_env, tcgv_addr, tcgv_op);
- tcg_temp_free(tcgv_addr);
- tcg_temp_free_i32(tcgv_op);
+ gen_helper_dcbzep(tcg_env, tcgv_addr, tcgv_op);
}
/* dst / dstt */
@@ -5276,8 +5102,7 @@ static void gen_icbi(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_CACHE);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_helper_icbi(cpu_env, t0);
- tcg_temp_free(t0);
+ gen_helper_icbi(tcg_env, t0);
}
/* icbiep */
@@ -5287,8 +5112,7 @@ static void gen_icbiep(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_CACHE);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_helper_icbiep(cpu_env, t0);
- tcg_temp_free(t0);
+ gen_helper_icbiep(tcg_env, t0);
}
/* Optional: */
@@ -5309,14 +5133,13 @@ static void gen_dcba(DisasContext *ctx)
static void gen_mfsr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv t0;
- CHK_SV;
- t0 = tcg_const_tl(SR(ctx->opcode));
- gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
- tcg_temp_free(t0);
+ CHK_SV(ctx);
+ t0 = tcg_constant_tl(SR(ctx->opcode));
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5324,15 +5147,14 @@ static void gen_mfsr(DisasContext *ctx)
static void gen_mfsrin(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv t0;
- CHK_SV;
+ CHK_SV(ctx);
t0 = tcg_temp_new();
tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
- gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
- tcg_temp_free(t0);
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5340,14 +5162,13 @@ static void gen_mfsrin(DisasContext *ctx)
static void gen_mtsr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv t0;
- CHK_SV;
- t0 = tcg_const_tl(SR(ctx->opcode));
- gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
- tcg_temp_free(t0);
+ CHK_SV(ctx);
+ t0 = tcg_constant_tl(SR(ctx->opcode));
+ gen_helper_store_sr(tcg_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5355,15 +5176,14 @@ static void gen_mtsr(DisasContext *ctx)
static void gen_mtsrin(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv t0;
- CHK_SV;
+ CHK_SV(ctx);
t0 = tcg_temp_new();
tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
- gen_helper_store_sr(cpu_env, t0, cpu_gpr[rD(ctx->opcode)]);
- tcg_temp_free(t0);
+ gen_helper_store_sr(tcg_env, t0, cpu_gpr[rD(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5374,14 +5194,13 @@ static void gen_mtsrin(DisasContext *ctx)
static void gen_mfsr_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv t0;
- CHK_SV;
- t0 = tcg_const_tl(SR(ctx->opcode));
- gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
- tcg_temp_free(t0);
+ CHK_SV(ctx);
+ t0 = tcg_constant_tl(SR(ctx->opcode));
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5389,15 +5208,14 @@ static void gen_mfsr_64b(DisasContext *ctx)
static void gen_mfsrin_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv t0;
- CHK_SV;
+ CHK_SV(ctx);
t0 = tcg_temp_new();
tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
- gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
- tcg_temp_free(t0);
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5405,14 +5223,13 @@ static void gen_mfsrin_64b(DisasContext *ctx)
static void gen_mtsr_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv t0;
- CHK_SV;
- t0 = tcg_const_tl(SR(ctx->opcode));
- gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
- tcg_temp_free(t0);
+ CHK_SV(ctx);
+ t0 = tcg_constant_tl(SR(ctx->opcode));
+ gen_helper_store_sr(tcg_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -5420,79 +5237,17 @@ static void gen_mtsr_64b(DisasContext *ctx)
static void gen_mtsrin_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv t0;
- CHK_SV;
+ CHK_SV(ctx);
t0 = tcg_temp_new();
tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
- gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
- tcg_temp_free(t0);
-#endif /* defined(CONFIG_USER_ONLY) */
-}
-
-/* slbmte */
-static void gen_slbmte(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- CHK_SV;
-
- gen_helper_store_slb(cpu_env, cpu_gpr[rB(ctx->opcode)],
- cpu_gpr[rS(ctx->opcode)]);
-#endif /* defined(CONFIG_USER_ONLY) */
-}
-
-static void gen_slbmfee(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- CHK_SV;
-
- gen_helper_load_slb_esid(cpu_gpr[rS(ctx->opcode)], cpu_env,
- cpu_gpr[rB(ctx->opcode)]);
-#endif /* defined(CONFIG_USER_ONLY) */
-}
-
-static void gen_slbmfev(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- CHK_SV;
-
- gen_helper_load_slb_vsid(cpu_gpr[rS(ctx->opcode)], cpu_env,
- cpu_gpr[rB(ctx->opcode)]);
+ gen_helper_store_sr(tcg_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
-static void gen_slbfee_(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
-#else
- TCGLabel *l1, *l2;
-
- if (unlikely(ctx->pr)) {
- gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
- return;
- }
- gen_helper_find_slb_vsid(cpu_gpr[rS(ctx->opcode)], cpu_env,
- cpu_gpr[rB(ctx->opcode)]);
- l1 = gen_new_label();
- l2 = gen_new_label();
- tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rS(ctx->opcode)], -1, l1);
- tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], CRF_EQ);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_movi_tl(cpu_gpr[rS(ctx->opcode)], 0);
- gen_set_label(l2);
-#endif
-}
#endif /* defined(TARGET_PPC64) */
/*** Lookaside buffer management ***/
@@ -5502,67 +5257,25 @@ static void gen_slbfee_(DisasContext *ctx)
static void gen_tlbia(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
- CHK_HV;
+ CHK_HV(ctx);
- gen_helper_tlbia(cpu_env);
+ gen_helper_tlbia(tcg_env);
#endif /* defined(CONFIG_USER_ONLY) */
}
-/* tlbiel */
-static void gen_tlbiel(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- CHK_SV;
-
- gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
-#endif /* defined(CONFIG_USER_ONLY) */
-}
-
-/* tlbie */
-static void gen_tlbie(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- TCGv_i32 t1;
-
- if (ctx->gtse) {
- CHK_SV; /* If gtse is set then tlbie is supervisor privileged */
- } else {
- CHK_HV; /* Else hypervisor privileged */
- }
-
- if (NARROW_MODE(ctx)) {
- TCGv t0 = tcg_temp_new();
- tcg_gen_ext32u_tl(t0, cpu_gpr[rB(ctx->opcode)]);
- gen_helper_tlbie(cpu_env, t0);
- tcg_temp_free(t0);
- } else {
- gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
- }
- t1 = tcg_temp_new_i32();
- tcg_gen_ld_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
- tcg_gen_ori_i32(t1, t1, TLB_NEED_GLOBAL_FLUSH);
- tcg_gen_st_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
- tcg_temp_free_i32(t1);
-#endif /* defined(CONFIG_USER_ONLY) */
-}
-
/* tlbsync */
static void gen_tlbsync(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
if (ctx->gtse) {
- CHK_SV; /* If gtse is set then tlbsync is supervisor privileged */
+ CHK_SV(ctx); /* If gtse is set then tlbsync is supervisor privileged */
} else {
- CHK_HV; /* Else hypervisor privileged */
+ CHK_HV(ctx); /* Else hypervisor privileged */
}
 /* BookS does both ptesync and tlbsync, so make tlbsync a no-op for server */
@@ -5572,60 +5285,6 @@ static void gen_tlbsync(DisasContext *ctx)
#endif /* defined(CONFIG_USER_ONLY) */
}
-#if defined(TARGET_PPC64)
-/* slbia */
-static void gen_slbia(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- uint32_t ih = (ctx->opcode >> 21) & 0x7;
- TCGv_i32 t0 = tcg_const_i32(ih);
-
- CHK_SV;
-
- gen_helper_slbia(cpu_env, t0);
- tcg_temp_free_i32(t0);
-#endif /* defined(CONFIG_USER_ONLY) */
-}
-
-/* slbie */
-static void gen_slbie(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- CHK_SV;
-
- gen_helper_slbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
-#endif /* defined(CONFIG_USER_ONLY) */
-}
-
-/* slbieg */
-static void gen_slbieg(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- CHK_SV;
-
- gen_helper_slbieg(cpu_env, cpu_gpr[rB(ctx->opcode)]);
-#endif /* defined(CONFIG_USER_ONLY) */
-}
-
-/* slbsync */
-static void gen_slbsync(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- CHK_SV;
- gen_check_tlb_flush(ctx, true);
-#endif /* defined(CONFIG_USER_ONLY) */
-}
-
-#endif /* defined(TARGET_PPC64) */
-
/*** External control ***/
/* Optional: */
@@ -5639,7 +5298,6 @@ static void gen_eciwx(DisasContext *ctx)
gen_addr_reg_index(ctx, t0);
tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
DEF_MEMOP(MO_UL | MO_ALIGN));
- tcg_temp_free(t0);
}
/* ecowx */
@@ -5652,697 +5310,6 @@ static void gen_ecowx(DisasContext *ctx)
gen_addr_reg_index(ctx, t0);
tcg_gen_qemu_st_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
DEF_MEMOP(MO_UL | MO_ALIGN));
- tcg_temp_free(t0);
-}
-
-/* PowerPC 601 specific instructions */
-
-/* abs - abs. */
-static void gen_abs(DisasContext *ctx)
-{
- TCGv d = cpu_gpr[rD(ctx->opcode)];
- TCGv a = cpu_gpr[rA(ctx->opcode)];
-
- tcg_gen_abs_tl(d, a);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, d);
- }
-}
-
-/* abso - abso. */
-static void gen_abso(DisasContext *ctx)
-{
- TCGv d = cpu_gpr[rD(ctx->opcode)];
- TCGv a = cpu_gpr[rA(ctx->opcode)];
-
- tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_ov, a, 0x80000000);
- tcg_gen_abs_tl(d, a);
- tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, d);
- }
-}
-
-/* clcs */
-static void gen_clcs(DisasContext *ctx)
-{
- TCGv_i32 t0 = tcg_const_i32(rA(ctx->opcode));
- gen_helper_clcs(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
- tcg_temp_free_i32(t0);
- /* Rc=1 sets CR0 to an undefined state */
-}
-
-/* div - div. */
-static void gen_div(DisasContext *ctx)
-{
- gen_helper_div(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)],
- cpu_gpr[rB(ctx->opcode)]);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
- }
-}
-
-/* divo - divo. */
-static void gen_divo(DisasContext *ctx)
-{
- gen_helper_divo(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)],
- cpu_gpr[rB(ctx->opcode)]);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
- }
-}
-
-/* divs - divs. */
-static void gen_divs(DisasContext *ctx)
-{
- gen_helper_divs(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)],
- cpu_gpr[rB(ctx->opcode)]);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
- }
-}
-
-/* divso - divso. */
-static void gen_divso(DisasContext *ctx)
-{
- gen_helper_divso(cpu_gpr[rD(ctx->opcode)], cpu_env,
- cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
- }
-}
-
-/* doz - doz. */
-static void gen_doz(DisasContext *ctx)
-{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- tcg_gen_brcond_tl(TCG_COND_GE, cpu_gpr[rB(ctx->opcode)],
- cpu_gpr[rA(ctx->opcode)], l1);
- tcg_gen_sub_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
- cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
- gen_set_label(l2);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
- }
-}
-
-/* dozo - dozo. */
-static void gen_dozo(DisasContext *ctx)
-{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- TCGv t2 = tcg_temp_new();
- /* Start with XER OV disabled, the most likely case */
- tcg_gen_movi_tl(cpu_ov, 0);
- tcg_gen_brcond_tl(TCG_COND_GE, cpu_gpr[rB(ctx->opcode)],
- cpu_gpr[rA(ctx->opcode)], l1);
- tcg_gen_sub_tl(t0, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_xor_tl(t1, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_xor_tl(t2, cpu_gpr[rA(ctx->opcode)], t0);
- tcg_gen_andc_tl(t1, t1, t2);
- tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
- tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l2);
- tcg_gen_movi_tl(cpu_ov, 1);
- tcg_gen_movi_tl(cpu_so, 1);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
- gen_set_label(l2);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
- }
-}
-
-/* dozi */
-static void gen_dozi(DisasContext *ctx)
-{
- target_long simm = SIMM(ctx->opcode);
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- tcg_gen_brcondi_tl(TCG_COND_LT, cpu_gpr[rA(ctx->opcode)], simm, l1);
- tcg_gen_subfi_tl(cpu_gpr[rD(ctx->opcode)], simm, cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
- gen_set_label(l2);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
- }
-}
-
-/* lscbx - lscbx. */
-static void gen_lscbx(DisasContext *ctx)
-{
- TCGv t0 = tcg_temp_new();
- TCGv_i32 t1 = tcg_const_i32(rD(ctx->opcode));
- TCGv_i32 t2 = tcg_const_i32(rA(ctx->opcode));
- TCGv_i32 t3 = tcg_const_i32(rB(ctx->opcode));
-
- gen_addr_reg_index(ctx, t0);
- gen_helper_lscbx(t0, cpu_env, t0, t1, t2, t3);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(t3);
- tcg_gen_andi_tl(cpu_xer, cpu_xer, ~0x7F);
- tcg_gen_or_tl(cpu_xer, cpu_xer, t0);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, t0);
- }
- tcg_temp_free(t0);
-}
-
-/* maskg - maskg. */
-static void gen_maskg(DisasContext *ctx)
-{
- TCGLabel *l1 = gen_new_label();
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- TCGv t2 = tcg_temp_new();
- TCGv t3 = tcg_temp_new();
- tcg_gen_movi_tl(t3, 0xFFFFFFFF);
- tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
- tcg_gen_andi_tl(t1, cpu_gpr[rS(ctx->opcode)], 0x1F);
- tcg_gen_addi_tl(t2, t0, 1);
- tcg_gen_shr_tl(t2, t3, t2);
- tcg_gen_shr_tl(t3, t3, t1);
- tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], t2, t3);
- tcg_gen_brcond_tl(TCG_COND_GE, t0, t1, l1);
- tcg_gen_neg_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
- gen_set_label(l1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
- tcg_temp_free(t3);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
- }
-}
-
-/* maskir - maskir. */
-static void gen_maskir(DisasContext *ctx)
-{
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- tcg_gen_and_tl(t0, cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
- tcg_gen_andc_tl(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
- tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
- }
-}
-
-/* mul - mul. */
-static void gen_mul(DisasContext *ctx)
-{
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
- TCGv t2 = tcg_temp_new();
- tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
- tcg_gen_mul_i64(t0, t0, t1);
- tcg_gen_trunc_i64_tl(t2, t0);
- gen_store_spr(SPR_MQ, t2);
- tcg_gen_shri_i64(t1, t0, 32);
- tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t1);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- tcg_temp_free(t2);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
- }
-}
-
-/* mulo - mulo. */
-static void gen_mulo(DisasContext *ctx)
-{
- TCGLabel *l1 = gen_new_label();
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
- TCGv t2 = tcg_temp_new();
- /* Start with XER OV disabled, the most likely case */
- tcg_gen_movi_tl(cpu_ov, 0);
- tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
- tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
- tcg_gen_mul_i64(t0, t0, t1);
- tcg_gen_trunc_i64_tl(t2, t0);
- gen_store_spr(SPR_MQ, t2);
- tcg_gen_shri_i64(t1, t0, 32);
- tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t1);
- tcg_gen_ext32s_i64(t1, t0);
- tcg_gen_brcond_i64(TCG_COND_EQ, t0, t1, l1);
- tcg_gen_movi_tl(cpu_ov, 1);
- tcg_gen_movi_tl(cpu_so, 1);
- gen_set_label(l1);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- tcg_temp_free(t2);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
- }
-}
-
-/* nabs - nabs. */
-static void gen_nabs(DisasContext *ctx)
-{
- TCGv d = cpu_gpr[rD(ctx->opcode)];
- TCGv a = cpu_gpr[rA(ctx->opcode)];
-
- tcg_gen_abs_tl(d, a);
- tcg_gen_neg_tl(d, d);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, d);
- }
-}
-
-/* nabso - nabso. */
-static void gen_nabso(DisasContext *ctx)
-{
- TCGv d = cpu_gpr[rD(ctx->opcode)];
- TCGv a = cpu_gpr[rA(ctx->opcode)];
-
- tcg_gen_abs_tl(d, a);
- tcg_gen_neg_tl(d, d);
- /* nabs never overflows */
- tcg_gen_movi_tl(cpu_ov, 0);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, d);
- }
-}
-
-/* rlmi - rlmi. */
-static void gen_rlmi(DisasContext *ctx)
-{
- uint32_t mb = MB(ctx->opcode);
- uint32_t me = ME(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
- tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
- tcg_gen_andi_tl(t0, t0, MASK(mb, me));
- tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
- ~MASK(mb, me));
- tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], t0);
- tcg_temp_free(t0);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
- }
-}
-
-/* rrib - rrib. */
-static void gen_rrib(DisasContext *ctx)
-{
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
- tcg_gen_movi_tl(t1, 0x80000000);
- tcg_gen_shr_tl(t1, t1, t0);
- tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
- tcg_gen_and_tl(t0, t0, t1);
- tcg_gen_andc_tl(t1, cpu_gpr[rA(ctx->opcode)], t1);
- tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
- }
-}
-
-/* sle - sle. */
-static void gen_sle(DisasContext *ctx)
-{
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
- tcg_gen_shl_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
- tcg_gen_subfi_tl(t1, 32, t1);
- tcg_gen_shr_tl(t1, cpu_gpr[rS(ctx->opcode)], t1);
- tcg_gen_or_tl(t1, t0, t1);
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
- gen_store_spr(SPR_MQ, t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
- }
-}
-
-/* sleq - sleq. */
-static void gen_sleq(DisasContext *ctx)
-{
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- TCGv t2 = tcg_temp_new();
- tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
- tcg_gen_movi_tl(t2, 0xFFFFFFFF);
- tcg_gen_shl_tl(t2, t2, t0);
- tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
- gen_load_spr(t1, SPR_MQ);
- gen_store_spr(SPR_MQ, t0);
- tcg_gen_and_tl(t0, t0, t2);
- tcg_gen_andc_tl(t1, t1, t2);
- tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
- }
-}
-
-/* sliq - sliq. */
-static void gen_sliq(DisasContext *ctx)
-{
- int sh = SH(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- tcg_gen_shli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
- tcg_gen_shri_tl(t1, cpu_gpr[rS(ctx->opcode)], 32 - sh);
- tcg_gen_or_tl(t1, t0, t1);
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
- gen_store_spr(SPR_MQ, t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
- }
-}
-
-/* slliq - slliq. */
-static void gen_slliq(DisasContext *ctx)
-{
- int sh = SH(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
- gen_load_spr(t1, SPR_MQ);
- gen_store_spr(SPR_MQ, t0);
- tcg_gen_andi_tl(t0, t0, (0xFFFFFFFFU << sh));
- tcg_gen_andi_tl(t1, t1, ~(0xFFFFFFFFU << sh));
- tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
- }
-}
-
-/* sllq - sllq. */
-static void gen_sllq(DisasContext *ctx)
-{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- TCGv t0 = tcg_temp_local_new();
- TCGv t1 = tcg_temp_local_new();
- TCGv t2 = tcg_temp_local_new();
- tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F);
- tcg_gen_movi_tl(t1, 0xFFFFFFFF);
- tcg_gen_shl_tl(t1, t1, t2);
- tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20);
- tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
- gen_load_spr(t0, SPR_MQ);
- tcg_gen_and_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_shl_tl(t0, cpu_gpr[rS(ctx->opcode)], t2);
- gen_load_spr(t2, SPR_MQ);
- tcg_gen_andc_tl(t1, t2, t1);
- tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
- gen_set_label(l2);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
- }
-}
-
-/* slq - slq. */
-static void gen_slq(DisasContext *ctx)
-{
- TCGLabel *l1 = gen_new_label();
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
- tcg_gen_shl_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
- tcg_gen_subfi_tl(t1, 32, t1);
- tcg_gen_shr_tl(t1, cpu_gpr[rS(ctx->opcode)], t1);
- tcg_gen_or_tl(t1, t0, t1);
- gen_store_spr(SPR_MQ, t1);
- tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x20);
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
- tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
- tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
- gen_set_label(l1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
- }
-}
-
-/* sraiq - sraiq. */
-static void gen_sraiq(DisasContext *ctx)
-{
- int sh = SH(ctx->opcode);
- TCGLabel *l1 = gen_new_label();
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
- tcg_gen_shli_tl(t1, cpu_gpr[rS(ctx->opcode)], 32 - sh);
- tcg_gen_or_tl(t0, t0, t1);
- gen_store_spr(SPR_MQ, t0);
- tcg_gen_movi_tl(cpu_ca, 0);
- tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
- tcg_gen_brcondi_tl(TCG_COND_GE, cpu_gpr[rS(ctx->opcode)], 0, l1);
- tcg_gen_movi_tl(cpu_ca, 1);
- gen_set_label(l1);
- tcg_gen_sari_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
- }
-}
-
-/* sraq - sraq. */
-static void gen_sraq(DisasContext *ctx)
-{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_local_new();
- TCGv t2 = tcg_temp_local_new();
- tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F);
- tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t2);
- tcg_gen_sar_tl(t1, cpu_gpr[rS(ctx->opcode)], t2);
- tcg_gen_subfi_tl(t2, 32, t2);
- tcg_gen_shl_tl(t2, cpu_gpr[rS(ctx->opcode)], t2);
- tcg_gen_or_tl(t0, t0, t2);
- gen_store_spr(SPR_MQ, t0);
- tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20);
- tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, l1);
- tcg_gen_mov_tl(t2, cpu_gpr[rS(ctx->opcode)]);
- tcg_gen_sari_tl(t1, cpu_gpr[rS(ctx->opcode)], 31);
- gen_set_label(l1);
- tcg_temp_free(t0);
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t1);
- tcg_gen_movi_tl(cpu_ca, 0);
- tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l2);
- tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, l2);
- tcg_gen_movi_tl(cpu_ca, 1);
- gen_set_label(l2);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
- }
-}
-
-/* sre - sre. */
-static void gen_sre(DisasContext *ctx)
-{
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
- tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
- tcg_gen_subfi_tl(t1, 32, t1);
- tcg_gen_shl_tl(t1, cpu_gpr[rS(ctx->opcode)], t1);
- tcg_gen_or_tl(t1, t0, t1);
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
- gen_store_spr(SPR_MQ, t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
- }
-}
-
-/* srea - srea. */
-static void gen_srea(DisasContext *ctx)
-{
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
- tcg_gen_rotr_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
- gen_store_spr(SPR_MQ, t0);
- tcg_gen_sar_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
- }
-}
-
-/* sreq */
-static void gen_sreq(DisasContext *ctx)
-{
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- TCGv t2 = tcg_temp_new();
- tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
- tcg_gen_movi_tl(t1, 0xFFFFFFFF);
- tcg_gen_shr_tl(t1, t1, t0);
- tcg_gen_rotr_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
- gen_load_spr(t2, SPR_MQ);
- gen_store_spr(SPR_MQ, t0);
- tcg_gen_and_tl(t0, t0, t1);
- tcg_gen_andc_tl(t2, t2, t1);
- tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t2);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
- }
-}
-
-/* sriq */
-static void gen_sriq(DisasContext *ctx)
-{
- int sh = SH(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
- tcg_gen_shli_tl(t1, cpu_gpr[rS(ctx->opcode)], 32 - sh);
- tcg_gen_or_tl(t1, t0, t1);
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
- gen_store_spr(SPR_MQ, t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
- }
-}
-
-/* srliq */
-static void gen_srliq(DisasContext *ctx)
-{
- int sh = SH(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- tcg_gen_rotri_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
- gen_load_spr(t1, SPR_MQ);
- gen_store_spr(SPR_MQ, t0);
- tcg_gen_andi_tl(t0, t0, (0xFFFFFFFFU >> sh));
- tcg_gen_andi_tl(t1, t1, ~(0xFFFFFFFFU >> sh));
- tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
- }
-}
-
-/* srlq */
-static void gen_srlq(DisasContext *ctx)
-{
- TCGLabel *l1 = gen_new_label();
- TCGLabel *l2 = gen_new_label();
- TCGv t0 = tcg_temp_local_new();
- TCGv t1 = tcg_temp_local_new();
- TCGv t2 = tcg_temp_local_new();
- tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F);
- tcg_gen_movi_tl(t1, 0xFFFFFFFF);
- tcg_gen_shr_tl(t2, t1, t2);
- tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20);
- tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
- gen_load_spr(t0, SPR_MQ);
- tcg_gen_and_tl(cpu_gpr[rA(ctx->opcode)], t0, t2);
- tcg_gen_br(l2);
- gen_set_label(l1);
- tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t2);
- tcg_gen_and_tl(t0, t0, t2);
- gen_load_spr(t1, SPR_MQ);
- tcg_gen_andc_tl(t1, t1, t2);
- tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
- gen_set_label(l2);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
- }
-}
-
-/* srq */
-static void gen_srq(DisasContext *ctx)
-{
- TCGLabel *l1 = gen_new_label();
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
- tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
- tcg_gen_subfi_tl(t1, 32, t1);
- tcg_gen_shl_tl(t1, cpu_gpr[rS(ctx->opcode)], t1);
- tcg_gen_or_tl(t1, t0, t1);
- gen_store_spr(SPR_MQ, t1);
- tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x20);
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
- tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
- tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
- gen_set_label(l1);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- if (unlikely(Rc(ctx->opcode) != 0)) {
- gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
- }
-}
-
-/* PowerPC 602 specific instructions */
-
-/* dsa */
-static void gen_dsa(DisasContext *ctx)
-{
- /* XXX: TODO */
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
-}
-
-/* esa */
-static void gen_esa(DisasContext *ctx)
-{
- /* XXX: TODO */
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
-}
-
-/* mfrom */
-static void gen_mfrom(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- CHK_SV;
- gen_helper_602_mfrom(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
-#endif /* defined(CONFIG_USER_ONLY) */
}
/* 602 - 603 - G2 TLB management */
@@ -6351,10 +5318,10 @@ static void gen_mfrom(DisasContext *ctx)
static void gen_tlbld_6xx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
- CHK_SV;
- gen_helper_6xx_tlbd(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+ CHK_SV(ctx);
+ gen_helper_6xx_tlbd(tcg_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -6362,112 +5329,13 @@ static void gen_tlbld_6xx(DisasContext *ctx)
static void gen_tlbli_6xx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- CHK_SV;
- gen_helper_6xx_tlbi(cpu_env, cpu_gpr[rB(ctx->opcode)]);
-#endif /* defined(CONFIG_USER_ONLY) */
-}
-
-/* 74xx TLB management */
-
-/* tlbld */
-static void gen_tlbld_74xx(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- CHK_SV;
- gen_helper_74xx_tlbd(cpu_env, cpu_gpr[rB(ctx->opcode)]);
-#endif /* defined(CONFIG_USER_ONLY) */
-}
-
-/* tlbli */
-static void gen_tlbli_74xx(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- CHK_SV;
- gen_helper_74xx_tlbi(cpu_env, cpu_gpr[rB(ctx->opcode)]);
-#endif /* defined(CONFIG_USER_ONLY) */
-}
-
-/* POWER instructions not in PowerPC 601 */
-
-/* clf */
-static void gen_clf(DisasContext *ctx)
-{
- /* Cache line flush: implemented as no-op */
-}
-
-/* cli */
-static void gen_cli(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- /* Cache line invalidate: privileged and treated as no-op */
- CHK_SV;
-#endif /* defined(CONFIG_USER_ONLY) */
-}
-
-/* dclst */
-static void gen_dclst(DisasContext *ctx)
-{
- /* Data cache line store: treated as no-op */
-}
-
-static void gen_mfsri(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- int ra = rA(ctx->opcode);
- int rd = rD(ctx->opcode);
- TCGv t0;
-
- CHK_SV;
- t0 = tcg_temp_new();
- gen_addr_reg_index(ctx, t0);
- tcg_gen_extract_tl(t0, t0, 28, 4);
- gen_helper_load_sr(cpu_gpr[rd], cpu_env, t0);
- tcg_temp_free(t0);
- if (ra != 0 && ra != rd) {
- tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rd]);
- }
-#endif /* defined(CONFIG_USER_ONLY) */
-}
-
-static void gen_rac(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
- TCGv t0;
-
- CHK_SV;
- t0 = tcg_temp_new();
- gen_addr_reg_index(ctx, t0);
- gen_helper_rac(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
- tcg_temp_free(t0);
+ CHK_SV(ctx);
+ gen_helper_6xx_tlbi(tcg_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
-static void gen_rfsvc(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- CHK_SV;
-
- gen_helper_rfsvc(cpu_env);
- ctx->base.is_jmp = DISAS_EXIT;
-#endif /* defined(CONFIG_USER_ONLY) */
-}
-
-/* svc is not implemented for now */
-
/* BookE specific instructions */
/* XXX: not implemented on 440 ? */
@@ -6481,15 +5349,14 @@ static void gen_mfapidi(DisasContext *ctx)
static void gen_tlbiva(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv t0;
- CHK_SV;
+ CHK_SV(ctx);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_helper_tlbiva(cpu_env, cpu_gpr[rB(ctx->opcode)]);
- tcg_temp_free(t0);
+ gen_helper_tlbiva(tcg_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -6499,8 +5366,8 @@ static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3,
{
TCGv t0, t1;
- t0 = tcg_temp_local_new();
- t1 = tcg_temp_local_new();
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
switch (opc3 & 0x0D) {
case 0x05:
@@ -6607,8 +5474,6 @@ static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3,
} else {
tcg_gen_mul_tl(cpu_gpr[rt], t0, t1);
}
- tcg_temp_free(t0);
- tcg_temp_free(t1);
if (unlikely(Rc) != 0) {
/* Update Rc0 */
gen_set_Rc0(ctx, cpu_gpr[rt]);
@@ -6712,14 +5577,13 @@ GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C);
static void gen_mfdcr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv dcrn;
- CHK_SV;
- dcrn = tcg_const_tl(SPR(ctx->opcode));
- gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, dcrn);
- tcg_temp_free(dcrn);
+ CHK_SV(ctx);
+ dcrn = tcg_constant_tl(SPR(ctx->opcode));
+ gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], tcg_env, dcrn);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -6727,14 +5591,13 @@ static void gen_mfdcr(DisasContext *ctx)
static void gen_mtdcr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv dcrn;
- CHK_SV;
- dcrn = tcg_const_tl(SPR(ctx->opcode));
- gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]);
- tcg_temp_free(dcrn);
+ CHK_SV(ctx);
+ dcrn = tcg_constant_tl(SPR(ctx->opcode));
+ gen_helper_store_dcr(tcg_env, dcrn, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -6743,10 +5606,10 @@ static void gen_mtdcr(DisasContext *ctx)
static void gen_mfdcrx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
- CHK_SV;
- gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env,
+ CHK_SV(ctx);
+ gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], tcg_env,
cpu_gpr[rA(ctx->opcode)]);
/* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
@@ -6757,35 +5620,19 @@ static void gen_mfdcrx(DisasContext *ctx)
static void gen_mtdcrx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
- CHK_SV;
- gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)],
+ CHK_SV(ctx);
+ gen_helper_store_dcr(tcg_env, cpu_gpr[rA(ctx->opcode)],
cpu_gpr[rS(ctx->opcode)]);
/* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
}
-/* mfdcrux (PPC 460) : user-mode access to DCR */
-static void gen_mfdcrux(DisasContext *ctx)
-{
- gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env,
- cpu_gpr[rA(ctx->opcode)]);
- /* Note: Rc update flag set leads to undefined state of Rc0 */
-}
-
-/* mtdcrux (PPC 460) : user-mode access to DCR */
-static void gen_mtdcrux(DisasContext *ctx)
-{
- gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)],
- cpu_gpr[rS(ctx->opcode)]);
- /* Note: Rc update flag set leads to undefined state of Rc0 */
-}
-
/* dccci */
static void gen_dccci(DisasContext *ctx)
{
- CHK_SV;
+ CHK_SV(ctx);
/* interpreted as no-op */
}
@@ -6793,19 +5640,17 @@ static void gen_dccci(DisasContext *ctx)
static void gen_dcread(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv EA, val;
- CHK_SV;
+ CHK_SV(ctx);
gen_set_access_type(ctx, ACCESS_CACHE);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
val = tcg_temp_new();
gen_qemu_ld32u(ctx, val, EA);
- tcg_temp_free(val);
tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], EA);
- tcg_temp_free(EA);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -6822,14 +5667,14 @@ static void gen_icbt_40x(DisasContext *ctx)
/* iccci */
static void gen_iccci(DisasContext *ctx)
{
- CHK_SV;
+ CHK_SV(ctx);
/* interpreted as no-op */
}
/* icread */
static void gen_icread(DisasContext *ctx)
{
- CHK_SV;
+ CHK_SV(ctx);
/* interpreted as no-op */
}
@@ -6837,11 +5682,11 @@ static void gen_icread(DisasContext *ctx)
static void gen_rfci_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
- CHK_SV;
+ CHK_SV(ctx);
/* Restore CPU state */
- gen_helper_40x_rfci(cpu_env);
+ gen_helper_40x_rfci(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -6849,11 +5694,11 @@ static void gen_rfci_40x(DisasContext *ctx)
static void gen_rfci(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
- CHK_SV;
+ CHK_SV(ctx);
/* Restore CPU state */
- gen_helper_rfci(cpu_env);
+ gen_helper_rfci(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -6864,11 +5709,11 @@ static void gen_rfci(DisasContext *ctx)
static void gen_rfdi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
- CHK_SV;
+ CHK_SV(ctx);
/* Restore CPU state */
- gen_helper_rfdi(cpu_env);
+ gen_helper_rfdi(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -6877,11 +5722,11 @@ static void gen_rfdi(DisasContext *ctx)
static void gen_rfmci(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
- CHK_SV;
+ CHK_SV(ctx);
/* Restore CPU state */
- gen_helper_rfmci(cpu_env);
+ gen_helper_rfmci(tcg_env);
ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -6892,16 +5737,16 @@ static void gen_rfmci(DisasContext *ctx)
static void gen_tlbre_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
- CHK_SV;
+ CHK_SV(ctx);
switch (rB(ctx->opcode)) {
case 0:
- gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], cpu_env,
+ gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], tcg_env,
cpu_gpr[rA(ctx->opcode)]);
break;
case 1:
- gen_helper_4xx_tlbre_lo(cpu_gpr[rD(ctx->opcode)], cpu_env,
+ gen_helper_4xx_tlbre_lo(cpu_gpr[rD(ctx->opcode)], tcg_env,
cpu_gpr[rA(ctx->opcode)]);
break;
default:
@@ -6915,15 +5760,14 @@ static void gen_tlbre_40x(DisasContext *ctx)
static void gen_tlbsx_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv t0;
- CHK_SV;
+ CHK_SV(ctx);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
- tcg_temp_free(t0);
+ gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
if (Rc(ctx->opcode)) {
TCGLabel *l1 = gen_new_label();
tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
@@ -6938,17 +5782,17 @@ static void gen_tlbsx_40x(DisasContext *ctx)
static void gen_tlbwe_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
- CHK_SV;
+ CHK_SV(ctx);
switch (rB(ctx->opcode)) {
case 0:
- gen_helper_4xx_tlbwe_hi(cpu_env, cpu_gpr[rA(ctx->opcode)],
+ gen_helper_4xx_tlbwe_hi(tcg_env, cpu_gpr[rA(ctx->opcode)],
cpu_gpr[rS(ctx->opcode)]);
break;
case 1:
- gen_helper_4xx_tlbwe_lo(cpu_env, cpu_gpr[rA(ctx->opcode)],
+ gen_helper_4xx_tlbwe_lo(tcg_env, cpu_gpr[rA(ctx->opcode)],
cpu_gpr[rS(ctx->opcode)]);
break;
default:
@@ -6964,19 +5808,18 @@ static void gen_tlbwe_40x(DisasContext *ctx)
static void gen_tlbre_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
- CHK_SV;
+ CHK_SV(ctx);
switch (rB(ctx->opcode)) {
case 0:
case 1:
case 2:
{
- TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode));
- gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], cpu_env,
+ TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
+ gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], tcg_env,
t0, cpu_gpr[rA(ctx->opcode)]);
- tcg_temp_free_i32(t0);
}
break;
default:
@@ -6990,15 +5833,14 @@ static void gen_tlbre_440(DisasContext *ctx)
static void gen_tlbsx_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv t0;
- CHK_SV;
+ CHK_SV(ctx);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
- tcg_temp_free(t0);
+ gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
if (Rc(ctx->opcode)) {
TCGLabel *l1 = gen_new_label();
tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
@@ -7013,18 +5855,17 @@ static void gen_tlbsx_440(DisasContext *ctx)
static void gen_tlbwe_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
- CHK_SV;
+ CHK_SV(ctx);
switch (rB(ctx->opcode)) {
case 0:
case 1:
case 2:
{
- TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode));
- gen_helper_440_tlbwe(cpu_env, t0, cpu_gpr[rA(ctx->opcode)],
+ TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
+ gen_helper_440_tlbwe(tcg_env, t0, cpu_gpr[rA(ctx->opcode)],
cpu_gpr[rS(ctx->opcode)]);
- tcg_temp_free_i32(t0);
}
break;
default:
@@ -7040,10 +5881,10 @@ static void gen_tlbwe_440(DisasContext *ctx)
static void gen_tlbre_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
- CHK_SV;
- gen_helper_booke206_tlbre(cpu_env);
+ CHK_SV(ctx);
+ gen_helper_booke206_tlbre(tcg_env);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -7051,21 +5892,18 @@ static void gen_tlbre_booke206(DisasContext *ctx)
static void gen_tlbsx_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv t0;
- CHK_SV;
+ CHK_SV(ctx);
if (rA(ctx->opcode)) {
t0 = tcg_temp_new();
- tcg_gen_mov_tl(t0, cpu_gpr[rD(ctx->opcode)]);
+ tcg_gen_add_tl(t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
} else {
- t0 = tcg_const_tl(0);
+ t0 = cpu_gpr[rB(ctx->opcode)];
}
-
- tcg_gen_add_tl(t0, t0, cpu_gpr[rB(ctx->opcode)]);
- gen_helper_booke206_tlbsx(cpu_env, t0);
- tcg_temp_free(t0);
+ gen_helper_booke206_tlbsx(tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
@@ -7073,73 +5911,69 @@ static void gen_tlbsx_booke206(DisasContext *ctx)
static void gen_tlbwe_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
- CHK_SV;
- gen_helper_booke206_tlbwe(cpu_env);
+ CHK_SV(ctx);
+ gen_helper_booke206_tlbwe(tcg_env);
#endif /* defined(CONFIG_USER_ONLY) */
}
static void gen_tlbivax_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv t0;
- CHK_SV;
+ CHK_SV(ctx);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_helper_booke206_tlbivax(cpu_env, t0);
- tcg_temp_free(t0);
+ gen_helper_booke206_tlbivax(tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
static void gen_tlbilx_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv t0;
- CHK_SV;
+ CHK_SV(ctx);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
switch ((ctx->opcode >> 21) & 0x3) {
case 0:
- gen_helper_booke206_tlbilx0(cpu_env, t0);
+ gen_helper_booke206_tlbilx0(tcg_env, t0);
break;
case 1:
- gen_helper_booke206_tlbilx1(cpu_env, t0);
+ gen_helper_booke206_tlbilx1(tcg_env, t0);
break;
case 3:
- gen_helper_booke206_tlbilx3(cpu_env, t0);
+ gen_helper_booke206_tlbilx3(tcg_env, t0);
break;
default:
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
break;
}
-
- tcg_temp_free(t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
-
/* wrtee */
static void gen_wrtee(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
TCGv t0;
- CHK_SV;
+ CHK_SV(ctx);
t0 = tcg_temp_new();
tcg_gen_andi_tl(t0, cpu_gpr[rD(ctx->opcode)], (1 << MSR_EE));
tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
- tcg_temp_free(t0);
+ gen_ppc_maybe_interrupt(ctx);
/*
* Stop translation to have a chance to raise an exception if we
* just set msr_ee to 1
@@ -7152,11 +5986,12 @@ static void gen_wrtee(DisasContext *ctx)
static void gen_wrteei(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
+ GEN_PRIV(ctx);
#else
- CHK_SV;
+ CHK_SV(ctx);
if (ctx->opcode & 0x00008000) {
tcg_gen_ori_tl(cpu_msr, cpu_msr, (1 << MSR_EE));
+ gen_ppc_maybe_interrupt(ctx);
/* Stop translation to have a chance to raise an exception */
ctx->base.is_jmp = DISAS_EXIT_UPDATE;
} else {
@@ -7170,10 +6005,9 @@ static void gen_wrteei(DisasContext *ctx)
/* dlmzb */
static void gen_dlmzb(DisasContext *ctx)
{
- TCGv_i32 t0 = tcg_const_i32(Rc(ctx->opcode));
- gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], cpu_env,
+ TCGv_i32 t0 = tcg_constant_i32(Rc(ctx->opcode));
+ gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], tcg_env,
cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0);
- tcg_temp_free_i32(t0);
}
/* mbar replaces eieio on 440 */
@@ -7203,68 +6037,6 @@ static void gen_icbt_440(DisasContext *ctx)
*/
}
-/* Embedded.Processor Control */
-
-static void gen_msgclr(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- CHK_HV;
- if (is_book3s_arch2x(ctx)) {
- gen_helper_book3s_msgclr(cpu_env, cpu_gpr[rB(ctx->opcode)]);
- } else {
- gen_helper_msgclr(cpu_env, cpu_gpr[rB(ctx->opcode)]);
- }
-#endif /* defined(CONFIG_USER_ONLY) */
-}
-
-static void gen_msgsnd(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- CHK_HV;
- if (is_book3s_arch2x(ctx)) {
- gen_helper_book3s_msgsnd(cpu_gpr[rB(ctx->opcode)]);
- } else {
- gen_helper_msgsnd(cpu_gpr[rB(ctx->opcode)]);
- }
-#endif /* defined(CONFIG_USER_ONLY) */
-}
-
-#if defined(TARGET_PPC64)
-static void gen_msgclrp(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- CHK_SV;
- gen_helper_book3s_msgclrp(cpu_env, cpu_gpr[rB(ctx->opcode)]);
-#endif /* defined(CONFIG_USER_ONLY) */
-}
-
-static void gen_msgsndp(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- CHK_SV;
- gen_helper_book3s_msgsndp(cpu_env, cpu_gpr[rB(ctx->opcode)]);
-#endif /* defined(CONFIG_USER_ONLY) */
-}
-#endif
-
-static void gen_msgsync(DisasContext *ctx)
-{
-#if defined(CONFIG_USER_ONLY)
- GEN_PRIV;
-#else
- CHK_HV;
-#endif /* defined(CONFIG_USER_ONLY) */
- /* interpreted as no-op */
-}
-
#if defined(TARGET_PPC64)
static void gen_maddld(DisasContext *ctx)
{
@@ -7272,7 +6044,6 @@ static void gen_maddld(DisasContext *ctx)
tcg_gen_mul_i64(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
tcg_gen_add_i64(cpu_gpr[rD(ctx->opcode)], t1, cpu_gpr[rC(ctx->opcode)]);
- tcg_temp_free_i64(t1);
}
/* maddhd maddhdu */
@@ -7293,9 +6064,6 @@ static void gen_maddhd_maddhdu(DisasContext *ctx)
}
tcg_gen_add2_i64(t1, cpu_gpr[rD(ctx->opcode)], lo, hi,
cpu_gpr[rC(ctx->opcode)], t1);
- tcg_temp_free_i64(lo);
- tcg_temp_free_i64(hi);
- tcg_temp_free_i64(t1);
}
#endif /* defined(TARGET_PPC64) */
@@ -7305,7 +6073,7 @@ static void gen_tbegin(DisasContext *ctx)
gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
return;
}
- gen_helper_tbegin(cpu_env);
+ gen_helper_tbegin(tcg_env);
}
#define GEN_TM_NOOP(name) \
@@ -7371,7 +6139,7 @@ static void gen_tcheck(DisasContext *ctx)
#define GEN_TM_PRIV_NOOP(name) \
static inline void gen_##name(DisasContext *ctx) \
{ \
- gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); \
+ gen_priv_opc(ctx); \
}
#else
@@ -7379,7 +6147,7 @@ static inline void gen_##name(DisasContext *ctx) \
#define GEN_TM_PRIV_NOOP(name) \
static inline void gen_##name(DisasContext *ctx) \
{ \
- CHK_SV; \
+ CHK_SV(ctx); \
if (unlikely(!ctx->tm_enabled)) { \
gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); \
return; \
@@ -7401,32 +6169,55 @@ GEN_TM_PRIV_NOOP(trechkpt);
static inline void get_fpr(TCGv_i64 dst, int regno)
{
- tcg_gen_ld_i64(dst, cpu_env, fpr_offset(regno));
+ tcg_gen_ld_i64(dst, tcg_env, fpr_offset(regno));
}
static inline void set_fpr(int regno, TCGv_i64 src)
{
- tcg_gen_st_i64(src, cpu_env, fpr_offset(regno));
+ tcg_gen_st_i64(src, tcg_env, fpr_offset(regno));
+ /*
+ * Before PowerISA v3.1 the result of doubleword 1 of the VSR
+ * corresponding to the target FPR was undefined. However,
+ * most (if not all) real hardware were setting the result to 0.
+ * Starting at ISA v3.1, the result for doubleword 1 is now defined
+ * to be 0.
+ */
+ tcg_gen_st_i64(tcg_constant_i64(0), tcg_env, vsr64_offset(regno, false));
}
static inline void get_avr64(TCGv_i64 dst, int regno, bool high)
{
- tcg_gen_ld_i64(dst, cpu_env, avr64_offset(regno, high));
+ tcg_gen_ld_i64(dst, tcg_env, avr64_offset(regno, high));
}
static inline void set_avr64(int regno, TCGv_i64 src, bool high)
{
- tcg_gen_st_i64(src, cpu_env, avr64_offset(regno, high));
+ tcg_gen_st_i64(src, tcg_env, avr64_offset(regno, high));
}
/*
* Helpers for decodetree used by !function for decoding arguments.
*/
+static int times_2(DisasContext *ctx, int x)
+{
+ return x * 2;
+}
+
static int times_4(DisasContext *ctx, int x)
{
return x * 4;
}
+static int times_16(DisasContext *ctx, int x)
+{
+ return x * 16;
+}
+
+static int64_t dw_compose_ea(DisasContext *ctx, int x)
+{
+ return deposit64(0xfffffffffffffe00, 3, 6, x);
+}
+
/*
* Helpers for trans_* functions to check for specific insns flags.
* Use token pasting to ensure that we use the proper flag with the
@@ -7453,6 +6244,51 @@ static int times_4(DisasContext *ctx, int x)
# define REQUIRE_64BIT(CTX) REQUIRE_INSNS_FLAGS(CTX, 64B)
#endif
+#define REQUIRE_VECTOR(CTX) \
+ do { \
+ if (unlikely(!(CTX)->altivec_enabled)) { \
+ gen_exception((CTX), POWERPC_EXCP_VPU); \
+ return true; \
+ } \
+ } while (0)
+
+#define REQUIRE_VSX(CTX) \
+ do { \
+ if (unlikely(!(CTX)->vsx_enabled)) { \
+ gen_exception((CTX), POWERPC_EXCP_VSXU); \
+ return true; \
+ } \
+ } while (0)
+
+#define REQUIRE_FPU(ctx) \
+ do { \
+ if (unlikely(!(ctx)->fpu_enabled)) { \
+ gen_exception((ctx), POWERPC_EXCP_FPU); \
+ return true; \
+ } \
+ } while (0)
+
+#if !defined(CONFIG_USER_ONLY)
+#define REQUIRE_SV(CTX) \
+ do { \
+ if (unlikely((CTX)->pr)) { \
+ gen_priv_opc(CTX); \
+ return true; \
+ } \
+ } while (0)
+
+#define REQUIRE_HV(CTX) \
+ do { \
+ if (unlikely((CTX)->pr || !(CTX)->hv)) { \
+ gen_priv_opc(CTX); \
+ return true; \
+ } \
+ } while (0)
+#else
+#define REQUIRE_SV(CTX) do { gen_priv_opc(CTX); return true; } while (0)
+#define REQUIRE_HV(CTX) do { gen_priv_opc(CTX); return true; } while (0)
+#endif
+
/*
* Helpers for implementing sets of trans_* functions.
* Defer the implementation of NAME to FUNC, with optional extra arguments.
@@ -7460,16 +6296,56 @@ static int times_4(DisasContext *ctx, int x)
#define TRANS(NAME, FUNC, ...) \
static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
{ return FUNC(ctx, a, __VA_ARGS__); }
+#define TRANS_FLAGS(FLAGS, NAME, FUNC, ...) \
+ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+ { \
+ REQUIRE_INSNS_FLAGS(ctx, FLAGS); \
+ return FUNC(ctx, a, __VA_ARGS__); \
+ }
+#define TRANS_FLAGS2(FLAGS2, NAME, FUNC, ...) \
+ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+ { \
+ REQUIRE_INSNS_FLAGS2(ctx, FLAGS2); \
+ return FUNC(ctx, a, __VA_ARGS__); \
+ }
#define TRANS64(NAME, FUNC, ...) \
static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
{ REQUIRE_64BIT(ctx); return FUNC(ctx, a, __VA_ARGS__); }
+#define TRANS64_FLAGS2(FLAGS2, NAME, FUNC, ...) \
+ static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+ { \
+ REQUIRE_64BIT(ctx); \
+ REQUIRE_INSNS_FLAGS2(ctx, FLAGS2); \
+ return FUNC(ctx, a, __VA_ARGS__); \
+ }
/* TODO: More TRANS* helpers for extra insn_flags checks. */
#include "decode-insn32.c.inc"
#include "decode-insn64.c.inc"
+#include "power8-pmu-regs.c.inc"
+
+/*
+ * Incorporate CIA into the constant when R=1.
+ * Validate that when R=1, RA=0.
+ */
+static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a)
+{
+ d->rt = a->rt;
+ d->ra = a->ra;
+ d->si = a->si;
+ if (a->r) {
+ if (unlikely(a->ra != 0)) {
+ gen_invalid(ctx);
+ return false;
+ }
+ d->si += ctx->cia;
+ }
+ return true;
+}
+
#include "translate/fixedpoint-impl.c.inc"
#include "translate/fp-impl.c.inc"
@@ -7477,68 +6353,35 @@ static int times_4(DisasContext *ctx, int x)
#include "translate/vmx-impl.c.inc"
#include "translate/vsx-impl.c.inc"
-#include "translate/vector-impl.c.inc"
#include "translate/dfp-impl.c.inc"
#include "translate/spe-impl.c.inc"
-/* Handles lfdp, lxsd, lxssp */
+#include "translate/branch-impl.c.inc"
+
+#include "translate/processor-ctrl-impl.c.inc"
+
+#include "translate/storage-ctrl-impl.c.inc"
+
+/* Handles lfdp */
static void gen_dform39(DisasContext *ctx)
{
- switch (ctx->opcode & 0x3) {
- case 0: /* lfdp */
+ if ((ctx->opcode & 0x3) == 0) {
if (ctx->insns_flags2 & PPC2_ISA205) {
return gen_lfdp(ctx);
}
- break;
- case 2: /* lxsd */
- if (ctx->insns_flags2 & PPC2_ISA300) {
- return gen_lxsd(ctx);
- }
- break;
- case 3: /* lxssp */
- if (ctx->insns_flags2 & PPC2_ISA300) {
- return gen_lxssp(ctx);
- }
- break;
}
return gen_invalid(ctx);
}
-/* handles stfdp, lxv, stxsd, stxssp lxvx */
+/* Handles stfdp */
static void gen_dform3D(DisasContext *ctx)
{
- if ((ctx->opcode & 3) == 1) { /* DQ-FORM */
- switch (ctx->opcode & 0x7) {
- case 1: /* lxv */
- if (ctx->insns_flags2 & PPC2_ISA300) {
- return gen_lxv(ctx);
- }
- break;
- case 5: /* stxv */
- if (ctx->insns_flags2 & PPC2_ISA300) {
- return gen_stxv(ctx);
- }
- break;
- }
- } else { /* DS-FORM */
- switch (ctx->opcode & 0x3) {
- case 0: /* stfdp */
- if (ctx->insns_flags2 & PPC2_ISA205) {
- return gen_stfdp(ctx);
- }
- break;
- case 2: /* stxsd */
- if (ctx->insns_flags2 & PPC2_ISA300) {
- return gen_stxsd(ctx);
- }
- break;
- case 3: /* stxssp */
- if (ctx->insns_flags2 & PPC2_ISA300) {
- return gen_stxssp(ctx);
- }
- break;
+ if ((ctx->opcode & 3) == 0) { /* DS-FORM */
+ /* stfdp */
+ if (ctx->insns_flags2 & PPC2_ISA205) {
+ return gen_stfdp(ctx);
}
}
return gen_invalid(ctx);
@@ -7562,20 +6405,15 @@ static void gen_brw(DisasContext *ctx)
/* brh */
static void gen_brh(DisasContext *ctx)
{
- TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 mask = tcg_constant_i64(0x00ff00ff00ff00ffull);
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
- tcg_gen_movi_i64(t0, 0x00ff00ff00ff00ffull);
tcg_gen_shri_i64(t1, cpu_gpr[rS(ctx->opcode)], 8);
- tcg_gen_and_i64(t2, t1, t0);
- tcg_gen_and_i64(t1, cpu_gpr[rS(ctx->opcode)], t0);
+ tcg_gen_and_i64(t2, t1, mask);
+ tcg_gen_and_i64(t1, cpu_gpr[rS(ctx->opcode)], mask);
tcg_gen_shli_i64(t1, t1, 8);
tcg_gen_or_i64(cpu_gpr[rA(ctx->opcode)], t1, t2);
-
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
}
#endif
@@ -7592,8 +6430,6 @@ GEN_HANDLER_E(cmpeqb, 0x1F, 0x00, 0x07, 0x00600000, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(cmpb, 0x1F, 0x1C, 0x0F, 0x00000001, PPC_NONE, PPC2_ISA205),
GEN_HANDLER_E(cmprb, 0x1F, 0x00, 0x06, 0x00400001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(isel, 0x1F, 0x0F, 0xFF, 0x00000001, PPC_ISEL),
-GEN_HANDLER(addic, 0x0C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
-GEN_HANDLER2(addic_, "addic.", 0x0D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(mulhw, 0x1F, 0x0B, 0x02, 0x00000400, PPC_INTEGER),
GEN_HANDLER(mulhwu, 0x1F, 0x0B, 0x00, 0x00000400, PPC_INTEGER),
GEN_HANDLER(mullw, 0x1F, 0x0B, 0x07, 0x00000000, PPC_INTEGER),
@@ -7604,7 +6440,6 @@ GEN_HANDLER(mulld, 0x1F, 0x09, 0x07, 0x00000000, PPC_64B),
#endif
GEN_HANDLER(neg, 0x1F, 0x08, 0x03, 0x0000F800, PPC_INTEGER),
GEN_HANDLER(nego, 0x1F, 0x08, 0x13, 0x0000F800, PPC_INTEGER),
-GEN_HANDLER(subfic, 0x08, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(cntlzw, 0x1F, 0x1A, 0x00, 0x00000000, PPC_INTEGER),
@@ -7647,13 +6482,9 @@ GEN_HANDLER2_E(extswsli0, "extswsli", 0x1F, 0x1A, 0x1B, 0x00000000,
GEN_HANDLER2_E(extswsli1, "extswsli", 0x1F, 0x1B, 0x1B, 0x00000000,
PPC_NONE, PPC2_ISA300),
#endif
-#if defined(TARGET_PPC64)
-GEN_HANDLER(lq, 0x38, 0xFF, 0xFF, 0x00000000, PPC_64BX),
-GEN_HANDLER(std, 0x3E, 0xFF, 0xFF, 0x00000000, PPC_64B),
-#endif
/* handles lfdp, lxsd, lxssp */
GEN_HANDLER_E(dform39, 0x39, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
-/* handles stfdp, lxv, stxsd, stxssp, stxv */
+/* handles stfdp, stxsd, stxssp */
GEN_HANDLER_E(dform3D, 0x3D, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
@@ -7680,8 +6511,9 @@ GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B),
GEN_HANDLER_E(stqcx_, 0x1F, 0x16, 0x05, 0, PPC_NONE, PPC2_LSQ_ISA207),
#endif
GEN_HANDLER(sync, 0x1F, 0x16, 0x12, 0x039FF801, PPC_MEM_SYNC),
-GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x03FFF801, PPC_WAIT),
-GEN_HANDLER_E(wait, 0x1F, 0x1E, 0x00, 0x039FF801, PPC_NONE, PPC2_ISA300),
+/* ISA v3.0 changed the extended opcode from 62 to 30 */
+GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x039FF801, PPC_WAIT),
+GEN_HANDLER_E(wait, 0x1F, 0x1E, 0x00, 0x039CF801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW),
@@ -7736,6 +6568,7 @@ GEN_HANDLER_E(dcbtep, 0x1F, 0x1F, 0x09, 0x00000001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER(dcbtst, 0x1F, 0x16, 0x07, 0x00000001, PPC_CACHE),
GEN_HANDLER_E(dcbtstep, 0x1F, 0x1F, 0x07, 0x00000001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER_E(dcbtls, 0x1F, 0x06, 0x05, 0x02000001, PPC_BOOKE, PPC2_BOOKE206),
+GEN_HANDLER_E(dcblc, 0x1F, 0x06, 0x0c, 0x02000001, PPC_BOOKE, PPC2_BOOKE206),
GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03C00001, PPC_CACHE_DCBZ),
GEN_HANDLER_E(dcbzep, 0x1F, 0x1F, 0x1F, 0x03C00001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER(dst, 0x1F, 0x16, 0x0A, 0x01800001, PPC_ALTIVEC),
@@ -7755,92 +6588,23 @@ GEN_HANDLER2(mfsrin_64b, "mfsrin", 0x1F, 0x13, 0x14, 0x001F0001,
GEN_HANDLER2(mtsr_64b, "mtsr", 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT_64B),
GEN_HANDLER2(mtsrin_64b, "mtsrin", 0x1F, 0x12, 0x07, 0x001F0001,
PPC_SEGMENT_64B),
-GEN_HANDLER2(slbmte, "slbmte", 0x1F, 0x12, 0x0C, 0x001F0001, PPC_SEGMENT_64B),
-GEN_HANDLER2(slbmfee, "slbmfee", 0x1F, 0x13, 0x1C, 0x001F0001, PPC_SEGMENT_64B),
-GEN_HANDLER2(slbmfev, "slbmfev", 0x1F, 0x13, 0x1A, 0x001F0001, PPC_SEGMENT_64B),
-GEN_HANDLER2(slbfee_, "slbfee.", 0x1F, 0x13, 0x1E, 0x001F0000, PPC_SEGMENT_64B),
#endif
GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA),
/*
* XXX Those instructions will need to be handled differently for
* different ISA versions
*/
-GEN_HANDLER(tlbiel, 0x1F, 0x12, 0x08, 0x001F0001, PPC_MEM_TLBIE),
-GEN_HANDLER(tlbie, 0x1F, 0x12, 0x09, 0x001F0001, PPC_MEM_TLBIE),
-GEN_HANDLER_E(tlbiel, 0x1F, 0x12, 0x08, 0x00100001, PPC_NONE, PPC2_ISA300),
-GEN_HANDLER_E(tlbie, 0x1F, 0x12, 0x09, 0x00100001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC),
-#if defined(TARGET_PPC64)
-GEN_HANDLER(slbia, 0x1F, 0x12, 0x0F, 0x031FFC01, PPC_SLBI),
-GEN_HANDLER(slbie, 0x1F, 0x12, 0x0D, 0x03FF0001, PPC_SLBI),
-GEN_HANDLER_E(slbieg, 0x1F, 0x12, 0x0E, 0x001F0001, PPC_NONE, PPC2_ISA300),
-GEN_HANDLER_E(slbsync, 0x1F, 0x12, 0x0A, 0x03FFF801, PPC_NONE, PPC2_ISA300),
-#endif
GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN),
GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN),
-GEN_HANDLER(abs, 0x1F, 0x08, 0x0B, 0x0000F800, PPC_POWER_BR),
-GEN_HANDLER(abso, 0x1F, 0x08, 0x1B, 0x0000F800, PPC_POWER_BR),
-GEN_HANDLER(clcs, 0x1F, 0x10, 0x13, 0x0000F800, PPC_POWER_BR),
-GEN_HANDLER(div, 0x1F, 0x0B, 0x0A, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(divo, 0x1F, 0x0B, 0x1A, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(divs, 0x1F, 0x0B, 0x0B, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(divso, 0x1F, 0x0B, 0x1B, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(doz, 0x1F, 0x08, 0x08, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(dozo, 0x1F, 0x08, 0x18, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(dozi, 0x09, 0xFF, 0xFF, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(lscbx, 0x1F, 0x15, 0x08, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(maskg, 0x1F, 0x1D, 0x00, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(maskir, 0x1F, 0x1D, 0x10, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(mul, 0x1F, 0x0B, 0x03, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(mulo, 0x1F, 0x0B, 0x13, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(nabs, 0x1F, 0x08, 0x0F, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(nabso, 0x1F, 0x08, 0x1F, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(rlmi, 0x16, 0xFF, 0xFF, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(rrib, 0x1F, 0x19, 0x10, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(sle, 0x1F, 0x19, 0x04, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(sleq, 0x1F, 0x19, 0x06, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(sliq, 0x1F, 0x18, 0x05, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(slliq, 0x1F, 0x18, 0x07, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(sllq, 0x1F, 0x18, 0x06, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(slq, 0x1F, 0x18, 0x04, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(sraiq, 0x1F, 0x18, 0x1D, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(sraq, 0x1F, 0x18, 0x1C, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(sre, 0x1F, 0x19, 0x14, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(srea, 0x1F, 0x19, 0x1C, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(sreq, 0x1F, 0x19, 0x16, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(sriq, 0x1F, 0x18, 0x15, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(srliq, 0x1F, 0x18, 0x17, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(srlq, 0x1F, 0x18, 0x16, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(srq, 0x1F, 0x18, 0x14, 0x00000000, PPC_POWER_BR),
-GEN_HANDLER(dsa, 0x1F, 0x14, 0x13, 0x03FFF801, PPC_602_SPEC),
-GEN_HANDLER(esa, 0x1F, 0x14, 0x12, 0x03FFF801, PPC_602_SPEC),
-GEN_HANDLER(mfrom, 0x1F, 0x09, 0x08, 0x03E0F801, PPC_602_SPEC),
GEN_HANDLER2(tlbld_6xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_6xx_TLB),
GEN_HANDLER2(tlbli_6xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_6xx_TLB),
-GEN_HANDLER2(tlbld_74xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_74xx_TLB),
-GEN_HANDLER2(tlbli_74xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_74xx_TLB),
-GEN_HANDLER(clf, 0x1F, 0x16, 0x03, 0x03E00000, PPC_POWER),
-GEN_HANDLER(cli, 0x1F, 0x16, 0x0F, 0x03E00000, PPC_POWER),
-GEN_HANDLER(dclst, 0x1F, 0x16, 0x13, 0x03E00000, PPC_POWER),
-GEN_HANDLER(mfsri, 0x1F, 0x13, 0x13, 0x00000001, PPC_POWER),
-GEN_HANDLER(rac, 0x1F, 0x12, 0x19, 0x00000001, PPC_POWER),
-GEN_HANDLER(rfsvc, 0x13, 0x12, 0x02, 0x03FFF0001, PPC_POWER),
-GEN_HANDLER(lfq, 0x38, 0xFF, 0xFF, 0x00000003, PPC_POWER2),
-GEN_HANDLER(lfqu, 0x39, 0xFF, 0xFF, 0x00000003, PPC_POWER2),
-GEN_HANDLER(lfqux, 0x1F, 0x17, 0x19, 0x00000001, PPC_POWER2),
-GEN_HANDLER(lfqx, 0x1F, 0x17, 0x18, 0x00000001, PPC_POWER2),
-GEN_HANDLER(stfq, 0x3C, 0xFF, 0xFF, 0x00000003, PPC_POWER2),
-GEN_HANDLER(stfqu, 0x3D, 0xFF, 0xFF, 0x00000003, PPC_POWER2),
-GEN_HANDLER(stfqux, 0x1F, 0x17, 0x1D, 0x00000001, PPC_POWER2),
-GEN_HANDLER(stfqx, 0x1F, 0x17, 0x1C, 0x00000001, PPC_POWER2),
GEN_HANDLER(mfapidi, 0x1F, 0x13, 0x08, 0x0000F801, PPC_MFAPIDI),
GEN_HANDLER(tlbiva, 0x1F, 0x12, 0x18, 0x03FFF801, PPC_TLBIVA),
GEN_HANDLER(mfdcr, 0x1F, 0x03, 0x0A, 0x00000001, PPC_DCR),
GEN_HANDLER(mtdcr, 0x1F, 0x03, 0x0E, 0x00000001, PPC_DCR),
GEN_HANDLER(mfdcrx, 0x1F, 0x03, 0x08, 0x00000000, PPC_DCRX),
GEN_HANDLER(mtdcrx, 0x1F, 0x03, 0x0C, 0x00000000, PPC_DCRX),
-GEN_HANDLER(mfdcrux, 0x1F, 0x03, 0x09, 0x00000000, PPC_DCRUX),
-GEN_HANDLER(mtdcrux, 0x1F, 0x03, 0x0D, 0x00000000, PPC_DCRUX),
GEN_HANDLER(dccci, 0x1F, 0x06, 0x0E, 0x03E00001, PPC_4xx_COMMON),
GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON),
GEN_HANDLER2(icbt_40x, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, PPC_40x_ICBT),
@@ -7866,12 +6630,6 @@ GEN_HANDLER2_E(tlbivax_booke206, "tlbivax", 0x1F, 0x12, 0x18, 0x00000001,
PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER2_E(tlbilx_booke206, "tlbilx", 0x1F, 0x12, 0x00, 0x03800001,
PPC_NONE, PPC2_BOOKE206),
-GEN_HANDLER2_E(msgsnd, "msgsnd", 0x1F, 0x0E, 0x06, 0x03ff0001,
- PPC_NONE, PPC2_PRCNTL),
-GEN_HANDLER2_E(msgclr, "msgclr", 0x1F, 0x0E, 0x07, 0x03ff0001,
- PPC_NONE, PPC2_PRCNTL),
-GEN_HANDLER2_E(msgsync, "msgsync", 0x1F, 0x16, 0x1B, 0x00000000,
- PPC_NONE, PPC2_PRCNTL),
GEN_HANDLER(wrtee, 0x1F, 0x03, 0x04, 0x000FFC01, PPC_WRTEE),
GEN_HANDLER(wrteei, 0x1F, 0x03, 0x05, 0x000E7C01, PPC_WRTEE),
GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC),
@@ -7886,36 +6644,12 @@ GEN_HANDLER(lvsl, 0x1f, 0x06, 0x00, 0x00000001, PPC_ALTIVEC),
GEN_HANDLER(lvsr, 0x1f, 0x06, 0x01, 0x00000001, PPC_ALTIVEC),
GEN_HANDLER(mfvscr, 0x04, 0x2, 0x18, 0x001ff800, PPC_ALTIVEC),
GEN_HANDLER(mtvscr, 0x04, 0x2, 0x19, 0x03ff0000, PPC_ALTIVEC),
-GEN_HANDLER(vmladduhm, 0x04, 0x11, 0xFF, 0x00000000, PPC_ALTIVEC),
#if defined(TARGET_PPC64)
GEN_HANDLER_E(maddhd_maddhdu, 0x04, 0x18, 0xFF, 0x00000000, PPC_NONE,
PPC2_ISA300),
GEN_HANDLER_E(maddld, 0x04, 0x19, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300),
-GEN_HANDLER2_E(msgsndp, "msgsndp", 0x1F, 0x0E, 0x04, 0x03ff0001,
- PPC_NONE, PPC2_ISA207S),
-GEN_HANDLER2_E(msgclrp, "msgclrp", 0x1F, 0x0E, 0x05, 0x03ff0001,
- PPC_NONE, PPC2_ISA207S),
#endif
-#undef GEN_INT_ARITH_ADD
-#undef GEN_INT_ARITH_ADD_CONST
-#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \
-GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x00000000, PPC_INTEGER),
-#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \
- add_ca, compute_ca, compute_ov) \
-GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x0000F800, PPC_INTEGER),
-GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
-GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
-GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
-GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
-GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
-GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
-GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
-GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
-GEN_HANDLER_E(addex, 0x1F, 0x0A, 0x05, 0x00000000, PPC_NONE, PPC2_ISA300),
-GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
-GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
-
#undef GEN_INT_ARITH_DIVW
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \
GEN_HANDLER(name, 0x1F, 0x0B, opc3, 0x00000000, PPC_INTEGER)
@@ -7954,24 +6688,6 @@ GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02),
GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17),
#endif
-#undef GEN_INT_ARITH_SUBF
-#undef GEN_INT_ARITH_SUBF_CONST
-#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \
-GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x00000000, PPC_INTEGER),
-#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \
- add_ca, compute_ca, compute_ov) \
-GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x0000F800, PPC_INTEGER),
-GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
-GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
-GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
-GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
-GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
-GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
-GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
-GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
-GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
-GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
-
#undef GEN_LOGICAL1
#undef GEN_LOGICAL2
#define GEN_LOGICAL2(name, tcg_op, opc, type) \
@@ -8039,7 +6755,7 @@ GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
#if defined(TARGET_PPC64)
-GEN_LDEPX(ld, DEF_MEMOP(MO_Q), 0x1D, 0x00)
+GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
#endif
#undef GEN_STX_E
@@ -8065,7 +6781,7 @@ GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
#if defined(TARGET_PPC64)
-GEN_STEPX(std, DEF_MEMOP(MO_Q), 0x1D, 0x04)
+GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1D, 0x04)
#endif
#undef GEN_CRLOGIC
@@ -8155,8 +6871,6 @@ GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, \
#include "translate/vsx-ops.c.inc"
-#include "translate/dfp-ops.c.inc"
-
#include "translate/spe-ops.c.inc"
};
@@ -8348,7 +7062,7 @@ static int test_opcode_table(opc_handler_t **table, int len)
tmp = test_opcode_table(ind_table(table[i]),
PPC_CPU_INDIRECT_OPCODES_LEN);
if (tmp == 0) {
- free(table[i]);
+ g_free(table[i]);
table[i] = &invalid_handler;
} else {
count++;
@@ -8510,7 +7224,7 @@ static bool decode_legacy(PowerPCCPU *cpu, DisasContext *ctx, uint32_t insn)
static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
- CPUPPCState *env = cs->env_ptr;
+ CPUPPCState *env = cpu_env(cs);
uint32_t hflags = ctx->base.tb->flags;
ctx->spr_cb = env->spr_cb;
@@ -8530,7 +7244,6 @@ static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->has_cfar = !!(env->flags & POWERPC_FLAG_CFAR);
#endif
ctx->lazy_tlb_flush = env->mmu_model == POWERPC_MMU_32B
- || env->mmu_model == POWERPC_MMU_601
|| env->mmu_model & POWERPC_MMU_64;
ctx->fpu_enabled = (hflags >> HFLAGS_FP) & 1;
@@ -8539,21 +7252,21 @@ static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->vsx_enabled = (hflags >> HFLAGS_VSX) & 1;
ctx->tm_enabled = (hflags >> HFLAGS_TM) & 1;
ctx->gtse = (hflags >> HFLAGS_GTSE) & 1;
+ ctx->hr = (hflags >> HFLAGS_HR) & 1;
+ ctx->mmcr0_pmcc0 = (hflags >> HFLAGS_PMCC0) & 1;
+ ctx->mmcr0_pmcc1 = (hflags >> HFLAGS_PMCC1) & 1;
+ ctx->mmcr0_pmcjce = (hflags >> HFLAGS_PMCJCE) & 1;
+ ctx->pmc_other = (hflags >> HFLAGS_PMC_OTHER) & 1;
+ ctx->pmu_insn_cnt = (hflags >> HFLAGS_INSN_CNT) & 1;
ctx->singlestep_enabled = 0;
if ((hflags >> HFLAGS_SE) & 1) {
ctx->singlestep_enabled |= CPU_SINGLE_STEP;
+ ctx->base.max_insns = 1;
}
if ((hflags >> HFLAGS_BE) & 1) {
ctx->singlestep_enabled |= CPU_BRANCH_STEP;
}
- if (unlikely(ctx->base.singlestep_enabled)) {
- ctx->singlestep_enabled |= GDBSTUB_SINGLE_STEP;
- }
-
- if (ctx->singlestep_enabled & (CPU_SINGLE_STEP | GDBSTUB_SINGLE_STEP)) {
- ctx->base.max_insns = 1;
- }
}
static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs)
@@ -8575,7 +7288,7 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = cs->env_ptr;
+ CPUPPCState *env = cpu_env(cs);
target_ulong pc;
uint32_t insn;
bool ok;
@@ -8613,8 +7326,6 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
if (ctx->base.is_jmp == DISAS_NEXT && !(pc & ~TARGET_PAGE_MASK)) {
ctx->base.is_jmp = DISAS_TOO_MANY;
}
-
- translator_loop_temp_check(&ctx->base);
}
static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
@@ -8622,7 +7333,6 @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
DisasContext *ctx = container_of(dcbase, DisasContext, base);
DisasJumpType is_jmp = ctx->base.is_jmp;
target_ulong nip = ctx->base.pc_next;
- int sse;
if (is_jmp == DISAS_NORETURN) {
/* We have already exited the TB. */
@@ -8630,8 +7340,9 @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
}
/* Honor single stepping. */
- sse = ctx->singlestep_enabled & (CPU_SINGLE_STEP | GDBSTUB_SINGLE_STEP);
- if (unlikely(sse)) {
+ if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP)) {
+ bool rfi_type = false;
+
switch (is_jmp) {
case DISAS_TOO_MANY:
case DISAS_EXIT_UPDATE:
@@ -8640,25 +7351,26 @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
break;
case DISAS_EXIT:
case DISAS_CHAIN:
+ /*
+ * This is a heuristic, to put it kindly. The rfi class of
+ * instructions are among the few outside branches that change
+ * NIP without taking an interrupt. Single step trace interrupts
+ * do not fire on completion of these instructions.
+ */
+ rfi_type = true;
break;
default:
g_assert_not_reached();
}
- if (sse & GDBSTUB_SINGLE_STEP) {
- gen_debug_exception(ctx);
- return;
- }
- /* else CPU_SINGLE_STEP... */
- if (nip <= 0x100 || nip > 0xf00) {
- gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx)));
- return;
- }
+ gen_debug_exception(ctx, rfi_type);
+ return;
}
switch (is_jmp) {
case DISAS_TOO_MANY:
if (use_goto_tb(ctx, nip)) {
+ pmu_count_insns(ctx);
tcg_gen_goto_tb(0);
gen_update_nip(ctx, nip);
tcg_gen_exit_tb(ctx->base.tb, 0);
@@ -8669,6 +7381,14 @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
gen_update_nip(ctx, nip);
/* fall through */
case DISAS_CHAIN:
+ /*
+ * tcg_gen_lookup_and_goto_ptr will exit the TB if
+ * CF_NO_GOTO_PTR is set. Count insns now.
+ */
+ if (ctx->base.tb->flags & CF_NO_GOTO_PTR) {
+ pmu_count_insns(ctx);
+ }
+
tcg_gen_lookup_and_goto_ptr();
break;
@@ -8676,6 +7396,7 @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
gen_update_nip(ctx, nip);
/* fall through */
case DISAS_EXIT:
+ pmu_count_insns(ctx);
tcg_gen_exit_tb(NULL, 0);
break;
@@ -8684,10 +7405,11 @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
}
}
-static void ppc_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
+static void ppc_tr_disas_log(const DisasContextBase *dcbase,
+ CPUState *cs, FILE *logfile)
{
- qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
- log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
+ fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
+ target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
}
static const TranslatorOps ppc_tr_ops = {
@@ -8699,15 +7421,10 @@ static const TranslatorOps ppc_tr_ops = {
.disas_log = ppc_tr_disas_log,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
+ vaddr pc, void *host_pc)
{
DisasContext ctx;
- translator_loop(&ppc_tr_ops, &ctx.base, cs, tb, max_insns);
-}
-
-void restore_state_to_opc(CPUPPCState *env, TranslationBlock *tb,
- target_ulong *data)
-{
- env->nip = data[0];
+ translator_loop(cs, tb, max_insns, pc, host_pc, &ppc_tr_ops, &ctx.base);
}
diff --git a/target/ppc/translate/branch-impl.c.inc b/target/ppc/translate/branch-impl.c.inc
new file mode 100644
index 0000000000..fb0fcf30cc
--- /dev/null
+++ b/target/ppc/translate/branch-impl.c.inc
@@ -0,0 +1,33 @@
+/*
+ * Power ISA decode for branch instructions
+ *
+ * Copyright IBM Corp. 2021
+ *
+ * Authors:
+ * Daniel Henrique Barboza <danielhb413@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+
+static bool trans_RFEBB(DisasContext *ctx, arg_XL_s *arg)
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA207S);
+
+ translator_io_start(&ctx->base);
+ gen_update_cfar(ctx, ctx->cia);
+ gen_helper_rfebb(tcg_env, cpu_gpr[arg->s]);
+
+ ctx->base.is_jmp = DISAS_CHAIN;
+
+ return true;
+}
+#else
+static bool trans_RFEBB(DisasContext *ctx, arg_XL_s *arg)
+{
+ gen_invalid(ctx);
+ return true;
+}
+#endif
diff --git a/target/ppc/translate/dfp-impl.c.inc b/target/ppc/translate/dfp-impl.c.inc
index 6c556dc2e1..371076582b 100644
--- a/target/ppc/translate/dfp-impl.c.inc
+++ b/target/ppc/translate/dfp-impl.c.inc
@@ -3,230 +3,207 @@
static inline TCGv_ptr gen_fprp_ptr(int reg)
{
TCGv_ptr r = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(r, cpu_env, offsetof(CPUPPCState, vsr[reg].u64[0]));
+ tcg_gen_addi_ptr(r, tcg_env, offsetof(CPUPPCState, vsr[reg].u64[0]));
return r;
}
-#define GEN_DFP_T_A_B_Rc(name) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_ptr rd, ra, rb; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_update_nip(ctx, ctx->base.pc_next - 4); \
- rd = gen_fprp_ptr(rD(ctx->opcode)); \
- ra = gen_fprp_ptr(rA(ctx->opcode)); \
- rb = gen_fprp_ptr(rB(ctx->opcode)); \
- gen_helper_##name(cpu_env, rd, ra, rb); \
- if (unlikely(Rc(ctx->opcode) != 0)) { \
- gen_set_cr1_from_fpscr(ctx); \
- } \
- tcg_temp_free_ptr(rd); \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_ptr(rb); \
+#define TRANS_DFP_T_A_B_Rc(NAME) \
+static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+{ \
+ TCGv_ptr rt, ra, rb; \
+ REQUIRE_INSNS_FLAGS2(ctx, DFP); \
+ REQUIRE_FPU(ctx); \
+ rt = gen_fprp_ptr(a->rt); \
+ ra = gen_fprp_ptr(a->ra); \
+ rb = gen_fprp_ptr(a->rb); \
+ gen_helper_##NAME(tcg_env, rt, ra, rb); \
+ if (unlikely(a->rc)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ return true; \
}
-#define GEN_DFP_BF_A_B(name) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_ptr ra, rb; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_update_nip(ctx, ctx->base.pc_next - 4); \
- ra = gen_fprp_ptr(rA(ctx->opcode)); \
- rb = gen_fprp_ptr(rB(ctx->opcode)); \
- gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \
- cpu_env, ra, rb); \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_ptr(rb); \
+#define TRANS_DFP_BF_A_B(NAME) \
+static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+{ \
+ TCGv_ptr ra, rb; \
+ REQUIRE_INSNS_FLAGS2(ctx, DFP); \
+ REQUIRE_FPU(ctx); \
+ ra = gen_fprp_ptr(a->ra); \
+ rb = gen_fprp_ptr(a->rb); \
+ gen_helper_##NAME(cpu_crf[a->bf], \
+ tcg_env, ra, rb); \
+ return true; \
}
-#define GEN_DFP_BF_I_B(name) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_i32 uim; \
- TCGv_ptr rb; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_update_nip(ctx, ctx->base.pc_next - 4); \
- uim = tcg_const_i32(UIMM5(ctx->opcode)); \
- rb = gen_fprp_ptr(rB(ctx->opcode)); \
- gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \
- cpu_env, uim, rb); \
- tcg_temp_free_i32(uim); \
- tcg_temp_free_ptr(rb); \
+#define TRANS_DFP_BF_I_B(NAME) \
+static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+{ \
+ TCGv_ptr rb; \
+ REQUIRE_INSNS_FLAGS2(ctx, DFP); \
+ REQUIRE_FPU(ctx); \
+ rb = gen_fprp_ptr(a->rb); \
+ gen_helper_##NAME(cpu_crf[a->bf], \
+ tcg_env, tcg_constant_i32(a->uim), rb);\
+ return true; \
}
-#define GEN_DFP_BF_A_DCM(name) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_ptr ra; \
- TCGv_i32 dcm; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_update_nip(ctx, ctx->base.pc_next - 4); \
- ra = gen_fprp_ptr(rA(ctx->opcode)); \
- dcm = tcg_const_i32(DCM(ctx->opcode)); \
- gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \
- cpu_env, ra, dcm); \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_i32(dcm); \
+#define TRANS_DFP_BF_A_DCM(NAME) \
+static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+{ \
+ TCGv_ptr ra; \
+ REQUIRE_INSNS_FLAGS2(ctx, DFP); \
+ REQUIRE_FPU(ctx); \
+ ra = gen_fprp_ptr(a->fra); \
+ gen_helper_##NAME(cpu_crf[a->bf], \
+ tcg_env, ra, tcg_constant_i32(a->dm)); \
+ return true; \
}
-#define GEN_DFP_T_B_U32_U32_Rc(name, u32f1, u32f2) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_ptr rt, rb; \
- TCGv_i32 u32_1, u32_2; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_update_nip(ctx, ctx->base.pc_next - 4); \
- rt = gen_fprp_ptr(rD(ctx->opcode)); \
- rb = gen_fprp_ptr(rB(ctx->opcode)); \
- u32_1 = tcg_const_i32(u32f1(ctx->opcode)); \
- u32_2 = tcg_const_i32(u32f2(ctx->opcode)); \
- gen_helper_##name(cpu_env, rt, rb, u32_1, u32_2); \
- if (unlikely(Rc(ctx->opcode) != 0)) { \
- gen_set_cr1_from_fpscr(ctx); \
- } \
- tcg_temp_free_ptr(rt); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_i32(u32_1); \
- tcg_temp_free_i32(u32_2); \
+#define TRANS_DFP_T_B_U32_U32_Rc(NAME, U32F1, U32F2) \
+static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+{ \
+ TCGv_ptr rt, rb; \
+ REQUIRE_INSNS_FLAGS2(ctx, DFP); \
+ REQUIRE_FPU(ctx); \
+ rt = gen_fprp_ptr(a->frt); \
+ rb = gen_fprp_ptr(a->frb); \
+ gen_helper_##NAME(tcg_env, rt, rb, \
+ tcg_constant_i32(a->U32F1), \
+ tcg_constant_i32(a->U32F2)); \
+ if (unlikely(a->rc)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ return true; \
}
-#define GEN_DFP_T_A_B_I32_Rc(name, i32fld) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_ptr rt, ra, rb; \
- TCGv_i32 i32; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_update_nip(ctx, ctx->base.pc_next - 4); \
- rt = gen_fprp_ptr(rD(ctx->opcode)); \
- ra = gen_fprp_ptr(rA(ctx->opcode)); \
- rb = gen_fprp_ptr(rB(ctx->opcode)); \
- i32 = tcg_const_i32(i32fld(ctx->opcode)); \
- gen_helper_##name(cpu_env, rt, ra, rb, i32); \
- if (unlikely(Rc(ctx->opcode) != 0)) { \
- gen_set_cr1_from_fpscr(ctx); \
- } \
- tcg_temp_free_ptr(rt); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_i32(i32); \
- }
-
-#define GEN_DFP_T_B_Rc(name) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_ptr rt, rb; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_update_nip(ctx, ctx->base.pc_next - 4); \
- rt = gen_fprp_ptr(rD(ctx->opcode)); \
- rb = gen_fprp_ptr(rB(ctx->opcode)); \
- gen_helper_##name(cpu_env, rt, rb); \
- if (unlikely(Rc(ctx->opcode) != 0)) { \
- gen_set_cr1_from_fpscr(ctx); \
- } \
- tcg_temp_free_ptr(rt); \
- tcg_temp_free_ptr(rb); \
- }
-
-#define GEN_DFP_T_FPR_I32_Rc(name, fprfld, i32fld) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_ptr rt, rs; \
- TCGv_i32 i32; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_update_nip(ctx, ctx->base.pc_next - 4); \
- rt = gen_fprp_ptr(rD(ctx->opcode)); \
- rs = gen_fprp_ptr(fprfld(ctx->opcode)); \
- i32 = tcg_const_i32(i32fld(ctx->opcode)); \
- gen_helper_##name(cpu_env, rt, rs, i32); \
- if (unlikely(Rc(ctx->opcode) != 0)) { \
- gen_set_cr1_from_fpscr(ctx); \
- } \
- tcg_temp_free_ptr(rt); \
- tcg_temp_free_ptr(rs); \
- tcg_temp_free_i32(i32); \
+#define TRANS_DFP_T_A_B_I32_Rc(NAME, I32FLD) \
+static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+{ \
+ TCGv_ptr rt, ra, rb; \
+ REQUIRE_INSNS_FLAGS2(ctx, DFP); \
+ REQUIRE_FPU(ctx); \
+ rt = gen_fprp_ptr(a->frt); \
+ ra = gen_fprp_ptr(a->fra); \
+ rb = gen_fprp_ptr(a->frb); \
+ gen_helper_##NAME(tcg_env, rt, ra, rb, \
+ tcg_constant_i32(a->I32FLD)); \
+ if (unlikely(a->rc)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ return true; \
}
-GEN_DFP_T_A_B_Rc(dadd)
-GEN_DFP_T_A_B_Rc(daddq)
-GEN_DFP_T_A_B_Rc(dsub)
-GEN_DFP_T_A_B_Rc(dsubq)
-GEN_DFP_T_A_B_Rc(dmul)
-GEN_DFP_T_A_B_Rc(dmulq)
-GEN_DFP_T_A_B_Rc(ddiv)
-GEN_DFP_T_A_B_Rc(ddivq)
-GEN_DFP_BF_A_B(dcmpu)
-GEN_DFP_BF_A_B(dcmpuq)
-GEN_DFP_BF_A_B(dcmpo)
-GEN_DFP_BF_A_B(dcmpoq)
-GEN_DFP_BF_A_DCM(dtstdc)
-GEN_DFP_BF_A_DCM(dtstdcq)
-GEN_DFP_BF_A_DCM(dtstdg)
-GEN_DFP_BF_A_DCM(dtstdgq)
-GEN_DFP_BF_A_B(dtstex)
-GEN_DFP_BF_A_B(dtstexq)
-GEN_DFP_BF_A_B(dtstsf)
-GEN_DFP_BF_A_B(dtstsfq)
-GEN_DFP_BF_I_B(dtstsfi)
-GEN_DFP_BF_I_B(dtstsfiq)
-GEN_DFP_T_B_U32_U32_Rc(dquai, SIMM5, RMC)
-GEN_DFP_T_B_U32_U32_Rc(dquaiq, SIMM5, RMC)
-GEN_DFP_T_A_B_I32_Rc(dqua, RMC)
-GEN_DFP_T_A_B_I32_Rc(dquaq, RMC)
-GEN_DFP_T_A_B_I32_Rc(drrnd, RMC)
-GEN_DFP_T_A_B_I32_Rc(drrndq, RMC)
-GEN_DFP_T_B_U32_U32_Rc(drintx, FPW, RMC)
-GEN_DFP_T_B_U32_U32_Rc(drintxq, FPW, RMC)
-GEN_DFP_T_B_U32_U32_Rc(drintn, FPW, RMC)
-GEN_DFP_T_B_U32_U32_Rc(drintnq, FPW, RMC)
-GEN_DFP_T_B_Rc(dctdp)
-GEN_DFP_T_B_Rc(dctqpq)
-GEN_DFP_T_B_Rc(drsp)
-GEN_DFP_T_B_Rc(drdpq)
-GEN_DFP_T_B_Rc(dcffix)
-GEN_DFP_T_B_Rc(dcffixq)
-GEN_DFP_T_B_Rc(dctfix)
-GEN_DFP_T_B_Rc(dctfixq)
-GEN_DFP_T_FPR_I32_Rc(ddedpd, rB, SP)
-GEN_DFP_T_FPR_I32_Rc(ddedpdq, rB, SP)
-GEN_DFP_T_FPR_I32_Rc(denbcd, rB, SP)
-GEN_DFP_T_FPR_I32_Rc(denbcdq, rB, SP)
-GEN_DFP_T_B_Rc(dxex)
-GEN_DFP_T_B_Rc(dxexq)
-GEN_DFP_T_A_B_Rc(diex)
-GEN_DFP_T_A_B_Rc(diexq)
-GEN_DFP_T_FPR_I32_Rc(dscli, rA, DCM)
-GEN_DFP_T_FPR_I32_Rc(dscliq, rA, DCM)
-GEN_DFP_T_FPR_I32_Rc(dscri, rA, DCM)
-GEN_DFP_T_FPR_I32_Rc(dscriq, rA, DCM)
-
-#undef GEN_DFP_T_A_B_Rc
-#undef GEN_DFP_BF_A_B
-#undef GEN_DFP_BF_A_DCM
-#undef GEN_DFP_T_B_U32_U32_Rc
-#undef GEN_DFP_T_A_B_I32_Rc
-#undef GEN_DFP_T_B_Rc
-#undef GEN_DFP_T_FPR_I32_Rc
+#define TRANS_DFP_T_B_Rc(NAME) \
+static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+{ \
+ TCGv_ptr rt, rb; \
+ REQUIRE_INSNS_FLAGS2(ctx, DFP); \
+ REQUIRE_FPU(ctx); \
+ rt = gen_fprp_ptr(a->rt); \
+ rb = gen_fprp_ptr(a->rb); \
+ gen_helper_##NAME(tcg_env, rt, rb); \
+ if (unlikely(a->rc)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ return true; \
+}
+
+#define TRANS_DFP_T_FPR_I32_Rc(NAME, FPRFLD, I32FLD) \
+static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
+{ \
+ TCGv_ptr rt, rx; \
+ REQUIRE_INSNS_FLAGS2(ctx, DFP); \
+ REQUIRE_FPU(ctx); \
+ rt = gen_fprp_ptr(a->rt); \
+ rx = gen_fprp_ptr(a->FPRFLD); \
+ gen_helper_##NAME(tcg_env, rt, rx, \
+ tcg_constant_i32(a->I32FLD)); \
+ if (unlikely(a->rc)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ return true; \
+}
+
+TRANS_DFP_T_A_B_Rc(DADD)
+TRANS_DFP_T_A_B_Rc(DADDQ)
+TRANS_DFP_T_A_B_Rc(DSUB)
+TRANS_DFP_T_A_B_Rc(DSUBQ)
+TRANS_DFP_T_A_B_Rc(DMUL)
+TRANS_DFP_T_A_B_Rc(DMULQ)
+TRANS_DFP_T_A_B_Rc(DDIV)
+TRANS_DFP_T_A_B_Rc(DDIVQ)
+TRANS_DFP_BF_A_B(DCMPU)
+TRANS_DFP_BF_A_B(DCMPUQ)
+TRANS_DFP_BF_A_B(DCMPO)
+TRANS_DFP_BF_A_B(DCMPOQ)
+TRANS_DFP_BF_A_DCM(DTSTDC)
+TRANS_DFP_BF_A_DCM(DTSTDCQ)
+TRANS_DFP_BF_A_DCM(DTSTDG)
+TRANS_DFP_BF_A_DCM(DTSTDGQ)
+TRANS_DFP_BF_A_B(DTSTEX)
+TRANS_DFP_BF_A_B(DTSTEXQ)
+TRANS_DFP_BF_A_B(DTSTSF)
+TRANS_DFP_BF_A_B(DTSTSFQ)
+TRANS_DFP_BF_I_B(DTSTSFI)
+TRANS_DFP_BF_I_B(DTSTSFIQ)
+TRANS_DFP_T_B_U32_U32_Rc(DQUAI, te, rmc)
+TRANS_DFP_T_B_U32_U32_Rc(DQUAIQ, te, rmc)
+TRANS_DFP_T_A_B_I32_Rc(DQUA, rmc)
+TRANS_DFP_T_A_B_I32_Rc(DQUAQ, rmc)
+TRANS_DFP_T_A_B_I32_Rc(DRRND, rmc)
+TRANS_DFP_T_A_B_I32_Rc(DRRNDQ, rmc)
+TRANS_DFP_T_B_U32_U32_Rc(DRINTX, r, rmc)
+TRANS_DFP_T_B_U32_U32_Rc(DRINTXQ, r, rmc)
+TRANS_DFP_T_B_U32_U32_Rc(DRINTN, r, rmc)
+TRANS_DFP_T_B_U32_U32_Rc(DRINTNQ, r, rmc)
+TRANS_DFP_T_B_Rc(DCTDP)
+TRANS_DFP_T_B_Rc(DCTQPQ)
+TRANS_DFP_T_B_Rc(DRSP)
+TRANS_DFP_T_B_Rc(DRDPQ)
+TRANS_DFP_T_B_Rc(DCFFIX)
+TRANS_DFP_T_B_Rc(DCFFIXQ)
+TRANS_DFP_T_B_Rc(DCTFIX)
+TRANS_DFP_T_B_Rc(DCTFIXQ)
+TRANS_DFP_T_FPR_I32_Rc(DDEDPD, rb, sp)
+TRANS_DFP_T_FPR_I32_Rc(DDEDPDQ, rb, sp)
+TRANS_DFP_T_FPR_I32_Rc(DENBCD, rb, s)
+TRANS_DFP_T_FPR_I32_Rc(DENBCDQ, rb, s)
+TRANS_DFP_T_B_Rc(DXEX)
+TRANS_DFP_T_B_Rc(DXEXQ)
+TRANS_DFP_T_A_B_Rc(DIEX)
+TRANS_DFP_T_A_B_Rc(DIEXQ)
+TRANS_DFP_T_FPR_I32_Rc(DSCLI, ra, sh)
+TRANS_DFP_T_FPR_I32_Rc(DSCLIQ, ra, sh)
+TRANS_DFP_T_FPR_I32_Rc(DSCRI, ra, sh)
+TRANS_DFP_T_FPR_I32_Rc(DSCRIQ, ra, sh)
+
+static bool trans_DCFFIXQQ(DisasContext *ctx, arg_DCFFIXQQ *a)
+{
+ TCGv_ptr rt, rb;
+
+ REQUIRE_INSNS_FLAGS2(ctx, DFP);
+ REQUIRE_FPU(ctx);
+ REQUIRE_VECTOR(ctx);
+
+ rt = gen_fprp_ptr(a->frtp);
+ rb = gen_avr_ptr(a->vrb);
+ gen_helper_DCFFIXQQ(tcg_env, rt, rb);
+
+ return true;
+}
+
+static bool trans_DCTFIXQQ(DisasContext *ctx, arg_DCTFIXQQ *a)
+{
+ TCGv_ptr rt, rb;
+
+ REQUIRE_INSNS_FLAGS2(ctx, DFP);
+ REQUIRE_FPU(ctx);
+ REQUIRE_VECTOR(ctx);
+
+ rt = gen_avr_ptr(a->vrt);
+ rb = gen_fprp_ptr(a->frbp);
+ gen_helper_DCTFIXQQ(tcg_env, rt, rb);
+
+ return true;
+}
diff --git a/target/ppc/translate/dfp-ops.c.inc b/target/ppc/translate/dfp-ops.c.inc
deleted file mode 100644
index 6ef38e5712..0000000000
--- a/target/ppc/translate/dfp-ops.c.inc
+++ /dev/null
@@ -1,165 +0,0 @@
-#define _GEN_DFP_LONG(name, op1, op2, mask) \
-GEN_HANDLER_E(name, 0x3B, op1, op2, mask, PPC_NONE, PPC2_DFP)
-
-#define _GEN_DFP_LONG_300(name, op1, op2, mask) \
-GEN_HANDLER_E(name, 0x3B, op1, op2, mask, PPC_NONE, PPC2_ISA300)
-
-#define _GEN_DFP_LONGx2(name, op1, op2, mask) \
-GEN_HANDLER_E(name, 0x3B, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \
-GEN_HANDLER_E(name, 0x3B, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP)
-
-#define _GEN_DFP_LONGx4(name, op1, op2, mask) \
-GEN_HANDLER_E(name, 0x3B, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \
-GEN_HANDLER_E(name, 0x3B, op1, 0x08 | op2, mask, PPC_NONE, PPC2_DFP), \
-GEN_HANDLER_E(name, 0x3B, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP), \
-GEN_HANDLER_E(name, 0x3B, op1, 0x18 | op2, mask, PPC_NONE, PPC2_DFP)
-
-#define _GEN_DFP_QUAD(name, op1, op2, mask) \
-GEN_HANDLER_E(name, 0x3F, op1, op2, mask, PPC_NONE, PPC2_DFP)
-
-#define _GEN_DFP_QUAD_300(name, op1, op2, mask) \
-GEN_HANDLER_E(name, 0x3F, op1, op2, mask, PPC_NONE, PPC2_ISA300)
-
-#define _GEN_DFP_QUADx2(name, op1, op2, mask) \
-GEN_HANDLER_E(name, 0x3F, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \
-GEN_HANDLER_E(name, 0x3F, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP)
-
-#define _GEN_DFP_QUADx4(name, op1, op2, mask) \
-GEN_HANDLER_E(name, 0x3F, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \
-GEN_HANDLER_E(name, 0x3F, op1, 0x08 | op2, mask, PPC_NONE, PPC2_DFP), \
-GEN_HANDLER_E(name, 0x3F, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP), \
-GEN_HANDLER_E(name, 0x3F, op1, 0x18 | op2, mask, PPC_NONE, PPC2_DFP)
-
-#define GEN_DFP_T_A_B_Rc(name, op1, op2) \
-_GEN_DFP_LONG(name, op1, op2, 0x00000000)
-
-#define GEN_DFP_Tp_Ap_Bp_Rc(name, op1, op2) \
-_GEN_DFP_QUAD(name, op1, op2, 0x00210800)
-
-#define GEN_DFP_Tp_A_Bp_Rc(name, op1, op2) \
-_GEN_DFP_QUAD(name, op1, op2, 0x00200800)
-
-#define GEN_DFP_T_B_Rc(name, op1, op2) \
-_GEN_DFP_LONG(name, op1, op2, 0x001F0000)
-
-#define GEN_DFP_Tp_Bp_Rc(name, op1, op2) \
-_GEN_DFP_QUAD(name, op1, op2, 0x003F0800)
-
-#define GEN_DFP_Tp_B_Rc(name, op1, op2) \
-_GEN_DFP_QUAD(name, op1, op2, 0x003F0000)
-
-#define GEN_DFP_T_Bp_Rc(name, op1, op2) \
-_GEN_DFP_QUAD(name, op1, op2, 0x001F0800)
-
-#define GEN_DFP_BF_A_B(name, op1, op2) \
-_GEN_DFP_LONG(name, op1, op2, 0x00000001)
-
-#define GEN_DFP_BF_A_B_300(name, op1, op2) \
-_GEN_DFP_LONG_300(name, op1, op2, 0x00400001)
-
-#define GEN_DFP_BF_Ap_Bp(name, op1, op2) \
-_GEN_DFP_QUAD(name, op1, op2, 0x00610801)
-
-#define GEN_DFP_BF_A_Bp(name, op1, op2) \
-_GEN_DFP_QUAD(name, op1, op2, 0x00600801)
-
-#define GEN_DFP_BF_A_Bp_300(name, op1, op2) \
-_GEN_DFP_QUAD_300(name, op1, op2, 0x00400001)
-
-#define GEN_DFP_BF_A_DCM(name, op1, op2) \
-_GEN_DFP_LONGx2(name, op1, op2, 0x00600001)
-
-#define GEN_DFP_BF_Ap_DCM(name, op1, op2) \
-_GEN_DFP_QUADx2(name, op1, op2, 0x00610001)
-
-#define GEN_DFP_T_A_B_RMC_Rc(name, op1, op2) \
-_GEN_DFP_LONGx4(name, op1, op2, 0x00000000)
-
-#define GEN_DFP_Tp_Ap_Bp_RMC_Rc(name, op1, op2) \
-_GEN_DFP_QUADx4(name, op1, op2, 0x02010800)
-
-#define GEN_DFP_Tp_A_Bp_RMC_Rc(name, op1, op2) \
-_GEN_DFP_QUADx4(name, op1, op2, 0x02000800)
-
-#define GEN_DFP_TE_T_B_RMC_Rc(name, op1, op2) \
-_GEN_DFP_LONGx4(name, op1, op2, 0x00000000)
-
-#define GEN_DFP_TE_Tp_Bp_RMC_Rc(name, op1, op2) \
-_GEN_DFP_QUADx4(name, op1, op2, 0x00200800)
-
-#define GEN_DFP_R_T_B_RMC_Rc(name, op1, op2) \
-_GEN_DFP_LONGx4(name, op1, op2, 0x001E0000)
-
-#define GEN_DFP_R_Tp_Bp_RMC_Rc(name, op1, op2) \
-_GEN_DFP_QUADx4(name, op1, op2, 0x003E0800)
-
-#define GEN_DFP_SP_T_B_Rc(name, op1, op2) \
-_GEN_DFP_LONG(name, op1, op2, 0x00070000)
-
-#define GEN_DFP_SP_Tp_Bp_Rc(name, op1, op2) \
-_GEN_DFP_QUAD(name, op1, op2, 0x00270800)
-
-#define GEN_DFP_S_T_B_Rc(name, op1, op2) \
-_GEN_DFP_LONG(name, op1, op2, 0x000F0000)
-
-#define GEN_DFP_S_Tp_Bp_Rc(name, op1, op2) \
-_GEN_DFP_QUAD(name, op1, op2, 0x002F0800)
-
-#define GEN_DFP_T_A_SH_Rc(name, op1, op2) \
-_GEN_DFP_LONGx2(name, op1, op2, 0x00000000)
-
-#define GEN_DFP_Tp_Ap_SH_Rc(name, op1, op2) \
-_GEN_DFP_QUADx2(name, op1, op2, 0x00210000)
-
-GEN_DFP_T_A_B_Rc(dadd, 0x02, 0x00),
-GEN_DFP_Tp_Ap_Bp_Rc(daddq, 0x02, 0x00),
-GEN_DFP_T_A_B_Rc(dsub, 0x02, 0x10),
-GEN_DFP_Tp_Ap_Bp_Rc(dsubq, 0x02, 0x10),
-GEN_DFP_T_A_B_Rc(dmul, 0x02, 0x01),
-GEN_DFP_Tp_Ap_Bp_Rc(dmulq, 0x02, 0x01),
-GEN_DFP_T_A_B_Rc(ddiv, 0x02, 0x11),
-GEN_DFP_Tp_Ap_Bp_Rc(ddivq, 0x02, 0x11),
-GEN_DFP_BF_A_B(dcmpu, 0x02, 0x14),
-GEN_DFP_BF_Ap_Bp(dcmpuq, 0x02, 0x14),
-GEN_DFP_BF_A_B(dcmpo, 0x02, 0x04),
-GEN_DFP_BF_Ap_Bp(dcmpoq, 0x02, 0x04),
-GEN_DFP_BF_A_DCM(dtstdc, 0x02, 0x06),
-GEN_DFP_BF_Ap_DCM(dtstdcq, 0x02, 0x06),
-GEN_DFP_BF_A_DCM(dtstdg, 0x02, 0x07),
-GEN_DFP_BF_Ap_DCM(dtstdgq, 0x02, 0x07),
-GEN_DFP_BF_A_B(dtstex, 0x02, 0x05),
-GEN_DFP_BF_Ap_Bp(dtstexq, 0x02, 0x05),
-GEN_DFP_BF_A_B(dtstsf, 0x02, 0x15),
-GEN_DFP_BF_A_Bp(dtstsfq, 0x02, 0x15),
-GEN_DFP_BF_A_B_300(dtstsfi, 0x03, 0x15),
-GEN_DFP_BF_A_Bp_300(dtstsfiq, 0x03, 0x15),
-GEN_DFP_TE_T_B_RMC_Rc(dquai, 0x03, 0x02),
-GEN_DFP_TE_Tp_Bp_RMC_Rc(dquaiq, 0x03, 0x02),
-GEN_DFP_T_A_B_RMC_Rc(dqua, 0x03, 0x00),
-GEN_DFP_Tp_Ap_Bp_RMC_Rc(dquaq, 0x03, 0x00),
-GEN_DFP_T_A_B_RMC_Rc(drrnd, 0x03, 0x01),
-GEN_DFP_Tp_A_Bp_RMC_Rc(drrndq, 0x03, 0x01),
-GEN_DFP_R_T_B_RMC_Rc(drintx, 0x03, 0x03),
-GEN_DFP_R_Tp_Bp_RMC_Rc(drintxq, 0x03, 0x03),
-GEN_DFP_R_T_B_RMC_Rc(drintn, 0x03, 0x07),
-GEN_DFP_R_Tp_Bp_RMC_Rc(drintnq, 0x03, 0x07),
-GEN_DFP_T_B_Rc(dctdp, 0x02, 0x08),
-GEN_DFP_Tp_B_Rc(dctqpq, 0x02, 0x08),
-GEN_DFP_T_B_Rc(drsp, 0x02, 0x18),
-GEN_DFP_Tp_Bp_Rc(drdpq, 0x02, 0x18),
-GEN_DFP_T_B_Rc(dcffix, 0x02, 0x19),
-GEN_DFP_Tp_B_Rc(dcffixq, 0x02, 0x19),
-GEN_DFP_T_B_Rc(dctfix, 0x02, 0x09),
-GEN_DFP_T_Bp_Rc(dctfixq, 0x02, 0x09),
-GEN_DFP_SP_T_B_Rc(ddedpd, 0x02, 0x0a),
-GEN_DFP_SP_Tp_Bp_Rc(ddedpdq, 0x02, 0x0a),
-GEN_DFP_S_T_B_Rc(denbcd, 0x02, 0x1a),
-GEN_DFP_S_Tp_Bp_Rc(denbcdq, 0x02, 0x1a),
-GEN_DFP_T_B_Rc(dxex, 0x02, 0x0b),
-GEN_DFP_T_Bp_Rc(dxexq, 0x02, 0x0b),
-GEN_DFP_T_A_B_Rc(diex, 0x02, 0x1b),
-GEN_DFP_Tp_A_Bp_Rc(diexq, 0x02, 0x1b),
-GEN_DFP_T_A_SH_Rc(dscli, 0x02, 0x02),
-GEN_DFP_Tp_Ap_SH_Rc(dscliq, 0x02, 0x02),
-GEN_DFP_T_A_SH_Rc(dscri, 0x02, 0x03),
-GEN_DFP_Tp_Ap_SH_Rc(dscriq, 0x02, 0x03),
diff --git a/target/ppc/translate/fixedpoint-impl.c.inc b/target/ppc/translate/fixedpoint-impl.c.inc
index 2e2518ee15..0c66465d96 100644
--- a/target/ppc/translate/fixedpoint-impl.c.inc
+++ b/target/ppc/translate/fixedpoint-impl.c.inc
@@ -18,25 +18,6 @@
*/
/*
- * Incorporate CIA into the constant when R=1.
- * Validate that when R=1, RA=0.
- */
-static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a)
-{
- d->rt = a->rt;
- d->ra = a->ra;
- d->si = a->si;
- if (a->r) {
- if (unlikely(a->ra != 0)) {
- gen_invalid(ctx);
- return false;
- }
- d->si += ctx->cia;
- }
- return true;
-}
-
-/*
* Fixed-Point Load/Store Instructions
*/
@@ -51,15 +32,7 @@ static bool do_ldst(DisasContext *ctx, int rt, int ra, TCGv displ, bool update,
}
gen_set_access_type(ctx, ACCESS_INT);
- ea = tcg_temp_new();
- if (ra) {
- tcg_gen_add_tl(ea, cpu_gpr[ra], displ);
- } else {
- tcg_gen_mov_tl(ea, displ);
- }
- if (NARROW_MODE(ctx)) {
- tcg_gen_ext32u_tl(ea, ea);
- }
+ ea = do_ea_calc(ctx, ra, displ);
mop ^= ctx->default_tcg_memop_mask;
if (store) {
tcg_gen_qemu_st_tl(cpu_gpr[rt], ea, ctx->mem_idx, mop);
@@ -69,8 +42,6 @@ static bool do_ldst(DisasContext *ctx, int rt, int ra, TCGv displ, bool update,
if (update) {
tcg_gen_mov_tl(cpu_gpr[ra], ea);
}
- tcg_temp_free(ea);
-
return true;
}
@@ -96,6 +67,66 @@ static bool do_ldst_X(DisasContext *ctx, arg_X *a, bool update,
return do_ldst(ctx, a->rt, a->ra, cpu_gpr[a->rb], update, store, mop);
}
+static bool do_ldst_quad(DisasContext *ctx, arg_D *a, bool store, bool prefixed)
+{
+#if defined(TARGET_PPC64)
+ TCGv ea;
+ TCGv_i64 lo, hi;
+ TCGv_i128 t16;
+
+ REQUIRE_INSNS_FLAGS(ctx, 64BX);
+
+ if (!prefixed && !(ctx->insns_flags2 & PPC2_LSQ_ISA207)) {
+ /* lq and stq were privileged prior to V. 2.07 */
+ REQUIRE_SV(ctx);
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return true;
+ }
+ }
+
+ if (!store && unlikely(a->ra == a->rt)) {
+ gen_invalid(ctx);
+ return true;
+ }
+
+ gen_set_access_type(ctx, ACCESS_INT);
+ ea = do_ea_calc(ctx, a->ra, tcg_constant_tl(a->si));
+
+ if (ctx->le_mode && prefixed) {
+ lo = cpu_gpr[a->rt];
+ hi = cpu_gpr[a->rt + 1];
+ } else {
+ lo = cpu_gpr[a->rt + 1];
+ hi = cpu_gpr[a->rt];
+ }
+ t16 = tcg_temp_new_i128();
+
+ if (store) {
+ tcg_gen_concat_i64_i128(t16, lo, hi);
+ tcg_gen_qemu_st_i128(t16, ea, ctx->mem_idx, DEF_MEMOP(MO_128));
+ } else {
+ tcg_gen_qemu_ld_i128(t16, ea, ctx->mem_idx, DEF_MEMOP(MO_128));
+ tcg_gen_extr_i128_i64(lo, hi, t16);
+ }
+#else
+ qemu_build_not_reached();
+#endif
+
+ return true;
+}
+
+static bool do_ldst_quad_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store)
+{
+ arg_D d;
+ if (!resolve_PLS_D(ctx, &d, a)) {
+ return true;
+ }
+
+ return do_ldst_quad(ctx, &d, store, true);
+}
+
/* Load Byte and Zero */
TRANS(LBZ, do_ldst_D, false, false, MO_UB)
TRANS(LBZX, do_ldst_X, false, false, MO_UB)
@@ -131,11 +162,15 @@ TRANS64(LWAUX, do_ldst_X, true, false, MO_SL)
TRANS64(PLWA, do_ldst_PLS_D, false, false, MO_SL)
/* Load Doubleword */
-TRANS64(LD, do_ldst_D, false, false, MO_Q)
-TRANS64(LDX, do_ldst_X, false, false, MO_Q)
-TRANS64(LDU, do_ldst_D, true, false, MO_Q)
-TRANS64(LDUX, do_ldst_X, true, false, MO_Q)
-TRANS64(PLD, do_ldst_PLS_D, false, false, MO_Q)
+TRANS64(LD, do_ldst_D, false, false, MO_UQ)
+TRANS64(LDX, do_ldst_X, false, false, MO_UQ)
+TRANS64(LDU, do_ldst_D, true, false, MO_UQ)
+TRANS64(LDUX, do_ldst_X, true, false, MO_UQ)
+TRANS64(PLD, do_ldst_PLS_D, false, false, MO_UQ)
+
+/* Load Quadword */
+TRANS64(LQ, do_ldst_quad, false, false);
+TRANS64(PLQ, do_ldst_quad_PLS_D, false);
/* Store Byte */
TRANS(STB, do_ldst_D, false, true, MO_UB)
@@ -159,11 +194,15 @@ TRANS(STWUX, do_ldst_X, true, true, MO_UL)
TRANS(PSTW, do_ldst_PLS_D, false, true, MO_UL)
/* Store Doubleword */
-TRANS64(STD, do_ldst_D, false, true, MO_Q)
-TRANS64(STDX, do_ldst_X, false, true, MO_Q)
-TRANS64(STDU, do_ldst_D, true, true, MO_Q)
-TRANS64(STDUX, do_ldst_X, true, true, MO_Q)
-TRANS64(PSTD, do_ldst_PLS_D, false, true, MO_Q)
+TRANS64(STD, do_ldst_D, false, true, MO_UQ)
+TRANS64(STDX, do_ldst_X, false, true, MO_UQ)
+TRANS64(STDU, do_ldst_D, true, true, MO_UQ)
+TRANS64(STDUX, do_ldst_X, true, true, MO_UQ)
+TRANS64(PSTD, do_ldst_PLS_D, false, true, MO_UQ)
+
+/* Store Quadword */
+TRANS64(STQ, do_ldst_quad, true, false);
+TRANS64(PSTQ, do_ldst_quad_PLS_D, true);
/*
* Fixed-Point Compare Instructions
@@ -286,6 +325,76 @@ static bool trans_ADDPCIS(DisasContext *ctx, arg_DX *a)
return true;
}
+static bool trans_ADDEX(DisasContext *ctx, arg_X *a)
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ gen_op_arith_add(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], cpu_gpr[a->rb],
+ cpu_ov, cpu_ov32, true, true, false, false);
+ return true;
+}
+
+static bool do_add_D(DisasContext *ctx, arg_D *a, bool add_ca, bool compute_ca,
+ bool compute_ov, bool compute_rc0)
+{
+ gen_op_arith_add(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra],
+ tcg_constant_tl(a->si), cpu_ca, cpu_ca32,
+ add_ca, compute_ca, compute_ov, compute_rc0);
+ return true;
+}
+
+static bool do_add_XO(DisasContext *ctx, arg_XO *a, bool add_ca,
+ bool compute_ca)
+{
+ gen_op_arith_add(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], cpu_gpr[a->rb],
+ cpu_ca, cpu_ca32, add_ca, compute_ca, a->oe, a->rc);
+ return true;
+}
+
+static bool do_add_const_XO(DisasContext *ctx, arg_XO_ta *a, TCGv const_val,
+ bool add_ca, bool compute_ca)
+{
+ gen_op_arith_add(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], const_val,
+ cpu_ca, cpu_ca32, add_ca, compute_ca, a->oe, a->rc);
+ return true;
+}
+
+TRANS(ADD, do_add_XO, false, false);
+TRANS(ADDC, do_add_XO, false, true);
+TRANS(ADDE, do_add_XO, true, true);
+TRANS(ADDME, do_add_const_XO, tcg_constant_tl(-1LL), true, true);
+TRANS(ADDZE, do_add_const_XO, tcg_constant_tl(0), true, true);
+TRANS(ADDIC, do_add_D, false, true, false, false);
+TRANS(ADDIC_, do_add_D, false, true, false, true);
+
+static bool trans_SUBFIC(DisasContext *ctx, arg_D *a)
+{
+ gen_op_arith_subf(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra],
+ tcg_constant_tl(a->si), false, true, false, false);
+ return true;
+}
+
+static bool do_subf_XO(DisasContext *ctx, arg_XO *a, bool add_ca,
+ bool compute_ca)
+{
+ gen_op_arith_subf(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], cpu_gpr[a->rb],
+ add_ca, compute_ca, a->oe, a->rc);
+ return true;
+}
+
+static bool do_subf_const_XO(DisasContext *ctx, arg_XO_ta *a, TCGv const_val,
+ bool add_ca, bool compute_ca)
+{
+ gen_op_arith_subf(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], const_val,
+ add_ca, compute_ca, a->oe, a->rc);
+ return true;
+}
+
+TRANS(SUBF, do_subf_XO, false, false)
+TRANS(SUBFC, do_subf_XO, false, true)
+TRANS(SUBFE, do_subf_XO, true, true)
+TRANS(SUBFME, do_subf_const_XO, tcg_constant_tl(-1LL), true, true)
+TRANS(SUBFZE, do_subf_const_XO, tcg_constant_tl(0), true, true)
+
static bool trans_INVALID(DisasContext *ctx, arg_INVALID *a)
{
gen_invalid(ctx);
@@ -303,15 +412,15 @@ static bool do_set_bool_cond(DisasContext *ctx, arg_X_bi *a, bool neg, bool rev)
uint32_t mask = 0x08 >> (a->bi & 0x03);
TCGCond cond = rev ? TCG_COND_EQ : TCG_COND_NE;
TCGv temp = tcg_temp_new();
+ TCGv zero = tcg_constant_tl(0);
tcg_gen_extu_i32_tl(temp, cpu_crf[a->bi >> 2]);
tcg_gen_andi_tl(temp, temp, mask);
- tcg_gen_setcondi_tl(cond, cpu_gpr[a->rt], temp, 0);
if (neg) {
- tcg_gen_neg_tl(cpu_gpr[a->rt], cpu_gpr[a->rt]);
+ tcg_gen_negsetcond_tl(cond, cpu_gpr[a->rt], temp, zero);
+ } else {
+ tcg_gen_setcond_tl(cond, cpu_gpr[a->rt], temp, zero);
}
- tcg_temp_free(temp);
-
return true;
}
@@ -325,9 +434,164 @@ static bool trans_CFUGED(DisasContext *ctx, arg_X *a)
REQUIRE_64BIT(ctx);
REQUIRE_INSNS_FLAGS2(ctx, ISA310);
#if defined(TARGET_PPC64)
- gen_helper_cfuged(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
+ gen_helper_CFUGED(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
+
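+/*
+ * Common body for CNTLZDM/CNTTZDM (count leading/trailing zeros under mask):
+ * clz/ctz of (src & mask) finds the first selected 1-bit; shifting the mask
+ * past that position and popcounting what remains counts how many selected
+ * bit positions come before it, which is the architected result (popcount of
+ * the whole mask when src & mask is zero).
+ */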
+static void do_cntzdm(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 mask, int64_t trail)
+{
+ TCGv_i64 t0, t1;
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ tcg_gen_and_i64(t0, src, mask);
+ if (trail) {
+ tcg_gen_ctzi_i64(t0, t0, -1);
+ } else {
+ tcg_gen_clzi_i64(t0, t0, -1);
+ }
+
+ tcg_gen_setcondi_i64(TCG_COND_NE, t1, t0, -1);
+ tcg_gen_andi_i64(t0, t0, 63);
+ tcg_gen_xori_i64(t0, t0, 63);
+ if (trail) {
+ tcg_gen_shl_i64(t0, mask, t0);
+ tcg_gen_shl_i64(t0, t0, t1);
+ } else {
+ tcg_gen_shr_i64(t0, mask, t0);
+ tcg_gen_shr_i64(t0, t0, t1);
+ }
+
+ tcg_gen_ctpop_i64(dst, t0);
+}
+
+static bool trans_CNTLZDM(DisasContext *ctx, arg_X *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+#if defined(TARGET_PPC64)
+ do_cntzdm(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb], false);
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
+
+static bool trans_CNTTZDM(DisasContext *ctx, arg_X *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+#if defined(TARGET_PPC64)
+ do_cntzdm(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb], true);
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
+
+static bool trans_PDEPD(DisasContext *ctx, arg_X *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+#if defined(TARGET_PPC64)
+ gen_helper_PDEPD(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
+
+static bool trans_PEXTD(DisasContext *ctx, arg_X *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+#if defined(TARGET_PPC64)
+ gen_helper_PEXTD(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]);
#else
qemu_build_not_reached();
#endif
return true;
}
+
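+/*
+ * ADDG6S (Add and Generate Sixes): each 4-bit digit of RT becomes 6 where the
+ * corresponding digit of the binary addition RA + RB produced no carry-out,
+ * and 0 where it did; software uses the result as the decimal-adjust constant
+ * when building BCD addition out of binary adds.
+ */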
+static bool trans_ADDG6S(DisasContext *ctx, arg_X *a)
+{
+ const target_ulong carry_bits = (target_ulong)-1 / 0xf;
+ TCGv in1, in2, carryl, carryh, tmp;
+ TCGv zero = tcg_constant_tl(0);
+
+ REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206);
+
+ in1 = cpu_gpr[a->ra];
+ in2 = cpu_gpr[a->rb];
+ tmp = tcg_temp_new();
+ carryl = tcg_temp_new();
+ carryh = tcg_temp_new();
+
+ /* Addition with carry. */
+ tcg_gen_add2_tl(carryl, carryh, in1, zero, in2, zero);
+ /* Addition without carry. */
+ tcg_gen_xor_tl(tmp, in1, in2);
+ /* Difference between the two is carry in to each bit. */
+ tcg_gen_xor_tl(carryl, carryl, tmp);
+
+ /*
+ * The carry-out that we're looking for is the carry-in to
+ * the next nibble. Shift the double-word down one nibble,
+ * which puts all of the bits back into one word.
+ */
+ tcg_gen_extract2_tl(carryl, carryl, carryh, 4);
+
+ /* Invert, isolate the carry bits, and produce 6's. */
+ tcg_gen_andc_tl(carryl, tcg_constant_tl(carry_bits), carryl);
+ tcg_gen_muli_tl(cpu_gpr[a->rt], carryl, 6);
+ return true;
+}
+
+static bool trans_CDTBCD(DisasContext *ctx, arg_X_sa *a)
+{
+ REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206);
+ gen_helper_CDTBCD(cpu_gpr[a->ra], cpu_gpr[a->rs]);
+ return true;
+}
+
+static bool trans_CBCDTD(DisasContext *ctx, arg_X_sa *a)
+{
+ REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206);
+ gen_helper_CBCDTD(cpu_gpr[a->ra], cpu_gpr[a->rs]);
+ return true;
+}
+
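+/*
+ * Common body for the ROP-protection hash instructions (hashst/hashchk and
+ * the privileged hashstp/hashchkp): EA is (RA) plus the displacement the
+ * decoder placed in a->rt, and the helper stores or checks the hash at that
+ * address. Before ISA v3.1 these encodings are treated as no-ops.
+ */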
+static bool do_hash(DisasContext *ctx, arg_X *a, bool priv,
+ void (*helper)(TCGv_ptr, TCGv, TCGv, TCGv))
+{
+ TCGv ea;
+
+ if (!(ctx->insns_flags2 & PPC2_ISA310)) {
+ /* if version is before v3.1, this operation is a nop */
+ return true;
+ }
+
+ if (priv) {
+ /* if instruction is privileged but the context is in user space */
+ REQUIRE_SV(ctx);
+ }
+
+ if (unlikely(a->ra == 0)) {
+ /* if RA=0, the instruction form is invalid */
+ gen_invalid(ctx);
+ return true;
+ }
+
+ ea = do_ea_calc(ctx, a->ra, tcg_constant_tl(a->rt));
+ helper(tcg_env, ea, cpu_gpr[a->ra], cpu_gpr[a->rb]);
+ return true;
+}
+
+TRANS(HASHST, do_hash, false, gen_helper_HASHST)
+TRANS(HASHCHK, do_hash, false, gen_helper_HASHCHK)
+TRANS(HASHSTP, do_hash, true, gen_helper_HASHSTP)
+TRANS(HASHCHKP, do_hash, true, gen_helper_HASHCHKP)
diff --git a/target/ppc/translate/fp-impl.c.inc b/target/ppc/translate/fp-impl.c.inc
index 9f7868ee28..189cd8c979 100644
--- a/target/ppc/translate/fp-impl.c.inc
+++ b/target/ppc/translate/fp-impl.c.inc
@@ -6,13 +6,13 @@
static inline void gen_reset_fpstatus(void)
{
- gen_helper_reset_fpstatus(cpu_env);
+ gen_helper_reset_fpstatus(tcg_env);
}
static inline void gen_compute_fprf_float64(TCGv_i64 arg)
{
- gen_helper_compute_fprf_float64(cpu_env, arg);
- gen_helper_float_check_status(cpu_env);
+ gen_helper_compute_fprf_float64(tcg_env, arg);
+ gen_helper_float_check_status(tcg_env);
}
#if defined(TARGET_PPC64)
@@ -21,7 +21,6 @@ static void gen_set_cr1_from_fpscr(DisasContext *ctx)
TCGv_i32 tmp = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(tmp, cpu_fpscr);
tcg_gen_shri_i32(cpu_crf[1], tmp, 28);
- tcg_temp_free_i32(tmp);
}
#else
static void gen_set_cr1_from_fpscr(DisasContext *ctx)
@@ -31,7 +30,7 @@ static void gen_set_cr1_from_fpscr(DisasContext *ctx)
#endif
/*** Floating-Point arithmetic ***/
-#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \
+#define _GEN_FLOAT_ACB(name, op1, op2, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
TCGv_i64 t0; \
@@ -50,10 +49,7 @@ static void gen_f##name(DisasContext *ctx) \
get_fpr(t0, rA(ctx->opcode)); \
get_fpr(t1, rC(ctx->opcode)); \
get_fpr(t2, rB(ctx->opcode)); \
- gen_helper_f##op(t3, cpu_env, t0, t1, t2); \
- if (isfloat) { \
- gen_helper_frsp(t3, cpu_env, t3); \
- } \
+ gen_helper_f##name(t3, tcg_env, t0, t1, t2); \
set_fpr(rD(ctx->opcode), t3); \
if (set_fprf) { \
gen_compute_fprf_float64(t3); \
@@ -61,17 +57,13 @@ static void gen_f##name(DisasContext *ctx) \
if (unlikely(Rc(ctx->opcode) != 0)) { \
gen_set_cr1_from_fpscr(ctx); \
} \
- tcg_temp_free_i64(t0); \
- tcg_temp_free_i64(t1); \
- tcg_temp_free_i64(t2); \
- tcg_temp_free_i64(t3); \
}
#define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
-_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type); \
-_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type);
+_GEN_FLOAT_ACB(name, 0x3F, op2, set_fprf, type); \
+_GEN_FLOAT_ACB(name##s, 0x3B, op2, set_fprf, type);
-#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \
+#define _GEN_FLOAT_AB(name, op1, op2, inval, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
TCGv_i64 t0; \
@@ -87,10 +79,7 @@ static void gen_f##name(DisasContext *ctx) \
gen_reset_fpstatus(); \
get_fpr(t0, rA(ctx->opcode)); \
get_fpr(t1, rB(ctx->opcode)); \
- gen_helper_f##op(t2, cpu_env, t0, t1); \
- if (isfloat) { \
- gen_helper_frsp(t2, cpu_env, t2); \
- } \
+ gen_helper_f##name(t2, tcg_env, t0, t1); \
set_fpr(rD(ctx->opcode), t2); \
if (set_fprf) { \
gen_compute_fprf_float64(t2); \
@@ -98,15 +87,12 @@ static void gen_f##name(DisasContext *ctx) \
if (unlikely(Rc(ctx->opcode) != 0)) { \
gen_set_cr1_from_fpscr(ctx); \
} \
- tcg_temp_free_i64(t0); \
- tcg_temp_free_i64(t1); \
- tcg_temp_free_i64(t2); \
}
#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
-_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
-_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
+_GEN_FLOAT_AB(name, 0x3F, op2, inval, set_fprf, type); \
+_GEN_FLOAT_AB(name##s, 0x3B, op2, inval, set_fprf, type);
-#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \
+#define _GEN_FLOAT_AC(name, op1, op2, inval, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
{ \
TCGv_i64 t0; \
@@ -122,10 +108,7 @@ static void gen_f##name(DisasContext *ctx) \
gen_reset_fpstatus(); \
get_fpr(t0, rA(ctx->opcode)); \
get_fpr(t1, rC(ctx->opcode)); \
- gen_helper_f##op(t2, cpu_env, t0, t1); \
- if (isfloat) { \
- gen_helper_frsp(t2, cpu_env, t2); \
- } \
+ gen_helper_f##name(t2, tcg_env, t0, t1); \
set_fpr(rD(ctx->opcode), t2); \
if (set_fprf) { \
gen_compute_fprf_float64(t2); \
@@ -133,13 +116,10 @@ static void gen_f##name(DisasContext *ctx) \
if (unlikely(Rc(ctx->opcode) != 0)) { \
gen_set_cr1_from_fpscr(ctx); \
} \
- tcg_temp_free_i64(t0); \
- tcg_temp_free_i64(t1); \
- tcg_temp_free_i64(t2); \
}
#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
-_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
-_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
+_GEN_FLOAT_AC(name, 0x3F, op2, inval, set_fprf, type); \
+_GEN_FLOAT_AC(name##s, 0x3B, op2, inval, set_fprf, type);
#define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
static void gen_f##name(DisasContext *ctx) \
@@ -154,16 +134,15 @@ static void gen_f##name(DisasContext *ctx) \
t1 = tcg_temp_new_i64(); \
gen_reset_fpstatus(); \
get_fpr(t0, rB(ctx->opcode)); \
- gen_helper_f##name(t1, cpu_env, t0); \
+ gen_helper_f##name(t1, tcg_env, t0); \
set_fpr(rD(ctx->opcode), t1); \
if (set_fprf) { \
- gen_compute_fprf_float64(t1); \
+ gen_helper_compute_fprf_float64(tcg_env, t1); \
} \
+ gen_helper_float_check_status(tcg_env); \
if (unlikely(Rc(ctx->opcode) != 0)) { \
gen_set_cr1_from_fpscr(ctx); \
} \
- tcg_temp_free_i64(t0); \
- tcg_temp_free_i64(t1); \
}
#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
@@ -179,7 +158,7 @@ static void gen_f##name(DisasContext *ctx) \
t1 = tcg_temp_new_i64(); \
gen_reset_fpstatus(); \
get_fpr(t0, rB(ctx->opcode)); \
- gen_helper_f##name(t1, cpu_env, t0); \
+ gen_helper_f##name(t1, tcg_env, t0); \
set_fpr(rD(ctx->opcode), t1); \
if (set_fprf) { \
gen_compute_fprf_float64(t1); \
@@ -187,8 +166,6 @@ static void gen_f##name(DisasContext *ctx) \
if (unlikely(Rc(ctx->opcode) != 0)) { \
gen_set_cr1_from_fpscr(ctx); \
} \
- tcg_temp_free_i64(t0); \
- tcg_temp_free_i64(t1); \
}
/* fadd - fadds */
@@ -220,69 +197,66 @@ static void gen_frsqrtes(DisasContext *ctx)
t1 = tcg_temp_new_i64();
gen_reset_fpstatus();
get_fpr(t0, rB(ctx->opcode));
- gen_helper_frsqrte(t1, cpu_env, t0);
- gen_helper_frsp(t1, cpu_env, t1);
+ gen_helper_frsqrtes(t1, tcg_env, t0);
set_fpr(rD(ctx->opcode), t1);
gen_compute_fprf_float64(t1);
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_cr1_from_fpscr(ctx);
}
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
}
-/* fsel */
-_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL);
-/* fsub - fsubs */
-GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
-/* Optional: */
-
-/* fsqrt */
-static void gen_fsqrt(DisasContext *ctx)
+static bool trans_FSEL(DisasContext *ctx, arg_A *a)
{
- TCGv_i64 t0;
- TCGv_i64 t1;
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
+ TCGv_i64 t0, t1, t2;
+
+ REQUIRE_INSNS_FLAGS(ctx, FLOAT_FSEL);
+ REQUIRE_FPU(ctx);
+
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
- gen_reset_fpstatus();
- get_fpr(t0, rB(ctx->opcode));
- gen_helper_fsqrt(t1, cpu_env, t0);
- set_fpr(rD(ctx->opcode), t1);
- gen_compute_fprf_float64(t1);
- if (unlikely(Rc(ctx->opcode) != 0)) {
+ t2 = tcg_temp_new_i64();
+
+ get_fpr(t0, a->fra);
+ get_fpr(t1, a->frb);
+ get_fpr(t2, a->frc);
+
+ gen_helper_FSEL(t0, t0, t1, t2);
+ set_fpr(a->frt, t0);
+ if (a->rc) {
gen_set_cr1_from_fpscr(ctx);
}
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
+ return true;
}
-static void gen_fsqrts(DisasContext *ctx)
+/* fsub - fsubs */
+GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
+/* Optional: */
+
+static bool do_helper_fsqrt(DisasContext *ctx, arg_A_tb *a,
+ void (*helper)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
- TCGv_i64 t0;
- TCGv_i64 t1;
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
+ TCGv_i64 t0, t1;
+
+ REQUIRE_INSNS_FLAGS(ctx, FLOAT_FSQRT);
+ REQUIRE_FPU(ctx);
+
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
+
gen_reset_fpstatus();
- get_fpr(t0, rB(ctx->opcode));
- gen_helper_fsqrt(t1, cpu_env, t0);
- gen_helper_frsp(t1, cpu_env, t1);
- set_fpr(rD(ctx->opcode), t1);
+ get_fpr(t0, a->frb);
+ helper(t1, tcg_env, t0);
+ set_fpr(a->frt, t1);
gen_compute_fprf_float64(t1);
- if (unlikely(Rc(ctx->opcode) != 0)) {
+ if (unlikely(a->rc != 0)) {
gen_set_cr1_from_fpscr(ctx);
}
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
+ return true;
}
+TRANS(FSQRT, do_helper_fsqrt, gen_helper_FSQRT);
+TRANS(FSQRTS, do_helper_fsqrt, gen_helper_FSQRTS);
+
/*** Floating-Point multiply-and-add ***/
/* fmadd - fmadds */
GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
@@ -343,8 +317,6 @@ static void gen_ftdiv(DisasContext *ctx)
get_fpr(t0, rA(ctx->opcode));
get_fpr(t1, rB(ctx->opcode));
gen_helper_ftdiv(cpu_crf[crfD(ctx->opcode)], t0, t1);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
}
static void gen_ftsqrt(DisasContext *ctx)
@@ -357,7 +329,6 @@ static void gen_ftsqrt(DisasContext *ctx)
t0 = tcg_temp_new_i64();
get_fpr(t0, rB(ctx->opcode));
gen_helper_ftsqrt(cpu_crf[crfD(ctx->opcode)], t0);
- tcg_temp_free_i64(t0);
}
@@ -377,14 +348,11 @@ static void gen_fcmpo(DisasContext *ctx)
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
gen_reset_fpstatus();
- crf = tcg_const_i32(crfD(ctx->opcode));
+ crf = tcg_constant_i32(crfD(ctx->opcode));
get_fpr(t0, rA(ctx->opcode));
get_fpr(t1, rB(ctx->opcode));
- gen_helper_fcmpo(cpu_env, t0, t1, crf);
- tcg_temp_free_i32(crf);
- gen_helper_float_check_status(cpu_env);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
+ gen_helper_fcmpo(tcg_env, t0, t1, crf);
+ gen_helper_float_check_status(tcg_env);
}
/* fcmpu */
@@ -400,14 +368,11 @@ static void gen_fcmpu(DisasContext *ctx)
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
gen_reset_fpstatus();
- crf = tcg_const_i32(crfD(ctx->opcode));
+ crf = tcg_constant_i32(crfD(ctx->opcode));
get_fpr(t0, rA(ctx->opcode));
get_fpr(t1, rB(ctx->opcode));
- gen_helper_fcmpu(cpu_env, t0, t1, crf);
- tcg_temp_free_i32(crf);
- gen_helper_float_check_status(cpu_env);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
+ gen_helper_fcmpu(tcg_env, t0, t1, crf);
+ gen_helper_float_check_status(tcg_env);
}
/*** Floating-point move ***/
@@ -429,8 +394,6 @@ static void gen_fabs(DisasContext *ctx)
if (unlikely(Rc(ctx->opcode))) {
gen_set_cr1_from_fpscr(ctx);
}
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
}
/* fmr - fmr. */
@@ -448,7 +411,6 @@ static void gen_fmr(DisasContext *ctx)
if (unlikely(Rc(ctx->opcode))) {
gen_set_cr1_from_fpscr(ctx);
}
- tcg_temp_free_i64(t0);
}
/* fnabs */
@@ -469,8 +431,6 @@ static void gen_fnabs(DisasContext *ctx)
if (unlikely(Rc(ctx->opcode))) {
gen_set_cr1_from_fpscr(ctx);
}
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
}
/* fneg */
@@ -491,8 +451,6 @@ static void gen_fneg(DisasContext *ctx)
if (unlikely(Rc(ctx->opcode))) {
gen_set_cr1_from_fpscr(ctx);
}
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
}
/* fcpsgn: PowerPC 2.05 specification */
@@ -516,9 +474,6 @@ static void gen_fcpsgn(DisasContext *ctx)
if (unlikely(Rc(ctx->opcode))) {
gen_set_cr1_from_fpscr(ctx);
}
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
}
static void gen_fmrgew(DisasContext *ctx)
@@ -538,9 +493,6 @@ static void gen_fmrgew(DisasContext *ctx)
get_fpr(t0, rA(ctx->opcode));
tcg_gen_deposit_i64(t1, t0, b0, 0, 32);
set_fpr(rD(ctx->opcode), t1);
- tcg_temp_free_i64(b0);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
}
static void gen_fmrgow(DisasContext *ctx)
@@ -559,9 +511,6 @@ static void gen_fmrgow(DisasContext *ctx)
get_fpr(t1, rA(ctx->opcode));
tcg_gen_deposit_i64(t2, t0, t1, 32, 32);
set_fpr(rD(ctx->opcode), t2);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
}
/*** Floating-Point status & ctrl register ***/
@@ -587,153 +536,147 @@ static void gen_mcrfs(DisasContext *ctx)
tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)],
0xf);
- tcg_temp_free(tmp);
tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
/* Only the exception bits (including FX) should be cleared if read */
tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr,
~((0xF << shift) & FP_EX_CLEAR_BITS));
/* FEX and VX need to be updated, so don't set fpscr directly */
- tmask = tcg_const_i32(1 << nibble);
- gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
- tcg_temp_free_i32(tmask);
- tcg_temp_free_i64(tnew_fpscr);
+ tmask = tcg_constant_i32(1 << nibble);
+ gen_helper_store_fpscr(tcg_env, tnew_fpscr, tmask);
}
-/* mffs */
-static void gen_mffs(DisasContext *ctx)
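+/*
+ * Shared plumbing for the MFFS family: place_from_fpscr copies the (masked)
+ * FPSCR into FRT and returns the unmasked value, and store_fpscr_masked
+ * merges new field values back through gen_helper_store_fpscr.
+ */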
+static TCGv_i64 place_from_fpscr(int rt, uint64_t mask)
{
- TCGv_i64 t0;
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
- t0 = tcg_temp_new_i64();
- gen_reset_fpstatus();
- tcg_gen_extu_tl_i64(t0, cpu_fpscr);
- set_fpr(rD(ctx->opcode), t0);
- if (unlikely(Rc(ctx->opcode))) {
- gen_set_cr1_from_fpscr(ctx);
- }
- tcg_temp_free_i64(t0);
+ TCGv_i64 fpscr = tcg_temp_new_i64();
+ TCGv_i64 fpscr_masked = tcg_temp_new_i64();
+
+ tcg_gen_extu_tl_i64(fpscr, cpu_fpscr);
+ tcg_gen_andi_i64(fpscr_masked, fpscr, mask);
+ set_fpr(rt, fpscr_masked);
+
+ return fpscr;
}
-/* mffsl */
-static void gen_mffsl(DisasContext *ctx)
+static void store_fpscr_masked(TCGv_i64 fpscr, uint64_t clear_mask,
+ TCGv_i64 set_mask, uint32_t store_mask)
{
- TCGv_i64 t0;
+ TCGv_i64 fpscr_masked = tcg_temp_new_i64();
+ TCGv_i32 st_mask = tcg_constant_i32(store_mask);
- if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) {
- return gen_mffs(ctx);
- }
+ tcg_gen_andi_i64(fpscr_masked, fpscr, ~clear_mask);
+ tcg_gen_or_i64(fpscr_masked, fpscr_masked, set_mask);
+ gen_helper_store_fpscr(tcg_env, fpscr_masked, st_mask);
+}
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
+static bool trans_MFFS_ISA207(DisasContext *ctx, arg_X_t_rc *a)
+{
+ if (!(ctx->insns_flags2 & PPC2_ISA300)) {
+ /*
+         * Before Power ISA v3.0, MFFS bits 11~15 were reserved; any instruction
+ * with OPCD=63 and XO=583 should be decoded as MFFS.
+ */
+ return trans_MFFS(ctx, a);
}
- t0 = tcg_temp_new_i64();
- gen_reset_fpstatus();
- tcg_gen_extu_tl_i64(t0, cpu_fpscr);
- /* Mask everything except mode, status, and enables. */
- tcg_gen_andi_i64(t0, t0, FP_DRN | FP_STATUS | FP_ENABLES | FP_RN);
- set_fpr(rD(ctx->opcode), t0);
- tcg_temp_free_i64(t0);
+ /*
+ * For Power ISA v3.0+, return false and let the pattern group
+ * select the correct instruction.
+ */
+ return false;
}
-/* mffsce */
-static void gen_mffsce(DisasContext *ctx)
+static bool trans_MFFS(DisasContext *ctx, arg_X_t_rc *a)
{
- TCGv_i64 t0;
- TCGv_i32 mask;
+ REQUIRE_FPU(ctx);
- if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) {
- return gen_mffs(ctx);
+ gen_reset_fpstatus();
+ place_from_fpscr(a->rt, UINT64_MAX);
+ if (a->rc) {
+ gen_set_cr1_from_fpscr(ctx);
}
+ return true;
+}
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
+static bool trans_MFFSCE(DisasContext *ctx, arg_X_t *a)
+{
+ TCGv_i64 fpscr;
- t0 = tcg_temp_new_i64();
+ REQUIRE_FPU(ctx);
gen_reset_fpstatus();
- tcg_gen_extu_tl_i64(t0, cpu_fpscr);
- set_fpr(rD(ctx->opcode), t0);
-
- /* Clear exception enable bits in the FPSCR. */
- tcg_gen_andi_i64(t0, t0, ~FP_ENABLES);
- mask = tcg_const_i32(0x0003);
- gen_helper_store_fpscr(cpu_env, t0, mask);
-
- tcg_temp_free_i32(mask);
- tcg_temp_free_i64(t0);
+ fpscr = place_from_fpscr(a->rt, UINT64_MAX);
+ store_fpscr_masked(fpscr, FP_ENABLES, tcg_constant_i64(0), 0x0003);
+ return true;
}
-static void gen_helper_mffscrn(DisasContext *ctx, TCGv_i64 t1)
+static bool trans_MFFSCRN(DisasContext *ctx, arg_X_tb *a)
{
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i32 mask = tcg_const_i32(0x0001);
+ TCGv_i64 t1, fpscr;
+
+ REQUIRE_FPU(ctx);
+
+ t1 = tcg_temp_new_i64();
+ get_fpr(t1, a->rb);
+ tcg_gen_andi_i64(t1, t1, FP_RN);
gen_reset_fpstatus();
- tcg_gen_extu_tl_i64(t0, cpu_fpscr);
- tcg_gen_andi_i64(t0, t0, FP_DRN | FP_ENABLES | FP_RN);
- set_fpr(rD(ctx->opcode), t0);
+ fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN);
+ store_fpscr_masked(fpscr, FP_RN, t1, 0x0001);
+ return true;
+}
- /* Mask FPSCR value to clear RN. */
- tcg_gen_andi_i64(t0, t0, ~FP_RN);
+static bool trans_MFFSCDRN(DisasContext *ctx, arg_X_tb *a)
+{
+ TCGv_i64 t1, fpscr;
- /* Merge RN into FPSCR value. */
- tcg_gen_or_i64(t0, t0, t1);
+ REQUIRE_FPU(ctx);
- gen_helper_store_fpscr(cpu_env, t0, mask);
+ t1 = tcg_temp_new_i64();
+ get_fpr(t1, a->rb);
+ tcg_gen_andi_i64(t1, t1, FP_DRN);
- tcg_temp_free_i32(mask);
- tcg_temp_free_i64(t0);
+ gen_reset_fpstatus();
+ fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN);
+ store_fpscr_masked(fpscr, FP_DRN, t1, 0x0100);
+ return true;
}
-/* mffscrn */
-static void gen_mffscrn(DisasContext *ctx)
+static bool trans_MFFSCRNI(DisasContext *ctx, arg_X_imm2 *a)
{
- TCGv_i64 t1;
+ TCGv_i64 t1, fpscr;
- if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) {
- return gen_mffs(ctx);
- }
-
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
+ REQUIRE_FPU(ctx);
t1 = tcg_temp_new_i64();
- get_fpr(t1, rB(ctx->opcode));
- /* Mask FRB to get just RN. */
- tcg_gen_andi_i64(t1, t1, FP_RN);
-
- gen_helper_mffscrn(ctx, t1);
+ tcg_gen_movi_i64(t1, a->imm);
- tcg_temp_free_i64(t1);
+ gen_reset_fpstatus();
+ fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN);
+ store_fpscr_masked(fpscr, FP_RN, t1, 0x0001);
+ return true;
}
-/* mffscrni */
-static void gen_mffscrni(DisasContext *ctx)
+static bool trans_MFFSCDRNI(DisasContext *ctx, arg_X_imm3 *a)
{
- TCGv_i64 t1;
+ TCGv_i64 t1, fpscr;
- if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) {
- return gen_mffs(ctx);
- }
+ REQUIRE_FPU(ctx);
- if (unlikely(!ctx->fpu_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_FPU);
- return;
- }
+ t1 = tcg_temp_new_i64();
+ tcg_gen_movi_i64(t1, (uint64_t)a->imm << FPSCR_DRN0);
- t1 = tcg_const_i64((uint64_t)RM(ctx->opcode));
+ gen_reset_fpstatus();
+ fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN);
+ store_fpscr_masked(fpscr, FP_DRN, t1, 0x0100);
+ return true;
+}
- gen_helper_mffscrn(ctx, t1);
+static bool trans_MFFSL(DisasContext *ctx, arg_X_t *a)
+{
+ REQUIRE_FPU(ctx);
- tcg_temp_free_i64(t1);
+ gen_reset_fpstatus();
+ place_from_fpscr(a->rt, FP_DRN | FP_STATUS | FP_ENABLES | FP_NI | FP_RN);
+ return true;
}
/* mtfsb0 */
@@ -748,10 +691,7 @@ static void gen_mtfsb0(DisasContext *ctx)
crb = 31 - crbD(ctx->opcode);
gen_reset_fpstatus();
if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
- TCGv_i32 t0;
- t0 = tcg_const_i32(crb);
- gen_helper_fpscr_clrbit(cpu_env, t0);
- tcg_temp_free_i32(t0);
+ gen_helper_fpscr_clrbit(tcg_env, tcg_constant_i32(crb));
}
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
@@ -769,20 +709,16 @@ static void gen_mtfsb1(DisasContext *ctx)
return;
}
crb = 31 - crbD(ctx->opcode);
- gen_reset_fpstatus();
/* XXX: we pretend we can only do IEEE floating-point computations */
if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
- TCGv_i32 t0;
- t0 = tcg_const_i32(crb);
- gen_helper_fpscr_setbit(cpu_env, t0);
- tcg_temp_free_i32(t0);
+ gen_helper_fpscr_setbit(tcg_env, tcg_constant_i32(crb));
}
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
}
/* We can raise a deferred exception */
- gen_helper_float_check_status(cpu_env);
+ gen_helper_fpscr_check_status(tcg_env);
}
/* mtfsf */
@@ -803,23 +739,22 @@ static void gen_mtfsf(DisasContext *ctx)
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
- gen_reset_fpstatus();
- if (l) {
- t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff);
+ if (!l) {
+ t0 = tcg_constant_i32(flm << (w * 8));
+ } else if (ctx->insns_flags2 & PPC2_ISA205) {
+ t0 = tcg_constant_i32(0xffff);
} else {
- t0 = tcg_const_i32(flm << (w * 8));
+ t0 = tcg_constant_i32(0xff);
}
t1 = tcg_temp_new_i64();
get_fpr(t1, rB(ctx->opcode));
- gen_helper_store_fpscr(cpu_env, t1, t0);
- tcg_temp_free_i32(t0);
+ gen_helper_store_fpscr(tcg_env, t1, t0);
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
}
/* We can raise a deferred exception */
- gen_helper_float_check_status(cpu_env);
- tcg_temp_free_i64(t1);
+ gen_helper_fpscr_check_status(tcg_env);
}
/* mtfsfi */
@@ -840,132 +775,30 @@ static void gen_mtfsfi(DisasContext *ctx)
return;
}
sh = (8 * w) + 7 - bf;
- gen_reset_fpstatus();
- t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
- t1 = tcg_const_i32(1 << sh);
- gen_helper_store_fpscr(cpu_env, t0, t1);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i32(t1);
+ t0 = tcg_constant_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
+ t1 = tcg_constant_i32(1 << sh);
+ gen_helper_store_fpscr(tcg_env, t0, t1);
if (unlikely(Rc(ctx->opcode) != 0)) {
tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
}
/* We can raise a deferred exception */
- gen_helper_float_check_status(cpu_env);
-}
-
-/*** Floating-point load ***/
-#define GEN_LDF(name, ldop, opc, type) \
-static void glue(gen_, name)(DisasContext *ctx) \
-{ \
- TCGv EA; \
- TCGv_i64 t0; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_FLOAT); \
- EA = tcg_temp_new(); \
- t0 = tcg_temp_new_i64(); \
- gen_addr_imm_index(ctx, EA, 0); \
- gen_qemu_##ldop(ctx, t0, EA); \
- set_fpr(rD(ctx->opcode), t0); \
- tcg_temp_free(EA); \
- tcg_temp_free_i64(t0); \
-}
-
-#define GEN_LDUF(name, ldop, opc, type) \
-static void glue(gen_, name##u)(DisasContext *ctx) \
-{ \
- TCGv EA; \
- TCGv_i64 t0; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- if (unlikely(rA(ctx->opcode) == 0)) { \
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_FLOAT); \
- EA = tcg_temp_new(); \
- t0 = tcg_temp_new_i64(); \
- gen_addr_imm_index(ctx, EA, 0); \
- gen_qemu_##ldop(ctx, t0, EA); \
- set_fpr(rD(ctx->opcode), t0); \
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
- tcg_temp_free(EA); \
- tcg_temp_free_i64(t0); \
-}
-
-#define GEN_LDUXF(name, ldop, opc, type) \
-static void glue(gen_, name##ux)(DisasContext *ctx) \
-{ \
- TCGv EA; \
- TCGv_i64 t0; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- t0 = tcg_temp_new_i64(); \
- if (unlikely(rA(ctx->opcode) == 0)) { \
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_FLOAT); \
- EA = tcg_temp_new(); \
- gen_addr_reg_index(ctx, EA); \
- gen_qemu_##ldop(ctx, t0, EA); \
- set_fpr(rD(ctx->opcode), t0); \
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
- tcg_temp_free(EA); \
- tcg_temp_free_i64(t0); \
+ gen_helper_fpscr_check_status(tcg_env);
}
-#define GEN_LDXF(name, ldop, opc2, opc3, type) \
-static void glue(gen_, name##x)(DisasContext *ctx) \
-{ \
- TCGv EA; \
- TCGv_i64 t0; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_FLOAT); \
- EA = tcg_temp_new(); \
- t0 = tcg_temp_new_i64(); \
- gen_addr_reg_index(ctx, EA); \
- gen_qemu_##ldop(ctx, t0, EA); \
- set_fpr(rD(ctx->opcode), t0); \
- tcg_temp_free(EA); \
- tcg_temp_free_i64(t0); \
-}
-
-#define GEN_LDFS(name, ldop, op, type) \
-GEN_LDF(name, ldop, op | 0x20, type); \
-GEN_LDUF(name, ldop, op | 0x21, type); \
-GEN_LDUXF(name, ldop, op | 0x01, type); \
-GEN_LDXF(name, ldop, 0x17, op | 0x00, type)
-
static void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 dest, TCGv addr)
{
TCGv_i32 tmp = tcg_temp_new_i32();
tcg_gen_qemu_ld_i32(tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL));
gen_helper_todouble(dest, tmp);
- tcg_temp_free_i32(tmp);
}
- /* lfd lfdu lfdux lfdx */
-GEN_LDFS(lfd, ld64_i64, 0x12, PPC_FLOAT);
- /* lfs lfsu lfsux lfsx */
-GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT);
-
/* lfdepx (external PID lfdx) */
static void gen_lfdepx(DisasContext *ctx)
{
TCGv EA;
TCGv_i64 t0;
- CHK_SV;
+ CHK_SV(ctx);
if (unlikely(!ctx->fpu_enabled)) {
gen_exception(ctx, POWERPC_EXCP_FPU);
return;
@@ -974,10 +807,8 @@ static void gen_lfdepx(DisasContext *ctx)
EA = tcg_temp_new();
t0 = tcg_temp_new_i64();
gen_addr_reg_index(ctx, EA);
- tcg_gen_qemu_ld_i64(t0, EA, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_Q));
+ tcg_gen_qemu_ld_i64(t0, EA, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UQ));
set_fpr(rD(ctx->opcode), t0);
- tcg_temp_free(EA);
- tcg_temp_free_i64(t0);
}
/* lfdp */
@@ -1010,8 +841,6 @@ static void gen_lfdp(DisasContext *ctx)
gen_qemu_ld64_i64(ctx, t0, EA);
set_fpr(rD(ctx->opcode) + 1, t0);
}
- tcg_temp_free(EA);
- tcg_temp_free_i64(t0);
}
/* lfdpx */
@@ -1044,8 +873,6 @@ static void gen_lfdpx(DisasContext *ctx)
gen_qemu_ld64_i64(ctx, t0, EA);
set_fpr(rD(ctx->opcode) + 1, t0);
}
- tcg_temp_free(EA);
- tcg_temp_free_i64(t0);
}
/* lfiwax */
@@ -1066,9 +893,6 @@ static void gen_lfiwax(DisasContext *ctx)
gen_qemu_ld32s(ctx, t0, EA);
tcg_gen_ext_tl_i64(t1, t0);
set_fpr(rD(ctx->opcode), t1);
- tcg_temp_free(EA);
- tcg_temp_free(t0);
- tcg_temp_free_i64(t1);
}
/* lfiwzx */
@@ -1086,75 +910,6 @@ static void gen_lfiwzx(DisasContext *ctx)
gen_addr_reg_index(ctx, EA);
gen_qemu_ld32u_i64(ctx, t0, EA);
set_fpr(rD(ctx->opcode), t0);
- tcg_temp_free(EA);
- tcg_temp_free_i64(t0);
-}
-/*** Floating-point store ***/
-#define GEN_STF(name, stop, opc, type) \
-static void glue(gen_, name)(DisasContext *ctx) \
-{ \
- TCGv EA; \
- TCGv_i64 t0; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_FLOAT); \
- EA = tcg_temp_new(); \
- t0 = tcg_temp_new_i64(); \
- gen_addr_imm_index(ctx, EA, 0); \
- get_fpr(t0, rS(ctx->opcode)); \
- gen_qemu_##stop(ctx, t0, EA); \
- tcg_temp_free(EA); \
- tcg_temp_free_i64(t0); \
-}
-
-#define GEN_STUF(name, stop, opc, type) \
-static void glue(gen_, name##u)(DisasContext *ctx) \
-{ \
- TCGv EA; \
- TCGv_i64 t0; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- if (unlikely(rA(ctx->opcode) == 0)) { \
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_FLOAT); \
- EA = tcg_temp_new(); \
- t0 = tcg_temp_new_i64(); \
- gen_addr_imm_index(ctx, EA, 0); \
- get_fpr(t0, rS(ctx->opcode)); \
- gen_qemu_##stop(ctx, t0, EA); \
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
- tcg_temp_free(EA); \
- tcg_temp_free_i64(t0); \
-}
-
-#define GEN_STUXF(name, stop, opc, type) \
-static void glue(gen_, name##ux)(DisasContext *ctx) \
-{ \
- TCGv EA; \
- TCGv_i64 t0; \
- if (unlikely(!ctx->fpu_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_FPU); \
- return; \
- } \
- if (unlikely(rA(ctx->opcode) == 0)) { \
- gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
- return; \
- } \
- gen_set_access_type(ctx, ACCESS_FLOAT); \
- EA = tcg_temp_new(); \
- t0 = tcg_temp_new_i64(); \
- gen_addr_reg_index(ctx, EA); \
- get_fpr(t0, rS(ctx->opcode)); \
- gen_qemu_##stop(ctx, t0, EA); \
- tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
- tcg_temp_free(EA); \
- tcg_temp_free_i64(t0); \
}
#define GEN_STXF(name, stop, opc2, opc3, type) \
@@ -1172,35 +927,21 @@ static void glue(gen_, name##x)(DisasContext *ctx) \
gen_addr_reg_index(ctx, EA); \
get_fpr(t0, rS(ctx->opcode)); \
gen_qemu_##stop(ctx, t0, EA); \
- tcg_temp_free(EA); \
- tcg_temp_free_i64(t0); \
}
-#define GEN_STFS(name, stop, op, type) \
-GEN_STF(name, stop, op | 0x20, type); \
-GEN_STUF(name, stop, op | 0x21, type); \
-GEN_STUXF(name, stop, op | 0x01, type); \
-GEN_STXF(name, stop, 0x17, op | 0x00, type)
-
static void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr)
{
TCGv_i32 tmp = tcg_temp_new_i32();
gen_helper_tosingle(tmp, src);
tcg_gen_qemu_st_i32(tmp, addr, ctx->mem_idx, DEF_MEMOP(MO_UL));
- tcg_temp_free_i32(tmp);
}
-/* stfd stfdu stfdux stfdx */
-GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT);
-/* stfs stfsu stfsux stfsx */
-GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT);
-
 /* stfdepx (external PID stfdx) */
static void gen_stfdepx(DisasContext *ctx)
{
TCGv EA;
TCGv_i64 t0;
- CHK_SV;
+ CHK_SV(ctx);
if (unlikely(!ctx->fpu_enabled)) {
gen_exception(ctx, POWERPC_EXCP_FPU);
return;
@@ -1210,9 +951,7 @@ static void gen_stfdepx(DisasContext *ctx)
t0 = tcg_temp_new_i64();
gen_addr_reg_index(ctx, EA);
get_fpr(t0, rD(ctx->opcode));
- tcg_gen_qemu_st_i64(t0, EA, PPC_TLB_EPID_STORE, DEF_MEMOP(MO_Q));
- tcg_temp_free(EA);
- tcg_temp_free_i64(t0);
+ tcg_gen_qemu_st_i64(t0, EA, PPC_TLB_EPID_STORE, DEF_MEMOP(MO_UQ));
}
/* stfdp */
@@ -1245,8 +984,6 @@ static void gen_stfdp(DisasContext *ctx)
get_fpr(t0, rD(ctx->opcode) + 1);
gen_qemu_st64_i64(ctx, t0, EA);
}
- tcg_temp_free(EA);
- tcg_temp_free_i64(t0);
}
/* stfdpx */
@@ -1279,8 +1016,6 @@ static void gen_stfdpx(DisasContext *ctx)
get_fpr(t0, rD(ctx->opcode) + 1);
gen_qemu_st64_i64(ctx, t0, EA);
}
- tcg_temp_free(EA);
- tcg_temp_free_i64(t0);
}
/* Optional: */
@@ -1289,189 +1024,92 @@ static inline void gen_qemu_st32fiw(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
TCGv t0 = tcg_temp_new();
tcg_gen_trunc_i64_tl(t0, arg1),
gen_qemu_st32(ctx, t0, arg2);
- tcg_temp_free(t0);
}
/* stfiwx */
GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX);
-/* POWER2 specific instructions */
-/* Quad manipulation (load/store two floats at a time) */
-
-/* lfq */
-static void gen_lfq(DisasContext *ctx)
-{
- int rd = rD(ctx->opcode);
- TCGv t0;
- TCGv_i64 t1;
- gen_set_access_type(ctx, ACCESS_FLOAT);
- t0 = tcg_temp_new();
- t1 = tcg_temp_new_i64();
- gen_addr_imm_index(ctx, t0, 0);
- gen_qemu_ld64_i64(ctx, t1, t0);
- set_fpr(rd, t1);
- gen_addr_add(ctx, t0, t0, 8);
- gen_qemu_ld64_i64(ctx, t1, t0);
- set_fpr((rd + 1) % 32, t1);
- tcg_temp_free(t0);
- tcg_temp_free_i64(t1);
-}
-
-/* lfqu */
-static void gen_lfqu(DisasContext *ctx)
+/* Floating-point Load/Store Instructions */
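+/*
+ * Common body for the load/store patterns below: displ is the D-form/prefixed
+ * immediate or (RB) for the X-forms, "update" writes the effective address
+ * back to RA (invalid when RA is 0), and "single" converts through
+ * gen_qemu_ld32fs/gen_qemu_st32fs.
+ */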
+static bool do_lsfpsd(DisasContext *ctx, int rt, int ra, TCGv displ,
+ bool update, bool store, bool single)
{
- int ra = rA(ctx->opcode);
- int rd = rD(ctx->opcode);
- TCGv t0, t1;
- TCGv_i64 t2;
- gen_set_access_type(ctx, ACCESS_FLOAT);
- t0 = tcg_temp_new();
- t1 = tcg_temp_new();
- t2 = tcg_temp_new_i64();
- gen_addr_imm_index(ctx, t0, 0);
- gen_qemu_ld64_i64(ctx, t2, t0);
- set_fpr(rd, t2);
- gen_addr_add(ctx, t1, t0, 8);
- gen_qemu_ld64_i64(ctx, t2, t1);
- set_fpr((rd + 1) % 32, t2);
- if (ra != 0) {
- tcg_gen_mov_tl(cpu_gpr[ra], t0);
+ TCGv ea;
+ TCGv_i64 t0;
+ REQUIRE_INSNS_FLAGS(ctx, FLOAT);
+ REQUIRE_FPU(ctx);
+ if (update && ra == 0) {
+ gen_invalid(ctx);
+ return true;
}
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free_i64(t2);
-}
-
-/* lfqux */
-static void gen_lfqux(DisasContext *ctx)
-{
- int ra = rA(ctx->opcode);
- int rd = rD(ctx->opcode);
gen_set_access_type(ctx, ACCESS_FLOAT);
- TCGv t0, t1;
- TCGv_i64 t2;
- t2 = tcg_temp_new_i64();
- t0 = tcg_temp_new();
- gen_addr_reg_index(ctx, t0);
- gen_qemu_ld64_i64(ctx, t2, t0);
- set_fpr(rd, t2);
- t1 = tcg_temp_new();
- gen_addr_add(ctx, t1, t0, 8);
- gen_qemu_ld64_i64(ctx, t2, t1);
- set_fpr((rd + 1) % 32, t2);
- tcg_temp_free(t1);
- if (ra != 0) {
- tcg_gen_mov_tl(cpu_gpr[ra], t0);
+ t0 = tcg_temp_new_i64();
+ ea = do_ea_calc(ctx, ra, displ);
+ if (store) {
+ get_fpr(t0, rt);
+ if (single) {
+ gen_qemu_st32fs(ctx, t0, ea);
+ } else {
+ gen_qemu_st64_i64(ctx, t0, ea);
+ }
+ } else {
+ if (single) {
+ gen_qemu_ld32fs(ctx, t0, ea);
+ } else {
+ gen_qemu_ld64_i64(ctx, t0, ea);
+ }
+ set_fpr(rt, t0);
}
- tcg_temp_free(t0);
- tcg_temp_free_i64(t2);
-}
-
-/* lfqx */
-static void gen_lfqx(DisasContext *ctx)
-{
- int rd = rD(ctx->opcode);
- TCGv t0;
- TCGv_i64 t1;
- gen_set_access_type(ctx, ACCESS_FLOAT);
- t0 = tcg_temp_new();
- t1 = tcg_temp_new_i64();
- gen_addr_reg_index(ctx, t0);
- gen_qemu_ld64_i64(ctx, t1, t0);
- set_fpr(rd, t1);
- gen_addr_add(ctx, t0, t0, 8);
- gen_qemu_ld64_i64(ctx, t1, t0);
- set_fpr((rd + 1) % 32, t1);
- tcg_temp_free(t0);
- tcg_temp_free_i64(t1);
+ if (update) {
+ tcg_gen_mov_tl(cpu_gpr[ra], ea);
+ }
+ return true;
}
-/* stfq */
-static void gen_stfq(DisasContext *ctx)
+static bool do_lsfp_D(DisasContext *ctx, arg_D *a, bool update, bool store,
+ bool single)
{
- int rd = rD(ctx->opcode);
- TCGv t0;
- TCGv_i64 t1;
- gen_set_access_type(ctx, ACCESS_FLOAT);
- t0 = tcg_temp_new();
- t1 = tcg_temp_new_i64();
- gen_addr_imm_index(ctx, t0, 0);
- get_fpr(t1, rd);
- gen_qemu_st64_i64(ctx, t1, t0);
- gen_addr_add(ctx, t0, t0, 8);
- get_fpr(t1, (rd + 1) % 32);
- gen_qemu_st64_i64(ctx, t1, t0);
- tcg_temp_free(t0);
- tcg_temp_free_i64(t1);
+ return do_lsfpsd(ctx, a->rt, a->ra, tcg_constant_tl(a->si), update, store,
+ single);
}
-/* stfqu */
-static void gen_stfqu(DisasContext *ctx)
+static bool do_lsfp_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool update,
+ bool store, bool single)
{
- int ra = rA(ctx->opcode);
- int rd = rD(ctx->opcode);
- TCGv t0, t1;
- TCGv_i64 t2;
- gen_set_access_type(ctx, ACCESS_FLOAT);
- t2 = tcg_temp_new_i64();
- t0 = tcg_temp_new();
- gen_addr_imm_index(ctx, t0, 0);
- get_fpr(t2, rd);
- gen_qemu_st64_i64(ctx, t2, t0);
- t1 = tcg_temp_new();
- gen_addr_add(ctx, t1, t0, 8);
- get_fpr(t2, (rd + 1) % 32);
- gen_qemu_st64_i64(ctx, t2, t1);
- tcg_temp_free(t1);
- if (ra != 0) {
- tcg_gen_mov_tl(cpu_gpr[ra], t0);
+ arg_D d;
+ if (!resolve_PLS_D(ctx, &d, a)) {
+ return true;
}
- tcg_temp_free(t0);
- tcg_temp_free_i64(t2);
+ return do_lsfp_D(ctx, &d, update, store, single);
}
-/* stfqux */
-static void gen_stfqux(DisasContext *ctx)
+static bool do_lsfp_X(DisasContext *ctx, arg_X *a, bool update,
+ bool store, bool single)
{
- int ra = rA(ctx->opcode);
- int rd = rD(ctx->opcode);
- TCGv t0, t1;
- TCGv_i64 t2;
- gen_set_access_type(ctx, ACCESS_FLOAT);
- t2 = tcg_temp_new_i64();
- t0 = tcg_temp_new();
- gen_addr_reg_index(ctx, t0);
- get_fpr(t2, rd);
- gen_qemu_st64_i64(ctx, t2, t0);
- t1 = tcg_temp_new();
- gen_addr_add(ctx, t1, t0, 8);
- get_fpr(t2, (rd + 1) % 32);
- gen_qemu_st64_i64(ctx, t2, t1);
- tcg_temp_free(t1);
- if (ra != 0) {
- tcg_gen_mov_tl(cpu_gpr[ra], t0);
- }
- tcg_temp_free(t0);
- tcg_temp_free_i64(t2);
+ return do_lsfpsd(ctx, a->rt, a->ra, cpu_gpr[a->rb], update, store, single);
}
-/* stfqx */
-static void gen_stfqx(DisasContext *ctx)
-{
- int rd = rD(ctx->opcode);
- TCGv t0;
- TCGv_i64 t1;
- gen_set_access_type(ctx, ACCESS_FLOAT);
- t1 = tcg_temp_new_i64();
- t0 = tcg_temp_new();
- gen_addr_reg_index(ctx, t0);
- get_fpr(t1, rd);
- gen_qemu_st64_i64(ctx, t1, t0);
- gen_addr_add(ctx, t0, t0, 8);
- get_fpr(t1, (rd + 1) % 32);
- gen_qemu_st64_i64(ctx, t1, t0);
- tcg_temp_free(t0);
- tcg_temp_free_i64(t1);
-}
+TRANS(LFS, do_lsfp_D, false, false, true)
+TRANS(LFSU, do_lsfp_D, true, false, true)
+TRANS(LFSX, do_lsfp_X, false, false, true)
+TRANS(LFSUX, do_lsfp_X, true, false, true)
+TRANS(PLFS, do_lsfp_PLS_D, false, false, true)
+
+TRANS(LFD, do_lsfp_D, false, false, false)
+TRANS(LFDU, do_lsfp_D, true, false, false)
+TRANS(LFDX, do_lsfp_X, false, false, false)
+TRANS(LFDUX, do_lsfp_X, true, false, false)
+TRANS(PLFD, do_lsfp_PLS_D, false, false, false)
+
+TRANS(STFS, do_lsfp_D, false, true, true)
+TRANS(STFSU, do_lsfp_D, true, true, true)
+TRANS(STFSX, do_lsfp_X, false, true, true)
+TRANS(STFSUX, do_lsfp_X, true, true, true)
+TRANS(PSTFS, do_lsfp_PLS_D, false, true, true)
+
+TRANS(STFD, do_lsfp_D, false, true, false)
+TRANS(STFDU, do_lsfp_D, true, true, false)
+TRANS(STFDX, do_lsfp_X, false, true, false)
+TRANS(STFDUX, do_lsfp_X, true, true, false)
+TRANS(PSTFD, do_lsfp_PLS_D, false, true, false)
#undef _GEN_FLOAT_ACB
#undef GEN_FLOAT_ACB
diff --git a/target/ppc/translate/fp-ops.c.inc b/target/ppc/translate/fp-ops.c.inc
index 88fab65628..d4c6c4bed1 100644
--- a/target/ppc/translate/fp-ops.c.inc
+++ b/target/ppc/translate/fp-ops.c.inc
@@ -24,7 +24,6 @@ GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT),
GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT),
GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES),
GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE),
-_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL),
GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT),
GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT),
GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT),
@@ -50,50 +49,19 @@ GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT),
GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT),
GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT),
-#define GEN_LDF(name, ldop, opc, type) \
-GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type),
-#define GEN_LDUF(name, ldop, opc, type) \
-GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type),
-#define GEN_LDUXF(name, ldop, opc, type) \
-GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type),
-#define GEN_LDXF(name, ldop, opc2, opc3, type) \
-GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type),
-#define GEN_LDFS(name, ldop, op, type) \
-GEN_LDF(name, ldop, op | 0x20, type) \
-GEN_LDUF(name, ldop, op | 0x21, type) \
-GEN_LDUXF(name, ldop, op | 0x01, type) \
-GEN_LDXF(name, ldop, 0x17, op | 0x00, type)
-
-GEN_LDFS(lfd, ld64, 0x12, PPC_FLOAT)
-GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT)
GEN_HANDLER_E(lfdepx, 0x1F, 0x1F, 0x12, 0x00000001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER_E(lfiwax, 0x1f, 0x17, 0x1a, 0x00000001, PPC_NONE, PPC2_ISA205),
GEN_HANDLER_E(lfiwzx, 0x1f, 0x17, 0x1b, 0x1, PPC_NONE, PPC2_FP_CVT_ISA206),
GEN_HANDLER_E(lfdpx, 0x1F, 0x17, 0x18, 0x00200001, PPC_NONE, PPC2_ISA205),
-#define GEN_STF(name, stop, opc, type) \
-GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type),
-#define GEN_STUF(name, stop, opc, type) \
-GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type),
-#define GEN_STUXF(name, stop, opc, type) \
-GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type),
#define GEN_STXF(name, stop, opc2, opc3, type) \
GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type),
-#define GEN_STFS(name, stop, op, type) \
-GEN_STF(name, stop, op | 0x20, type) \
-GEN_STUF(name, stop, op | 0x21, type) \
-GEN_STUXF(name, stop, op | 0x01, type) \
-GEN_STXF(name, stop, 0x17, op | 0x00, type)
-GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT)
-GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT)
GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX)
GEN_HANDLER_E(stfdepx, 0x1F, 0x1F, 0x16, 0x00000001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER_E(stfdpx, 0x1F, 0x17, 0x1C, 0x00200001, PPC_NONE, PPC2_ISA205),
GEN_HANDLER(frsqrtes, 0x3B, 0x1A, 0xFF, 0x001F07C0, PPC_FLOAT_FRSQRTES),
-GEN_HANDLER(fsqrt, 0x3F, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT),
-GEN_HANDLER(fsqrts, 0x3B, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT),
GEN_HANDLER(fcmpo, 0x3F, 0x00, 0x01, 0x00600001, PPC_FLOAT),
GEN_HANDLER(fcmpu, 0x3F, 0x00, 0x00, 0x00600001, PPC_FLOAT),
GEN_HANDLER(fabs, 0x3F, 0x08, 0x08, 0x001F0000, PPC_FLOAT),
@@ -104,15 +72,6 @@ GEN_HANDLER_E(fcpsgn, 0x3F, 0x08, 0x00, 0x00000000, PPC_NONE, PPC2_ISA205),
GEN_HANDLER_E(fmrgew, 0x3F, 0x06, 0x1E, 0x00000001, PPC_NONE, PPC2_VSX207),
GEN_HANDLER_E(fmrgow, 0x3F, 0x06, 0x1A, 0x00000001, PPC_NONE, PPC2_VSX207),
GEN_HANDLER(mcrfs, 0x3F, 0x00, 0x02, 0x0063F801, PPC_FLOAT),
-GEN_HANDLER_E_2(mffs, 0x3F, 0x07, 0x12, 0x00, 0x00000000, PPC_FLOAT, PPC_NONE),
-GEN_HANDLER_E_2(mffsce, 0x3F, 0x07, 0x12, 0x01, 0x00000000, PPC_FLOAT,
- PPC2_ISA300),
-GEN_HANDLER_E_2(mffsl, 0x3F, 0x07, 0x12, 0x18, 0x00000000, PPC_FLOAT,
- PPC2_ISA300),
-GEN_HANDLER_E_2(mffscrn, 0x3F, 0x07, 0x12, 0x16, 0x00000000, PPC_FLOAT,
- PPC_NONE),
-GEN_HANDLER_E_2(mffscrni, 0x3F, 0x07, 0x12, 0x17, 0x00000000, PPC_FLOAT,
- PPC_NONE),
GEN_HANDLER(mtfsb0, 0x3F, 0x06, 0x02, 0x001FF800, PPC_FLOAT),
GEN_HANDLER(mtfsb1, 0x3F, 0x06, 0x01, 0x001FF800, PPC_FLOAT),
GEN_HANDLER(mtfsf, 0x3F, 0x07, 0x16, 0x00000000, PPC_FLOAT),
diff --git a/target/ppc/translate/processor-ctrl-impl.c.inc b/target/ppc/translate/processor-ctrl-impl.c.inc
new file mode 100644
index 0000000000..0142801985
--- /dev/null
+++ b/target/ppc/translate/processor-ctrl-impl.c.inc
@@ -0,0 +1,105 @@
+/*
+ * Power ISA decode for Processor Control instructions
+ *
+ * Copyright (c) 2022 Instituto de Pesquisas Eldorado (eldorado.org.br)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Processor Control Instructions
+ */
+
+static bool trans_MSGCLR(DisasContext *ctx, arg_X_rb *a)
+{
+ if (!(ctx->insns_flags2 & PPC2_ISA207S)) {
+ /*
+ * Before Power ISA 2.07, processor control instructions were only
+ * implemented in the "Embedded.Processor Control" category.
+ */
+ REQUIRE_INSNS_FLAGS2(ctx, PRCNTL);
+ }
+
+ REQUIRE_HV(ctx);
+
+#if !defined(CONFIG_USER_ONLY)
+ if (is_book3s_arch2x(ctx)) {
+ gen_helper_book3s_msgclr(tcg_env, cpu_gpr[a->rb]);
+ } else {
+ gen_helper_msgclr(tcg_env, cpu_gpr[a->rb]);
+ }
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
+
+static bool trans_MSGSND(DisasContext *ctx, arg_X_rb *a)
+{
+ if (!(ctx->insns_flags2 & PPC2_ISA207S)) {
+ /*
+ * Before Power ISA 2.07, processor control instructions were only
+ * implemented in the "Embedded.Processor Control" category.
+ */
+ REQUIRE_INSNS_FLAGS2(ctx, PRCNTL);
+ }
+
+ REQUIRE_HV(ctx);
+
+#if !defined(CONFIG_USER_ONLY)
+ if (is_book3s_arch2x(ctx)) {
+ gen_helper_book3s_msgsnd(cpu_gpr[a->rb]);
+ } else {
+ gen_helper_msgsnd(cpu_gpr[a->rb]);
+ }
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
+
+static bool trans_MSGCLRP(DisasContext *ctx, arg_X_rb *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS2(ctx, ISA207S);
+ REQUIRE_SV(ctx);
+#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64)
+ gen_helper_book3s_msgclrp(tcg_env, cpu_gpr[a->rb]);
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
+
+static bool trans_MSGSNDP(DisasContext *ctx, arg_X_rb *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS2(ctx, ISA207S);
+ REQUIRE_SV(ctx);
+#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64)
+ gen_helper_book3s_msgsndp(tcg_env, cpu_gpr[a->rb]);
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
+
+static bool trans_MSGSYNC(DisasContext *ctx, arg_MSGSYNC *a)
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_HV(ctx);
+
+ /* interpreted as no-op */
+ return true;
+}
diff --git a/target/ppc/translate/spe-impl.c.inc b/target/ppc/translate/spe-impl.c.inc
index 2e6e799a25..454dac823e 100644
--- a/target/ppc/translate/spe-impl.c.inc
+++ b/target/ppc/translate/spe-impl.c.inc
@@ -22,8 +22,7 @@ static inline void gen_evmra(DisasContext *ctx)
cpu_gprh[rA(ctx->opcode)]);
/* spe_acc := tmp */
- tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc));
- tcg_temp_free_i64(tmp);
+ tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUPPCState, spe_acc));
/* rD := rA */
tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
@@ -96,8 +95,6 @@ static inline void gen_##name(DisasContext *ctx) \
tcg_gen_trunc_tl_i32(t0, cpu_gprh[rA(ctx->opcode)]); \
tcg_opi(t0, t0, rB(ctx->opcode)); \
tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \
- \
- tcg_temp_free_i32(t0); \
}
GEN_SPEOP_TCG_LOGIC_IMM2(evslwi, tcg_gen_shli_i32);
GEN_SPEOP_TCG_LOGIC_IMM2(evsrwiu, tcg_gen_shri_i32);
@@ -122,8 +119,6 @@ static inline void gen_##name(DisasContext *ctx) \
tcg_gen_trunc_tl_i32(t0, cpu_gprh[rA(ctx->opcode)]); \
tcg_op(t0, t0); \
tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \
- \
- tcg_temp_free_i32(t0); \
}
GEN_SPEOP_ARITH1(evabs, tcg_gen_abs_i32);
@@ -159,16 +154,13 @@ static inline void gen_##name(DisasContext *ctx) \
tcg_gen_trunc_tl_i32(t1, cpu_gprh[rB(ctx->opcode)]); \
tcg_op(t0, t0, t1); \
tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \
- \
- tcg_temp_free_i32(t0); \
- tcg_temp_free_i32(t1); \
}
static inline void gen_op_evsrwu(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
TCGLabel *l1 = gen_new_label();
TCGLabel *l2 = gen_new_label();
- TCGv_i32 t0 = tcg_temp_local_new_i32();
+ TCGv_i32 t0 = tcg_temp_new_i32();
/* No error here: 6 bits are used */
tcg_gen_andi_i32(t0, arg2, 0x3F);
@@ -178,14 +170,13 @@ static inline void gen_op_evsrwu(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
gen_set_label(l1);
tcg_gen_movi_i32(ret, 0);
gen_set_label(l2);
- tcg_temp_free_i32(t0);
}
GEN_SPEOP_ARITH2(evsrwu, gen_op_evsrwu);
static inline void gen_op_evsrws(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
TCGLabel *l1 = gen_new_label();
TCGLabel *l2 = gen_new_label();
- TCGv_i32 t0 = tcg_temp_local_new_i32();
+ TCGv_i32 t0 = tcg_temp_new_i32();
/* No error here: 6 bits are used */
tcg_gen_andi_i32(t0, arg2, 0x3F);
@@ -195,14 +186,13 @@ static inline void gen_op_evsrws(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
gen_set_label(l1);
tcg_gen_movi_i32(ret, 0);
gen_set_label(l2);
- tcg_temp_free_i32(t0);
}
GEN_SPEOP_ARITH2(evsrws, gen_op_evsrws);
static inline void gen_op_evslw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
TCGLabel *l1 = gen_new_label();
TCGLabel *l2 = gen_new_label();
- TCGv_i32 t0 = tcg_temp_local_new_i32();
+ TCGv_i32 t0 = tcg_temp_new_i32();
/* No error here: 6 bits are used */
tcg_gen_andi_i32(t0, arg2, 0x3F);
@@ -212,7 +202,6 @@ static inline void gen_op_evslw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
gen_set_label(l1);
tcg_gen_movi_i32(ret, 0);
gen_set_label(l2);
- tcg_temp_free_i32(t0);
}
GEN_SPEOP_ARITH2(evslw, gen_op_evslw);
static inline void gen_op_evrlw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
@@ -220,7 +209,6 @@ static inline void gen_op_evrlw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
TCGv_i32 t0 = tcg_temp_new_i32();
tcg_gen_andi_i32(t0, arg2, 0x1F);
tcg_gen_rotl_i32(ret, arg1, t0);
- tcg_temp_free_i32(t0);
}
GEN_SPEOP_ARITH2(evrlw, gen_op_evrlw);
static inline void gen_evmergehi(DisasContext *ctx)
@@ -257,8 +245,6 @@ static inline void gen_##name(DisasContext *ctx) \
tcg_gen_trunc_tl_i32(t0, cpu_gprh[rB(ctx->opcode)]); \
tcg_op(t0, t0, rA(ctx->opcode)); \
tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \
- \
- tcg_temp_free_i32(t0); \
}
GEN_SPEOP_ARITH_IMM2(evaddiw, tcg_gen_addi_i32);
GEN_SPEOP_ARITH_IMM2(evsubifw, tcg_gen_subi_i32);
@@ -341,7 +327,6 @@ static inline void gen_evmergelohi(DisasContext *ctx)
tcg_gen_mov_tl(tmp, cpu_gpr[rA(ctx->opcode)]);
tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], tmp);
- tcg_temp_free(tmp);
} else {
tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
@@ -378,7 +363,7 @@ static inline void gen_evsel(DisasContext *ctx)
TCGLabel *l2 = gen_new_label();
TCGLabel *l3 = gen_new_label();
TCGLabel *l4 = gen_new_label();
- TCGv_i32 t0 = tcg_temp_local_new_i32();
+ TCGv_i32 t0 = tcg_temp_new_i32();
tcg_gen_andi_i32(t0, cpu_crf[ctx->opcode & 0x07], 1 << 3);
tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
@@ -394,7 +379,6 @@ static inline void gen_evsel(DisasContext *ctx)
gen_set_label(l3);
tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
gen_set_label(l4);
- tcg_temp_free_i32(t0);
}
static void gen_evsel0(DisasContext *ctx)
@@ -456,9 +440,6 @@ static inline void gen_evmwumi(DisasContext *ctx)
tcg_gen_mul_i64(t0, t0, t1); /* t0 := rA * rB */
gen_store_gpr64(rD(ctx->opcode), t0); /* rD := t0 */
-
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
}
static inline void gen_evmwumia(DisasContext *ctx)
@@ -476,8 +457,7 @@ static inline void gen_evmwumia(DisasContext *ctx)
/* acc := rD */
gen_load_gpr64(tmp, rD(ctx->opcode));
- tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc));
- tcg_temp_free_i64(tmp);
+ tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUPPCState, spe_acc));
}
static inline void gen_evmwumiaa(DisasContext *ctx)
@@ -499,19 +479,16 @@ static inline void gen_evmwumiaa(DisasContext *ctx)
gen_load_gpr64(tmp, rD(ctx->opcode));
/* Load acc */
- tcg_gen_ld_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
+ tcg_gen_ld_i64(acc, tcg_env, offsetof(CPUPPCState, spe_acc));
/* acc := tmp + acc */
tcg_gen_add_i64(acc, acc, tmp);
/* Store acc */
- tcg_gen_st_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
+ tcg_gen_st_i64(acc, tcg_env, offsetof(CPUPPCState, spe_acc));
/* rD := acc */
gen_store_gpr64(rD(ctx->opcode), acc);
-
- tcg_temp_free_i64(acc);
- tcg_temp_free_i64(tmp);
}
static inline void gen_evmwsmi(DisasContext *ctx)
@@ -535,9 +512,6 @@ static inline void gen_evmwsmi(DisasContext *ctx)
tcg_gen_mul_i64(t0, t0, t1); /* t0 := rA * rB */
gen_store_gpr64(rD(ctx->opcode), t0); /* rD := t0 */
-
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
}
static inline void gen_evmwsmia(DisasContext *ctx)
@@ -555,9 +529,7 @@ static inline void gen_evmwsmia(DisasContext *ctx)
/* acc := rD */
gen_load_gpr64(tmp, rD(ctx->opcode));
- tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc));
-
- tcg_temp_free_i64(tmp);
+ tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUPPCState, spe_acc));
}
static inline void gen_evmwsmiaa(DisasContext *ctx)
@@ -579,19 +551,16 @@ static inline void gen_evmwsmiaa(DisasContext *ctx)
gen_load_gpr64(tmp, rD(ctx->opcode));
/* Load acc */
- tcg_gen_ld_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
+ tcg_gen_ld_i64(acc, tcg_env, offsetof(CPUPPCState, spe_acc));
/* acc := tmp + acc */
tcg_gen_add_i64(acc, acc, tmp);
/* Store acc */
- tcg_gen_st_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
+ tcg_gen_st_i64(acc, tcg_env, offsetof(CPUPPCState, spe_acc));
/* rD := acc */
gen_store_gpr64(rD(ctx->opcode), acc);
-
- tcg_temp_free_i64(acc);
- tcg_temp_free_i64(tmp);
}
GEN_SPE(evaddw, speundef, 0x00, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); ////
@@ -644,7 +613,6 @@ static inline void gen_op_evldd(DisasContext *ctx, TCGv addr)
TCGv_i64 t0 = tcg_temp_new_i64();
gen_qemu_ld64_i64(ctx, t0, addr);
gen_store_gpr64(rD(ctx->opcode), t0);
- tcg_temp_free_i64(t0);
}
static inline void gen_op_evldw(DisasContext *ctx, TCGv addr)
@@ -668,7 +636,6 @@ static inline void gen_op_evldh(DisasContext *ctx, TCGv addr)
gen_addr_add(ctx, addr, addr, 2);
gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
- tcg_temp_free(t0);
}
static inline void gen_op_evlhhesplat(DisasContext *ctx, TCGv addr)
@@ -678,7 +645,6 @@ static inline void gen_op_evlhhesplat(DisasContext *ctx, TCGv addr)
tcg_gen_shli_tl(t0, t0, 16);
tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
- tcg_temp_free(t0);
}
static inline void gen_op_evlhhousplat(DisasContext *ctx, TCGv addr)
@@ -687,7 +653,6 @@ static inline void gen_op_evlhhousplat(DisasContext *ctx, TCGv addr)
gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
- tcg_temp_free(t0);
}
static inline void gen_op_evlhhossplat(DisasContext *ctx, TCGv addr)
@@ -696,7 +661,6 @@ static inline void gen_op_evlhhossplat(DisasContext *ctx, TCGv addr)
gen_qemu_ld16s(ctx, t0, addr);
tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
- tcg_temp_free(t0);
}
static inline void gen_op_evlwhe(DisasContext *ctx, TCGv addr)
@@ -707,7 +671,6 @@ static inline void gen_op_evlwhe(DisasContext *ctx, TCGv addr)
gen_addr_add(ctx, addr, addr, 2);
gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
- tcg_temp_free(t0);
}
static inline void gen_op_evlwhou(DisasContext *ctx, TCGv addr)
@@ -730,7 +693,6 @@ static inline void gen_op_evlwwsplat(DisasContext *ctx, TCGv addr)
gen_qemu_ld32u(ctx, t0, addr);
tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
- tcg_temp_free(t0);
}
static inline void gen_op_evlwhsplat(DisasContext *ctx, TCGv addr)
@@ -743,7 +705,6 @@ static inline void gen_op_evlwhsplat(DisasContext *ctx, TCGv addr)
gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
- tcg_temp_free(t0);
}
static inline void gen_op_evstdd(DisasContext *ctx, TCGv addr)
@@ -751,7 +712,6 @@ static inline void gen_op_evstdd(DisasContext *ctx, TCGv addr)
TCGv_i64 t0 = tcg_temp_new_i64();
gen_load_gpr64(t0, rS(ctx->opcode));
gen_qemu_st64_i64(ctx, t0, addr);
- tcg_temp_free_i64(t0);
}
static inline void gen_op_evstdw(DisasContext *ctx, TCGv addr)
@@ -771,7 +731,6 @@ static inline void gen_op_evstdh(DisasContext *ctx, TCGv addr)
gen_addr_add(ctx, addr, addr, 2);
tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16);
gen_qemu_st16(ctx, t0, addr);
- tcg_temp_free(t0);
gen_addr_add(ctx, addr, addr, 2);
gen_qemu_st16(ctx, cpu_gpr[rS(ctx->opcode)], addr);
}
@@ -784,7 +743,6 @@ static inline void gen_op_evstwhe(DisasContext *ctx, TCGv addr)
gen_addr_add(ctx, addr, addr, 2);
tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16);
gen_qemu_st16(ctx, t0, addr);
- tcg_temp_free(t0);
}
static inline void gen_op_evstwho(DisasContext *ctx, TCGv addr)
@@ -820,7 +778,6 @@ static void glue(gen_, name)(DisasContext *ctx) \
gen_addr_reg_index(ctx, t0); \
} \
gen_op_##name(ctx, t0); \
- tcg_temp_free(t0); \
}
GEN_SPEOP_LDST(evldd, 0x00, 3);
@@ -921,9 +878,8 @@ static inline void gen_##name(DisasContext *ctx) \
{ \
TCGv_i32 t0 = tcg_temp_new_i32(); \
tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]); \
- gen_helper_##name(t0, cpu_env, t0); \
+ gen_helper_##name(t0, tcg_env, t0); \
tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
- tcg_temp_free_i32(t0); \
}
#define GEN_SPEFPUOP_CONV_32_64(name) \
static inline void gen_##name(DisasContext *ctx) \
@@ -937,10 +893,8 @@ static inline void gen_##name(DisasContext *ctx) \
t0 = tcg_temp_new_i64(); \
t1 = tcg_temp_new_i32(); \
gen_load_gpr64(t0, rB(ctx->opcode)); \
- gen_helper_##name(t1, cpu_env, t0); \
+ gen_helper_##name(t1, tcg_env, t0); \
tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); \
- tcg_temp_free_i64(t0); \
- tcg_temp_free_i32(t1); \
}
#define GEN_SPEFPUOP_CONV_64_32(name) \
static inline void gen_##name(DisasContext *ctx) \
@@ -954,10 +908,8 @@ static inline void gen_##name(DisasContext *ctx) \
t0 = tcg_temp_new_i64(); \
t1 = tcg_temp_new_i32(); \
tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
- gen_helper_##name(t0, cpu_env, t1); \
+ gen_helper_##name(t0, tcg_env, t1); \
gen_store_gpr64(rD(ctx->opcode), t0); \
- tcg_temp_free_i64(t0); \
- tcg_temp_free_i32(t1); \
}
#define GEN_SPEFPUOP_CONV_64_64(name) \
static inline void gen_##name(DisasContext *ctx) \
@@ -969,9 +921,8 @@ static inline void gen_##name(DisasContext *ctx) \
} \
t0 = tcg_temp_new_i64(); \
gen_load_gpr64(t0, rB(ctx->opcode)); \
- gen_helper_##name(t0, cpu_env, t0); \
+ gen_helper_##name(t0, tcg_env, t0); \
gen_store_gpr64(rD(ctx->opcode), t0); \
- tcg_temp_free_i64(t0); \
}
#define GEN_SPEFPUOP_ARITH2_32_32(name) \
static inline void gen_##name(DisasContext *ctx) \
@@ -980,11 +931,8 @@ static inline void gen_##name(DisasContext *ctx) \
TCGv_i32 t1 = tcg_temp_new_i32(); \
tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
- gen_helper_##name(t0, cpu_env, t0, t1); \
+ gen_helper_##name(t0, tcg_env, t0, t1); \
tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
- \
- tcg_temp_free_i32(t0); \
- tcg_temp_free_i32(t1); \
}
#define GEN_SPEFPUOP_ARITH2_64_64(name) \
static inline void gen_##name(DisasContext *ctx) \
@@ -998,10 +946,8 @@ static inline void gen_##name(DisasContext *ctx) \
t1 = tcg_temp_new_i64(); \
gen_load_gpr64(t0, rA(ctx->opcode)); \
gen_load_gpr64(t1, rB(ctx->opcode)); \
- gen_helper_##name(t0, cpu_env, t0, t1); \
+ gen_helper_##name(t0, tcg_env, t0, t1); \
gen_store_gpr64(rD(ctx->opcode), t0); \
- tcg_temp_free_i64(t0); \
- tcg_temp_free_i64(t1); \
}
#define GEN_SPEFPUOP_COMP_32(name) \
static inline void gen_##name(DisasContext *ctx) \
@@ -1011,10 +957,7 @@ static inline void gen_##name(DisasContext *ctx) \
\
tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
- gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, t0, t1); \
- \
- tcg_temp_free_i32(t0); \
- tcg_temp_free_i32(t1); \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], tcg_env, t0, t1); \
}
#define GEN_SPEFPUOP_COMP_64(name) \
static inline void gen_##name(DisasContext *ctx) \
@@ -1028,9 +971,7 @@ static inline void gen_##name(DisasContext *ctx) \
t1 = tcg_temp_new_i64(); \
gen_load_gpr64(t0, rA(ctx->opcode)); \
gen_load_gpr64(t1, rB(ctx->opcode)); \
- gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, t0, t1); \
- tcg_temp_free_i64(t0); \
- tcg_temp_free_i64(t1); \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], tcg_env, t0, t1); \
}
/* Single precision floating-point vectors operations */
diff --git a/target/ppc/translate/storage-ctrl-impl.c.inc b/target/ppc/translate/storage-ctrl-impl.c.inc
new file mode 100644
index 0000000000..74c23a4191
--- /dev/null
+++ b/target/ppc/translate/storage-ctrl-impl.c.inc
@@ -0,0 +1,248 @@
+/*
+ * Power ISA decode for Storage Control instructions
+ *
+ * Copyright (c) 2022 Instituto de Pesquisas Eldorado (eldorado.org.br)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Store Control Instructions
+ */
+
+#include "mmu-book3s-v3.h"
+
+static bool trans_SLBIE(DisasContext *ctx, arg_SLBIE *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS(ctx, SLBI);
+ REQUIRE_SV(ctx);
+
+#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64)
+ gen_helper_SLBIE(tcg_env, cpu_gpr[a->rb]);
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
+
+static bool trans_SLBIEG(DisasContext *ctx, arg_SLBIEG *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_SV(ctx);
+
+#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64)
+ gen_helper_SLBIEG(tcg_env, cpu_gpr[a->rb]);
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
+
+static bool trans_SLBIA(DisasContext *ctx, arg_SLBIA *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS(ctx, SLBI);
+ REQUIRE_SV(ctx);
+
+#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64)
+ gen_helper_SLBIA(tcg_env, tcg_constant_i32(a->ih));
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
+
+static bool trans_SLBIAG(DisasContext *ctx, arg_SLBIAG *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_SV(ctx);
+
+#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64)
+ gen_helper_SLBIAG(tcg_env, cpu_gpr[a->rs], tcg_constant_i32(a->l));
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
+
+static bool trans_SLBMTE(DisasContext *ctx, arg_SLBMTE *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS(ctx, SEGMENT_64B);
+ REQUIRE_SV(ctx);
+
+#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64)
+ gen_helper_SLBMTE(tcg_env, cpu_gpr[a->rb], cpu_gpr[a->rt]);
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
+
+static bool trans_SLBMFEV(DisasContext *ctx, arg_SLBMFEV *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS(ctx, SEGMENT_64B);
+ REQUIRE_SV(ctx);
+
+#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64)
+ gen_helper_SLBMFEV(cpu_gpr[a->rt], tcg_env, cpu_gpr[a->rb]);
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
+
+static bool trans_SLBMFEE(DisasContext *ctx, arg_SLBMFEE *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS(ctx, SEGMENT_64B);
+ REQUIRE_SV(ctx);
+
+#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64)
+ gen_helper_SLBMFEE(cpu_gpr[a->rt], tcg_env, cpu_gpr[a->rb]);
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
+
+static bool trans_SLBFEE(DisasContext *ctx, arg_SLBFEE *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS(ctx, SEGMENT_64B);
+
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+#else
+
+#if defined(TARGET_PPC64)
+ TCGLabel *l1, *l2;
+
+ if (unlikely(ctx->pr)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return true;
+ }
+ gen_helper_SLBFEE(cpu_gpr[a->rt], tcg_env,
+ cpu_gpr[a->rb]);
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[a->rt], -1, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], CRF_EQ);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_gpr[a->rt], 0);
+ gen_set_label(l2);
+#else
+ qemu_build_not_reached();
+#endif
+#endif
+ return true;
+}
+
+static bool trans_SLBSYNC(DisasContext *ctx, arg_SLBSYNC *a)
+{
+ REQUIRE_64BIT(ctx);
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_SV(ctx);
+
+#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64)
+ gen_check_tlb_flush(ctx, true);
+#else
+ qemu_build_not_reached();
+#endif
+ return true;
+}
+
+static bool do_tlbie(DisasContext *ctx, arg_X_tlbie *a, bool local)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return true;
+#else
+ TCGv_i32 t1;
+ int rb;
+
+ rb = a->rb;
+
+ if ((ctx->insns_flags2 & PPC2_ISA300) == 0) {
+ /*
+ * Before Power ISA 3.0, the corresponding bits of RIC, PRS, and R
+ * (and RS for tlbiel) were reserved fields and should be ignored.
+ */
+ a->ric = 0;
+ a->prs = false;
+ a->r = false;
+ if (local) {
+ a->rs = 0;
+ }
+ }
+
+ if (ctx->pr) {
+ /* tlbie[l] is privileged... */
+ gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return true;
+ } else if (!ctx->hv) {
+ if ((!a->prs && ctx->hr) || (!local && !ctx->gtse)) {
+ /*
+ * ... except when PRS=0 and HR=1, or when GTSE=0 for tlbie,
+ * making it hypervisor privileged.
+ */
+ gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return true;
+ }
+ }
+
+ if (!local && NARROW_MODE(ctx)) {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ext32u_tl(t0, cpu_gpr[rb]);
+ gen_helper_tlbie(tcg_env, t0);
+
+#if defined(TARGET_PPC64)
+ /*
+ * ISA 3.1B says that MSR SF must be 1 when this instruction is executed;
+ * otherwise the results are undefined.
+ */
+ } else if (a->r) {
+ gen_helper_tlbie_isa300(tcg_env, cpu_gpr[rb], cpu_gpr[a->rs],
+ tcg_constant_i32(a->ric << TLBIE_F_RIC_SHIFT |
+ a->prs << TLBIE_F_PRS_SHIFT |
+ a->r << TLBIE_F_R_SHIFT |
+ local << TLBIE_F_LOCAL_SHIFT));
+ return true;
+#endif
+
+ } else {
+ gen_helper_tlbie(tcg_env, cpu_gpr[rb]);
+ }
+
+ if (local) {
+ return true;
+ }
+
+ t1 = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t1, tcg_env, offsetof(CPUPPCState, tlb_need_flush));
+ tcg_gen_ori_i32(t1, t1, TLB_NEED_GLOBAL_FLUSH);
+ tcg_gen_st_i32(t1, tcg_env, offsetof(CPUPPCState, tlb_need_flush));
+
+ return true;
+#endif
+}
+
+TRANS_FLAGS(MEM_TLBIE, TLBIE, do_tlbie, false)
+TRANS_FLAGS(MEM_TLBIE, TLBIEL, do_tlbie, true)
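The privilege checks open-coded in do_tlbie() condense to a small predicate. A minimal scalar sketch (not part of the patch; the bool parameters mirror the DisasContext/decode fields used above):

    /* True when tlbie/tlbiel must raise a privileged-instruction exception,
     * following the checks in do_tlbie(). */
    static bool tlbie_priv_fault(bool pr, bool hv, bool hr, bool gtse,
                                 bool prs, bool local)
    {
        if (pr) {
            return true;                 /* problem state: always privileged */
        }
        if (!hv && ((!prs && hr) || (!local && !gtse))) {
            return true;                 /* hypervisor-privileged cases */
        }
        return false;                    /* supervisor may execute it */
    }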
diff --git a/target/ppc/translate/vector-impl.c.inc b/target/ppc/translate/vector-impl.c.inc
deleted file mode 100644
index 117ce9b137..0000000000
--- a/target/ppc/translate/vector-impl.c.inc
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Power ISA decode for Vector Facility instructions
- *
- * Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br)
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#define REQUIRE_ALTIVEC(CTX) \
- do { \
- if (unlikely(!(CTX)->altivec_enabled)) { \
- gen_exception((CTX), POWERPC_EXCP_VPU); \
- return true; \
- } \
- } while (0)
-
-static bool trans_VCFUGED(DisasContext *ctx, arg_VX *a)
-{
- TCGv_i64 tgt, src, mask;
-
- REQUIRE_INSNS_FLAGS2(ctx, ISA310);
- REQUIRE_ALTIVEC(ctx);
-
- tgt = tcg_temp_new_i64();
- src = tcg_temp_new_i64();
- mask = tcg_temp_new_i64();
-
- /* centrifuge lower double word */
- get_cpu_vsrl(src, a->vra + 32);
- get_cpu_vsrl(mask, a->vrb + 32);
- gen_helper_cfuged(tgt, src, mask);
- set_cpu_vsrl(a->vrt + 32, tgt);
-
- /* centrifuge higher double word */
- get_cpu_vsrh(src, a->vra + 32);
- get_cpu_vsrh(mask, a->vrb + 32);
- gen_helper_cfuged(tgt, src, mask);
- set_cpu_vsrh(a->vrt + 32, tgt);
-
- tcg_temp_free_i64(tgt);
- tcg_temp_free_i64(src);
- tcg_temp_free_i64(mask);
-
- return true;
-}
diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc
index 92b9527aff..b56e615c24 100644
--- a/target/ppc/translate/vmx-impl.c.inc
+++ b/target/ppc/translate/vmx-impl.c.inc
@@ -10,7 +10,7 @@
static inline TCGv_ptr gen_avr_ptr(int reg)
{
TCGv_ptr r = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(r, cpu_env, avr_full_offset(reg));
+ tcg_gen_addi_ptr(r, tcg_env, avr_full_offset(reg));
return r;
}
@@ -45,8 +45,6 @@ static void glue(gen_, name)(DisasContext *ctx) \
gen_qemu_ld64_i64(ctx, avr, EA); \
set_avr64(rD(ctx->opcode), avr, false); \
} \
- tcg_temp_free(EA); \
- tcg_temp_free_i64(avr); \
}
#define GEN_VR_STX(name, opc2, opc3) \
@@ -80,8 +78,6 @@ static void gen_st##name(DisasContext *ctx) \
get_avr64(avr, rD(ctx->opcode), false); \
gen_qemu_st64_i64(ctx, avr, EA); \
} \
- tcg_temp_free(EA); \
- tcg_temp_free_i64(avr); \
}
#define GEN_VR_LVE(name, opc2, opc3, size) \
@@ -100,9 +96,7 @@ static void gen_lve##name(DisasContext *ctx) \
tcg_gen_andi_tl(EA, EA, ~(size - 1)); \
} \
rs = gen_avr_ptr(rS(ctx->opcode)); \
- gen_helper_lve##name(cpu_env, rs, EA); \
- tcg_temp_free(EA); \
- tcg_temp_free_ptr(rs); \
+ gen_helper_lve##name(tcg_env, rs, EA); \
}
#define GEN_VR_STVE(name, opc2, opc3, size) \
@@ -121,13 +115,11 @@ static void gen_stve##name(DisasContext *ctx) \
tcg_gen_andi_tl(EA, EA, ~(size - 1)); \
} \
rs = gen_avr_ptr(rS(ctx->opcode)); \
- gen_helper_stve##name(cpu_env, rs, EA); \
- tcg_temp_free(EA); \
- tcg_temp_free_ptr(rs); \
+ gen_helper_stve##name(tcg_env, rs, EA); \
}
GEN_VR_LDX(lvx, 0x07, 0x03);
-/* As we don't emulate the cache, lvxl is stricly equivalent to lvx */
+/* As we don't emulate the cache, lvxl is strictly equivalent to lvx */
GEN_VR_LDX(lvxl, 0x07, 0x0B);
GEN_VR_LVE(bx, 0x07, 0x00, 1);
@@ -135,7 +127,7 @@ GEN_VR_LVE(hx, 0x07, 0x01, 2);
GEN_VR_LVE(wx, 0x07, 0x02, 4);
GEN_VR_STX(svx, 0x07, 0x07);
-/* As we don't emulate the cache, stvxl is stricly equivalent to stvx */
+/* As we don't emulate the cache, stvxl is strictly equivalent to stvx */
GEN_VR_STX(svxl, 0x07, 0x0F);
GEN_VR_STVE(bx, 0x07, 0x04, 1);
@@ -154,11 +146,9 @@ static void gen_mfvscr(DisasContext *ctx)
tcg_gen_movi_i64(avr, 0);
set_avr64(rD(ctx->opcode), avr, true);
t = tcg_temp_new_i32();
- gen_helper_mfvscr(t, cpu_env);
+ gen_helper_mfvscr(t, tcg_env);
tcg_gen_extu_i32_i64(avr, t);
set_avr64(rD(ctx->opcode), avr, false);
- tcg_temp_free_i32(t);
- tcg_temp_free_i64(avr);
}
static void gen_mtvscr(DisasContext *ctx)
@@ -173,69 +163,64 @@ static void gen_mtvscr(DisasContext *ctx)
val = tcg_temp_new_i32();
bofs = avr_full_offset(rB(ctx->opcode));
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
bofs += 3 * 4;
#endif
- tcg_gen_ld_i32(val, cpu_env, bofs);
- gen_helper_mtvscr(cpu_env, val);
- tcg_temp_free_i32(val);
+ tcg_gen_ld_i32(val, tcg_env, bofs);
+ gen_helper_mtvscr(tcg_env, val);
+}
+
+static void gen_vx_vmul10(DisasContext *ctx, bool add_cin, bool ret_carry)
+{
+ TCGv_i64 t0;
+ TCGv_i64 t1;
+ TCGv_i64 t2;
+ TCGv_i64 avr;
+ TCGv_i64 ten, z;
+
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ avr = tcg_temp_new_i64();
+ ten = tcg_constant_i64(10);
+ z = tcg_constant_i64(0);
+
+ if (add_cin) {
+ get_avr64(avr, rA(ctx->opcode), false);
+ tcg_gen_mulu2_i64(t0, t1, avr, ten);
+ get_avr64(avr, rB(ctx->opcode), false);
+ tcg_gen_andi_i64(t2, avr, 0xF);
+ tcg_gen_add2_i64(avr, t2, t0, t1, t2, z);
+ set_avr64(rD(ctx->opcode), avr, false);
+ } else {
+ get_avr64(avr, rA(ctx->opcode), false);
+ tcg_gen_mulu2_i64(avr, t2, avr, ten);
+ set_avr64(rD(ctx->opcode), avr, false);
+ }
+
+ if (ret_carry) {
+ get_avr64(avr, rA(ctx->opcode), true);
+ tcg_gen_mulu2_i64(t0, t1, avr, ten);
+ tcg_gen_add2_i64(t0, avr, t0, t1, t2, z);
+ set_avr64(rD(ctx->opcode), avr, false);
+ set_avr64(rD(ctx->opcode), z, true);
+ } else {
+ get_avr64(avr, rA(ctx->opcode), true);
+ tcg_gen_mul_i64(t0, avr, ten);
+ tcg_gen_add_i64(avr, t0, t2);
+ set_avr64(rD(ctx->opcode), avr, true);
+ }
}
#define GEN_VX_VMUL10(name, add_cin, ret_carry) \
-static void glue(gen_, name)(DisasContext *ctx) \
-{ \
- TCGv_i64 t0; \
- TCGv_i64 t1; \
- TCGv_i64 t2; \
- TCGv_i64 avr; \
- TCGv_i64 ten, z; \
- \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- \
- t0 = tcg_temp_new_i64(); \
- t1 = tcg_temp_new_i64(); \
- t2 = tcg_temp_new_i64(); \
- avr = tcg_temp_new_i64(); \
- ten = tcg_const_i64(10); \
- z = tcg_const_i64(0); \
- \
- if (add_cin) { \
- get_avr64(avr, rA(ctx->opcode), false); \
- tcg_gen_mulu2_i64(t0, t1, avr, ten); \
- get_avr64(avr, rB(ctx->opcode), false); \
- tcg_gen_andi_i64(t2, avr, 0xF); \
- tcg_gen_add2_i64(avr, t2, t0, t1, t2, z); \
- set_avr64(rD(ctx->opcode), avr, false); \
- } else { \
- get_avr64(avr, rA(ctx->opcode), false); \
- tcg_gen_mulu2_i64(avr, t2, avr, ten); \
- set_avr64(rD(ctx->opcode), avr, false); \
- } \
- \
- if (ret_carry) { \
- get_avr64(avr, rA(ctx->opcode), true); \
- tcg_gen_mulu2_i64(t0, t1, avr, ten); \
- tcg_gen_add2_i64(t0, avr, t0, t1, t2, z); \
- set_avr64(rD(ctx->opcode), avr, false); \
- set_avr64(rD(ctx->opcode), z, true); \
- } else { \
- get_avr64(avr, rA(ctx->opcode), true); \
- tcg_gen_mul_i64(t0, avr, ten); \
- tcg_gen_add_i64(avr, t0, t2); \
- set_avr64(rD(ctx->opcode), avr, true); \
- } \
- \
- tcg_temp_free_i64(t0); \
- tcg_temp_free_i64(t1); \
- tcg_temp_free_i64(t2); \
- tcg_temp_free_i64(avr); \
- tcg_temp_free_i64(ten); \
- tcg_temp_free_i64(z); \
-} \
+ static void glue(gen_, name)(DisasContext *ctx) \
+ { gen_vx_vmul10(ctx, add_cin, ret_carry); }
GEN_VX_VMUL10(vmul10uq, 0, 0);
GEN_VX_VMUL10(vmul10euq, 1, 0);
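The mulu2/add2 sequence in gen_vx_vmul10() is a 128-bit multiply by ten split across two 64-bit halves. A scalar reference, assuming a compiler that provides unsigned __int128 (illustration only, not part of the patch):

    #include <stdint.h>

    /* hi:lo is the 128-bit source, cin the low nibble of VRB used by the
     * extended ("e") forms; carry_out is what vmul10cuq/vmul10ecuq return. */
    static void mul10_ref(uint64_t hi, uint64_t lo, uint64_t cin,
                          uint64_t *rhi, uint64_t *rlo, uint64_t *carry_out)
    {
        unsigned __int128 p_lo = (unsigned __int128)lo * 10 + (cin & 0xF);
        unsigned __int128 p_hi = (unsigned __int128)hi * 10
                                 + (uint64_t)(p_lo >> 64);

        *rlo = (uint64_t)p_lo;
        *rhi = (uint64_t)p_hi;
        *carry_out = (uint64_t)(p_hi >> 64);
    }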
@@ -279,9 +264,6 @@ static void glue(gen_, name)(DisasContext *ctx) \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
gen_helper_##name(rd, ra, rb); \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rd); \
}
#define GEN_VXFORM_TRANS(name, opc2, opc3) \
@@ -305,10 +287,7 @@ static void glue(gen_, name)(DisasContext *ctx) \
ra = gen_avr_ptr(rA(ctx->opcode)); \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
- gen_helper_##name(cpu_env, rd, ra, rb); \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rd); \
+ gen_helper_##name(tcg_env, rd, ra, rb); \
}
#define GEN_VXFORM3(name, opc2, opc3) \
@@ -324,10 +303,6 @@ static void glue(gen_, name)(DisasContext *ctx) \
rc = gen_avr_ptr(rC(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
gen_helper_##name(rd, ra, rb, rc); \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rc); \
- tcg_temp_free_ptr(rd); \
}
/*
@@ -400,7 +375,6 @@ static void glue(gen_, name)(DisasContext *ctx) \
} \
rb = gen_avr_ptr(rB(ctx->opcode)); \
gen_helper_##name(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], rb); \
- tcg_temp_free_ptr(rb); \
}
GEN_VXFORM_V(vaddubm, MO_8, tcg_gen_gvec_add, 0, 0);
@@ -431,21 +405,6 @@ GEN_VXFORM_V(vminsb, MO_8, tcg_gen_gvec_smin, 1, 12);
GEN_VXFORM_V(vminsh, MO_16, tcg_gen_gvec_smin, 1, 13);
GEN_VXFORM_V(vminsw, MO_32, tcg_gen_gvec_smin, 1, 14);
GEN_VXFORM_V(vminsd, MO_64, tcg_gen_gvec_smin, 1, 15);
-GEN_VXFORM(vavgub, 1, 16);
-GEN_VXFORM(vabsdub, 1, 16);
-GEN_VXFORM_DUAL(vavgub, PPC_ALTIVEC, PPC_NONE, \
- vabsdub, PPC_NONE, PPC2_ISA300)
-GEN_VXFORM(vavguh, 1, 17);
-GEN_VXFORM(vabsduh, 1, 17);
-GEN_VXFORM_DUAL(vavguh, PPC_ALTIVEC, PPC_NONE, \
- vabsduh, PPC_NONE, PPC2_ISA300)
-GEN_VXFORM(vavguw, 1, 18);
-GEN_VXFORM(vabsduw, 1, 18);
-GEN_VXFORM_DUAL(vavguw, PPC_ALTIVEC, PPC_NONE, \
- vabsduw, PPC_NONE, PPC2_ISA300)
-GEN_VXFORM(vavgsb, 1, 20);
-GEN_VXFORM(vavgsh, 1, 21);
-GEN_VXFORM(vavgsw, 1, 22);
GEN_VXFORM(vmrghb, 6, 0);
GEN_VXFORM(vmrghh, 6, 1);
GEN_VXFORM(vmrghw, 6, 2);
@@ -472,9 +431,6 @@ static void trans_vmrgew(DisasContext *ctx)
get_avr64(avr, VA, false);
tcg_gen_deposit_i64(avr, avr, tmp, 0, 32);
set_avr64(VT, avr, false);
-
- tcg_temp_free_i64(tmp);
- tcg_temp_free_i64(avr);
}
static void trans_vmrgow(DisasContext *ctx)
@@ -495,10 +451,6 @@ static void trans_vmrgow(DisasContext *ctx)
get_avr64(t1, VA, false);
tcg_gen_deposit_i64(avr, t0, t1, 32, 32);
set_avr64(VT, avr, false);
-
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(avr);
}
/*
@@ -533,10 +485,6 @@ static void trans_lvsl(DisasContext *ctx)
*/
tcg_gen_addi_i64(result, sh, 0x08090a0b0c0d0e0fULL);
set_avr64(VT, result, false);
-
- tcg_temp_free_i64(result);
- tcg_temp_free_i64(sh);
- tcg_temp_free(EA);
}
/*
@@ -572,10 +520,6 @@ static void trans_lvsr(DisasContext *ctx)
*/
tcg_gen_subfi_i64(result, 0x18191a1b1c1d1e1fULL, sh);
set_avr64(VT, result, false);
-
- tcg_temp_free_i64(result);
- tcg_temp_free_i64(sh);
- tcg_temp_free(EA);
}
/*
@@ -618,11 +562,6 @@ static void trans_vsl(DisasContext *ctx)
tcg_gen_shl_i64(avr, avr, sh);
tcg_gen_or_i64(avr, avr, carry);
set_avr64(VT, avr, true);
-
- tcg_temp_free_i64(avr);
- tcg_temp_free_i64(sh);
- tcg_temp_free_i64(carry);
- tcg_temp_free_i64(tmp);
}
/*
@@ -664,11 +603,6 @@ static void trans_vsr(DisasContext *ctx)
tcg_gen_shr_i64(avr, avr, sh);
tcg_gen_or_i64(avr, avr, carry);
set_avr64(VT, avr, false);
-
- tcg_temp_free_i64(avr);
- tcg_temp_free_i64(sh);
- tcg_temp_free_i64(carry);
- tcg_temp_free_i64(tmp);
}
/*
@@ -737,13 +671,6 @@ static void trans_vgbbd(DisasContext *ctx)
for (j = 0; j < 2; j++) {
set_avr64(VT, result[j], j);
}
-
- tcg_temp_free_i64(tmp);
- tcg_temp_free_i64(tcg_mask);
- tcg_temp_free_i64(result[0]);
- tcg_temp_free_i64(result[1]);
- tcg_temp_free_i64(avr[0]);
- tcg_temp_free_i64(avr[1]);
}
/*
@@ -762,14 +689,12 @@ static void trans_vclzw(DisasContext *ctx)
/* Perform count for every word element using tcg_gen_clzi_i32. */
for (i = 0; i < 4; i++) {
- tcg_gen_ld_i32(tmp, cpu_env,
+ tcg_gen_ld_i32(tmp, tcg_env,
offsetof(CPUPPCState, vsr[32 + VB].u64[0]) + i * 4);
tcg_gen_clzi_i32(tmp, tmp, 32);
- tcg_gen_st_i32(tmp, cpu_env,
+ tcg_gen_st_i32(tmp, tcg_env,
offsetof(CPUPPCState, vsr[32 + VT].u64[0]) + i * 4);
}
-
- tcg_temp_free_i32(tmp);
}
/*
@@ -794,55 +719,360 @@ static void trans_vclzd(DisasContext *ctx)
get_avr64(avr, VB, false);
tcg_gen_clzi_i64(avr, avr, 64);
set_avr64(VT, avr, false);
-
- tcg_temp_free_i64(avr);
}
-GEN_VXFORM(vmuloub, 4, 0);
-GEN_VXFORM(vmulouh, 4, 1);
-GEN_VXFORM(vmulouw, 4, 2);
GEN_VXFORM_V(vmuluwm, MO_32, tcg_gen_gvec_mul, 4, 2);
-GEN_VXFORM_DUAL(vmulouw, PPC_ALTIVEC, PPC_NONE,
- vmuluwm, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXFORM(vmulosb, 4, 4);
-GEN_VXFORM(vmulosh, 4, 5);
-GEN_VXFORM(vmulosw, 4, 6);
-GEN_VXFORM_V(vmulld, MO_64, tcg_gen_gvec_mul, 4, 7);
-GEN_VXFORM(vmuleub, 4, 8);
-GEN_VXFORM(vmuleuh, 4, 9);
-GEN_VXFORM(vmuleuw, 4, 10);
-GEN_VXFORM(vmulhuw, 4, 10);
-GEN_VXFORM(vmulhud, 4, 11);
-GEN_VXFORM_DUAL(vmuleuw, PPC_ALTIVEC, PPC_NONE,
- vmulhuw, PPC_NONE, PPC2_ISA310);
-GEN_VXFORM(vmulesb, 4, 12);
-GEN_VXFORM(vmulesh, 4, 13);
-GEN_VXFORM(vmulesw, 4, 14);
-GEN_VXFORM(vmulhsw, 4, 14);
-GEN_VXFORM_DUAL(vmulesw, PPC_ALTIVEC, PPC_NONE,
- vmulhsw, PPC_NONE, PPC2_ISA310);
-GEN_VXFORM(vmulhsd, 4, 15);
-GEN_VXFORM_V(vslb, MO_8, tcg_gen_gvec_shlv, 2, 4);
-GEN_VXFORM_V(vslh, MO_16, tcg_gen_gvec_shlv, 2, 5);
-GEN_VXFORM_V(vslw, MO_32, tcg_gen_gvec_shlv, 2, 6);
-GEN_VXFORM(vrlwnm, 2, 6);
-GEN_VXFORM_DUAL(vslw, PPC_ALTIVEC, PPC_NONE, \
- vrlwnm, PPC_NONE, PPC2_ISA300)
-GEN_VXFORM_V(vsld, MO_64, tcg_gen_gvec_shlv, 2, 23);
-GEN_VXFORM_V(vsrb, MO_8, tcg_gen_gvec_shrv, 2, 8);
-GEN_VXFORM_V(vsrh, MO_16, tcg_gen_gvec_shrv, 2, 9);
-GEN_VXFORM_V(vsrw, MO_32, tcg_gen_gvec_shrv, 2, 10);
-GEN_VXFORM_V(vsrd, MO_64, tcg_gen_gvec_shrv, 2, 27);
-GEN_VXFORM_V(vsrab, MO_8, tcg_gen_gvec_sarv, 2, 12);
-GEN_VXFORM_V(vsrah, MO_16, tcg_gen_gvec_sarv, 2, 13);
-GEN_VXFORM_V(vsraw, MO_32, tcg_gen_gvec_sarv, 2, 14);
-GEN_VXFORM_V(vsrad, MO_64, tcg_gen_gvec_sarv, 2, 15);
GEN_VXFORM(vsrv, 2, 28);
GEN_VXFORM(vslv, 2, 29);
GEN_VXFORM(vslo, 6, 16);
GEN_VXFORM(vsro, 6, 17);
-GEN_VXFORM(vaddcuw, 0, 6);
-GEN_VXFORM(vsubcuw, 0, 22);
+
+static bool do_vector_gvec3_VX(DisasContext *ctx, arg_VX *a, int vece,
+ void (*gen_gvec)(unsigned, uint32_t, uint32_t,
+ uint32_t, uint32_t, uint32_t))
+{
+ REQUIRE_VECTOR(ctx);
+
+ gen_gvec(vece, avr_full_offset(a->vrt), avr_full_offset(a->vra),
+ avr_full_offset(a->vrb), 16, 16);
+
+ return true;
+}
+
+TRANS_FLAGS(ALTIVEC, VSLB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_shlv);
+TRANS_FLAGS(ALTIVEC, VSLH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_shlv);
+TRANS_FLAGS(ALTIVEC, VSLW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_shlv);
+TRANS_FLAGS2(ALTIVEC_207, VSLD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_shlv);
+
+TRANS_FLAGS(ALTIVEC, VSRB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_shrv);
+TRANS_FLAGS(ALTIVEC, VSRH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_shrv);
+TRANS_FLAGS(ALTIVEC, VSRW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_shrv);
+TRANS_FLAGS2(ALTIVEC_207, VSRD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_shrv);
+
+TRANS_FLAGS(ALTIVEC, VSRAB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_sarv);
+TRANS_FLAGS(ALTIVEC, VSRAH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_sarv);
+TRANS_FLAGS(ALTIVEC, VSRAW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_sarv);
+TRANS_FLAGS2(ALTIVEC_207, VSRAD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_sarv);
+
+TRANS_FLAGS(ALTIVEC, VRLB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_rotlv)
+TRANS_FLAGS(ALTIVEC, VRLH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_rotlv)
+TRANS_FLAGS(ALTIVEC, VRLW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_rotlv)
+TRANS_FLAGS2(ALTIVEC_207, VRLD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_rotlv)
+
+static TCGv_vec do_vrl_mask_vec(unsigned vece, TCGv_vec vrb)
+{
+ TCGv_vec t0 = tcg_temp_new_vec_matching(vrb),
+ t1 = tcg_temp_new_vec_matching(vrb),
+ t2 = tcg_temp_new_vec_matching(vrb),
+ ones = tcg_constant_vec_matching(vrb, vece, -1);
+
+ /* Extract b and e */
+ tcg_gen_dupi_vec(vece, t2, (8 << vece) - 1);
+
+ tcg_gen_shri_vec(vece, t0, vrb, 16);
+ tcg_gen_and_vec(vece, t0, t0, t2);
+
+ tcg_gen_shri_vec(vece, t1, vrb, 8);
+ tcg_gen_and_vec(vece, t1, t1, t2);
+
+ /* Compare b and e to negate the mask where begin > end */
+ tcg_gen_cmp_vec(TCG_COND_GT, vece, t2, t0, t1);
+
+ /* Create the mask with (~0 >> b) ^ ((~0 >> e) >> 1) */
+ tcg_gen_shrv_vec(vece, t0, ones, t0);
+ tcg_gen_shrv_vec(vece, t1, ones, t1);
+ tcg_gen_shri_vec(vece, t1, t1, 1);
+ tcg_gen_xor_vec(vece, t0, t0, t1);
+
+ /* negate the mask */
+ tcg_gen_xor_vec(vece, t0, t0, t2);
+
+ return t0;
+}
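The mask built by do_vrl_mask_vec() has a simple scalar equivalent; a sketch for 32-bit elements, with b and e counted from the most significant bit as in the VRLWNM/VRLWMI mask fields (illustration only):

    static uint32_t vrl_mask32(unsigned b, unsigned e)
    {
        uint32_t m = (~0u >> b) ^ ((~0u >> e) >> 1);
        return (b > e) ? ~m : m;   /* wrap-around mask when begin > end */
    }
    /* vrl_mask32(4, 11) == 0x0ff00000, vrl_mask32(28, 3) == 0xf000000f */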
+
+static void gen_vrlnm_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra,
+ TCGv_vec vrb)
+{
+ TCGv_vec mask, n = tcg_temp_new_vec_matching(vrt);
+
+ /* Create the mask */
+ mask = do_vrl_mask_vec(vece, vrb);
+
+ /* Extract n */
+ tcg_gen_dupi_vec(vece, n, (8 << vece) - 1);
+ tcg_gen_and_vec(vece, n, vrb, n);
+
+ /* Rotate and mask */
+ tcg_gen_rotlv_vec(vece, vrt, vra, n);
+ tcg_gen_and_vec(vece, vrt, vrt, mask);
+}
+
+static bool do_vrlnm(DisasContext *ctx, arg_VX *a, int vece)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_cmp_vec, INDEX_op_rotlv_vec, INDEX_op_sari_vec,
+ INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_shrv_vec, 0
+ };
+ static const GVecGen3 ops[2] = {
+ {
+ .fniv = gen_vrlnm_vec,
+ .fno = gen_helper_VRLWNM,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vrlnm_vec,
+ .fno = gen_helper_VRLDNM,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_64
+ }
+ };
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VSX(ctx);
+
+ tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+ avr_full_offset(a->vrb), 16, 16, &ops[vece - 2]);
+
+ return true;
+}
+
+TRANS(VRLWNM, do_vrlnm, MO_32)
+TRANS(VRLDNM, do_vrlnm, MO_64)
+
+static void gen_vrlmi_vec(unsigned vece, TCGv_vec vrt, TCGv_vec vra,
+ TCGv_vec vrb)
+{
+ TCGv_vec mask, n = tcg_temp_new_vec_matching(vrt),
+ tmp = tcg_temp_new_vec_matching(vrt);
+
+ /* Create the mask */
+ mask = do_vrl_mask_vec(vece, vrb);
+
+ /* Extract n */
+ tcg_gen_dupi_vec(vece, n, (8 << vece) - 1);
+ tcg_gen_and_vec(vece, n, vrb, n);
+
+ /* Rotate and insert */
+ tcg_gen_rotlv_vec(vece, tmp, vra, n);
+ tcg_gen_bitsel_vec(vece, vrt, mask, tmp, vrt);
+}
+
+static bool do_vrlmi(DisasContext *ctx, arg_VX *a, int vece)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_cmp_vec, INDEX_op_rotlv_vec, INDEX_op_sari_vec,
+ INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_shrv_vec, 0
+ };
+ static const GVecGen3 ops[2] = {
+ {
+ .fniv = gen_vrlmi_vec,
+ .fno = gen_helper_VRLWMI,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vrlnm_vec,
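(The MO_64 entry above pairs gen_helper_VRLDMI with the non-inserting gen_vrlnm_vec expansion; following the MO_32 entry, the rotate-and-insert expansion gen_vrlmi_vec appears to be the intended .fniv here.)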
+ .fno = gen_helper_VRLDMI,
+ .opt_opc = vecop_list,
+ .load_dest = true,
+ .vece = MO_64
+ }
+ };
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VSX(ctx);
+
+ tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+ avr_full_offset(a->vrb), 16, 16, &ops[vece - 2]);
+
+ return true;
+}
+
+TRANS(VRLWMI, do_vrlmi, MO_32)
+TRANS(VRLDMI, do_vrlmi, MO_64)
+
+static bool do_vector_shift_quad(DisasContext *ctx, arg_VX *a, bool right,
+ bool alg)
+{
+ TCGv_i64 hi, lo, t0, t1, n, zero = tcg_constant_i64(0);
+
+ REQUIRE_VECTOR(ctx);
+
+ n = tcg_temp_new_i64();
+ hi = tcg_temp_new_i64();
+ lo = tcg_temp_new_i64();
+ t0 = tcg_temp_new_i64();
+
+ get_avr64(lo, a->vra, false);
+ get_avr64(hi, a->vra, true);
+
+ get_avr64(n, a->vrb, true);
+
+ tcg_gen_andi_i64(t0, n, 64);
+ if (right) {
+ tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, hi, lo);
+ if (alg) {
+ t1 = tcg_temp_new_i64();
+ tcg_gen_sari_i64(t1, lo, 63);
+ } else {
+ t1 = zero;
+ }
+ tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, t1, hi);
+ } else {
+ tcg_gen_movcond_i64(TCG_COND_NE, hi, t0, zero, lo, hi);
+ tcg_gen_movcond_i64(TCG_COND_NE, lo, t0, zero, zero, lo);
+ }
+ tcg_gen_andi_i64(n, n, 0x3F);
+
+ if (right) {
+ if (alg) {
+ tcg_gen_sar_i64(t0, hi, n);
+ } else {
+ tcg_gen_shr_i64(t0, hi, n);
+ }
+ } else {
+ tcg_gen_shl_i64(t0, lo, n);
+ }
+ set_avr64(a->vrt, t0, right);
+
+ if (right) {
+ tcg_gen_shr_i64(lo, lo, n);
+ } else {
+ tcg_gen_shl_i64(hi, hi, n);
+ }
+ tcg_gen_xori_i64(n, n, 63);
+ if (right) {
+ tcg_gen_shl_i64(hi, hi, n);
+ tcg_gen_shli_i64(hi, hi, 1);
+ } else {
+ tcg_gen_shr_i64(lo, lo, n);
+ tcg_gen_shri_i64(lo, lo, 1);
+ }
+ tcg_gen_or_i64(hi, hi, lo);
+ set_avr64(a->vrt, hi, !right);
+ return true;
+}
+
+TRANS_FLAGS2(ISA310, VSLQ, do_vector_shift_quad, false, false);
+TRANS_FLAGS2(ISA310, VSRQ, do_vector_shift_quad, true, false);
+TRANS_FLAGS2(ISA310, VSRAQ, do_vector_shift_quad, true, true);
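The swap-then-shift sequence in do_vector_shift_quad() is equivalent to a plain 128-bit shift by the 7-bit count taken from VRB. A scalar reference, assuming a compiler with unsigned __int128 (illustration only):

    static unsigned __int128 vslq_ref(unsigned __int128 x, unsigned n)
    {
        return x << (n & 127);
    }

    static unsigned __int128 vsrq_ref(unsigned __int128 x, unsigned n)
    {
        /* VSRAQ sign-fills instead: cast to a signed __int128 on a compiler
         * whose signed right shift is arithmetic. */
        return x >> (n & 127);
    }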
+
+static void do_vrlq_mask(TCGv_i64 mh, TCGv_i64 ml, TCGv_i64 b, TCGv_i64 e)
+{
+ TCGv_i64 th, tl, t0, t1, zero = tcg_constant_i64(0),
+ ones = tcg_constant_i64(-1);
+
+ th = tcg_temp_new_i64();
+ tl = tcg_temp_new_i64();
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ /* m = ~0 >> b */
+ tcg_gen_andi_i64(t0, b, 64);
+ tcg_gen_movcond_i64(TCG_COND_NE, t1, t0, zero, zero, ones);
+ tcg_gen_andi_i64(t0, b, 0x3F);
+ tcg_gen_shr_i64(mh, t1, t0);
+ tcg_gen_shr_i64(ml, ones, t0);
+ tcg_gen_xori_i64(t0, t0, 63);
+ tcg_gen_shl_i64(t1, t1, t0);
+ tcg_gen_shli_i64(t1, t1, 1);
+ tcg_gen_or_i64(ml, t1, ml);
+
+ /* t = ~0 >> e */
+ tcg_gen_andi_i64(t0, e, 64);
+ tcg_gen_movcond_i64(TCG_COND_NE, t1, t0, zero, zero, ones);
+ tcg_gen_andi_i64(t0, e, 0x3F);
+ tcg_gen_shr_i64(th, t1, t0);
+ tcg_gen_shr_i64(tl, ones, t0);
+ tcg_gen_xori_i64(t0, t0, 63);
+ tcg_gen_shl_i64(t1, t1, t0);
+ tcg_gen_shli_i64(t1, t1, 1);
+ tcg_gen_or_i64(tl, t1, tl);
+
+ /* t = t >> 1 */
+ tcg_gen_extract2_i64(tl, tl, th, 1);
+ tcg_gen_shri_i64(th, th, 1);
+
+ /* m = m ^ t */
+ tcg_gen_xor_i64(mh, mh, th);
+ tcg_gen_xor_i64(ml, ml, tl);
+
+ /* Negate the mask if begin > end */
+ tcg_gen_movcond_i64(TCG_COND_GT, t0, b, e, ones, zero);
+
+ tcg_gen_xor_i64(mh, mh, t0);
+ tcg_gen_xor_i64(ml, ml, t0);
+}
+
+static bool do_vector_rotl_quad(DisasContext *ctx, arg_VX *a, bool mask,
+ bool insert)
+{
+ TCGv_i64 ah, al, vrb, n, t0, t1, zero = tcg_constant_i64(0);
+
+ REQUIRE_VECTOR(ctx);
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+
+ ah = tcg_temp_new_i64();
+ al = tcg_temp_new_i64();
+ vrb = tcg_temp_new_i64();
+ n = tcg_temp_new_i64();
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ get_avr64(ah, a->vra, true);
+ get_avr64(al, a->vra, false);
+ get_avr64(vrb, a->vrb, true);
+
+ tcg_gen_mov_i64(t0, ah);
+ tcg_gen_andi_i64(t1, vrb, 64);
+ tcg_gen_movcond_i64(TCG_COND_NE, ah, t1, zero, al, ah);
+ tcg_gen_movcond_i64(TCG_COND_NE, al, t1, zero, t0, al);
+ tcg_gen_andi_i64(n, vrb, 0x3F);
+
+ tcg_gen_shl_i64(t0, ah, n);
+ tcg_gen_shl_i64(t1, al, n);
+
+ tcg_gen_xori_i64(n, n, 63);
+
+ tcg_gen_shr_i64(al, al, n);
+ tcg_gen_shri_i64(al, al, 1);
+ tcg_gen_or_i64(t0, al, t0);
+
+ tcg_gen_shr_i64(ah, ah, n);
+ tcg_gen_shri_i64(ah, ah, 1);
+ tcg_gen_or_i64(t1, ah, t1);
+
+ if (mask || insert) {
+ tcg_gen_extract_i64(n, vrb, 8, 7);
+ tcg_gen_extract_i64(vrb, vrb, 16, 7);
+
+ do_vrlq_mask(ah, al, vrb, n);
+
+ tcg_gen_and_i64(t0, t0, ah);
+ tcg_gen_and_i64(t1, t1, al);
+
+ if (insert) {
+ get_avr64(n, a->vrt, true);
+ get_avr64(vrb, a->vrt, false);
+ tcg_gen_andc_i64(n, n, ah);
+ tcg_gen_andc_i64(vrb, vrb, al);
+ tcg_gen_or_i64(t0, t0, n);
+ tcg_gen_or_i64(t1, t1, vrb);
+ }
+ }
+
+ set_avr64(a->vrt, t0, true);
+ set_avr64(a->vrt, t1, false);
+ return true;
+}
+
+TRANS(VRLQ, do_vector_rotl_quad, false, false)
+TRANS(VRLQNM, do_vector_rotl_quad, true, false)
+TRANS(VRLQMI, do_vector_rotl_quad, false, true)
#define GEN_VXFORM_SAT(NAME, VECE, NORM, SAT, OPC2, OPC3) \
static void glue(glue(gen_, NAME), _vec)(unsigned vece, TCGv_vec t, \
@@ -854,7 +1084,6 @@ static void glue(glue(gen_, NAME), _vec)(unsigned vece, TCGv_vec t, \
glue(glue(tcg_gen_, SAT), _vec)(VECE, t, a, b); \
tcg_gen_cmp_vec(TCG_COND_NE, VECE, x, x, t); \
tcg_gen_or_vec(VECE, sat, sat, x); \
- tcg_temp_free_vec(x); \
} \
static void glue(gen_, NAME)(DisasContext *ctx) \
{ \
@@ -897,32 +1126,7 @@ GEN_VXFORM_SAT(vsubuws, MO_32, sub, ussub, 0, 26);
GEN_VXFORM_SAT(vsubsbs, MO_8, sub, sssub, 0, 28);
GEN_VXFORM_SAT(vsubshs, MO_16, sub, sssub, 0, 29);
GEN_VXFORM_SAT(vsubsws, MO_32, sub, sssub, 0, 30);
-GEN_VXFORM(vadduqm, 0, 4);
-GEN_VXFORM(vaddcuq, 0, 5);
-GEN_VXFORM3(vaddeuqm, 30, 0);
-GEN_VXFORM3(vaddecuq, 30, 0);
-GEN_VXFORM_DUAL(vaddeuqm, PPC_NONE, PPC2_ALTIVEC_207, \
- vaddecuq, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXFORM(vsubuqm, 0, 20);
-GEN_VXFORM(vsubcuq, 0, 21);
-GEN_VXFORM3(vsubeuqm, 31, 0);
-GEN_VXFORM3(vsubecuq, 31, 0);
-GEN_VXFORM_DUAL(vsubeuqm, PPC_NONE, PPC2_ALTIVEC_207, \
- vsubecuq, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXFORM_V(vrlb, MO_8, tcg_gen_gvec_rotlv, 2, 0);
-GEN_VXFORM_V(vrlh, MO_16, tcg_gen_gvec_rotlv, 2, 1);
-GEN_VXFORM_V(vrlw, MO_32, tcg_gen_gvec_rotlv, 2, 2);
-GEN_VXFORM(vrlwmi, 2, 2);
-GEN_VXFORM_DUAL(vrlw, PPC_ALTIVEC, PPC_NONE, \
- vrlwmi, PPC_NONE, PPC2_ISA300)
-GEN_VXFORM_V(vrld, MO_64, tcg_gen_gvec_rotlv, 2, 3);
-GEN_VXFORM(vrldmi, 2, 3);
-GEN_VXFORM_DUAL(vrld, PPC_NONE, PPC2_ALTIVEC_207, \
- vrldmi, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_TRANS(vsl, 2, 7);
-GEN_VXFORM(vrldnm, 2, 7);
-GEN_VXFORM_DUAL(vsl, PPC_ALTIVEC, PPC_NONE, \
- vrldnm, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_TRANS(vsr, 2, 11);
GEN_VXFORM_ENV(vpkuhum, 7, 0);
GEN_VXFORM_ENV(vpkuwum, 7, 1);
@@ -970,10 +1174,7 @@ static void glue(gen_, name)(DisasContext *ctx) \
ra = gen_avr_ptr(rA(ctx->opcode)); \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
- gen_helper_##opname(cpu_env, rd, ra, rb); \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rd); \
+ gen_helper_##opname(tcg_env, rd, ra, rb); \
}
#define GEN_VXRFORM(name, opc2, opc3) \
@@ -982,7 +1183,7 @@ static void glue(gen_, name)(DisasContext *ctx) \
/*
* Support for Altivec instructions that use bit 31 (Rc) as an opcode
- * bit but also use bit 21 as an actual Rc bit. In general, thse pairs
+ * bit but also use bit 21 as an actual Rc bit. In general, these pairs
* come from different versions of the ISA, so we must also support a
* pair of flags for each instruction.
*/
@@ -1008,41 +1209,230 @@ static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
} \
}
-GEN_VXRFORM(vcmpequb, 3, 0)
-GEN_VXRFORM(vcmpequh, 3, 1)
-GEN_VXRFORM(vcmpequw, 3, 2)
-GEN_VXRFORM(vcmpequd, 3, 3)
-GEN_VXRFORM(vcmpnezb, 3, 4)
-GEN_VXRFORM(vcmpnezh, 3, 5)
-GEN_VXRFORM(vcmpnezw, 3, 6)
-GEN_VXRFORM(vcmpgtsb, 3, 12)
-GEN_VXRFORM(vcmpgtsh, 3, 13)
-GEN_VXRFORM(vcmpgtsw, 3, 14)
-GEN_VXRFORM(vcmpgtsd, 3, 15)
-GEN_VXRFORM(vcmpgtub, 3, 8)
-GEN_VXRFORM(vcmpgtuh, 3, 9)
-GEN_VXRFORM(vcmpgtuw, 3, 10)
-GEN_VXRFORM(vcmpgtud, 3, 11)
+static void do_vcmp_rc(int vrt)
+{
+ TCGv_i64 tmp, set, clr;
+
+ tmp = tcg_temp_new_i64();
+ set = tcg_temp_new_i64();
+ clr = tcg_temp_new_i64();
+
+ get_avr64(tmp, vrt, true);
+ tcg_gen_mov_i64(set, tmp);
+ get_avr64(tmp, vrt, false);
+ tcg_gen_or_i64(clr, set, tmp);
+ tcg_gen_and_i64(set, set, tmp);
+
+ tcg_gen_setcondi_i64(TCG_COND_EQ, clr, clr, 0);
+ tcg_gen_shli_i64(clr, clr, 1);
+
+ tcg_gen_setcondi_i64(TCG_COND_EQ, set, set, -1);
+ tcg_gen_shli_i64(set, set, 3);
+
+ tcg_gen_or_i64(tmp, set, clr);
+ tcg_gen_extrl_i64_i32(cpu_crf[6], tmp);
+}
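With Rc=1 the vector compares set CR field 6 from the all-true/all-false state of the result; do_vcmp_rc() reduces to the following scalar model, hi/lo being the two halves of VRT after the compare (illustration only):

    static uint32_t vcmp_cr6(uint64_t hi, uint64_t lo)
    {
        uint32_t cr6 = 0;
        if ((hi & lo) == UINT64_MAX) {
            cr6 |= 1 << 3;            /* every element compared true  */
        }
        if ((hi | lo) == 0) {
            cr6 |= 1 << 1;            /* every element compared false */
        }
        return cr6;
    }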
+
+static bool do_vcmp(DisasContext *ctx, arg_VC *a, TCGCond cond, int vece)
+{
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_cmp(cond, vece, avr_full_offset(a->vrt),
+ avr_full_offset(a->vra), avr_full_offset(a->vrb), 16, 16);
+
+ if (a->rc) {
+ do_vcmp_rc(a->vrt);
+ }
+
+ return true;
+}
+
+TRANS_FLAGS(ALTIVEC, VCMPEQUB, do_vcmp, TCG_COND_EQ, MO_8)
+TRANS_FLAGS(ALTIVEC, VCMPEQUH, do_vcmp, TCG_COND_EQ, MO_16)
+TRANS_FLAGS(ALTIVEC, VCMPEQUW, do_vcmp, TCG_COND_EQ, MO_32)
+TRANS_FLAGS2(ALTIVEC_207, VCMPEQUD, do_vcmp, TCG_COND_EQ, MO_64)
+
+TRANS_FLAGS(ALTIVEC, VCMPGTSB, do_vcmp, TCG_COND_GT, MO_8)
+TRANS_FLAGS(ALTIVEC, VCMPGTSH, do_vcmp, TCG_COND_GT, MO_16)
+TRANS_FLAGS(ALTIVEC, VCMPGTSW, do_vcmp, TCG_COND_GT, MO_32)
+TRANS_FLAGS2(ALTIVEC_207, VCMPGTSD, do_vcmp, TCG_COND_GT, MO_64)
+TRANS_FLAGS(ALTIVEC, VCMPGTUB, do_vcmp, TCG_COND_GTU, MO_8)
+TRANS_FLAGS(ALTIVEC, VCMPGTUH, do_vcmp, TCG_COND_GTU, MO_16)
+TRANS_FLAGS(ALTIVEC, VCMPGTUW, do_vcmp, TCG_COND_GTU, MO_32)
+TRANS_FLAGS2(ALTIVEC_207, VCMPGTUD, do_vcmp, TCG_COND_GTU, MO_64)
+
+TRANS_FLAGS2(ISA300, VCMPNEB, do_vcmp, TCG_COND_NE, MO_8)
+TRANS_FLAGS2(ISA300, VCMPNEH, do_vcmp, TCG_COND_NE, MO_16)
+TRANS_FLAGS2(ISA300, VCMPNEW, do_vcmp, TCG_COND_NE, MO_32)
+
+static void gen_vcmpnez_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t0, t1, zero;
+
+ t0 = tcg_temp_new_vec_matching(t);
+ t1 = tcg_temp_new_vec_matching(t);
+ zero = tcg_constant_vec_matching(t, vece, 0);
+
+ tcg_gen_cmp_vec(TCG_COND_EQ, vece, t0, a, zero);
+ tcg_gen_cmp_vec(TCG_COND_EQ, vece, t1, b, zero);
+ tcg_gen_cmp_vec(TCG_COND_NE, vece, t, a, b);
+
+ tcg_gen_or_vec(vece, t, t, t0);
+ tcg_gen_or_vec(vece, t, t, t1);
+}
+
+static bool do_vcmpnez(DisasContext *ctx, arg_VC *a, int vece)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_cmp_vec, 0
+ };
+ static const GVecGen3 ops[3] = {
+ {
+ .fniv = gen_vcmpnez_vec,
+ .fno = gen_helper_VCMPNEZB,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vcmpnez_vec,
+ .fno = gen_helper_VCMPNEZH,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vcmpnez_vec,
+ .fno = gen_helper_VCMPNEZW,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ }
+ };
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+ avr_full_offset(a->vrb), 16, 16, &ops[vece]);
+
+ if (a->rc) {
+ do_vcmp_rc(a->vrt);
+ }
+
+ return true;
+}
+
+TRANS(VCMPNEZB, do_vcmpnez, MO_8)
+TRANS(VCMPNEZH, do_vcmpnez, MO_16)
+TRANS(VCMPNEZW, do_vcmpnez, MO_32)
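Per element, the predicate expanded by gen_vcmpnez_vec() is "not equal, or either operand zero"; a one-line scalar model (illustration only, shown for word elements):

    static bool vcmpnez_elem(uint32_t a, uint32_t b)
    {
        return a == 0 || b == 0 || a != b;
    }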
+
+static bool trans_VCMPEQUQ(DisasContext *ctx, arg_VC *a)
+{
+ TCGv_i64 t0, t1, t2;
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+
+ get_avr64(t0, a->vra, true);
+ get_avr64(t1, a->vrb, true);
+ tcg_gen_xor_i64(t2, t0, t1);
+
+ get_avr64(t0, a->vra, false);
+ get_avr64(t1, a->vrb, false);
+ tcg_gen_xor_i64(t1, t0, t1);
+
+ tcg_gen_or_i64(t1, t1, t2);
+ tcg_gen_negsetcond_i64(TCG_COND_EQ, t1, t1, tcg_constant_i64(0));
+
+ set_avr64(a->vrt, t1, true);
+ set_avr64(a->vrt, t1, false);
+
+ if (a->rc) {
+ tcg_gen_extrl_i64_i32(cpu_crf[6], t1);
+ tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa);
+ tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2);
+ }
+ return true;
+}
+
+static bool do_vcmpgtq(DisasContext *ctx, arg_VC *a, bool sign)
+{
+ TCGv_i64 t0, t1, t2;
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+
+ get_avr64(t0, a->vra, false);
+ get_avr64(t1, a->vrb, false);
+ tcg_gen_negsetcond_i64(TCG_COND_GTU, t2, t0, t1);
+
+ get_avr64(t0, a->vra, true);
+ get_avr64(t1, a->vrb, true);
+ tcg_gen_movcond_i64(TCG_COND_EQ, t2, t0, t1, t2, tcg_constant_i64(0));
+ tcg_gen_negsetcond_i64(sign ? TCG_COND_GT : TCG_COND_GTU, t1, t0, t1);
+
+ tcg_gen_or_i64(t1, t1, t2);
+
+ set_avr64(a->vrt, t1, true);
+ set_avr64(a->vrt, t1, false);
+
+ if (a->rc) {
+ tcg_gen_extrl_i64_i32(cpu_crf[6], t1);
+ tcg_gen_andi_i32(cpu_crf[6], cpu_crf[6], 0xa);
+ tcg_gen_xori_i32(cpu_crf[6], cpu_crf[6], 0x2);
+ }
+ return true;
+}
+
+TRANS(VCMPGTSQ, do_vcmpgtq, true)
+TRANS(VCMPGTUQ, do_vcmpgtq, false)
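do_vcmpgtq() builds the 128-bit comparison from the two 64-bit halves: the high halves decide unless they are equal, in which case the low halves are compared unsigned. A scalar sketch (illustration only):

    static bool gtq(uint64_t ah, uint64_t al, uint64_t bh, uint64_t bl,
                    bool sign)
    {
        if (ah != bh) {
            return sign ? (int64_t)ah > (int64_t)bh : ah > bh;
        }
        return al > bl;   /* low halves always compare unsigned */
    }

VRT is then filled with all-ones or all-zeroes, and with Rc=1 the andi/xori pair above maps that value onto CR6.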
+
+static bool do_vcmpq(DisasContext *ctx, arg_VX_bf *a, bool sign)
+{
+ TCGv_i64 vra, vrb;
+ TCGLabel *gt, *lt, *done;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ vra = tcg_temp_new_i64();
+ vrb = tcg_temp_new_i64();
+ gt = gen_new_label();
+ lt = gen_new_label();
+ done = gen_new_label();
+
+ get_avr64(vra, a->vra, true);
+ get_avr64(vrb, a->vrb, true);
+ tcg_gen_brcond_i64((sign ? TCG_COND_GT : TCG_COND_GTU), vra, vrb, gt);
+ tcg_gen_brcond_i64((sign ? TCG_COND_LT : TCG_COND_LTU), vra, vrb, lt);
+
+ get_avr64(vra, a->vra, false);
+ get_avr64(vrb, a->vrb, false);
+ tcg_gen_brcond_i64(TCG_COND_GTU, vra, vrb, gt);
+ tcg_gen_brcond_i64(TCG_COND_LTU, vra, vrb, lt);
+
+ tcg_gen_movi_i32(cpu_crf[a->bf], CRF_EQ);
+ tcg_gen_br(done);
+
+ gen_set_label(gt);
+ tcg_gen_movi_i32(cpu_crf[a->bf], CRF_GT);
+ tcg_gen_br(done);
+
+ gen_set_label(lt);
+ tcg_gen_movi_i32(cpu_crf[a->bf], CRF_LT);
+ tcg_gen_br(done);
+
+ gen_set_label(done);
+ return true;
+}
+
+TRANS(VCMPSQ, do_vcmpq, true)
+TRANS(VCMPUQ, do_vcmpq, false)
+
GEN_VXRFORM(vcmpeqfp, 3, 3)
GEN_VXRFORM(vcmpgefp, 3, 7)
GEN_VXRFORM(vcmpgtfp, 3, 11)
GEN_VXRFORM(vcmpbfp, 3, 15)
-GEN_VXRFORM(vcmpneb, 3, 0)
-GEN_VXRFORM(vcmpneh, 3, 1)
-GEN_VXRFORM(vcmpnew, 3, 2)
-
-GEN_VXRFORM_DUAL(vcmpequb, PPC_ALTIVEC, PPC_NONE, \
- vcmpneb, PPC_NONE, PPC2_ISA300)
-GEN_VXRFORM_DUAL(vcmpequh, PPC_ALTIVEC, PPC_NONE, \
- vcmpneh, PPC_NONE, PPC2_ISA300)
-GEN_VXRFORM_DUAL(vcmpequw, PPC_ALTIVEC, PPC_NONE, \
- vcmpnew, PPC_NONE, PPC2_ISA300)
-GEN_VXRFORM_DUAL(vcmpeqfp, PPC_ALTIVEC, PPC_NONE, \
- vcmpequd, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXRFORM_DUAL(vcmpbfp, PPC_ALTIVEC, PPC_NONE, \
- vcmpgtsd, PPC_NONE, PPC2_ALTIVEC_207)
-GEN_VXRFORM_DUAL(vcmpgtfp, PPC_ALTIVEC, PPC_NONE, \
- vcmpgtud, PPC_NONE, PPC2_ALTIVEC_207)
static void gen_vsplti(DisasContext *ctx, int vece)
{
@@ -1075,8 +1465,6 @@ static void glue(gen_, name)(DisasContext *ctx) \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
gen_helper_##name(rd, rb); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rd); \
}
#define GEN_VXFORM_NOA_ENV(name, opc2, opc3) \
@@ -1090,9 +1478,7 @@ static void glue(gen_, name)(DisasContext *ctx) \
} \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
- gen_helper_##name(cpu_env, rd, rb); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rd); \
+ gen_helper_##name(tcg_env, rd, rb); \
}
#define GEN_VXFORM_NOA_2(name, opc2, opc3, opc4) \
@@ -1106,8 +1492,6 @@ static void glue(gen_, name)(DisasContext *ctx) \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
gen_helper_##name(rd, rb); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rd); \
}
#define GEN_VXFORM_NOA_3(name, opc2, opc3, opc4) \
@@ -1120,7 +1504,6 @@ static void glue(gen_, name)(DisasContext *ctx) \
} \
rb = gen_avr_ptr(rB(ctx->opcode)); \
gen_helper_##name(cpu_gpr[rD(ctx->opcode)], rb); \
- tcg_temp_free_ptr(rb); \
}
GEN_VXFORM_NOA(vupkhsb, 7, 8);
GEN_VXFORM_NOA(vupkhsh, 7, 9);
@@ -1138,9 +1521,70 @@ GEN_VXFORM_NOA_ENV(vrfim, 5, 11);
GEN_VXFORM_NOA_ENV(vrfin, 5, 8);
GEN_VXFORM_NOA_ENV(vrfip, 5, 10);
GEN_VXFORM_NOA_ENV(vrfiz, 5, 9);
-GEN_VXFORM_NOA(vprtybw, 1, 24);
-GEN_VXFORM_NOA(vprtybd, 1, 24);
-GEN_VXFORM_NOA(vprtybq, 1, 24);
+
+static void gen_vprtyb_vec(unsigned vece, TCGv_vec t, TCGv_vec b)
+{
+ int i;
+ TCGv_vec tmp = tcg_temp_new_vec_matching(b);
+ /* MO_32 is 2, so 2 iterations for MO_32 and 3 for MO_64 */
+ for (i = 0; i < vece; i++) {
+ tcg_gen_shri_vec(vece, tmp, b, (4 << (vece - i)));
+ tcg_gen_xor_vec(vece, b, tmp, b);
+ }
+ tcg_gen_and_vec(vece, t, b, tcg_constant_vec_matching(t, vece, 1));
+}
+
+/* vprtybw */
+static void gen_vprtyb_i32(TCGv_i32 t, TCGv_i32 b)
+{
+ tcg_gen_ctpop_i32(t, b);
+ tcg_gen_and_i32(t, t, tcg_constant_i32(1));
+}
+
+/* vprtybd */
+static void gen_vprtyb_i64(TCGv_i64 t, TCGv_i64 b)
+{
+ tcg_gen_ctpop_i64(t, b);
+ tcg_gen_and_i64(t, t, tcg_constant_i64(1));
+}
+
+static bool do_vx_vprtyb(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, 0
+ };
+
+ static const GVecGen2 op[] = {
+ {
+ .fniv = gen_vprtyb_vec,
+ .fni4 = gen_vprtyb_i32,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vprtyb_vec,
+ .fni8 = gen_vprtyb_i64,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ {
+ .fno = gen_helper_VPRTYBQ,
+ .vece = MO_128
+ },
+ };
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_2(avr_full_offset(a->vrt), avr_full_offset(a->vrb),
+ 16, 16, &op[vece - MO_32]);
+
+ return true;
+}
+
+TRANS(VPRTYBW, do_vx_vprtyb, MO_32)
+TRANS(VPRTYBD, do_vx_vprtyb, MO_64)
+TRANS(VPRTYBQ, do_vx_vprtyb, MO_128)
static void gen_vsplt(DisasContext *ctx, int vece)
{
@@ -1157,7 +1601,7 @@ static void gen_vsplt(DisasContext *ctx, int vece)
/* Experimental testing shows that hardware masks the immediate. */
bofs += (uimm << vece) & 15;
-#ifndef HOST_WORDS_BIGENDIAN
+#if !HOST_BIG_ENDIAN
bofs ^= 15;
bofs &= ~((1 << vece) - 1);
#endif
@@ -1178,13 +1622,10 @@ static void glue(gen_, name)(DisasContext *ctx) \
gen_exception(ctx, POWERPC_EXCP_VPU); \
return; \
} \
- uimm = tcg_const_i32(UIMM5(ctx->opcode)); \
+ uimm = tcg_constant_i32(UIMM5(ctx->opcode)); \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
- gen_helper_##name(cpu_env, rd, rb, uimm); \
- tcg_temp_free_i32(uimm); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rd); \
+ gen_helper_##name(tcg_env, rd, rb, uimm); \
}
#define GEN_VXFORM_UIMM_SPLAT(name, opc2, opc3, splat_max) \
@@ -1205,9 +1646,6 @@ static void glue(gen_, name)(DisasContext *ctx) \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
gen_helper_##name(rd, rb, t0); \
- tcg_temp_free_i32(t0); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rd); \
}
GEN_VXFORM_VSPLT(vspltb, MO_8, 6, 8);
@@ -1217,10 +1655,6 @@ GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15);
GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14);
GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12);
GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8);
-GEN_VXFORM_UIMM_SPLAT(vinsertb, 6, 12, 15);
-GEN_VXFORM_UIMM_SPLAT(vinserth, 6, 13, 14);
-GEN_VXFORM_UIMM_SPLAT(vinsertw, 6, 14, 12);
-GEN_VXFORM_UIMM_SPLAT(vinsertd, 6, 15, 8);
GEN_VXFORM_UIMM_ENV(vcfux, 5, 12);
GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13);
GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14);
@@ -1231,12 +1665,292 @@ GEN_VXFORM_DUAL(vsplth, PPC_ALTIVEC, PPC_NONE,
vextractuh, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vspltw, PPC_ALTIVEC, PPC_NONE,
vextractuw, PPC_NONE, PPC2_ISA300);
-GEN_VXFORM_DUAL(vspltisb, PPC_ALTIVEC, PPC_NONE,
- vinsertb, PPC_NONE, PPC2_ISA300);
-GEN_VXFORM_DUAL(vspltish, PPC_ALTIVEC, PPC_NONE,
- vinserth, PPC_NONE, PPC2_ISA300);
-GEN_VXFORM_DUAL(vspltisw, PPC_ALTIVEC, PPC_NONE,
- vinsertw, PPC_NONE, PPC2_ISA300);
+
+static bool trans_VGNB(DisasContext *ctx, arg_VX_n *a)
+{
+ /*
+ * Similar to do_vextractm, we'll use a sequence of mask-shift-or operations
+ * to gather the bits. The masks can be created with
+ *
+ * uint64_t mask(uint64_t n, uint64_t step)
+ * {
+ * uint64_t p = ((1UL << (1UL << step)) - 1UL) << ((n - 1UL) << step),
+ * plen = n << step, m = 0;
+ * for(int i = 0; i < 64/plen; i++) {
+ * m |= p;
+ * m = ror64(m, plen);
+ * }
+ * p >>= plen * DIV_ROUND_UP(64, plen) - 64;
+ * return m | p;
+ * }
+ *
+ * But since there are few values of N, we'll use a lookup table to avoid
+ * these calculations at runtime.
+ */
+ static const uint64_t mask[6][5] = {
+ {
+ 0xAAAAAAAAAAAAAAAAULL, 0xccccccccccccccccULL, 0xf0f0f0f0f0f0f0f0ULL,
+ 0xff00ff00ff00ff00ULL, 0xffff0000ffff0000ULL
+ },
+ {
+ 0x9249249249249249ULL, 0xC30C30C30C30C30CULL, 0xF00F00F00F00F00FULL,
+ 0xFF0000FF0000FF00ULL, 0xFFFF00000000FFFFULL
+ },
+ {
+ /* For N >= 4, some mask operations can be elided */
+ 0x8888888888888888ULL, 0, 0xf000f000f000f000ULL, 0,
+ 0xFFFF000000000000ULL
+ },
+ {
+ 0x8421084210842108ULL, 0, 0xF0000F0000F0000FULL, 0, 0
+ },
+ {
+ 0x8208208208208208ULL, 0, 0xF00000F00000F000ULL, 0, 0
+ },
+ {
+ 0x8102040810204081ULL, 0, 0xF000000F000000F0ULL, 0, 0
+ }
+ };
+ uint64_t m;
+ int i, sh, nbits = DIV_ROUND_UP(64, a->n);
+ TCGv_i64 hi, lo, t0, t1;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ if (a->n < 2) {
+ /*
+ * "N can be any value between 2 and 7, inclusive." Otherwise, the
+ * result is undefined, so we don't need to change RT. Also, N > 7 is
+ * impossible since the immediate field is 3 bits only.
+ */
+ return true;
+ }
+
+ hi = tcg_temp_new_i64();
+ lo = tcg_temp_new_i64();
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ get_avr64(hi, a->vrb, true);
+ get_avr64(lo, a->vrb, false);
+
+ /* Align the lower doubleword so we can use the same mask */
+ tcg_gen_shli_i64(lo, lo, a->n * nbits - 64);
+
+ /*
+ * Starting from the most significant bit, gather every Nth bit with a
+ * sequence of mask-shift-or operation. E.g.: for N=3
+ * AxxBxxCxxDxxExxFxxGxxHxxIxxJxxKxxLxxMxxNxxOxxPxxQxxRxxSxxTxxUxxV
+ * & rep(0b100)
+ * A..B..C..D..E..F..G..H..I..J..K..L..M..N..O..P..Q..R..S..T..U..V
+ * << 2
+ * .B..C..D..E..F..G..H..I..J..K..L..M..N..O..P..Q..R..S..T..U..V..
+ * |
+ * AB.BC.CD.DE.EF.FG.GH.HI.IJ.JK.KL.LM.MN.NO.OP.PQ.QR.RS.ST.TU.UV.V
+ * & rep(0b110000)
+ * AB....CD....EF....GH....IJ....KL....MN....OP....QR....ST....UV..
+ * << 4
+ * ..CD....EF....GH....IJ....KL....MN....OP....QR....ST....UV......
+ * |
+ * ABCD..CDEF..EFGH..GHIJ..IJKL..KLMN..MNOP..OPQR..QRST..STUV..UV..
+ * & rep(0b111100000000)
+ * ABCD........EFGH........IJKL........MNOP........QRST........UV..
+ * << 8
+ * ....EFGH........IJKL........MNOP........QRST........UV..........
+ * |
+ * ABCDEFGH....EFGHIJKL....IJKLMNOP....MNOPQRST....QRSTUV......UV..
+ * & rep(0b111111110000000000000000)
+ * ABCDEFGH................IJKLMNOP................QRSTUV..........
+ * << 16
+ * ........IJKLMNOP................QRSTUV..........................
+ * |
+ * ABCDEFGHIJKLMNOP........IJKLMNOPQRSTUV..........QRSTUV..........
+ * & rep(0b111111111111111100000000000000000000000000000000)
+ * ABCDEFGHIJKLMNOP................................QRSTUV..........
+ * << 32
+ * ................QRSTUV..........................................
+ * |
+ * ABCDEFGHIJKLMNOPQRSTUV..........................QRSTUV..........
+ */
+ for (i = 0, sh = a->n - 1; i < 5; i++, sh <<= 1) {
+ m = mask[a->n - 2][i];
+ if (m) {
+ tcg_gen_andi_i64(hi, hi, m);
+ tcg_gen_andi_i64(lo, lo, m);
+ }
+ if (sh < 64) {
+ tcg_gen_shli_i64(t0, hi, sh);
+ tcg_gen_shli_i64(t1, lo, sh);
+ tcg_gen_or_i64(hi, t0, hi);
+ tcg_gen_or_i64(lo, t1, lo);
+ }
+ }
+
+ tcg_gen_andi_i64(hi, hi, ~(~0ULL >> nbits));
+ tcg_gen_andi_i64(lo, lo, ~(~0ULL >> nbits));
+ tcg_gen_shri_i64(lo, lo, nbits);
+ tcg_gen_or_i64(hi, hi, lo);
+ tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], hi);
+ return true;
+}
+
+static bool do_vextdx(DisasContext *ctx, arg_VA *a, int size, bool right,
+ void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv))
+{
+ TCGv_ptr vrt, vra, vrb;
+ TCGv rc;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ vrt = gen_avr_ptr(a->vrt);
+ vra = gen_avr_ptr(a->vra);
+ vrb = gen_avr_ptr(a->vrb);
+ rc = tcg_temp_new();
+
+ tcg_gen_andi_tl(rc, cpu_gpr[a->rc], 0x1F);
+ if (right) {
+ tcg_gen_subfi_tl(rc, 32 - size, rc);
+ }
+ gen_helper(tcg_env, vrt, vra, vrb, rc);
+ return true;
+}
+
+TRANS(VEXTDUBVLX, do_vextdx, 1, false, gen_helper_VEXTDUBVLX)
+TRANS(VEXTDUHVLX, do_vextdx, 2, false, gen_helper_VEXTDUHVLX)
+TRANS(VEXTDUWVLX, do_vextdx, 4, false, gen_helper_VEXTDUWVLX)
+TRANS(VEXTDDVLX, do_vextdx, 8, false, gen_helper_VEXTDDVLX)
+
+TRANS(VEXTDUBVRX, do_vextdx, 1, true, gen_helper_VEXTDUBVLX)
+TRANS(VEXTDUHVRX, do_vextdx, 2, true, gen_helper_VEXTDUHVLX)
+TRANS(VEXTDUWVRX, do_vextdx, 4, true, gen_helper_VEXTDUWVLX)
+TRANS(VEXTDDVRX, do_vextdx, 8, true, gen_helper_VEXTDDVLX)
+
+static bool do_vinsx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
+ TCGv_i64 rb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
+{
+ TCGv_ptr t;
+ TCGv idx;
+
+ t = gen_avr_ptr(vrt);
+ idx = tcg_temp_new();
+
+ tcg_gen_andi_tl(idx, ra, 0xF);
+ if (right) {
+ tcg_gen_subfi_tl(idx, 16 - size, idx);
+ }
+
+ gen_helper(tcg_env, t, rb, idx);
+ return true;
+}
+
+static bool do_vinsvx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra,
+ int vrb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
+{
+ TCGv_i64 val;
+
+ val = tcg_temp_new_i64();
+ get_avr64(val, vrb, true);
+ return do_vinsx(ctx, vrt, size, right, ra, val, gen_helper);
+}
+
+static bool do_vinsx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
+ void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
+{
+ TCGv_i64 val;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ val = tcg_temp_new_i64();
+ tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);
+
+ return do_vinsx(ctx, a->vrt, size, right, cpu_gpr[a->vra], val, gen_helper);
+}
+
+static bool do_vinsvx_VX(DisasContext *ctx, arg_VX *a, int size, bool right,
+ void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ return do_vinsvx(ctx, a->vrt, size, right, cpu_gpr[a->vra], a->vrb,
+ gen_helper);
+}
+
+static bool do_vins_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
+ void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
+{
+ TCGv_i64 val;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ if (a->uim > (16 - size)) {
+ /*
+ * PowerISA v3.1 says that the resulting value is undefined in this
+ * case, so just log a guest error and leave VRT unchanged. The
+ * real hardware would do a partial insert, e.g. if VRT is zeroed and
+ * RB is 0x12345678, executing "vinsw VRT,RB,14" results in
+ * VRT = 0x0000...00001234, but we don't bother to reproduce this
+ * behavior as software shouldn't rely on it.
+ */
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINS* at"
+ " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
+ 16 - size);
+ return true;
+ }
+
+ val = tcg_temp_new_i64();
+ tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]);
+
+ return do_vinsx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), val,
+ gen_helper);
+}
+
+static bool do_vinsert_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size,
+ void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv))
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VECTOR(ctx);
+
+ if (a->uim > (16 - size)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINSERT* at"
+ " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim,
+ 16 - size);
+ return true;
+ }
+
+ return do_vinsvx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), a->vrb,
+ gen_helper);
+}
+
+TRANS(VINSBLX, do_vinsx_VX, 1, false, gen_helper_VINSBLX)
+TRANS(VINSHLX, do_vinsx_VX, 2, false, gen_helper_VINSHLX)
+TRANS(VINSWLX, do_vinsx_VX, 4, false, gen_helper_VINSWLX)
+TRANS(VINSDLX, do_vinsx_VX, 8, false, gen_helper_VINSDLX)
+
+TRANS(VINSBRX, do_vinsx_VX, 1, true, gen_helper_VINSBLX)
+TRANS(VINSHRX, do_vinsx_VX, 2, true, gen_helper_VINSHLX)
+TRANS(VINSWRX, do_vinsx_VX, 4, true, gen_helper_VINSWLX)
+TRANS(VINSDRX, do_vinsx_VX, 8, true, gen_helper_VINSDLX)
+
+TRANS(VINSW, do_vins_VX_uim4, 4, gen_helper_VINSWLX)
+TRANS(VINSD, do_vins_VX_uim4, 8, gen_helper_VINSDLX)
+
+TRANS(VINSBVLX, do_vinsvx_VX, 1, false, gen_helper_VINSBLX)
+TRANS(VINSHVLX, do_vinsvx_VX, 2, false, gen_helper_VINSHLX)
+TRANS(VINSWVLX, do_vinsvx_VX, 4, false, gen_helper_VINSWLX)
+
+TRANS(VINSBVRX, do_vinsvx_VX, 1, true, gen_helper_VINSBLX)
+TRANS(VINSHVRX, do_vinsvx_VX, 2, true, gen_helper_VINSHLX)
+TRANS(VINSWVRX, do_vinsvx_VX, 4, true, gen_helper_VINSWLX)
+
+TRANS(VINSERTB, do_vinsert_VX_uim4, 1, gen_helper_VINSBLX)
+TRANS(VINSERTH, do_vinsert_VX_uim4, 2, gen_helper_VINSHLX)
+TRANS(VINSERTW, do_vinsert_VX_uim4, 4, gen_helper_VINSWLX)
+TRANS(VINSERTD, do_vinsert_VX_uim4, 8, gen_helper_VINSDLX)
static void gen_vsldoi(DisasContext *ctx)
{
@@ -1249,14 +1963,379 @@ static void gen_vsldoi(DisasContext *ctx)
ra = gen_avr_ptr(rA(ctx->opcode));
rb = gen_avr_ptr(rB(ctx->opcode));
rd = gen_avr_ptr(rD(ctx->opcode));
- sh = tcg_const_i32(VSH(ctx->opcode));
+ sh = tcg_constant_i32(VSH(ctx->opcode));
gen_helper_vsldoi(rd, ra, rb, sh);
- tcg_temp_free_ptr(ra);
- tcg_temp_free_ptr(rb);
- tcg_temp_free_ptr(rd);
- tcg_temp_free_i32(sh);
}
+static bool trans_VSLDBI(DisasContext *ctx, arg_VN *a)
+{
+ TCGv_i64 t0, t1, t2;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ get_avr64(t0, a->vra, true);
+ get_avr64(t1, a->vra, false);
+
+ if (a->sh != 0) {
+ t2 = tcg_temp_new_i64();
+
+ get_avr64(t2, a->vrb, true);
+
+ tcg_gen_extract2_i64(t0, t1, t0, 64 - a->sh);
+ tcg_gen_extract2_i64(t1, t2, t1, 64 - a->sh);
+ }
+
+ set_avr64(a->vrt, t0, true);
+ set_avr64(a->vrt, t1, false);
+ return true;
+}
+
+static bool trans_VSRDBI(DisasContext *ctx, arg_VN *a)
+{
+ TCGv_i64 t2, t1, t0;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ get_avr64(t0, a->vrb, false);
+ get_avr64(t1, a->vrb, true);
+
+ if (a->sh != 0) {
+ t2 = tcg_temp_new_i64();
+
+ get_avr64(t2, a->vra, false);
+
+ tcg_gen_extract2_i64(t0, t0, t1, a->sh);
+ tcg_gen_extract2_i64(t1, t1, t2, a->sh);
+ }
+
+ set_avr64(a->vrt, t0, false);
+ set_avr64(a->vrt, t1, true);
+ return true;
+}
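For reference, tcg_gen_extract2_i64(d, lo, hi, ofs) produces the 64 bits starting at bit ofs of the 128-bit value hi:lo, so for sh in 1..7 the two extract2 calls in VSLDBI amount to a funnel shift. A plain-C sketch (illustrative only, not part of the patch):

/* High 128 bits of (VRA:VRB) << sh, i.e. VSLDBI with 0 < sh < 8. */
static void vsldbi_ref(uint64_t vra_hi, uint64_t vra_lo, uint64_t vrb_hi,
                       unsigned sh, uint64_t *vrt_hi, uint64_t *vrt_lo)
{
    *vrt_hi = (vra_hi << sh) | (vra_lo >> (64 - sh));
    *vrt_lo = (vra_lo << sh) | (vrb_hi >> (64 - sh));
}

VSRDBI is the mirror image, shifting VRA:VRB right by sh bits and keeping the low 128 bits.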
+
+static bool do_vexpand(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_sari(vece, avr_full_offset(a->vrt), avr_full_offset(a->vrb),
+ (8 << vece) - 1, 16, 16);
+
+ return true;
+}
+
+TRANS(VEXPANDBM, do_vexpand, MO_8)
+TRANS(VEXPANDHM, do_vexpand, MO_16)
+TRANS(VEXPANDWM, do_vexpand, MO_32)
+TRANS(VEXPANDDM, do_vexpand, MO_64)
+
+static bool trans_VEXPANDQM(DisasContext *ctx, arg_VX_tb *a)
+{
+ TCGv_i64 tmp;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ tmp = tcg_temp_new_i64();
+
+ get_avr64(tmp, a->vrb, true);
+ tcg_gen_sari_i64(tmp, tmp, 63);
+ set_avr64(a->vrt, tmp, false);
+ set_avr64(a->vrt, tmp, true);
+ return true;
+}
+
+static bool do_vextractm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
+{
+ const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece,
+ mask = dup_const(vece, 1ULL << (elem_width - 1));
+ uint64_t i, j;
+ TCGv_i64 lo, hi, t0, t1;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ hi = tcg_temp_new_i64();
+ lo = tcg_temp_new_i64();
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ get_avr64(lo, a->vrb, false);
+ get_avr64(hi, a->vrb, true);
+
+ tcg_gen_andi_i64(lo, lo, mask);
+ tcg_gen_andi_i64(hi, hi, mask);
+
+ /*
+ * Gather the most significant bit of each element in the highest element.
+ * E.g. for bytes:
+ * aXXXXXXXbXXXXXXXcXXXXXXXdXXXXXXXeXXXXXXXfXXXXXXXgXXXXXXXhXXXXXXX
+ * & dup(1 << (elem_width - 1))
+ * a0000000b0000000c0000000d0000000e0000000f0000000g0000000h0000000
+ * << 32 - 4
+ * 0000e0000000f0000000g0000000h00000000000000000000000000000000000
+ * |
+ * a000e000b000f000c000g000d000h000e0000000f0000000g0000000h0000000
+ * << 16 - 2
+ * 00c000g000d000h000e0000000f0000000g0000000h000000000000000000000
+ * |
+ * a0c0e0g0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h0000000
+ * << 8 - 1
+ * 0b0d0f0h0c0e0g000d0f0h000e0g00000f0h00000g0000000h00000000000000
+ * |
+ * abcdefghbcdefgh0cdefgh00defgh000efgh0000fgh00000gh000000h0000000
+ */
+ for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) {
+ tcg_gen_shli_i64(t0, hi, j - i);
+ tcg_gen_shli_i64(t1, lo, j - i);
+ tcg_gen_or_i64(hi, hi, t0);
+ tcg_gen_or_i64(lo, lo, t1);
+ }
+
+ tcg_gen_shri_i64(hi, hi, 64 - elem_count_half);
+ tcg_gen_extract2_i64(lo, lo, hi, 64 - elem_count_half);
+ tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], lo);
+ return true;
+}
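The shift/OR ladder above just collects one sign bit per element into a GPR. A straightforward scalar reference (illustrative only, not part of the patch; vrb[] is assumed to hold the 16 vector bytes in left-to-right element order):

/* VEXTRACT[BHWD]M reference: one result bit per element, taken from that
 * element's most significant bit; the leftmost element lands in the most
 * significant result bit. */
static uint64_t vextractm_ref(const uint8_t vrb[16], int elem_bytes)
{
    uint64_t r = 0;
    for (int i = 0; i < 16 / elem_bytes; i++) {
        r = (r << 1) | (vrb[i * elem_bytes] >> 7);
    }
    return r;
}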
+
+TRANS(VEXTRACTBM, do_vextractm, MO_8)
+TRANS(VEXTRACTHM, do_vextractm, MO_16)
+TRANS(VEXTRACTWM, do_vextractm, MO_32)
+TRANS(VEXTRACTDM, do_vextractm, MO_64)
+
+static bool trans_VEXTRACTQM(DisasContext *ctx, arg_VX_tb *a)
+{
+ TCGv_i64 tmp;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ tmp = tcg_temp_new_i64();
+
+ get_avr64(tmp, a->vrb, true);
+ tcg_gen_shri_i64(tmp, tmp, 63);
+ tcg_gen_trunc_i64_tl(cpu_gpr[a->vrt], tmp);
+ return true;
+}
+
+static bool do_mtvsrm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
+{
+ const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece;
+ uint64_t c;
+ int i, j;
+ TCGv_i64 hi, lo, t0, t1;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ hi = tcg_temp_new_i64();
+ lo = tcg_temp_new_i64();
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ tcg_gen_extu_tl_i64(t0, cpu_gpr[a->vrb]);
+ tcg_gen_extract_i64(hi, t0, elem_count_half, elem_count_half);
+ tcg_gen_extract_i64(lo, t0, 0, elem_count_half);
+
+ /*
+ * Spread the bits into their respective elements.
+ * E.g. for bytes:
+ * 00000000000000000000000000000000000000000000000000000000abcdefgh
+ * << 32 - 4
+ * 0000000000000000000000000000abcdefgh0000000000000000000000000000
+ * |
+ * 0000000000000000000000000000abcdefgh00000000000000000000abcdefgh
+ * << 16 - 2
+ * 00000000000000abcdefgh00000000000000000000abcdefgh00000000000000
+ * |
+ * 00000000000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh
+ * << 8 - 1
+ * 0000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh0000000
+ * |
+ * 0000000abcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgh
+ * & dup(1)
+ * 0000000a0000000b0000000c0000000d0000000e0000000f0000000g0000000h
+ * * 0xff
+ * aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh
+ */
+ for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) {
+ tcg_gen_shli_i64(t0, hi, j - i);
+ tcg_gen_shli_i64(t1, lo, j - i);
+ tcg_gen_or_i64(hi, hi, t0);
+ tcg_gen_or_i64(lo, lo, t1);
+ }
+
+ c = dup_const(vece, 1);
+ tcg_gen_andi_i64(hi, hi, c);
+ tcg_gen_andi_i64(lo, lo, c);
+
+ c = MAKE_64BIT_MASK(0, elem_width);
+ tcg_gen_muli_i64(hi, hi, c);
+ tcg_gen_muli_i64(lo, lo, c);
+
+ set_avr64(a->vrt, lo, false);
+ set_avr64(a->vrt, hi, true);
+ return true;
+}
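MTVSR[BHWD]M is the inverse spread: each low-order GPR bit becomes an all-ones or all-zeroes element. A scalar reference (illustrative only, not part of the patch; same left-to-right element layout assumed):

/* GPR bit (n - 1 - i) fills element i, where n is the element count and
 * element 0 is the leftmost. */
static void mtvsrm_ref(uint8_t vrt[16], uint64_t gpr, int elem_bytes)
{
    int n = 16 / elem_bytes;
    for (int i = 0; i < n; i++) {
        uint8_t fill = (gpr >> (n - 1 - i)) & 1 ? 0xFF : 0x00;
        for (int b = 0; b < elem_bytes; b++) {
            vrt[i * elem_bytes + b] = fill;
        }
    }
}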
+
+TRANS(MTVSRBM, do_mtvsrm, MO_8)
+TRANS(MTVSRHM, do_mtvsrm, MO_16)
+TRANS(MTVSRWM, do_mtvsrm, MO_32)
+TRANS(MTVSRDM, do_mtvsrm, MO_64)
+
+static bool trans_MTVSRQM(DisasContext *ctx, arg_VX_tb *a)
+{
+ TCGv_i64 tmp;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ tmp = tcg_temp_new_i64();
+
+ tcg_gen_ext_tl_i64(tmp, cpu_gpr[a->vrb]);
+ tcg_gen_sextract_i64(tmp, tmp, 0, 1);
+ set_avr64(a->vrt, tmp, false);
+ set_avr64(a->vrt, tmp, true);
+ return true;
+}
+
+static bool trans_MTVSRBMI(DisasContext *ctx, arg_DX_b *a)
+{
+ const uint64_t mask = dup_const(MO_8, 1);
+ uint64_t hi, lo;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ hi = extract16(a->b, 8, 8);
+ lo = extract16(a->b, 0, 8);
+
+ for (int i = 4, j = 32; i > 0; i >>= 1, j >>= 1) {
+ hi |= hi << (j - i);
+ lo |= lo << (j - i);
+ }
+
+ hi = (hi & mask) * 0xFF;
+ lo = (lo & mask) * 0xFF;
+
+ set_avr64(a->vrt, tcg_constant_i64(hi), true);
+ set_avr64(a->vrt, tcg_constant_i64(lo), false);
+
+ return true;
+}
+
+static bool do_vcntmb(DisasContext *ctx, arg_VX_mp *a, int vece)
+{
+ TCGv_i64 r[2], mask;
+
+ r[0] = tcg_temp_new_i64();
+ r[1] = tcg_temp_new_i64();
+ mask = tcg_constant_i64(dup_const(vece, 1ULL << ((8 << vece) - 1)));
+
+ for (int i = 0; i < 2; i++) {
+ get_avr64(r[i], a->vrb, i);
+ if (a->mp) {
+ tcg_gen_and_i64(r[i], mask, r[i]);
+ } else {
+ tcg_gen_andc_i64(r[i], mask, r[i]);
+ }
+ tcg_gen_ctpop_i64(r[i], r[i]);
+ }
+
+ tcg_gen_add_i64(r[0], r[0], r[1]);
+ tcg_gen_shli_i64(r[0], r[0], TARGET_LONG_BITS - 8 + vece);
+ tcg_gen_trunc_i64_tl(cpu_gpr[a->rt], r[0]);
+ return true;
+}
+
+TRANS(VCNTMBB, do_vcntmb, MO_8)
+TRANS(VCNTMBH, do_vcntmb, MO_16)
+TRANS(VCNTMBW, do_vcntmb, MO_32)
+TRANS(VCNTMBD, do_vcntmb, MO_64)
+
+static bool do_vstri(DisasContext *ctx, arg_VX_tb_rc *a,
+ void (*gen_helper)(TCGv_i32, TCGv_ptr, TCGv_ptr))
+{
+ TCGv_ptr vrt, vrb;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ vrt = gen_avr_ptr(a->vrt);
+ vrb = gen_avr_ptr(a->vrb);
+
+ if (a->rc) {
+ gen_helper(cpu_crf[6], vrt, vrb);
+ } else {
+ TCGv_i32 discard = tcg_temp_new_i32();
+ gen_helper(discard, vrt, vrb);
+ }
+ return true;
+}
+
+TRANS(VSTRIBL, do_vstri, gen_helper_VSTRIBL)
+TRANS(VSTRIBR, do_vstri, gen_helper_VSTRIBR)
+TRANS(VSTRIHL, do_vstri, gen_helper_VSTRIHL)
+TRANS(VSTRIHR, do_vstri, gen_helper_VSTRIHR)
+
+static bool do_vclrb(DisasContext *ctx, arg_VX *a, bool right)
+{
+ TCGv_i64 rb, mh, ml, tmp,
+ ones = tcg_constant_i64(-1),
+ zero = tcg_constant_i64(0);
+
+ rb = tcg_temp_new_i64();
+ mh = tcg_temp_new_i64();
+ ml = tcg_temp_new_i64();
+ tmp = tcg_temp_new_i64();
+
+ tcg_gen_extu_tl_i64(rb, cpu_gpr[a->vrb]);
+ tcg_gen_andi_i64(tmp, rb, 7);
+ tcg_gen_shli_i64(tmp, tmp, 3);
+ if (right) {
+ tcg_gen_shr_i64(tmp, ones, tmp);
+ } else {
+ tcg_gen_shl_i64(tmp, ones, tmp);
+ }
+ tcg_gen_not_i64(tmp, tmp);
+
+ if (right) {
+ tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(8),
+ tmp, ones);
+ tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(8),
+ zero, tmp);
+ tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(16),
+ ml, ones);
+ } else {
+ tcg_gen_movcond_i64(TCG_COND_LTU, ml, rb, tcg_constant_i64(8),
+ tmp, ones);
+ tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(8),
+ zero, tmp);
+ tcg_gen_movcond_i64(TCG_COND_LTU, mh, rb, tcg_constant_i64(16),
+ mh, ones);
+ }
+
+ get_avr64(tmp, a->vra, true);
+ tcg_gen_and_i64(tmp, tmp, mh);
+ set_avr64(a->vrt, tmp, true);
+
+ get_avr64(tmp, a->vra, false);
+ tcg_gen_and_i64(tmp, tmp, ml);
+ set_avr64(a->vrt, tmp, false);
+ return true;
+}
+
+TRANS(VCLRLB, do_vclrb, false)
+TRANS(VCLRRB, do_vclrb, true)
+
#define GEN_VAFORM_PAIRED(name0, name1, opc2) \
static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
{ \
@@ -1270,71 +2349,190 @@ static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
rc = gen_avr_ptr(rC(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
if (Rc(ctx->opcode)) { \
- gen_helper_##name1(cpu_env, rd, ra, rb, rc); \
+ gen_helper_##name1(tcg_env, rd, ra, rb, rc); \
} else { \
- gen_helper_##name0(cpu_env, rd, ra, rb, rc); \
+ gen_helper_##name0(tcg_env, rd, ra, rb, rc); \
} \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rc); \
- tcg_temp_free_ptr(rd); \
}
-GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16)
+GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23)
-static void gen_vmladduhm(DisasContext *ctx)
+static bool do_va_helper(DisasContext *ctx, arg_VA *a,
+ void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
{
- TCGv_ptr ra, rb, rc, rd;
- if (unlikely(!ctx->altivec_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VPU);
- return;
- }
- ra = gen_avr_ptr(rA(ctx->opcode));
- rb = gen_avr_ptr(rB(ctx->opcode));
- rc = gen_avr_ptr(rC(ctx->opcode));
- rd = gen_avr_ptr(rD(ctx->opcode));
- gen_helper_vmladduhm(rd, ra, rb, rc);
- tcg_temp_free_ptr(ra);
- tcg_temp_free_ptr(rb);
- tcg_temp_free_ptr(rc);
- tcg_temp_free_ptr(rd);
+ TCGv_ptr vrt, vra, vrb, vrc;
+ REQUIRE_VECTOR(ctx);
+
+ vrt = gen_avr_ptr(a->vrt);
+ vra = gen_avr_ptr(a->vra);
+ vrb = gen_avr_ptr(a->vrb);
+ vrc = gen_avr_ptr(a->rc);
+ gen_helper(vrt, vra, vrb, vrc);
+ return true;
}
-static void gen_vpermr(DisasContext *ctx)
+TRANS_FLAGS2(ALTIVEC_207, VADDECUQ, do_va_helper, gen_helper_VADDECUQ)
+TRANS_FLAGS2(ALTIVEC_207, VADDEUQM, do_va_helper, gen_helper_VADDEUQM)
+
+TRANS_FLAGS2(ALTIVEC_207, VSUBEUQM, do_va_helper, gen_helper_VSUBEUQM)
+TRANS_FLAGS2(ALTIVEC_207, VSUBECUQ, do_va_helper, gen_helper_VSUBECUQ)
+
+TRANS_FLAGS(ALTIVEC, VPERM, do_va_helper, gen_helper_VPERM)
+TRANS_FLAGS2(ISA300, VPERMR, do_va_helper, gen_helper_VPERMR)
+
+static void gen_vmladduhm_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
+ TCGv_vec c)
{
- TCGv_ptr ra, rb, rc, rd;
- if (unlikely(!ctx->altivec_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VPU);
- return;
- }
- ra = gen_avr_ptr(rA(ctx->opcode));
- rb = gen_avr_ptr(rB(ctx->opcode));
- rc = gen_avr_ptr(rC(ctx->opcode));
- rd = gen_avr_ptr(rD(ctx->opcode));
- gen_helper_vpermr(cpu_env, rd, ra, rb, rc);
- tcg_temp_free_ptr(ra);
- tcg_temp_free_ptr(rb);
- tcg_temp_free_ptr(rc);
- tcg_temp_free_ptr(rd);
+ tcg_gen_mul_vec(vece, t, a, b);
+ tcg_gen_add_vec(vece, t, t, c);
}
-GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18)
-GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19)
-GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20)
-GEN_VAFORM_PAIRED(vsel, vperm, 21)
-GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23)
+static bool trans_VMLADDUHM(DisasContext *ctx, arg_VA *a)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_add_vec, INDEX_op_mul_vec, 0
+ };
+
+ static const GVecGen4 op = {
+ .fno = gen_helper_VMLADDUHM,
+ .fniv = gen_vmladduhm_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ };
+
+ REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_4(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+ avr_full_offset(a->vrb), avr_full_offset(a->rc),
+ 16, 16, &op);
+
+ return true;
+}
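Per halfword lane, vmladduhm is a plain modulo-65536 multiply-add, which is why the vector expansion is just mul followed by add. A scalar equivalent (illustrative only, not part of the patch):

/* vmladduhm, one 16-bit lane: low half of a * b, plus c, truncated to 16 bits. */
static uint16_t vmladduhm_lane(uint16_t a, uint16_t b, uint16_t c)
{
    return (uint16_t)((uint32_t)a * b + c);
}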
+
+static bool trans_VSEL(DisasContext *ctx, arg_VA *a)
+{
+ REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_bitsel(MO_64, avr_full_offset(a->vrt), avr_full_offset(a->rc),
+ avr_full_offset(a->vrb), avr_full_offset(a->vra),
+ 16, 16);
+
+ return true;
+}
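vsel is a pure bit select on VRC (a clear VRC bit picks the VRA bit, a set bit picks the VRB bit), so a single gvec bitsel call with VRC as the selector covers it. Per 64-bit lane (illustrative only, not part of the patch):

/* vsel, one 64-bit lane. */
static uint64_t vsel_lane(uint64_t vra, uint64_t vrb, uint64_t vrc)
{
    return (vrb & vrc) | (vra & ~vrc);
}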
+
+TRANS_FLAGS(ALTIVEC, VMSUMUBM, do_va_helper, gen_helper_VMSUMUBM)
+TRANS_FLAGS(ALTIVEC, VMSUMMBM, do_va_helper, gen_helper_VMSUMMBM)
+TRANS_FLAGS(ALTIVEC, VMSUMSHM, do_va_helper, gen_helper_VMSUMSHM)
+TRANS_FLAGS(ALTIVEC, VMSUMUHM, do_va_helper, gen_helper_VMSUMUHM)
+
+static bool do_va_env_helper(DisasContext *ctx, arg_VA *a,
+ void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
+{
+ TCGv_ptr vrt, vra, vrb, vrc;
+ REQUIRE_VECTOR(ctx);
+
+ vrt = gen_avr_ptr(a->vrt);
+ vra = gen_avr_ptr(a->vra);
+ vrb = gen_avr_ptr(a->vrb);
+ vrc = gen_avr_ptr(a->rc);
+ gen_helper(tcg_env, vrt, vra, vrb, vrc);
+ return true;
+}
+
+TRANS_FLAGS(ALTIVEC, VMSUMUHS, do_va_env_helper, gen_helper_VMSUMUHS)
+TRANS_FLAGS(ALTIVEC, VMSUMSHS, do_va_env_helper, gen_helper_VMSUMSHS)
+
+TRANS_FLAGS(ALTIVEC, VMHADDSHS, do_va_env_helper, gen_helper_VMHADDSHS)
+TRANS_FLAGS(ALTIVEC, VMHRADDSHS, do_va_env_helper, gen_helper_VMHRADDSHS)
GEN_VXFORM_NOA(vclzb, 1, 28)
GEN_VXFORM_NOA(vclzh, 1, 29)
GEN_VXFORM_TRANS(vclzw, 1, 30)
GEN_VXFORM_TRANS(vclzd, 1, 31)
-GEN_VXFORM_NOA_2(vnegw, 1, 24, 6)
-GEN_VXFORM_NOA_2(vnegd, 1, 24, 7)
-GEN_VXFORM_NOA_2(vextsb2w, 1, 24, 16)
-GEN_VXFORM_NOA_2(vextsh2w, 1, 24, 17)
-GEN_VXFORM_NOA_2(vextsb2d, 1, 24, 24)
-GEN_VXFORM_NOA_2(vextsh2d, 1, 24, 25)
-GEN_VXFORM_NOA_2(vextsw2d, 1, 24, 26)
+
+static bool do_vneg(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_neg(vece, avr_full_offset(a->vrt), avr_full_offset(a->vrb),
+ 16, 16);
+ return true;
+}
+
+TRANS(VNEGW, do_vneg, MO_32)
+TRANS(VNEGD, do_vneg, MO_64)
+
+static void gen_vexts_i64(TCGv_i64 t, TCGv_i64 b, int64_t s)
+{
+ tcg_gen_sextract_i64(t, b, 0, 64 - s);
+}
+
+static void gen_vexts_i32(TCGv_i32 t, TCGv_i32 b, int32_t s)
+{
+ tcg_gen_sextract_i32(t, b, 0, 32 - s);
+}
+
+static void gen_vexts_vec(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t s)
+{
+ tcg_gen_shli_vec(vece, t, b, s);
+ tcg_gen_sari_vec(vece, t, t, s);
+}
+
+static bool do_vexts(DisasContext *ctx, arg_VX_tb *a, unsigned vece, int64_t s)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec, 0
+ };
+
+ static const GVecGen2i op[2] = {
+ {
+ .fni4 = gen_vexts_i32,
+ .fniv = gen_vexts_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vexts_i64,
+ .fniv = gen_vexts_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_2i(avr_full_offset(a->vrt), avr_full_offset(a->vrb),
+ 16, 16, s, &op[vece - MO_32]);
+
+ return true;
+}
+
+TRANS(VEXTSB2W, do_vexts, MO_32, 24);
+TRANS(VEXTSH2W, do_vexts, MO_32, 16);
+TRANS(VEXTSB2D, do_vexts, MO_64, 56);
+TRANS(VEXTSH2D, do_vexts, MO_64, 48);
+TRANS(VEXTSW2D, do_vexts, MO_64, 32);
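The vector path sign-extends with a shift-left/arithmetic-shift-right pair because there is no vector-level sextract; per lane the result is the same as an ordinary narrowing-and-widening cast. A scalar sketch for VEXTSB2W (illustrative only, not part of the patch):

/* vextsb2w, one 32-bit lane: sign-extend the low byte, as the shl + sar pair does. */
static int32_t vextsb2w_lane(int32_t b)
{
    return (int8_t)b;
}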
+
+static bool trans_VEXTSD2Q(DisasContext *ctx, arg_VX_tb *a)
+{
+ TCGv_i64 tmp;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ tmp = tcg_temp_new_i64();
+
+ get_avr64(tmp, a->vrb, false);
+ set_avr64(a->vrt, tmp, false);
+ tcg_gen_sari_i64(tmp, tmp, 63);
+ set_avr64(a->vrt, tmp, true);
+ return true;
+}
+
GEN_VXFORM_NOA_2(vctzb, 1, 24, 28)
GEN_VXFORM_NOA_2(vctzh, 1, 24, 29)
GEN_VXFORM_NOA_2(vctzw, 1, 24, 30)
@@ -1359,7 +2557,6 @@ GEN_VXFORM_TRANS(vgbbd, 6, 20);
GEN_VXFORM(vpmsumb, 4, 16)
GEN_VXFORM(vpmsumh, 4, 17)
GEN_VXFORM(vpmsumw, 4, 18)
-GEN_VXFORM(vpmsumd, 4, 19)
#define GEN_BCD(op) \
static void gen_##op(DisasContext *ctx) \
@@ -1376,14 +2573,9 @@ static void gen_##op(DisasContext *ctx) \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
\
- ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
+ ps = tcg_constant_i32((ctx->opcode & 0x200) != 0); \
\
gen_helper_##op(cpu_crf[6], rd, ra, rb, ps); \
- \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rd); \
- tcg_temp_free_i32(ps); \
}
#define GEN_BCD2(op) \
@@ -1400,13 +2592,9 @@ static void gen_##op(DisasContext *ctx) \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
\
- ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
+ ps = tcg_constant_i32((ctx->opcode & 0x200) != 0); \
\
gen_helper_##op(cpu_crf[6], rd, rb, ps); \
- \
- tcg_temp_free_ptr(rb); \
- tcg_temp_free_ptr(rd); \
- tcg_temp_free_i32(ps); \
}
GEN_BCD(bcdadd)
@@ -1483,8 +2671,6 @@ static void gen_xpnd04_2(DisasContext *ctx)
}
-GEN_VXFORM_DUAL(vsubcuw, PPC_ALTIVEC, PPC_NONE, \
- xpnd04_1, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubsws, PPC_ALTIVEC, PPC_NONE, \
xpnd04_2, PPC_NONE, PPC2_ISA300)
@@ -1504,11 +2690,6 @@ GEN_VXFORM_DUAL(vsubuwm, PPC_ALTIVEC, PPC_NONE, \
bcdus, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_DUAL(vsubsbs, PPC_ALTIVEC, PPC_NONE, \
bcdtrunc, PPC_NONE, PPC2_ISA300)
-GEN_VXFORM_DUAL(vsubuqm, PPC2_ALTIVEC_207, PPC_NONE, \
- bcdtrunc, PPC_NONE, PPC2_ISA300)
-GEN_VXFORM_DUAL(vsubcuq, PPC2_ALTIVEC_207, PPC_NONE, \
- bcdutrunc, PPC_NONE, PPC2_ISA300)
-
static void gen_vsbox(DisasContext *ctx)
{
@@ -1520,8 +2701,6 @@ static void gen_vsbox(DisasContext *ctx)
ra = gen_avr_ptr(rA(ctx->opcode));
rd = gen_avr_ptr(rD(ctx->opcode));
gen_helper_vsbox(rd, ra);
- tcg_temp_free_ptr(ra);
- tcg_temp_free_ptr(rd);
}
GEN_VXFORM(vcipher, 4, 20)
@@ -1545,11 +2724,8 @@ static void gen_##op(DisasContext *ctx) \
} \
ra = gen_avr_ptr(rA(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
- st_six = tcg_const_i32(rB(ctx->opcode)); \
+ st_six = tcg_constant_i32(rB(ctx->opcode)); \
gen_helper_##op(rd, ra, st_six); \
- tcg_temp_free_ptr(ra); \
- tcg_temp_free_ptr(rd); \
- tcg_temp_free_i32(st_six); \
}
VSHASIGMA(vshasigmaw)
@@ -1559,6 +2735,636 @@ GEN_VXFORM3(vpermxor, 22, 0xFF)
GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE,
vpermxor, PPC_NONE, PPC2_ALTIVEC_207)
+static bool trans_VCFUGED(DisasContext *ctx, arg_VX *a)
+{
+ static const GVecGen3 g = {
+ .fni8 = gen_helper_CFUGED,
+ .vece = MO_64,
+ };
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+ avr_full_offset(a->vrb), 16, 16, &g);
+
+ return true;
+}
+
+static bool trans_VCLZDM(DisasContext *ctx, arg_VX *a)
+{
+ static const GVecGen3i g = {
+ .fni8 = do_cntzdm,
+ .vece = MO_64,
+ };
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+ avr_full_offset(a->vrb), 16, 16, false, &g);
+
+ return true;
+}
+
+static bool trans_VCTZDM(DisasContext *ctx, arg_VX *a)
+{
+ static const GVecGen3i g = {
+ .fni8 = do_cntzdm,
+ .vece = MO_64,
+ };
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+ avr_full_offset(a->vrb), 16, 16, true, &g);
+
+ return true;
+}
+
+static bool trans_VPDEPD(DisasContext *ctx, arg_VX *a)
+{
+ static const GVecGen3 g = {
+ .fni8 = gen_helper_PDEPD,
+ .vece = MO_64,
+ };
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+ avr_full_offset(a->vrb), 16, 16, &g);
+
+ return true;
+}
+
+static bool trans_VPEXTD(DisasContext *ctx, arg_VX *a)
+{
+ static const GVecGen3 g = {
+ .fni8 = gen_helper_PEXTD,
+ .vece = MO_64,
+ };
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+ avr_full_offset(a->vrb), 16, 16, &g);
+
+ return true;
+}
+
+static bool trans_VMSUMUDM(DisasContext *ctx, arg_VA *a)
+{
+ TCGv_i64 rl, rh, src1, src2;
+ int dw;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VECTOR(ctx);
+
+ rh = tcg_temp_new_i64();
+ rl = tcg_temp_new_i64();
+ src1 = tcg_temp_new_i64();
+ src2 = tcg_temp_new_i64();
+
+ get_avr64(rl, a->rc, false);
+ get_avr64(rh, a->rc, true);
+
+ for (dw = 0; dw < 2; dw++) {
+ get_avr64(src1, a->vra, dw);
+ get_avr64(src2, a->vrb, dw);
+ tcg_gen_mulu2_i64(src1, src2, src1, src2);
+ tcg_gen_add2_i64(rl, rh, rl, rh, src1, src2);
+ }
+
+ set_avr64(a->vrt, rl, false);
+ set_avr64(a->vrt, rh, true);
+ return true;
+}
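vmsumudm adds both 64x64->128 partial products and the 128-bit addend modulo 2^128, which is exactly what the mulu2/add2 chain builds. With the GCC/Clang 128-bit extension the same computation is (illustrative only, not part of the patch):

/* vmsumudm: a.dw[0] * b.dw[0] + a.dw[1] * b.dw[1] + c, modulo 2^128. */
static unsigned __int128 vmsumudm_ref(uint64_t a_hi, uint64_t a_lo,
                                      uint64_t b_hi, uint64_t b_lo,
                                      unsigned __int128 c)
{
    return (unsigned __int128)a_hi * b_hi + (unsigned __int128)a_lo * b_lo + c;
}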
+
+static bool trans_VMSUMCUD(DisasContext *ctx, arg_VA *a)
+{
+ TCGv_i64 tmp0, tmp1, prod1h, prod1l, prod0h, prod0l, zero;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ tmp0 = tcg_temp_new_i64();
+ tmp1 = tcg_temp_new_i64();
+ prod1h = tcg_temp_new_i64();
+ prod1l = tcg_temp_new_i64();
+ prod0h = tcg_temp_new_i64();
+ prod0l = tcg_temp_new_i64();
+ zero = tcg_constant_i64(0);
+
+ /* prod1 = vsr[vra+32].dw[1] * vsr[vrb+32].dw[1] */
+ get_avr64(tmp0, a->vra, false);
+ get_avr64(tmp1, a->vrb, false);
+ tcg_gen_mulu2_i64(prod1l, prod1h, tmp0, tmp1);
+
+ /* prod0 = vsr[vra+32].dw[0] * vsr[vrb+32].dw[0] */
+ get_avr64(tmp0, a->vra, true);
+ get_avr64(tmp1, a->vrb, true);
+ tcg_gen_mulu2_i64(prod0l, prod0h, tmp0, tmp1);
+
+ /* Sum the lower 64-bit elements */
+ get_avr64(tmp1, a->rc, false);
+ tcg_gen_add2_i64(tmp1, tmp0, tmp1, zero, prod1l, zero);
+ tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod0l, zero);
+
+ /*
+ * Discard the lower 64 bits, leaving the carry into bit 64.
+ * Then sum the higher 64-bit elements.
+ */
+ get_avr64(tmp1, a->rc, true);
+ tcg_gen_add2_i64(tmp1, tmp0, tmp0, zero, tmp1, zero);
+ tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod1h, zero);
+ tcg_gen_add2_i64(tmp1, tmp0, tmp1, tmp0, prod0h, zero);
+
+ /* Discard 64 more bits to complete the CHOP128(temp >> 128) */
+ set_avr64(a->vrt, tmp0, false);
+ set_avr64(a->vrt, zero, true);
+ return true;
+}
+
+static bool do_vx_helper(DisasContext *ctx, arg_VX *a,
+ void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr))
+{
+ TCGv_ptr ra, rb, rd;
+ REQUIRE_VECTOR(ctx);
+
+ ra = gen_avr_ptr(a->vra);
+ rb = gen_avr_ptr(a->vrb);
+ rd = gen_avr_ptr(a->vrt);
+ gen_helper(rd, ra, rb);
+ return true;
+}
+
+TRANS_FLAGS2(ALTIVEC_207, VADDCUQ, do_vx_helper, gen_helper_VADDCUQ)
+TRANS_FLAGS2(ALTIVEC_207, VADDUQM, do_vx_helper, gen_helper_VADDUQM)
+
+TRANS_FLAGS2(ALTIVEC_207, VPMSUMD, do_vx_helper, gen_helper_VPMSUMD)
+
+TRANS_FLAGS2(ALTIVEC_207, VSUBCUQ, do_vx_helper, gen_helper_VSUBCUQ)
+TRANS_FLAGS2(ALTIVEC_207, VSUBUQM, do_vx_helper, gen_helper_VSUBUQM)
+
+static void gen_VADDCUW_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ tcg_gen_not_vec(vece, a, a);
+ tcg_gen_cmp_vec(TCG_COND_LTU, vece, t, a, b);
+ tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(t, vece, 1));
+}
+
+static void gen_VADDCUW_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ tcg_gen_not_i32(a, a);
+ tcg_gen_setcond_i32(TCG_COND_LTU, t, a, b);
+}
+
+static void gen_VSUBCUW_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ tcg_gen_cmp_vec(TCG_COND_GEU, vece, t, a, b);
+ tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(t, vece, 1));
+}
+
+static void gen_VSUBCUW_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ tcg_gen_setcond_i32(TCG_COND_GEU, t, a, b);
+}
+
+static bool do_vx_vaddsubcuw(DisasContext *ctx, arg_VX *a, int add)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_cmp_vec, 0
+ };
+
+ static const GVecGen3 op[] = {
+ {
+ .fniv = gen_VSUBCUW_vec,
+ .fni4 = gen_VSUBCUW_i32,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_VADDCUW_vec,
+ .fni4 = gen_VADDCUW_i32,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ };
+
+ REQUIRE_INSNS_FLAGS(ctx, ALTIVEC);
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+ avr_full_offset(a->vrb), 16, 16, &op[add]);
+
+ return true;
+}
+
+TRANS(VSUBCUW, do_vx_vaddsubcuw, 0)
+TRANS(VADDCUW, do_vx_vaddsubcuw, 1)
+
+static bool do_vx_vmuleo(DisasContext *ctx, arg_VX *a, bool even,
+ void (*gen_mul)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 vra, vrb, vrt0, vrt1;
+ REQUIRE_VECTOR(ctx);
+
+ vra = tcg_temp_new_i64();
+ vrb = tcg_temp_new_i64();
+ vrt0 = tcg_temp_new_i64();
+ vrt1 = tcg_temp_new_i64();
+
+ get_avr64(vra, a->vra, even);
+ get_avr64(vrb, a->vrb, even);
+ gen_mul(vrt0, vrt1, vra, vrb);
+ set_avr64(a->vrt, vrt0, false);
+ set_avr64(a->vrt, vrt1, true);
+ return true;
+}
+
+static bool trans_VMULLD(DisasContext *ctx, arg_VX *a)
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_mul(MO_64, avr_full_offset(a->vrt), avr_full_offset(a->vra),
+ avr_full_offset(a->vrb), 16, 16);
+
+ return true;
+}
+
+TRANS_FLAGS(ALTIVEC, VMULESB, do_vx_helper, gen_helper_VMULESB)
+TRANS_FLAGS(ALTIVEC, VMULOSB, do_vx_helper, gen_helper_VMULOSB)
+TRANS_FLAGS(ALTIVEC, VMULEUB, do_vx_helper, gen_helper_VMULEUB)
+TRANS_FLAGS(ALTIVEC, VMULOUB, do_vx_helper, gen_helper_VMULOUB)
+TRANS_FLAGS(ALTIVEC, VMULESH, do_vx_helper, gen_helper_VMULESH)
+TRANS_FLAGS(ALTIVEC, VMULOSH, do_vx_helper, gen_helper_VMULOSH)
+TRANS_FLAGS(ALTIVEC, VMULEUH, do_vx_helper, gen_helper_VMULEUH)
+TRANS_FLAGS(ALTIVEC, VMULOUH, do_vx_helper, gen_helper_VMULOUH)
+TRANS_FLAGS2(ALTIVEC_207, VMULESW, do_vx_helper, gen_helper_VMULESW)
+TRANS_FLAGS2(ALTIVEC_207, VMULOSW, do_vx_helper, gen_helper_VMULOSW)
+TRANS_FLAGS2(ALTIVEC_207, VMULEUW, do_vx_helper, gen_helper_VMULEUW)
+TRANS_FLAGS2(ALTIVEC_207, VMULOUW, do_vx_helper, gen_helper_VMULOUW)
+TRANS_FLAGS2(ISA310, VMULESD, do_vx_vmuleo, true , tcg_gen_muls2_i64)
+TRANS_FLAGS2(ISA310, VMULOSD, do_vx_vmuleo, false, tcg_gen_muls2_i64)
+TRANS_FLAGS2(ISA310, VMULEUD, do_vx_vmuleo, true , tcg_gen_mulu2_i64)
+TRANS_FLAGS2(ISA310, VMULOUD, do_vx_vmuleo, false, tcg_gen_mulu2_i64)
+
+static void do_vx_vmulhw_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign)
+{
+ TCGv_i64 hh, lh, temp;
+
+ hh = tcg_temp_new_i64();
+ lh = tcg_temp_new_i64();
+ temp = tcg_temp_new_i64();
+
+ if (sign) {
+ tcg_gen_ext32s_i64(lh, a);
+ tcg_gen_ext32s_i64(temp, b);
+ } else {
+ tcg_gen_ext32u_i64(lh, a);
+ tcg_gen_ext32u_i64(temp, b);
+ }
+ tcg_gen_mul_i64(lh, lh, temp);
+
+ if (sign) {
+ tcg_gen_sari_i64(hh, a, 32);
+ tcg_gen_sari_i64(temp, b, 32);
+ } else {
+ tcg_gen_shri_i64(hh, a, 32);
+ tcg_gen_shri_i64(temp, b, 32);
+ }
+ tcg_gen_mul_i64(hh, hh, temp);
+
+ tcg_gen_shri_i64(lh, lh, 32);
+ tcg_gen_deposit_i64(t, hh, lh, 0, 32);
+}
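do_vx_vmulhw_i64 processes the two 32-bit lanes packed in one doubleword; per lane the operation is just the upper half of a widening multiply. A scalar reference for the unsigned case (illustrative only, not part of the patch):

/* vmulhuw, one 32-bit lane: upper 32 bits of the 32x32 -> 64 product. */
static uint32_t vmulhuw_lane(uint32_t a, uint32_t b)
{
    return (uint32_t)(((uint64_t)a * b) >> 32);
}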
+
+static void do_vx_vmulhd_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, bool sign)
+{
+ TCGv_i64 tlow;
+
+ tlow = tcg_temp_new_i64();
+ if (sign) {
+ tcg_gen_muls2_i64(tlow, t, a, b);
+ } else {
+ tcg_gen_mulu2_i64(tlow, t, a, b);
+ }
+}
+
+static bool do_vx_mulh(DisasContext *ctx, arg_VX *a, bool sign,
+ void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, bool))
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ TCGv_i64 vra, vrb, vrt;
+ int i;
+
+ vra = tcg_temp_new_i64();
+ vrb = tcg_temp_new_i64();
+ vrt = tcg_temp_new_i64();
+
+ for (i = 0; i < 2; i++) {
+ get_avr64(vra, a->vra, i);
+ get_avr64(vrb, a->vrb, i);
+ get_avr64(vrt, a->vrt, i);
+
+ func(vrt, vra, vrb, sign);
+
+ set_avr64(a->vrt, vrt, i);
+ }
+ return true;
+}
+
+TRANS(VMULHSW, do_vx_mulh, true , do_vx_vmulhw_i64)
+TRANS(VMULHSD, do_vx_mulh, true , do_vx_vmulhd_i64)
+TRANS(VMULHUW, do_vx_mulh, false, do_vx_vmulhw_i64)
+TRANS(VMULHUD, do_vx_mulh, false, do_vx_vmulhd_i64)
+
+static void do_vavg(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
+ void (*gen_shr_vec)(unsigned, TCGv_vec, TCGv_vec, int64_t))
+{
+ TCGv_vec tmp = tcg_temp_new_vec_matching(t);
+ tcg_gen_or_vec(vece, tmp, a, b);
+ tcg_gen_and_vec(vece, tmp, tmp, tcg_constant_vec_matching(t, vece, 1));
+ gen_shr_vec(vece, a, a, 1);
+ gen_shr_vec(vece, b, b, 1);
+ tcg_gen_add_vec(vece, t, a, b);
+ tcg_gen_add_vec(vece, t, t, tmp);
+}
+
+QEMU_FLATTEN
+static void gen_vavgu(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ do_vavg(vece, t, a, b, tcg_gen_shri_vec);
+}
+
+QEMU_FLATTEN
+static void gen_vavgs(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ do_vavg(vece, t, a, b, tcg_gen_sari_vec);
+}
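Both wrappers rely on the overflow-free rounded-average identity (a >> 1) + (b >> 1) + ((a | b) & 1) == floor((a + b + 1) / 2), which avoids widening each element. A scalar check for the unsigned byte case (illustrative only, not part of the patch):

/* vavgub, one 8-bit lane, computed without a widening add. */
static uint8_t vavgub_lane(uint8_t a, uint8_t b)
{
    return (uint8_t)((a >> 1) + (b >> 1) + ((a | b) & 1));
}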
+
+static bool do_vx_vavg(DisasContext *ctx, arg_VX *a, int sign, int vece)
+{
+ static const TCGOpcode vecop_list_s[] = {
+ INDEX_op_add_vec, INDEX_op_sari_vec, 0
+ };
+ static const TCGOpcode vecop_list_u[] = {
+ INDEX_op_add_vec, INDEX_op_shri_vec, 0
+ };
+
+ static const GVecGen3 op[2][3] = {
+ {
+ {
+ .fniv = gen_vavgu,
+ .fno = gen_helper_VAVGUB,
+ .opt_opc = vecop_list_u,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vavgu,
+ .fno = gen_helper_VAVGUH,
+ .opt_opc = vecop_list_u,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vavgu,
+ .fno = gen_helper_VAVGUW,
+ .opt_opc = vecop_list_u,
+ .vece = MO_32
+ },
+ },
+ {
+ {
+ .fniv = gen_vavgs,
+ .fno = gen_helper_VAVGSB,
+ .opt_opc = vecop_list_s,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vavgs,
+ .fno = gen_helper_VAVGSH,
+ .opt_opc = vecop_list_s,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vavgs,
+ .fno = gen_helper_VAVGSW,
+ .opt_opc = vecop_list_s,
+ .vece = MO_32
+ },
+ },
+ };
+
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+ avr_full_offset(a->vrb), 16, 16, &op[sign][vece]);
+
+ return true;
+}
+
+TRANS_FLAGS(ALTIVEC, VAVGSB, do_vx_vavg, 1, MO_8)
+TRANS_FLAGS(ALTIVEC, VAVGSH, do_vx_vavg, 1, MO_16)
+TRANS_FLAGS(ALTIVEC, VAVGSW, do_vx_vavg, 1, MO_32)
+TRANS_FLAGS(ALTIVEC, VAVGUB, do_vx_vavg, 0, MO_8)
+TRANS_FLAGS(ALTIVEC, VAVGUH, do_vx_vavg, 0, MO_16)
+TRANS_FLAGS(ALTIVEC, VAVGUW, do_vx_vavg, 0, MO_32)
+
+static void gen_vabsdu(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ tcg_gen_umax_vec(vece, t, a, b);
+ tcg_gen_umin_vec(vece, a, a, b);
+ tcg_gen_sub_vec(vece, t, t, a);
+}
+
+static bool do_vabsdu(DisasContext *ctx, arg_VX *a, const int vece)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_umax_vec, INDEX_op_umin_vec, INDEX_op_sub_vec, 0
+ };
+
+ static const GVecGen3 op[] = {
+ {
+ .fniv = gen_vabsdu,
+ .fno = gen_helper_VABSDUB,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vabsdu,
+ .fno = gen_helper_VABSDUH,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vabsdu,
+ .fno = gen_helper_VABSDUW,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ };
+
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+ avr_full_offset(a->vrb), 16, 16, &op[vece]);
+
+ return true;
+}
+
+TRANS_FLAGS2(ISA300, VABSDUB, do_vabsdu, MO_8)
+TRANS_FLAGS2(ISA300, VABSDUH, do_vabsdu, MO_16)
+TRANS_FLAGS2(ISA300, VABSDUW, do_vabsdu, MO_32)
+
+static bool do_vdiv_vmod(DisasContext *ctx, arg_VX *a, const int vece,
+ void (*func_32)(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b),
+ void (*func_64)(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b))
+{
+ const GVecGen3 op = {
+ .fni4 = func_32,
+ .fni8 = func_64,
+ .vece = vece
+ };
+
+ REQUIRE_VECTOR(ctx);
+
+ tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra),
+ avr_full_offset(a->vrb), 16, 16, &op);
+
+ return true;
+}
+
+#define DIVU32(NAME, DIV) \
+static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) \
+{ \
+ TCGv_i32 zero = tcg_constant_i32(0); \
+ TCGv_i32 one = tcg_constant_i32(1); \
+ tcg_gen_movcond_i32(TCG_COND_EQ, b, b, zero, one, b); \
+ DIV(t, a, b); \
+}
+
+#define DIVS32(NAME, DIV) \
+static void NAME(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b) \
+{ \
+ TCGv_i32 t0 = tcg_temp_new_i32(); \
+ TCGv_i32 t1 = tcg_temp_new_i32(); \
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t0, a, INT32_MIN); \
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t1, b, -1); \
+ tcg_gen_and_i32(t0, t0, t1); \
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t1, b, 0); \
+ tcg_gen_or_i32(t0, t0, t1); \
+ tcg_gen_movi_i32(t1, 0); \
+ tcg_gen_movcond_i32(TCG_COND_NE, b, t0, t1, t0, b); \
+ DIV(t, a, b); \
+}
+
+#define DIVU64(NAME, DIV) \
+static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) \
+{ \
+ TCGv_i64 zero = tcg_constant_i64(0); \
+ TCGv_i64 one = tcg_constant_i64(1); \
+ tcg_gen_movcond_i64(TCG_COND_EQ, b, b, zero, one, b); \
+ DIV(t, a, b); \
+}
+
+#define DIVS64(NAME, DIV) \
+static void NAME(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b) \
+{ \
+ TCGv_i64 t0 = tcg_temp_new_i64(); \
+ TCGv_i64 t1 = tcg_temp_new_i64(); \
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t0, a, INT64_MIN); \
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t1, b, -1); \
+ tcg_gen_and_i64(t0, t0, t1); \
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t1, b, 0); \
+ tcg_gen_or_i64(t0, t0, t1); \
+ tcg_gen_movi_i64(t1, 0); \
+ tcg_gen_movcond_i64(TCG_COND_NE, b, t0, t1, t0, b); \
+ DIV(t, a, b); \
+}
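The movcond patching in these macros is presumably there so the division emitted for the host can never trap: a zero divisor and the INT_MIN / -1 overflow case are redirected to a divisor of 1, and the ISA leaves the result undefined for those inputs anyway. A scalar equivalent of the signed 32-bit guard (illustrative only, not part of the patch):

/* vdivsw, one 32-bit lane, with the undefined cases made harmless. */
static int32_t vdivsw_lane(int32_t a, int32_t b)
{
    if (b == 0 || (a == INT32_MIN && b == -1)) {
        b = 1;      /* result is architecturally undefined for these inputs */
    }
    return a / b;
}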
+
+DIVS32(do_divsw, tcg_gen_div_i32)
+DIVU32(do_divuw, tcg_gen_divu_i32)
+DIVS64(do_divsd, tcg_gen_div_i64)
+DIVU64(do_divud, tcg_gen_divu_i64)
+
+TRANS_FLAGS2(ISA310, VDIVSW, do_vdiv_vmod, MO_32, do_divsw, NULL)
+TRANS_FLAGS2(ISA310, VDIVUW, do_vdiv_vmod, MO_32, do_divuw, NULL)
+TRANS_FLAGS2(ISA310, VDIVSD, do_vdiv_vmod, MO_64, NULL, do_divsd)
+TRANS_FLAGS2(ISA310, VDIVUD, do_vdiv_vmod, MO_64, NULL, do_divud)
+TRANS_FLAGS2(ISA310, VDIVSQ, do_vx_helper, gen_helper_VDIVSQ)
+TRANS_FLAGS2(ISA310, VDIVUQ, do_vx_helper, gen_helper_VDIVUQ)
+
+static void do_dives_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i64 val1, val2;
+
+ val1 = tcg_temp_new_i64();
+ val2 = tcg_temp_new_i64();
+
+ tcg_gen_ext_i32_i64(val1, a);
+ tcg_gen_ext_i32_i64(val2, b);
+
+ /* (a << 32)/b */
+ tcg_gen_shli_i64(val1, val1, 32);
+ tcg_gen_div_i64(val1, val1, val2);
+
+ /* if quotient doesn't fit in 32 bits the result is undefined */
+ tcg_gen_extrl_i64_i32(t, val1);
+}
+
+static void do_diveu_i32(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i64 val1, val2;
+
+ val1 = tcg_temp_new_i64();
+ val2 = tcg_temp_new_i64();
+
+ tcg_gen_extu_i32_i64(val1, a);
+ tcg_gen_extu_i32_i64(val2, b);
+
+ /* (a << 32)/b */
+ tcg_gen_shli_i64(val1, val1, 32);
+ tcg_gen_divu_i64(val1, val1, val2);
+
+ /* if quotient doesn't fit in 32 bits the result is undefined */
+ tcg_gen_extrl_i64_i32(t, val1);
+}
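The divide-extended helpers widen to 64 bits so the (a << 32) / b quotient can be formed directly. A scalar reference for the unsigned word case (illustrative only, not part of the patch):

/* vdiveuw, one 32-bit lane: the result is undefined when the quotient does not
 * fit in 32 bits or the divisor is zero. */
static uint32_t vdiveuw_lane(uint32_t a, uint32_t b)
{
    if (b == 0) {
        return 0;   /* architecturally undefined; avoid the host trap */
    }
    return (uint32_t)(((uint64_t)a << 32) / b);
}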
+
+DIVS32(do_divesw, do_dives_i32)
+DIVU32(do_diveuw, do_diveu_i32)
+
+DIVS32(do_modsw, tcg_gen_rem_i32)
+DIVU32(do_moduw, tcg_gen_remu_i32)
+DIVS64(do_modsd, tcg_gen_rem_i64)
+DIVU64(do_modud, tcg_gen_remu_i64)
+
+TRANS_FLAGS2(ISA310, VDIVESW, do_vdiv_vmod, MO_32, do_divesw, NULL)
+TRANS_FLAGS2(ISA310, VDIVEUW, do_vdiv_vmod, MO_32, do_diveuw, NULL)
+TRANS_FLAGS2(ISA310, VDIVESD, do_vx_helper, gen_helper_VDIVESD)
+TRANS_FLAGS2(ISA310, VDIVEUD, do_vx_helper, gen_helper_VDIVEUD)
+TRANS_FLAGS2(ISA310, VDIVESQ, do_vx_helper, gen_helper_VDIVESQ)
+TRANS_FLAGS2(ISA310, VDIVEUQ, do_vx_helper, gen_helper_VDIVEUQ)
+
+TRANS_FLAGS2(ISA310, VMODSW, do_vdiv_vmod, MO_32, do_modsw , NULL)
+TRANS_FLAGS2(ISA310, VMODUW, do_vdiv_vmod, MO_32, do_moduw, NULL)
+TRANS_FLAGS2(ISA310, VMODSD, do_vdiv_vmod, MO_64, NULL, do_modsd)
+TRANS_FLAGS2(ISA310, VMODUD, do_vdiv_vmod, MO_64, NULL, do_modud)
+TRANS_FLAGS2(ISA310, VMODSQ, do_vx_helper, gen_helper_VMODSQ)
+TRANS_FLAGS2(ISA310, VMODUQ, do_vx_helper, gen_helper_VMODUQ)
+
+#undef DIVS32
+#undef DIVU32
+#undef DIVS64
+#undef DIVU64
+
#undef GEN_VR_LDX
#undef GEN_VR_STX
#undef GEN_VR_LVE
diff --git a/target/ppc/translate/vmx-ops.c.inc b/target/ppc/translate/vmx-ops.c.inc
index f3f4855111..33fec8aca4 100644
--- a/target/ppc/translate/vmx-ops.c.inc
+++ b/target/ppc/translate/vmx-ops.c.inc
@@ -83,12 +83,6 @@ GEN_VXFORM(vminsb, 1, 12),
GEN_VXFORM(vminsh, 1, 13),
GEN_VXFORM(vminsw, 1, 14),
GEN_VXFORM_207(vminsd, 1, 15),
-GEN_VXFORM_DUAL(vavgub, vabsdub, 1, 16, PPC_ALTIVEC, PPC_NONE),
-GEN_VXFORM_DUAL(vavguh, vabsduh, 1, 17, PPC_ALTIVEC, PPC_NONE),
-GEN_VXFORM_DUAL(vavguw, vabsduw, 1, 18, PPC_ALTIVEC, PPC_NONE),
-GEN_VXFORM(vavgsb, 1, 20),
-GEN_VXFORM(vavgsh, 1, 21),
-GEN_VXFORM(vavgsw, 1, 22),
GEN_VXFORM(vmrghb, 6, 0),
GEN_VXFORM(vmrghh, 6, 1),
GEN_VXFORM(vmrghw, 6, 2),
@@ -101,43 +95,13 @@ GEN_VXFORM_DUAL(vmrgow, vextuwlx, 6, 26, PPC_NONE, PPC2_ALTIVEC_207),
GEN_VXFORM_300(vextubrx, 6, 28),
GEN_VXFORM_300(vextuhrx, 6, 29),
GEN_VXFORM_DUAL(vmrgew, vextuwrx, 6, 30, PPC_NONE, PPC2_ALTIVEC_207),
-GEN_VXFORM(vmuloub, 4, 0),
-GEN_VXFORM(vmulouh, 4, 1),
-GEN_VXFORM_DUAL(vmulouw, vmuluwm, 4, 2, PPC_ALTIVEC, PPC_NONE),
-GEN_VXFORM(vmulosb, 4, 4),
-GEN_VXFORM(vmulosh, 4, 5),
-GEN_VXFORM_207(vmulosw, 4, 6),
-GEN_VXFORM_310(vmulld, 4, 7),
-GEN_VXFORM(vmuleub, 4, 8),
-GEN_VXFORM(vmuleuh, 4, 9),
-GEN_VXFORM_DUAL(vmuleuw, vmulhuw, 4, 10, PPC_ALTIVEC, PPC_NONE),
-GEN_VXFORM_310(vmulhud, 4, 11),
-GEN_VXFORM(vmulesb, 4, 12),
-GEN_VXFORM(vmulesh, 4, 13),
-GEN_VXFORM_DUAL(vmulesw, vmulhsw, 4, 14, PPC_ALTIVEC, PPC_NONE),
-GEN_VXFORM_310(vmulhsd, 4, 15),
-GEN_VXFORM(vslb, 2, 4),
-GEN_VXFORM(vslh, 2, 5),
-GEN_VXFORM_DUAL(vslw, vrlwnm, 2, 6, PPC_ALTIVEC, PPC_NONE),
-GEN_VXFORM_207(vsld, 2, 23),
-GEN_VXFORM(vsrb, 2, 8),
-GEN_VXFORM(vsrh, 2, 9),
-GEN_VXFORM(vsrw, 2, 10),
-GEN_VXFORM_207(vsrd, 2, 27),
-GEN_VXFORM(vsrab, 2, 12),
-GEN_VXFORM(vsrah, 2, 13),
-GEN_VXFORM(vsraw, 2, 14),
-GEN_VXFORM_207(vsrad, 2, 15),
+GEN_VXFORM_207(vmuluwm, 4, 2),
GEN_VXFORM_300(vsrv, 2, 28),
GEN_VXFORM_300(vslv, 2, 29),
GEN_VXFORM(vslo, 6, 16),
GEN_VXFORM(vsro, 6, 17),
-GEN_VXFORM(vaddcuw, 0, 6),
-GEN_HANDLER_E_2(vprtybw, 0x4, 0x1, 0x18, 8, 0, PPC_NONE, PPC2_ISA300),
-GEN_HANDLER_E_2(vprtybd, 0x4, 0x1, 0x18, 9, 0, PPC_NONE, PPC2_ISA300),
-GEN_HANDLER_E_2(vprtybq, 0x4, 0x1, 0x18, 10, 0, PPC_NONE, PPC2_ISA300),
-GEN_VXFORM_DUAL(vsubcuw, xpnd04_1, 0, 22, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(xpnd04_1, 0, 22),
GEN_VXFORM_300(bcdsr, 0, 23),
GEN_VXFORM_300(bcdsr, 0, 31),
GEN_VXFORM_DUAL(vaddubs, vmul10uq, 0, 8, PPC_ALTIVEC, PPC_NONE),
@@ -152,17 +116,9 @@ GEN_VXFORM(vsubuws, 0, 26),
GEN_VXFORM_DUAL(vsubsbs, bcdtrunc, 0, 28, PPC_ALTIVEC, PPC2_ISA300),
GEN_VXFORM(vsubshs, 0, 29),
GEN_VXFORM_DUAL(vsubsws, xpnd04_2, 0, 30, PPC_ALTIVEC, PPC_NONE),
-GEN_VXFORM_207(vadduqm, 0, 4),
-GEN_VXFORM_207(vaddcuq, 0, 5),
-GEN_VXFORM_DUAL(vaddeuqm, vaddecuq, 30, 0xFF, PPC_NONE, PPC2_ALTIVEC_207),
-GEN_VXFORM_DUAL(vsubuqm, bcdtrunc, 0, 20, PPC2_ALTIVEC_207, PPC2_ISA300),
-GEN_VXFORM_DUAL(vsubcuq, bcdutrunc, 0, 21, PPC2_ALTIVEC_207, PPC2_ISA300),
-GEN_VXFORM_DUAL(vsubeuqm, vsubecuq, 31, 0xFF, PPC_NONE, PPC2_ALTIVEC_207),
-GEN_VXFORM(vrlb, 2, 0),
-GEN_VXFORM(vrlh, 2, 1),
-GEN_VXFORM_DUAL(vrlw, vrlwmi, 2, 2, PPC_ALTIVEC, PPC_NONE),
-GEN_VXFORM_DUAL(vrld, vrldmi, 2, 3, PPC_NONE, PPC2_ALTIVEC_207),
-GEN_VXFORM_DUAL(vsl, vrldnm, 2, 7, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_300(bcdtrunc, 0, 20),
+GEN_VXFORM_300(bcdutrunc, 0, 21),
+GEN_VXFORM(vsl, 2, 7),
GEN_VXFORM(vsr, 2, 11),
GEN_VXFORM(vpkuhum, 7, 0),
GEN_VXFORM(vpkuwum, 7, 1),
@@ -198,22 +154,10 @@ GEN_HANDLER2_E(name, str, 0x4, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ISA300),
GEN_VXRFORM1_300(name, name, #name, opc2, opc3) \
GEN_VXRFORM1_300(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))
-GEN_VXRFORM_300(vcmpnezb, 3, 4)
-GEN_VXRFORM_300(vcmpnezh, 3, 5)
-GEN_VXRFORM_300(vcmpnezw, 3, 6)
-GEN_VXRFORM(vcmpgtsb, 3, 12)
-GEN_VXRFORM(vcmpgtsh, 3, 13)
-GEN_VXRFORM(vcmpgtsw, 3, 14)
-GEN_VXRFORM(vcmpgtub, 3, 8)
-GEN_VXRFORM(vcmpgtuh, 3, 9)
-GEN_VXRFORM(vcmpgtuw, 3, 10)
-GEN_VXRFORM_DUAL(vcmpeqfp, vcmpequd, 3, 3, PPC_ALTIVEC, PPC_NONE)
+GEN_VXRFORM(vcmpeqfp, 3, 3)
GEN_VXRFORM(vcmpgefp, 3, 7)
-GEN_VXRFORM_DUAL(vcmpgtfp, vcmpgtud, 3, 11, PPC_ALTIVEC, PPC_NONE)
-GEN_VXRFORM_DUAL(vcmpbfp, vcmpgtsd, 3, 15, PPC_ALTIVEC, PPC_NONE)
-GEN_VXRFORM_DUAL(vcmpequb, vcmpneb, 3, 0, PPC_ALTIVEC, PPC_NONE)
-GEN_VXRFORM_DUAL(vcmpequh, vcmpneh, 3, 1, PPC_ALTIVEC, PPC_NONE)
-GEN_VXRFORM_DUAL(vcmpequw, vcmpnew, 3, 2, PPC_ALTIVEC, PPC_NONE)
+GEN_VXRFORM(vcmpgtfp, 3, 11)
+GEN_VXRFORM(vcmpbfp, 3, 15)
#define GEN_VXFORM_DUAL_INV(name0, name1, opc2, opc3, inval0, inval1, type) \
GEN_OPCODE_DUAL(name0##_##name1, 0x04, opc2, opc3, inval0, inval1, type, \
@@ -225,27 +169,15 @@ GEN_VXFORM_DUAL_INV(vsplth, vextractuh, 6, 9, 0x00000000, 0x100000,
GEN_VXFORM_DUAL_INV(vspltw, vextractuw, 6, 10, 0x00000000, 0x100000,
PPC_ALTIVEC),
GEN_VXFORM_300_EXT(vextractd, 6, 11, 0x100000),
-GEN_VXFORM_DUAL_INV(vspltisb, vinsertb, 6, 12, 0x00000000, 0x100000,
- PPC_ALTIVEC),
-GEN_VXFORM_DUAL_INV(vspltish, vinserth, 6, 13, 0x00000000, 0x100000,
- PPC_ALTIVEC),
-GEN_VXFORM_DUAL_INV(vspltisw, vinsertw, 6, 14, 0x00000000, 0x100000,
- PPC_ALTIVEC),
-GEN_VXFORM_300_EXT(vinsertd, 6, 15, 0x100000),
-GEN_VXFORM_300_EO(vnegw, 0x01, 0x18, 0x06),
-GEN_VXFORM_300_EO(vnegd, 0x01, 0x18, 0x07),
-GEN_VXFORM_300_EO(vextsb2w, 0x01, 0x18, 0x10),
-GEN_VXFORM_300_EO(vextsh2w, 0x01, 0x18, 0x11),
-GEN_VXFORM_300_EO(vextsb2d, 0x01, 0x18, 0x18),
-GEN_VXFORM_300_EO(vextsh2d, 0x01, 0x18, 0x19),
-GEN_VXFORM_300_EO(vextsw2d, 0x01, 0x18, 0x1A),
+GEN_VXFORM(vspltisb, 6, 12),
+GEN_VXFORM(vspltish, 6, 13),
+GEN_VXFORM(vspltisw, 6, 14),
GEN_VXFORM_300_EO(vctzb, 0x01, 0x18, 0x1C),
GEN_VXFORM_300_EO(vctzh, 0x01, 0x18, 0x1D),
GEN_VXFORM_300_EO(vctzw, 0x01, 0x18, 0x1E),
GEN_VXFORM_300_EO(vctzd, 0x01, 0x18, 0x1F),
GEN_VXFORM_300_EO(vclzlsbb, 0x01, 0x18, 0x0),
GEN_VXFORM_300_EO(vctzlsbb, 0x01, 0x18, 0x1),
-GEN_VXFORM_300(vpermr, 0x1D, 0xFF),
#define GEN_VXFORM_NOA(name, opc2, opc3) \
GEN_HANDLER(name, 0x04, opc2, opc3, 0x001f0000, PPC_ALTIVEC)
@@ -273,14 +205,8 @@ GEN_VXFORM_UIMM(vcfsx, 5, 13),
GEN_VXFORM_UIMM(vctuxs, 5, 14),
GEN_VXFORM_UIMM(vctsxs, 5, 15),
-
#define GEN_VAFORM_PAIRED(name0, name1, opc2) \
GEN_HANDLER(name0##_##name1, 0x04, opc2, 0xFF, 0x00000000, PPC_ALTIVEC)
-GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16),
-GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18),
-GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19),
-GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20),
-GEN_VAFORM_PAIRED(vsel, vperm, 21),
GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23),
GEN_VXFORM_DUAL(vclzb, vpopcntb, 1, 28, PPC_NONE, PPC2_ALTIVEC_207),
@@ -294,7 +220,6 @@ GEN_VXFORM_207(vgbbd, 6, 20),
GEN_VXFORM_207(vpmsumb, 4, 16),
GEN_VXFORM_207(vpmsumh, 4, 17),
GEN_VXFORM_207(vpmsumw, 4, 18),
-GEN_VXFORM_207(vpmsumd, 4, 19),
GEN_VXFORM_207(vsbox, 4, 23),
diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc
index 57a7f73bba..0266f09119 100644
--- a/target/ppc/translate/vsx-impl.c.inc
+++ b/target/ppc/translate/vsx-impl.c.inc
@@ -1,29 +1,26 @@
/*** VSX extension ***/
-static inline void get_cpu_vsrh(TCGv_i64 dst, int n)
+static inline void get_cpu_vsr(TCGv_i64 dst, int n, bool high)
{
- tcg_gen_ld_i64(dst, cpu_env, vsr64_offset(n, true));
+ tcg_gen_ld_i64(dst, tcg_env, vsr64_offset(n, high));
}
-static inline void get_cpu_vsrl(TCGv_i64 dst, int n)
+static inline void set_cpu_vsr(int n, TCGv_i64 src, bool high)
{
- tcg_gen_ld_i64(dst, cpu_env, vsr64_offset(n, false));
+ tcg_gen_st_i64(src, tcg_env, vsr64_offset(n, high));
}
-static inline void set_cpu_vsrh(int n, TCGv_i64 src)
-{
- tcg_gen_st_i64(src, cpu_env, vsr64_offset(n, true));
-}
-
-static inline void set_cpu_vsrl(int n, TCGv_i64 src)
+static inline TCGv_ptr gen_vsr_ptr(int reg)
{
- tcg_gen_st_i64(src, cpu_env, vsr64_offset(n, false));
+ TCGv_ptr r = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(r, tcg_env, vsr_full_offset(reg));
+ return r;
}
-static inline TCGv_ptr gen_vsr_ptr(int reg)
+static inline TCGv_ptr gen_acc_ptr(int reg)
{
TCGv_ptr r = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(r, cpu_env, vsr_full_offset(reg));
+ tcg_gen_addi_ptr(r, tcg_env, acc_full_offset(reg));
return r;
}
@@ -41,10 +38,8 @@ static void gen_##name(DisasContext *ctx) \
EA = tcg_temp_new(); \
gen_addr_reg_index(ctx, EA); \
gen_qemu_##operation(ctx, t0, EA); \
- set_cpu_vsrh(xT(ctx->opcode), t0); \
+ set_cpu_vsr(xT(ctx->opcode), t0, true); \
/* NOTE: cpu_vsrl is undefined */ \
- tcg_temp_free(EA); \
- tcg_temp_free_i64(t0); \
}
VSX_LOAD_SCALAR(lxsdx, ld64_i64)
@@ -67,12 +62,10 @@ static void gen_lxvd2x(DisasContext *ctx)
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
gen_qemu_ld64_i64(ctx, t0, EA);
- set_cpu_vsrh(xT(ctx->opcode), t0);
+ set_cpu_vsr(xT(ctx->opcode), t0, true);
tcg_gen_addi_tl(EA, EA, 8);
gen_qemu_ld64_i64(ctx, t0, EA);
- set_cpu_vsrl(xT(ctx->opcode), t0);
- tcg_temp_free(EA);
- tcg_temp_free_i64(t0);
+ set_cpu_vsr(xT(ctx->opcode), t0, false);
}
static void gen_lxvw4x(DisasContext *ctx)
@@ -95,25 +88,20 @@ static void gen_lxvw4x(DisasContext *ctx)
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEUQ);
tcg_gen_shri_i64(t1, t0, 32);
tcg_gen_deposit_i64(xth, t1, t0, 32, 32);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEUQ);
tcg_gen_shri_i64(t1, t0, 32);
tcg_gen_deposit_i64(xtl, t1, t0, 32, 32);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
} else {
- tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
}
- set_cpu_vsrh(xT(ctx->opcode), xth);
- set_cpu_vsrl(xT(ctx->opcode), xtl);
- tcg_temp_free(EA);
- tcg_temp_free_i64(xth);
- tcg_temp_free_i64(xtl);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
}
static void gen_lxvwsx(DisasContext *ctx)
@@ -141,9 +129,6 @@ static void gen_lxvwsx(DisasContext *ctx)
data = tcg_temp_new_i32();
tcg_gen_qemu_ld_i32(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UL));
tcg_gen_gvec_dup_i32(MO_UL, vsr_full_offset(xT(ctx->opcode)), 16, 16, data);
-
- tcg_temp_free(EA);
- tcg_temp_free_i32(data);
}
static void gen_lxvdsx(DisasContext *ctx)
@@ -162,17 +147,14 @@ static void gen_lxvdsx(DisasContext *ctx)
gen_addr_reg_index(ctx, EA);
data = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(data, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
- tcg_gen_gvec_dup_i64(MO_Q, vsr_full_offset(xT(ctx->opcode)), 16, 16, data);
-
- tcg_temp_free(EA);
- tcg_temp_free_i64(data);
+ tcg_gen_qemu_ld_i64(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UQ));
+ tcg_gen_gvec_dup_i64(MO_UQ, vsr_full_offset(xT(ctx->opcode)), 16, 16, data);
}
static void gen_bswap16x8(TCGv_i64 outh, TCGv_i64 outl,
TCGv_i64 inh, TCGv_i64 inl)
{
- TCGv_i64 mask = tcg_const_i64(0x00FF00FF00FF00FF);
+ TCGv_i64 mask = tcg_constant_i64(0x00FF00FF00FF00FF);
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
@@ -189,10 +171,6 @@ static void gen_bswap16x8(TCGv_i64 outh, TCGv_i64 outl,
tcg_gen_shri_i64(t1, inl, 8);
tcg_gen_and_i64(t1, t1, mask);
tcg_gen_or_i64(outl, t0, t1);
-
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(mask);
}
static void gen_bswap32x4(TCGv_i64 outh, TCGv_i64 outl,
@@ -207,10 +185,8 @@ static void gen_bswap32x4(TCGv_i64 outh, TCGv_i64 outl,
tcg_gen_deposit_i64(outh, outh, hi, 32, 32);
tcg_gen_shri_i64(outl, lo, 32);
tcg_gen_deposit_i64(outl, outl, lo, 32, 32);
-
- tcg_temp_free_i64(hi);
- tcg_temp_free_i64(lo);
}
+
static void gen_lxvh8x(DisasContext *ctx)
{
TCGv EA;
@@ -227,17 +203,14 @@ static void gen_lxvh8x(DisasContext *ctx)
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
if (ctx->le_mode) {
gen_bswap16x8(xth, xtl, xth, xtl);
}
- set_cpu_vsrh(xT(ctx->opcode), xth);
- set_cpu_vsrl(xT(ctx->opcode), xtl);
- tcg_temp_free(EA);
- tcg_temp_free_i64(xth);
- tcg_temp_free_i64(xtl);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
}
static void gen_lxvb16x(DisasContext *ctx)
@@ -255,123 +228,12 @@ static void gen_lxvb16x(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
- set_cpu_vsrh(xT(ctx->opcode), xth);
- set_cpu_vsrl(xT(ctx->opcode), xtl);
- tcg_temp_free(EA);
- tcg_temp_free_i64(xth);
- tcg_temp_free_i64(xtl);
-}
-
-#define VSX_VECTOR_LOAD(name, op, indexed) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- int xt; \
- TCGv EA; \
- TCGv_i64 xth; \
- TCGv_i64 xtl; \
- \
- if (indexed) { \
- xt = xT(ctx->opcode); \
- } else { \
- xt = DQxT(ctx->opcode); \
- } \
- \
- if (xt < 32) { \
- if (unlikely(!ctx->vsx_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VSXU); \
- return; \
- } \
- } else { \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- } \
- xth = tcg_temp_new_i64(); \
- xtl = tcg_temp_new_i64(); \
- gen_set_access_type(ctx, ACCESS_INT); \
- EA = tcg_temp_new(); \
- if (indexed) { \
- gen_addr_reg_index(ctx, EA); \
- } else { \
- gen_addr_imm_index(ctx, EA, 0x0F); \
- } \
- if (ctx->le_mode) { \
- tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_LEQ); \
- set_cpu_vsrl(xt, xtl); \
- tcg_gen_addi_tl(EA, EA, 8); \
- tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_LEQ); \
- set_cpu_vsrh(xt, xth); \
- } else { \
- tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_BEQ); \
- set_cpu_vsrh(xt, xth); \
- tcg_gen_addi_tl(EA, EA, 8); \
- tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_BEQ); \
- set_cpu_vsrl(xt, xtl); \
- } \
- tcg_temp_free(EA); \
- tcg_temp_free_i64(xth); \
- tcg_temp_free_i64(xtl); \
-}
-
-VSX_VECTOR_LOAD(lxv, ld_i64, 0)
-VSX_VECTOR_LOAD(lxvx, ld_i64, 1)
-
-#define VSX_VECTOR_STORE(name, op, indexed) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- int xt; \
- TCGv EA; \
- TCGv_i64 xth; \
- TCGv_i64 xtl; \
- \
- if (indexed) { \
- xt = xT(ctx->opcode); \
- } else { \
- xt = DQxT(ctx->opcode); \
- } \
- \
- if (xt < 32) { \
- if (unlikely(!ctx->vsx_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VSXU); \
- return; \
- } \
- } else { \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- } \
- xth = tcg_temp_new_i64(); \
- xtl = tcg_temp_new_i64(); \
- get_cpu_vsrh(xth, xt); \
- get_cpu_vsrl(xtl, xt); \
- gen_set_access_type(ctx, ACCESS_INT); \
- EA = tcg_temp_new(); \
- if (indexed) { \
- gen_addr_reg_index(ctx, EA); \
- } else { \
- gen_addr_imm_index(ctx, EA, 0x0F); \
- } \
- if (ctx->le_mode) { \
- tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_LEQ); \
- tcg_gen_addi_tl(EA, EA, 8); \
- tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_LEQ); \
- } else { \
- tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_BEQ); \
- tcg_gen_addi_tl(EA, EA, 8); \
- tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_BEQ); \
- } \
- tcg_temp_free(EA); \
- tcg_temp_free_i64(xth); \
- tcg_temp_free_i64(xtl); \
-}
-
-VSX_VECTOR_STORE(stxv, st_i64, 0)
-VSX_VECTOR_STORE(stxvx, st_i64, 1)
+ tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
+}
#ifdef TARGET_PPC64
#define VSX_VECTOR_LOAD_STORE_LENGTH(name) \
@@ -395,9 +257,7 @@ static void gen_##name(DisasContext *ctx) \
xt = gen_vsr_ptr(xT(ctx->opcode)); \
gen_set_access_type(ctx, ACCESS_INT); \
gen_addr_register(ctx, EA); \
- gen_helper_##name(cpu_env, EA, xt, cpu_gpr[rB(ctx->opcode)]); \
- tcg_temp_free(EA); \
- tcg_temp_free_ptr(xt); \
+ gen_helper_##name(tcg_env, EA, xt, cpu_gpr[rB(ctx->opcode)]); \
}
VSX_VECTOR_LOAD_STORE_LENGTH(lxvl)
@@ -406,30 +266,6 @@ VSX_VECTOR_LOAD_STORE_LENGTH(stxvl)
VSX_VECTOR_LOAD_STORE_LENGTH(stxvll)
#endif
-#define VSX_LOAD_SCALAR_DS(name, operation) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv EA; \
- TCGv_i64 xth; \
- \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- xth = tcg_temp_new_i64(); \
- gen_set_access_type(ctx, ACCESS_INT); \
- EA = tcg_temp_new(); \
- gen_addr_imm_index(ctx, EA, 0x03); \
- gen_qemu_##operation(ctx, xth, EA); \
- set_cpu_vsrh(rD(ctx->opcode) + 32, xth); \
- /* NOTE: cpu_vsrl is undefined */ \
- tcg_temp_free(EA); \
- tcg_temp_free_i64(xth); \
-}
-
-VSX_LOAD_SCALAR_DS(lxsd, ld64_i64)
-VSX_LOAD_SCALAR_DS(lxssp, ld32fs)
-
#define VSX_STORE_SCALAR(name, operation) \
static void gen_##name(DisasContext *ctx) \
{ \
@@ -443,10 +279,8 @@ static void gen_##name(DisasContext *ctx) \
gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
gen_addr_reg_index(ctx, EA); \
- get_cpu_vsrh(t0, xS(ctx->opcode)); \
+ get_cpu_vsr(t0, xS(ctx->opcode), true); \
gen_qemu_##operation(ctx, t0, EA); \
- tcg_temp_free(EA); \
- tcg_temp_free_i64(t0); \
}
VSX_STORE_SCALAR(stxsdx, st64_i64)
@@ -468,13 +302,11 @@ static void gen_stxvd2x(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- get_cpu_vsrh(t0, xS(ctx->opcode));
+ get_cpu_vsr(t0, xS(ctx->opcode), true);
gen_qemu_st64_i64(ctx, t0, EA);
tcg_gen_addi_tl(EA, EA, 8);
- get_cpu_vsrl(t0, xS(ctx->opcode));
+ get_cpu_vsr(t0, xS(ctx->opcode), false);
gen_qemu_st64_i64(ctx, t0, EA);
- tcg_temp_free(EA);
- tcg_temp_free_i64(t0);
}
static void gen_stxvw4x(DisasContext *ctx)
@@ -489,8 +321,8 @@ static void gen_stxvw4x(DisasContext *ctx)
}
xsh = tcg_temp_new_i64();
xsl = tcg_temp_new_i64();
- get_cpu_vsrh(xsh, xS(ctx->opcode));
- get_cpu_vsrl(xsl, xS(ctx->opcode));
+ get_cpu_vsr(xsh, xS(ctx->opcode), true);
+ get_cpu_vsr(xsl, xS(ctx->opcode), false);
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
@@ -500,21 +332,16 @@ static void gen_stxvw4x(DisasContext *ctx)
tcg_gen_shri_i64(t0, xsh, 32);
tcg_gen_deposit_i64(t1, t0, xsh, 32, 32);
- tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEUQ);
tcg_gen_addi_tl(EA, EA, 8);
tcg_gen_shri_i64(t0, xsl, 32);
tcg_gen_deposit_i64(t1, t0, xsl, 32, 32);
- tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEQ);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
+ tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEUQ);
} else {
- tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
}
- tcg_temp_free(EA);
- tcg_temp_free_i64(xsh);
- tcg_temp_free_i64(xsl);
}
static void gen_stxvh8x(DisasContext *ctx)
@@ -529,8 +356,8 @@ static void gen_stxvh8x(DisasContext *ctx)
}
xsh = tcg_temp_new_i64();
xsl = tcg_temp_new_i64();
- get_cpu_vsrh(xsh, xS(ctx->opcode));
- get_cpu_vsrl(xsl, xS(ctx->opcode));
+ get_cpu_vsr(xsh, xS(ctx->opcode), true);
+ get_cpu_vsr(xsl, xS(ctx->opcode), false);
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
@@ -539,19 +366,14 @@ static void gen_stxvh8x(DisasContext *ctx)
TCGv_i64 outl = tcg_temp_new_i64();
gen_bswap16x8(outh, outl, xsh, xsl);
- tcg_gen_qemu_st_i64(outh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(outh, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_st_i64(outl, EA, ctx->mem_idx, MO_BEQ);
- tcg_temp_free_i64(outh);
- tcg_temp_free_i64(outl);
+ tcg_gen_qemu_st_i64(outl, EA, ctx->mem_idx, MO_BEUQ);
} else {
- tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
}
- tcg_temp_free(EA);
- tcg_temp_free_i64(xsh);
- tcg_temp_free_i64(xsl);
}
static void gen_stxvb16x(DisasContext *ctx)
@@ -566,43 +388,16 @@ static void gen_stxvb16x(DisasContext *ctx)
}
xsh = tcg_temp_new_i64();
xsl = tcg_temp_new_i64();
- get_cpu_vsrh(xsh, xS(ctx->opcode));
- get_cpu_vsrl(xsl, xS(ctx->opcode));
+ get_cpu_vsr(xsh, xS(ctx->opcode), true);
+ get_cpu_vsr(xsl, xS(ctx->opcode), false);
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
- tcg_temp_free(EA);
- tcg_temp_free_i64(xsh);
- tcg_temp_free_i64(xsl);
+ tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
}
-#define VSX_STORE_SCALAR_DS(name, operation) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv EA; \
- TCGv_i64 xth; \
- \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- xth = tcg_temp_new_i64(); \
- get_cpu_vsrh(xth, rD(ctx->opcode) + 32); \
- gen_set_access_type(ctx, ACCESS_INT); \
- EA = tcg_temp_new(); \
- gen_addr_imm_index(ctx, EA, 0x03); \
- gen_qemu_##operation(ctx, xth, EA); \
- /* NOTE: cpu_vsrl is undefined */ \
- tcg_temp_free(EA); \
- tcg_temp_free_i64(xth); \
-}
-
-VSX_STORE_SCALAR_DS(stxsd, st64_i64)
-VSX_STORE_SCALAR_DS(stxssp, st32fs)
-
static void gen_mfvsrwz(DisasContext *ctx)
{
if (xS(ctx->opcode) < 32) {
@@ -618,11 +413,9 @@ static void gen_mfvsrwz(DisasContext *ctx)
}
TCGv_i64 tmp = tcg_temp_new_i64();
TCGv_i64 xsh = tcg_temp_new_i64();
- get_cpu_vsrh(xsh, xS(ctx->opcode));
+ get_cpu_vsr(xsh, xS(ctx->opcode), true);
tcg_gen_ext32u_i64(tmp, xsh);
tcg_gen_trunc_i64_tl(cpu_gpr[rA(ctx->opcode)], tmp);
- tcg_temp_free_i64(tmp);
- tcg_temp_free_i64(xsh);
}
static void gen_mtvsrwa(DisasContext *ctx)
@@ -642,9 +435,7 @@ static void gen_mtvsrwa(DisasContext *ctx)
TCGv_i64 xsh = tcg_temp_new_i64();
tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]);
tcg_gen_ext32s_i64(xsh, tmp);
- set_cpu_vsrh(xT(ctx->opcode), xsh);
- tcg_temp_free_i64(tmp);
- tcg_temp_free_i64(xsh);
+ set_cpu_vsr(xT(ctx->opcode), xsh, true);
}
static void gen_mtvsrwz(DisasContext *ctx)
@@ -664,9 +455,7 @@ static void gen_mtvsrwz(DisasContext *ctx)
TCGv_i64 xsh = tcg_temp_new_i64();
tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]);
tcg_gen_ext32u_i64(xsh, tmp);
- set_cpu_vsrh(xT(ctx->opcode), xsh);
- tcg_temp_free_i64(tmp);
- tcg_temp_free_i64(xsh);
+ set_cpu_vsr(xT(ctx->opcode), xsh, true);
}
#if defined(TARGET_PPC64)
@@ -685,9 +474,8 @@ static void gen_mfvsrd(DisasContext *ctx)
}
}
t0 = tcg_temp_new_i64();
- get_cpu_vsrh(t0, xS(ctx->opcode));
+ get_cpu_vsr(t0, xS(ctx->opcode), true);
tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0);
- tcg_temp_free_i64(t0);
}
static void gen_mtvsrd(DisasContext *ctx)
@@ -706,8 +494,7 @@ static void gen_mtvsrd(DisasContext *ctx)
}
t0 = tcg_temp_new_i64();
tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]);
- set_cpu_vsrh(xT(ctx->opcode), t0);
- tcg_temp_free_i64(t0);
+ set_cpu_vsr(xT(ctx->opcode), t0, true);
}
static void gen_mfvsrld(DisasContext *ctx)
@@ -725,9 +512,8 @@ static void gen_mfvsrld(DisasContext *ctx)
}
}
t0 = tcg_temp_new_i64();
- get_cpu_vsrl(t0, xS(ctx->opcode));
+ get_cpu_vsr(t0, xS(ctx->opcode), false);
tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0);
- tcg_temp_free_i64(t0);
}
static void gen_mtvsrdd(DisasContext *ctx)
@@ -751,11 +537,10 @@ static void gen_mtvsrdd(DisasContext *ctx)
} else {
tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]);
}
- set_cpu_vsrh(xT(ctx->opcode), t0);
+ set_cpu_vsr(xT(ctx->opcode), t0, true);
tcg_gen_mov_i64(t0, cpu_gpr[rB(ctx->opcode)]);
- set_cpu_vsrl(xT(ctx->opcode), t0);
- tcg_temp_free_i64(t0);
+ set_cpu_vsr(xT(ctx->opcode), t0, false);
}
static void gen_mtvsrws(DisasContext *ctx)
@@ -776,66 +561,22 @@ static void gen_mtvsrws(DisasContext *ctx)
t0 = tcg_temp_new_i64();
tcg_gen_deposit_i64(t0, cpu_gpr[rA(ctx->opcode)],
cpu_gpr[rA(ctx->opcode)], 32, 32);
- set_cpu_vsrl(xT(ctx->opcode), t0);
- set_cpu_vsrh(xT(ctx->opcode), t0);
- tcg_temp_free_i64(t0);
+ set_cpu_vsr(xT(ctx->opcode), t0, false);
+ set_cpu_vsr(xT(ctx->opcode), t0, true);
}
#endif
-static void gen_xxpermdi(DisasContext *ctx)
-{
- TCGv_i64 xh, xl;
-
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
-
- xh = tcg_temp_new_i64();
- xl = tcg_temp_new_i64();
-
- if (unlikely((xT(ctx->opcode) == xA(ctx->opcode)) ||
- (xT(ctx->opcode) == xB(ctx->opcode)))) {
- if ((DM(ctx->opcode) & 2) == 0) {
- get_cpu_vsrh(xh, xA(ctx->opcode));
- } else {
- get_cpu_vsrl(xh, xA(ctx->opcode));
- }
- if ((DM(ctx->opcode) & 1) == 0) {
- get_cpu_vsrh(xl, xB(ctx->opcode));
- } else {
- get_cpu_vsrl(xl, xB(ctx->opcode));
- }
-
- set_cpu_vsrh(xT(ctx->opcode), xh);
- set_cpu_vsrl(xT(ctx->opcode), xl);
- } else {
- if ((DM(ctx->opcode) & 2) == 0) {
- get_cpu_vsrh(xh, xA(ctx->opcode));
- set_cpu_vsrh(xT(ctx->opcode), xh);
- } else {
- get_cpu_vsrl(xh, xA(ctx->opcode));
- set_cpu_vsrh(xT(ctx->opcode), xh);
- }
- if ((DM(ctx->opcode) & 1) == 0) {
- get_cpu_vsrh(xl, xB(ctx->opcode));
- set_cpu_vsrl(xT(ctx->opcode), xl);
- } else {
- get_cpu_vsrl(xl, xB(ctx->opcode));
- set_cpu_vsrl(xT(ctx->opcode), xl);
- }
- }
- tcg_temp_free_i64(xh);
- tcg_temp_free_i64(xl);
-}
-
#define OP_ABS 1
#define OP_NABS 2
#define OP_NEG 3
#define OP_CPSGN 4
#define SGN_MASK_DP 0x8000000000000000ull
#define SGN_MASK_SP 0x8000000080000000ull
+#define EXP_MASK_DP 0x7FF0000000000000ull
+#define EXP_MASK_SP 0x7F8000007F800000ull
+#define FRC_MASK_DP (~(SGN_MASK_DP | EXP_MASK_DP))
+#define FRC_MASK_SP (~(SGN_MASK_SP | EXP_MASK_SP))
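Note: the new EXP/FRC masks complete a disjoint sign/exponent/fraction split of each encoding; the SP variants repeat the 32-bit pattern in both halves so one 64-bit operation covers two single-precision lanes. A standalone sketch (not patch code) that checks the partition property:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const uint64_t sgn_dp = 0x8000000000000000ull;   /* SGN_MASK_DP */
        const uint64_t exp_dp = 0x7FF0000000000000ull;   /* EXP_MASK_DP */
        const uint64_t frc_dp = ~(sgn_dp | exp_dp);      /* FRC_MASK_DP */

        /* the three fields are disjoint and together cover all 64 bits */
        assert((sgn_dp & exp_dp) == 0 && (exp_dp & frc_dp) == 0);
        assert((sgn_dp | exp_dp | frc_dp) == ~(uint64_t)0);

        /* the SP masks carry the same 32-bit pattern in both halves */
        const uint64_t sgn_sp = 0x8000000080000000ull;   /* SGN_MASK_SP */
        assert((uint32_t)sgn_sp == (uint32_t)(sgn_sp >> 32));
        return 0;
    }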
#define VSX_SCALAR_MOVE(name, op, sgn_mask) \
static void glue(gen_, name)(DisasContext *ctx) \
@@ -847,7 +588,7 @@ static void glue(gen_, name)(DisasContext *ctx) \
} \
xb = tcg_temp_new_i64(); \
sgm = tcg_temp_new_i64(); \
- get_cpu_vsrh(xb, xB(ctx->opcode)); \
+ get_cpu_vsr(xb, xB(ctx->opcode), true); \
tcg_gen_movi_i64(sgm, sgn_mask); \
switch (op) { \
case OP_ABS: { \
@@ -864,17 +605,15 @@ static void glue(gen_, name)(DisasContext *ctx) \
} \
case OP_CPSGN: { \
TCGv_i64 xa = tcg_temp_new_i64(); \
- get_cpu_vsrh(xa, xA(ctx->opcode)); \
+ get_cpu_vsr(xa, xA(ctx->opcode), true); \
tcg_gen_and_i64(xa, xa, sgm); \
tcg_gen_andc_i64(xb, xb, sgm); \
tcg_gen_or_i64(xb, xb, xa); \
- tcg_temp_free_i64(xa); \
break; \
} \
} \
- set_cpu_vsrh(xT(ctx->opcode), xb); \
- tcg_temp_free_i64(xb); \
- tcg_temp_free_i64(sgm); \
+ set_cpu_vsr(xT(ctx->opcode), xb, true); \
+ set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \
}
VSX_SCALAR_MOVE(xsabsdp, OP_ABS, SGN_MASK_DP)
@@ -898,8 +637,8 @@ static void glue(gen_, name)(DisasContext *ctx) \
xbl = tcg_temp_new_i64(); \
sgm = tcg_temp_new_i64(); \
tmp = tcg_temp_new_i64(); \
- get_cpu_vsrh(xbh, xb); \
- get_cpu_vsrl(xbl, xb); \
+ get_cpu_vsr(xbh, xb, true); \
+ get_cpu_vsr(xbl, xb, false); \
tcg_gen_movi_i64(sgm, sgn_mask); \
switch (op) { \
case OP_ABS: \
@@ -914,19 +653,14 @@ static void glue(gen_, name)(DisasContext *ctx) \
case OP_CPSGN: \
xah = tcg_temp_new_i64(); \
xa = rA(ctx->opcode) + 32; \
- get_cpu_vsrh(tmp, xa); \
+ get_cpu_vsr(tmp, xa, true); \
tcg_gen_and_i64(xah, tmp, sgm); \
tcg_gen_andc_i64(xbh, xbh, sgm); \
tcg_gen_or_i64(xbh, xbh, xah); \
- tcg_temp_free_i64(xah); \
break; \
} \
- set_cpu_vsrh(xt, xbh); \
- set_cpu_vsrl(xt, xbl); \
- tcg_temp_free_i64(xbl); \
- tcg_temp_free_i64(xbh); \
- tcg_temp_free_i64(sgm); \
- tcg_temp_free_i64(tmp); \
+ set_cpu_vsr(xt, xbh, true); \
+ set_cpu_vsr(xt, xbl, false); \
}
VSX_SCALAR_MOVE_QP(xsabsqp, OP_ABS, SGN_MASK_DP)
@@ -934,67 +668,125 @@ VSX_SCALAR_MOVE_QP(xsnabsqp, OP_NABS, SGN_MASK_DP)
VSX_SCALAR_MOVE_QP(xsnegqp, OP_NEG, SGN_MASK_DP)
VSX_SCALAR_MOVE_QP(xscpsgnqp, OP_CPSGN, SGN_MASK_DP)
-#define VSX_VECTOR_MOVE(name, op, sgn_mask) \
-static void glue(gen_, name)(DisasContext *ctx) \
- { \
- TCGv_i64 xbh, xbl, sgm; \
- if (unlikely(!ctx->vsx_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VSXU); \
- return; \
- } \
- xbh = tcg_temp_new_i64(); \
- xbl = tcg_temp_new_i64(); \
- sgm = tcg_temp_new_i64(); \
- get_cpu_vsrh(xbh, xB(ctx->opcode)); \
- get_cpu_vsrl(xbl, xB(ctx->opcode)); \
- tcg_gen_movi_i64(sgm, sgn_mask); \
- switch (op) { \
- case OP_ABS: { \
- tcg_gen_andc_i64(xbh, xbh, sgm); \
- tcg_gen_andc_i64(xbl, xbl, sgm); \
- break; \
- } \
- case OP_NABS: { \
- tcg_gen_or_i64(xbh, xbh, sgm); \
- tcg_gen_or_i64(xbl, xbl, sgm); \
- break; \
- } \
- case OP_NEG: { \
- tcg_gen_xor_i64(xbh, xbh, sgm); \
- tcg_gen_xor_i64(xbl, xbl, sgm); \
- break; \
- } \
- case OP_CPSGN: { \
- TCGv_i64 xah = tcg_temp_new_i64(); \
- TCGv_i64 xal = tcg_temp_new_i64(); \
- get_cpu_vsrh(xah, xA(ctx->opcode)); \
- get_cpu_vsrl(xal, xA(ctx->opcode)); \
- tcg_gen_and_i64(xah, xah, sgm); \
- tcg_gen_and_i64(xal, xal, sgm); \
- tcg_gen_andc_i64(xbh, xbh, sgm); \
- tcg_gen_andc_i64(xbl, xbl, sgm); \
- tcg_gen_or_i64(xbh, xbh, xah); \
- tcg_gen_or_i64(xbl, xbl, xal); \
- tcg_temp_free_i64(xah); \
- tcg_temp_free_i64(xal); \
- break; \
- } \
- } \
- set_cpu_vsrh(xT(ctx->opcode), xbh); \
- set_cpu_vsrl(xT(ctx->opcode), xbl); \
- tcg_temp_free_i64(xbh); \
- tcg_temp_free_i64(xbl); \
- tcg_temp_free_i64(sgm); \
- }
-
-VSX_VECTOR_MOVE(xvabsdp, OP_ABS, SGN_MASK_DP)
-VSX_VECTOR_MOVE(xvnabsdp, OP_NABS, SGN_MASK_DP)
-VSX_VECTOR_MOVE(xvnegdp, OP_NEG, SGN_MASK_DP)
-VSX_VECTOR_MOVE(xvcpsgndp, OP_CPSGN, SGN_MASK_DP)
-VSX_VECTOR_MOVE(xvabssp, OP_ABS, SGN_MASK_SP)
-VSX_VECTOR_MOVE(xvnabssp, OP_NABS, SGN_MASK_SP)
-VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP)
-VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP)
+#define TCG_OP_IMM_i64(FUNC, OP, IMM) \
+ static void FUNC(TCGv_i64 t, TCGv_i64 b) \
+ { \
+ OP(t, b, IMM); \
+ }
+
+TCG_OP_IMM_i64(do_xvabssp_i64, tcg_gen_andi_i64, ~SGN_MASK_SP)
+TCG_OP_IMM_i64(do_xvnabssp_i64, tcg_gen_ori_i64, SGN_MASK_SP)
+TCG_OP_IMM_i64(do_xvnegsp_i64, tcg_gen_xori_i64, SGN_MASK_SP)
+TCG_OP_IMM_i64(do_xvabsdp_i64, tcg_gen_andi_i64, ~SGN_MASK_DP)
+TCG_OP_IMM_i64(do_xvnabsdp_i64, tcg_gen_ori_i64, SGN_MASK_DP)
+TCG_OP_IMM_i64(do_xvnegdp_i64, tcg_gen_xori_i64, SGN_MASK_DP)
+#undef TCG_OP_IMM_i64
+
+static void xv_msb_op1(unsigned vece, TCGv_vec t, TCGv_vec b,
+ void (*tcg_gen_op_vec)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
+{
+ uint64_t msb = (vece == MO_32) ? SGN_MASK_SP : SGN_MASK_DP;
+ tcg_gen_op_vec(vece, t, b, tcg_constant_vec_matching(t, vece, msb));
+}
+
+static void do_xvabs_vec(unsigned vece, TCGv_vec t, TCGv_vec b)
+{
+ xv_msb_op1(vece, t, b, tcg_gen_andc_vec);
+}
+
+static void do_xvnabs_vec(unsigned vece, TCGv_vec t, TCGv_vec b)
+{
+ xv_msb_op1(vece, t, b, tcg_gen_or_vec);
+}
+
+static void do_xvneg_vec(unsigned vece, TCGv_vec t, TCGv_vec b)
+{
+ xv_msb_op1(vece, t, b, tcg_gen_xor_vec);
+}
+
+static bool do_vsx_msb_op(DisasContext *ctx, arg_XX2 *a, unsigned vece,
+ void (*vec)(unsigned, TCGv_vec, TCGv_vec),
+ void (*i64)(TCGv_i64, TCGv_i64))
+{
+ static const TCGOpcode vecop_list[] = {
+ 0
+ };
+
+ const GVecGen2 op = {
+ .fni8 = i64,
+ .fniv = vec,
+ .opt_opc = vecop_list,
+ .vece = vece
+ };
+
+ REQUIRE_INSNS_FLAGS2(ctx, VSX);
+ REQUIRE_VSX(ctx);
+
+ tcg_gen_gvec_2(vsr_full_offset(a->xt), vsr_full_offset(a->xb),
+ 16, 16, &op);
+
+ return true;
+}
+
+TRANS(XVABSDP, do_vsx_msb_op, MO_64, do_xvabs_vec, do_xvabsdp_i64)
+TRANS(XVNABSDP, do_vsx_msb_op, MO_64, do_xvnabs_vec, do_xvnabsdp_i64)
+TRANS(XVNEGDP, do_vsx_msb_op, MO_64, do_xvneg_vec, do_xvnegdp_i64)
+TRANS(XVABSSP, do_vsx_msb_op, MO_32, do_xvabs_vec, do_xvabssp_i64)
+TRANS(XVNABSSP, do_vsx_msb_op, MO_32, do_xvnabs_vec, do_xvnabssp_i64)
+TRANS(XVNEGSP, do_vsx_msb_op, MO_32, do_xvneg_vec, do_xvnegsp_i64)
+
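Note: the TCG_OP_IMM_i64 expansions and the gvec TRANS lines above exploit the fact that IEEE 754 abs, negative-abs and negation only touch the sign bit, so a plain integer andc/or/xor with the sign mask per lane is enough and no FPU helper is needed. A host-side illustration of the same trick (illustrative only, not part of the patch):

    #include <assert.h>
    #include <math.h>
    #include <stdint.h>
    #include <string.h>

    static uint64_t dp_bits(double d)    { uint64_t u; memcpy(&u, &d, 8); return u; }
    static double   dp_value(uint64_t u) { double d;   memcpy(&d, &u, 8); return d; }

    int main(void)
    {
        const uint64_t sgn = 0x8000000000000000ull;        /* SGN_MASK_DP */
        double x = -3.5;

        assert(dp_value(dp_bits(x) & ~sgn) == fabs(x));    /* xvabsdp lane  */
        assert(dp_value(dp_bits(x) |  sgn) == -fabs(x));   /* xvnabsdp lane */
        assert(dp_value(dp_bits(x) ^  sgn) == -x);         /* xvnegdp lane  */
        return 0;
    }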
+static void do_xvcpsgndp_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ tcg_gen_andi_i64(a, a, SGN_MASK_DP);
+ tcg_gen_andi_i64(b, b, ~SGN_MASK_DP);
+ tcg_gen_or_i64(t, a, b);
+}
+
+static void do_xvcpsgnsp_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ tcg_gen_andi_i64(a, a, SGN_MASK_SP);
+ tcg_gen_andi_i64(b, b, ~SGN_MASK_SP);
+ tcg_gen_or_i64(t, a, b);
+}
+
+static void do_xvcpsgn_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ uint64_t msb = (vece == MO_32) ? SGN_MASK_SP : SGN_MASK_DP;
+ tcg_gen_bitsel_vec(vece, t, tcg_constant_vec_matching(t, vece, msb), a, b);
+}
+
+static bool do_xvcpsgn(DisasContext *ctx, arg_XX3 *a, unsigned vece)
+{
+ static const TCGOpcode vecop_list[] = {
+ 0
+ };
+
+ static const GVecGen3 op[] = {
+ {
+ .fni8 = do_xvcpsgnsp_i64,
+ .fniv = do_xvcpsgn_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = do_xvcpsgndp_i64,
+ .fniv = do_xvcpsgn_vec,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ REQUIRE_INSNS_FLAGS2(ctx, VSX);
+ REQUIRE_VSX(ctx);
+
+ tcg_gen_gvec_3(vsr_full_offset(a->xt), vsr_full_offset(a->xa),
+ vsr_full_offset(a->xb), 16, 16, &op[vece - MO_32]);
+
+ return true;
+}
+
+TRANS(XVCPSGNSP, do_xvcpsgn, MO_32)
+TRANS(XVCPSGNDP, do_xvcpsgn, MO_64)
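Note: for copy-sign, do_xvcpsgn_vec lets tcg_gen_bitsel_vec use the sign mask as the selector, giving result = (mask & a) | (~mask & b) per lane: sign from XA, magnitude from XB. A hedged scalar sketch of that selection (helper name invented for the example):

    #include <assert.h>
    #include <math.h>
    #include <stdint.h>
    #include <string.h>

    /* result = (mask & a) | (~mask & b): sign bit from a, everything
     * else from b, which is the per-lane effect of xvcpsgndp. */
    static uint64_t bitsel64(uint64_t mask, uint64_t a, uint64_t b)
    {
        return (a & mask) | (b & ~mask);
    }

    int main(void)
    {
        const uint64_t sgn = 0x8000000000000000ull;
        uint64_t a, b, t;
        double da = -1.0, db = 42.0, dt;

        memcpy(&a, &da, 8);
        memcpy(&b, &db, 8);
        t = bitsel64(sgn, a, b);
        memcpy(&dt, &t, 8);
        assert(dt == copysign(db, da));   /* -42.0 */
        return 0;
    }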
#define VSX_CMP(name, op1, op2, inval, type) \
static void gen_##name(DisasContext *ctx) \
@@ -1009,16 +801,11 @@ static void gen_##name(DisasContext *ctx) \
xa = gen_vsr_ptr(xA(ctx->opcode)); \
xb = gen_vsr_ptr(xB(ctx->opcode)); \
if ((ctx->opcode >> (31 - 21)) & 1) { \
- gen_helper_##name(cpu_crf[6], cpu_env, xt, xa, xb); \
+ gen_helper_##name(cpu_crf[6], tcg_env, xt, xa, xb); \
} else { \
ignored = tcg_temp_new_i32(); \
- gen_helper_##name(ignored, cpu_env, xt, xa, xb); \
- tcg_temp_free_i32(ignored); \
+ gen_helper_##name(ignored, tcg_env, xt, xa, xb); \
} \
- gen_helper_float_check_status(cpu_env); \
- tcg_temp_free_ptr(xt); \
- tcg_temp_free_ptr(xa); \
- tcg_temp_free_ptr(xb); \
}
VSX_CMP(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX)
@@ -1030,23 +817,41 @@ VSX_CMP(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX)
VSX_CMP(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX)
VSX_CMP(xvcmpnesp, 0x0C, 0x0B, 0, PPC2_VSX)
-static void gen_xscvqpdp(DisasContext *ctx)
+static bool trans_XSCVQPDP(DisasContext *ctx, arg_X_tb_rc *a)
{
- TCGv_i32 opc;
+ TCGv_i32 ro;
TCGv_ptr xt, xb;
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
- opc = tcg_const_i32(ctx->opcode);
- xt = gen_vsr_ptr(xT(ctx->opcode));
- xb = gen_vsr_ptr(xB(ctx->opcode));
- gen_helper_xscvqpdp(cpu_env, opc, xt, xb);
- tcg_temp_free_i32(opc);
- tcg_temp_free_ptr(xt);
- tcg_temp_free_ptr(xb);
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VSX(ctx);
+
+ ro = tcg_constant_i32(a->rc);
+
+ xt = gen_avr_ptr(a->rt);
+ xb = gen_avr_ptr(a->rb);
+ gen_helper_XSCVQPDP(tcg_env, ro, xt, xb);
+ return true;
+}
+
+static bool do_helper_env_X_tb(DisasContext *ctx, arg_X_tb *a,
+ void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr))
+{
+ TCGv_ptr xt, xb;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+
+ xt = gen_avr_ptr(a->rt);
+ xb = gen_avr_ptr(a->rb);
+ gen_helper(tcg_env, xt, xb);
+ return true;
}
+TRANS(XSCVUQQP, do_helper_env_X_tb, gen_helper_XSCVUQQP)
+TRANS(XSCVSQQP, do_helper_env_X_tb, gen_helper_XSCVSQQP)
+TRANS(XSCVQPUQZ, do_helper_env_X_tb, gen_helper_XSCVQPUQZ)
+TRANS(XSCVQPSQZ, do_helper_env_X_tb, gen_helper_XSCVQPSQZ)
+
#define GEN_VSX_HELPER_2(name, op1, op2, inval, type) \
static void gen_##name(DisasContext *ctx) \
{ \
@@ -1055,9 +860,8 @@ static void gen_##name(DisasContext *ctx) \
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- opc = tcg_const_i32(ctx->opcode); \
- gen_helper_##name(cpu_env, opc); \
- tcg_temp_free_i32(opc); \
+ opc = tcg_constant_i32(ctx->opcode); \
+ gen_helper_##name(tcg_env, opc); \
}
#define GEN_VSX_HELPER_X3(name, op1, op2, inval, type) \
@@ -1071,10 +875,7 @@ static void gen_##name(DisasContext *ctx) \
xt = gen_vsr_ptr(xT(ctx->opcode)); \
xa = gen_vsr_ptr(xA(ctx->opcode)); \
xb = gen_vsr_ptr(xB(ctx->opcode)); \
- gen_helper_##name(cpu_env, xt, xa, xb); \
- tcg_temp_free_ptr(xt); \
- tcg_temp_free_ptr(xa); \
- tcg_temp_free_ptr(xb); \
+ gen_helper_##name(tcg_env, xt, xa, xb); \
}
#define GEN_VSX_HELPER_X2(name, op1, op2, inval, type) \
@@ -1087,9 +888,7 @@ static void gen_##name(DisasContext *ctx) \
} \
xt = gen_vsr_ptr(xT(ctx->opcode)); \
xb = gen_vsr_ptr(xB(ctx->opcode)); \
- gen_helper_##name(cpu_env, xt, xb); \
- tcg_temp_free_ptr(xt); \
- tcg_temp_free_ptr(xb); \
+ gen_helper_##name(tcg_env, xt, xb); \
}
#define GEN_VSX_HELPER_X2_AB(name, op1, op2, inval, type) \
@@ -1101,13 +900,10 @@ static void gen_##name(DisasContext *ctx) \
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- opc = tcg_const_i32(ctx->opcode); \
+ opc = tcg_constant_i32(ctx->opcode); \
xa = gen_vsr_ptr(xA(ctx->opcode)); \
xb = gen_vsr_ptr(xB(ctx->opcode)); \
- gen_helper_##name(cpu_env, opc, xa, xb); \
- tcg_temp_free_i32(opc); \
- tcg_temp_free_ptr(xa); \
- tcg_temp_free_ptr(xb); \
+ gen_helper_##name(tcg_env, opc, xa, xb); \
}
#define GEN_VSX_HELPER_X1(name, op1, op2, inval, type) \
@@ -1119,11 +915,9 @@ static void gen_##name(DisasContext *ctx) \
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- opc = tcg_const_i32(ctx->opcode); \
+ opc = tcg_constant_i32(ctx->opcode); \
xb = gen_vsr_ptr(xB(ctx->opcode)); \
- gen_helper_##name(cpu_env, opc, xb); \
- tcg_temp_free_i32(opc); \
- tcg_temp_free_ptr(xb); \
+ gen_helper_##name(tcg_env, opc, xb); \
}
#define GEN_VSX_HELPER_R3(name, op1, op2, inval, type) \
@@ -1135,15 +929,11 @@ static void gen_##name(DisasContext *ctx) \
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- opc = tcg_const_i32(ctx->opcode); \
+ opc = tcg_constant_i32(ctx->opcode); \
xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \
xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \
xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
- gen_helper_##name(cpu_env, opc, xt, xa, xb); \
- tcg_temp_free_i32(opc); \
- tcg_temp_free_ptr(xt); \
- tcg_temp_free_ptr(xa); \
- tcg_temp_free_ptr(xb); \
+ gen_helper_##name(tcg_env, opc, xt, xa, xb); \
}
#define GEN_VSX_HELPER_R2(name, op1, op2, inval, type) \
@@ -1155,13 +945,10 @@ static void gen_##name(DisasContext *ctx) \
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- opc = tcg_const_i32(ctx->opcode); \
+ opc = tcg_constant_i32(ctx->opcode); \
xt = gen_vsr_ptr(rD(ctx->opcode) + 32); \
xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
- gen_helper_##name(cpu_env, opc, xt, xb); \
- tcg_temp_free_i32(opc); \
- tcg_temp_free_ptr(xt); \
- tcg_temp_free_ptr(xb); \
+ gen_helper_##name(tcg_env, opc, xt, xb); \
}
#define GEN_VSX_HELPER_R2_AB(name, op1, op2, inval, type) \
@@ -1173,13 +960,10 @@ static void gen_##name(DisasContext *ctx) \
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
- opc = tcg_const_i32(ctx->opcode); \
+ opc = tcg_constant_i32(ctx->opcode); \
xa = gen_vsr_ptr(rA(ctx->opcode) + 32); \
xb = gen_vsr_ptr(rB(ctx->opcode) + 32); \
- gen_helper_##name(cpu_env, opc, xa, xb); \
- tcg_temp_free_i32(opc); \
- tcg_temp_free_ptr(xa); \
- tcg_temp_free_ptr(xb); \
+ gen_helper_##name(tcg_env, opc, xa, xb); \
}
#define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \
@@ -1193,11 +977,10 @@ static void gen_##name(DisasContext *ctx) \
} \
t0 = tcg_temp_new_i64(); \
t1 = tcg_temp_new_i64(); \
- get_cpu_vsrh(t0, xB(ctx->opcode)); \
- gen_helper_##name(t1, cpu_env, t0); \
- set_cpu_vsrh(xT(ctx->opcode), t1); \
- tcg_temp_free_i64(t0); \
- tcg_temp_free_i64(t1); \
+ get_cpu_vsr(t0, xB(ctx->opcode), true); \
+ gen_helper_##name(t1, tcg_env, t0); \
+ set_cpu_vsr(xT(ctx->opcode), t1, true); \
+ set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false); \
}
GEN_VSX_HELPER_X3(xsadddp, 0x00, 0x04, 0, PPC2_VSX)
@@ -1212,10 +995,6 @@ GEN_VSX_HELPER_X2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_X2_AB(xstdivdp, 0x14, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_X1(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX)
-GEN_VSX_HELPER_X3(xscmpeqdp, 0x0C, 0x00, 0, PPC2_ISA300)
-GEN_VSX_HELPER_X3(xscmpgtdp, 0x0C, 0x01, 0, PPC2_ISA300)
-GEN_VSX_HELPER_X3(xscmpgedp, 0x0C, 0x02, 0, PPC2_ISA300)
-GEN_VSX_HELPER_X3(xscmpnedp, 0x0C, 0x03, 0, PPC2_ISA300)
GEN_VSX_HELPER_X2_AB(xscmpexpdp, 0x0C, 0x07, 0, PPC2_ISA300)
GEN_VSX_HELPER_R2_AB(xscmpexpqp, 0x04, 0x05, 0, PPC2_ISA300)
GEN_VSX_HELPER_X2_AB(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX)
@@ -1224,10 +1003,6 @@ GEN_VSX_HELPER_R2_AB(xscmpoqp, 0x04, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_R2_AB(xscmpuqp, 0x04, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xsmindp, 0x00, 0x15, 0, PPC2_VSX)
-GEN_VSX_HELPER_R3(xsmaxcdp, 0x00, 0x10, 0, PPC2_ISA300)
-GEN_VSX_HELPER_R3(xsmincdp, 0x00, 0x11, 0, PPC2_ISA300)
-GEN_VSX_HELPER_R3(xsmaxjdp, 0x00, 0x12, 0, PPC2_ISA300)
-GEN_VSX_HELPER_R3(xsminjdp, 0x00, 0x12, 0, PPC2_ISA300)
GEN_VSX_HELPER_X2(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300)
GEN_VSX_HELPER_X2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
GEN_VSX_HELPER_R2(xscvdpqp, 0x04, 0x1A, 0x16, PPC2_ISA300)
@@ -1239,7 +1014,208 @@ GEN_VSX_HELPER_R2(xscvqpuwz, 0x04, 0x1A, 0x01, PPC2_ISA300)
GEN_VSX_HELPER_X2(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300)
GEN_VSX_HELPER_R2(xscvsdqp, 0x04, 0x1A, 0x0A, PPC2_ISA300)
GEN_VSX_HELPER_X2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX)
-GEN_VSX_HELPER_XT_XB_ENV(xscvspdpn, 0x16, 0x14, 0, PPC2_VSX207)
+
+/* test if +Inf */
+static void gen_is_pos_inf(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
+{
+ uint64_t exp_msk = (vece == MO_32) ? (uint32_t)EXP_MASK_SP : EXP_MASK_DP;
+ tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
+ tcg_constant_vec_matching(t, vece, exp_msk));
+}
+
+/* test if -Inf */
+static void gen_is_neg_inf(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
+{
+ uint64_t exp_msk = (vece == MO_32) ? (uint32_t)EXP_MASK_SP : EXP_MASK_DP;
+ uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
+ tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
+ tcg_constant_vec_matching(t, vece, sgn_msk | exp_msk));
+}
+
+/* test if +Inf or -Inf */
+static void gen_is_any_inf(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
+{
+ uint64_t exp_msk = (vece == MO_32) ? (uint32_t)EXP_MASK_SP : EXP_MASK_DP;
+ uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
+ tcg_gen_andc_vec(vece, b, b, tcg_constant_vec_matching(t, vece, sgn_msk));
+ tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
+ tcg_constant_vec_matching(t, vece, exp_msk));
+}
+
+/* test if +0 */
+static void gen_is_pos_zero(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
+{
+ tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
+ tcg_constant_vec_matching(t, vece, 0));
+}
+
+/* test if -0 */
+static void gen_is_neg_zero(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
+{
+ uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
+ tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
+ tcg_constant_vec_matching(t, vece, sgn_msk));
+}
+
+/* test if +0 or -0 */
+static void gen_is_any_zero(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
+{
+ uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
+ tcg_gen_andc_vec(vece, b, b, tcg_constant_vec_matching(t, vece, sgn_msk));
+ tcg_gen_cmp_vec(TCG_COND_EQ, vece, t, b,
+ tcg_constant_vec_matching(t, vece, 0));
+}
+
+/* test if +Denormal */
+static void gen_is_pos_denormal(unsigned vece, TCGv_vec t,
+ TCGv_vec b, int64_t v)
+{
+ uint64_t frc_msk = (vece == MO_32) ? (uint32_t)FRC_MASK_SP : FRC_MASK_DP;
+ tcg_gen_cmp_vec(TCG_COND_LEU, vece, t, b,
+ tcg_constant_vec_matching(t, vece, frc_msk));
+ tcg_gen_cmp_vec(TCG_COND_NE, vece, b, b,
+ tcg_constant_vec_matching(t, vece, 0));
+ tcg_gen_and_vec(vece, t, t, b);
+}
+
+/* test if -Denormal */
+static void gen_is_neg_denormal(unsigned vece, TCGv_vec t,
+ TCGv_vec b, int64_t v)
+{
+ uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
+ uint64_t frc_msk = (vece == MO_32) ? (uint32_t)FRC_MASK_SP : FRC_MASK_DP;
+ tcg_gen_cmp_vec(TCG_COND_LEU, vece, t, b,
+ tcg_constant_vec_matching(t, vece, sgn_msk | frc_msk));
+ tcg_gen_cmp_vec(TCG_COND_GTU, vece, b, b,
+ tcg_constant_vec_matching(t, vece, sgn_msk));
+ tcg_gen_and_vec(vece, t, t, b);
+}
+
+/* test if +Denormal or -Denormal */
+static void gen_is_any_denormal(unsigned vece, TCGv_vec t,
+ TCGv_vec b, int64_t v)
+{
+ uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
+ uint64_t frc_msk = (vece == MO_32) ? (uint32_t)FRC_MASK_SP : FRC_MASK_DP;
+ tcg_gen_andc_vec(vece, b, b, tcg_constant_vec_matching(t, vece, sgn_msk));
+ tcg_gen_cmp_vec(TCG_COND_LE, vece, t, b,
+ tcg_constant_vec_matching(t, vece, frc_msk));
+ tcg_gen_cmp_vec(TCG_COND_NE, vece, b, b,
+ tcg_constant_vec_matching(t, vece, 0));
+ tcg_gen_and_vec(vece, t, t, b);
+}
+
+/* test if NaN */
+static void gen_is_nan(unsigned vece, TCGv_vec t, TCGv_vec b, int64_t v)
+{
+ uint64_t exp_msk = (vece == MO_32) ? (uint32_t)EXP_MASK_SP : EXP_MASK_DP;
+ uint64_t sgn_msk = (vece == MO_32) ? (uint32_t)SGN_MASK_SP : SGN_MASK_DP;
+ tcg_gen_and_vec(vece, b, b, tcg_constant_vec_matching(t, vece, ~sgn_msk));
+ tcg_gen_cmp_vec(TCG_COND_GT, vece, t, b,
+ tcg_constant_vec_matching(t, vece, exp_msk));
+}
+
+static bool do_xvtstdc(DisasContext *ctx, arg_XX2_uim *a, unsigned vece)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_cmp_vec, 0
+ };
+
+ GVecGen2i op = {
+ .fnoi = (vece == MO_32) ? gen_helper_XVTSTDCSP : gen_helper_XVTSTDCDP,
+ .vece = vece,
+ .opt_opc = vecop_list
+ };
+
+ REQUIRE_VSX(ctx);
+
+ switch (a->uim) {
+ case 0:
+ set_cpu_vsr(a->xt, tcg_constant_i64(0), true);
+ set_cpu_vsr(a->xt, tcg_constant_i64(0), false);
+ return true;
+ case ((1 << 0) | (1 << 1)):
+ /* test if +Denormal or -Denormal */
+ op.fniv = gen_is_any_denormal;
+ break;
+ case (1 << 0):
+ /* test if -Denormal */
+ op.fniv = gen_is_neg_denormal;
+ break;
+ case (1 << 1):
+ /* test if +Denormal */
+ op.fniv = gen_is_pos_denormal;
+ break;
+ case ((1 << 2) | (1 << 3)):
+ /* test if +0 or -0 */
+ op.fniv = gen_is_any_zero;
+ break;
+ case (1 << 2):
+ /* test if -0 */
+ op.fniv = gen_is_neg_zero;
+ break;
+ case (1 << 3):
+ /* test if +0 */
+ op.fniv = gen_is_pos_zero;
+ break;
+ case ((1 << 4) | (1 << 5)):
+ /* test if +Inf or -Inf */
+ op.fniv = gen_is_any_inf;
+ break;
+ case (1 << 4):
+ /* test if -Inf */
+ op.fniv = gen_is_neg_inf;
+ break;
+ case (1 << 5):
+ /* test if +Inf */
+ op.fniv = gen_is_pos_inf;
+ break;
+ case (1 << 6):
+ /* test if NaN */
+ op.fniv = gen_is_nan;
+ break;
+ }
+ tcg_gen_gvec_2i(vsr_full_offset(a->xt), vsr_full_offset(a->xb),
+ 16, 16, a->uim, &op);
+
+ return true;
+}
+
+TRANS_FLAGS2(VSX, XVTSTDCSP, do_xvtstdc, MO_32)
+TRANS_FLAGS2(VSX, XVTSTDCDP, do_xvtstdc, MO_64)
+
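Note: the switch in do_xvtstdc dispatches on the 7-bit data-class mask: single classes and the plus/minus pairs get an inline vector expansion, and any other mask combination is left to the out-of-line helper installed in .fnoi. The bit assignments implied by the cases above, collected for reference (a sketch that mirrors the switch, not new patch code):

    /* Data-class mask bits as implied by the switch above
     * (bit 0 is the LSB of uim). */
    enum {
        DCMX_NEG_DENORMAL = 1 << 0,   /* -Denormal         */
        DCMX_POS_DENORMAL = 1 << 1,   /* +Denormal         */
        DCMX_NEG_ZERO     = 1 << 2,   /* -0                */
        DCMX_POS_ZERO     = 1 << 3,   /* +0                */
        DCMX_NEG_INF      = 1 << 4,   /* -Inf              */
        DCMX_POS_INF      = 1 << 5,   /* +Inf              */
        DCMX_NAN          = 1 << 6,   /* NaN, sign ignored */
    };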
+static bool do_XX2_bf_uim(DisasContext *ctx, arg_XX2_bf_uim *a, bool vsr,
+ void (*gen_helper)(TCGv_env, TCGv_i32, TCGv_i32, TCGv_ptr))
+{
+ TCGv_ptr xb;
+
+ REQUIRE_VSX(ctx);
+ xb = vsr ? gen_vsr_ptr(a->xb) : gen_avr_ptr(a->xb);
+ gen_helper(tcg_env, tcg_constant_i32(a->bf), tcg_constant_i32(a->uim), xb);
+ return true;
+}
+
+TRANS_FLAGS2(ISA300, XSTSTDCSP, do_XX2_bf_uim, true, gen_helper_XSTSTDCSP)
+TRANS_FLAGS2(ISA300, XSTSTDCDP, do_XX2_bf_uim, true, gen_helper_XSTSTDCDP)
+TRANS_FLAGS2(ISA300, XSTSTDCQP, do_XX2_bf_uim, false, gen_helper_XSTSTDCQP)
+
+bool trans_XSCVSPDPN(DisasContext *ctx, arg_XX2 *a)
+{
+ TCGv_i64 tmp;
+
+ REQUIRE_INSNS_FLAGS2(ctx, VSX207);
+ REQUIRE_VSX(ctx);
+
+ tmp = tcg_temp_new_i64();
+ get_cpu_vsr(tmp, a->xb, true);
+
+ gen_helper_XSCVSPDPN(tmp, tmp);
+
+ set_cpu_vsr(a->xt, tmp, true);
+ set_cpu_vsr(a->xt, tcg_constant_i64(0), false);
+ return true;
+}
+
GEN_VSX_HELPER_X2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX)
@@ -1266,9 +1242,6 @@ GEN_VSX_HELPER_X2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207)
GEN_VSX_HELPER_X2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207)
-GEN_VSX_HELPER_X1(xststdcsp, 0x14, 0x12, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xststdcdp, 0x14, 0x16, 0, PPC2_ISA300)
-GEN_VSX_HELPER_2(xststdcqp, 0x04, 0x16, 0, PPC2_ISA300)
GEN_VSX_HELPER_X3(xvadddp, 0x00, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_X3(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX)
@@ -1323,49 +1296,213 @@ GEN_VSX_HELPER_X2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_X2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvtstdcsp, 0x14, 0x1A, 0, PPC2_VSX)
-GEN_VSX_HELPER_2(xvtstdcdp, 0x14, 0x1E, 0, PPC2_VSX)
-GEN_VSX_HELPER_X3(xxperm, 0x08, 0x03, 0, PPC2_ISA300)
-GEN_VSX_HELPER_X3(xxpermr, 0x08, 0x07, 0, PPC2_ISA300)
+
+static bool trans_XXPERM(DisasContext *ctx, arg_XX3 *a)
+{
+ TCGv_ptr xt, xa, xb;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VSX(ctx);
+
+ xt = gen_vsr_ptr(a->xt);
+ xa = gen_vsr_ptr(a->xa);
+ xb = gen_vsr_ptr(a->xb);
+
+ gen_helper_VPERM(xt, xa, xt, xb);
+ return true;
+}
+
+static bool trans_XXPERMR(DisasContext *ctx, arg_XX3 *a)
+{
+ TCGv_ptr xt, xa, xb;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VSX(ctx);
+
+ xt = gen_vsr_ptr(a->xt);
+ xa = gen_vsr_ptr(a->xa);
+ xb = gen_vsr_ptr(a->xb);
+
+ gen_helper_VPERMR(xt, xa, xt, xb);
+ return true;
+}
+
+static bool trans_XXPERMDI(DisasContext *ctx, arg_XX3_dm *a)
+{
+ TCGv_i64 t0, t1;
+
+ REQUIRE_INSNS_FLAGS2(ctx, VSX);
+ REQUIRE_VSX(ctx);
+
+ t0 = tcg_temp_new_i64();
+
+ if (unlikely(a->xt == a->xa || a->xt == a->xb)) {
+ t1 = tcg_temp_new_i64();
+
+ get_cpu_vsr(t0, a->xa, (a->dm & 2) == 0);
+ get_cpu_vsr(t1, a->xb, (a->dm & 1) == 0);
+
+ set_cpu_vsr(a->xt, t0, true);
+ set_cpu_vsr(a->xt, t1, false);
+ } else {
+ get_cpu_vsr(t0, a->xa, (a->dm & 2) == 0);
+ set_cpu_vsr(a->xt, t0, true);
+
+ get_cpu_vsr(t0, a->xb, (a->dm & 1) == 0);
+ set_cpu_vsr(a->xt, t0, false);
+ }
+ return true;
+}
+
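Note: trans_XXPERMDI selects one doubleword from XA and one from XB according to the two DM bits; the aliasing branch only ensures both sources are read before XT is written when XT overlaps XA or XB. The selection itself reduces to (a sketch with invented names, operating on host copies of the registers):

    #include <stdint.h>

    typedef struct { uint64_t hi, lo; } vsr128;   /* hi = doubleword 0 */

    /* xxpermdi XT,XA,XB,DM: pick one doubleword from each source. */
    static vsr128 xxpermdi(vsr128 xa, vsr128 xb, unsigned dm)
    {
        vsr128 xt;
        xt.hi = (dm & 2) ? xa.lo : xa.hi;
        xt.lo = (dm & 1) ? xb.lo : xb.hi;
        return xt;
    }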
+static bool trans_XXPERMX(DisasContext *ctx, arg_8RR_XX4_uim3 *a)
+{
+ TCGv_ptr xt, xa, xb, xc;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+
+ xt = gen_vsr_ptr(a->xt);
+ xa = gen_vsr_ptr(a->xa);
+ xb = gen_vsr_ptr(a->xb);
+ xc = gen_vsr_ptr(a->xc);
+
+ gen_helper_XXPERMX(xt, xa, xb, xc, tcg_constant_tl(a->uim3));
+ return true;
+}
+
+typedef void (*xxgenpcv_genfn)(TCGv_ptr, TCGv_ptr);
+
+static bool do_xxgenpcv(DisasContext *ctx, arg_X_imm5 *a,
+ const xxgenpcv_genfn fn[4])
+{
+ TCGv_ptr xt, vrb;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+
+ if (a->imm & ~0x3) {
+ gen_invalid(ctx);
+ return true;
+ }
+
+ xt = gen_vsr_ptr(a->xt);
+ vrb = gen_avr_ptr(a->vrb);
+
+ fn[a->imm](xt, vrb);
+ return true;
+}
+
+#define XXGENPCV(NAME) \
+ static bool trans_##NAME(DisasContext *ctx, arg_X_imm5 *a) \
+ { \
+ static const xxgenpcv_genfn fn[4] = { \
+ gen_helper_##NAME##_be_exp, \
+ gen_helper_##NAME##_be_comp, \
+ gen_helper_##NAME##_le_exp, \
+ gen_helper_##NAME##_le_comp, \
+ }; \
+ return do_xxgenpcv(ctx, a, fn); \
+ }
+
+XXGENPCV(XXGENPCVBM)
+XXGENPCV(XXGENPCVHM)
+XXGENPCV(XXGENPCVWM)
+XXGENPCV(XXGENPCVDM)
+#undef XXGENPCV
+
+static bool do_xsmadd(DisasContext *ctx, int tgt, int src1, int src2, int src3,
+ void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
+{
+ TCGv_ptr t, s1, s2, s3;
+
+ t = gen_vsr_ptr(tgt);
+ s1 = gen_vsr_ptr(src1);
+ s2 = gen_vsr_ptr(src2);
+ s3 = gen_vsr_ptr(src3);
+
+ gen_helper(tcg_env, t, s1, s2, s3);
+ return true;
+}
+
+static bool do_xsmadd_XX3(DisasContext *ctx, arg_XX3 *a, bool type_a,
+ void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
+{
+ REQUIRE_VSX(ctx);
+
+ if (type_a) {
+ return do_xsmadd(ctx, a->xt, a->xa, a->xt, a->xb, gen_helper);
+ }
+ return do_xsmadd(ctx, a->xt, a->xa, a->xb, a->xt, gen_helper);
+}
+
+TRANS_FLAGS2(VSX, XSMADDADP, do_xsmadd_XX3, true, gen_helper_XSMADDDP)
+TRANS_FLAGS2(VSX, XSMADDMDP, do_xsmadd_XX3, false, gen_helper_XSMADDDP)
+TRANS_FLAGS2(VSX, XSMSUBADP, do_xsmadd_XX3, true, gen_helper_XSMSUBDP)
+TRANS_FLAGS2(VSX, XSMSUBMDP, do_xsmadd_XX3, false, gen_helper_XSMSUBDP)
+TRANS_FLAGS2(VSX, XSNMADDADP, do_xsmadd_XX3, true, gen_helper_XSNMADDDP)
+TRANS_FLAGS2(VSX, XSNMADDMDP, do_xsmadd_XX3, false, gen_helper_XSNMADDDP)
+TRANS_FLAGS2(VSX, XSNMSUBADP, do_xsmadd_XX3, true, gen_helper_XSNMSUBDP)
+TRANS_FLAGS2(VSX, XSNMSUBMDP, do_xsmadd_XX3, false, gen_helper_XSNMSUBDP)
+TRANS_FLAGS2(VSX207, XSMADDASP, do_xsmadd_XX3, true, gen_helper_XSMADDSP)
+TRANS_FLAGS2(VSX207, XSMADDMSP, do_xsmadd_XX3, false, gen_helper_XSMADDSP)
+TRANS_FLAGS2(VSX207, XSMSUBASP, do_xsmadd_XX3, true, gen_helper_XSMSUBSP)
+TRANS_FLAGS2(VSX207, XSMSUBMSP, do_xsmadd_XX3, false, gen_helper_XSMSUBSP)
+TRANS_FLAGS2(VSX207, XSNMADDASP, do_xsmadd_XX3, true, gen_helper_XSNMADDSP)
+TRANS_FLAGS2(VSX207, XSNMADDMSP, do_xsmadd_XX3, false, gen_helper_XSNMADDSP)
+TRANS_FLAGS2(VSX207, XSNMSUBASP, do_xsmadd_XX3, true, gen_helper_XSNMSUBSP)
+TRANS_FLAGS2(VSX207, XSNMSUBMSP, do_xsmadd_XX3, false, gen_helper_XSNMSUBSP)
+
+static bool do_xsmadd_X(DisasContext *ctx, arg_X_rc *a,
+ void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr),
+ void (*gen_helper_ro)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
+{
+ int vrt, vra, vrb;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VSX(ctx);
+
+ vrt = a->rt + 32;
+ vra = a->ra + 32;
+ vrb = a->rb + 32;
+
+ if (a->rc) {
+ return do_xsmadd(ctx, vrt, vra, vrt, vrb, gen_helper_ro);
+ }
+
+ return do_xsmadd(ctx, vrt, vra, vrt, vrb, gen_helper);
+}
+
+TRANS(XSMADDQP, do_xsmadd_X, gen_helper_XSMADDQP, gen_helper_XSMADDQPO)
+TRANS(XSMSUBQP, do_xsmadd_X, gen_helper_XSMSUBQP, gen_helper_XSMSUBQPO)
+TRANS(XSNMADDQP, do_xsmadd_X, gen_helper_XSNMADDQP, gen_helper_XSNMADDQPO)
+TRANS(XSNMSUBQP, do_xsmadd_X, gen_helper_XSNMSUBQP, gen_helper_XSNMSUBQPO)
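Note: do_xsmadd_XX3 folds each A-form/M-form pair onto one shared helper by reordering the three sources. Judging from the AxT + B / AxB + T comments kept in GEN_VSX_HELPER_VSX_MADD just below, the helpers are assumed to compute src1 * src3 + src2, so the A-form (target acts as the addend) passes (XA, XT, XB) and the M-form (target acts as a multiplicand) passes (XA, XB, XT). A pseudo-C sketch under that assumption, with fma() standing in for the softfloat call and FPSCR updates ignored:

    #include <math.h>

    /* Assumed helper shape, deduced from the comments below:
     * result = s1 * s3 + s2 (fma() is only a stand-in here). */
    static double madd_helper(double s1, double s2, double s3)
    {
        return fma(s1, s3, s2);
    }

    /* A-form: xsmaddadp XT,XA,XB  =>  XT = XA * XB + XT */
    static double xsmaddadp(double xt, double xa, double xb)
    {
        return madd_helper(xa, xt, xb);   /* do_xsmadd(xt, xa, xt, xb) */
    }

    /* M-form: xsmaddmdp XT,XA,XB  =>  XT = XA * XT + XB */
    static double xsmaddmdp(double xt, double xa, double xb)
    {
        return madd_helper(xa, xb, xt);   /* do_xsmadd(xt, xa, xb, xt) */
    }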
#define GEN_VSX_HELPER_VSX_MADD(name, op1, aop, mop, inval, type) \
static void gen_##name(DisasContext *ctx) \
{ \
- TCGv_ptr xt, xa, b, c; \
+ TCGv_ptr xt, s1, s2, s3; \
if (unlikely(!ctx->vsx_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_VSXU); \
return; \
} \
xt = gen_vsr_ptr(xT(ctx->opcode)); \
- xa = gen_vsr_ptr(xA(ctx->opcode)); \
+ s1 = gen_vsr_ptr(xA(ctx->opcode)); \
if (ctx->opcode & PPC_BIT32(25)) { \
/* \
* AxT + B \
*/ \
- b = gen_vsr_ptr(xT(ctx->opcode)); \
- c = gen_vsr_ptr(xB(ctx->opcode)); \
+ s2 = gen_vsr_ptr(xB(ctx->opcode)); \
+ s3 = gen_vsr_ptr(xT(ctx->opcode)); \
} else { \
/* \
* AxB + T \
*/ \
- b = gen_vsr_ptr(xB(ctx->opcode)); \
- c = gen_vsr_ptr(xT(ctx->opcode)); \
+ s2 = gen_vsr_ptr(xT(ctx->opcode)); \
+ s3 = gen_vsr_ptr(xB(ctx->opcode)); \
} \
- gen_helper_##name(cpu_env, xt, xa, b, c); \
- tcg_temp_free_ptr(xt); \
- tcg_temp_free_ptr(xa); \
- tcg_temp_free_ptr(b); \
- tcg_temp_free_ptr(c); \
-}
-
-GEN_VSX_HELPER_VSX_MADD(xsmadddp, 0x04, 0x04, 0x05, 0, PPC2_VSX)
-GEN_VSX_HELPER_VSX_MADD(xsmsubdp, 0x04, 0x06, 0x07, 0, PPC2_VSX)
-GEN_VSX_HELPER_VSX_MADD(xsnmadddp, 0x04, 0x14, 0x15, 0, PPC2_VSX)
-GEN_VSX_HELPER_VSX_MADD(xsnmsubdp, 0x04, 0x16, 0x17, 0, PPC2_VSX)
-GEN_VSX_HELPER_VSX_MADD(xsmaddsp, 0x04, 0x00, 0x01, 0, PPC2_VSX207)
-GEN_VSX_HELPER_VSX_MADD(xsmsubsp, 0x04, 0x02, 0x03, 0, PPC2_VSX207)
-GEN_VSX_HELPER_VSX_MADD(xsnmaddsp, 0x04, 0x10, 0x11, 0, PPC2_VSX207)
-GEN_VSX_HELPER_VSX_MADD(xsnmsubsp, 0x04, 0x12, 0x13, 0, PPC2_VSX207)
+ gen_helper_##name(tcg_env, xt, s1, s2, s3); \
+}
+
GEN_VSX_HELPER_VSX_MADD(xvmadddp, 0x04, 0x0C, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xvmsubdp, 0x04, 0x0E, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_VSX_MADD(xvnmadddp, 0x04, 0x1C, 0x1D, 0, PPC2_VSX)
@@ -1390,18 +1527,13 @@ static void gen_xxbrd(DisasContext *ctx)
xtl = tcg_temp_new_i64();
xbh = tcg_temp_new_i64();
xbl = tcg_temp_new_i64();
- get_cpu_vsrh(xbh, xB(ctx->opcode));
- get_cpu_vsrl(xbl, xB(ctx->opcode));
+ get_cpu_vsr(xbh, xB(ctx->opcode), true);
+ get_cpu_vsr(xbl, xB(ctx->opcode), false);
tcg_gen_bswap64_i64(xth, xbh);
tcg_gen_bswap64_i64(xtl, xbl);
- set_cpu_vsrh(xT(ctx->opcode), xth);
- set_cpu_vsrl(xT(ctx->opcode), xtl);
-
- tcg_temp_free_i64(xth);
- tcg_temp_free_i64(xtl);
- tcg_temp_free_i64(xbh);
- tcg_temp_free_i64(xbl);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
}
static void gen_xxbrh(DisasContext *ctx)
@@ -1419,17 +1551,12 @@ static void gen_xxbrh(DisasContext *ctx)
xtl = tcg_temp_new_i64();
xbh = tcg_temp_new_i64();
xbl = tcg_temp_new_i64();
- get_cpu_vsrh(xbh, xB(ctx->opcode));
- get_cpu_vsrl(xbl, xB(ctx->opcode));
+ get_cpu_vsr(xbh, xB(ctx->opcode), true);
+ get_cpu_vsr(xbl, xB(ctx->opcode), false);
gen_bswap16x8(xth, xtl, xbh, xbl);
- set_cpu_vsrh(xT(ctx->opcode), xth);
- set_cpu_vsrl(xT(ctx->opcode), xtl);
-
- tcg_temp_free_i64(xth);
- tcg_temp_free_i64(xtl);
- tcg_temp_free_i64(xbh);
- tcg_temp_free_i64(xbl);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
}
static void gen_xxbrq(DisasContext *ctx)
@@ -1448,21 +1575,15 @@ static void gen_xxbrq(DisasContext *ctx)
xtl = tcg_temp_new_i64();
xbh = tcg_temp_new_i64();
xbl = tcg_temp_new_i64();
- get_cpu_vsrh(xbh, xB(ctx->opcode));
- get_cpu_vsrl(xbl, xB(ctx->opcode));
+ get_cpu_vsr(xbh, xB(ctx->opcode), true);
+ get_cpu_vsr(xbl, xB(ctx->opcode), false);
t0 = tcg_temp_new_i64();
tcg_gen_bswap64_i64(t0, xbl);
tcg_gen_bswap64_i64(xtl, xbh);
- set_cpu_vsrl(xT(ctx->opcode), xtl);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
tcg_gen_mov_i64(xth, t0);
- set_cpu_vsrh(xT(ctx->opcode), xth);
-
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(xth);
- tcg_temp_free_i64(xtl);
- tcg_temp_free_i64(xbh);
- tcg_temp_free_i64(xbl);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
}
static void gen_xxbrw(DisasContext *ctx)
@@ -1480,17 +1601,12 @@ static void gen_xxbrw(DisasContext *ctx)
xtl = tcg_temp_new_i64();
xbh = tcg_temp_new_i64();
xbl = tcg_temp_new_i64();
- get_cpu_vsrh(xbh, xB(ctx->opcode));
- get_cpu_vsrl(xbl, xB(ctx->opcode));
+ get_cpu_vsr(xbh, xB(ctx->opcode), true);
+ get_cpu_vsr(xbl, xB(ctx->opcode), false);
gen_bswap32x4(xth, xtl, xbh, xbl);
- set_cpu_vsrh(xT(ctx->opcode), xth);
- set_cpu_vsrl(xT(ctx->opcode), xtl);
-
- tcg_temp_free_i64(xth);
- tcg_temp_free_i64(xtl);
- tcg_temp_free_i64(xbh);
- tcg_temp_free_i64(xbl);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
}
#define VSX_LOGICAL(name, vece, tcg_op) \
@@ -1527,89 +1643,173 @@ static void glue(gen_, name)(DisasContext *ctx) \
b0 = tcg_temp_new_i64(); \
b1 = tcg_temp_new_i64(); \
tmp = tcg_temp_new_i64(); \
- if (high) { \
- get_cpu_vsrh(a0, xA(ctx->opcode)); \
- get_cpu_vsrh(a1, xA(ctx->opcode)); \
- get_cpu_vsrh(b0, xB(ctx->opcode)); \
- get_cpu_vsrh(b1, xB(ctx->opcode)); \
- } else { \
- get_cpu_vsrl(a0, xA(ctx->opcode)); \
- get_cpu_vsrl(a1, xA(ctx->opcode)); \
- get_cpu_vsrl(b0, xB(ctx->opcode)); \
- get_cpu_vsrl(b1, xB(ctx->opcode)); \
- } \
+ get_cpu_vsr(a0, xA(ctx->opcode), high); \
+ get_cpu_vsr(a1, xA(ctx->opcode), high); \
+ get_cpu_vsr(b0, xB(ctx->opcode), high); \
+ get_cpu_vsr(b1, xB(ctx->opcode), high); \
tcg_gen_shri_i64(a0, a0, 32); \
tcg_gen_shri_i64(b0, b0, 32); \
tcg_gen_deposit_i64(tmp, b0, a0, 32, 32); \
- set_cpu_vsrh(xT(ctx->opcode), tmp); \
+ set_cpu_vsr(xT(ctx->opcode), tmp, true); \
tcg_gen_deposit_i64(tmp, b1, a1, 32, 32); \
- set_cpu_vsrl(xT(ctx->opcode), tmp); \
- tcg_temp_free_i64(a0); \
- tcg_temp_free_i64(a1); \
- tcg_temp_free_i64(b0); \
- tcg_temp_free_i64(b1); \
- tcg_temp_free_i64(tmp); \
+ set_cpu_vsr(xT(ctx->opcode), tmp, false); \
}
VSX_XXMRG(xxmrghw, 1)
VSX_XXMRG(xxmrglw, 0)
-static void gen_xxsel(DisasContext *ctx)
+static bool trans_XXSEL(DisasContext *ctx, arg_XX4 *a)
{
- int rt = xT(ctx->opcode);
- int ra = xA(ctx->opcode);
- int rb = xB(ctx->opcode);
- int rc = xC(ctx->opcode);
+ REQUIRE_INSNS_FLAGS2(ctx, VSX);
+ REQUIRE_VSX(ctx);
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
- tcg_gen_gvec_bitsel(MO_64, vsr_full_offset(rt), vsr_full_offset(rc),
- vsr_full_offset(rb), vsr_full_offset(ra), 16, 16);
+ tcg_gen_gvec_bitsel(MO_64, vsr_full_offset(a->xt), vsr_full_offset(a->xc),
+ vsr_full_offset(a->xb), vsr_full_offset(a->xa), 16, 16);
+
+ return true;
}
-static void gen_xxspltw(DisasContext *ctx)
+static bool trans_XXSPLTW(DisasContext *ctx, arg_XX2_uim *a)
{
- int rt = xT(ctx->opcode);
- int rb = xB(ctx->opcode);
- int uim = UIM(ctx->opcode);
int tofs, bofs;
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
+ REQUIRE_VSX(ctx);
- tofs = vsr_full_offset(rt);
- bofs = vsr_full_offset(rb);
- bofs += uim << MO_32;
-#ifndef HOST_WORDS_BIG_ENDIAN
+ tofs = vsr_full_offset(a->xt);
+ bofs = vsr_full_offset(a->xb);
+ bofs += a->uim << MO_32;
+#if !HOST_BIG_ENDIAN
bofs ^= 8 | 4;
#endif
tcg_gen_gvec_dup_mem(MO_32, tofs, bofs, 16, 16);
+ return true;
}
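Note: trans_XXSPLTW hands tcg_gen_gvec_dup_mem a byte offset to the selected word; on little-endian hosts the 128-bit register image has its doublewords, and the words within them, stored swapped, which the bofs ^= 8 | 4 fixup compensates for. A small host-side sketch of that index transformation (names invented for the example):

    #include <stdio.h>

    /* Byte offset, inside the 16-byte VSR image, of architectural word
     * 'uim' (0 = most significant word).  Mirrors "bofs += uim << MO_32"
     * plus the "bofs ^= 8 | 4" fixup; host_le stands in for
     * !HOST_BIG_ENDIAN. */
    static int vsr_word_offset(int uim, int host_le)
    {
        int off = uim * 4;
        if (host_le) {
            off ^= 8 | 4;   /* doublewords and words are stored swapped */
        }
        return off;
    }

    int main(void)
    {
        /* on a little-endian host, word 0 lives at byte offset 12 */
        printf("%d\n", vsr_word_offset(0, 1));
        return 0;
    }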
#define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff))
-static void gen_xxspltib(DisasContext *ctx)
+static bool trans_XXSPLTIB(DisasContext *ctx, arg_X_imm8 *a)
{
- uint8_t uim8 = IMM8(ctx->opcode);
- int rt = xT(ctx->opcode);
+ if (a->xt < 32) {
+ REQUIRE_VSX(ctx);
+ } else {
+ REQUIRE_VECTOR(ctx);
+ }
+ tcg_gen_gvec_dup_imm(MO_8, vsr_full_offset(a->xt), 16, 16, a->imm);
+ return true;
+}
- if (rt < 32) {
- if (unlikely(!ctx->vsx_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VSXU);
- return;
- }
+static bool trans_XXSPLTIW(DisasContext *ctx, arg_8RR_D *a)
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+
+ tcg_gen_gvec_dup_imm(MO_32, vsr_full_offset(a->xt), 16, 16, a->si);
+
+ return true;
+}
+
+static bool trans_XXSPLTIDP(DisasContext *ctx, arg_8RR_D *a)
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+
+ tcg_gen_gvec_dup_imm(MO_64, vsr_full_offset(a->xt), 16, 16,
+ helper_todouble(a->si));
+ return true;
+}
+
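Note: XXSPLTIDP takes a 32-bit single-precision image as its immediate and splats the double-precision equivalent, which is what helper_todouble produces at translation time. A simplified sketch of that widening for normal numbers only (the real helper also handles zeros, denormals, Inf and NaN):

    #include <stdint.h>

    /* Widen a single-precision bit pattern to the double-precision bit
     * pattern that gets splatted.  Normal numbers only; the specials are
     * deliberately left out of this sketch. */
    static uint64_t sp_bits_to_dp_bits_normal(uint32_t sp)
    {
        uint64_t sign = (uint64_t)(sp >> 31) << 63;
        uint64_t exp  = (uint64_t)(((sp >> 23) & 0xff) - 127 + 1023) << 52;
        uint64_t frac = (uint64_t)(sp & 0x7fffff) << 29;
        return sign | exp | frac;
    }
    /* sp_bits_to_dp_bits_normal(0x3f800000) == 0x3ff0000000000000 (1.0f -> 1.0) */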
+static bool trans_XXSPLTI32DX(DisasContext *ctx, arg_8RR_D_IX *a)
+{
+ TCGv_i32 imm;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+
+ imm = tcg_constant_i32(a->si);
+
+ tcg_gen_st_i32(imm, tcg_env,
+ offsetof(CPUPPCState, vsr[a->xt].VsrW(0 + a->ix)));
+ tcg_gen_st_i32(imm, tcg_env,
+ offsetof(CPUPPCState, vsr[a->xt].VsrW(2 + a->ix)));
+
+ return true;
+}
+
+static bool trans_LXVKQ(DisasContext *ctx, arg_X_uim5 *a)
+{
+ static const uint64_t values[32] = {
+ 0, /* Unspecified */
+ 0x3FFF000000000000llu, /* QP +1.0 */
+ 0x4000000000000000llu, /* QP +2.0 */
+ 0x4000800000000000llu, /* QP +3.0 */
+ 0x4001000000000000llu, /* QP +4.0 */
+ 0x4001400000000000llu, /* QP +5.0 */
+ 0x4001800000000000llu, /* QP +6.0 */
+ 0x4001C00000000000llu, /* QP +7.0 */
+ 0x7FFF000000000000llu, /* QP +Inf */
+ 0x7FFF800000000000llu, /* QP dQNaN */
+ 0, /* Unspecified */
+ 0, /* Unspecified */
+ 0, /* Unspecified */
+ 0, /* Unspecified */
+ 0, /* Unspecified */
+ 0, /* Unspecified */
+ 0x8000000000000000llu, /* QP -0.0 */
+ 0xBFFF000000000000llu, /* QP -1.0 */
+ 0xC000000000000000llu, /* QP -2.0 */
+ 0xC000800000000000llu, /* QP -3.0 */
+ 0xC001000000000000llu, /* QP -4.0 */
+ 0xC001400000000000llu, /* QP -5.0 */
+ 0xC001800000000000llu, /* QP -6.0 */
+ 0xC001C00000000000llu, /* QP -7.0 */
+ 0xFFFF000000000000llu, /* QP -Inf */
+ };
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+
+ if (values[a->uim]) {
+ set_cpu_vsr(a->xt, tcg_constant_i64(0x0), false);
+ set_cpu_vsr(a->xt, tcg_constant_i64(values[a->uim]), true);
} else {
- if (unlikely(!ctx->altivec_enabled)) {
- gen_exception(ctx, POWERPC_EXCP_VPU);
- return;
- }
+ gen_invalid(ctx);
}
- tcg_gen_gvec_dup_imm(MO_8, vsr_full_offset(rt), 16, 16, uim8);
+
+ return true;
+}
+
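Note: the LXVKQ table stores only the high doubleword of each quad-precision constant; the low 64 fraction bits are always zero, which is why the false half is simply cleared. One entry decoded shows how the values are built (plain arithmetic, not patch code):

    #include <stdint.h>

    /* QP +1.0 = 0x3FFF000000000000 in the high doubleword:
     *   sign     = 0
     *   exponent = 0x3FFF (the IEEE binary128 bias, i.e. 2^0)
     *   fraction = 0 (implicit leading 1), low doubleword also 0
     * Negative entries just set bit 63: QP -1.0 = 0xBFFF000000000000. */
    static uint64_t qp_high(unsigned sign, unsigned exp15, uint64_t frac48)
    {
        return ((uint64_t)sign << 63) | ((uint64_t)exp15 << 48) | frac48;
    }
    /* qp_high(0, 0x3FFF, 0) == 0x3FFF000000000000  (QP +1.0)
     * qp_high(0, 0x7FFF, 0) == 0x7FFF000000000000  (QP +Inf) */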
+static bool trans_XVTLSBB(DisasContext *ctx, arg_XX2_bf_xb *a)
+{
+ TCGv_i64 xb, t0, t1, all_true, all_false, mask, zero;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+
+ xb = tcg_temp_new_i64();
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ all_true = tcg_temp_new_i64();
+ all_false = tcg_temp_new_i64();
+ mask = tcg_constant_i64(dup_const(MO_8, 1));
+ zero = tcg_constant_i64(0);
+
+ get_cpu_vsr(xb, a->xb, true);
+ tcg_gen_and_i64(t0, mask, xb);
+ get_cpu_vsr(xb, a->xb, false);
+ tcg_gen_and_i64(t1, mask, xb);
+
+ tcg_gen_or_i64(all_false, t0, t1);
+ tcg_gen_and_i64(all_true, t0, t1);
+
+ tcg_gen_setcond_i64(TCG_COND_EQ, all_false, all_false, zero);
+ tcg_gen_shli_i64(all_false, all_false, 1);
+ tcg_gen_setcond_i64(TCG_COND_EQ, all_true, all_true, mask);
+ tcg_gen_shli_i64(all_true, all_true, 3);
+
+ tcg_gen_or_i64(t0, all_false, all_true);
+ tcg_gen_extrl_i64_i32(cpu_crf[a->bf], t0);
+ return true;
}
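Note: in trans_XVTLSBB the two setconds compute "every byte LSB set" and "no byte LSB set"; shifting them to bit positions 3 and 1 of the 4-bit CR image yields 0b1000 for all ones, 0b0010 for all zeros and 0b0000 otherwise. A scalar sketch of the same reduction (invented names, operating on the two register halves):

    #include <stdint.h>

    #define LSB_MASK 0x0101010101010101ull   /* dup_const(MO_8, 1) */

    /* 4-bit CR field value xvtlsbb produces for a 128-bit register
     * given as its two 64-bit halves. */
    static unsigned xvtlsbb_crf(uint64_t hi, uint64_t lo)
    {
        uint64_t t0 = hi & LSB_MASK;
        uint64_t t1 = lo & LSB_MASK;
        unsigned all_true  = ((t0 & t1) == LSB_MASK);  /* every byte LSB == 1 */
        unsigned all_false = ((t0 | t1) == 0);         /* every byte LSB == 0 */
        return (all_true << 3) | (all_false << 1);
    }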
static void gen_xxsldwi(DisasContext *ctx)
@@ -1624,90 +1824,75 @@ static void gen_xxsldwi(DisasContext *ctx)
switch (SHW(ctx->opcode)) {
case 0: {
- get_cpu_vsrh(xth, xA(ctx->opcode));
- get_cpu_vsrl(xtl, xA(ctx->opcode));
+ get_cpu_vsr(xth, xA(ctx->opcode), true);
+ get_cpu_vsr(xtl, xA(ctx->opcode), false);
break;
}
case 1: {
TCGv_i64 t0 = tcg_temp_new_i64();
- get_cpu_vsrh(xth, xA(ctx->opcode));
+ get_cpu_vsr(xth, xA(ctx->opcode), true);
tcg_gen_shli_i64(xth, xth, 32);
- get_cpu_vsrl(t0, xA(ctx->opcode));
+ get_cpu_vsr(t0, xA(ctx->opcode), false);
tcg_gen_shri_i64(t0, t0, 32);
tcg_gen_or_i64(xth, xth, t0);
- get_cpu_vsrl(xtl, xA(ctx->opcode));
+ get_cpu_vsr(xtl, xA(ctx->opcode), false);
tcg_gen_shli_i64(xtl, xtl, 32);
- get_cpu_vsrh(t0, xB(ctx->opcode));
+ get_cpu_vsr(t0, xB(ctx->opcode), true);
tcg_gen_shri_i64(t0, t0, 32);
tcg_gen_or_i64(xtl, xtl, t0);
- tcg_temp_free_i64(t0);
break;
}
case 2: {
- get_cpu_vsrl(xth, xA(ctx->opcode));
- get_cpu_vsrh(xtl, xB(ctx->opcode));
+ get_cpu_vsr(xth, xA(ctx->opcode), false);
+ get_cpu_vsr(xtl, xB(ctx->opcode), true);
break;
}
case 3: {
TCGv_i64 t0 = tcg_temp_new_i64();
- get_cpu_vsrl(xth, xA(ctx->opcode));
+ get_cpu_vsr(xth, xA(ctx->opcode), false);
tcg_gen_shli_i64(xth, xth, 32);
- get_cpu_vsrh(t0, xB(ctx->opcode));
+ get_cpu_vsr(t0, xB(ctx->opcode), true);
tcg_gen_shri_i64(t0, t0, 32);
tcg_gen_or_i64(xth, xth, t0);
- get_cpu_vsrh(xtl, xB(ctx->opcode));
+ get_cpu_vsr(xtl, xB(ctx->opcode), true);
tcg_gen_shli_i64(xtl, xtl, 32);
- get_cpu_vsrl(t0, xB(ctx->opcode));
+ get_cpu_vsr(t0, xB(ctx->opcode), false);
tcg_gen_shri_i64(t0, t0, 32);
tcg_gen_or_i64(xtl, xtl, t0);
- tcg_temp_free_i64(t0);
break;
}
}
- set_cpu_vsrh(xT(ctx->opcode), xth);
- set_cpu_vsrl(xT(ctx->opcode), xtl);
-
- tcg_temp_free_i64(xth);
- tcg_temp_free_i64(xtl);
-}
-
-#define VSX_EXTRACT_INSERT(name) \
-static void gen_##name(DisasContext *ctx) \
-{ \
- TCGv_ptr xt, xb; \
- TCGv_i32 t0; \
- TCGv_i64 t1; \
- uint8_t uimm = UIMM4(ctx->opcode); \
- \
- if (unlikely(!ctx->vsx_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VSXU); \
- return; \
- } \
- xt = gen_vsr_ptr(xT(ctx->opcode)); \
- xb = gen_vsr_ptr(xB(ctx->opcode)); \
- t0 = tcg_temp_new_i32(); \
- t1 = tcg_temp_new_i64(); \
- /* \
- * uimm > 15 out of bound and for \
- * uimm > 12 handle as per hardware in helper \
- */ \
- if (uimm > 15) { \
- tcg_gen_movi_i64(t1, 0); \
- set_cpu_vsrh(xT(ctx->opcode), t1); \
- set_cpu_vsrl(xT(ctx->opcode), t1); \
- return; \
- } \
- tcg_gen_movi_i32(t0, uimm); \
- gen_helper_##name(cpu_env, xt, xb, t0); \
- tcg_temp_free_ptr(xb); \
- tcg_temp_free_ptr(xt); \
- tcg_temp_free_i32(t0); \
- tcg_temp_free_i64(t1); \
-}
-
-VSX_EXTRACT_INSERT(xxextractuw)
-VSX_EXTRACT_INSERT(xxinsertw)
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
+}
+
+static bool do_vsx_extract_insert(DisasContext *ctx, arg_XX2_uim *a,
+ void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i32))
+{
+ TCGv_i64 zero = tcg_constant_i64(0);
+ TCGv_ptr xt, xb;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VSX(ctx);
+
+ /*
+     * uim > 15 is out of bounds; uim > 12 is handled
+     * as per hardware in the helper
+ */
+ if (a->uim > 15) {
+ set_cpu_vsr(a->xt, zero, true);
+ set_cpu_vsr(a->xt, zero, false);
+ } else {
+ xt = gen_vsr_ptr(a->xt);
+ xb = gen_vsr_ptr(a->xb);
+ gen_helper(xt, xb, tcg_constant_i32(a->uim));
+ }
+ return true;
+}
+
+TRANS(XXEXTRACTUW, do_vsx_extract_insert, gen_helper_XXEXTRACTUW)
+TRANS(XXINSERTW, do_vsx_extract_insert, gen_helper_XXINSERTW)
#ifdef TARGET_PPC64
static void gen_xsxexpdp(DisasContext *ctx)
@@ -1719,9 +1904,8 @@ static void gen_xsxexpdp(DisasContext *ctx)
return;
}
t0 = tcg_temp_new_i64();
- get_cpu_vsrh(t0, xB(ctx->opcode));
+ get_cpu_vsr(t0, xB(ctx->opcode), true);
tcg_gen_extract_i64(rt, t0, 52, 11);
- tcg_temp_free_i64(t0);
}
static void gen_xsxexpqp(DisasContext *ctx)
@@ -1737,16 +1921,12 @@ static void gen_xsxexpqp(DisasContext *ctx)
xth = tcg_temp_new_i64();
xtl = tcg_temp_new_i64();
xbh = tcg_temp_new_i64();
- get_cpu_vsrh(xbh, rB(ctx->opcode) + 32);
+ get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true);
tcg_gen_extract_i64(xth, xbh, 48, 15);
- set_cpu_vsrh(rD(ctx->opcode) + 32, xth);
+ set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);
tcg_gen_movi_i64(xtl, 0);
- set_cpu_vsrl(rD(ctx->opcode) + 32, xtl);
-
- tcg_temp_free_i64(xbh);
- tcg_temp_free_i64(xth);
- tcg_temp_free_i64(xtl);
+ set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false);
}
static void gen_xsiexpdp(DisasContext *ctx)
@@ -1766,10 +1946,8 @@ static void gen_xsiexpdp(DisasContext *ctx)
tcg_gen_andi_i64(t0, rb, 0x7FF);
tcg_gen_shli_i64(t0, t0, 52);
tcg_gen_or_i64(xth, xth, t0);
- set_cpu_vsrh(xT(ctx->opcode), xth);
- /* dword[1] is undefined */
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(xth);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
+ set_cpu_vsr(xT(ctx->opcode), tcg_constant_i64(0), false);
}
static void gen_xsiexpqp(DisasContext *ctx)
@@ -1789,26 +1967,19 @@ static void gen_xsiexpqp(DisasContext *ctx)
xtl = tcg_temp_new_i64();
xah = tcg_temp_new_i64();
xal = tcg_temp_new_i64();
- get_cpu_vsrh(xah, rA(ctx->opcode) + 32);
- get_cpu_vsrl(xal, rA(ctx->opcode) + 32);
+ get_cpu_vsr(xah, rA(ctx->opcode) + 32, true);
+ get_cpu_vsr(xal, rA(ctx->opcode) + 32, false);
xbh = tcg_temp_new_i64();
- get_cpu_vsrh(xbh, rB(ctx->opcode) + 32);
+ get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true);
t0 = tcg_temp_new_i64();
tcg_gen_andi_i64(xth, xah, 0x8000FFFFFFFFFFFF);
tcg_gen_andi_i64(t0, xbh, 0x7FFF);
tcg_gen_shli_i64(t0, t0, 48);
tcg_gen_or_i64(xth, xth, t0);
- set_cpu_vsrh(rD(ctx->opcode) + 32, xth);
+ set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);
tcg_gen_mov_i64(xtl, xal);
- set_cpu_vsrl(rD(ctx->opcode) + 32, xtl);
-
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(xth);
- tcg_temp_free_i64(xtl);
- tcg_temp_free_i64(xah);
- tcg_temp_free_i64(xal);
- tcg_temp_free_i64(xbh);
+ set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false);
}
static void gen_xsxsigdp(DisasContext *ctx)
@@ -1823,22 +1994,16 @@ static void gen_xsxsigdp(DisasContext *ctx)
exp = tcg_temp_new_i64();
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
- zr = tcg_const_i64(0);
- nan = tcg_const_i64(2047);
+ zr = tcg_constant_i64(0);
+ nan = tcg_constant_i64(2047);
- get_cpu_vsrh(t1, xB(ctx->opcode));
+ get_cpu_vsr(t1, xB(ctx->opcode), true);
tcg_gen_extract_i64(exp, t1, 52, 11);
tcg_gen_movi_i64(t0, 0x0010000000000000);
tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
- get_cpu_vsrh(t1, xB(ctx->opcode));
+ get_cpu_vsr(t1, xB(ctx->opcode), true);
tcg_gen_deposit_i64(rt, t0, t1, 0, 52);
-
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(exp);
- tcg_temp_free_i64(zr);
- tcg_temp_free_i64(nan);
}
static void gen_xsxsigqp(DisasContext *ctx)
@@ -1857,30 +2022,21 @@ static void gen_xsxsigqp(DisasContext *ctx)
xtl = tcg_temp_new_i64();
xbh = tcg_temp_new_i64();
xbl = tcg_temp_new_i64();
- get_cpu_vsrh(xbh, rB(ctx->opcode) + 32);
- get_cpu_vsrl(xbl, rB(ctx->opcode) + 32);
+ get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true);
+ get_cpu_vsr(xbl, rB(ctx->opcode) + 32, false);
exp = tcg_temp_new_i64();
t0 = tcg_temp_new_i64();
- zr = tcg_const_i64(0);
- nan = tcg_const_i64(32767);
+ zr = tcg_constant_i64(0);
+ nan = tcg_constant_i64(32767);
tcg_gen_extract_i64(exp, xbh, 48, 15);
tcg_gen_movi_i64(t0, 0x0001000000000000);
tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
tcg_gen_deposit_i64(xth, t0, xbh, 0, 48);
- set_cpu_vsrh(rD(ctx->opcode) + 32, xth);
+ set_cpu_vsr(rD(ctx->opcode) + 32, xth, true);
tcg_gen_mov_i64(xtl, xbl);
- set_cpu_vsrl(rD(ctx->opcode) + 32, xtl);
-
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(exp);
- tcg_temp_free_i64(zr);
- tcg_temp_free_i64(nan);
- tcg_temp_free_i64(xth);
- tcg_temp_free_i64(xtl);
- tcg_temp_free_i64(xbh);
- tcg_temp_free_i64(xbl);
+ set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false);
}
#endif
@@ -1904,30 +2060,22 @@ static void gen_xviexpsp(DisasContext *ctx)
xal = tcg_temp_new_i64();
xbh = tcg_temp_new_i64();
xbl = tcg_temp_new_i64();
- get_cpu_vsrh(xah, xA(ctx->opcode));
- get_cpu_vsrl(xal, xA(ctx->opcode));
- get_cpu_vsrh(xbh, xB(ctx->opcode));
- get_cpu_vsrl(xbl, xB(ctx->opcode));
+ get_cpu_vsr(xah, xA(ctx->opcode), true);
+ get_cpu_vsr(xal, xA(ctx->opcode), false);
+ get_cpu_vsr(xbh, xB(ctx->opcode), true);
+ get_cpu_vsr(xbl, xB(ctx->opcode), false);
t0 = tcg_temp_new_i64();
tcg_gen_andi_i64(xth, xah, 0x807FFFFF807FFFFF);
tcg_gen_andi_i64(t0, xbh, 0xFF000000FF);
tcg_gen_shli_i64(t0, t0, 23);
tcg_gen_or_i64(xth, xth, t0);
- set_cpu_vsrh(xT(ctx->opcode), xth);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
tcg_gen_andi_i64(xtl, xal, 0x807FFFFF807FFFFF);
tcg_gen_andi_i64(t0, xbl, 0xFF000000FF);
tcg_gen_shli_i64(t0, t0, 23);
tcg_gen_or_i64(xtl, xtl, t0);
- set_cpu_vsrl(xT(ctx->opcode), xtl);
-
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(xth);
- tcg_temp_free_i64(xtl);
- tcg_temp_free_i64(xah);
- tcg_temp_free_i64(xal);
- tcg_temp_free_i64(xbh);
- tcg_temp_free_i64(xbl);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
}
static void gen_xviexpdp(DisasContext *ctx)
@@ -1949,23 +2097,16 @@ static void gen_xviexpdp(DisasContext *ctx)
xal = tcg_temp_new_i64();
xbh = tcg_temp_new_i64();
xbl = tcg_temp_new_i64();
- get_cpu_vsrh(xah, xA(ctx->opcode));
- get_cpu_vsrl(xal, xA(ctx->opcode));
- get_cpu_vsrh(xbh, xB(ctx->opcode));
- get_cpu_vsrl(xbl, xB(ctx->opcode));
+ get_cpu_vsr(xah, xA(ctx->opcode), true);
+ get_cpu_vsr(xal, xA(ctx->opcode), false);
+ get_cpu_vsr(xbh, xB(ctx->opcode), true);
+ get_cpu_vsr(xbl, xB(ctx->opcode), false);
tcg_gen_deposit_i64(xth, xah, xbh, 52, 11);
- set_cpu_vsrh(xT(ctx->opcode), xth);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
tcg_gen_deposit_i64(xtl, xal, xbl, 52, 11);
- set_cpu_vsrl(xT(ctx->opcode), xtl);
-
- tcg_temp_free_i64(xth);
- tcg_temp_free_i64(xtl);
- tcg_temp_free_i64(xah);
- tcg_temp_free_i64(xal);
- tcg_temp_free_i64(xbh);
- tcg_temp_free_i64(xbl);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
}
static void gen_xvxexpsp(DisasContext *ctx)
@@ -1983,20 +2124,15 @@ static void gen_xvxexpsp(DisasContext *ctx)
xtl = tcg_temp_new_i64();
xbh = tcg_temp_new_i64();
xbl = tcg_temp_new_i64();
- get_cpu_vsrh(xbh, xB(ctx->opcode));
- get_cpu_vsrl(xbl, xB(ctx->opcode));
+ get_cpu_vsr(xbh, xB(ctx->opcode), true);
+ get_cpu_vsr(xbl, xB(ctx->opcode), false);
tcg_gen_shri_i64(xth, xbh, 23);
tcg_gen_andi_i64(xth, xth, 0xFF000000FF);
- set_cpu_vsrh(xT(ctx->opcode), xth);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
tcg_gen_shri_i64(xtl, xbl, 23);
tcg_gen_andi_i64(xtl, xtl, 0xFF000000FF);
- set_cpu_vsrl(xT(ctx->opcode), xtl);
-
- tcg_temp_free_i64(xth);
- tcg_temp_free_i64(xtl);
- tcg_temp_free_i64(xbh);
- tcg_temp_free_i64(xbl);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
}
static void gen_xvxexpdp(DisasContext *ctx)
@@ -2014,21 +2150,28 @@ static void gen_xvxexpdp(DisasContext *ctx)
xtl = tcg_temp_new_i64();
xbh = tcg_temp_new_i64();
xbl = tcg_temp_new_i64();
- get_cpu_vsrh(xbh, xB(ctx->opcode));
- get_cpu_vsrl(xbl, xB(ctx->opcode));
+ get_cpu_vsr(xbh, xB(ctx->opcode), true);
+ get_cpu_vsr(xbl, xB(ctx->opcode), false);
tcg_gen_extract_i64(xth, xbh, 52, 11);
- set_cpu_vsrh(xT(ctx->opcode), xth);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
tcg_gen_extract_i64(xtl, xbl, 52, 11);
- set_cpu_vsrl(xT(ctx->opcode), xtl);
-
- tcg_temp_free_i64(xth);
- tcg_temp_free_i64(xtl);
- tcg_temp_free_i64(xbh);
- tcg_temp_free_i64(xbl);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
}
-GEN_VSX_HELPER_X2(xvxsigsp, 0x00, 0x04, 0, PPC2_ISA300)
+static bool trans_XVXSIGSP(DisasContext *ctx, arg_XX2 *a)
+{
+ TCGv_ptr t, b;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VSX(ctx);
+
+ t = gen_vsr_ptr(a->xt);
+ b = gen_vsr_ptr(a->xb);
+
+ gen_helper_XVXSIGSP(t, b);
+ return true;
+}
static void gen_xvxsigdp(DisasContext *ctx)
{
@@ -2046,37 +2189,722 @@ static void gen_xvxsigdp(DisasContext *ctx)
xtl = tcg_temp_new_i64();
xbh = tcg_temp_new_i64();
xbl = tcg_temp_new_i64();
- get_cpu_vsrh(xbh, xB(ctx->opcode));
- get_cpu_vsrl(xbl, xB(ctx->opcode));
+ get_cpu_vsr(xbh, xB(ctx->opcode), true);
+ get_cpu_vsr(xbl, xB(ctx->opcode), false);
exp = tcg_temp_new_i64();
t0 = tcg_temp_new_i64();
- zr = tcg_const_i64(0);
- nan = tcg_const_i64(2047);
+ zr = tcg_constant_i64(0);
+ nan = tcg_constant_i64(2047);
tcg_gen_extract_i64(exp, xbh, 52, 11);
tcg_gen_movi_i64(t0, 0x0010000000000000);
tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
tcg_gen_deposit_i64(xth, t0, xbh, 0, 52);
- set_cpu_vsrh(xT(ctx->opcode), xth);
+ set_cpu_vsr(xT(ctx->opcode), xth, true);
tcg_gen_extract_i64(exp, xbl, 52, 11);
tcg_gen_movi_i64(t0, 0x0010000000000000);
tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0);
tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0);
tcg_gen_deposit_i64(xtl, t0, xbl, 0, 52);
- set_cpu_vsrl(xT(ctx->opcode), xtl);
-
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(exp);
- tcg_temp_free_i64(zr);
- tcg_temp_free_i64(nan);
- tcg_temp_free_i64(xth);
- tcg_temp_free_i64(xtl);
- tcg_temp_free_i64(xbh);
- tcg_temp_free_i64(xbl);
+ set_cpu_vsr(xT(ctx->opcode), xtl, false);
+}
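The xvxsigdp sequence above (extract/movcond/deposit per doubleword) is easier to follow next to a scalar reference. This is a minimal illustrative sketch, not part of the patch, assuming only <stdint.h> types:

#include <stdint.h>

/* Scalar equivalent of one xvxsigdp doubleword: keep the 52-bit fraction and
 * prepend the implicit integer bit, which is 1 unless the exponent field is
 * 0 (zero/denormal) or 2047 (infinity/NaN). */
static uint64_t xvxsigdp_ref(uint64_t x)
{
    uint64_t exp = (x >> 52) & 0x7ff;
    uint64_t hidden = (exp == 0 || exp == 2047) ? 0 : 0x0010000000000000ULL;
    return hidden | (x & 0x000fffffffffffffULL);
}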
+
+static bool do_lstxv(DisasContext *ctx, int ra, TCGv displ,
+ int rt, bool store, bool paired)
+{
+ TCGv ea;
+ TCGv_i64 xt;
+ MemOp mop;
+ int rt1, rt2;
+
+ xt = tcg_temp_new_i64();
+
+ mop = DEF_MEMOP(MO_UQ);
+
+ gen_set_access_type(ctx, ACCESS_INT);
+ ea = do_ea_calc(ctx, ra, displ);
+
+ if (paired && ctx->le_mode) {
+ rt1 = rt + 1;
+ rt2 = rt;
+ } else {
+ rt1 = rt;
+ rt2 = rt + 1;
+ }
+
+ if (store) {
+ get_cpu_vsr(xt, rt1, !ctx->le_mode);
+ tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
+ gen_addr_add(ctx, ea, ea, 8);
+ get_cpu_vsr(xt, rt1, ctx->le_mode);
+ tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
+ if (paired) {
+ gen_addr_add(ctx, ea, ea, 8);
+ get_cpu_vsr(xt, rt2, !ctx->le_mode);
+ tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
+ gen_addr_add(ctx, ea, ea, 8);
+ get_cpu_vsr(xt, rt2, ctx->le_mode);
+ tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
+ }
+ } else {
+ tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
+ set_cpu_vsr(rt1, xt, !ctx->le_mode);
+ gen_addr_add(ctx, ea, ea, 8);
+ tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
+ set_cpu_vsr(rt1, xt, ctx->le_mode);
+ if (paired) {
+ gen_addr_add(ctx, ea, ea, 8);
+ tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
+ set_cpu_vsr(rt2, xt, !ctx->le_mode);
+ gen_addr_add(ctx, ea, ea, 8);
+ tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
+ set_cpu_vsr(rt2, xt, ctx->le_mode);
+ }
+ }
+ return true;
+}
+
+static bool do_lstxv_D(DisasContext *ctx, arg_D *a, bool store, bool paired)
+{
+ if (paired || a->rt < 32) {
+ REQUIRE_VSX(ctx);
+ } else {
+ REQUIRE_VECTOR(ctx);
+ }
+
+ return do_lstxv(ctx, a->ra, tcg_constant_tl(a->si), a->rt, store, paired);
+}
+
+static bool do_lstxv_PLS_D(DisasContext *ctx, arg_PLS_D *a,
+ bool store, bool paired)
+{
+ arg_D d;
+ REQUIRE_VSX(ctx);
+
+ if (!resolve_PLS_D(ctx, &d, a)) {
+ return true;
+ }
+
+ return do_lstxv(ctx, d.ra, tcg_constant_tl(d.si), d.rt, store, paired);
+}
+
+static bool do_lstxv_X(DisasContext *ctx, arg_X *a, bool store, bool paired)
+{
+ if (paired || a->rt >= 32) {
+ REQUIRE_VSX(ctx);
+ } else {
+ REQUIRE_VECTOR(ctx);
+ }
+
+ return do_lstxv(ctx, a->ra, cpu_gpr[a->rb], a->rt, store, paired);
+}
+
+static bool do_lstxsd(DisasContext *ctx, int rt, int ra, TCGv displ, bool store)
+{
+ TCGv ea;
+ TCGv_i64 xt;
+ MemOp mop;
+
+ if (store) {
+ REQUIRE_VECTOR(ctx);
+ } else {
+ REQUIRE_VSX(ctx);
+ }
+
+ xt = tcg_temp_new_i64();
+ mop = DEF_MEMOP(MO_UQ);
+
+ gen_set_access_type(ctx, ACCESS_INT);
+ ea = do_ea_calc(ctx, ra, displ);
+
+ if (store) {
+ get_cpu_vsr(xt, rt + 32, true);
+ tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
+ } else {
+ tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
+ set_cpu_vsr(rt + 32, xt, true);
+ set_cpu_vsr(rt + 32, tcg_constant_i64(0), false);
+ }
+ return true;
+}
+
+static bool do_lstxsd_DS(DisasContext *ctx, arg_D *a, bool store)
+{
+ return do_lstxsd(ctx, a->rt, a->ra, tcg_constant_tl(a->si), store);
+}
+
+static bool do_plstxsd_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store)
+{
+ arg_D d;
+
+ if (!resolve_PLS_D(ctx, &d, a)) {
+ return true;
+ }
+
+ return do_lstxsd(ctx, d.rt, d.ra, tcg_constant_tl(d.si), store);
+}
+
+static bool do_lstxssp(DisasContext *ctx, int rt, int ra, TCGv displ, bool store)
+{
+ TCGv ea;
+ TCGv_i64 xt;
+
+ REQUIRE_VECTOR(ctx);
+
+ xt = tcg_temp_new_i64();
+
+ gen_set_access_type(ctx, ACCESS_INT);
+ ea = do_ea_calc(ctx, ra, displ);
+
+ if (store) {
+ get_cpu_vsr(xt, rt + 32, true);
+ gen_qemu_st32fs(ctx, xt, ea);
+ } else {
+ gen_qemu_ld32fs(ctx, xt, ea);
+ set_cpu_vsr(rt + 32, xt, true);
+ set_cpu_vsr(rt + 32, tcg_constant_i64(0), false);
+ }
+ return true;
+}
+
+static bool do_lstxssp_DS(DisasContext *ctx, arg_D *a, bool store)
+{
+ return do_lstxssp(ctx, a->rt, a->ra, tcg_constant_tl(a->si), store);
+}
+
+static bool do_plstxssp_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store)
+{
+ arg_D d;
+
+ if (!resolve_PLS_D(ctx, &d, a)) {
+ return true;
+ }
+
+ return do_lstxssp(ctx, d.rt, d.ra, tcg_constant_tl(d.si), store);
+}
+
+TRANS_FLAGS2(ISA300, LXSD, do_lstxsd_DS, false)
+TRANS_FLAGS2(ISA300, STXSD, do_lstxsd_DS, true)
+TRANS_FLAGS2(ISA300, LXSSP, do_lstxssp_DS, false)
+TRANS_FLAGS2(ISA300, STXSSP, do_lstxssp_DS, true)
+TRANS_FLAGS2(ISA300, STXV, do_lstxv_D, true, false)
+TRANS_FLAGS2(ISA300, LXV, do_lstxv_D, false, false)
+TRANS_FLAGS2(ISA310, STXVP, do_lstxv_D, true, true)
+TRANS_FLAGS2(ISA310, LXVP, do_lstxv_D, false, true)
+TRANS_FLAGS2(ISA300, STXVX, do_lstxv_X, true, false)
+TRANS_FLAGS2(ISA300, LXVX, do_lstxv_X, false, false)
+TRANS_FLAGS2(ISA310, STXVPX, do_lstxv_X, true, true)
+TRANS_FLAGS2(ISA310, LXVPX, do_lstxv_X, false, true)
+TRANS64_FLAGS2(ISA310, PLXSD, do_plstxsd_PLS_D, false)
+TRANS64_FLAGS2(ISA310, PSTXSD, do_plstxsd_PLS_D, true)
+TRANS64_FLAGS2(ISA310, PLXSSP, do_plstxssp_PLS_D, false)
+TRANS64_FLAGS2(ISA310, PSTXSSP, do_plstxssp_PLS_D, true)
+TRANS64_FLAGS2(ISA310, PSTXV, do_lstxv_PLS_D, true, false)
+TRANS64_FLAGS2(ISA310, PLXV, do_lstxv_PLS_D, false, false)
+TRANS64_FLAGS2(ISA310, PSTXVP, do_lstxv_PLS_D, true, true)
+TRANS64_FLAGS2(ISA310, PLXVP, do_lstxv_PLS_D, false, true)
+
+static bool do_lstrm(DisasContext *ctx, arg_X *a, MemOp mop, bool store)
+{
+ TCGv ea;
+ TCGv_i64 xt;
+
+ REQUIRE_VSX(ctx);
+
+ xt = tcg_temp_new_i64();
+
+ gen_set_access_type(ctx, ACCESS_INT);
+ ea = do_ea_calc(ctx, a->ra , cpu_gpr[a->rb]);
+
+ if (store) {
+ get_cpu_vsr(xt, a->rt, false);
+ tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop);
+ } else {
+ tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop);
+ set_cpu_vsr(a->rt, xt, false);
+ set_cpu_vsr(a->rt, tcg_constant_i64(0), true);
+ }
+ return true;
}
+TRANS_FLAGS2(ISA310, LXVRBX, do_lstrm, DEF_MEMOP(MO_UB), false)
+TRANS_FLAGS2(ISA310, LXVRHX, do_lstrm, DEF_MEMOP(MO_UW), false)
+TRANS_FLAGS2(ISA310, LXVRWX, do_lstrm, DEF_MEMOP(MO_UL), false)
+TRANS_FLAGS2(ISA310, LXVRDX, do_lstrm, DEF_MEMOP(MO_UQ), false)
+TRANS_FLAGS2(ISA310, STXVRBX, do_lstrm, DEF_MEMOP(MO_UB), true)
+TRANS_FLAGS2(ISA310, STXVRHX, do_lstrm, DEF_MEMOP(MO_UW), true)
+TRANS_FLAGS2(ISA310, STXVRWX, do_lstrm, DEF_MEMOP(MO_UL), true)
+TRANS_FLAGS2(ISA310, STXVRDX, do_lstrm, DEF_MEMOP(MO_UQ), true)
+
+static void gen_xxeval_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c,
+ int64_t imm)
+{
+ /*
+ * Instead of processing imm bit-by-bit, we'll skip the computation of
+ * conjunctions whose corresponding bit is unset.
+ */
+ int bit;
+ TCGv_i64 conj, disj;
+
+ conj = tcg_temp_new_i64();
+ disj = tcg_temp_new_i64();
+ tcg_gen_movi_i64(disj, 0);
+
+ /* Iterate over set bits from the least to the most significant bit */
+ while (imm) {
+ /*
+ * Get the next bit to be processed with ctz64. Invert the result of
+ * ctz64 to match the indexing used by PowerISA.
+ */
+ bit = 7 - ctz64(imm);
+ if (bit & 0x4) {
+ tcg_gen_mov_i64(conj, a);
+ } else {
+ tcg_gen_not_i64(conj, a);
+ }
+ if (bit & 0x2) {
+ tcg_gen_and_i64(conj, conj, b);
+ } else {
+ tcg_gen_andc_i64(conj, conj, b);
+ }
+ if (bit & 0x1) {
+ tcg_gen_and_i64(conj, conj, c);
+ } else {
+ tcg_gen_andc_i64(conj, conj, c);
+ }
+ tcg_gen_or_i64(disj, disj, conj);
+
+ /* Unset the least significant bit that is set */
+ imm &= imm - 1;
+ }
+
+ tcg_gen_mov_i64(t, disj);
+}
+
+static void gen_xxeval_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
+ TCGv_vec c, int64_t imm)
+{
+ /*
+ * Instead of processing imm bit-by-bit, we'll skip the computation of
+ * conjunctions whose corresponding bit is unset.
+ */
+ int bit;
+ TCGv_vec disj, conj;
+
+ conj = tcg_temp_new_vec_matching(t);
+ disj = tcg_temp_new_vec_matching(t);
+ tcg_gen_dupi_vec(vece, disj, 0);
+
+ /* Iterate over set bits from the least to the most significant bit */
+ while (imm) {
+ /*
+ * Get the next bit to be processed with ctz64. Invert the result of
+ * ctz64 to match the indexing used by PowerISA.
+ */
+ bit = 7 - ctz64(imm);
+ if (bit & 0x4) {
+ tcg_gen_mov_vec(conj, a);
+ } else {
+ tcg_gen_not_vec(vece, conj, a);
+ }
+ if (bit & 0x2) {
+ tcg_gen_and_vec(vece, conj, conj, b);
+ } else {
+ tcg_gen_andc_vec(vece, conj, conj, b);
+ }
+ if (bit & 0x1) {
+ tcg_gen_and_vec(vece, conj, conj, c);
+ } else {
+ tcg_gen_andc_vec(vece, conj, conj, c);
+ }
+ tcg_gen_or_vec(vece, disj, disj, conj);
+
+ /* Unset the least significant bit that is set */
+ imm &= imm - 1;
+ }
+
+ tcg_gen_mov_vec(t, disj);
+}
+
+static bool trans_XXEVAL(DisasContext *ctx, arg_8RR_XX4_imm *a)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_andc_vec, 0
+ };
+ static const GVecGen4i op = {
+ .fniv = gen_xxeval_vec,
+ .fno = gen_helper_XXEVAL,
+ .fni8 = gen_xxeval_i64,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ };
+ int xt = vsr_full_offset(a->xt), xa = vsr_full_offset(a->xa),
+ xb = vsr_full_offset(a->xb), xc = vsr_full_offset(a->xc);
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+
+ /* Equivalent functions that can be implemented with a single gen_gvec */
+ switch (a->imm) {
+ case 0b00000000: /* false */
+ set_cpu_vsr(a->xt, tcg_constant_i64(0), true);
+ set_cpu_vsr(a->xt, tcg_constant_i64(0), false);
+ break;
+ case 0b00000011: /* and(B,A) */
+ tcg_gen_gvec_and(MO_64, xt, xb, xa, 16, 16);
+ break;
+ case 0b00000101: /* and(C,A) */
+ tcg_gen_gvec_and(MO_64, xt, xc, xa, 16, 16);
+ break;
+ case 0b00001111: /* A */
+ tcg_gen_gvec_mov(MO_64, xt, xa, 16, 16);
+ break;
+ case 0b00010001: /* and(C,B) */
+ tcg_gen_gvec_and(MO_64, xt, xc, xb, 16, 16);
+ break;
+ case 0b00011011: /* C?B:A */
+ tcg_gen_gvec_bitsel(MO_64, xt, xc, xb, xa, 16, 16);
+ break;
+ case 0b00011101: /* B?C:A */
+ tcg_gen_gvec_bitsel(MO_64, xt, xb, xc, xa, 16, 16);
+ break;
+ case 0b00100111: /* C?A:B */
+ tcg_gen_gvec_bitsel(MO_64, xt, xc, xa, xb, 16, 16);
+ break;
+ case 0b00110011: /* B */
+ tcg_gen_gvec_mov(MO_64, xt, xb, 16, 16);
+ break;
+ case 0b00110101: /* A?C:B */
+ tcg_gen_gvec_bitsel(MO_64, xt, xa, xc, xb, 16, 16);
+ break;
+ case 0b00111100: /* xor(B,A) */
+ tcg_gen_gvec_xor(MO_64, xt, xb, xa, 16, 16);
+ break;
+ case 0b00111111: /* or(B,A) */
+ tcg_gen_gvec_or(MO_64, xt, xb, xa, 16, 16);
+ break;
+ case 0b01000111: /* B?A:C */
+ tcg_gen_gvec_bitsel(MO_64, xt, xb, xa, xc, 16, 16);
+ break;
+ case 0b01010011: /* A?B:C */
+ tcg_gen_gvec_bitsel(MO_64, xt, xa, xb, xc, 16, 16);
+ break;
+ case 0b01010101: /* C */
+ tcg_gen_gvec_mov(MO_64, xt, xc, 16, 16);
+ break;
+ case 0b01011010: /* xor(C,A) */
+ tcg_gen_gvec_xor(MO_64, xt, xc, xa, 16, 16);
+ break;
+ case 0b01011111: /* or(C,A) */
+ tcg_gen_gvec_or(MO_64, xt, xc, xa, 16, 16);
+ break;
+ case 0b01100110: /* xor(C,B) */
+ tcg_gen_gvec_xor(MO_64, xt, xc, xb, 16, 16);
+ break;
+ case 0b01110111: /* or(C,B) */
+ tcg_gen_gvec_or(MO_64, xt, xc, xb, 16, 16);
+ break;
+ case 0b10001000: /* nor(C,B) */
+ tcg_gen_gvec_nor(MO_64, xt, xc, xb, 16, 16);
+ break;
+ case 0b10011001: /* eqv(C,B) */
+ tcg_gen_gvec_eqv(MO_64, xt, xc, xb, 16, 16);
+ break;
+ case 0b10100000: /* nor(C,A) */
+ tcg_gen_gvec_nor(MO_64, xt, xc, xa, 16, 16);
+ break;
+ case 0b10100101: /* eqv(C,A) */
+ tcg_gen_gvec_eqv(MO_64, xt, xc, xa, 16, 16);
+ break;
+ case 0b10101010: /* not(C) */
+ tcg_gen_gvec_not(MO_64, xt, xc, 16, 16);
+ break;
+ case 0b11000000: /* nor(B,A) */
+ tcg_gen_gvec_nor(MO_64, xt, xb, xa, 16, 16);
+ break;
+ case 0b11000011: /* eqv(B,A) */
+ tcg_gen_gvec_eqv(MO_64, xt, xb, xa, 16, 16);
+ break;
+ case 0b11001100: /* not(B) */
+ tcg_gen_gvec_not(MO_64, xt, xb, 16, 16);
+ break;
+ case 0b11101110: /* nand(C,B) */
+ tcg_gen_gvec_nand(MO_64, xt, xc, xb, 16, 16);
+ break;
+ case 0b11110000: /* not(A) */
+ tcg_gen_gvec_not(MO_64, xt, xa, 16, 16);
+ break;
+ case 0b11111010: /* nand(C,A) */
+ tcg_gen_gvec_nand(MO_64, xt, xc, xa, 16, 16);
+ break;
+ case 0b11111100: /* nand(B,A) */
+ tcg_gen_gvec_nand(MO_64, xt, xb, xa, 16, 16);
+ break;
+ case 0b11111111: /* true */
+ set_cpu_vsr(a->xt, tcg_constant_i64(-1), true);
+ set_cpu_vsr(a->xt, tcg_constant_i64(-1), false);
+ break;
+ default:
+ /* Fallback to compute all conjunctions/disjunctions */
+ tcg_gen_gvec_4i(xt, xa, xb, xc, 16, 16, a->imm, &op);
+ }
+
+ return true;
+}
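The imm field of XXEVAL acts as an 8-entry truth table indexed by the bits of (A, B, C), with entry 0 in the most-significant position, which is why the generator above uses 7 - ctz64(imm). A rough scalar reference, given only to make the special cases in the switch easier to check (e.g. imm 0b00011011 selects B where C is set and A elsewhere, i.e. C?B:A); it is illustrative and not part of the tree:

#include <stdint.h>

static uint64_t xxeval_ref(uint64_t a, uint64_t b, uint64_t c, uint8_t imm)
{
    uint64_t r = 0;
    for (int i = 0; i < 64; i++) {
        /* Truth-table index formed from the i-th bits of A, B and C */
        int idx = (((a >> i) & 1) << 2) | (((b >> i) & 1) << 1) | ((c >> i) & 1);
        /* PowerISA numbers table entries from the most-significant imm bit */
        r |= ((uint64_t)(imm >> (7 - idx)) & 1) << i;
    }
    return r;
}
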
+
+static void gen_xxblendv_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
+ TCGv_vec c)
+{
+ TCGv_vec tmp = tcg_temp_new_vec_matching(c);
+ tcg_gen_sari_vec(vece, tmp, c, (8 << vece) - 1);
+ tcg_gen_bitsel_vec(vece, t, tmp, b, a);
+}
+
+static bool do_xxblendv(DisasContext *ctx, arg_8RR_XX4 *a, unsigned vece)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sari_vec, 0
+ };
+ static const GVecGen4 ops[4] = {
+ {
+ .fniv = gen_xxblendv_vec,
+ .fno = gen_helper_XXBLENDVB,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_xxblendv_vec,
+ .fno = gen_helper_XXBLENDVH,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_xxblendv_vec,
+ .fno = gen_helper_XXBLENDVW,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_xxblendv_vec,
+ .fno = gen_helper_XXBLENDVD,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ }
+ };
+
+ REQUIRE_VSX(ctx);
+
+ tcg_gen_gvec_4(vsr_full_offset(a->xt), vsr_full_offset(a->xa),
+ vsr_full_offset(a->xb), vsr_full_offset(a->xc),
+ 16, 16, &ops[vece]);
+
+ return true;
+}
+
+TRANS(XXBLENDVB, do_xxblendv, MO_8)
+TRANS(XXBLENDVH, do_xxblendv, MO_16)
+TRANS(XXBLENDVW, do_xxblendv, MO_32)
+TRANS(XXBLENDVD, do_xxblendv, MO_64)
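The sign-replicate-and-bitsel sequence in gen_xxblendv_vec implements a per-element select on the top bit of the mask operand. A small byte-wise sketch of the intended semantics for XXBLENDVB (illustrative only; the real operation is done 16 bytes at a time by gvec):

#include <stdint.h>

/* Each result byte comes from b when the corresponding byte of c has its
 * sign bit set, otherwise from a. */
static void xxblendvb_ref(uint8_t t[16], const uint8_t a[16],
                          const uint8_t b[16], const uint8_t c[16])
{
    for (int i = 0; i < 16; i++) {
        t[i] = (c[i] & 0x80) ? b[i] : a[i];
    }
}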
+
+static bool do_helper_XX3(DisasContext *ctx, arg_XX3 *a,
+ void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
+{
+ TCGv_ptr xt, xa, xb;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+ REQUIRE_VSX(ctx);
+
+ xt = gen_vsr_ptr(a->xt);
+ xa = gen_vsr_ptr(a->xa);
+ xb = gen_vsr_ptr(a->xb);
+
+ helper(tcg_env, xt, xa, xb);
+ return true;
+}
+
+TRANS(XSCMPEQDP, do_helper_XX3, gen_helper_XSCMPEQDP)
+TRANS(XSCMPGEDP, do_helper_XX3, gen_helper_XSCMPGEDP)
+TRANS(XSCMPGTDP, do_helper_XX3, gen_helper_XSCMPGTDP)
+TRANS(XSMAXCDP, do_helper_XX3, gen_helper_XSMAXCDP)
+TRANS(XSMINCDP, do_helper_XX3, gen_helper_XSMINCDP)
+TRANS(XSMAXJDP, do_helper_XX3, gen_helper_XSMAXJDP)
+TRANS(XSMINJDP, do_helper_XX3, gen_helper_XSMINJDP)
+
+static bool do_helper_X(arg_X *a,
+ void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
+{
+ TCGv_ptr rt, ra, rb;
+
+ rt = gen_avr_ptr(a->rt);
+ ra = gen_avr_ptr(a->ra);
+ rb = gen_avr_ptr(a->rb);
+
+ helper(tcg_env, rt, ra, rb);
+ return true;
+}
+
+static bool do_xscmpqp(DisasContext *ctx, arg_X *a,
+ void (*helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr))
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+
+ return do_helper_X(a, helper);
+}
+
+TRANS(XSCMPEQQP, do_xscmpqp, gen_helper_XSCMPEQQP)
+TRANS(XSCMPGEQP, do_xscmpqp, gen_helper_XSCMPGEQP)
+TRANS(XSCMPGTQP, do_xscmpqp, gen_helper_XSCMPGTQP)
+TRANS(XSMAXCQP, do_xscmpqp, gen_helper_XSMAXCQP)
+TRANS(XSMINCQP, do_xscmpqp, gen_helper_XSMINCQP)
+
+static bool trans_XVCVSPBF16(DisasContext *ctx, arg_XX2 *a)
+{
+ TCGv_ptr xt, xb;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+
+ xt = gen_vsr_ptr(a->xt);
+ xb = gen_vsr_ptr(a->xb);
+
+ gen_helper_XVCVSPBF16(tcg_env, xt, xb);
+ return true;
+}
+
+static bool trans_XVCVBF16SPN(DisasContext *ctx, arg_XX2 *a)
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+
+ tcg_gen_gvec_shli(MO_32, vsr_full_offset(a->xt), vsr_full_offset(a->xb),
+ 16, 16, 16);
+
+ return true;
+}
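The gvec shift-left-by-16 above is the whole conversion: a bfloat16 value is the high half of the corresponding float32, so widening only has to zero-fill the low 16 mantissa bits. A one-line illustration, not part of the patch:

#include <stdint.h>

/* bfloat16 bit pattern -> float32 bit pattern, per 32-bit lane */
static inline uint32_t bf16_to_f32_bits(uint16_t h)
{
    return (uint32_t)h << 16;
}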
+
+ /*
+ * The PowerISA 3.1 mentions that for the current version of the
+ * architecture, "the hardware implementation provides the effect of
+ * ACC[i] and VSRs 4*i to 4*i + 3 logically containing the same data"
+ * and "The Accumulators introduce no new logical state at this time"
+ * (page 501). For now it seems unnecessary to create new structures,
+ * so ACC[i] is the same as VSRs 4*i to 4*i+3 and therefore
+ * move to and from accumulators are no-ops.
+ */
+static bool trans_XXMFACC(DisasContext *ctx, arg_X_a *a)
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+ return true;
+}
+
+static bool trans_XXMTACC(DisasContext *ctx, arg_X_a *a)
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+ return true;
+}
+
+static bool trans_XXSETACCZ(DisasContext *ctx, arg_X_a *a)
+{
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+ tcg_gen_gvec_dup_imm(MO_64, acc_full_offset(a->ra), 64, 64, 0);
+ return true;
+}
+
+static bool do_ger(DisasContext *ctx, arg_MMIRR_XX3 *a,
+ void (*helper)(TCGv_env, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32))
+{
+ uint32_t mask;
+ TCGv_ptr xt, xa, xb;
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
+ if (unlikely((a->xa / 4 == a->xt) || (a->xb / 4 == a->xt))) {
+ gen_invalid(ctx);
+ return true;
+ }
+
+ xt = gen_acc_ptr(a->xt);
+ xa = gen_vsr_ptr(a->xa);
+ xb = gen_vsr_ptr(a->xb);
+
+ mask = ger_pack_masks(a->pmsk, a->ymsk, a->xmsk);
+ helper(tcg_env, xa, xb, xt, tcg_constant_i32(mask));
+ return true;
+}
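do_ger packs the pmsk/ymsk/xmsk instruction fields into a single i32 so the same helpers can serve both the XV* and the prefixed PMXV* forms below. As a rough sketch of what such a packing could look like (the actual layout is defined by ger_pack_masks() elsewhere in the tree; the bit positions here are an assumption, not a quotation):

static uint32_t ger_pack_masks_sketch(int pmsk, int ymsk, int xmsk)
{
    /* assumed layout: product mask in bits 8..15, y mask in 4..7, x mask in 0..3 */
    return (pmsk & 0xff) << 8 | (ymsk & 0xf) << 4 | (xmsk & 0xf);
}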
+
+TRANS(XVI4GER8, do_ger, gen_helper_XVI4GER8)
+TRANS(XVI4GER8PP, do_ger, gen_helper_XVI4GER8PP)
+TRANS(XVI8GER4, do_ger, gen_helper_XVI8GER4)
+TRANS(XVI8GER4PP, do_ger, gen_helper_XVI8GER4PP)
+TRANS(XVI8GER4SPP, do_ger, gen_helper_XVI8GER4SPP)
+TRANS(XVI16GER2, do_ger, gen_helper_XVI16GER2)
+TRANS(XVI16GER2PP, do_ger, gen_helper_XVI16GER2PP)
+TRANS(XVI16GER2S, do_ger, gen_helper_XVI16GER2S)
+TRANS(XVI16GER2SPP, do_ger, gen_helper_XVI16GER2SPP)
+
+TRANS64(PMXVI4GER8, do_ger, gen_helper_XVI4GER8)
+TRANS64(PMXVI4GER8PP, do_ger, gen_helper_XVI4GER8PP)
+TRANS64(PMXVI8GER4, do_ger, gen_helper_XVI8GER4)
+TRANS64(PMXVI8GER4PP, do_ger, gen_helper_XVI8GER4PP)
+TRANS64(PMXVI8GER4SPP, do_ger, gen_helper_XVI8GER4SPP)
+TRANS64(PMXVI16GER2, do_ger, gen_helper_XVI16GER2)
+TRANS64(PMXVI16GER2PP, do_ger, gen_helper_XVI16GER2PP)
+TRANS64(PMXVI16GER2S, do_ger, gen_helper_XVI16GER2S)
+TRANS64(PMXVI16GER2SPP, do_ger, gen_helper_XVI16GER2SPP)
+
+TRANS(XVBF16GER2, do_ger, gen_helper_XVBF16GER2)
+TRANS(XVBF16GER2PP, do_ger, gen_helper_XVBF16GER2PP)
+TRANS(XVBF16GER2PN, do_ger, gen_helper_XVBF16GER2PN)
+TRANS(XVBF16GER2NP, do_ger, gen_helper_XVBF16GER2NP)
+TRANS(XVBF16GER2NN, do_ger, gen_helper_XVBF16GER2NN)
+
+TRANS(XVF16GER2, do_ger, gen_helper_XVF16GER2)
+TRANS(XVF16GER2PP, do_ger, gen_helper_XVF16GER2PP)
+TRANS(XVF16GER2PN, do_ger, gen_helper_XVF16GER2PN)
+TRANS(XVF16GER2NP, do_ger, gen_helper_XVF16GER2NP)
+TRANS(XVF16GER2NN, do_ger, gen_helper_XVF16GER2NN)
+
+TRANS(XVF32GER, do_ger, gen_helper_XVF32GER)
+TRANS(XVF32GERPP, do_ger, gen_helper_XVF32GERPP)
+TRANS(XVF32GERPN, do_ger, gen_helper_XVF32GERPN)
+TRANS(XVF32GERNP, do_ger, gen_helper_XVF32GERNP)
+TRANS(XVF32GERNN, do_ger, gen_helper_XVF32GERNN)
+
+TRANS(XVF64GER, do_ger, gen_helper_XVF64GER)
+TRANS(XVF64GERPP, do_ger, gen_helper_XVF64GERPP)
+TRANS(XVF64GERPN, do_ger, gen_helper_XVF64GERPN)
+TRANS(XVF64GERNP, do_ger, gen_helper_XVF64GERNP)
+TRANS(XVF64GERNN, do_ger, gen_helper_XVF64GERNN)
+
+TRANS64(PMXVBF16GER2, do_ger, gen_helper_XVBF16GER2)
+TRANS64(PMXVBF16GER2PP, do_ger, gen_helper_XVBF16GER2PP)
+TRANS64(PMXVBF16GER2PN, do_ger, gen_helper_XVBF16GER2PN)
+TRANS64(PMXVBF16GER2NP, do_ger, gen_helper_XVBF16GER2NP)
+TRANS64(PMXVBF16GER2NN, do_ger, gen_helper_XVBF16GER2NN)
+
+TRANS64(PMXVF16GER2, do_ger, gen_helper_XVF16GER2)
+TRANS64(PMXVF16GER2PP, do_ger, gen_helper_XVF16GER2PP)
+TRANS64(PMXVF16GER2PN, do_ger, gen_helper_XVF16GER2PN)
+TRANS64(PMXVF16GER2NP, do_ger, gen_helper_XVF16GER2NP)
+TRANS64(PMXVF16GER2NN, do_ger, gen_helper_XVF16GER2NN)
+
+TRANS64(PMXVF32GER, do_ger, gen_helper_XVF32GER)
+TRANS64(PMXVF32GERPP, do_ger, gen_helper_XVF32GERPP)
+TRANS64(PMXVF32GERPN, do_ger, gen_helper_XVF32GERPN)
+TRANS64(PMXVF32GERNP, do_ger, gen_helper_XVF32GERNP)
+TRANS64(PMXVF32GERNN, do_ger, gen_helper_XVF32GERNN)
+
+TRANS64(PMXVF64GER, do_ger, gen_helper_XVF64GER)
+TRANS64(PMXVF64GERPP, do_ger, gen_helper_XVF64GERPP)
+TRANS64(PMXVF64GERPN, do_ger, gen_helper_XVF64GERPN)
+TRANS64(PMXVF64GERNP, do_ger, gen_helper_XVF64GERNP)
+TRANS64(PMXVF64GERNN, do_ger, gen_helper_XVF64GERNN)
+
#undef GEN_XX2FORM
#undef GEN_XX3FORM
#undef GEN_XX2IFORM
diff --git a/target/ppc/translate/vsx-ops.c.inc b/target/ppc/translate/vsx-ops.c.inc
index 1d41beef26..a3ba094d62 100644
--- a/target/ppc/translate/vsx-ops.c.inc
+++ b/target/ppc/translate/vsx-ops.c.inc
@@ -10,7 +10,6 @@ GEN_HANDLER_E(lxvdsx, 0x1F, 0x0C, 0x0A, 0, PPC_NONE, PPC2_VSX),
GEN_HANDLER_E(lxvw4x, 0x1F, 0x0C, 0x18, 0, PPC_NONE, PPC2_VSX),
GEN_HANDLER_E(lxvh8x, 0x1F, 0x0C, 0x19, 0, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(lxvb16x, 0x1F, 0x0C, 0x1B, 0, PPC_NONE, PPC2_ISA300),
-GEN_HANDLER_E(lxvx, 0x1F, 0x0C, 0x08, 0x00000040, PPC_NONE, PPC2_ISA300),
#if defined(TARGET_PPC64)
GEN_HANDLER_E(lxvl, 0x1F, 0x0D, 0x08, 0, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(lxvll, 0x1F, 0x0D, 0x09, 0, PPC_NONE, PPC2_ISA300),
@@ -25,7 +24,6 @@ GEN_HANDLER_E(stxvd2x, 0x1F, 0xC, 0x1E, 0, PPC_NONE, PPC2_VSX),
GEN_HANDLER_E(stxvw4x, 0x1F, 0xC, 0x1C, 0, PPC_NONE, PPC2_VSX),
GEN_HANDLER_E(stxvh8x, 0x1F, 0x0C, 0x1D, 0, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(stxvb16x, 0x1F, 0x0C, 0x1F, 0, PPC_NONE, PPC2_ISA300),
-GEN_HANDLER_E(stxvx, 0x1F, 0x0C, 0x0C, 0, PPC_NONE, PPC2_ISA300),
#if defined(TARGET_PPC64)
GEN_HANDLER_E(stxvl, 0x1F, 0x0D, 0x0C, 0, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(stxvll, 0x1F, 0x0D, 0x0D, 0, PPC_NONE, PPC2_ISA300),
@@ -135,7 +133,6 @@ GEN_VSX_XFORM_300_EO(xsnabsqp, 0x04, 0x19, 0x08, 0x00000001),
GEN_VSX_XFORM_300_EO(xsnegqp, 0x04, 0x19, 0x10, 0x00000001),
GEN_VSX_XFORM_300(xscpsgnqp, 0x04, 0x03, 0x00000001),
GEN_VSX_XFORM_300_EO(xscvdpqp, 0x04, 0x1A, 0x16, 0x00000001),
-GEN_VSX_XFORM_300_EO(xscvqpdp, 0x04, 0x1A, 0x14, 0x0),
GEN_VSX_XFORM_300_EO(xscvqpsdz, 0x04, 0x1A, 0x19, 0x00000001),
GEN_VSX_XFORM_300_EO(xscvqpswz, 0x04, 0x1A, 0x09, 0x00000001),
GEN_VSX_XFORM_300_EO(xscvqpudz, 0x04, 0x1A, 0x11, 0x00000001),
@@ -150,33 +147,11 @@ GEN_HANDLER_E(xsiexpdp, 0x3C, 0x16, 0x1C, 0, PPC_NONE, PPC2_ISA300),
GEN_VSX_XFORM_300(xsiexpqp, 0x4, 0x1B, 0x00000001),
#endif
-GEN_XX2FORM(xststdcdp, 0x14, 0x16, PPC2_ISA300),
-GEN_XX2FORM(xststdcsp, 0x14, 0x12, PPC2_ISA300),
-GEN_VSX_XFORM_300(xststdcqp, 0x04, 0x16, 0x00000001),
-
GEN_XX3FORM(xviexpsp, 0x00, 0x1B, PPC2_ISA300),
GEN_XX3FORM(xviexpdp, 0x00, 0x1F, PPC2_ISA300),
GEN_XX2FORM_EO(xvxexpdp, 0x16, 0x1D, 0x00, PPC2_ISA300),
GEN_XX2FORM_EO(xvxsigdp, 0x16, 0x1D, 0x01, PPC2_ISA300),
GEN_XX2FORM_EO(xvxexpsp, 0x16, 0x1D, 0x08, PPC2_ISA300),
-GEN_XX2FORM_EO(xvxsigsp, 0x16, 0x1D, 0x09, PPC2_ISA300),
-
-/* DCMX = bit[25] << 6 | bit[29] << 5 | bit[11:15] */
-#define GEN_XX2FORM_DCMX(name, opc2, opc3, fl2) \
-GEN_XX3FORM(name, opc2, opc3 | 0, fl2), \
-GEN_XX3FORM(name, opc2, opc3 | 1, fl2)
-
-GEN_XX2FORM_DCMX(xvtstdcdp, 0x14, 0x1E, PPC2_ISA300),
-GEN_XX2FORM_DCMX(xvtstdcsp, 0x14, 0x1A, PPC2_ISA300),
-
-GEN_XX2FORM(xvabsdp, 0x12, 0x1D, PPC2_VSX),
-GEN_XX2FORM(xvnabsdp, 0x12, 0x1E, PPC2_VSX),
-GEN_XX2FORM(xvnegdp, 0x12, 0x1F, PPC2_VSX),
-GEN_XX3FORM(xvcpsgndp, 0x00, 0x1E, PPC2_VSX),
-GEN_XX2FORM(xvabssp, 0x12, 0x19, PPC2_VSX),
-GEN_XX2FORM(xvnabssp, 0x12, 0x1A, PPC2_VSX),
-GEN_XX2FORM(xvnegsp, 0x12, 0x1B, PPC2_VSX),
-GEN_XX3FORM(xvcpsgnsp, 0x00, 0x1A, PPC2_VSX),
GEN_XX3FORM(xsadddp, 0x00, 0x04, PPC2_VSX),
GEN_VSX_XFORM_300(xsaddqp, 0x04, 0x00, 0x0),
@@ -189,18 +164,6 @@ GEN_XX2FORM(xssqrtdp, 0x16, 0x04, PPC2_VSX),
GEN_XX2FORM(xsrsqrtedp, 0x14, 0x04, PPC2_VSX),
GEN_XX3FORM(xstdivdp, 0x14, 0x07, PPC2_VSX),
GEN_XX2FORM(xstsqrtdp, 0x14, 0x06, PPC2_VSX),
-GEN_XX3FORM_NAME(xsmadddp, "xsmaddadp", 0x04, 0x04, PPC2_VSX),
-GEN_XX3FORM_NAME(xsmadddp, "xsmaddmdp", 0x04, 0x05, PPC2_VSX),
-GEN_XX3FORM_NAME(xsmsubdp, "xsmsubadp", 0x04, 0x06, PPC2_VSX),
-GEN_XX3FORM_NAME(xsmsubdp, "xsmsubmdp", 0x04, 0x07, PPC2_VSX),
-GEN_XX3FORM_NAME(xsnmadddp, "xsnmaddadp", 0x04, 0x14, PPC2_VSX),
-GEN_XX3FORM_NAME(xsnmadddp, "xsnmaddmdp", 0x04, 0x15, PPC2_VSX),
-GEN_XX3FORM_NAME(xsnmsubdp, "xsnmsubadp", 0x04, 0x16, PPC2_VSX),
-GEN_XX3FORM_NAME(xsnmsubdp, "xsnmsubmdp", 0x04, 0x17, PPC2_VSX),
-GEN_XX3FORM(xscmpeqdp, 0x0C, 0x00, PPC2_ISA300),
-GEN_XX3FORM(xscmpgtdp, 0x0C, 0x01, PPC2_ISA300),
-GEN_XX3FORM(xscmpgedp, 0x0C, 0x02, PPC2_ISA300),
-GEN_XX3FORM(xscmpnedp, 0x0C, 0x03, PPC2_ISA300),
GEN_XX3FORM(xscmpexpdp, 0x0C, 0x07, PPC2_ISA300),
GEN_VSX_XFORM_300(xscmpexpqp, 0x04, 0x05, 0x00600001),
GEN_XX2IFORM(xscmpodp, 0x0C, 0x05, PPC2_VSX),
@@ -209,17 +172,12 @@ GEN_VSX_XFORM_300(xscmpoqp, 0x04, 0x04, 0x00600001),
GEN_VSX_XFORM_300(xscmpuqp, 0x04, 0x14, 0x00600001),
GEN_XX3FORM(xsmaxdp, 0x00, 0x14, PPC2_VSX),
GEN_XX3FORM(xsmindp, 0x00, 0x15, PPC2_VSX),
-GEN_XX3FORM(xsmaxcdp, 0x00, 0x10, PPC2_ISA300),
-GEN_XX3FORM(xsmincdp, 0x00, 0x11, PPC2_ISA300),
-GEN_XX3FORM(xsmaxjdp, 0x00, 0x12, PPC2_ISA300),
-GEN_XX3FORM(xsminjdp, 0x00, 0x13, PPC2_ISA300),
GEN_XX2FORM_EO(xscvdphp, 0x16, 0x15, 0x11, PPC2_ISA300),
GEN_XX2FORM(xscvdpsp, 0x12, 0x10, PPC2_VSX),
GEN_XX2FORM(xscvdpspn, 0x16, 0x10, PPC2_VSX207),
GEN_XX2FORM_EO(xscvhpdp, 0x16, 0x15, 0x10, PPC2_ISA300),
GEN_VSX_XFORM_300_EO(xscvsdqp, 0x04, 0x1A, 0x0A, 0x00000001),
GEN_XX2FORM(xscvspdp, 0x12, 0x14, PPC2_VSX),
-GEN_XX2FORM(xscvspdpn, 0x16, 0x14, PPC2_VSX207),
GEN_XX2FORM(xscvdpsxds, 0x10, 0x15, PPC2_VSX),
GEN_XX2FORM(xscvdpsxws, 0x10, 0x05, PPC2_VSX),
GEN_XX2FORM(xscvdpuxds, 0x10, 0x14, PPC2_VSX),
@@ -242,14 +200,6 @@ GEN_XX2FORM(xsresp, 0x14, 0x01, PPC2_VSX207),
GEN_XX2FORM(xsrsp, 0x12, 0x11, PPC2_VSX207),
GEN_XX2FORM(xssqrtsp, 0x16, 0x00, PPC2_VSX207),
GEN_XX2FORM(xsrsqrtesp, 0x14, 0x00, PPC2_VSX207),
-GEN_XX3FORM_NAME(xsmaddsp, "xsmaddasp", 0x04, 0x00, PPC2_VSX207),
-GEN_XX3FORM_NAME(xsmaddsp, "xsmaddmsp", 0x04, 0x01, PPC2_VSX207),
-GEN_XX3FORM_NAME(xsmsubsp, "xsmsubasp", 0x04, 0x02, PPC2_VSX207),
-GEN_XX3FORM_NAME(xsmsubsp, "xsmsubmsp", 0x04, 0x03, PPC2_VSX207),
-GEN_XX3FORM_NAME(xsnmaddsp, "xsnmaddasp", 0x04, 0x10, PPC2_VSX207),
-GEN_XX3FORM_NAME(xsnmaddsp, "xsnmaddmsp", 0x04, 0x11, PPC2_VSX207),
-GEN_XX3FORM_NAME(xsnmsubsp, "xsnmsubasp", 0x04, 0x12, PPC2_VSX207),
-GEN_XX3FORM_NAME(xsnmsubsp, "xsnmsubmsp", 0x04, 0x13, PPC2_VSX207),
GEN_XX2FORM(xscvsxdsp, 0x10, 0x13, PPC2_VSX207),
GEN_XX2FORM(xscvuxdsp, 0x10, 0x12, PPC2_VSX207),
@@ -348,55 +298,4 @@ VSX_LOGICAL(xxlnand, 0x8, 0x16, PPC2_VSX207),
VSX_LOGICAL(xxlorc, 0x8, 0x15, PPC2_VSX207),
GEN_XX3FORM(xxmrghw, 0x08, 0x02, PPC2_VSX),
GEN_XX3FORM(xxmrglw, 0x08, 0x06, PPC2_VSX),
-GEN_XX3FORM(xxperm, 0x08, 0x03, PPC2_ISA300),
-GEN_XX3FORM(xxpermr, 0x08, 0x07, PPC2_ISA300),
-GEN_XX2FORM(xxspltw, 0x08, 0x0A, PPC2_VSX),
-GEN_XX1FORM(xxspltib, 0x08, 0x0B, PPC2_ISA300),
GEN_XX3FORM_DM(xxsldwi, 0x08, 0x00),
-GEN_XX2FORM_EXT(xxextractuw, 0x0A, 0x0A, PPC2_ISA300),
-GEN_XX2FORM_EXT(xxinsertw, 0x0A, 0x0B, PPC2_ISA300),
-
-#define GEN_XXSEL_ROW(opc3) \
-GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x18, opc3, 0, PPC_NONE, PPC2_VSX), \
-GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x19, opc3, 0, PPC_NONE, PPC2_VSX), \
-GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1A, opc3, 0, PPC_NONE, PPC2_VSX), \
-GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1B, opc3, 0, PPC_NONE, PPC2_VSX), \
-GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1C, opc3, 0, PPC_NONE, PPC2_VSX), \
-GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1D, opc3, 0, PPC_NONE, PPC2_VSX), \
-GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1E, opc3, 0, PPC_NONE, PPC2_VSX), \
-GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1F, opc3, 0, PPC_NONE, PPC2_VSX), \
-
-GEN_XXSEL_ROW(0x00)
-GEN_XXSEL_ROW(0x01)
-GEN_XXSEL_ROW(0x02)
-GEN_XXSEL_ROW(0x03)
-GEN_XXSEL_ROW(0x04)
-GEN_XXSEL_ROW(0x05)
-GEN_XXSEL_ROW(0x06)
-GEN_XXSEL_ROW(0x07)
-GEN_XXSEL_ROW(0x08)
-GEN_XXSEL_ROW(0x09)
-GEN_XXSEL_ROW(0x0A)
-GEN_XXSEL_ROW(0x0B)
-GEN_XXSEL_ROW(0x0C)
-GEN_XXSEL_ROW(0x0D)
-GEN_XXSEL_ROW(0x0E)
-GEN_XXSEL_ROW(0x0F)
-GEN_XXSEL_ROW(0x10)
-GEN_XXSEL_ROW(0x11)
-GEN_XXSEL_ROW(0x12)
-GEN_XXSEL_ROW(0x13)
-GEN_XXSEL_ROW(0x14)
-GEN_XXSEL_ROW(0x15)
-GEN_XXSEL_ROW(0x16)
-GEN_XXSEL_ROW(0x17)
-GEN_XXSEL_ROW(0x18)
-GEN_XXSEL_ROW(0x19)
-GEN_XXSEL_ROW(0x1A)
-GEN_XXSEL_ROW(0x1B)
-GEN_XXSEL_ROW(0x1C)
-GEN_XXSEL_ROW(0x1D)
-GEN_XXSEL_ROW(0x1E)
-GEN_XXSEL_ROW(0x1F)
-
-GEN_XX3FORM_DM(xxpermdi, 0x08, 0x01),
diff --git a/target/ppc/user_only_helper.c b/target/ppc/user_only_helper.c
index aa3f867596..a4d07a0d0d 100644
--- a/target/ppc/user_only_helper.c
+++ b/target/ppc/user_only_helper.c
@@ -21,16 +21,22 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
+#include "internal.h"
-
-bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
- MMUAccessType access_type, int mmu_idx,
- bool probe, uintptr_t retaddr)
+void ppc_cpu_record_sigsegv(CPUState *cs, vaddr address,
+ MMUAccessType access_type,
+ bool maperr, uintptr_t retaddr)
{
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
+ CPUPPCState *env = cpu_env(cs);
int exception, error_code;
+ /*
+ * Both DSISR and the "trap number" (exception vector offset,
+ * looked up from exception_index) are present in the linux-user
+ * signal frame.
+ * FIXME: we don't actually populate the trap number properly.
+ * It would be easiest to fill in an env->trap value now.
+ */
if (access_type == MMU_INST_FETCH) {
exception = POWERPC_EXCP_ISI;
error_code = 0x40000000;