author     Anthony Liguori <aliguori@amazon.com>    2013-11-19 13:00:02 -0800
committer  Anthony Liguori <aliguori@amazon.com>    2013-11-19 13:00:02 -0800
commit     dc6dc0a9879738236b4da03d34d3c72eedfd25bb (patch)
tree       28bf322fcf4275a93a27eb133c2a6e37cf5bb898
parent     4be6693a8842023542d1ae1c6e4e2a3fd9988ebe (diff)
parent     463230d85e1e0898b1a94092af65026e615e6ec6 (diff)
Merge remote-tracking branch 'rth/tcg-ia64-17' into staging
# By Richard Henderson
# Via Richard Henderson
* rth/tcg-ia64-17:
  tcg-ia64: Introduce tcg_opc_bswap64_i
  tcg-ia64: Introduce tcg_opc_ext_i
  tcg-ia64: Introduce tcg_opc_movi_a
  tcg-ia64: Introduce tcg_opc_mov_a
  tcg-ia64: Use A3 form of logical operations
  tcg-ia64: Use SUB_A3 and ADDS_A4 for subtraction
  tcg-ia64: Use ADDS for small addition
  tcg-ia64: Avoid unnecessary stop bit in tcg_out_alu
  tcg-ia64: Move AREG0 to R32
  tcg-ia64: Simplify brcond
  tcg-ia64: Handle constant calls
  tcg-ia64: Use shortcuts for nop insns
  tcg-ia64: Use TCGMemOp within qemu_ldst routines

Message-id: 1384811395-7097-1-git-send-email-rth@twiddle.net
Signed-off-by: Anthony Liguori <aliguori@amazon.com>
-rw-r--r--  tcg/ia64/tcg-target.c  755
-rw-r--r--  tcg/ia64/tcg-target.h    4
2 files changed, 397 insertions, 362 deletions
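Note on the TCGMemOp conversion in the diff below: the qemu_ld/st routines now
decode the memory operation as s_bits = opc & MO_SIZE, bswap = opc & MO_BSWAP,
and test opc & MO_SIGN for sign extension. The following stand-alone C sketch
shows that decomposition in isolation; it is not part of the patch, and the
MO_* values are assumed to mirror QEMU's tcg.h of this era, reproduced here
only for illustration.

/* Minimal sketch of the TCGMemOp bit layout used by the new qemu_ld/st code.
   The exact MO_* values are an assumption for illustration purposes. */
#include <stdio.h>

typedef enum {
    MO_8     = 0,
    MO_16    = 1,
    MO_32    = 2,
    MO_64    = 3,
    MO_SIZE  = 3,               /* mask for the size bits */
    MO_SIGN  = 4,               /* sign-extend the loaded value */
    MO_BSWAP = 8,               /* byte-swap the memory operand */
    MO_SSIZE = MO_SIZE | MO_SIGN,
} TCGMemOp;

static void describe(TCGMemOp opc)
{
    int s_bits = opc & MO_SIZE;   /* as in tcg_out_qemu_ld/st below */
    int bswap  = opc & MO_BSWAP;
    int sign   = opc & MO_SIGN;

    printf("opc=%d: %d-bit, %s, %s\n", opc, 8 << s_bits,
           sign ? "sign-extended" : "zero-extended",
           bswap ? "byte-swapped" : "host order");
}

int main(void)
{
    describe(MO_16 | MO_SIGN | MO_BSWAP);  /* sign-extended 16-bit load needing a swap */
    describe(MO_32);                       /* zero-extended 32-bit load, host order */
    return 0;
}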
diff --git a/tcg/ia64/tcg-target.c b/tcg/ia64/tcg-target.c
index 0656d3907a..2d8e00cd94 100644
--- a/tcg/ia64/tcg-target.c
+++ b/tcg/ia64/tcg-target.c
@@ -109,7 +109,6 @@ enum {
};
static const int tcg_target_reg_alloc_order[] = {
- TCG_REG_R33,
TCG_REG_R35,
TCG_REG_R36,
TCG_REG_R37,
@@ -226,6 +225,7 @@ enum {
OPC_BR_CALL_SPTK_MANY_B5 = 0x02100001000ull,
OPC_BR_RET_SPTK_MANY_B4 = 0x00108001100ull,
OPC_BRL_SPTK_MANY_X3 = 0x18000001000ull,
+ OPC_BRL_CALL_SPTK_MANY_X4 = 0x1a000001000ull,
OPC_CMP_LT_A6 = 0x18000000000ull,
OPC_CMP_LTU_A6 = 0x1a000000000ull,
OPC_CMP_EQ_A6 = 0x1c000000000ull,
@@ -263,6 +263,7 @@ enum {
OPC_MOV_I_I26 = 0x00150000000ull,
OPC_MOVL_X2 = 0x0c000000000ull,
OPC_OR_A1 = 0x10070000000ull,
+ OPC_OR_A3 = 0x10170000000ull,
OPC_SETF_EXP_M18 = 0x0c748000000ull,
OPC_SETF_SIG_M18 = 0x0c708000000ull,
OPC_SHL_I7 = 0x0f240000000ull,
@@ -281,9 +282,13 @@ enum {
OPC_UNPACK4_L_I2 = 0x0f860000000ull,
OPC_XMA_L_F2 = 0x1d000000000ull,
OPC_XOR_A1 = 0x10078000000ull,
+ OPC_XOR_A3 = 0x10178000000ull,
OPC_ZXT1_I29 = 0x00080000000ull,
OPC_ZXT2_I29 = 0x00088000000ull,
OPC_ZXT4_I29 = 0x00090000000ull,
+
+ INSN_NOP_M = OPC_NOP_M48, /* nop.m 0 */
+ INSN_NOP_I = OPC_NOP_I18, /* nop.i 0 */
};
static inline uint64_t tcg_opc_a1(int qp, uint64_t opc, int r1,
@@ -581,6 +586,8 @@ static inline uint64_t tcg_opc_l3(uint64_t imm)
return (imm & 0x07fffffffff00000ull) >> 18;
}
+#define tcg_opc_l4 tcg_opc_l3
+
static inline uint64_t tcg_opc_m1(int qp, uint64_t opc, int r1, int r3)
{
return opc
@@ -665,6 +672,15 @@ static inline uint64_t tcg_opc_x3(int qp, uint64_t opc, uint64_t imm)
| (qp & 0x3f);
}
+static inline uint64_t tcg_opc_x4(int qp, uint64_t opc, int b1, uint64_t imm)
+{
+ return opc
+ | ((imm & 0x0800000000000000ull) >> 23) /* i */
+ | ((imm & 0x00000000000fffffull) << 13) /* imm20b */
+ | ((b1 & 0x7) << 6)
+ | (qp & 0x3f);
+}
+
/*
* Relocations
@@ -851,20 +867,31 @@ static inline void tcg_out_bundle(TCGContext *s, int template,
s->code_ptr += 16;
}
+static inline uint64_t tcg_opc_mov_a(int qp, TCGReg dst, TCGReg src)
+{
+ return tcg_opc_a4(qp, OPC_ADDS_A4, dst, 0, src);
+}
+
static inline void tcg_out_mov(TCGContext *s, TCGType type,
TCGReg ret, TCGReg arg)
{
tcg_out_bundle(s, mmI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4, ret, 0, arg));
+ INSN_NOP_M,
+ INSN_NOP_M,
+ tcg_opc_mov_a(TCG_REG_P0, ret, arg));
+}
+
+static inline uint64_t tcg_opc_movi_a(int qp, TCGReg dst, int64_t src)
+{
+ assert(src == sextract64(src, 0, 22));
+ return tcg_opc_a5(qp, OPC_ADDL_A5, dst, src, TCG_REG_R0);
}
static inline void tcg_out_movi(TCGContext *s, TCGType type,
TCGReg reg, tcg_target_long arg)
{
tcg_out_bundle(s, mLX,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_l2 (arg),
tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2, reg, arg));
}
@@ -877,8 +904,8 @@ static void tcg_out_br(TCGContext *s, int label_index)
the existing value and using it again. This ensure that caches and
memory are kept coherent during retranslation. */
tcg_out_bundle(s, mmB,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
+ INSN_NOP_M,
tcg_opc_b1 (TCG_REG_P0, OPC_BR_SPTK_MANY_B1,
get_reloc_pcrel21b(s->code_ptr + 2)));
@@ -890,7 +917,23 @@ static void tcg_out_br(TCGContext *s, int label_index)
}
}
-static inline void tcg_out_call(TCGContext *s, TCGArg addr)
+static inline void tcg_out_calli(TCGContext *s, uintptr_t addr)
+{
+ /* Look through the function descriptor. */
+ uintptr_t disp, *desc = (uintptr_t *)addr;
+ tcg_out_bundle(s, mlx,
+ INSN_NOP_M,
+ tcg_opc_l2 (desc[1]),
+ tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2, TCG_REG_R1, desc[1]));
+ disp = (desc[0] - (uintptr_t)s->code_ptr) >> 4;
+ tcg_out_bundle(s, mLX,
+ INSN_NOP_M,
+ tcg_opc_l4 (disp),
+ tcg_opc_x4 (TCG_REG_P0, OPC_BRL_CALL_SPTK_MANY_X4,
+ TCG_REG_B0, disp));
+}
+
+static inline void tcg_out_callr(TCGContext *s, TCGReg addr)
{
tcg_out_bundle(s, MmI,
tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1, TCG_REG_R2, addr),
@@ -899,7 +942,7 @@ static inline void tcg_out_call(TCGContext *s, TCGArg addr)
TCG_REG_B6, TCG_REG_R2, 0));
tcg_out_bundle(s, mmB,
tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R3),
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_b5 (TCG_REG_P0, OPC_BR_CALL_SPTK_MANY_B5,
TCG_REG_B0, TCG_REG_B6));
}
@@ -915,7 +958,7 @@ static void tcg_out_exit_tb(TCGContext *s, tcg_target_long arg)
imm = (uint64_t)disp >> 4;
tcg_out_bundle(s, mLX,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_l3 (imm),
tcg_opc_x3 (TCG_REG_P0, OPC_BRL_SPTK_MANY_X3, imm));
}
@@ -932,12 +975,12 @@ static inline void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
tcg_out_bundle(s, MmI,
tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1,
TCG_REG_R2, TCG_REG_R2),
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21, TCG_REG_B6,
TCG_REG_R2, 0));
tcg_out_bundle(s, mmB,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
+ INSN_NOP_M,
tcg_opc_b4 (TCG_REG_P0, OPC_BR_SPTK_MANY_B4,
TCG_REG_B6));
}
@@ -947,12 +990,12 @@ static inline void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
static inline void tcg_out_jmp(TCGContext *s, TCGArg addr)
{
tcg_out_bundle(s, mmI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
+ INSN_NOP_M,
tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21, TCG_REG_B6, addr, 0));
tcg_out_bundle(s, mmB,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
+ INSN_NOP_M,
tcg_opc_b4(TCG_REG_P0, OPC_BR_SPTK_MANY_B4, TCG_REG_B6));
}
@@ -964,14 +1007,14 @@ static inline void tcg_out_ld_rel(TCGContext *s, uint64_t opc_m4, TCGArg arg,
tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4,
TCG_REG_R2, arg2, arg1),
tcg_opc_m1 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+ INSN_NOP_I);
} else {
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, arg2);
tcg_out_bundle(s, MmI,
tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1,
TCG_REG_R2, TCG_REG_R2, arg1),
tcg_opc_m1 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+ INSN_NOP_I);
}
}
@@ -983,14 +1026,14 @@ static inline void tcg_out_st_rel(TCGContext *s, uint64_t opc_m4, TCGArg arg,
tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4,
TCG_REG_R2, arg2, arg1),
tcg_opc_m4 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+ INSN_NOP_I);
} else {
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, arg2);
tcg_out_bundle(s, MmI,
tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1,
TCG_REG_R2, TCG_REG_R2, arg1),
tcg_opc_m4 (TCG_REG_P0, opc_m4, arg, TCG_REG_R2),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+ INSN_NOP_I);
}
}
@@ -1014,32 +1057,59 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
}
}
-static inline void tcg_out_alu(TCGContext *s, uint64_t opc_a1, TCGArg ret,
- TCGArg arg1, int const_arg1,
+static inline void tcg_out_alu(TCGContext *s, uint64_t opc_a1, uint64_t opc_a3,
+ TCGReg ret, TCGArg arg1, int const_arg1,
TCGArg arg2, int const_arg2)
{
- uint64_t opc1, opc2;
+ uint64_t opc1 = 0, opc2 = 0, opc3 = 0;
+ if (const_arg2 && arg2 != 0) {
+ opc2 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R3, arg2);
+ arg2 = TCG_REG_R3;
+ }
if (const_arg1 && arg1 != 0) {
- opc1 = tcg_opc_a5(TCG_REG_P0, OPC_ADDL_A5,
- TCG_REG_R2, arg1, TCG_REG_R0);
- arg1 = TCG_REG_R2;
- } else {
- opc1 = tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0);
+ if (opc_a3 && arg1 == (int8_t)arg1) {
+ opc3 = tcg_opc_a3(TCG_REG_P0, opc_a3, ret, arg1, arg2);
+ } else {
+ opc1 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R2, arg1);
+ arg1 = TCG_REG_R2;
+ }
+ }
+ if (opc3 == 0) {
+ opc3 = tcg_opc_a1(TCG_REG_P0, opc_a1, ret, arg1, arg2);
}
- if (const_arg2 && arg2 != 0) {
- opc2 = tcg_opc_a5(TCG_REG_P0, OPC_ADDL_A5,
- TCG_REG_R3, arg2, TCG_REG_R0);
- arg2 = TCG_REG_R3;
+ tcg_out_bundle(s, (opc1 || opc2 ? mII : miI),
+ opc1 ? opc1 : INSN_NOP_M,
+ opc2 ? opc2 : INSN_NOP_I,
+ opc3);
+}
+
+static inline void tcg_out_add(TCGContext *s, TCGReg ret, TCGReg arg1,
+ TCGArg arg2, int const_arg2)
+{
+ if (const_arg2 && arg2 == sextract64(arg2, 0, 14)) {
+ tcg_out_bundle(s, mmI,
+ INSN_NOP_M,
+ INSN_NOP_M,
+ tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4, ret, arg2, arg1));
} else {
- opc2 = tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0);
+ tcg_out_alu(s, OPC_ADD_A1, 0, ret, arg1, 0, arg2, const_arg2);
}
+}
- tcg_out_bundle(s, mII,
- opc1,
- opc2,
- tcg_opc_a1(TCG_REG_P0, opc_a1, ret, arg1, arg2));
+static inline void tcg_out_sub(TCGContext *s, TCGReg ret, TCGArg arg1,
+ int const_arg1, TCGArg arg2, int const_arg2)
+{
+ if (!const_arg1 && const_arg2 && -arg2 == sextract64(-arg2, 0, 14)) {
+ tcg_out_bundle(s, mmI,
+ INSN_NOP_M,
+ INSN_NOP_M,
+ tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4, ret, -arg2, arg1));
+ } else {
+ tcg_out_alu(s, OPC_SUB_A1, OPC_SUB_A3, ret,
+ arg1, const_arg1, arg2, const_arg2);
+ }
}
static inline void tcg_out_eqv(TCGContext *s, TCGArg ret,
@@ -1047,7 +1117,7 @@ static inline void tcg_out_eqv(TCGContext *s, TCGArg ret,
TCGArg arg2, int const_arg2)
{
tcg_out_bundle(s, mII,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_a1 (TCG_REG_P0, OPC_XOR_A1, ret, arg1, arg2),
tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, ret, -1, ret));
}
@@ -1057,7 +1127,7 @@ static inline void tcg_out_nand(TCGContext *s, TCGArg ret,
TCGArg arg2, int const_arg2)
{
tcg_out_bundle(s, mII,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_a1 (TCG_REG_P0, OPC_AND_A1, ret, arg1, arg2),
tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, ret, -1, ret));
}
@@ -1067,7 +1137,7 @@ static inline void tcg_out_nor(TCGContext *s, TCGArg ret,
TCGArg arg2, int const_arg2)
{
tcg_out_bundle(s, mII,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret, arg1, arg2),
tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, ret, -1, ret));
}
@@ -1077,7 +1147,7 @@ static inline void tcg_out_orc(TCGContext *s, TCGArg ret,
TCGArg arg2, int const_arg2)
{
tcg_out_bundle(s, mII,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_a3 (TCG_REG_P0, OPC_ANDCM_A3, TCG_REG_R2, -1, arg2),
tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret, arg1, TCG_REG_R2));
}
@@ -1088,16 +1158,16 @@ static inline void tcg_out_mul(TCGContext *s, TCGArg ret,
tcg_out_bundle(s, mmI,
tcg_opc_m18(TCG_REG_P0, OPC_SETF_SIG_M18, TCG_REG_F6, arg1),
tcg_opc_m18(TCG_REG_P0, OPC_SETF_SIG_M18, TCG_REG_F7, arg2),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+ INSN_NOP_I);
tcg_out_bundle(s, mmF,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
+ INSN_NOP_M,
tcg_opc_f2 (TCG_REG_P0, OPC_XMA_L_F2, TCG_REG_F6, TCG_REG_F6,
TCG_REG_F7, TCG_REG_F0));
tcg_out_bundle(s, miI,
tcg_opc_m19(TCG_REG_P0, OPC_GETF_SIG_M19, ret, TCG_REG_F6),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+ INSN_NOP_I,
+ INSN_NOP_I);
}
static inline void tcg_out_sar_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
@@ -1105,8 +1175,8 @@ static inline void tcg_out_sar_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
{
if (const_arg2) {
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+ INSN_NOP_M,
+ INSN_NOP_I,
tcg_opc_i11(TCG_REG_P0, OPC_EXTR_I11,
ret, arg1, arg2, 31 - arg2));
} else {
@@ -1124,14 +1194,14 @@ static inline void tcg_out_sar_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
{
if (const_arg2) {
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+ INSN_NOP_M,
+ INSN_NOP_I,
tcg_opc_i11(TCG_REG_P0, OPC_EXTR_I11,
ret, arg1, arg2, 63 - arg2));
} else {
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+ INSN_NOP_M,
+ INSN_NOP_I,
tcg_opc_i5 (TCG_REG_P0, OPC_SHR_I5, ret, arg1, arg2));
}
}
@@ -1141,13 +1211,13 @@ static inline void tcg_out_shl_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
{
if (const_arg2) {
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+ INSN_NOP_M,
+ INSN_NOP_I,
tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret,
arg1, 63 - arg2, 31 - arg2));
} else {
tcg_out_bundle(s, mII,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R2,
0x1f, arg2),
tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, ret,
@@ -1160,14 +1230,14 @@ static inline void tcg_out_shl_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
{
if (const_arg2) {
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+ INSN_NOP_M,
+ INSN_NOP_I,
tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret,
arg1, 63 - arg2, 63 - arg2));
} else {
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+ INSN_NOP_M,
+ INSN_NOP_I,
tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, ret,
arg1, arg2));
}
@@ -1178,8 +1248,8 @@ static inline void tcg_out_shr_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
{
if (const_arg2) {
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+ INSN_NOP_M,
+ INSN_NOP_I,
tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret,
arg1, arg2, 31 - arg2));
} else {
@@ -1197,14 +1267,14 @@ static inline void tcg_out_shr_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
{
if (const_arg2) {
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+ INSN_NOP_M,
+ INSN_NOP_I,
tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret,
arg1, arg2, 63 - arg2));
} else {
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+ INSN_NOP_M,
+ INSN_NOP_I,
tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret,
arg1, arg2));
}
@@ -1215,20 +1285,20 @@ static inline void tcg_out_rotl_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
{
if (const_arg2) {
tcg_out_bundle(s, mII,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2,
TCG_REG_R2, arg1, arg1),
tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret,
TCG_REG_R2, 32 - arg2, 31));
} else {
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2,
TCG_REG_R2, arg1, arg1),
tcg_opc_a3 (TCG_REG_P0, OPC_AND_A3, TCG_REG_R3,
0x1f, arg2));
tcg_out_bundle(s, mII,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_a3 (TCG_REG_P0, OPC_SUB_A3, TCG_REG_R3,
0x20, TCG_REG_R3),
tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, ret,
@@ -1241,8 +1311,8 @@ static inline void tcg_out_rotl_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
{
if (const_arg2) {
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+ INSN_NOP_M,
+ INSN_NOP_I,
tcg_opc_i10(TCG_REG_P0, OPC_SHRP_I10, ret, arg1,
arg1, 0x40 - arg2));
} else {
@@ -1254,8 +1324,8 @@ static inline void tcg_out_rotl_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
tcg_opc_i5 (TCG_REG_P0, OPC_SHR_U_I5, TCG_REG_R2,
arg1, TCG_REG_R2));
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+ INSN_NOP_M,
+ INSN_NOP_I,
tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret,
TCG_REG_R2, TCG_REG_R3));
}
@@ -1266,7 +1336,7 @@ static inline void tcg_out_rotr_i32(TCGContext *s, TCGArg ret, TCGArg arg1,
{
if (const_arg2) {
tcg_out_bundle(s, mII,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_i2 (TCG_REG_P0, OPC_UNPACK4_L_I2,
TCG_REG_R2, arg1, arg1),
tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, ret,
@@ -1287,8 +1357,8 @@ static inline void tcg_out_rotr_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
{
if (const_arg2) {
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+ INSN_NOP_M,
+ INSN_NOP_I,
tcg_opc_i10(TCG_REG_P0, OPC_SHRP_I10, ret, arg1,
arg1, arg2));
} else {
@@ -1300,44 +1370,63 @@ static inline void tcg_out_rotr_i64(TCGContext *s, TCGArg ret, TCGArg arg1,
tcg_opc_i7 (TCG_REG_P0, OPC_SHL_I7, TCG_REG_R2,
arg1, TCG_REG_R2));
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+ INSN_NOP_M,
+ INSN_NOP_I,
tcg_opc_a1 (TCG_REG_P0, OPC_OR_A1, ret,
TCG_REG_R2, TCG_REG_R3));
}
}
+static const uint64_t opc_ext_i29[8] = {
+ OPC_ZXT1_I29, OPC_ZXT2_I29, OPC_ZXT4_I29, 0,
+ OPC_SXT1_I29, OPC_SXT2_I29, OPC_SXT4_I29, 0
+};
+
+static inline uint64_t tcg_opc_ext_i(int qp, TCGMemOp opc, TCGReg d, TCGReg s)
+{
+ if ((opc & MO_SIZE) == MO_64) {
+ return tcg_opc_mov_a(qp, d, s);
+ } else {
+ return tcg_opc_i29(qp, opc_ext_i29[opc & MO_SSIZE], d, s);
+ }
+}
+
static inline void tcg_out_ext(TCGContext *s, uint64_t opc_i29,
TCGArg ret, TCGArg arg)
{
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+ INSN_NOP_M,
+ INSN_NOP_I,
tcg_opc_i29(TCG_REG_P0, opc_i29, ret, arg));
}
+static inline uint64_t tcg_opc_bswap64_i(int qp, TCGReg d, TCGReg s)
+{
+ return tcg_opc_i3(qp, OPC_MUX1_I3, d, s, 0xb);
+}
+
static inline void tcg_out_bswap16(TCGContext *s, TCGArg ret, TCGArg arg)
{
tcg_out_bundle(s, mII,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret, arg, 15, 15),
- tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3, ret, ret, 0xb));
+ tcg_opc_bswap64_i(TCG_REG_P0, ret, ret));
}
static inline void tcg_out_bswap32(TCGContext *s, TCGArg ret, TCGArg arg)
{
tcg_out_bundle(s, mII,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, ret, arg, 31, 31),
- tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3, ret, ret, 0xb));
+ tcg_opc_bswap64_i(TCG_REG_P0, ret, ret));
}
static inline void tcg_out_bswap64(TCGContext *s, TCGArg ret, TCGArg arg)
{
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
- tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3, ret, arg, 0xb));
+ INSN_NOP_M,
+ INSN_NOP_I,
+ tcg_opc_bswap64_i(TCG_REG_P0, ret, arg));
}
static inline void tcg_out_deposit(TCGContext *s, TCGArg ret, TCGArg a1,
@@ -1357,8 +1446,7 @@ static inline void tcg_out_deposit(TCGContext *s, TCGArg ret, TCGArg a1,
} else {
/* Otherwise, load any constant into a temporary. Do this into
the first I slot to help out with cross-unit delays. */
- i1 = tcg_opc_a5(TCG_REG_P0, OPC_ADDL_A5,
- TCG_REG_R2, a2, TCG_REG_R0);
+ i1 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R2, a2);
a2 = TCG_REG_R2;
}
}
@@ -1366,8 +1454,8 @@ static inline void tcg_out_deposit(TCGContext *s, TCGArg ret, TCGArg a1,
i2 = tcg_opc_i15(TCG_REG_P0, OPC_DEP_I15, ret, a2, a1, cpos, lm1);
}
tcg_out_bundle(s, (i1 ? mII : miI),
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- i1 ? i1 : tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+ INSN_NOP_M,
+ i1 ? i1 : INSN_NOP_I,
i2);
}
@@ -1413,38 +1501,16 @@ static inline uint64_t tcg_opc_cmp_a(int qp, TCGCond cond, TCGArg arg1,
}
}
-static inline void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGArg arg1,
- int const_arg1, TCGArg arg2, int const_arg2,
- int label_index, int cmp4)
+static inline void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
+ TCGReg arg2, int label_index, int cmp4)
{
TCGLabel *l = &s->labels[label_index];
- uint64_t opc1, opc2;
-
- if (const_arg1 && arg1 != 0) {
- opc1 = tcg_opc_a5(TCG_REG_P0, OPC_ADDL_A5, TCG_REG_R2,
- arg1, TCG_REG_R0);
- arg1 = TCG_REG_R2;
- } else {
- opc1 = tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0);
- }
-
- if (const_arg2 && arg2 != 0) {
- opc2 = tcg_opc_a5(TCG_REG_P0, OPC_ADDL_A5, TCG_REG_R3,
- arg2, TCG_REG_R0);
- arg2 = TCG_REG_R3;
- } else {
- opc2 = tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0);
- }
- tcg_out_bundle(s, mII,
- opc1,
- opc2,
- tcg_opc_cmp_a(TCG_REG_P0, cond, arg1, arg2, cmp4));
- tcg_out_bundle(s, mmB,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_b1 (TCG_REG_P6, OPC_BR_DPTK_FEW_B1,
- get_reloc_pcrel21b(s->code_ptr + 2)));
+ tcg_out_bundle(s, miB,
+ INSN_NOP_M,
+ tcg_opc_cmp_a(TCG_REG_P0, cond, arg1, arg2, cmp4),
+ tcg_opc_b1(TCG_REG_P6, OPC_BR_DPTK_FEW_B1,
+ get_reloc_pcrel21b(s->code_ptr + 2)));
if (l->has_value) {
reloc_pcrel21b((s->code_ptr - 16) + 2, l->u.value);
@@ -1459,8 +1525,8 @@ static inline void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGArg ret,
{
tcg_out_bundle(s, MmI,
tcg_opc_cmp_a(TCG_REG_P0, cond, arg1, arg2, cmp4),
- tcg_opc_a5(TCG_REG_P6, OPC_ADDL_A5, ret, 1, TCG_REG_R0),
- tcg_opc_a5(TCG_REG_P7, OPC_ADDL_A5, ret, 0, TCG_REG_R0));
+ tcg_opc_movi_a(TCG_REG_P6, ret, 1),
+ tcg_opc_movi_a(TCG_REG_P7, ret, 0));
}
static inline void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGArg ret,
@@ -1471,18 +1537,18 @@ static inline void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGArg ret,
uint64_t opc1, opc2;
if (const_v1) {
- opc1 = tcg_opc_a5(TCG_REG_P6, OPC_ADDL_A5, ret, v1, TCG_REG_R0);
+ opc1 = tcg_opc_movi_a(TCG_REG_P6, ret, v1);
} else if (ret == v1) {
- opc1 = tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0);
+ opc1 = INSN_NOP_M;
} else {
- opc1 = tcg_opc_a4(TCG_REG_P6, OPC_ADDS_A4, ret, 0, v1);
+ opc1 = tcg_opc_mov_a(TCG_REG_P6, ret, v1);
}
if (const_v2) {
- opc2 = tcg_opc_a5(TCG_REG_P7, OPC_ADDL_A5, ret, v2, TCG_REG_R0);
+ opc2 = tcg_opc_movi_a(TCG_REG_P7, ret, v2);
} else if (ret == v2) {
- opc2 = tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0);
+ opc2 = INSN_NOP_I;
} else {
- opc2 = tcg_opc_a4(TCG_REG_P7, OPC_ADDS_A4, ret, 0, v2);
+ opc2 = tcg_opc_mov_a(TCG_REG_P7, ret, v2);
}
tcg_out_bundle(s, MmI,
@@ -1496,11 +1562,11 @@ static inline void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGArg ret,
R2 is loaded with the address of the addend TLB entry.
R57 is loaded with the address, zero extented on 32-bit targets. */
static inline void tcg_out_qemu_tlb(TCGContext *s, TCGArg addr_reg,
- int s_bits, uint64_t offset_rw,
+ TCGMemOp s_bits, uint64_t offset_rw,
uint64_t offset_addend)
{
tcg_out_bundle(s, mII,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, TCG_REG_R2,
addr_reg, TARGET_PAGE_BITS, CPU_TLB_BITS - 1),
tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, TCG_REG_R2,
@@ -1509,12 +1575,9 @@ static inline void tcg_out_qemu_tlb(TCGContext *s, TCGArg addr_reg,
tcg_out_bundle(s, mII,
tcg_opc_a5 (TCG_REG_P0, OPC_ADDL_A5, TCG_REG_R2,
offset_rw, TCG_REG_R2),
-#if TARGET_LONG_BITS == 32
- tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29, TCG_REG_R57, addr_reg),
-#else
- tcg_opc_a4(TCG_REG_P0, OPC_ADDS_A4, TCG_REG_R57,
- 0, addr_reg),
-#endif
+ tcg_opc_ext_i(TCG_REG_P0,
+ TARGET_LONG_BITS == 32 ? MO_UL : MO_Q,
+ TCG_REG_R57, addr_reg),
tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
TCG_REG_R2, TCG_AREG0));
tcg_out_bundle(s, mII,
@@ -1538,23 +1601,20 @@ static const void * const qemu_ld_helpers[4] = {
helper_ldq_mmu,
};
-static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
+static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
+ TCGMemOp opc)
{
- int addr_reg, data_reg, mem_index, s_bits, bswap;
- uint64_t opc_ld_m1[4] = { OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1 };
- uint64_t opc_ext_i29[8] = { OPC_ZXT1_I29, OPC_ZXT2_I29, OPC_ZXT4_I29, 0,
- OPC_SXT1_I29, OPC_SXT2_I29, OPC_SXT4_I29, 0 };
+ static const uint64_t opc_ld_m1[4] = {
+ OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1
+ };
+ int addr_reg, data_reg, mem_index;
+ TCGMemOp s_bits, bswap;
data_reg = *args++;
addr_reg = *args++;
mem_index = *args;
- s_bits = opc & 3;
-
-#ifdef TARGET_WORDS_BIGENDIAN
- bswap = 1;
-#else
- bswap = 0;
-#endif
+ s_bits = opc & MO_SIZE;
+ bswap = opc & MO_BSWAP;
/* Read the TLB entry */
tcg_out_qemu_tlb(s, addr_reg, s_bits,
@@ -1563,8 +1623,7 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
/* P6 is the fast path, and P7 the slow path */
tcg_out_bundle(s, mLX,
- tcg_opc_a4 (TCG_REG_P7, OPC_ADDS_A4,
- TCG_REG_R56, 0, TCG_AREG0),
+ tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R56, TCG_AREG0),
tcg_opc_l2 ((tcg_target_long) qemu_ld_helpers[s_bits]),
tcg_opc_x2 (TCG_REG_P7, OPC_MOVL_X2, TCG_REG_R2,
(tcg_target_long) qemu_ld_helpers[s_bits]));
@@ -1575,14 +1634,14 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
TCG_REG_R3, TCG_REG_R57),
tcg_opc_i21(TCG_REG_P7, OPC_MOV_I21, TCG_REG_B6,
TCG_REG_R3, 0));
- if (bswap && s_bits == 1) {
+ if (bswap && s_bits == MO_16) {
tcg_out_bundle(s, MmI,
tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
TCG_REG_R8, TCG_REG_R3),
tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12,
TCG_REG_R8, TCG_REG_R8, 15, 15));
- } else if (bswap && s_bits == 2) {
+ } else if (bswap && s_bits == MO_32) {
tcg_out_bundle(s, MmI,
tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
TCG_REG_R8, TCG_REG_R3),
@@ -1594,38 +1653,26 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
TCG_REG_R8, TCG_REG_R3),
tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+ INSN_NOP_I);
}
- if (!bswap || s_bits == 0) {
+ if (!bswap) {
tcg_out_bundle(s, miB,
- tcg_opc_a5 (TCG_REG_P7, OPC_ADDL_A5, TCG_REG_R58,
- mem_index, TCG_REG_R0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+ tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R58, mem_index),
+ INSN_NOP_I,
tcg_opc_b5 (TCG_REG_P7, OPC_BR_CALL_SPTK_MANY_B5,
TCG_REG_B0, TCG_REG_B6));
} else {
tcg_out_bundle(s, miB,
- tcg_opc_a5 (TCG_REG_P7, OPC_ADDL_A5, TCG_REG_R58,
- mem_index, TCG_REG_R0),
- tcg_opc_i3 (TCG_REG_P6, OPC_MUX1_I3,
- TCG_REG_R8, TCG_REG_R8, 0xb),
+ tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R58, mem_index),
+ tcg_opc_bswap64_i(TCG_REG_P6, TCG_REG_R8, TCG_REG_R8),
tcg_opc_b5 (TCG_REG_P7, OPC_BR_CALL_SPTK_MANY_B5,
TCG_REG_B0, TCG_REG_B6));
}
- if (opc == 3) {
- tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
- tcg_opc_a4 (TCG_REG_P0, OPC_ADDS_A4,
- data_reg, 0, TCG_REG_R8));
- } else {
- tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
- tcg_opc_i29(TCG_REG_P0, opc_ext_i29[opc],
- data_reg, TCG_REG_R8));
- }
+ tcg_out_bundle(s, miI,
+ INSN_NOP_M,
+ INSN_NOP_I,
+ tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, TCG_REG_R8));
}
/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
@@ -1637,32 +1684,30 @@ static const void * const qemu_st_helpers[4] = {
helper_stq_mmu,
};
-static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
+static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
+ TCGMemOp opc)
{
- int addr_reg, data_reg, mem_index, bswap;
- uint64_t opc_st_m4[4] = { OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4 };
+ static const uint64_t opc_st_m4[4] = {
+ OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4
+ };
+ int addr_reg, data_reg, mem_index;
+ TCGMemOp s_bits;
data_reg = *args++;
addr_reg = *args++;
mem_index = *args;
+ s_bits = opc & MO_SIZE;
-#ifdef TARGET_WORDS_BIGENDIAN
- bswap = 1;
-#else
- bswap = 0;
-#endif
-
- tcg_out_qemu_tlb(s, addr_reg, opc,
+ tcg_out_qemu_tlb(s, addr_reg, s_bits,
offsetof(CPUArchState, tlb_table[mem_index][0].addr_write),
offsetof(CPUArchState, tlb_table[mem_index][0].addend));
/* P6 is the fast path, and P7 the slow path */
tcg_out_bundle(s, mLX,
- tcg_opc_a4 (TCG_REG_P7, OPC_ADDS_A4,
- TCG_REG_R56, 0, TCG_AREG0),
- tcg_opc_l2 ((tcg_target_long) qemu_st_helpers[opc]),
+ tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R56, TCG_AREG0),
+ tcg_opc_l2 ((tcg_target_long) qemu_st_helpers[s_bits]),
tcg_opc_x2 (TCG_REG_P7, OPC_MOVL_X2, TCG_REG_R2,
- (tcg_target_long) qemu_st_helpers[opc]));
+ (tcg_target_long) qemu_st_helpers[s_bits]));
tcg_out_bundle(s, MmI,
tcg_opc_m3 (TCG_REG_P0, OPC_LD8_M3, TCG_REG_R3,
TCG_REG_R2, 8),
@@ -1671,150 +1716,145 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
tcg_opc_i21(TCG_REG_P7, OPC_MOV_I21, TCG_REG_B6,
TCG_REG_R3, 0));
- if (!bswap || opc == 0) {
+ switch (opc) {
+ case MO_8:
+ case MO_16:
+ case MO_32:
+ case MO_64:
tcg_out_bundle(s, mii,
tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1,
TCG_REG_R1, TCG_REG_R2),
- tcg_opc_a4 (TCG_REG_P7, OPC_ADDS_A4, TCG_REG_R58,
- 0, data_reg),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
- } else if (opc == 1) {
+ tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R58, data_reg),
+ INSN_NOP_I);
+ break;
+
+ case MO_16 | MO_BSWAP:
tcg_out_bundle(s, miI,
tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1,
TCG_REG_R1, TCG_REG_R2),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+ INSN_NOP_I,
tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12,
TCG_REG_R2, data_reg, 15, 15));
tcg_out_bundle(s, miI,
- tcg_opc_a4 (TCG_REG_P7, OPC_ADDS_A4, TCG_REG_R58,
- 0, data_reg),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
- tcg_opc_i3 (TCG_REG_P6, OPC_MUX1_I3,
- TCG_REG_R2, TCG_REG_R2, 0xb));
+ tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R58, data_reg),
+ INSN_NOP_I,
+ tcg_opc_bswap64_i(TCG_REG_P6, TCG_REG_R2, TCG_REG_R2));
data_reg = TCG_REG_R2;
- } else if (opc == 2) {
+ break;
+
+ case MO_32 | MO_BSWAP:
tcg_out_bundle(s, miI,
tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1,
TCG_REG_R1, TCG_REG_R2),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+ INSN_NOP_I,
tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12,
TCG_REG_R2, data_reg, 31, 31));
tcg_out_bundle(s, miI,
- tcg_opc_a4 (TCG_REG_P7, OPC_ADDS_A4, TCG_REG_R58,
- 0, data_reg),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
- tcg_opc_i3 (TCG_REG_P6, OPC_MUX1_I3,
- TCG_REG_R2, TCG_REG_R2, 0xb));
+ tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R58, data_reg),
+ INSN_NOP_I,
+ tcg_opc_bswap64_i(TCG_REG_P6, TCG_REG_R2, TCG_REG_R2));
data_reg = TCG_REG_R2;
- } else if (opc == 3) {
+ break;
+
+ case MO_64 | MO_BSWAP:
tcg_out_bundle(s, miI,
tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1,
TCG_REG_R1, TCG_REG_R2),
- tcg_opc_a4 (TCG_REG_P7, OPC_ADDS_A4, TCG_REG_R58,
- 0, data_reg),
- tcg_opc_i3 (TCG_REG_P6, OPC_MUX1_I3,
- TCG_REG_R2, data_reg, 0xb));
+ tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R58, data_reg),
+ tcg_opc_bswap64_i(TCG_REG_P6, TCG_REG_R2, data_reg));
data_reg = TCG_REG_R2;
+ break;
+
+ default:
+ tcg_abort();
}
tcg_out_bundle(s, miB,
- tcg_opc_m4 (TCG_REG_P6, opc_st_m4[opc],
+ tcg_opc_m4 (TCG_REG_P6, opc_st_m4[s_bits],
data_reg, TCG_REG_R3),
- tcg_opc_a5 (TCG_REG_P7, OPC_ADDL_A5, TCG_REG_R59,
- mem_index, TCG_REG_R0),
+ tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R59, mem_index),
tcg_opc_b5 (TCG_REG_P7, OPC_BR_CALL_SPTK_MANY_B5,
TCG_REG_B0, TCG_REG_B6));
}
#else /* !CONFIG_SOFTMMU */
-static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
+static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
+ TCGMemOp opc)
{
static uint64_t const opc_ld_m1[4] = {
OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1
};
- static uint64_t const opc_sxt_i29[4] = {
- OPC_SXT1_I29, OPC_SXT2_I29, OPC_SXT4_I29, 0
- };
- int addr_reg, data_reg, s_bits, bswap;
+ int addr_reg, data_reg;
+ TCGMemOp s_bits, bswap;
data_reg = *args++;
addr_reg = *args++;
- s_bits = opc & 3;
-
-#ifdef TARGET_WORDS_BIGENDIAN
- bswap = 1;
-#else
- bswap = 0;
-#endif
+ s_bits = opc & MO_SIZE;
+ bswap = opc & MO_BSWAP;
#if TARGET_LONG_BITS == 32
if (GUEST_BASE != 0) {
tcg_out_bundle(s, mII,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,
TCG_REG_R3, addr_reg),
tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
TCG_GUEST_BASE_REG, TCG_REG_R3));
} else {
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,
TCG_REG_R2, addr_reg),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+ INSN_NOP_I);
}
- if (!bswap || s_bits == 0) {
- if (s_bits == opc) {
+ if (!bswap) {
+ if (!(opc & MO_SIGN)) {
tcg_out_bundle(s, miI,
tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
data_reg, TCG_REG_R2),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+ INSN_NOP_I,
+ INSN_NOP_I);
} else {
tcg_out_bundle(s, mII,
tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
data_reg, TCG_REG_R2),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
- tcg_opc_i29(TCG_REG_P0, opc_sxt_i29[s_bits],
- data_reg, data_reg));
+ INSN_NOP_I,
+ tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, data_reg));
}
- } else if (s_bits == 3) {
+ } else if (s_bits == MO_64) {
tcg_out_bundle(s, mII,
tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
data_reg, TCG_REG_R2),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
- tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
- data_reg, data_reg, 0xb));
+ INSN_NOP_I,
+ tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg));
} else {
- if (s_bits == 1) {
+ if (s_bits == MO_16) {
tcg_out_bundle(s, mII,
tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
data_reg, TCG_REG_R2),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+ INSN_NOP_I,
tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
data_reg, data_reg, 15, 15));
} else {
tcg_out_bundle(s, mII,
tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
data_reg, TCG_REG_R2),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
+ INSN_NOP_I,
tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
data_reg, data_reg, 31, 31));
}
- if (opc == s_bits) {
+ if (!(opc & MO_SIGN)) {
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
- tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
- data_reg, data_reg, 0xb));
+ INSN_NOP_M,
+ INSN_NOP_I,
+ tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg));
} else {
tcg_out_bundle(s, mII,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
- data_reg, data_reg, 0xb),
- tcg_opc_i29(TCG_REG_P0, opc_sxt_i29[s_bits],
- data_reg, data_reg));
+ INSN_NOP_M,
+ tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg),
+ tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, data_reg));
}
}
#else
@@ -1824,157 +1864,149 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
TCG_GUEST_BASE_REG, addr_reg),
tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
data_reg, TCG_REG_R2),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+ INSN_NOP_I);
} else {
tcg_out_bundle(s, mmI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_m1 (TCG_REG_P0, opc_ld_m1[s_bits],
data_reg, addr_reg),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+ INSN_NOP_I);
}
- if (bswap && s_bits == 1) {
+ if (bswap && s_bits == MO_16) {
tcg_out_bundle(s, mII,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
data_reg, data_reg, 15, 15),
- tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
- data_reg, data_reg, 0xb));
- } else if (bswap && s_bits == 2) {
+ tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg));
+ } else if (bswap && s_bits == MO_32) {
tcg_out_bundle(s, mII,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
data_reg, data_reg, 31, 31),
- tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
- data_reg, data_reg, 0xb));
- } else if (bswap && s_bits == 3) {
+ tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg));
+ } else if (bswap && s_bits == MO_64) {
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
- tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
- data_reg, data_reg, 0xb));
+ INSN_NOP_M,
+ INSN_NOP_I,
+ tcg_opc_bswap64_i(TCG_REG_P0, data_reg, data_reg));
}
- if (s_bits != opc) {
+ if (opc & MO_SIGN) {
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
- tcg_opc_i29(TCG_REG_P0, opc_sxt_i29[s_bits],
- data_reg, data_reg));
+ INSN_NOP_M,
+ INSN_NOP_I,
+ tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, data_reg));
}
#endif
}
-static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
+static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
+ TCGMemOp opc)
{
static uint64_t const opc_st_m4[4] = {
OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4
};
- int addr_reg, data_reg, bswap;
+ int addr_reg, data_reg;
#if TARGET_LONG_BITS == 64
uint64_t add_guest_base;
#endif
+ TCGMemOp s_bits, bswap;
data_reg = *args++;
addr_reg = *args++;
-
-#ifdef TARGET_WORDS_BIGENDIAN
- bswap = 1;
-#else
- bswap = 0;
-#endif
+ s_bits = opc & MO_SIZE;
+ bswap = opc & MO_BSWAP;
#if TARGET_LONG_BITS == 32
if (GUEST_BASE != 0) {
tcg_out_bundle(s, mII,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,
TCG_REG_R3, addr_reg),
tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
TCG_GUEST_BASE_REG, TCG_REG_R3));
} else {
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29,
TCG_REG_R2, addr_reg),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+ INSN_NOP_I);
}
if (bswap) {
- if (opc == 1) {
+ if (s_bits == MO_16) {
tcg_out_bundle(s, mII,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
TCG_REG_R3, data_reg, 15, 15),
- tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
- TCG_REG_R3, TCG_REG_R3, 0xb));
+ tcg_opc_bswap64_i(TCG_REG_P0,
+ TCG_REG_R3, TCG_REG_R3));
data_reg = TCG_REG_R3;
- } else if (opc == 2) {
+ } else if (s_bits == MO_32) {
tcg_out_bundle(s, mII,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
TCG_REG_R3, data_reg, 31, 31),
- tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
- TCG_REG_R3, TCG_REG_R3, 0xb));
+ tcg_opc_bswap64_i(TCG_REG_P0,
+ TCG_REG_R3, TCG_REG_R3));
data_reg = TCG_REG_R3;
- } else if (opc == 3) {
+ } else if (s_bits == MO_64) {
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
- tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
- TCG_REG_R3, data_reg, 0xb));
+ INSN_NOP_M,
+ INSN_NOP_I,
+ tcg_opc_bswap64_i(TCG_REG_P0, TCG_REG_R3, data_reg));
data_reg = TCG_REG_R3;
}
}
tcg_out_bundle(s, mmI,
- tcg_opc_m4 (TCG_REG_P0, opc_st_m4[opc],
+ tcg_opc_m4 (TCG_REG_P0, opc_st_m4[s_bits],
data_reg, TCG_REG_R2),
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+ INSN_NOP_M,
+ INSN_NOP_I);
#else
if (GUEST_BASE != 0) {
add_guest_base = tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
TCG_GUEST_BASE_REG, addr_reg);
addr_reg = TCG_REG_R2;
} else {
- add_guest_base = tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0);
+ add_guest_base = INSN_NOP_M;
}
- if (!bswap || opc == 0) {
+ if (!bswap) {
tcg_out_bundle(s, (GUEST_BASE ? MmI : mmI),
add_guest_base,
- tcg_opc_m4 (TCG_REG_P0, opc_st_m4[opc],
+ tcg_opc_m4 (TCG_REG_P0, opc_st_m4[s_bits],
data_reg, addr_reg),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+ INSN_NOP_I);
} else {
- if (opc == 1) {
+ if (s_bits == MO_16) {
tcg_out_bundle(s, mII,
add_guest_base,
tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
TCG_REG_R3, data_reg, 15, 15),
- tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
- TCG_REG_R3, TCG_REG_R3, 0xb));
+ tcg_opc_bswap64_i(TCG_REG_P0,
+ TCG_REG_R3, TCG_REG_R3));
data_reg = TCG_REG_R3;
- } else if (opc == 2) {
+ } else if (s_bits == MO_32) {
tcg_out_bundle(s, mII,
add_guest_base,
tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12,
TCG_REG_R3, data_reg, 31, 31),
- tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
- TCG_REG_R3, TCG_REG_R3, 0xb));
+ tcg_opc_bswap64_i(TCG_REG_P0,
+ TCG_REG_R3, TCG_REG_R3));
data_reg = TCG_REG_R3;
- } else if (opc == 3) {
+ } else if (s_bits == MO_64) {
tcg_out_bundle(s, miI,
add_guest_base,
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
- tcg_opc_i3 (TCG_REG_P0, OPC_MUX1_I3,
- TCG_REG_R3, data_reg, 0xb));
+ INSN_NOP_I,
+ tcg_opc_bswap64_i(TCG_REG_P0, TCG_REG_R3, data_reg));
data_reg = TCG_REG_R3;
}
tcg_out_bundle(s, miI,
- tcg_opc_m4 (TCG_REG_P0, opc_st_m4[opc],
+ tcg_opc_m4 (TCG_REG_P0, opc_st_m4[s_bits],
data_reg, addr_reg),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0),
- tcg_opc_i18(TCG_REG_P0, OPC_NOP_I18, 0));
+ INSN_NOP_I,
+ INSN_NOP_I);
}
#endif
}
@@ -1992,7 +2024,11 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_br(s, args[0]);
break;
case INDEX_op_call:
- tcg_out_call(s, args[0]);
+ if (likely(const_args[0])) {
+ tcg_out_calli(s, args[0]);
+ } else {
+ tcg_out_callr(s, args[0]);
+ }
break;
case INDEX_op_goto_tb:
tcg_out_goto_tb(s, args[0]);
@@ -2052,24 +2088,23 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_add_i32:
case INDEX_op_add_i64:
- tcg_out_alu(s, OPC_ADD_A1, args[0], args[1], const_args[1],
- args[2], const_args[2]);
+ tcg_out_add(s, args[0], args[1], args[2], const_args[2]);
break;
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
- tcg_out_alu(s, OPC_SUB_A1, args[0], args[1], const_args[1],
- args[2], const_args[2]);
+ tcg_out_sub(s, args[0], args[1], const_args[1], args[2], const_args[2]);
break;
case INDEX_op_and_i32:
case INDEX_op_and_i64:
- tcg_out_alu(s, OPC_AND_A1, args[0], args[1], const_args[1],
- args[2], const_args[2]);
+ /* TCG expects arg2 constant; A3 expects arg1 constant. Swap. */
+ tcg_out_alu(s, OPC_AND_A1, OPC_AND_A3, args[0],
+ args[2], const_args[2], args[1], const_args[1]);
break;
case INDEX_op_andc_i32:
case INDEX_op_andc_i64:
- tcg_out_alu(s, OPC_ANDCM_A1, args[0], args[1], const_args[1],
- args[2], const_args[2]);
+ tcg_out_alu(s, OPC_ANDCM_A1, OPC_ANDCM_A3, args[0],
+ args[1], const_args[1], args[2], const_args[2]);
break;
case INDEX_op_eqv_i32:
case INDEX_op_eqv_i64:
@@ -2088,8 +2123,9 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
case INDEX_op_or_i32:
case INDEX_op_or_i64:
- tcg_out_alu(s, OPC_OR_A1, args[0], args[1], const_args[1],
- args[2], const_args[2]);
+ /* TCG expects arg2 constant; A3 expects arg1 constant. Swap. */
+ tcg_out_alu(s, OPC_OR_A1, OPC_OR_A3, args[0],
+ args[2], const_args[2], args[1], const_args[1]);
break;
case INDEX_op_orc_i32:
case INDEX_op_orc_i64:
@@ -2098,8 +2134,9 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
case INDEX_op_xor_i32:
case INDEX_op_xor_i64:
- tcg_out_alu(s, OPC_XOR_A1, args[0], args[1], const_args[1],
- args[2], const_args[2]);
+ /* TCG expects arg2 constant; A3 expects arg1 constant. Swap. */
+ tcg_out_alu(s, OPC_XOR_A1, OPC_XOR_A3, args[0],
+ args[2], const_args[2], args[1], const_args[1]);
break;
case INDEX_op_mul_i32:
@@ -2180,12 +2217,10 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
case INDEX_op_brcond_i32:
- tcg_out_brcond(s, args[2], args[0], const_args[0],
- args[1], const_args[1], args[3], 1);
+ tcg_out_brcond(s, args[2], args[0], args[1], args[3], 1);
break;
case INDEX_op_brcond_i64:
- tcg_out_brcond(s, args[2], args[0], const_args[0],
- args[1], const_args[1], args[3], 0);
+ tcg_out_brcond(s, args[2], args[0], args[1], args[3], 0);
break;
case INDEX_op_setcond_i32:
tcg_out_setcond(s, args[3], args[0], args[1], args[2], 1);
@@ -2203,39 +2238,39 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
case INDEX_op_qemu_ld8u:
- tcg_out_qemu_ld(s, args, 0);
+ tcg_out_qemu_ld(s, args, MO_UB);
break;
case INDEX_op_qemu_ld8s:
- tcg_out_qemu_ld(s, args, 0 | 4);
+ tcg_out_qemu_ld(s, args, MO_SB);
break;
case INDEX_op_qemu_ld16u:
- tcg_out_qemu_ld(s, args, 1);
+ tcg_out_qemu_ld(s, args, MO_TEUW);
break;
case INDEX_op_qemu_ld16s:
- tcg_out_qemu_ld(s, args, 1 | 4);
+ tcg_out_qemu_ld(s, args, MO_TESW);
break;
case INDEX_op_qemu_ld32:
case INDEX_op_qemu_ld32u:
- tcg_out_qemu_ld(s, args, 2);
+ tcg_out_qemu_ld(s, args, MO_TEUL);
break;
case INDEX_op_qemu_ld32s:
- tcg_out_qemu_ld(s, args, 2 | 4);
+ tcg_out_qemu_ld(s, args, MO_TESL);
break;
case INDEX_op_qemu_ld64:
- tcg_out_qemu_ld(s, args, 3);
+ tcg_out_qemu_ld(s, args, MO_TEQ);
break;
case INDEX_op_qemu_st8:
- tcg_out_qemu_st(s, args, 0);
+ tcg_out_qemu_st(s, args, MO_UB);
break;
case INDEX_op_qemu_st16:
- tcg_out_qemu_st(s, args, 1);
+ tcg_out_qemu_st(s, args, MO_TEUW);
break;
case INDEX_op_qemu_st32:
- tcg_out_qemu_st(s, args, 2);
+ tcg_out_qemu_st(s, args, MO_TEUL);
break;
case INDEX_op_qemu_st64:
- tcg_out_qemu_st(s, args, 3);
+ tcg_out_qemu_st(s, args, MO_TEQ);
break;
default:
@@ -2245,7 +2280,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
static const TCGTargetOpDef ia64_op_defs[] = {
{ INDEX_op_br, { } },
- { INDEX_op_call, { "r" } },
+ { INDEX_op_call, { "ri" } },
{ INDEX_op_exit_tb, { } },
{ INDEX_op_goto_tb, { } },
@@ -2261,7 +2296,7 @@ static const TCGTargetOpDef ia64_op_defs[] = {
{ INDEX_op_st16_i32, { "rZ", "r" } },
{ INDEX_op_st_i32, { "rZ", "r" } },
- { INDEX_op_add_i32, { "r", "rI", "rI" } },
+ { INDEX_op_add_i32, { "r", "rZ", "rI" } },
{ INDEX_op_sub_i32, { "r", "rI", "rI" } },
{ INDEX_op_and_i32, { "r", "rI", "rI" } },
@@ -2289,7 +2324,7 @@ static const TCGTargetOpDef ia64_op_defs[] = {
{ INDEX_op_bswap16_i32, { "r", "rZ" } },
{ INDEX_op_bswap32_i32, { "r", "rZ" } },
- { INDEX_op_brcond_i32, { "rI", "rI" } },
+ { INDEX_op_brcond_i32, { "rZ", "rZ" } },
{ INDEX_op_setcond_i32, { "r", "rZ", "rZ" } },
{ INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rI", "rI" } },
@@ -2308,7 +2343,7 @@ static const TCGTargetOpDef ia64_op_defs[] = {
{ INDEX_op_st32_i64, { "rZ", "r" } },
{ INDEX_op_st_i64, { "rZ", "r" } },
- { INDEX_op_add_i64, { "r", "rI", "rI" } },
+ { INDEX_op_add_i64, { "r", "rZ", "rI" } },
{ INDEX_op_sub_i64, { "r", "rI", "rI" } },
{ INDEX_op_and_i64, { "r", "rI", "rI" } },
@@ -2339,7 +2374,7 @@ static const TCGTargetOpDef ia64_op_defs[] = {
{ INDEX_op_bswap32_i64, { "r", "rZ" } },
{ INDEX_op_bswap64_i64, { "r", "rZ" } },
- { INDEX_op_brcond_i64, { "rI", "rI" } },
+ { INDEX_op_brcond_i64, { "rZ", "rZ" } },
{ INDEX_op_setcond_i64, { "r", "rZ", "rZ" } },
{ INDEX_op_movcond_i64, { "r", "rZ", "rZ", "rI", "rI" } },
@@ -2384,8 +2419,7 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_out_bundle(s, miI,
tcg_opc_m34(TCG_REG_P0, OPC_ALLOC_M34,
TCG_REG_R34, 32, 24, 0),
- tcg_opc_a4 (TCG_REG_P0, OPC_ADDS_A4,
- TCG_AREG0, 0, TCG_REG_R32),
+ INSN_NOP_I,
tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21,
TCG_REG_B6, TCG_REG_R33, 0));
@@ -2393,7 +2427,7 @@ static void tcg_target_qemu_prologue(TCGContext *s)
an ADDL in the M slot of the next bundle. */
if (GUEST_BASE != 0) {
tcg_out_bundle(s, mlx,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_l2 (GUEST_BASE),
tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2,
TCG_GUEST_BASE_REG, GUEST_BASE));
@@ -2404,19 +2438,19 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_opc_a4 (TCG_REG_P0, OPC_ADDS_A4,
TCG_REG_R12, -frame_size, TCG_REG_R12),
tcg_opc_i22(TCG_REG_P0, OPC_MOV_I22,
- TCG_REG_R32, TCG_REG_B0),
+ TCG_REG_R33, TCG_REG_B0),
tcg_opc_b4 (TCG_REG_P0, OPC_BR_SPTK_MANY_B4, TCG_REG_B6));
/* epilogue */
tb_ret_addr = s->code_ptr;
tcg_out_bundle(s, miI,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21,
- TCG_REG_B0, TCG_REG_R32, 0),
+ TCG_REG_B0, TCG_REG_R33, 0),
tcg_opc_a4 (TCG_REG_P0, OPC_ADDS_A4,
TCG_REG_R12, frame_size, TCG_REG_R12));
tcg_out_bundle(s, miB,
- tcg_opc_m48(TCG_REG_P0, OPC_NOP_M48, 0),
+ INSN_NOP_M,
tcg_opc_i26(TCG_REG_P0, OPC_MOV_I_I26,
TCG_REG_PFS, TCG_REG_R34),
tcg_opc_b4 (TCG_REG_P0, OPC_BR_RET_SPTK_MANY_B4,
@@ -2469,16 +2503,17 @@ static void tcg_target_init(TCGContext *s)
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3); /* internal use */
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R12); /* stack pointer */
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_R32); /* return address */
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R33); /* return address */
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R34); /* PFS */
- /* The following 3 are not in use, are call-saved, but *not* saved
+ /* The following 4 are not in use, are call-saved, but *not* saved
by the prologue. Therefore we cannot use them without modifying
the prologue. There doesn't seem to be any good reason to use
these as opposed to the windowed registers. */
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R4);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R5);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R6);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R7);
tcg_add_target_add_op_defs(ia64_op_defs);
}
diff --git a/tcg/ia64/tcg-target.h b/tcg/ia64/tcg-target.h
index c90038aae5..52a939c946 100644
--- a/tcg/ia64/tcg-target.h
+++ b/tcg/ia64/tcg-target.h
@@ -92,6 +92,8 @@ typedef enum {
TCG_REG_R61,
TCG_REG_R62,
TCG_REG_R63,
+
+ TCG_AREG0 = TCG_REG_R32,
} TCGReg;
#define TCG_CT_CONST_ZERO 0x100
@@ -162,8 +164,6 @@ typedef enum {
#define TCG_TARGET_HAS_not_i32 0 /* xor r1, -1, r3 */
#define TCG_TARGET_HAS_not_i64 0 /* xor r1, -1, r3 */
-#define TCG_AREG0 TCG_REG_R7
-
static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
start = start & ~(32UL - 1UL);