author     Richard Henderson <rth@twiddle.net>     2010-06-03 17:35:17 -0700
committer  Aurelien Jarno <aurelien@aurel32.net>   2010-06-10 00:16:21 +0200
commit     5d8a4f8f4aa5b63eb3cc2a2234ffb8d4f0a2af50
tree       eda84dcc0fa390a3d98b762105b6310f491af743
parent     1c0fd16018384daeb340b389a00768ceecc45611
tcg-i386: Merge 64-bit generation.
Signed-off-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
Diffstat (limited to 'tcg/i386')
-rw-r--r--  tcg/i386/tcg-target.c  | 1156
-rw-r--r--  tcg/i386/tcg-target.h  |   61
2 files changed, 887 insertions(+), 330 deletions(-)
diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
index 147ba1744f..4d952007b5 100644
--- a/tcg/i386/tcg-target.c
+++ b/tcg/i386/tcg-target.c
@@ -24,18 +24,33 @@
#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
- "%eax",
- "%ecx",
- "%edx",
- "%ebx",
- "%esp",
- "%ebp",
- "%esi",
- "%edi",
+#if TCG_TARGET_REG_BITS == 64
+ "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
+ "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
+#else
+ "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
+#endif
};
#endif
static const int tcg_target_reg_alloc_order[] = {
+#if TCG_TARGET_REG_BITS == 64
+ TCG_REG_RBP,
+ TCG_REG_RBX,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R14,
+ TCG_REG_R15,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R9,
+ TCG_REG_R8,
+ TCG_REG_RCX,
+ TCG_REG_RDX,
+ TCG_REG_RSI,
+ TCG_REG_RDI,
+ TCG_REG_RAX,
+#else
TCG_REG_EBX,
TCG_REG_ESI,
TCG_REG_EDI,
@@ -43,10 +58,28 @@ static const int tcg_target_reg_alloc_order[] = {
TCG_REG_ECX,
TCG_REG_EDX,
TCG_REG_EAX,
+#endif
};
-static const int tcg_target_call_iarg_regs[3] = { TCG_REG_EAX, TCG_REG_EDX, TCG_REG_ECX };
-static const int tcg_target_call_oarg_regs[2] = { TCG_REG_EAX, TCG_REG_EDX };
+static const int tcg_target_call_iarg_regs[] = {
+#if TCG_TARGET_REG_BITS == 64
+ TCG_REG_RDI,
+ TCG_REG_RSI,
+ TCG_REG_RDX,
+ TCG_REG_RCX,
+ TCG_REG_R8,
+ TCG_REG_R9,
+#else
+ TCG_REG_EAX,
+ TCG_REG_EDX,
+ TCG_REG_ECX
+#endif
+};
+
+static const int tcg_target_call_oarg_regs[2] = {
+ TCG_REG_EAX,
+ TCG_REG_EDX
+};
static uint8_t *tb_ret_addr;
@@ -55,14 +88,15 @@ static void patch_reloc(uint8_t *code_ptr, int type,
{
value += addend;
switch(type) {
- case R_386_32:
- *(uint32_t *)code_ptr = value;
- break;
case R_386_PC32:
- *(uint32_t *)code_ptr = value - (long)code_ptr;
+ value -= (uintptr_t)code_ptr;
+ if (value != (int32_t)value) {
+ tcg_abort();
+ }
+ *(uint32_t *)code_ptr = value;
break;
case R_386_PC8:
- value -= (long)code_ptr;
+ value -= (uintptr_t)code_ptr;
if (value != (int8_t)value) {
tcg_abort();
}
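
The rewritten R_386_PC32 case now checks that the displacement still fits in
32 bits before storing it, which matters once a 64-bit host can place code
and its branch target more than 2GB apart. A minimal sketch of the same
check (illustrative only, not part of the patch):

    #include <stdint.h>

    /* A rel32 field can hold a displacement only if truncating to
       32 bits and sign-extending back reproduces the value. */
    static int fits_rel32(int64_t disp)
    {
        return disp == (int32_t)disp;
    }
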
@@ -76,6 +110,10 @@ static void patch_reloc(uint8_t *code_ptr, int type,
/* maximum number of register used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
+ if (TCG_TARGET_REG_BITS == 64) {
+ return 6;
+ }
+
flags &= TCG_CALL_TYPE_MASK;
switch(flags) {
case TCG_CALL_TYPE_STD:
@@ -122,20 +160,42 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
break;
case 'q':
ct->ct |= TCG_CT_REG;
- tcg_regset_set32(ct->u.regs, 0, 0xf);
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_regset_set32(ct->u.regs, 0, 0xffff);
+ } else {
+ tcg_regset_set32(ct->u.regs, 0, 0xf);
+ }
break;
case 'r':
ct->ct |= TCG_CT_REG;
- tcg_regset_set32(ct->u.regs, 0, 0xff);
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_regset_set32(ct->u.regs, 0, 0xffff);
+ } else {
+ tcg_regset_set32(ct->u.regs, 0, 0xff);
+ }
break;
/* qemu_ld/st address constraint */
case 'L':
ct->ct |= TCG_CT_REG;
- tcg_regset_set32(ct->u.regs, 0, 0xff);
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX);
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_EDX);
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_regset_set32(ct->u.regs, 0, 0xffff);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_RSI);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_RDI);
+ } else {
+ tcg_regset_set32(ct->u.regs, 0, 0xff);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_EDX);
+ }
+ break;
+
+ case 'e':
+ ct->ct |= TCG_CT_CONST_S32;
+ break;
+ case 'Z':
+ ct->ct |= TCG_CT_CONST_U32;
break;
+
default:
return -1;
}
@@ -148,16 +208,38 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
static inline int tcg_target_const_match(tcg_target_long val,
const TCGArgConstraint *arg_ct)
{
- int ct;
- ct = arg_ct->ct;
- if (ct & TCG_CT_CONST)
+ int ct = arg_ct->ct;
+ if (ct & TCG_CT_CONST) {
return 1;
- else
- return 0;
+ }
+ if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
+ return 1;
+ }
+ if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
+ return 1;
+ }
+ return 0;
}
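
The two new constraint bits accept 64-bit constants that x86-64 immediates
can actually express: 'e' for values representable as a sign-extended imm32,
'Z' for values already zero-extended from 32 bits. A hedged sketch of the
classification, with hypothetical helper names:

    #include <stdint.h>

    static int matches_e(int64_t v) { return v == (int32_t)v;  }  /* sign-extended imm32 */
    static int matches_Z(int64_t v) { return v == (uint32_t)v; }  /* zero-extended 32-bit */
    /* Example: -1 satisfies 'e' but not 'Z'; 0xffffffff satisfies 'Z' but not 'e'. */
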
+#if TCG_TARGET_REG_BITS == 64
+# define LOWREGMASK(x) ((x) & 7)
+#else
+# define LOWREGMASK(x) (x)
+#endif
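
LOWREGMASK reflects how x86-64 splits a register number: only the low three
bits fit in the ModRM/SIB fields, while bit 3 travels in the REX prefix.
For illustration (hypothetical helpers, values shown for %r13):

    static int modrm_bits(int reg) { return reg & 7; }        /* LOWREGMASK */
    static int rex_bit(int reg)    { return (reg & 8) >> 3; } /* REX.B/.R/.X */
    /* e.g. TCG_REG_R13 == 13: ModRM bits 5, REX extension bit 1. */
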
+
#define P_EXT 0x100 /* 0x0f opcode prefix */
#define P_DATA16 0x200 /* 0x66 opcode prefix */
+#if TCG_TARGET_REG_BITS == 64
+# define P_ADDR32 0x400 /* 0x67 opcode prefix */
+# define P_REXW 0x800 /* Set REX.W = 1 */
+# define P_REXB_R 0x1000 /* REG field as byte register */
+# define P_REXB_RM 0x2000 /* R/M field as byte register */
+#else
+# define P_ADDR32 0
+# define P_REXW 0
+# define P_REXB_R 0
+# define P_REXB_RM 0
+#endif
#define OPC_ARITH_EvIz (0x81)
#define OPC_ARITH_EvIb (0x83)
@@ -179,9 +261,11 @@ static inline int tcg_target_const_match(tcg_target_long val,
#define OPC_MOVB_EvGv (0x88) /* stores, more or less */
#define OPC_MOVL_EvGv (0x89) /* stores, more or less */
#define OPC_MOVL_GvEv (0x8b) /* loads, more or less */
+#define OPC_MOVL_EvIz (0xc7)
#define OPC_MOVL_Iv (0xb8)
#define OPC_MOVSBL (0xbe | P_EXT)
#define OPC_MOVSWL (0xbf | P_EXT)
+#define OPC_MOVSLQ (0x63 | P_REXW)
#define OPC_MOVZBL (0xb6 | P_EXT)
#define OPC_MOVZWL (0xb7 | P_EXT)
#define OPC_POP_r32 (0x58)
@@ -189,7 +273,7 @@ static inline int tcg_target_const_match(tcg_target_long val,
#define OPC_PUSH_Iv (0x68)
#define OPC_PUSH_Ib (0x6a)
#define OPC_RET (0xc3)
-#define OPC_SETCC (0x90 | P_EXT) /* ... plus condition code */
+#define OPC_SETCC (0x90 | P_EXT | P_REXB_RM) /* ... plus cc */
#define OPC_SHIFT_1 (0xd1)
#define OPC_SHIFT_Ib (0xc1)
#define OPC_SHIFT_cl (0xd3)
@@ -226,6 +310,8 @@ static inline int tcg_target_const_match(tcg_target_long val,
#define EXT3_IDIV 7
/* Group 5 opcode extensions for 0xff. To be used with OPC_GRP5. */
+#define EXT5_INC_Ev 0
+#define EXT5_DEC_Ev 1
#define EXT5_CALLN_Ev 2
#define EXT5_JMPN_Ev 4
@@ -261,7 +347,45 @@ static const uint8_t tcg_cond_to_jcc[10] = {
[TCG_COND_GTU] = JCC_JA,
};
-static inline void tcg_out_opc(TCGContext *s, int opc)
+#if TCG_TARGET_REG_BITS == 64
+static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
+{
+ int rex;
+
+ if (opc & P_DATA16) {
+ /* We should never be asking for both 16-bit and 64-bit operation. */
+ assert((opc & P_REXW) == 0);
+ tcg_out8(s, 0x66);
+ }
+ if (opc & P_ADDR32) {
+ tcg_out8(s, 0x67);
+ }
+
+ rex = 0;
+ rex |= (opc & P_REXW) >> 8; /* REX.W */
+ rex |= (r & 8) >> 1; /* REX.R */
+ rex |= (x & 8) >> 2; /* REX.X */
+ rex |= (rm & 8) >> 3; /* REX.B */
+
+ /* P_REXB_{R,RM} indicates that the given register is the low byte.
+ For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do,
+ as otherwise the encoding indicates %[abcd]h. Note that the values
+ that are ORed in merely indicate that the REX byte must be present;
+ those bits get discarded in output. */
+ rex |= opc & (r >= 4 ? P_REXB_R : 0);
+ rex |= opc & (rm >= 4 ? P_REXB_RM : 0);
+
+ if (rex) {
+ tcg_out8(s, (uint8_t)(rex | 0x40));
+ }
+
+ if (opc & P_EXT) {
+ tcg_out8(s, 0x0f);
+ }
+ tcg_out8(s, opc);
+}
+#else
+static void tcg_out_opc(TCGContext *s, int opc)
{
if (opc & P_DATA16) {
tcg_out8(s, 0x66);
@@ -271,36 +395,69 @@ static inline void tcg_out_opc(TCGContext *s, int opc)
}
tcg_out8(s, opc);
}
+/* Discard the register arguments to tcg_out_opc early, so as not to penalize
+ the 32-bit compilation paths. This method works with all versions of gcc,
+ whereas relying on optimization may not be able to exclude them. */
+#define tcg_out_opc(s, opc, r, rm, x) (tcg_out_opc)(s, opc)
+#endif
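
As a worked example of the REX logic above, "mov %r9, %r10" (opcode 0x89,
reg = %r9, r/m = %r10, 64-bit operands) assembles as follows; the byte
values are easy to confirm with a disassembler:

    /* Sketch, assuming register numbers %r9 == 9 and %r10 == 10. */
    static int rex_for_mov_r9_r10(void)
    {
        int r = 9, rm = 10, rex = 0x40;
        rex |= 0x08;          /* REX.W: 64-bit operand size */
        rex |= (r & 8) >> 1;  /* REX.R */
        rex |= (rm & 8) >> 3; /* REX.B */
        return rex;           /* 0x4d; full encoding: 4d 89 ca */
    }
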
-static inline void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
+static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
{
- tcg_out_opc(s, opc);
- tcg_out8(s, 0xc0 | (r << 3) | rm);
+ tcg_out_opc(s, opc, r, rm, 0);
+ tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
}
/* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
- We handle either RM and INDEX missing with a -1 value. */
+ We handle either RM or INDEX missing with a negative value. In 64-bit
+ mode for absolute addresses, ~RM is the size of the immediate operand
+ that will follow the instruction. */
static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
- int index, int shift, int32_t offset)
+ int index, int shift,
+ tcg_target_long offset)
{
int mod, len;
- if (index == -1 && rm == -1) {
- /* Absolute address. */
- tcg_out_opc(s, opc);
- tcg_out8(s, (r << 3) | 5);
- tcg_out32(s, offset);
- return;
- }
+ if (index < 0 && rm < 0) {
+ if (TCG_TARGET_REG_BITS == 64) {
+ /* Try for a rip-relative addressing mode. This has replaced
+ the 32-bit-mode absolute addressing encoding. */
+ tcg_target_long pc = (tcg_target_long)s->code_ptr + 5 + ~rm;
+ tcg_target_long disp = offset - pc;
+ if (disp == (int32_t)disp) {
+ tcg_out_opc(s, opc, r, 0, 0);
+ tcg_out8(s, (LOWREGMASK(r) << 3) | 5);
+ tcg_out32(s, disp);
+ return;
+ }
- tcg_out_opc(s, opc);
+ /* Try for an absolute address encoding. This requires the
+ use of the MODRM+SIB encoding and is therefore larger than
+ rip-relative addressing. */
+ if (offset == (int32_t)offset) {
+ tcg_out_opc(s, opc, r, 0, 0);
+ tcg_out8(s, (LOWREGMASK(r) << 3) | 4);
+ tcg_out8(s, (4 << 3) | 5);
+ tcg_out32(s, offset);
+ return;
+ }
+
+ /* ??? The memory isn't directly addressable. */
+ tcg_abort();
+ } else {
+ /* Absolute address. */
+ tcg_out_opc(s, opc, r, 0, 0);
+ tcg_out8(s, (r << 3) | 5);
+ tcg_out32(s, offset);
+ return;
+ }
+ }
/* Find the length of the immediate addend. Note that the encoding
that would be used for (%ebp) indicates absolute addressing. */
- if (rm == -1) {
+ if (rm < 0) {
mod = 0, len = 4, rm = 5;
- } else if (offset == 0 && rm != TCG_REG_EBP) {
+ } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) {
mod = 0, len = 0;
} else if (offset == (int8_t)offset) {
mod = 0x40, len = 1;
@@ -310,22 +467,25 @@ static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
/* Use a single byte MODRM format if possible. Note that the encoding
that would be used for %esp is the escape to the two byte form. */
- if (index == -1 && rm != TCG_REG_ESP) {
+ if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) {
/* Single byte MODRM format. */
- tcg_out8(s, mod | (r << 3) | rm);
+ tcg_out_opc(s, opc, r, rm, 0);
+ tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
} else {
/* Two byte MODRM+SIB format. */
/* Note that the encoding that would place %esp into the index
- field indicates no index register. */
- if (index == -1) {
+ field indicates no index register. In 64-bit mode, the REX.X
+ bit counts, so %r12 can be used as the index. */
+ if (index < 0) {
index = 4;
} else {
assert(index != TCG_REG_ESP);
}
- tcg_out8(s, mod | (r << 3) | 4);
- tcg_out8(s, (shift << 6) | (index << 3) | rm);
+ tcg_out_opc(s, opc, r, rm, index);
+ tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4);
+ tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm));
}
if (len == 1) {
@@ -335,9 +495,9 @@ static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
}
}
-/* rm == -1 means no register index */
-static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm,
- int32_t offset)
+/* A simplification of the above with no index or shift. */
+static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r,
+ int rm, tcg_target_long offset)
{
tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset);
}
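
Note the rip-relative arithmetic above: the displacement is measured from
the end of the instruction, which the code estimates as code_ptr + 5
(opcode + ModRM + disp32) plus ~RM trailing immediate bytes. A sketch of
the reachability test under those assumptions (hypothetical helper):

    #include <stdint.h>

    /* imm_size = number of immediate bytes following the displacement,
       i.e. the ~RM convention described in the comment above. */
    static int rip_reachable(intptr_t code_ptr, int imm_size, intptr_t target)
    {
        intptr_t disp = target - (code_ptr + 5 + imm_size);
        return disp == (int32_t)disp;
    }
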
@@ -345,58 +505,75 @@ static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm,
/* Generate dest op= src. Uses the same ARITH_* codes as tgen_arithi. */
static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src)
{
- tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3), dest, src);
+ /* Propagate an opcode prefix, such as P_REXW. */
+ int ext = subop & ~0x7;
+ subop &= 0x7;
+
+ tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src);
}
static inline void tcg_out_mov(TCGContext *s, TCGType type, int ret, int arg)
{
if (arg != ret) {
- tcg_out_modrm(s, OPC_MOVL_GvEv, ret, arg);
+ int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0);
+ tcg_out_modrm(s, opc, ret, arg);
}
}
-static inline void tcg_out_movi(TCGContext *s, TCGType type,
- int ret, int32_t arg)
+static void tcg_out_movi(TCGContext *s, TCGType type,
+ int ret, tcg_target_long arg)
{
if (arg == 0) {
tgen_arithr(s, ARITH_XOR, ret, ret);
+ return;
+ } else if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
+ tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0);
+ tcg_out32(s, arg);
+ } else if (arg == (int32_t)arg) {
+ tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret);
+ tcg_out32(s, arg);
} else {
- tcg_out8(s, OPC_MOVL_Iv + ret);
+ tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0);
tcg_out32(s, arg);
+ tcg_out32(s, arg >> 31 >> 1);
}
}
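
tcg_out_movi now picks among four encodings by value; for a 64-bit constant
the selection order is as follows (the TCG_TYPE_I32 case also forces the
second form). A compact restatement with illustrative names:

    #include <stdint.h>

    enum movi_kind { XOR_ZERO, MOVL_IMM32, MOVQ_SIMM32, MOVABS_IMM64 };

    static enum movi_kind classify_imm(int64_t v)
    {
        if (v == 0)           return XOR_ZERO;     /* xor reg,reg */
        if (v == (uint32_t)v) return MOVL_IMM32;   /* movl zero-extends */
        if (v == (int32_t)v)  return MOVQ_SIMM32;  /* 0xc7 /0, sign-extended */
        return MOVABS_IMM64;                       /* 10-byte movabs */
    }
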
static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
{
if (val == (int8_t)val) {
- tcg_out_opc(s, OPC_PUSH_Ib);
+ tcg_out_opc(s, OPC_PUSH_Ib, 0, 0, 0);
tcg_out8(s, val);
- } else {
- tcg_out_opc(s, OPC_PUSH_Iv);
+ } else if (val == (int32_t)val) {
+ tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0);
tcg_out32(s, val);
+ } else {
+ tcg_abort();
}
}
static inline void tcg_out_push(TCGContext *s, int reg)
{
- tcg_out_opc(s, OPC_PUSH_r32 + reg);
+ tcg_out_opc(s, OPC_PUSH_r32 + LOWREGMASK(reg), 0, reg, 0);
}
static inline void tcg_out_pop(TCGContext *s, int reg)
{
- tcg_out_opc(s, OPC_POP_r32 + reg);
+ tcg_out_opc(s, OPC_POP_r32 + LOWREGMASK(reg), 0, reg, 0);
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
int arg1, tcg_target_long arg2)
{
- tcg_out_modrm_offset(s, OPC_MOVL_GvEv, ret, arg1, arg2);
+ int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0);
+ tcg_out_modrm_offset(s, opc, ret, arg1, arg2);
}
static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
int arg1, tcg_target_long arg2)
{
- tcg_out_modrm_offset(s, OPC_MOVL_EvGv, arg, arg1, arg2);
+ int opc = OPC_MOVL_EvGv + (type == TCG_TYPE_I64 ? P_REXW : 0);
+ tcg_out_modrm_offset(s, opc, arg, arg1, arg2);
}
static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
@@ -406,35 +583,35 @@ static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
subopc &= 0x7;
if (count == 1) {
- tcg_out_modrm(s, OPC_SHIFT_1 | ext, subopc, reg);
+ tcg_out_modrm(s, OPC_SHIFT_1 + ext, subopc, reg);
} else {
- tcg_out_modrm(s, OPC_SHIFT_Ib | ext, subopc, reg);
+ tcg_out_modrm(s, OPC_SHIFT_Ib + ext, subopc, reg);
tcg_out8(s, count);
}
}
static inline void tcg_out_bswap32(TCGContext *s, int reg)
{
- tcg_out_opc(s, OPC_BSWAP + reg);
+ tcg_out_opc(s, OPC_BSWAP + LOWREGMASK(reg), 0, reg, 0);
}
static inline void tcg_out_rolw_8(TCGContext *s, int reg)
{
- tcg_out_shifti(s, SHIFT_ROL | P_DATA16, reg, 8);
+ tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8);
}
static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
{
/* movzbl */
- assert(src < 4);
- tcg_out_modrm(s, OPC_MOVZBL, dest, src);
+ assert(src < 4 || TCG_TARGET_REG_BITS == 64);
+ tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src);
}
-static void tcg_out_ext8s(TCGContext *s, int dest, int src)
+static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw)
{
/* movsbl */
- assert(src < 4);
- tcg_out_modrm(s, OPC_MOVSBL, dest, src);
+ assert(src < 4 || TCG_TARGET_REG_BITS == 64);
+ tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src);
}
static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
@@ -443,38 +620,94 @@ static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
tcg_out_modrm(s, OPC_MOVZWL, dest, src);
}
-static inline void tcg_out_ext16s(TCGContext *s, int dest, int src)
+static inline void tcg_out_ext16s(TCGContext *s, int dest, int src, int rexw)
{
- /* movswl */
- tcg_out_modrm(s, OPC_MOVSWL, dest, src);
+ /* movsw[lq] */
+ tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src);
}
-static inline void tgen_arithi(TCGContext *s, int c, int r0,
- int32_t val, int cf)
+static inline void tcg_out_ext32u(TCGContext *s, int dest, int src)
{
+ /* 32-bit mov zero extends. */
+ tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src);
+}
+
+static inline void tcg_out_ext32s(TCGContext *s, int dest, int src)
+{
+ tcg_out_modrm(s, OPC_MOVSLQ, dest, src);
+}
+
+static inline void tcg_out_bswap64(TCGContext *s, int reg)
+{
+ tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0);
+}
+
+static void tgen_arithi(TCGContext *s, int c, int r0,
+ tcg_target_long val, int cf)
+{
+ int rexw = 0;
+
+ if (TCG_TARGET_REG_BITS == 64) {
+ rexw = c & -8;
+ c &= 7;
+ }
+
/* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
partial flags update stalls on Pentium4 and are not recommended
by current Intel optimization manuals. */
if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
- int opc = ((c == ARITH_ADD) ^ (val < 0) ? OPC_INC_r32 : OPC_DEC_r32);
- tcg_out_opc(s, opc + r0);
- } else if (val == (int8_t)val) {
- tcg_out_modrm(s, OPC_ARITH_EvIb, c, r0);
+ _Bool is_inc = (c == ARITH_ADD) ^ (val < 0);
+ if (TCG_TARGET_REG_BITS == 64) {
+ /* The single-byte increment encodings are re-tasked as the
+ REX prefixes. Use the MODRM encoding. */
+ tcg_out_modrm(s, OPC_GRP5 + rexw,
+ (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0);
+ } else {
+ tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0);
+ }
+ return;
+ }
+
+ if (c == ARITH_AND) {
+ if (TCG_TARGET_REG_BITS == 64) {
+ if (val == 0xffffffffu) {
+ tcg_out_ext32u(s, r0, r0);
+ return;
+ }
+ if (val == (uint32_t)val) {
+ /* AND with no high bits set can use a 32-bit operation. */
+ rexw = 0;
+ }
+ }
+ if (val == 0xffu) {
+ tcg_out_ext8u(s, r0, r0);
+ return;
+ }
+ if (val == 0xffffu) {
+ tcg_out_ext16u(s, r0, r0);
+ return;
+ }
+ }
+
+ if (val == (int8_t)val) {
+ tcg_out_modrm(s, OPC_ARITH_EvIb + rexw, c, r0);
tcg_out8(s, val);
- } else if (c == ARITH_AND && val == 0xffu && r0 < 4) {
- tcg_out_ext8u(s, r0, r0);
- } else if (c == ARITH_AND && val == 0xffffu) {
- tcg_out_ext16u(s, r0, r0);
- } else {
- tcg_out_modrm(s, OPC_ARITH_EvIz, c, r0);
+ return;
+ }
+ if (rexw == 0 || val == (int32_t)val) {
+ tcg_out_modrm(s, OPC_ARITH_EvIz + rexw, c, r0);
tcg_out32(s, val);
+ return;
}
+
+ tcg_abort();
}
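
The AND special cases trade an ALU operation for a zero-extension when the
mask is exactly 8, 16 or 32 bits wide; besides being shorter, movz/mov does
not read the flags. The mapping, summarized as a reference table (the
strings are descriptive only):

    #include <stdint.h>

    static const struct { uint64_t mask; const char *replacement; } and_shortcut[] = {
        { 0xffu,       "movzbl (tcg_out_ext8u)"  },
        { 0xffffu,     "movzwl (tcg_out_ext16u)" },
        { 0xffffffffu, "movl reg,reg (tcg_out_ext32u, 64-bit only)" },
    };
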
static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
- if (val != 0)
- tgen_arithi(s, ARITH_ADD, reg, val, 0);
+ if (val != 0) {
+ tgen_arithi(s, ARITH_ADD + P_REXW, reg, val, 0);
+ }
}
/* Use SMALL != 0 to force a short forward branch. */
@@ -501,7 +734,7 @@ static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
tcg_out8(s, OPC_JMP_long);
tcg_out32(s, val - 5);
} else {
- tcg_out_opc(s, OPC_JCC_long + opc);
+ tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
tcg_out32(s, val - 6);
}
}
@@ -517,7 +750,7 @@ static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
if (opc == -1) {
tcg_out8(s, OPC_JMP_long);
} else {
- tcg_out_opc(s, OPC_JCC_long + opc);
+ tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
}
tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
s->code_ptr += 4;
@@ -525,28 +758,37 @@ static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
}
static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
- int const_arg2)
+ int const_arg2, int rexw)
{
if (const_arg2) {
if (arg2 == 0) {
/* test r, r */
- tcg_out_modrm(s, OPC_TESTL, arg1, arg1);
+ tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1);
} else {
- tgen_arithi(s, ARITH_CMP, arg1, arg2, 0);
+ tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0);
}
} else {
- tgen_arithr(s, ARITH_CMP, arg1, arg2);
+ tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2);
}
}
-static void tcg_out_brcond(TCGContext *s, TCGCond cond,
- TCGArg arg1, TCGArg arg2, int const_arg2,
- int label_index, int small)
+static void tcg_out_brcond32(TCGContext *s, TCGCond cond,
+ TCGArg arg1, TCGArg arg2, int const_arg2,
+ int label_index, int small)
{
- tcg_out_cmp(s, arg1, arg2, const_arg2);
+ tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
}
+#if TCG_TARGET_REG_BITS == 64
+static void tcg_out_brcond64(TCGContext *s, TCGCond cond,
+ TCGArg arg1, TCGArg arg2, int const_arg2,
+ int label_index, int small)
+{
+ tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
+ tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);
+}
+#else
/* XXX: we implement it at the target level to avoid having to
handle cross basic blocks temporaries */
static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
@@ -556,87 +798,97 @@ static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
label_next = gen_new_label();
switch(args[4]) {
case TCG_COND_EQ:
- tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2],
- label_next, 1);
- tcg_out_brcond(s, TCG_COND_EQ, args[1], args[3], const_args[3],
- args[5], small);
+ tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
+ label_next, 1);
+ tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3],
+ args[5], small);
break;
case TCG_COND_NE:
- tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2],
- args[5], small);
- tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3],
- args[5], small);
+ tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
+ args[5], small);
+ tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3],
+ args[5], small);
break;
case TCG_COND_LT:
- tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3],
- args[5], small);
+ tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
+ args[5], small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
- tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2],
- args[5], small);
+ tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
+ args[5], small);
break;
case TCG_COND_LE:
- tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3],
- args[5], small);
+ tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
+ args[5], small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
- tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2],
- args[5], small);
+ tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
+ args[5], small);
break;
case TCG_COND_GT:
- tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3],
- args[5], small);
+ tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
+ args[5], small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
- tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2],
- args[5], small);
+ tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
+ args[5], small);
break;
case TCG_COND_GE:
- tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3],
- args[5], small);
+ tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
+ args[5], small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
- tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2],
- args[5], small);
+ tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
+ args[5], small);
break;
case TCG_COND_LTU:
- tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3],
- args[5], small);
+ tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
+ args[5], small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
- tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2],
- args[5], small);
+ tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],
+ args[5], small);
break;
case TCG_COND_LEU:
- tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3],
- args[5], small);
+ tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
+ args[5], small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
- tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2],
- args[5], small);
+ tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],
+ args[5], small);
break;
case TCG_COND_GTU:
- tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3],
- args[5], small);
+ tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
+ args[5], small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
- tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2],
- args[5], small);
+ tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],
+ args[5], small);
break;
case TCG_COND_GEU:
- tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3],
- args[5], small);
+ tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
+ args[5], small);
tcg_out_jxx(s, JCC_JNE, label_next, 1);
- tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2],
- args[5], small);
+ tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],
+ args[5], small);
break;
default:
tcg_abort();
}
tcg_out_label(s, label_next, (tcg_target_long)s->code_ptr);
}
+#endif
-static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGArg dest,
- TCGArg arg1, TCGArg arg2, int const_arg2)
+static void tcg_out_setcond32(TCGContext *s, TCGCond cond, TCGArg dest,
+ TCGArg arg1, TCGArg arg2, int const_arg2)
{
- tcg_out_cmp(s, arg1, arg2, const_arg2);
+ tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
tcg_out_ext8u(s, dest, dest);
}
+#if TCG_TARGET_REG_BITS == 64
+static void tcg_out_setcond64(TCGContext *s, TCGCond cond, TCGArg dest,
+ TCGArg arg1, TCGArg arg2, int const_arg2)
+{
+ tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
+ tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
+ tcg_out_ext8u(s, dest, dest);
+}
+#else
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
const int *const_args)
{
@@ -678,11 +930,30 @@ static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
tcg_out_label(s, label_over, (tcg_target_long)s->code_ptr);
}
}
+#endif
+
+static void tcg_out_branch(TCGContext *s, int call, tcg_target_long dest)
+{
+ tcg_target_long disp = dest - (tcg_target_long)s->code_ptr - 5;
+
+ if (disp == (int32_t)disp) {
+ tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
+ tcg_out32(s, disp);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R10, dest);
+ tcg_out_modrm(s, OPC_GRP5,
+ call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev, TCG_REG_R10);
+ }
+}
+
+static inline void tcg_out_calli(TCGContext *s, tcg_target_long dest)
+{
+ tcg_out_branch(s, 1, dest);
+}
-static void tcg_out_calli(TCGContext *s, tcg_target_long dest)
+static void tcg_out_jmp(TCGContext *s, tcg_target_long dest)
{
- tcg_out_opc(s, OPC_CALL_Jz);
- tcg_out32(s, dest - (tcg_target_long)s->code_ptr - 4);
+ tcg_out_branch(s, 0, dest);
}
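
tcg_out_branch centralizes the rel32-versus-indirect decision for both
calls and jumps; %r10 is a safe scratch because it is call-clobbered and
never carries an argument in the x86-64 ABI. The reachability test, as a
sketch:

    #include <stdint.h>

    static int use_direct_rel32(intptr_t code_ptr, intptr_t dest)
    {
        intptr_t disp = dest - code_ptr - 5;   /* 5 = opcode + rel32 */
        return disp == (int32_t)disp;
    }
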
#if defined(CONFIG_SOFTMMU)
@@ -718,11 +989,12 @@ static void *qemu_st_helpers[4] = {
LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses)
positions of the displacements of forward jumps to the TLB miss case.
- EAX is loaded with the low part of the address. In the TLB hit case,
- it has been adjusted as indicated by the TLB and so is a host address.
- In the TLB miss case, it continues to hold a guest address.
+ First argument register is loaded with the low part of the address.
+ In the TLB hit case, it has been adjusted as indicated by the TLB
+ and so is a host address. In the TLB miss case, it continues to
+ hold a guest address.
- EDX is clobbered. */
+ Second argument register is clobbered. */
static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
int mem_index, int s_bits,
@@ -730,32 +1002,42 @@ static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
uint8_t **label_ptr, int which)
{
const int addrlo = args[addrlo_idx];
- const int r0 = TCG_REG_EAX;
- const int r1 = TCG_REG_EDX;
+ const int r0 = tcg_target_call_iarg_regs[0];
+ const int r1 = tcg_target_call_iarg_regs[1];
+ TCGType type = TCG_TYPE_I32;
+ int rexw = 0;
+
+ if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 64) {
+ type = TCG_TYPE_I64;
+ rexw = P_REXW;
+ }
- tcg_out_mov(s, TCG_TYPE_I32, r1, addrlo);
- tcg_out_mov(s, TCG_TYPE_I32, r0, addrlo);
+ tcg_out_mov(s, type, r1, addrlo);
+ tcg_out_mov(s, type, r0, addrlo);
- tcg_out_shifti(s, SHIFT_SHR, r1, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+ tcg_out_shifti(s, SHIFT_SHR + rexw, r1,
+ TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
- tgen_arithi(s, ARITH_AND, r0, TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
- tgen_arithi(s, ARITH_AND, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
+ tgen_arithi(s, ARITH_AND + rexw, r0,
+ TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
+ tgen_arithi(s, ARITH_AND + rexw, r1,
+ (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
- tcg_out_modrm_sib_offset(s, OPC_LEA, r1, TCG_AREG0, r1, 0,
+ tcg_out_modrm_sib_offset(s, OPC_LEA + P_REXW, r1, TCG_AREG0, r1, 0,
offsetof(CPUState, tlb_table[mem_index][0])
+ which);
/* cmp 0(r1), r0 */
- tcg_out_modrm_offset(s, OPC_CMP_GvEv, r0, r1, 0);
+ tcg_out_modrm_offset(s, OPC_CMP_GvEv + rexw, r0, r1, 0);
- tcg_out_mov(s, TCG_TYPE_I32, r0, addrlo);
+ tcg_out_mov(s, type, r0, addrlo);
/* jne label1 */
tcg_out8(s, OPC_JCC_short + JCC_JNE);
label_ptr[0] = s->code_ptr;
s->code_ptr++;
- if (TARGET_LONG_BITS == 64) {
+ if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
/* cmp 4(r1), addrhi */
tcg_out_modrm_offset(s, OPC_CMP_GvEv, args[addrlo_idx+1], r1, 4);
@@ -768,7 +1050,7 @@ static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx,
/* TLB Hit. */
/* add addend(r1), r0 */
- tcg_out_modrm_offset(s, OPC_ADD_GvEv, r0, r1,
+ tcg_out_modrm_offset(s, OPC_ADD_GvEv + P_REXW, r0, r1,
offsetof(CPUTLBEntry, addend) - which);
}
#endif
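
The shift-then-mask pair in tcg_out_tlb_load computes the TLB slot already
scaled to the entry size, avoiding a separate multiply. In C, the address
arithmetic is roughly (parameter names are illustrative):

    #include <stdint.h>

    static uintptr_t tlb_entry_offset(uint64_t vaddr, int page_bits,
                                      int entry_bits, int tlb_size)
    {
        uint64_t x = vaddr >> (page_bits - entry_bits);
        return (uintptr_t)(x & (((uint64_t)tlb_size - 1) << entry_bits));
    }
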
@@ -783,26 +1065,24 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo, int datahi,
#endif
switch (sizeop) {
case 0:
- /* movzbl */
tcg_out_modrm_offset(s, OPC_MOVZBL, datalo, base, ofs);
break;
case 0 | 4:
- /* movsbl */
- tcg_out_modrm_offset(s, OPC_MOVSBL, datalo, base, ofs);
+ tcg_out_modrm_offset(s, OPC_MOVSBL + P_REXW, datalo, base, ofs);
break;
case 1:
- /* movzwl */
tcg_out_modrm_offset(s, OPC_MOVZWL, datalo, base, ofs);
if (bswap) {
tcg_out_rolw_8(s, datalo);
}
break;
case 1 | 4:
- /* movswl */
- tcg_out_modrm_offset(s, OPC_MOVSWL, datalo, base, ofs);
if (bswap) {
+ tcg_out_modrm_offset(s, OPC_MOVZWL, datalo, base, ofs);
tcg_out_rolw_8(s, datalo);
- tcg_out_modrm(s, OPC_MOVSWL, datalo, datalo);
+ tcg_out_modrm(s, OPC_MOVSWL + P_REXW, datalo, datalo);
+ } else {
+ tcg_out_modrm_offset(s, OPC_MOVSWL + P_REXW, datalo, base, ofs);
}
break;
case 2:
@@ -811,22 +1091,40 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo, int datahi,
tcg_out_bswap32(s, datalo);
}
break;
- case 3:
+#if TCG_TARGET_REG_BITS == 64
+ case 2 | 4:
if (bswap) {
- int t = datalo;
- datalo = datahi;
- datahi = t;
- }
- if (base != datalo) {
tcg_out_ld(s, TCG_TYPE_I32, datalo, base, ofs);
- tcg_out_ld(s, TCG_TYPE_I32, datahi, base, ofs + 4);
+ tcg_out_bswap32(s, datalo);
+ tcg_out_ext32s(s, datalo, datalo);
} else {
- tcg_out_ld(s, TCG_TYPE_I32, datahi, base, ofs + 4);
- tcg_out_ld(s, TCG_TYPE_I32, datalo, base, ofs);
+ tcg_out_modrm_offset(s, OPC_MOVSLQ, datalo, base, ofs);
}
- if (bswap) {
- tcg_out_bswap32(s, datalo);
- tcg_out_bswap32(s, datahi);
+ break;
+#endif
+ case 3:
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_out_ld(s, TCG_TYPE_I64, datalo, base, ofs);
+ if (bswap) {
+ tcg_out_bswap64(s, datalo);
+ }
+ } else {
+ if (bswap) {
+ int t = datalo;
+ datalo = datahi;
+ datahi = t;
+ }
+ if (base != datalo) {
+ tcg_out_ld(s, TCG_TYPE_I32, datalo, base, ofs);
+ tcg_out_ld(s, TCG_TYPE_I32, datahi, base, ofs + 4);
+ } else {
+ tcg_out_ld(s, TCG_TYPE_I32, datahi, base, ofs + 4);
+ tcg_out_ld(s, TCG_TYPE_I32, datalo, base, ofs);
+ }
+ if (bswap) {
+ tcg_out_bswap32(s, datalo);
+ tcg_out_bswap32(s, datahi);
+ }
}
break;
default:
@@ -849,20 +1147,21 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
data_reg = args[0];
addrlo_idx = 1;
- if (opc == 3) {
+ if (TCG_TARGET_REG_BITS == 32 && opc == 3) {
data_reg2 = args[1];
addrlo_idx = 2;
}
#if defined(CONFIG_SOFTMMU)
- mem_index = args[addrlo_idx + (TARGET_LONG_BITS / 32)];
+ mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)];
s_bits = opc & 3;
tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args,
label_ptr, offsetof(CPUTLBEntry, addr_read));
/* TLB Hit. */
- tcg_out_qemu_ld_direct(s, data_reg, data_reg2, TCG_REG_EAX, 0, opc);
+ tcg_out_qemu_ld_direct(s, data_reg, data_reg2,
+ tcg_target_call_iarg_regs[0], 0, opc);
/* jmp label2 */
tcg_out8(s, OPC_JMP_short);
@@ -873,14 +1172,14 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
/* label1: */
*label_ptr[0] = s->code_ptr - label_ptr[0] - 1;
- if (TARGET_LONG_BITS == 64) {
+ if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
*label_ptr[1] = s->code_ptr - label_ptr[1] - 1;
}
/* XXX: move that code at the end of the TB */
- /* EAX is already loaded. */
+ /* The first argument is already loaded with addrlo. */
arg_idx = 1;
- if (TARGET_LONG_BITS == 64) {
+ if (TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 64) {
tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[arg_idx++],
args[addrlo_idx + 1]);
}
@@ -890,10 +1189,10 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
switch(opc) {
case 0 | 4:
- tcg_out_ext8s(s, data_reg, TCG_REG_EAX);
+ tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW);
break;
case 1 | 4:
- tcg_out_ext16s(s, data_reg, TCG_REG_EAX);
+ tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW);
break;
case 0:
tcg_out_ext8u(s, data_reg, TCG_REG_EAX);
@@ -902,26 +1201,52 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
tcg_out_ext16u(s, data_reg, TCG_REG_EAX);
break;
case 2:
- default:
tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
break;
+#if TCG_TARGET_REG_BITS == 64
+ case 2 | 4:
+ tcg_out_ext32s(s, data_reg, TCG_REG_EAX);
+ break;
+#endif
case 3:
- if (data_reg == TCG_REG_EDX) {
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
+ } else if (data_reg == TCG_REG_EDX) {
/* xchg %edx, %eax */
- tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX);
+ tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
tcg_out_mov(s, TCG_TYPE_I32, data_reg2, TCG_REG_EAX);
} else {
tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
tcg_out_mov(s, TCG_TYPE_I32, data_reg2, TCG_REG_EDX);
}
break;
+ default:
+ tcg_abort();
}
/* label2: */
*label_ptr[2] = s->code_ptr - label_ptr[2] - 1;
#else
- tcg_out_qemu_ld_direct(s, data_reg, data_reg2,
- args[addrlo_idx], GUEST_BASE, opc);
+ {
+ int32_t offset = GUEST_BASE;
+ int base = args[addrlo_idx];
+
+ if (TCG_TARGET_REG_BITS == 64) {
+ /* ??? We assume all operations have left us with register
+ contents that are zero extended. So far this appears to
+ be true. If we want to enforce this, we can either do
+ an explicit zero-extension here, or (if GUEST_BASE == 0)
+ use the ADDR32 prefix. For now, do nothing. */
+
+ if (offset != GUEST_BASE) {
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_RDI, GUEST_BASE);
+ tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_RDI, base);
+ base = TCG_REG_RDI, offset = 0;
+ }
+ }
+
+ tcg_out_qemu_ld_direct(s, data_reg, data_reg2, base, offset, opc);
+ }
#endif
}
@@ -936,12 +1261,12 @@ static void tcg_out_qemu_st_direct(TCGContext *s, int datalo, int datahi,
/* ??? Ideally we wouldn't need a scratch register. For user-only,
we could perform the bswap twice to restore the original value
instead of moving to the scratch. But as it is, the L constraint
- means that EDX is definitely free here. */
- int scratch = TCG_REG_EDX;
+ means that the second argument reg is definitely free here. */
+ int scratch = tcg_target_call_iarg_regs[1];
switch (sizeop) {
case 0:
- tcg_out_modrm_offset(s, OPC_MOVB_EvGv, datalo, base, ofs);
+ tcg_out_modrm_offset(s, OPC_MOVB_EvGv + P_REXB_R, datalo, base, ofs);
break;
case 1:
if (bswap) {
@@ -949,9 +1274,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, int datalo, int datahi,
tcg_out_rolw_8(s, scratch);
datalo = scratch;
}
- /* movw */
- tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16,
- datalo, base, ofs);
+ tcg_out_modrm_offset(s, OPC_MOVL_EvGv + P_DATA16, datalo, base, ofs);
break;
case 2:
if (bswap) {
@@ -962,7 +1285,14 @@ static void tcg_out_qemu_st_direct(TCGContext *s, int datalo, int datahi,
tcg_out_st(s, TCG_TYPE_I32, datalo, base, ofs);
break;
case 3:
- if (bswap) {
+ if (TCG_TARGET_REG_BITS == 64) {
+ if (bswap) {
+ tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo);
+ tcg_out_bswap64(s, scratch);
+ datalo = scratch;
+ }
+ tcg_out_st(s, TCG_TYPE_I64, datalo, base, ofs);
+ } else if (bswap) {
tcg_out_mov(s, TCG_TYPE_I32, scratch, datahi);
tcg_out_bswap32(s, scratch);
tcg_out_st(s, TCG_TYPE_I32, scratch, base, ofs);
@@ -992,20 +1322,21 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
data_reg = args[0];
addrlo_idx = 1;
- if (opc == 3) {
+ if (TCG_TARGET_REG_BITS == 32 && opc == 3) {
data_reg2 = args[1];
addrlo_idx = 2;
}
#if defined(CONFIG_SOFTMMU)
- mem_index = args[addrlo_idx + (TARGET_LONG_BITS / 32)];
+ mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)];
s_bits = opc;
tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args,
label_ptr, offsetof(CPUTLBEntry, addr_write));
/* TLB Hit. */
- tcg_out_qemu_st_direct(s, data_reg, data_reg2, TCG_REG_EAX, 0, opc);
+ tcg_out_qemu_st_direct(s, data_reg, data_reg2,
+ tcg_target_call_iarg_regs[0], 0, opc);
/* jmp label2 */
tcg_out8(s, OPC_JMP_short);
@@ -1016,12 +1347,17 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
/* label1: */
*label_ptr[0] = s->code_ptr - label_ptr[0] - 1;
- if (TARGET_LONG_BITS == 64) {
+ if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
*label_ptr[1] = s->code_ptr - label_ptr[1] - 1;
}
/* XXX: move that code at the end of the TB */
- if (TARGET_LONG_BITS == 32) {
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32),
+ TCG_REG_RSI, data_reg);
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_RDX, mem_index);
+ stack_adjust = 0;
+ } else if (TARGET_LONG_BITS == 32) {
tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_EDX, data_reg);
if (opc == 3) {
tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_ECX, data_reg2);
@@ -1058,7 +1394,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
tcg_out_calli(s, (tcg_target_long)qemu_st_helpers[s_bits]);
- if (stack_adjust == 4) {
+ if (stack_adjust == (TCG_TARGET_REG_BITS / 8)) {
/* Pop and discard. This is 2 bytes smaller than the add. */
tcg_out_pop(s, TCG_REG_ECX);
} else if (stack_adjust != 0) {
@@ -1068,21 +1404,48 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
/* label2: */
*label_ptr[2] = s->code_ptr - label_ptr[2] - 1;
#else
- tcg_out_qemu_st_direct(s, data_reg, data_reg2,
- args[addrlo_idx], GUEST_BASE, opc);
+ {
+ int32_t offset = GUEST_BASE;
+ int base = args[addrlo_idx];
+
+ if (TCG_TARGET_REG_BITS == 64) {
+ /* ??? We assume all operations have left us with register
+ contents that are zero extended. So far this appears to
+ be true. If we want to enforce this, we can either do
+ an explicit zero-extension here, or (if GUEST_BASE == 0)
+ use the ADDR32 prefix. For now, do nothing. */
+
+ if (offset != GUEST_BASE) {
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_RDI, GUEST_BASE);
+ tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_RDI, base);
+ base = TCG_REG_RDI, offset = 0;
+ }
+ }
+
+ tcg_out_qemu_st_direct(s, data_reg, data_reg2, base, offset, opc);
+ }
#endif
}
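
Both the load and store paths use the same user-mode GUEST_BASE trick:
`offset` is declared int32_t, so `offset != GUEST_BASE` is true exactly
when the base does not survive truncation to a signed 32-bit displacement.
Stated explicitly (hypothetical helper):

    #include <stdint.h>

    static int guest_base_fits_disp32(int64_t guest_base)
    {
        return guest_base == (int32_t)guest_base; /* else add via a register */
    }
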
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg *args, const int *const_args)
{
- int c;
+ int c, rexw = 0;
+
+#if TCG_TARGET_REG_BITS == 64
+# define OP_32_64(x) \
+ case glue(glue(INDEX_op_, x), _i64): \
+ rexw = P_REXW; /* FALLTHRU */ \
+ case glue(glue(INDEX_op_, x), _i32)
+#else
+# define OP_32_64(x) \
+ case glue(glue(INDEX_op_, x), _i32)
+#endif
switch(opc) {
case INDEX_op_exit_tb:
- tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EAX, args[0]);
- tcg_out8(s, OPC_JMP_long); /* jmp tb_ret_addr */
- tcg_out32(s, tb_ret_addr - s->code_ptr - 4);
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, args[0]);
+ tcg_out_jmp(s, (tcg_target_long) tb_ret_addr);
break;
case INDEX_op_goto_tb:
if (s->tb_jmp_offset) {
@@ -1107,8 +1470,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
case INDEX_op_jmp:
if (const_args[0]) {
- tcg_out8(s, OPC_JMP_long);
- tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
+ tcg_out_jmp(s, args[0]);
} else {
/* jmp *reg */
tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, args[0]);
@@ -1120,38 +1482,43 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_movi_i32:
tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
break;
- case INDEX_op_ld8u_i32:
- /* movzbl */
+ OP_32_64(ld8u):
+ /* Note that we can ignore REXW for the zero-extend to 64-bit. */
tcg_out_modrm_offset(s, OPC_MOVZBL, args[0], args[1], args[2]);
break;
- case INDEX_op_ld8s_i32:
- /* movsbl */
- tcg_out_modrm_offset(s, OPC_MOVSBL, args[0], args[1], args[2]);
+ OP_32_64(ld8s):
+ tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, args[0], args[1], args[2]);
break;
- case INDEX_op_ld16u_i32:
- /* movzwl */
+ OP_32_64(ld16u):
+ /* Note that we can ignore REXW for the zero-extend to 64-bit. */
tcg_out_modrm_offset(s, OPC_MOVZWL, args[0], args[1], args[2]);
break;
- case INDEX_op_ld16s_i32:
- /* movswl */
- tcg_out_modrm_offset(s, OPC_MOVSWL, args[0], args[1], args[2]);
+ OP_32_64(ld16s):
+ tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, args[0], args[1], args[2]);
break;
+#if TCG_TARGET_REG_BITS == 64
+ case INDEX_op_ld32u_i64:
+#endif
case INDEX_op_ld_i32:
tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
break;
- case INDEX_op_st8_i32:
- /* movb */
- tcg_out_modrm_offset(s, OPC_MOVB_EvGv, args[0], args[1], args[2]);
+
+ OP_32_64(st8):
+ tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R,
+ args[0], args[1], args[2]);
break;
- case INDEX_op_st16_i32:
- /* movw */
+ OP_32_64(st16):
tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16,
args[0], args[1], args[2]);
break;
+#if TCG_TARGET_REG_BITS == 64
+ case INDEX_op_st32_i64:
+#endif
case INDEX_op_st_i32:
tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
break;
- case INDEX_op_add_i32:
+
+ OP_32_64(add):
/* For 3-operand addition, use LEA. */
if (args[0] != args[1]) {
TCGArg a0 = args[0], a1 = args[1], a2 = args[2], c3 = 0;
@@ -1161,147 +1528,117 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
} else if (a0 == a2) {
/* Watch out for dest = src + dest, since we've removed
the matching constraint on the add. */
- tgen_arithr(s, ARITH_ADD, a0, a1);
+ tgen_arithr(s, ARITH_ADD + rexw, a0, a1);
break;
}
- tcg_out_modrm_sib_offset(s, OPC_LEA, a0, a1, a2, 0, c3);
+ tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3);
break;
}
c = ARITH_ADD;
goto gen_arith;
- case INDEX_op_sub_i32:
+ OP_32_64(sub):
c = ARITH_SUB;
goto gen_arith;
- case INDEX_op_and_i32:
+ OP_32_64(and):
c = ARITH_AND;
goto gen_arith;
- case INDEX_op_or_i32:
+ OP_32_64(or):
c = ARITH_OR;
goto gen_arith;
- case INDEX_op_xor_i32:
+ OP_32_64(xor):
c = ARITH_XOR;
goto gen_arith;
gen_arith:
if (const_args[2]) {
- tgen_arithi(s, c, args[0], args[2], 0);
+ tgen_arithi(s, c + rexw, args[0], args[2], 0);
} else {
- tgen_arithr(s, c, args[0], args[2]);
+ tgen_arithr(s, c + rexw, args[0], args[2]);
}
break;
- case INDEX_op_mul_i32:
+
+ OP_32_64(mul):
if (const_args[2]) {
int32_t val;
val = args[2];
if (val == (int8_t)val) {
- tcg_out_modrm(s, OPC_IMUL_GvEvIb, args[0], args[0]);
+ tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, args[0], args[0]);
tcg_out8(s, val);
} else {
- tcg_out_modrm(s, OPC_IMUL_GvEvIz, args[0], args[0]);
+ tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, args[0], args[0]);
tcg_out32(s, val);
}
} else {
- tcg_out_modrm(s, OPC_IMUL_GvEv, args[0], args[2]);
+ tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, args[0], args[2]);
}
break;
- case INDEX_op_mulu2_i32:
- tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_MUL, args[3]);
- break;
- case INDEX_op_div2_i32:
- tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_IDIV, args[4]);
+
+ OP_32_64(div2):
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]);
break;
- case INDEX_op_divu2_i32:
- tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_DIV, args[4]);
+ OP_32_64(divu2):
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]);
break;
- case INDEX_op_shl_i32:
+
+ OP_32_64(shl):
c = SHIFT_SHL;
- gen_shift32:
- if (const_args[2]) {
- tcg_out_shifti(s, c, args[0], args[2]);
- } else {
- tcg_out_modrm(s, OPC_SHIFT_cl, c, args[0]);
- }
- break;
- case INDEX_op_shr_i32:
+ goto gen_shift;
+ OP_32_64(shr):
c = SHIFT_SHR;
- goto gen_shift32;
- case INDEX_op_sar_i32:
+ goto gen_shift;
+ OP_32_64(sar):
c = SHIFT_SAR;
- goto gen_shift32;
- case INDEX_op_rotl_i32:
+ goto gen_shift;
+ OP_32_64(rotl):
c = SHIFT_ROL;
- goto gen_shift32;
- case INDEX_op_rotr_i32:
+ goto gen_shift;
+ OP_32_64(rotr):
c = SHIFT_ROR;
- goto gen_shift32;
-
- case INDEX_op_add2_i32:
- if (const_args[4]) {
- tgen_arithi(s, ARITH_ADD, args[0], args[4], 1);
- } else {
- tgen_arithr(s, ARITH_ADD, args[0], args[4]);
- }
- if (const_args[5]) {
- tgen_arithi(s, ARITH_ADC, args[1], args[5], 1);
+ goto gen_shift;
+ gen_shift:
+ if (const_args[2]) {
+ tcg_out_shifti(s, c + rexw, args[0], args[2]);
} else {
- tgen_arithr(s, ARITH_ADC, args[1], args[5]);
- }
- break;
- case INDEX_op_sub2_i32:
- if (const_args[4]) {
- tgen_arithi(s, ARITH_SUB, args[0], args[4], 1);
- } else {
- tgen_arithr(s, ARITH_SUB, args[0], args[4]);
- }
- if (const_args[5]) {
- tgen_arithi(s, ARITH_SBB, args[1], args[5], 1);
- } else {
- tgen_arithr(s, ARITH_SBB, args[1], args[5]);
+ tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, args[0]);
}
break;
+
case INDEX_op_brcond_i32:
- tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
- args[3], 0);
+ tcg_out_brcond32(s, args[2], args[0], args[1], const_args[1],
+ args[3], 0);
break;
- case INDEX_op_brcond2_i32:
- tcg_out_brcond2(s, args, const_args, 0);
+ case INDEX_op_setcond_i32:
+ tcg_out_setcond32(s, args[3], args[0], args[1],
+ args[2], const_args[2]);
break;
- case INDEX_op_bswap16_i32:
+ OP_32_64(bswap16):
tcg_out_rolw_8(s, args[0]);
break;
- case INDEX_op_bswap32_i32:
+ OP_32_64(bswap32):
tcg_out_bswap32(s, args[0]);
break;
- case INDEX_op_neg_i32:
- tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_NEG, args[0]);
+ OP_32_64(neg):
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, args[0]);
break;
-
- case INDEX_op_not_i32:
- tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_NOT, args[0]);
+ OP_32_64(not):
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, args[0]);
break;
- case INDEX_op_ext8s_i32:
- tcg_out_ext8s(s, args[0], args[1]);
+ OP_32_64(ext8s):
+ tcg_out_ext8s(s, args[0], args[1], rexw);
break;
- case INDEX_op_ext16s_i32:
- tcg_out_ext16s(s, args[0], args[1]);
+ OP_32_64(ext16s):
+ tcg_out_ext16s(s, args[0], args[1], rexw);
break;
- case INDEX_op_ext8u_i32:
+ OP_32_64(ext8u):
tcg_out_ext8u(s, args[0], args[1]);
break;
- case INDEX_op_ext16u_i32:
+ OP_32_64(ext16u):
tcg_out_ext16u(s, args[0], args[1]);
break;
- case INDEX_op_setcond_i32:
- tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
- break;
- case INDEX_op_setcond2_i32:
- tcg_out_setcond2(s, args, const_args);
- break;
-
case INDEX_op_qemu_ld8u:
tcg_out_qemu_ld(s, args, 0);
break;
@@ -1314,6 +1651,9 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_qemu_ld16s:
tcg_out_qemu_ld(s, args, 1 | 4);
break;
+#if TCG_TARGET_REG_BITS == 64
+ case INDEX_op_qemu_ld32u:
+#endif
case INDEX_op_qemu_ld32:
tcg_out_qemu_ld(s, args, 2);
break;
@@ -1334,9 +1674,82 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_qemu_st(s, args, 3);
break;
+#if TCG_TARGET_REG_BITS == 32
+ case INDEX_op_brcond2_i32:
+ tcg_out_brcond2(s, args, const_args, 0);
+ break;
+ case INDEX_op_setcond2_i32:
+ tcg_out_setcond2(s, args, const_args);
+ break;
+ case INDEX_op_mulu2_i32:
+ tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_MUL, args[3]);
+ break;
+ case INDEX_op_add2_i32:
+ if (const_args[4]) {
+ tgen_arithi(s, ARITH_ADD, args[0], args[4], 1);
+ } else {
+ tgen_arithr(s, ARITH_ADD, args[0], args[4]);
+ }
+ if (const_args[5]) {
+ tgen_arithi(s, ARITH_ADC, args[1], args[5], 1);
+ } else {
+ tgen_arithr(s, ARITH_ADC, args[1], args[5]);
+ }
+ break;
+ case INDEX_op_sub2_i32:
+ if (const_args[4]) {
+ tgen_arithi(s, ARITH_SUB, args[0], args[4], 1);
+ } else {
+ tgen_arithr(s, ARITH_SUB, args[0], args[4]);
+ }
+ if (const_args[5]) {
+ tgen_arithi(s, ARITH_SBB, args[1], args[5], 1);
+ } else {
+ tgen_arithr(s, ARITH_SBB, args[1], args[5]);
+ }
+ break;
+#else /* TCG_TARGET_REG_BITS == 64 */
+ case INDEX_op_movi_i64:
+ tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
+ break;
+ case INDEX_op_ld32s_i64:
+ tcg_out_modrm_offset(s, OPC_MOVSLQ, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_ld_i64:
+ tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_st_i64:
+ tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
+ break;
+ case INDEX_op_qemu_ld32s:
+ tcg_out_qemu_ld(s, args, 2 | 4);
+ break;
+
+ case INDEX_op_brcond_i64:
+ tcg_out_brcond64(s, args[2], args[0], args[1], const_args[1],
+ args[3], 0);
+ break;
+ case INDEX_op_setcond_i64:
+ tcg_out_setcond64(s, args[3], args[0], args[1],
+ args[2], const_args[2]);
+ break;
+
+ case INDEX_op_bswap64_i64:
+ tcg_out_bswap64(s, args[0]);
+ break;
+ case INDEX_op_ext32u_i64:
+ tcg_out_ext32u(s, args[0], args[1]);
+ break;
+ case INDEX_op_ext32s_i64:
+ tcg_out_ext32s(s, args[0], args[1]);
+ break;
+#endif
+
default:
tcg_abort();
}
+
+#undef OP_32_64
}
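
The OP_32_64 macro lets one switch arm serve both operand widths. On a
64-bit host, for example, OP_32_64(add) expands to:

    case INDEX_op_add_i64:
        rexw = P_REXW; /* FALLTHRU */
    case INDEX_op_add_i32:

so the i64 label merely sets REX.W before falling into the shared i32 code.
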
static const TCGTargetOpDef x86_op_defs[] = {
@@ -1359,7 +1772,6 @@ static const TCGTargetOpDef x86_op_defs[] = {
{ INDEX_op_add_i32, { "r", "r", "ri" } },
{ INDEX_op_sub_i32, { "r", "0", "ri" } },
{ INDEX_op_mul_i32, { "r", "0", "ri" } },
- { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
{ INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
{ INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
{ INDEX_op_and_i32, { "r", "0", "ri" } },
@@ -1374,10 +1786,6 @@ static const TCGTargetOpDef x86_op_defs[] = {
{ INDEX_op_brcond_i32, { "r", "ri" } },
- { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
- { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
- { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },
-
{ INDEX_op_bswap16_i32, { "r", "0" } },
{ INDEX_op_bswap32_i32, { "r", "0" } },
@@ -1391,9 +1799,75 @@ static const TCGTargetOpDef x86_op_defs[] = {
{ INDEX_op_ext16u_i32, { "r", "r" } },
{ INDEX_op_setcond_i32, { "q", "r", "ri" } },
+
+#if TCG_TARGET_REG_BITS == 32
+ { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
+ { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
+ { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
+ { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },
{ INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },
+#else
+ { INDEX_op_mov_i64, { "r", "r" } },
+ { INDEX_op_movi_i64, { "r" } },
+ { INDEX_op_ld8u_i64, { "r", "r" } },
+ { INDEX_op_ld8s_i64, { "r", "r" } },
+ { INDEX_op_ld16u_i64, { "r", "r" } },
+ { INDEX_op_ld16s_i64, { "r", "r" } },
+ { INDEX_op_ld32u_i64, { "r", "r" } },
+ { INDEX_op_ld32s_i64, { "r", "r" } },
+ { INDEX_op_ld_i64, { "r", "r" } },
+ { INDEX_op_st8_i64, { "r", "r" } },
+ { INDEX_op_st16_i64, { "r", "r" } },
+ { INDEX_op_st32_i64, { "r", "r" } },
+ { INDEX_op_st_i64, { "r", "r" } },
+
+ { INDEX_op_add_i64, { "r", "0", "re" } },
+ { INDEX_op_mul_i64, { "r", "0", "re" } },
+ { INDEX_op_div2_i64, { "a", "d", "0", "1", "r" } },
+ { INDEX_op_divu2_i64, { "a", "d", "0", "1", "r" } },
+ { INDEX_op_sub_i64, { "r", "0", "re" } },
+ { INDEX_op_and_i64, { "r", "0", "reZ" } },
+ { INDEX_op_or_i64, { "r", "0", "re" } },
+ { INDEX_op_xor_i64, { "r", "0", "re" } },
+
+ { INDEX_op_shl_i64, { "r", "0", "ci" } },
+ { INDEX_op_shr_i64, { "r", "0", "ci" } },
+ { INDEX_op_sar_i64, { "r", "0", "ci" } },
+ { INDEX_op_rotl_i64, { "r", "0", "ci" } },
+ { INDEX_op_rotr_i64, { "r", "0", "ci" } },
+
+ { INDEX_op_brcond_i64, { "r", "re" } },
+ { INDEX_op_setcond_i64, { "r", "r", "re" } },
+
+ { INDEX_op_bswap16_i64, { "r", "0" } },
+ { INDEX_op_bswap32_i64, { "r", "0" } },
+ { INDEX_op_bswap64_i64, { "r", "0" } },
+ { INDEX_op_neg_i64, { "r", "0" } },
+ { INDEX_op_not_i64, { "r", "0" } },
+
+ { INDEX_op_ext8s_i64, { "r", "r" } },
+ { INDEX_op_ext16s_i64, { "r", "r" } },
+ { INDEX_op_ext32s_i64, { "r", "r" } },
+ { INDEX_op_ext8u_i64, { "r", "r" } },
+ { INDEX_op_ext16u_i64, { "r", "r" } },
+ { INDEX_op_ext32u_i64, { "r", "r" } },
+#endif
-#if TARGET_LONG_BITS == 32
+#if TCG_TARGET_REG_BITS == 64
+ { INDEX_op_qemu_ld8u, { "r", "L" } },
+ { INDEX_op_qemu_ld8s, { "r", "L" } },
+ { INDEX_op_qemu_ld16u, { "r", "L" } },
+ { INDEX_op_qemu_ld16s, { "r", "L" } },
+ { INDEX_op_qemu_ld32, { "r", "L" } },
+ { INDEX_op_qemu_ld32u, { "r", "L" } },
+ { INDEX_op_qemu_ld32s, { "r", "L" } },
+ { INDEX_op_qemu_ld64, { "r", "L" } },
+
+ { INDEX_op_qemu_st8, { "L", "L" } },
+ { INDEX_op_qemu_st16, { "L", "L" } },
+ { INDEX_op_qemu_st32, { "L", "L" } },
+ { INDEX_op_qemu_st64, { "L", "L" } },
+#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
{ INDEX_op_qemu_ld8u, { "r", "L" } },
{ INDEX_op_qemu_ld8s, { "r", "L" } },
{ INDEX_op_qemu_ld16u, { "r", "L" } },
@@ -1422,11 +1896,19 @@ static const TCGTargetOpDef x86_op_defs[] = {
};
static int tcg_target_callee_save_regs[] = {
- /* TCG_REG_EBP, */ /* currently used for the global env, so no
- need to save */
+#if TCG_TARGET_REG_BITS == 64
+ TCG_REG_RBP,
+ TCG_REG_RBX,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ /* TCG_REG_R14, */ /* Currently used for the global env. */
+ TCG_REG_R15,
+#else
+ /* TCG_REG_EBP, */ /* Currently used for the global env. */
TCG_REG_EBX,
TCG_REG_ESI,
TCG_REG_EDI,
+#endif
};
/* Generate global QEMU prologue and epilogue code */
@@ -1435,27 +1917,34 @@ static void tcg_target_qemu_prologue(TCGContext *s)
int i, frame_size, push_size, stack_addend;
/* TB prologue */
- /* save all callee saved registers */
- for(i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
+
+ /* Save all callee saved registers. */
+ for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
tcg_out_push(s, tcg_target_callee_save_regs[i]);
}
- /* reserve some stack space */
- push_size = 4 + ARRAY_SIZE(tcg_target_callee_save_regs) * 4;
+
+ /* Reserve some stack space. */
+ push_size = 1 + ARRAY_SIZE(tcg_target_callee_save_regs);
+ push_size *= TCG_TARGET_REG_BITS / 8;
+
frame_size = push_size + TCG_STATIC_CALL_ARGS_SIZE;
frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
~(TCG_TARGET_STACK_ALIGN - 1);
stack_addend = frame_size - push_size;
tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
- tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_EAX); /* jmp *%eax */
+ /* jmp *tb. */
+ tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[0]);
/* TB epilogue */
tb_ret_addr = s->code_ptr;
+
tcg_out_addi(s, TCG_REG_ESP, stack_addend);
- for(i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
+
+ for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
tcg_out_pop(s, tcg_target_callee_save_regs[i]);
}
- tcg_out_opc(s, OPC_RET);
+ tcg_out_opc(s, OPC_RET, 0, 0, 0);
}
static void tcg_target_init(TCGContext *s)
@@ -1466,12 +1955,25 @@ static void tcg_target_init(TCGContext *s)
tcg_abort();
#endif
- tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff);
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
+ tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
+ } else {
+ tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff);
+ }
tcg_regset_clear(tcg_target_call_clobber_regs);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX);
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RDI);
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RSI);
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
+ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
+ }
tcg_regset_clear(s->reserved_regs);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_ESP);
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index ca1d7306ea..bfafbfcbae 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -23,10 +23,18 @@
*/
#define TCG_TARGET_I386 1
-#define TCG_TARGET_REG_BITS 32
+#if defined(__x86_64__)
+# define TCG_TARGET_REG_BITS 64
+#else
+# define TCG_TARGET_REG_BITS 32
+#endif
//#define TCG_TARGET_WORDS_BIGENDIAN
-#define TCG_TARGET_NB_REGS 8
+#if TCG_TARGET_REG_BITS == 64
+# define TCG_TARGET_NB_REGS 16
+#else
+# define TCG_TARGET_NB_REGS 8
+#endif
enum {
TCG_REG_EAX = 0,
@@ -37,8 +45,30 @@ enum {
TCG_REG_EBP,
TCG_REG_ESI,
TCG_REG_EDI,
+
+ /* 64-bit registers; always define the symbols to avoid
+ too much if-deffing. */
+ TCG_REG_R8,
+ TCG_REG_R9,
+ TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
+ TCG_REG_R13,
+ TCG_REG_R14,
+ TCG_REG_R15,
+ TCG_REG_RAX = TCG_REG_EAX,
+ TCG_REG_RCX = TCG_REG_ECX,
+ TCG_REG_RDX = TCG_REG_EDX,
+ TCG_REG_RBX = TCG_REG_EBX,
+ TCG_REG_RSP = TCG_REG_ESP,
+ TCG_REG_RBP = TCG_REG_EBP,
+ TCG_REG_RSI = TCG_REG_ESI,
+ TCG_REG_RDI = TCG_REG_EDI,
};
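
The aliases give both modes a single numbering: RAX..RDI are 0..7 and
R8..R15 are 8..15, so bit 3 of any register number is exactly the REX
extension bit that LOWREGMASK strips. A hypothetical compile-time check of
that assumption:

    /* Fails to compile if the enum layout ever changes. */
    typedef char assert_r8_is_reg_8[(TCG_REG_R8 == 8) ? 1 : -1];
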
+#define TCG_CT_CONST_S32 0x100
+#define TCG_CT_CONST_U32 0x200
+
/* used for function call generation */
#define TCG_REG_CALL_STACK TCG_REG_ESP
#define TCG_TARGET_STACK_ALIGN 16
@@ -61,10 +91,35 @@ enum {
// #define TCG_TARGET_HAS_nand_i32
// #define TCG_TARGET_HAS_nor_i32
+#if TCG_TARGET_REG_BITS == 64
+#define TCG_TARGET_HAS_div2_i64
+#define TCG_TARGET_HAS_rot_i64
+#define TCG_TARGET_HAS_ext8s_i64
+#define TCG_TARGET_HAS_ext16s_i64
+#define TCG_TARGET_HAS_ext32s_i64
+#define TCG_TARGET_HAS_ext8u_i64
+#define TCG_TARGET_HAS_ext16u_i64
+#define TCG_TARGET_HAS_ext32u_i64
+#define TCG_TARGET_HAS_bswap16_i64
+#define TCG_TARGET_HAS_bswap32_i64
+#define TCG_TARGET_HAS_bswap64_i64
+#define TCG_TARGET_HAS_neg_i64
+#define TCG_TARGET_HAS_not_i64
+// #define TCG_TARGET_HAS_andc_i64
+// #define TCG_TARGET_HAS_orc_i64
+// #define TCG_TARGET_HAS_eqv_i64
+// #define TCG_TARGET_HAS_nand_i64
+// #define TCG_TARGET_HAS_nor_i64
+#endif
+
#define TCG_TARGET_HAS_GUEST_BASE
/* Note: must be synced with dyngen-exec.h */
-#define TCG_AREG0 TCG_REG_EBP
+#if TCG_TARGET_REG_BITS == 64
+# define TCG_AREG0 TCG_REG_R14
+#else
+# define TCG_AREG0 TCG_REG_EBP
+#endif
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{