Diffstat (limited to 'tcg/i386/tcg-target.h')
 tcg/i386/tcg-target.h | 104 +++++++++++++++++++++-------------
 1 file changed, 63 insertions(+), 41 deletions(-)
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index f378d29568..a10d4e1fce 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -25,15 +25,16 @@
#ifndef I386_TCG_TARGET_H
#define I386_TCG_TARGET_H
+#include "host/cpuinfo.h"
+
#define TCG_TARGET_INSN_UNIT_SIZE 1
-#define TCG_TARGET_TLB_DISPLACEMENT_BITS 31
#ifdef __x86_64__
-# define TCG_TARGET_REG_BITS 64
# define TCG_TARGET_NB_REGS 32
+# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
#else
-# define TCG_TARGET_REG_BITS 32
# define TCG_TARGET_NB_REGS 24
+# define MAX_CODE_GEN_BUFFER_SIZE UINT32_MAX
#endif
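
Aside (not part of the patch): a minimal stand-alone sketch of why the x86-64 cap is 2 GiB: intra-buffer jumps are emitted with signed 32-bit (rel32) displacements, so the whole code buffer has to stay within that reach. The helper name is made up for illustration.

#include <stdbool.h>
#include <stdint.h>

/* True if a 5-byte "jmp rel32" placed at 'from' can reach 'to'. */
static bool displacement_fits_rel32(uintptr_t from, uintptr_t to)
{
    intptr_t disp = (intptr_t)to - (intptr_t)(from + 5); /* rel32 is relative to the next insn */
    return disp == (int32_t)disp;
}
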
typedef enum {
@@ -96,11 +97,34 @@ typedef enum {
#else
#define TCG_TARGET_CALL_STACK_OFFSET 0
#endif
+#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
+#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
+#if defined(_WIN64)
+# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_BY_REF
+# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_VEC
+#elif TCG_TARGET_REG_BITS == 64
+# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
+# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
+#else
+# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
+# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF
+#endif
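
Aside (not part of the patch): an illustrative, non-QEMU snippet of the host ABI behaviour these I128 settings mirror. Under the SysV x86-64 ABI a 128-bit integer travels in general-register pairs (the NORMAL settings); Win64 has no native 128-bit integer class, so the argument goes by reference and the result comes back in a vector register, matching BY_REF and BY_VEC above.

/* SysV x86-64: a arrives in RDI:RSI, b in RDX:RCX, and the sum is returned
 * with the low half in RAX and the high half in RDX. */
__int128 add_i128(__int128 a, __int128 b)
{
    return a + b;
}
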
-extern bool have_bmi1;
-extern bool have_popcnt;
-extern bool have_avx1;
-extern bool have_avx2;
+#define have_bmi1 (cpuinfo & CPUINFO_BMI1)
+#define have_popcnt (cpuinfo & CPUINFO_POPCNT)
+#define have_avx1 (cpuinfo & CPUINFO_AVX1)
+#define have_avx2 (cpuinfo & CPUINFO_AVX2)
+#define have_movbe (cpuinfo & CPUINFO_MOVBE)
+
+/*
+ * There are interesting instructions in AVX512, so long as we have AVX512VL,
+ * which indicates support for EVEX on sizes smaller than 512 bits.
+ */
+#define have_avx512vl ((cpuinfo & CPUINFO_AVX512VL) && \
+ (cpuinfo & CPUINFO_AVX512F))
+#define have_avx512bw ((cpuinfo & CPUINFO_AVX512BW) && have_avx512vl)
+#define have_avx512dq ((cpuinfo & CPUINFO_AVX512DQ) && have_avx512vl)
+#define have_avx512vbmi2 ((cpuinfo & CPUINFO_AVX512VBMI2) && have_avx512vl)
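
Aside (not part of the patch): cpuinfo and the CPUINFO_* bits are filled in once at startup by host/cpuinfo.h; the stand-alone sketch below approximates the have_avx512vl test with GCC/Clang's __builtin_cpu_supports() and is illustrative only.

#include <stdbool.h>

/* EVEX encodings on 128/256-bit vectors need both AVX512F and AVX512VL. */
static bool host_has_small_evex(void)
{
    return __builtin_cpu_supports("avx512f") &&
           __builtin_cpu_supports("avx512vl");
}
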
/* optional instructions */
#define TCG_TARGET_HAS_div2_i32 1
@@ -111,7 +135,6 @@ extern bool have_avx2;
#define TCG_TARGET_HAS_ext16u_i32 1
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_neg_i32 1
#define TCG_TARGET_HAS_not_i32 1
#define TCG_TARGET_HAS_andc_i32 have_bmi1
#define TCG_TARGET_HAS_orc_i32 0
@@ -124,20 +147,18 @@ extern bool have_avx2;
#define TCG_TARGET_HAS_deposit_i32 1
#define TCG_TARGET_HAS_extract_i32 1
#define TCG_TARGET_HAS_sextract_i32 1
-#define TCG_TARGET_HAS_movcond_i32 1
+#define TCG_TARGET_HAS_extract2_i32 1
+#define TCG_TARGET_HAS_negsetcond_i32 1
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
#define TCG_TARGET_HAS_mulu2_i32 1
#define TCG_TARGET_HAS_muls2_i32 1
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_goto_ptr 1
-#define TCG_TARGET_HAS_direct_jump 1
#if TCG_TARGET_REG_BITS == 64
-/* Keep target addresses zero-extended in a register. */
-#define TCG_TARGET_HAS_extrl_i64_i32 (TARGET_LONG_BITS == 32)
-#define TCG_TARGET_HAS_extrh_i64_i32 (TARGET_LONG_BITS == 32)
+/* Keep 32-bit values zero-extended in a register. */
+#define TCG_TARGET_HAS_extr_i64_i32 1
#define TCG_TARGET_HAS_div2_i64 1
#define TCG_TARGET_HAS_rot_i64 1
#define TCG_TARGET_HAS_ext8s_i64 1
@@ -149,7 +170,6 @@ extern bool have_avx2;
#define TCG_TARGET_HAS_bswap16_i64 1
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
-#define TCG_TARGET_HAS_neg_i64 1
#define TCG_TARGET_HAS_not_i64 1
#define TCG_TARGET_HAS_andc_i64 have_bmi1
#define TCG_TARGET_HAS_orc_i64 0
@@ -162,33 +182,52 @@ extern bool have_avx2;
#define TCG_TARGET_HAS_deposit_i64 1
#define TCG_TARGET_HAS_extract_i64 1
#define TCG_TARGET_HAS_sextract_i64 0
-#define TCG_TARGET_HAS_movcond_i64 1
+#define TCG_TARGET_HAS_extract2_i64 1
+#define TCG_TARGET_HAS_negsetcond_i64 1
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1
#define TCG_TARGET_HAS_mulu2_i64 1
#define TCG_TARGET_HAS_muls2_i64 1
#define TCG_TARGET_HAS_muluh_i64 0
#define TCG_TARGET_HAS_mulsh_i64 0
+#define TCG_TARGET_HAS_qemu_st8_i32 0
+#else
+#define TCG_TARGET_HAS_qemu_st8_i32 1
#endif
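
Aside (not part of the patch): TCG_TARGET_HAS_qemu_st8_i32 is only needed on 32-bit hosts because there only EAX/EBX/ECX/EDX have byte sub-registers, so an 8-bit guest store must be constrained to that class; with a REX prefix, x86-64 can store the low byte of any register. The fragment below is illustrative, not QEMU's constraint machinery.

#include <stdint.h>

static inline void store_byte(uint8_t *p, uint32_t v)
{
    /* "q" restricts v to a byte-addressable register on 32-bit x86;
     * %b1 names its 8-bit sub-register (e.g. %al). */
    asm volatile("movb %b1, %0" : "=m"(*p) : "q"(v));
}
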
+#define TCG_TARGET_HAS_qemu_ldst_i128 \
+ (TCG_TARGET_REG_BITS == 64 && (cpuinfo & CPUINFO_ATOMIC_VMOVDQA))
+
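Aside (not part of the patch): CPUINFO_ATOMIC_VMOVDQA reports that an aligned 16-byte VMOVDQA load or store is single-copy atomic on this host, which is what lets the backend offer qemu_ld/st_i128 directly. The intrinsics below show the kind of access involved (the pointer must be 16-byte aligned); they are illustrative only.

#include <immintrin.h>

static inline __m128i load_i128(const void *p)
{
    return _mm_load_si128((const __m128i *)p);
}

static inline void store_i128(void *p, __m128i v)
{
    _mm_store_si128((__m128i *)p, v);
}
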
+#define TCG_TARGET_HAS_tst 1
+
/* We do not support older SSE systems, only beginning with AVX1. */
#define TCG_TARGET_HAS_v64 have_avx1
#define TCG_TARGET_HAS_v128 have_avx1
#define TCG_TARGET_HAS_v256 have_avx2
#define TCG_TARGET_HAS_andc_vec 1
-#define TCG_TARGET_HAS_orc_vec 0
-#define TCG_TARGET_HAS_not_vec 0
+#define TCG_TARGET_HAS_orc_vec have_avx512vl
+#define TCG_TARGET_HAS_nand_vec have_avx512vl
+#define TCG_TARGET_HAS_nor_vec have_avx512vl
+#define TCG_TARGET_HAS_eqv_vec have_avx512vl
+#define TCG_TARGET_HAS_not_vec have_avx512vl
#define TCG_TARGET_HAS_neg_vec 0
+#define TCG_TARGET_HAS_abs_vec 1
+#define TCG_TARGET_HAS_roti_vec have_avx512vl
+#define TCG_TARGET_HAS_rots_vec 0
+#define TCG_TARGET_HAS_rotv_vec have_avx512vl
#define TCG_TARGET_HAS_shi_vec 1
-#define TCG_TARGET_HAS_shs_vec 0
-#define TCG_TARGET_HAS_shv_vec 0
-#define TCG_TARGET_HAS_cmp_vec 1
+#define TCG_TARGET_HAS_shs_vec 1
+#define TCG_TARGET_HAS_shv_vec have_avx2
#define TCG_TARGET_HAS_mul_vec 1
+#define TCG_TARGET_HAS_sat_vec 1
+#define TCG_TARGET_HAS_minmax_vec 1
+#define TCG_TARGET_HAS_bitsel_vec have_avx512vl
+#define TCG_TARGET_HAS_cmpsel_vec -1
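
Aside (not part of the patch): bitsel(v, t, f) picks, bit by bit, from t where v is set and from f where it is clear; with AVX-512VL the whole operation is a single VPTERNLOG, which is why it is gated on have_avx512vl above. The SSE expression below shows the same computation and is illustrative only.

#include <immintrin.h>

static inline __m128i bitsel_128(__m128i v, __m128i t, __m128i f)
{
    /* (v & t) | (~v & f) */
    return _mm_or_si128(_mm_and_si128(v, t), _mm_andnot_si128(v, f));
}
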
#define TCG_TARGET_deposit_i32_valid(ofs, len) \
- (((ofs) == 0 && (len) == 8) || ((ofs) == 8 && (len) == 8) || \
- ((ofs) == 0 && (len) == 16))
+ (((ofs) == 0 && ((len) == 8 || (len) == 16)) || \
+ (TCG_TARGET_REG_BITS == 32 && (ofs) == 8 && (len) == 8))
#define TCG_TARGET_deposit_i64_valid TCG_TARGET_deposit_i32_valid
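
Aside (not part of the patch): deposit(dst, src, ofs, len) replaces len bits of dst starting at bit ofs with the low len bits of src. The (ofs, len) pairs accepted above are the ones x86 can do with a single sub-register move: (0, 8) and (0, 16) everywhere, plus (8, 8) on 32-bit hosts where the %ah-style high-byte registers are freely usable. The reference model below is illustrative only.

#include <stdint.h>

static inline uint32_t deposit32_ref(uint32_t dst, uint32_t src,
                                     unsigned ofs, unsigned len)
{
    uint32_t mask = (len < 32 ? (1u << len) - 1 : ~0u) << ofs;
    return (dst & ~mask) | ((src << ofs) & mask);
}
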
/* Check for the possibility of high-byte extraction and, for 64-bit,
   zero-extending 32-bit right-shift.  */
@@ -197,18 +236,6 @@ extern bool have_avx2;
#define TCG_TARGET_extract_i64_valid(ofs, len) \
(((ofs) == 8 && (len) == 8) || ((ofs) + (len)) == 32)
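
Aside (not part of the patch): the two accepted shapes map onto cheap x86 idioms. ofs == 8 with len == 8 is a high-byte read (a movzbl from an %ah-style register), and ofs + len == 32 works because a 32-bit shift already zero-extends its result on x86-64, so the field is one shift away. Both helpers below are illustrative only.

#include <stdint.h>

/* extract(x, 8, 8): the second byte. */
static inline uint64_t extract_byte1(uint64_t x)
{
    return (x >> 8) & 0xff;
}

/* extract(x, ofs, 32 - ofs): a 32-bit shift clears the upper half for free. */
static inline uint64_t extract_to_bit32(uint64_t x, unsigned ofs)
{
    return (uint32_t)x >> ofs;
}
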
-static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
-{
-}
-
-static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
- uintptr_t jmp_addr, uintptr_t addr)
-{
- /* patch the branch destination */
- atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
- /* no need to flush icache explicitly */
-}
-
/* This defines the natural memory order supported by this
* architecture before guarantees made by various barrier
* instructions.
@@ -216,15 +243,10 @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
* The x86 has a pretty strong memory ordering which only really
* allows for some stores to be re-ordered after loads.
*/
-#include "tcg-mo.h"
+#include "tcg/tcg-mo.h"
#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
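
Aside (not part of the patch): the one reordering TCG_TARGET_DEFAULT_MO does not rule out is a store becoming visible after a later load (TCG_MO_ST_LD); making that pair ordered on x86 needs a full barrier, while every other load/store combination is already ordered by the hardware. The C11 snippet below is illustrative only.

#include <stdatomic.h>

static _Atomic int flag_mine, flag_other;

static int announce_then_check(void)
{
    atomic_store_explicit(&flag_mine, 1, memory_order_release);
    /* Without this fence the store above may be observed after the load
     * below on x86; all other orderings hold without it. */
    atomic_thread_fence(memory_order_seq_cst);
    return atomic_load_explicit(&flag_other, memory_order_acquire);
}
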
-
-#define TCG_TARGET_HAS_MEMORY_BSWAP 1
-
-#ifdef CONFIG_SOFTMMU
#define TCG_TARGET_NEED_LDST_LABELS
-#endif
#define TCG_TARGET_NEED_POOL_LABELS
#endif