Diffstat (limited to 'tcg/tcg.c')
-rw-r--r--  tcg/tcg.c  5663
1 file changed, 3944 insertions, 1719 deletions
diff --git a/tcg/tcg.c b/tcg/tcg.c
index f34f52fbdb..0c0bb9d169 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -22,9 +22,6 @@
* THE SOFTWARE.
*/
-/* define it to use liveness analysis (better code) */
-#define USE_TCG_OPTIMIZATIONS
-
#include "qemu/osdep.h"
/* Define to dump the ELF file used to communicate with GDB. */
@@ -33,25 +30,21 @@
#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
+#include "qemu/qemu-print.h"
+#include "qemu/cacheflush.h"
+#include "qemu/cacheinfo.h"
#include "qemu/timer.h"
-
-/* Note: the long term plan is to reduce the dependencies on the QEMU
- CPU definitions. Currently they are used for qemu_ld/st
- instructions */
-#define NO_CPU_IO_DEFS
-#include "cpu.h"
-
-#include "exec/cpu-common.h"
-#include "exec/exec-all.h"
-
-#include "tcg-op.h"
+#include "exec/translation-block.h"
+#include "exec/tlb-common.h"
+#include "tcg/startup.h"
+#include "tcg/tcg-op-common.h"
#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS ELFCLASS32
#else
# define ELF_CLASS ELFCLASS64
#endif
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
# define ELF_DATA ELFDATA2MSB
#else
# define ELF_DATA ELFDATA2LSB
@@ -59,12 +52,17 @@
#include "elf.h"
#include "exec/log.h"
-#include "sysemu/sysemu.h"
+#include "tcg/tcg-ldst.h"
+#include "tcg/tcg-temp-internal.h"
+#include "tcg-internal.h"
+#include "tcg/perf.h"
+#ifdef CONFIG_USER_ONLY
+#include "exec/user/guest-base.h"
+#endif
-/* Forward declarations for functions declared in tcg-target.inc.c and
+/* Forward declarations for functions declared in tcg-target.c.inc and
used here. */
static void tcg_target_init(TCGContext *s);
-static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
intptr_t value, intptr_t addend);
@@ -92,29 +90,78 @@ typedef struct QEMU_PACKED {
DebugFrameFDEHeader fde;
} DebugFrameHeader;
-static void tcg_register_jit_int(void *buf, size_t size,
+typedef struct TCGLabelQemuLdst {
+ bool is_ld; /* qemu_ld: true, qemu_st: false */
+ MemOpIdx oi;
+ TCGType type; /* result type of a load */
+ TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */
+ TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */
+ TCGReg datalo_reg; /* reg index for low word to be loaded or stored */
+ TCGReg datahi_reg; /* reg index for high word to be loaded or stored */
+ const tcg_insn_unit *raddr; /* host code addr following the qemu_ld/st */
+ tcg_insn_unit *label_ptr[2]; /* label pointers to be updated */
+ QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next;
+} TCGLabelQemuLdst;
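Each TCGLabelQemuLdst records one slow-path memory access: the backend emits the fast path, queues a descriptor, and finalization later patches label_ptr[] so the slow path can return to raddr. A minimal sketch of how a backend might queue one, assuming the TCG_TARGET_NEED_LDST_LABELS queue (s->ldst_labels); the helper name is illustrative, not this patch's API:

    static TCGLabelQemuLdst *new_ldst_sketch(TCGContext *s, bool is_ld,
                                             MemOpIdx oi, TCGReg data,
                                             TCGReg addr)
    {
        TCGLabelQemuLdst *l = tcg_malloc(sizeof(*l));

        memset(l, 0, sizeof(*l));      /* zero all patch slots */
        l->is_ld = is_ld;
        l->oi = oi;
        l->datalo_reg = data;
        l->addrlo_reg = addr;
        /* Drained later by tcg_out_ldst_finalize. */
        QSIMPLEQ_INSERT_TAIL(&s->ldst_labels, l, next);
        return l;
    }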
+
+static void tcg_register_jit_int(const void *buf, size_t size,
const void *debug_frame,
size_t debug_frame_size)
__attribute__((unused));
-/* Forward declarations for functions declared and used in tcg-target.inc.c. */
-static const char *target_parse_constraint(TCGArgConstraint *ct,
- const char *ct_str, TCGType type);
+/* Forward declarations for functions declared and used in tcg-target.c.inc. */
+static void tcg_out_tb_start(TCGContext *s);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
intptr_t arg2);
-static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
+static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
TCGReg ret, tcg_target_long arg);
-static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
- const int *const_args);
+static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
+static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
+static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg);
+static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg);
+static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg);
+static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg);
+static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
+static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg);
+static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg);
+static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
+static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2);
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
+static void tcg_out_goto_tb(TCGContext *s, int which);
+static void tcg_out_op(TCGContext *s, TCGOpcode opc,
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS]);
#if TCG_TARGET_MAYBE_vec
-static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
- unsigned vece, const TCGArg *args,
- const int *const_args);
+static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg dst, TCGReg src);
+static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg dst, TCGReg base, intptr_t offset);
+static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg dst, int64_t arg);
+static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
+ unsigned vecl, unsigned vece,
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS]);
#else
-static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
- unsigned vece, const TCGArg *args,
- const int *const_args)
+static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg dst, TCGReg src)
+{
+ g_assert_not_reached();
+}
+static inline bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg dst, TCGReg base, intptr_t offset)
+{
+ g_assert_not_reached();
+}
+static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
+ TCGReg dst, int64_t arg)
+{
+ g_assert_not_reached();
+}
+static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
+ unsigned vecl, unsigned vece,
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS])
{
g_assert_not_reached();
}
@@ -123,55 +170,85 @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
TCGReg base, intptr_t ofs);
-static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
-static int tcg_target_const_match(tcg_target_long val, TCGType type,
- const TCGArgConstraint *arg_ct);
+static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
+ const TCGHelperInfo *info);
+static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot);
+static bool tcg_target_const_match(int64_t val, int ct,
+ TCGType type, TCGCond cond, int vece);
#ifdef TCG_TARGET_NEED_LDST_LABELS
-static bool tcg_out_ldst_finalize(TCGContext *s);
+static int tcg_out_ldst_finalize(TCGContext *s);
#endif
-#define TCG_HIGHWATER 1024
+#ifndef CONFIG_USER_ONLY
+#define guest_base ({ qemu_build_not_reached(); (uintptr_t)0; })
+#endif
+
+typedef struct TCGLdstHelperParam {
+ TCGReg (*ra_gen)(TCGContext *s, const TCGLabelQemuLdst *l, int arg_reg);
+ unsigned ntmp;
+ int tmp[3];
+} TCGLdstHelperParam;
-static TCGContext **tcg_ctxs;
-static unsigned int n_tcg_ctxs;
-TCGv_env cpu_env = 0;
+static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *l,
+ const TCGLdstHelperParam *p)
+ __attribute__((unused));
+static void tcg_out_ld_helper_ret(TCGContext *s, const TCGLabelQemuLdst *l,
+ bool load_sign, const TCGLdstHelperParam *p)
+ __attribute__((unused));
+static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *l,
+ const TCGLdstHelperParam *p)
+ __attribute__((unused));
-struct tcg_region_tree {
- QemuMutex lock;
- GTree *tree;
- /* padding to avoid false sharing is computed at run-time */
+static void * const qemu_ld_helpers[MO_SSIZE + 1] __attribute__((unused)) = {
+ [MO_UB] = helper_ldub_mmu,
+ [MO_SB] = helper_ldsb_mmu,
+ [MO_UW] = helper_lduw_mmu,
+ [MO_SW] = helper_ldsw_mmu,
+ [MO_UL] = helper_ldul_mmu,
+ [MO_UQ] = helper_ldq_mmu,
+#if TCG_TARGET_REG_BITS == 64
+ [MO_SL] = helper_ldsl_mmu,
+ [MO_128] = helper_ld16_mmu,
+#endif
};
-/*
- * We divide code_gen_buffer into equally-sized "regions" that TCG threads
- * dynamically allocate from as demand dictates. Given appropriate region
- * sizing, this minimizes flushes even when some TCG threads generate a lot
- * more code than others.
- */
-struct tcg_region_state {
- QemuMutex lock;
-
- /* fields set at init time */
- void *start;
- void *start_aligned;
- void *end;
- size_t n;
- size_t size; /* size of one region */
- size_t stride; /* .size + guard size */
-
- /* fields protected by the lock */
- size_t current; /* current region index */
- size_t agg_size_full; /* aggregate size of full regions */
+static void * const qemu_st_helpers[MO_SIZE + 1] __attribute__((unused)) = {
+ [MO_8] = helper_stb_mmu,
+ [MO_16] = helper_stw_mmu,
+ [MO_32] = helper_stl_mmu,
+ [MO_64] = helper_stq_mmu,
+#if TCG_TARGET_REG_BITS == 64
+ [MO_128] = helper_st16_mmu,
+#endif
};
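Both tables are indexed by the size (and, for loads, sign) bits of the MemOp, so a backend can select the out-of-line helper straight from the operation descriptor. A sketch of that indexing, using only calls visible from this file:

    /* Pick the slow-path load helper for a memory op (sketch). */
    static void *ld_helper_for(MemOpIdx oi)
    {
        MemOp opc = get_memop(oi);
        return qemu_ld_helpers[opc & MO_SSIZE];   /* size + sign select */
    }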
-static struct tcg_region_state region;
-/*
- * This is an array of struct tcg_region_tree's, with padding.
- * We use void * to simplify the computation of region_trees[i]; each
- * struct is found every tree_size bytes.
- */
-static void *region_trees;
-static size_t tree_size;
+typedef struct {
+ MemOp atom; /* lg2 bits of atomicity required */
+ MemOp align; /* lg2 bits of alignment to use */
+} TCGAtomAlign;
+
+static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
+ MemOp host_atom, bool allow_two_ops)
+ __attribute__((unused));
+
+#ifdef CONFIG_USER_ONLY
+bool tcg_use_softmmu;
+#endif
+
+TCGContext tcg_init_ctx;
+__thread TCGContext *tcg_ctx;
+
+TCGContext **tcg_ctxs;
+unsigned int tcg_cur_ctxs;
+unsigned int tcg_max_ctxs;
+TCGv_env tcg_env;
+const void *tcg_code_gen_epilogue;
+uintptr_t tcg_splitwx_diff;
+
+#ifndef CONFIG_TCG_INTERPRETER
+tcg_prologue_fn *tcg_qemu_tb_exec;
+#endif
+
static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;
@@ -262,39 +339,19 @@ static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
TCGLabel *l, intptr_t addend)
{
- TCGRelocation *r;
+ TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));
- if (l->has_value) {
- /* FIXME: This may break relocations on RISC targets that
- modify instruction fields in place. The caller may not have
- written the initial value. */
- bool ok = patch_reloc(code_ptr, type, l->u.value, addend);
- tcg_debug_assert(ok);
- } else {
- /* add a new relocation entry */
- r = tcg_malloc(sizeof(TCGRelocation));
- r->type = type;
- r->ptr = code_ptr;
- r->addend = addend;
- r->next = l->u.first_reloc;
- l->u.first_reloc = r;
- }
+ r->type = type;
+ r->ptr = code_ptr;
+ r->addend = addend;
+ QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
}
-static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
+static void tcg_out_label(TCGContext *s, TCGLabel *l)
{
- intptr_t value = (intptr_t)ptr;
- TCGRelocation *r;
-
tcg_debug_assert(!l->has_value);
-
- for (r = l->u.first_reloc; r != NULL; r = r->next) {
- bool ok = patch_reloc(r->ptr, r->type, value, r->addend);
- tcg_debug_assert(ok);
- }
-
l->has_value = 1;
- l->u.value_ptr = ptr;
+ l->u.value_ptr = tcg_splitwx_to_rx(s->code_ptr);
}
TCGLabel *gen_new_label(void)
@@ -302,409 +359,414 @@ TCGLabel *gen_new_label(void)
TCGContext *s = tcg_ctx;
TCGLabel *l = tcg_malloc(sizeof(TCGLabel));
- *l = (TCGLabel){
- .id = s->nb_labels++
- };
+ memset(l, 0, sizeof(TCGLabel));
+ l->id = s->nb_labels++;
+ QSIMPLEQ_INIT(&l->branches);
+ QSIMPLEQ_INIT(&l->relocs);
+
+ QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);
return l;
}
-static void set_jmp_reset_offset(TCGContext *s, int which)
+static bool tcg_resolve_relocs(TCGContext *s)
{
- size_t off = tcg_current_code_size(s);
- s->tb_jmp_reset_offset[which] = off;
- /* Make sure that we didn't overflow the stored offset. */
- assert(s->tb_jmp_reset_offset[which] == off);
-}
+ TCGLabel *l;
-#include "tcg-target.inc.c"
+ QSIMPLEQ_FOREACH(l, &s->labels, next) {
+ TCGRelocation *r;
+ uintptr_t value = l->u.value;
-/* compare a pointer @ptr and a tb_tc @s */
-static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
-{
- if (ptr >= s->ptr + s->size) {
- return 1;
- } else if (ptr < s->ptr) {
- return -1;
+ QSIMPLEQ_FOREACH(r, &l->relocs, next) {
+ if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
+ return false;
+ }
+ }
}
- return 0;
+ return true;
}
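With this change, a branch to an unresolved label always just records a TCGRelocation; patch_reloc runs exactly once per site, from tcg_resolve_relocs at the end of code generation. A sketch of the emitting side, where R_EXAMPLE_BR and the zero placeholder encoding are hypothetical:

    static void emit_branch_to(TCGContext *s, TCGLabel *l)
    {
        /* Record the fixup first, then emit the placeholder insn. */
        tcg_out_reloc(s, s->code_ptr, R_EXAMPLE_BR, l, 0);
        tcg_out32(s, 0);   /* displacement patched by tcg_resolve_relocs */
    }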
-static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
+static void set_jmp_reset_offset(TCGContext *s, int which)
{
- const struct tb_tc *a = ap;
- const struct tb_tc *b = bp;
-
/*
- * When both sizes are set, we know this isn't a lookup.
- * This is the most likely case: every TB must be inserted; lookups
- * are a lot less frequent.
+ * We will check for overflow at the end of the opcode loop in
+ * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
*/
- if (likely(a->size && b->size)) {
- if (a->ptr > b->ptr) {
- return 1;
- } else if (a->ptr < b->ptr) {
- return -1;
- }
- /* a->ptr == b->ptr should happen only on deletions */
- g_assert(a->size == b->size);
- return 0;
- }
- /*
- * All lookups have either .size field set to 0.
- * From the glib sources we see that @ap is always the lookup key. However
- * the docs provide no guarantee, so we just mark this case as likely.
- */
- if (likely(a->size == 0)) {
- return ptr_cmp_tb_tc(a->ptr, b);
- }
- return ptr_cmp_tb_tc(b->ptr, a);
+ s->gen_tb->jmp_reset_offset[which] = tcg_current_code_size(s);
}
-static void tcg_region_trees_init(void)
+static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
{
- size_t i;
-
- tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
- region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
- for (i = 0; i < region.n; i++) {
- struct tcg_region_tree *rt = region_trees + i * tree_size;
-
- qemu_mutex_init(&rt->lock);
- rt->tree = g_tree_new(tb_tc_cmp);
- }
+ /*
+ * We will check for overflow at the end of the opcode loop in
+ * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
+ */
+ s->gen_tb->jmp_insn_offset[which] = tcg_current_code_size(s);
}
-static struct tcg_region_tree *tc_ptr_to_region_tree(void *p)
+static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
{
- size_t region_idx;
-
- if (p < region.start_aligned) {
- region_idx = 0;
- } else {
- ptrdiff_t offset = p - region.start_aligned;
-
- if (offset > region.stride * (region.n - 1)) {
- region_idx = region.n - 1;
- } else {
- region_idx = offset / region.stride;
- }
- }
- return region_trees + region_idx * tree_size;
+ /*
+ * Return the read-execute version of the pointer, for the benefit
+ * of any pc-relative addressing mode.
+ */
+ return (uintptr_t)tcg_splitwx_to_rx(&s->gen_tb->jmp_target_addr[which]);
}
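Throughout this file, generated code is written through a read-write mapping but executed (and, for pc-relative addressing, referenced) through a read-execute alias offset by tcg_splitwx_diff. A two-line sketch of the pair:

    tcg_insn_unit *rw = s->code_ptr;                  /* where we write    */
    const tcg_insn_unit *rx = tcg_splitwx_to_rx(rw);  /* where CPU fetches */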
-void tcg_tb_insert(TranslationBlock *tb)
+static int __attribute__((unused))
+tlb_mask_table_ofs(TCGContext *s, int which)
{
- struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
-
- qemu_mutex_lock(&rt->lock);
- g_tree_insert(rt->tree, &tb->tc, tb);
- qemu_mutex_unlock(&rt->lock);
+ return (offsetof(CPUNegativeOffsetState, tlb.f[which]) -
+ sizeof(CPUNegativeOffsetState));
}
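The fast-path TLB descriptors (CPUTLBDescFast) sit at negative offsets from env, so the value returned here is itself negative and usable directly as a load displacement. An illustrative use, with TCG_REG_TMP standing in for a backend scratch register:

    /* Load the fast-TLB mask word for mmu_idx 0 (sketch). */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP, TCG_AREG0,
               tlb_mask_table_ofs(s, 0));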
-void tcg_tb_remove(TranslationBlock *tb)
+/* Signal overflow, starting over with fewer guest insns. */
+static G_NORETURN
+void tcg_raise_tb_overflow(TCGContext *s)
{
- struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
-
- qemu_mutex_lock(&rt->lock);
- g_tree_remove(rt->tree, &tb->tc);
- qemu_mutex_unlock(&rt->lock);
+ siglongjmp(s->jmp_trans, -2);
}
/*
- * Find the TB 'tb' such that
- * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
- * Return NULL if not found.
+ * Used by tcg_out_movext{1,2} to hold the arguments for tcg_out_movext.
+ * By the time we arrive at tcg_out_movext1, @dst is always a TCGReg.
+ *
+ * However, tcg_out_helper_load_slots reuses this field to hold an
+ * argument slot number (which may designate a argument register or an
+ * argument stack slot), converting to TCGReg once all arguments that
+ * are destined for the stack are processed.
*/
-TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
-{
- struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
- TranslationBlock *tb;
- struct tb_tc s = { .ptr = (void *)tc_ptr };
-
- qemu_mutex_lock(&rt->lock);
- tb = g_tree_lookup(rt->tree, &s);
- qemu_mutex_unlock(&rt->lock);
- return tb;
-}
-
-static void tcg_region_tree_lock_all(void)
-{
- size_t i;
-
- for (i = 0; i < region.n; i++) {
- struct tcg_region_tree *rt = region_trees + i * tree_size;
+typedef struct TCGMovExtend {
+ unsigned dst;
+ TCGReg src;
+ TCGType dst_type;
+ TCGType src_type;
+ MemOp src_ext;
+} TCGMovExtend;
- qemu_mutex_lock(&rt->lock);
- }
-}
-
-static void tcg_region_tree_unlock_all(void)
-{
- size_t i;
-
- for (i = 0; i < region.n; i++) {
- struct tcg_region_tree *rt = region_trees + i * tree_size;
-
- qemu_mutex_unlock(&rt->lock);
- }
-}
-
-void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
+/**
+ * tcg_out_movext -- move and extend
+ * @s: tcg context
+ * @dst_type: integral type for destination
+ * @dst: destination register
+ * @src_type: integral type for source
+ * @src_ext: extension to apply to source
+ * @src: source register
+ *
+ * Move or extend @src into @dst, depending on @src_ext and the types.
+ */
+static void tcg_out_movext(TCGContext *s, TCGType dst_type, TCGReg dst,
+ TCGType src_type, MemOp src_ext, TCGReg src)
{
- size_t i;
-
- tcg_region_tree_lock_all();
- for (i = 0; i < region.n; i++) {
- struct tcg_region_tree *rt = region_trees + i * tree_size;
-
- g_tree_foreach(rt->tree, func, user_data);
+ switch (src_ext) {
+ case MO_UB:
+ tcg_out_ext8u(s, dst, src);
+ break;
+ case MO_SB:
+ tcg_out_ext8s(s, dst_type, dst, src);
+ break;
+ case MO_UW:
+ tcg_out_ext16u(s, dst, src);
+ break;
+ case MO_SW:
+ tcg_out_ext16s(s, dst_type, dst, src);
+ break;
+ case MO_UL:
+ case MO_SL:
+ if (dst_type == TCG_TYPE_I32) {
+ if (src_type == TCG_TYPE_I32) {
+ tcg_out_mov(s, TCG_TYPE_I32, dst, src);
+ } else {
+ tcg_out_extrl_i64_i32(s, dst, src);
+ }
+ } else if (src_type == TCG_TYPE_I32) {
+ if (src_ext & MO_SIGN) {
+ tcg_out_exts_i32_i64(s, dst, src);
+ } else {
+ tcg_out_extu_i32_i64(s, dst, src);
+ }
+ } else {
+ if (src_ext & MO_SIGN) {
+ tcg_out_ext32s(s, dst, src);
+ } else {
+ tcg_out_ext32u(s, dst, src);
+ }
+ }
+ break;
+ case MO_UQ:
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
+ if (dst_type == TCG_TYPE_I32) {
+ tcg_out_extrl_i64_i32(s, dst, src);
+ } else {
+ tcg_out_mov(s, TCG_TYPE_I64, dst, src);
+ }
+ break;
+ default:
+ g_assert_not_reached();
}
- tcg_region_tree_unlock_all();
}
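A usage sketch, with register names that are illustrative rather than tied to one backend: widening a 32-bit value into a 64-bit register is a single call, and the switch above selects the sign-extending form:

    tcg_out_movext(s, TCG_TYPE_I64, TCG_REG_R0,
                   TCG_TYPE_I32, MO_SL, TCG_REG_R1);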
-size_t tcg_nb_tbs(void)
+/* Minor variations on a theme, using a structure. */
+static void tcg_out_movext1_new_src(TCGContext *s, const TCGMovExtend *i,
+ TCGReg src)
{
- size_t nb_tbs = 0;
- size_t i;
-
- tcg_region_tree_lock_all();
- for (i = 0; i < region.n; i++) {
- struct tcg_region_tree *rt = region_trees + i * tree_size;
-
- nb_tbs += g_tree_nnodes(rt->tree);
- }
- tcg_region_tree_unlock_all();
- return nb_tbs;
+ tcg_out_movext(s, i->dst_type, i->dst, i->src_type, i->src_ext, src);
}
-static void tcg_region_tree_reset_all(void)
+static void tcg_out_movext1(TCGContext *s, const TCGMovExtend *i)
{
- size_t i;
-
- tcg_region_tree_lock_all();
- for (i = 0; i < region.n; i++) {
- struct tcg_region_tree *rt = region_trees + i * tree_size;
-
- /* Increment the refcount first so that destroy acts as a reset */
- g_tree_ref(rt->tree);
- g_tree_destroy(rt->tree);
- }
- tcg_region_tree_unlock_all();
+ tcg_out_movext1_new_src(s, i, i->src);
}
-static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
-{
- void *start, *end;
-
- start = region.start_aligned + curr_region * region.stride;
- end = start + region.size;
-
- if (curr_region == 0) {
- start = region.start;
- }
- if (curr_region == region.n - 1) {
- end = region.end;
- }
-
- *pstart = start;
- *pend = end;
-}
+/**
+ * tcg_out_movext2 -- move and extend two pairs
+ * @s: tcg context
+ * @i1: first move description
+ * @i2: second move description
+ * @scratch: temporary register, or -1 for none
+ *
+ * As tcg_out_movext, for both @i1 and @i2, caring for overlap
+ * between the sources and destinations.
+ */
-static void tcg_region_assign(TCGContext *s, size_t curr_region)
+static void tcg_out_movext2(TCGContext *s, const TCGMovExtend *i1,
+ const TCGMovExtend *i2, int scratch)
{
- void *start, *end;
-
- tcg_region_bounds(curr_region, &start, &end);
-
- s->code_gen_buffer = start;
- s->code_gen_ptr = start;
- s->code_gen_buffer_size = end - start;
- s->code_gen_highwater = end - TCG_HIGHWATER;
-}
+ TCGReg src1 = i1->src;
+ TCGReg src2 = i2->src;
-static bool tcg_region_alloc__locked(TCGContext *s)
-{
- if (region.current == region.n) {
- return true;
+ if (i1->dst != src2) {
+ tcg_out_movext1(s, i1);
+ tcg_out_movext1(s, i2);
+ return;
}
- tcg_region_assign(s, region.current);
- region.current++;
- return false;
-}
-
-/*
- * Request a new region once the one in use has filled up.
- * Returns true on error.
- */
-static bool tcg_region_alloc(TCGContext *s)
-{
- bool err;
- /* read the region size now; alloc__locked will overwrite it on success */
- size_t size_full = s->code_gen_buffer_size;
+ if (i2->dst == src1) {
+ TCGType src1_type = i1->src_type;
+ TCGType src2_type = i2->src_type;
- qemu_mutex_lock(&region.lock);
- err = tcg_region_alloc__locked(s);
- if (!err) {
- region.agg_size_full += size_full - TCG_HIGHWATER;
+ if (tcg_out_xchg(s, MAX(src1_type, src2_type), src1, src2)) {
+ /* The data is now in the correct registers, now extend. */
+ src1 = i2->src;
+ src2 = i1->src;
+ } else {
+ tcg_debug_assert(scratch >= 0);
+ tcg_out_mov(s, src1_type, scratch, src1);
+ src1 = scratch;
+ }
}
- qemu_mutex_unlock(&region.lock);
- return err;
+ tcg_out_movext1_new_src(s, i2, src2);
+ tcg_out_movext1_new_src(s, i1, src1);
}
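A worked example of the overlap handling, again with illustrative registers: when each destination is the other's source, the naive order would clobber one input, so the function either uses a native xchg or routes the first value through the scratch:

    TCGMovExtend i1 = { .dst = TCG_REG_R0, .src = TCG_REG_R1,
                        .dst_type = TCG_TYPE_I64, .src_type = TCG_TYPE_I32,
                        .src_ext = MO_SL };
    TCGMovExtend i2 = { .dst = TCG_REG_R1, .src = TCG_REG_R0,
                        .dst_type = TCG_TYPE_I64, .src_type = TCG_TYPE_I32,
                        .src_ext = MO_UL };
    tcg_out_movext2(s, &i1, &i2, TCG_REG_R2);   /* R2: scratch if no xchg */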
-/*
- * Perform a context's first region allocation.
- * This function does _not_ increment region.agg_size_full.
+/**
+ * tcg_out_movext3 -- move and extend three pairs
+ * @s: tcg context
+ * @i1: first move description
+ * @i2: second move description
+ * @i3: third move description
+ * @scratch: temporary register, or -1 for none
+ *
+ * As tcg_out_movext, for all of @i1, @i2 and @i3, caring for overlap
+ * between the sources and destinations.
*/
-static inline bool tcg_region_initial_alloc__locked(TCGContext *s)
-{
- return tcg_region_alloc__locked(s);
-}
-/* Call from a safe-work context */
-void tcg_region_reset_all(void)
+static void tcg_out_movext3(TCGContext *s, const TCGMovExtend *i1,
+ const TCGMovExtend *i2, const TCGMovExtend *i3,
+ int scratch)
{
- unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
- unsigned int i;
+ TCGReg src1 = i1->src;
+ TCGReg src2 = i2->src;
+ TCGReg src3 = i3->src;
- qemu_mutex_lock(&region.lock);
- region.current = 0;
- region.agg_size_full = 0;
-
- for (i = 0; i < n_ctxs; i++) {
- TCGContext *s = atomic_read(&tcg_ctxs[i]);
- bool err = tcg_region_initial_alloc__locked(s);
-
- g_assert(!err);
+ if (i1->dst != src2 && i1->dst != src3) {
+ tcg_out_movext1(s, i1);
+ tcg_out_movext2(s, i2, i3, scratch);
+ return;
}
- qemu_mutex_unlock(&region.lock);
-
- tcg_region_tree_reset_all();
-}
-
-#ifdef CONFIG_USER_ONLY
-static size_t tcg_n_regions(void)
-{
- return 1;
-}
-#else
-/*
- * It is likely that some vCPUs will translate more code than others, so we
- * first try to set more regions than max_cpus, with those regions being of
- * reasonable size. If that's not possible we make do by evenly dividing
- * the code_gen_buffer among the vCPUs.
- */
-static size_t tcg_n_regions(void)
-{
- size_t i;
-
- /* Use a single region if all we have is one vCPU thread */
- if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
- return 1;
+ if (i2->dst != src1 && i2->dst != src3) {
+ tcg_out_movext1(s, i2);
+ tcg_out_movext2(s, i1, i3, scratch);
+ return;
+ }
+ if (i3->dst != src1 && i3->dst != src2) {
+ tcg_out_movext1(s, i3);
+ tcg_out_movext2(s, i1, i2, scratch);
+ return;
}
- /* Try to have more regions than max_cpus, with each region being >= 2 MB */
- for (i = 8; i > 0; i--) {
- size_t regions_per_thread = i;
- size_t region_size;
-
- region_size = tcg_init_ctx.code_gen_buffer_size;
- region_size /= max_cpus * regions_per_thread;
-
- if (region_size >= 2 * 1024u * 1024) {
- return max_cpus * regions_per_thread;
+ /*
+ * There is a cycle. Since there are only 3 nodes, the cycle is
+ * either "clockwise" or "anti-clockwise", and can be solved with
+ * a single scratch or two xchg.
+ */
+ if (i1->dst == src2 && i2->dst == src3 && i3->dst == src1) {
+ /* "Clockwise" */
+ if (tcg_out_xchg(s, MAX(i1->src_type, i2->src_type), src1, src2)) {
+ tcg_out_xchg(s, MAX(i2->src_type, i3->src_type), src2, src3);
+ /* The data is now in the correct registers, now extend. */
+ tcg_out_movext1_new_src(s, i1, i1->dst);
+ tcg_out_movext1_new_src(s, i2, i2->dst);
+ tcg_out_movext1_new_src(s, i3, i3->dst);
+ } else {
+ tcg_debug_assert(scratch >= 0);
+ tcg_out_mov(s, i1->src_type, scratch, src1);
+ tcg_out_movext1(s, i3);
+ tcg_out_movext1(s, i2);
+ tcg_out_movext1_new_src(s, i1, scratch);
+ }
+ } else if (i1->dst == src3 && i2->dst == src1 && i3->dst == src2) {
+ /* "Anti-clockwise" */
+ if (tcg_out_xchg(s, MAX(i2->src_type, i3->src_type), src2, src3)) {
+ tcg_out_xchg(s, MAX(i1->src_type, i2->src_type), src1, src2);
+ /* The data is now in the correct registers, now extend. */
+ tcg_out_movext1_new_src(s, i1, i1->dst);
+ tcg_out_movext1_new_src(s, i2, i2->dst);
+ tcg_out_movext1_new_src(s, i3, i3->dst);
+ } else {
+ tcg_debug_assert(scratch >= 0);
+ tcg_out_mov(s, i1->src_type, scratch, src1);
+ tcg_out_movext1(s, i2);
+ tcg_out_movext1(s, i3);
+ tcg_out_movext1_new_src(s, i1, scratch);
}
+ } else {
+ g_assert_not_reached();
}
- /* If we can't, then just allocate one region per vCPU thread */
- return max_cpus;
}
-#endif
-/*
- * Initializes region partitioning.
- *
- * Called at init time from the parent thread (i.e. the one calling
- * tcg_context_init), after the target's TCG globals have been set.
- *
- * Region partitioning works by splitting code_gen_buffer into separate regions,
- * and then assigning regions to TCG threads so that the threads can translate
- * code in parallel without synchronization.
- *
- * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
- * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
- * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
- * must have been parsed before calling this function, since it calls
- * qemu_tcg_mttcg_enabled().
- *
- * In user-mode we use a single region. Having multiple regions in user-mode
- * is not supported, because the number of vCPU threads (recall that each thread
- * spawned by the guest corresponds to a vCPU thread) is only bounded by the
- * OS, and usually this number is huge (tens of thousands is not uncommon).
- * Thus, given this large bound on the number of vCPU threads and the fact
- * that code_gen_buffer is allocated at compile-time, we cannot guarantee
- * that the availability of at least one region per vCPU thread.
- *
- * However, this user-mode limitation is unlikely to be a significant problem
- * in practice. Multi-threaded guests share most if not all of their translated
- * code, which makes parallel code generation less appealing than in softmmu.
- */
-void tcg_region_init(void)
-{
- void *buf = tcg_init_ctx.code_gen_buffer;
- void *aligned;
- size_t size = tcg_init_ctx.code_gen_buffer_size;
- size_t page_size = qemu_real_host_page_size;
- size_t region_size;
- size_t n_regions;
- size_t i;
+#define C_PFX1(P, A) P##A
+#define C_PFX2(P, A, B) P##A##_##B
+#define C_PFX3(P, A, B, C) P##A##_##B##_##C
+#define C_PFX4(P, A, B, C, D) P##A##_##B##_##C##_##D
+#define C_PFX5(P, A, B, C, D, E) P##A##_##B##_##C##_##D##_##E
+#define C_PFX6(P, A, B, C, D, E, F) P##A##_##B##_##C##_##D##_##E##_##F
- n_regions = tcg_n_regions();
+/* Define an enumeration for the various combinations. */
- /* The first region will be 'aligned - buf' bytes larger than the others */
- aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
- g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
- /*
- * Make region_size a multiple of page_size, using aligned as the start.
- * As a result of this we might end up with a few extra pages at the end of
- * the buffer; we will assign those to the last region.
- */
- region_size = (size - (aligned - buf)) / n_regions;
- region_size = QEMU_ALIGN_DOWN(region_size, page_size);
+#define C_O0_I1(I1) C_PFX1(c_o0_i1_, I1),
+#define C_O0_I2(I1, I2) C_PFX2(c_o0_i2_, I1, I2),
+#define C_O0_I3(I1, I2, I3) C_PFX3(c_o0_i3_, I1, I2, I3),
+#define C_O0_I4(I1, I2, I3, I4) C_PFX4(c_o0_i4_, I1, I2, I3, I4),
- /* A region must have at least 2 pages; one code, one guard */
- g_assert(region_size >= 2 * page_size);
+#define C_O1_I1(O1, I1) C_PFX2(c_o1_i1_, O1, I1),
+#define C_O1_I2(O1, I1, I2) C_PFX3(c_o1_i2_, O1, I1, I2),
+#define C_O1_I3(O1, I1, I2, I3) C_PFX4(c_o1_i3_, O1, I1, I2, I3),
+#define C_O1_I4(O1, I1, I2, I3, I4) C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4),
- /* init the region struct */
- qemu_mutex_init(&region.lock);
- region.n = n_regions;
- region.size = region_size - page_size;
- region.stride = region_size;
- region.start = buf;
- region.start_aligned = aligned;
- /* page-align the end, since its last page will be a guard page */
- region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
- /* account for that last guard page */
- region.end -= page_size;
+#define C_N1_I2(O1, I1, I2) C_PFX3(c_n1_i2_, O1, I1, I2),
+#define C_N1O1_I1(O1, O2, I1) C_PFX3(c_n1o1_i1_, O1, O2, I1),
+#define C_N2_I1(O1, O2, I1) C_PFX3(c_n2_i1_, O1, O2, I1),
- /* set guard pages */
- for (i = 0; i < region.n; i++) {
- void *start, *end;
- int rc;
+#define C_O2_I1(O1, O2, I1) C_PFX3(c_o2_i1_, O1, O2, I1),
+#define C_O2_I2(O1, O2, I1, I2) C_PFX4(c_o2_i2_, O1, O2, I1, I2),
+#define C_O2_I3(O1, O2, I1, I2, I3) C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3),
+#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4),
+#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4),
- tcg_region_bounds(i, &start, &end);
- rc = qemu_mprotect_none(end, page_size);
- g_assert(!rc);
- }
+typedef enum {
+#include "tcg-target-con-set.h"
+} TCGConstraintSetIndex;
+
+static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);
+
+#undef C_O0_I1
+#undef C_O0_I2
+#undef C_O0_I3
+#undef C_O0_I4
+#undef C_O1_I1
+#undef C_O1_I2
+#undef C_O1_I3
+#undef C_O1_I4
+#undef C_N1_I2
+#undef C_N1O1_I1
+#undef C_N2_I1
+#undef C_O2_I1
+#undef C_O2_I2
+#undef C_O2_I3
+#undef C_O2_I4
+#undef C_N1_O1_I4
+
+/* Put all of the constraint sets into an array, indexed by the enum. */
+
+#define C_O0_I1(I1) { .args_ct_str = { #I1 } },
+#define C_O0_I2(I1, I2) { .args_ct_str = { #I1, #I2 } },
+#define C_O0_I3(I1, I2, I3) { .args_ct_str = { #I1, #I2, #I3 } },
+#define C_O0_I4(I1, I2, I3, I4) { .args_ct_str = { #I1, #I2, #I3, #I4 } },
+
+#define C_O1_I1(O1, I1) { .args_ct_str = { #O1, #I1 } },
+#define C_O1_I2(O1, I1, I2) { .args_ct_str = { #O1, #I1, #I2 } },
+#define C_O1_I3(O1, I1, I2, I3) { .args_ct_str = { #O1, #I1, #I2, #I3 } },
+#define C_O1_I4(O1, I1, I2, I3, I4) { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },
+
+#define C_N1_I2(O1, I1, I2) { .args_ct_str = { "&" #O1, #I1, #I2 } },
+#define C_N1O1_I1(O1, O2, I1) { .args_ct_str = { "&" #O1, #O2, #I1 } },
+#define C_N2_I1(O1, O2, I1) { .args_ct_str = { "&" #O1, "&" #O2, #I1 } },
+
+#define C_O2_I1(O1, O2, I1) { .args_ct_str = { #O1, #O2, #I1 } },
+#define C_O2_I2(O1, O2, I1, I2) { .args_ct_str = { #O1, #O2, #I1, #I2 } },
+#define C_O2_I3(O1, O2, I1, I2, I3) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
+#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },
+#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { "&" #O1, #O2, #I1, #I2, #I3, #I4 } },
+
+static const TCGTargetOpDef constraint_sets[] = {
+#include "tcg-target-con-set.h"
+};
- tcg_region_trees_init();
- /* In user-mode we support only one ctx, so do the initial allocation now */
-#ifdef CONFIG_USER_ONLY
- {
- bool err = tcg_region_initial_alloc__locked(tcg_ctx);
+#undef C_O0_I1
+#undef C_O0_I2
+#undef C_O0_I3
+#undef C_O0_I4
+#undef C_O1_I1
+#undef C_O1_I2
+#undef C_O1_I3
+#undef C_O1_I4
+#undef C_N1_I2
+#undef C_N1O1_I1
+#undef C_N2_I1
+#undef C_O2_I1
+#undef C_O2_I2
+#undef C_O2_I3
+#undef C_O2_I4
+#undef C_N1_O1_I4
+
+/* Expand the enumerator to be returned from tcg_target_op_def(). */
+
+#define C_O0_I1(I1) C_PFX1(c_o0_i1_, I1)
+#define C_O0_I2(I1, I2) C_PFX2(c_o0_i2_, I1, I2)
+#define C_O0_I3(I1, I2, I3) C_PFX3(c_o0_i3_, I1, I2, I3)
+#define C_O0_I4(I1, I2, I3, I4) C_PFX4(c_o0_i4_, I1, I2, I3, I4)
+
+#define C_O1_I1(O1, I1) C_PFX2(c_o1_i1_, O1, I1)
+#define C_O1_I2(O1, I1, I2) C_PFX3(c_o1_i2_, O1, I1, I2)
+#define C_O1_I3(O1, I1, I2, I3) C_PFX4(c_o1_i3_, O1, I1, I2, I3)
+#define C_O1_I4(O1, I1, I2, I3, I4) C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4)
+
+#define C_N1_I2(O1, I1, I2) C_PFX3(c_n1_i2_, O1, I1, I2)
+#define C_N1O1_I1(O1, O2, I1) C_PFX3(c_n1o1_i1_, O1, O2, I1)
+#define C_N2_I1(O1, O2, I1) C_PFX3(c_n2_i1_, O1, O2, I1)
+
+#define C_O2_I1(O1, O2, I1) C_PFX3(c_o2_i1_, O1, O2, I1)
+#define C_O2_I2(O1, O2, I1, I2) C_PFX4(c_o2_i2_, O1, O2, I1, I2)
+#define C_O2_I3(O1, O2, I1, I2, I3) C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3)
+#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)
+#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4)
+
+#include "tcg-target.c.inc"
+
+#ifndef CONFIG_TCG_INTERPRETER
+/* Validate CPUTLBDescFast placement. */
+QEMU_BUILD_BUG_ON((int)(offsetof(CPUNegativeOffsetState, tlb.f[0]) -
+ sizeof(CPUNegativeOffsetState))
+ < MIN_TLB_MASK_TABLE_OFS);
+#endif
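The block above is an X-macro: tcg-target-con-set.h is expanded three times, first into an enum of constraint-set indexes, then into the constraint_sets[] string table, and finally into the values returned by tcg_target_op_def(). A self-contained miniature of the same pattern:

    #define EXAMPLE_SETS  C(r, r)  C(r, ri)

    #define C(O, I)  c_##O##_##I,
    typedef enum { EXAMPLE_SETS } ExampleConIndex;            /* pass 1 */
    #undef C

    #define C(O, I)  { #O, #I },
    static const char *example_strs[][2] = { EXAMPLE_SETS };  /* pass 2 */
    #undef C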
- g_assert(!err);
- }
+static void alloc_tcg_plugin_context(TCGContext *s)
+{
+#ifdef CONFIG_PLUGIN
+ s->plugin_tb = g_new0(struct qemu_plugin_tb, 1);
+ s->plugin_tb->insns =
+ g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn);
#endif
}
@@ -716,12 +778,13 @@ void tcg_region_init(void)
* In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
* of tcg_region_init() for the reasoning behind this.
*
- * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
- * softmmu tcg_ctxs[] does not track tcg_ctx_init, since the initial context
+ * In system-mode each caller registers its context in tcg_ctxs[]. Note that in
+ * system-mode tcg_ctxs[] does not track tcg_ctx_init, since the initial context
* is not used anymore for translation once this function is called.
*
- * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
- * over the array (e.g. tcg_code_size() the same for both softmmu and user-mode.
+ * Not tracking tcg_init_ctx in tcg_ctxs[] in system-mode keeps code that
+ * iterates over the array (e.g. tcg_code_size()) the same for both system
+ * and user modes.
*/
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
@@ -733,7 +796,6 @@ void tcg_register_thread(void)
{
TCGContext *s = g_malloc(sizeof(*s));
unsigned int i, n;
- bool err;
*s = tcg_init_ctx;
@@ -747,81 +809,25 @@ void tcg_register_thread(void)
}
/* Claim an entry in tcg_ctxs */
- n = atomic_fetch_inc(&n_tcg_ctxs);
- g_assert(n < max_cpus);
- atomic_set(&tcg_ctxs[n], s);
-
- tcg_ctx = s;
- qemu_mutex_lock(&region.lock);
- err = tcg_region_initial_alloc__locked(tcg_ctx);
- g_assert(!err);
- qemu_mutex_unlock(&region.lock);
-}
-#endif /* !CONFIG_USER_ONLY */
+ n = qatomic_fetch_inc(&tcg_cur_ctxs);
+ g_assert(n < tcg_max_ctxs);
+ qatomic_set(&tcg_ctxs[n], s);
-/*
- * Returns the size (in bytes) of all translated code (i.e. from all regions)
- * currently in the cache.
- * See also: tcg_code_capacity()
- * Do not confuse with tcg_current_code_size(); that one applies to a single
- * TCG context.
- */
-size_t tcg_code_size(void)
-{
- unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
- unsigned int i;
- size_t total;
-
- qemu_mutex_lock(&region.lock);
- total = region.agg_size_full;
- for (i = 0; i < n_ctxs; i++) {
- const TCGContext *s = atomic_read(&tcg_ctxs[i]);
- size_t size;
-
- size = atomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
- g_assert(size <= s->code_gen_buffer_size);
- total += size;
+ if (n > 0) {
+ alloc_tcg_plugin_context(s);
+ tcg_region_initial_alloc(s);
}
- qemu_mutex_unlock(&region.lock);
- return total;
-}
-
-/*
- * Returns the code capacity (in bytes) of the entire cache, i.e. including all
- * regions.
- * See also: tcg_code_size()
- */
-size_t tcg_code_capacity(void)
-{
- size_t guard_size, capacity;
-
- /* no need for synchronization; these variables are set at init time */
- guard_size = region.stride - region.size;
- capacity = region.end + guard_size - region.start;
- capacity -= region.n * (guard_size + TCG_HIGHWATER);
- return capacity;
-}
-size_t tcg_tb_phys_invalidate_count(void)
-{
- unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
- unsigned int i;
- size_t total = 0;
-
- for (i = 0; i < n_ctxs; i++) {
- const TCGContext *s = atomic_read(&tcg_ctxs[i]);
-
- total += atomic_read(&s->tb_phys_invalidate_count);
- }
- return total;
+ tcg_ctx = s;
}
+#endif /* !CONFIG_USER_ONLY */
/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
TCGPool *p;
int pool_size;
-
+
if (size > TCG_POOL_CHUNK_SIZE) {
/* big malloc: insert a new pool (XXX: could optimize) */
p = g_malloc(sizeof(TCGPool) + size);
@@ -842,10 +848,11 @@ void *tcg_malloc_internal(TCGContext *s, int size)
p = g_malloc(sizeof(TCGPool) + pool_size);
p->size = pool_size;
p->next = NULL;
- if (s->pool_current)
+ if (s->pool_current) {
s->pool_current->next = p;
- else
+ } else {
s->pool_first = p;
+ }
} else {
p = p->next;
}
@@ -869,31 +876,443 @@ void tcg_pool_reset(TCGContext *s)
s->pool_current = NULL;
}
-typedef struct TCGHelperInfo {
- void *func;
- const char *name;
- unsigned flags;
- unsigned sizemask;
-} TCGHelperInfo;
+/*
+ * Create TCGHelperInfo structures for "tcg/tcg-ldst.h" functions,
+ * akin to what "exec/helper-tcg.h" does with DEF_HELPER_FLAGS_N.
+ * We only use these for layout in tcg_out_ld_helper_ret and
+ * tcg_out_st_helper_args, and share them between several of
+ * the helpers, with the end result that it's easier to build manually.
+ */
-#include "exec/helper-proto.h"
+#if TCG_TARGET_REG_BITS == 32
+# define dh_typecode_ttl dh_typecode_i32
+#else
+# define dh_typecode_ttl dh_typecode_i64
+#endif
+
+static TCGHelperInfo info_helper_ld32_mmu = {
+ .flags = TCG_CALL_NO_WG,
+ .typemask = dh_typemask(ttl, 0) /* return tcg_target_ulong */
+ | dh_typemask(env, 1)
+ | dh_typemask(i64, 2) /* uint64_t addr */
+ | dh_typemask(i32, 3) /* unsigned oi */
+ | dh_typemask(ptr, 4) /* uintptr_t ra */
+};
+
+static TCGHelperInfo info_helper_ld64_mmu = {
+ .flags = TCG_CALL_NO_WG,
+ .typemask = dh_typemask(i64, 0) /* return uint64_t */
+ | dh_typemask(env, 1)
+ | dh_typemask(i64, 2) /* uint64_t addr */
+ | dh_typemask(i32, 3) /* unsigned oi */
+ | dh_typemask(ptr, 4) /* uintptr_t ra */
+};
+
+static TCGHelperInfo info_helper_ld128_mmu = {
+ .flags = TCG_CALL_NO_WG,
+ .typemask = dh_typemask(i128, 0) /* return Int128 */
+ | dh_typemask(env, 1)
+ | dh_typemask(i64, 2) /* uint64_t addr */
+ | dh_typemask(i32, 3) /* unsigned oi */
+ | dh_typemask(ptr, 4) /* uintptr_t ra */
+};
+
+static TCGHelperInfo info_helper_st32_mmu = {
+ .flags = TCG_CALL_NO_WG,
+ .typemask = dh_typemask(void, 0)
+ | dh_typemask(env, 1)
+ | dh_typemask(i64, 2) /* uint64_t addr */
+ | dh_typemask(i32, 3) /* uint32_t data */
+ | dh_typemask(i32, 4) /* unsigned oi */
+ | dh_typemask(ptr, 5) /* uintptr_t ra */
+};
+
+static TCGHelperInfo info_helper_st64_mmu = {
+ .flags = TCG_CALL_NO_WG,
+ .typemask = dh_typemask(void, 0)
+ | dh_typemask(env, 1)
+ | dh_typemask(i64, 2) /* uint64_t addr */
+ | dh_typemask(i64, 3) /* uint64_t data */
+ | dh_typemask(i32, 4) /* unsigned oi */
+ | dh_typemask(ptr, 5) /* uintptr_t ra */
+};
-static const TCGHelperInfo all_helpers[] = {
-#include "exec/helper-tcg.h"
+static TCGHelperInfo info_helper_st128_mmu = {
+ .flags = TCG_CALL_NO_WG,
+ .typemask = dh_typemask(void, 0)
+ | dh_typemask(env, 1)
+ | dh_typemask(i64, 2) /* uint64_t addr */
+ | dh_typemask(i128, 3) /* Int128 data */
+ | dh_typemask(i32, 4) /* unsigned oi */
+ | dh_typemask(ptr, 5) /* uintptr_t ra */
};
-static GHashTable *helper_table;
+
+#ifdef CONFIG_TCG_INTERPRETER
+static ffi_type *typecode_to_ffi(int argmask)
+{
+ /*
+ * libffi does not support __int128_t, so we have forced Int128
+ * to use the structure definition instead of the builtin type.
+ */
+ static ffi_type *ffi_type_i128_elements[3] = {
+ &ffi_type_uint64,
+ &ffi_type_uint64,
+ NULL
+ };
+ static ffi_type ffi_type_i128 = {
+ .size = 16,
+ .alignment = __alignof__(Int128),
+ .type = FFI_TYPE_STRUCT,
+ .elements = ffi_type_i128_elements,
+ };
+
+ switch (argmask) {
+ case dh_typecode_void:
+ return &ffi_type_void;
+ case dh_typecode_i32:
+ return &ffi_type_uint32;
+ case dh_typecode_s32:
+ return &ffi_type_sint32;
+ case dh_typecode_i64:
+ return &ffi_type_uint64;
+ case dh_typecode_s64:
+ return &ffi_type_sint64;
+ case dh_typecode_ptr:
+ return &ffi_type_pointer;
+ case dh_typecode_i128:
+ return &ffi_type_i128;
+ }
+ g_assert_not_reached();
+}
+
+static ffi_cif *init_ffi_layout(TCGHelperInfo *info)
+{
+ unsigned typemask = info->typemask;
+ struct {
+ ffi_cif cif;
+ ffi_type *args[];
+ } *ca;
+ ffi_status status;
+ int nargs;
+
+ /* Ignoring the return type, find the last non-zero field. */
+ nargs = 32 - clz32(typemask >> 3);
+ nargs = DIV_ROUND_UP(nargs, 3);
+ assert(nargs <= MAX_CALL_IARGS);
+
+ ca = g_malloc0(sizeof(*ca) + nargs * sizeof(ffi_type *));
+ ca->cif.rtype = typecode_to_ffi(typemask & 7);
+ ca->cif.nargs = nargs;
+
+ if (nargs != 0) {
+ ca->cif.arg_types = ca->args;
+ for (int j = 0; j < nargs; ++j) {
+ int typecode = extract32(typemask, (j + 1) * 3, 3);
+ ca->args[j] = typecode_to_ffi(typecode);
+ }
+ }
+
+ status = ffi_prep_cif(&ca->cif, FFI_DEFAULT_ABI, nargs,
+ ca->cif.rtype, ca->cif.arg_types);
+ assert(status == FFI_OK);
+
+ return &ca->cif;
+}
+
+#define HELPER_INFO_INIT(I) (&(I)->cif)
+#define HELPER_INFO_INIT_VAL(I) init_ffi_layout(I)
+#else
+#define HELPER_INFO_INIT(I) (&(I)->init)
+#define HELPER_INFO_INIT_VAL(I) 1
+#endif /* CONFIG_TCG_INTERPRETER */
+
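Each helper's typemask packs one 3-bit dh_typecode per slot: bits 0-2 describe the return value, and argument j occupies bits 3*(j+1) through 3*(j+1)+2. That is the layout walked by init_ffi_layout above and init_call_layout below. A decode sketch:

    unsigned mask = info_helper_ld32_mmu.typemask;
    int ret_code = mask & 7;               /* dh_typecode_* of the return */
    int arg1     = extract32(mask, 3, 3);  /* first argument: env         */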
+static inline bool arg_slot_reg_p(unsigned arg_slot)
+{
+ /*
+ * Split the sizeof away from the comparison to avoid Werror from
+ * "unsigned < 0 is always false", when iarg_regs is empty.
+ */
+ unsigned nreg = ARRAY_SIZE(tcg_target_call_iarg_regs);
+ return arg_slot < nreg;
+}
+
+static inline int arg_slot_stk_ofs(unsigned arg_slot)
+{
+ unsigned max = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
+ unsigned stk_slot = arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs);
+
+ tcg_debug_assert(stk_slot < max);
+ return TCG_TARGET_CALL_STACK_OFFSET + stk_slot * sizeof(tcg_target_long);
+}
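Argument "slots" number the call registers first and stack words after them. A worked example, assuming a host with six integer argument registers (e.g. x86_64 SysV):

    assert(!arg_slot_reg_p(7));        /* 7 >= 6: lives on the stack  */
    int stk = arg_slot_stk_ofs(7);     /* OFFSET + (7 - 6) * wordsize */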
+
+typedef struct TCGCumulativeArgs {
+ int arg_idx; /* tcg_gen_callN args[] */
+ int info_in_idx; /* TCGHelperInfo in[] */
+ int arg_slot; /* regs+stack slot */
+ int ref_slot; /* stack slots for references */
+} TCGCumulativeArgs;
+
+static void layout_arg_even(TCGCumulativeArgs *cum)
+{
+ cum->arg_slot += cum->arg_slot & 1;
+}
+
+static void layout_arg_1(TCGCumulativeArgs *cum, TCGHelperInfo *info,
+ TCGCallArgumentKind kind)
+{
+ TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];
+
+ *loc = (TCGCallArgumentLoc){
+ .kind = kind,
+ .arg_idx = cum->arg_idx,
+ .arg_slot = cum->arg_slot,
+ };
+ cum->info_in_idx++;
+ cum->arg_slot++;
+}
+
+static void layout_arg_normal_n(TCGCumulativeArgs *cum,
+ TCGHelperInfo *info, int n)
+{
+ TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];
+
+ for (int i = 0; i < n; ++i) {
+ /* Layout all using the same arg_idx, adjusting the subindex. */
+ loc[i] = (TCGCallArgumentLoc){
+ .kind = TCG_CALL_ARG_NORMAL,
+ .arg_idx = cum->arg_idx,
+ .tmp_subindex = i,
+ .arg_slot = cum->arg_slot + i,
+ };
+ }
+ cum->info_in_idx += n;
+ cum->arg_slot += n;
+}
+
+static void layout_arg_by_ref(TCGCumulativeArgs *cum, TCGHelperInfo *info)
+{
+ TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];
+ int n = 128 / TCG_TARGET_REG_BITS;
+
+ /* The first subindex carries the pointer. */
+ layout_arg_1(cum, info, TCG_CALL_ARG_BY_REF);
+
+ /*
+ * The callee is allowed to clobber memory associated with
+ * structure pass by-reference. Therefore we must make copies.
+ * Allocate space from "ref_slot", which will be adjusted to
+ * follow the parameters on the stack.
+ */
+ loc[0].ref_slot = cum->ref_slot;
+
+ /*
+ * Subsequent words also go into the reference slot, but
+ * do not accumulate into the regular arguments.
+ */
+ for (int i = 1; i < n; ++i) {
+ loc[i] = (TCGCallArgumentLoc){
+ .kind = TCG_CALL_ARG_BY_REF_N,
+ .arg_idx = cum->arg_idx,
+ .tmp_subindex = i,
+ .ref_slot = cum->ref_slot + i,
+ };
+ }
+ cum->info_in_idx += n - 1; /* i=0 accounted for in layout_arg_1 */
+ cum->ref_slot += n;
+}
+
+static void init_call_layout(TCGHelperInfo *info)
+{
+ int max_reg_slots = ARRAY_SIZE(tcg_target_call_iarg_regs);
+ int max_stk_slots = TCG_STATIC_CALL_ARGS_SIZE / sizeof(tcg_target_long);
+ unsigned typemask = info->typemask;
+ unsigned typecode;
+ TCGCumulativeArgs cum = { };
+
+ /*
+ * Parse and place any function return value.
+ */
+ typecode = typemask & 7;
+ switch (typecode) {
+ case dh_typecode_void:
+ info->nr_out = 0;
+ break;
+ case dh_typecode_i32:
+ case dh_typecode_s32:
+ case dh_typecode_ptr:
+ info->nr_out = 1;
+ info->out_kind = TCG_CALL_RET_NORMAL;
+ break;
+ case dh_typecode_i64:
+ case dh_typecode_s64:
+ info->nr_out = 64 / TCG_TARGET_REG_BITS;
+ info->out_kind = TCG_CALL_RET_NORMAL;
+ /* Query the last register now to trigger any assert early. */
+ tcg_target_call_oarg_reg(info->out_kind, info->nr_out - 1);
+ break;
+ case dh_typecode_i128:
+ info->nr_out = 128 / TCG_TARGET_REG_BITS;
+ info->out_kind = TCG_TARGET_CALL_RET_I128;
+ switch (TCG_TARGET_CALL_RET_I128) {
+ case TCG_CALL_RET_NORMAL:
+ /* Query the last register now to trigger any assert early. */
+ tcg_target_call_oarg_reg(info->out_kind, info->nr_out - 1);
+ break;
+ case TCG_CALL_RET_BY_VEC:
+ /* Query the single register now to trigger any assert early. */
+ tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0);
+ break;
+ case TCG_CALL_RET_BY_REF:
+ /*
+ * Allocate the first argument to the output.
+ * We don't need to store this anywhere, just make it
+ * unavailable for use in the input loop below.
+ */
+ cum.arg_slot = 1;
+ break;
+ default:
+ qemu_build_not_reached();
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /*
+ * Parse and place function arguments.
+ */
+ for (typemask >>= 3; typemask; typemask >>= 3, cum.arg_idx++) {
+ TCGCallArgumentKind kind;
+ TCGType type;
+
+ typecode = typemask & 7;
+ switch (typecode) {
+ case dh_typecode_i32:
+ case dh_typecode_s32:
+ type = TCG_TYPE_I32;
+ break;
+ case dh_typecode_i64:
+ case dh_typecode_s64:
+ type = TCG_TYPE_I64;
+ break;
+ case dh_typecode_ptr:
+ type = TCG_TYPE_PTR;
+ break;
+ case dh_typecode_i128:
+ type = TCG_TYPE_I128;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ switch (type) {
+ case TCG_TYPE_I32:
+ switch (TCG_TARGET_CALL_ARG_I32) {
+ case TCG_CALL_ARG_EVEN:
+ layout_arg_even(&cum);
+ /* fall through */
+ case TCG_CALL_ARG_NORMAL:
+ layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
+ break;
+ case TCG_CALL_ARG_EXTEND:
+ kind = TCG_CALL_ARG_EXTEND_U + (typecode & 1);
+ layout_arg_1(&cum, info, kind);
+ break;
+ default:
+ qemu_build_not_reached();
+ }
+ break;
+
+ case TCG_TYPE_I64:
+ switch (TCG_TARGET_CALL_ARG_I64) {
+ case TCG_CALL_ARG_EVEN:
+ layout_arg_even(&cum);
+ /* fall through */
+ case TCG_CALL_ARG_NORMAL:
+ if (TCG_TARGET_REG_BITS == 32) {
+ layout_arg_normal_n(&cum, info, 2);
+ } else {
+ layout_arg_1(&cum, info, TCG_CALL_ARG_NORMAL);
+ }
+ break;
+ default:
+ qemu_build_not_reached();
+ }
+ break;
+
+ case TCG_TYPE_I128:
+ switch (TCG_TARGET_CALL_ARG_I128) {
+ case TCG_CALL_ARG_EVEN:
+ layout_arg_even(&cum);
+ /* fall through */
+ case TCG_CALL_ARG_NORMAL:
+ layout_arg_normal_n(&cum, info, 128 / TCG_TARGET_REG_BITS);
+ break;
+ case TCG_CALL_ARG_BY_REF:
+ layout_arg_by_ref(&cum, info);
+ break;
+ default:
+ qemu_build_not_reached();
+ }
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+ }
+ info->nr_in = cum.info_in_idx;
+
+ /* Validate that we didn't overrun the input array. */
+ assert(cum.info_in_idx <= ARRAY_SIZE(info->in));
+ /* Validate the backend has enough argument space. */
+ assert(cum.arg_slot <= max_reg_slots + max_stk_slots);
+
+ /*
+ * Relocate the "ref_slot" area to the end of the parameters.
+ * Minimizing this stack offset helps code size for x86,
+ * which has a signed 8-bit offset encoding.
+ */
+ if (cum.ref_slot != 0) {
+ int ref_base = 0;
+
+ if (cum.arg_slot > max_reg_slots) {
+ int align = __alignof(Int128) / sizeof(tcg_target_long);
+
+ ref_base = cum.arg_slot - max_reg_slots;
+ if (align > 1) {
+ ref_base = ROUND_UP(ref_base, align);
+ }
+ }
+ assert(ref_base + cum.ref_slot <= max_stk_slots);
+ ref_base += max_reg_slots;
+
+ if (ref_base != 0) {
+ for (int i = cum.info_in_idx - 1; i >= 0; --i) {
+ TCGCallArgumentLoc *loc = &info->in[i];
+ switch (loc->kind) {
+ case TCG_CALL_ARG_BY_REF:
+ case TCG_CALL_ARG_BY_REF_N:
+ loc->ref_slot += ref_base;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+}
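An illustrative outcome, assuming a 64-bit host where all argument kinds are TCG_CALL_ARG_NORMAL: a guest-load helper taking (env, addr, oi, ra) occupies four consecutive input slots and returns one register word:

    init_call_layout(&info_helper_ld32_mmu);
    assert(info_helper_ld32_mmu.nr_in == 4);    /* env, addr, oi, ra */
    assert(info_helper_ld32_mmu.nr_out == 1);   /* one register word */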
static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
TCGReg reg, const char *name);
-void tcg_context_init(TCGContext *s)
+static void tcg_context_init(unsigned max_cpus)
{
+ TCGContext *s = &tcg_init_ctx;
int op, total_args, n, i;
TCGOpDef *def;
TCGArgConstraint *args_ct;
- int *sorted_args;
TCGTemp *ts;
memset(s, 0, sizeof(*s));
@@ -908,26 +1327,21 @@ void tcg_context_init(TCGContext *s)
total_args += n;
}
- args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
- sorted_args = g_malloc(sizeof(int) * total_args);
+ args_ct = g_new0(TCGArgConstraint, total_args);
for(op = 0; op < NB_OPS; op++) {
def = &tcg_op_defs[op];
def->args_ct = args_ct;
- def->sorted_args = sorted_args;
n = def->nb_iargs + def->nb_oargs;
- sorted_args += n;
args_ct += n;
}
- /* Register helpers. */
- /* Use g_direct_hash/equal for direct pointer comparisons on func. */
- helper_table = g_hash_table_new(NULL, NULL);
-
- for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
- g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
- (gpointer)&all_helpers[i]);
- }
+ init_call_layout(&info_helper_ld32_mmu);
+ init_call_layout(&info_helper_ld64_mmu);
+ init_call_layout(&info_helper_ld128_mmu);
+ init_call_layout(&info_helper_st32_mmu);
+ init_call_layout(&info_helper_st64_mmu);
+ init_call_layout(&info_helper_st128_mmu);
tcg_target_init(s);
process_op_defs(s);
@@ -947,23 +1361,33 @@ void tcg_context_init(TCGContext *s)
indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
}
+ alloc_tcg_plugin_context(s);
+
tcg_ctx = s;
/*
* In user-mode we simply share the init context among threads, since we
* use a single region. See the documentation tcg_region_init() for the
* reasoning behind this.
- * In softmmu we will have at most max_cpus TCG threads.
+ * In system-mode we will have at most max_cpus TCG threads.
*/
#ifdef CONFIG_USER_ONLY
tcg_ctxs = &tcg_ctx;
- n_tcg_ctxs = 1;
+ tcg_cur_ctxs = 1;
+ tcg_max_ctxs = 1;
#else
- tcg_ctxs = g_new(TCGContext *, max_cpus);
+ tcg_max_ctxs = max_cpus;
+ tcg_ctxs = g_new0(TCGContext *, max_cpus);
#endif
tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
- cpu_env = temp_tcgv_ptr(ts);
+ tcg_env = temp_tcgv_ptr(ts);
+}
+
+void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus)
+{
+ tcg_context_init(max_cpus);
+ tcg_region_init(tb_size, splitwx, max_cpus);
}
/*
@@ -986,92 +1410,90 @@ TranslationBlock *tcg_tb_alloc(TCGContext *s)
}
goto retry;
}
- atomic_set(&s->code_gen_ptr, next);
+ qatomic_set(&s->code_gen_ptr, next);
s->data_gen_ptr = NULL;
return tb;
}
-void tcg_prologue_init(TCGContext *s)
+void tcg_prologue_init(void)
{
- size_t prologue_size, total_size;
- void *buf0, *buf1;
+ TCGContext *s = tcg_ctx;
+ size_t prologue_size;
- /* Put the prologue at the beginning of code_gen_buffer. */
- buf0 = s->code_gen_buffer;
- total_size = s->code_gen_buffer_size;
- s->code_ptr = buf0;
- s->code_buf = buf0;
+ s->code_ptr = s->code_gen_ptr;
+ s->code_buf = s->code_gen_ptr;
s->data_gen_ptr = NULL;
- s->code_gen_prologue = buf0;
- /* Compute a high-water mark, at which we voluntarily flush the buffer
- and start over. The size here is arbitrary, significantly larger
- than we expect the code generation for any one opcode to require. */
- s->code_gen_highwater = s->code_gen_buffer + (total_size - TCG_HIGHWATER);
+#ifndef CONFIG_TCG_INTERPRETER
+ tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
+#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
s->pool_labels = NULL;
#endif
+ qemu_thread_jit_write();
/* Generate the prologue. */
tcg_target_qemu_prologue(s);
#ifdef TCG_TARGET_NEED_POOL_LABELS
/* Allow the prologue to put e.g. guest_base into a pool entry. */
{
- bool ok = tcg_out_pool_finalize(s);
- tcg_debug_assert(ok);
+ int result = tcg_out_pool_finalize(s);
+ tcg_debug_assert(result == 0);
}
#endif
- buf1 = s->code_ptr;
- flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);
-
- /* Deduct the prologue from the buffer. */
prologue_size = tcg_current_code_size(s);
- s->code_gen_ptr = buf1;
- s->code_gen_buffer = buf1;
- s->code_buf = buf1;
- total_size -= prologue_size;
- s->code_gen_buffer_size = total_size;
+ perf_report_prologue(s->code_gen_ptr, prologue_size);
- tcg_register_jit(s->code_gen_buffer, total_size);
+#ifndef CONFIG_TCG_INTERPRETER
+ flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
+ (uintptr_t)s->code_buf, prologue_size);
+#endif
-#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
- qemu_log_lock();
- qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
- if (s->data_gen_ptr) {
- size_t code_size = s->data_gen_ptr - buf0;
- size_t data_size = prologue_size - code_size;
- size_t i;
-
- log_disas(buf0, code_size);
-
- for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
- if (sizeof(tcg_target_ulong) == 8) {
- qemu_log("0x%08" PRIxPTR ": .quad 0x%016" PRIx64 "\n",
- (uintptr_t)s->data_gen_ptr + i,
- *(uint64_t *)(s->data_gen_ptr + i));
- } else {
- qemu_log("0x%08" PRIxPTR ": .long 0x%08x\n",
- (uintptr_t)s->data_gen_ptr + i,
- *(uint32_t *)(s->data_gen_ptr + i));
+ FILE *logfile = qemu_log_trylock();
+ if (logfile) {
+ fprintf(logfile, "PROLOGUE: [size=%zu]\n", prologue_size);
+ if (s->data_gen_ptr) {
+ size_t code_size = s->data_gen_ptr - s->code_gen_ptr;
+ size_t data_size = prologue_size - code_size;
+ size_t i;
+
+ disas(logfile, s->code_gen_ptr, code_size);
+
+ for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
+ if (sizeof(tcg_target_ulong) == 8) {
+ fprintf(logfile,
+ "0x%08" PRIxPTR ": .quad 0x%016" PRIx64 "\n",
+ (uintptr_t)s->data_gen_ptr + i,
+ *(uint64_t *)(s->data_gen_ptr + i));
+ } else {
+ fprintf(logfile,
+ "0x%08" PRIxPTR ": .long 0x%08x\n",
+ (uintptr_t)s->data_gen_ptr + i,
+ *(uint32_t *)(s->data_gen_ptr + i));
+ }
}
+ } else {
+ disas(logfile, s->code_gen_ptr, prologue_size);
}
- } else {
- log_disas(buf0, prologue_size);
+ fprintf(logfile, "\n");
+ qemu_log_unlock(logfile);
}
- qemu_log("\n");
- qemu_log_flush();
- qemu_log_unlock();
}
+
+#ifndef CONFIG_TCG_INTERPRETER
+ /*
+ * Assert that goto_ptr is implemented completely, setting an epilogue.
+ * For tci, we use NULL as the signal to return from the interpreter,
+ * so skip this check.
+ */
+ tcg_debug_assert(tcg_code_gen_epilogue != NULL);
#endif
- /* Assert that goto_ptr is implemented completely. */
- if (TCG_TARGET_HAS_goto_ptr) {
- tcg_debug_assert(s->code_gen_epilogue != NULL);
- }
+ tcg_region_prologue_set(s);
}
void tcg_func_start(TCGContext *s)
@@ -1082,6 +1504,13 @@ void tcg_func_start(TCGContext *s)
/* No temps have been previously allocated for size or locality. */
memset(s->free_temps, 0, sizeof(s->free_temps));
+ /* No constant temps have been previously allocated. */
+ for (int i = 0; i < TCG_TYPE_COUNT; ++i) {
+ if (s->const_table[i]) {
+ g_hash_table_remove_all(s->const_table[i]);
+ }
+ }
+
s->nb_ops = 0;
s->nb_labels = 0;
s->current_frame_offset = s->frame_start;
@@ -1092,23 +1521,34 @@ void tcg_func_start(TCGContext *s)
QTAILQ_INIT(&s->ops);
QTAILQ_INIT(&s->free_ops);
+ s->emit_before_op = NULL;
+ QSIMPLEQ_INIT(&s->labels);
+
+ tcg_debug_assert(s->addr_type == TCG_TYPE_I32 ||
+ s->addr_type == TCG_TYPE_I64);
+
+ tcg_debug_assert(s->insn_start_words > 0);
}
-static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
+static TCGTemp *tcg_temp_alloc(TCGContext *s)
{
int n = s->nb_temps++;
- tcg_debug_assert(n < TCG_MAX_TEMPS);
+
+ if (n >= TCG_MAX_TEMPS) {
+ tcg_raise_tb_overflow(s);
+ }
return memset(&s->temps[n], 0, sizeof(TCGTemp));
}
-static inline TCGTemp *tcg_global_alloc(TCGContext *s)
+static TCGTemp *tcg_global_alloc(TCGContext *s)
{
TCGTemp *ts;
tcg_debug_assert(s->nb_globals == s->nb_temps);
+ tcg_debug_assert(s->nb_globals < TCG_MAX_TEMPS);
s->nb_globals++;
ts = tcg_temp_alloc(s);
- ts->temp_global = 1;
+ ts->kind = TEMP_GLOBAL;
return ts;
}
@@ -1118,14 +1558,12 @@ static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
{
TCGTemp *ts;
- if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
- tcg_abort();
- }
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
ts = tcg_global_alloc(s);
ts->base_type = type;
ts->type = type;
- ts->fixed_reg = 1;
+ ts->kind = TEMP_FIXED;
ts->reg = reg;
ts->name = name;
tcg_regset_set_reg(s->reserved_regs, reg);
@@ -1141,24 +1579,27 @@ void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
= tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}
-TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
- intptr_t offset, const char *name)
+static TCGTemp *tcg_global_mem_new_internal(TCGv_ptr base, intptr_t offset,
+ const char *name, TCGType type)
{
TCGContext *s = tcg_ctx;
TCGTemp *base_ts = tcgv_ptr_temp(base);
TCGTemp *ts = tcg_global_alloc(s);
- int indirect_reg = 0, bigendian = 0;
-#ifdef HOST_WORDS_BIGENDIAN
- bigendian = 1;
-#endif
+ int indirect_reg = 0;
- if (!base_ts->fixed_reg) {
+ switch (base_ts->kind) {
+ case TEMP_FIXED:
+ break;
+ case TEMP_GLOBAL:
/* We do not support double-indirect registers. */
tcg_debug_assert(!base_ts->indirect_reg);
base_ts->indirect_base = 1;
s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
? 2 : 1);
indirect_reg = 1;
+ break;
+ default:
+ g_assert_not_reached();
}
if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
@@ -1170,7 +1611,7 @@ TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
ts->indirect_reg = indirect_reg;
ts->mem_allocated = 1;
ts->mem_base = base_ts;
- ts->mem_offset = offset + bigendian * 4;
+ ts->mem_offset = offset;
pstrcpy(buf, sizeof(buf), name);
pstrcat(buf, sizeof(buf), "_0");
ts->name = strdup(buf);
@@ -1181,7 +1622,8 @@ TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
ts2->indirect_reg = indirect_reg;
ts2->mem_allocated = 1;
ts2->mem_base = base_ts;
- ts2->mem_offset = offset + (1 - bigendian) * 4;
+ ts2->mem_offset = offset + 4;
+ ts2->temp_subindex = 1;
pstrcpy(buf, sizeof(buf), name);
pstrcat(buf, sizeof(buf), "_1");
ts2->name = strdup(buf);
@@ -1197,51 +1639,128 @@ TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
return ts;
}
-TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
+TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t off, const char *name)
+{
+ TCGTemp *ts = tcg_global_mem_new_internal(reg, off, name, TCG_TYPE_I32);
+ return temp_tcgv_i32(ts);
+}
+
+TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t off, const char *name)
+{
+ TCGTemp *ts = tcg_global_mem_new_internal(reg, off, name, TCG_TYPE_I64);
+ return temp_tcgv_i64(ts);
+}
+
+TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t off, const char *name)
+{
+ TCGTemp *ts = tcg_global_mem_new_internal(reg, off, name, TCG_TYPE_PTR);
+ return temp_tcgv_ptr(ts);
+}
+
+TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind)
{
TCGContext *s = tcg_ctx;
TCGTemp *ts;
- int idx, k;
-
- k = type + (temp_local ? TCG_TYPE_COUNT : 0);
- idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
- if (idx < TCG_MAX_TEMPS) {
- /* There is already an available temp with the right type. */
- clear_bit(idx, s->free_temps[k].l);
-
- ts = &s->temps[idx];
- ts->temp_allocated = 1;
- tcg_debug_assert(ts->base_type == type);
- tcg_debug_assert(ts->temp_local == temp_local);
- } else {
- ts = tcg_temp_alloc(s);
- if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
- TCGTemp *ts2 = tcg_temp_alloc(s);
+ int n;
- ts->base_type = type;
- ts->type = TCG_TYPE_I32;
- ts->temp_allocated = 1;
- ts->temp_local = temp_local;
+ if (kind == TEMP_EBB) {
+ int idx = find_first_bit(s->free_temps[type].l, TCG_MAX_TEMPS);
- tcg_debug_assert(ts2 == ts + 1);
- ts2->base_type = TCG_TYPE_I64;
- ts2->type = TCG_TYPE_I32;
- ts2->temp_allocated = 1;
- ts2->temp_local = temp_local;
- } else {
- ts->base_type = type;
- ts->type = type;
+ if (idx < TCG_MAX_TEMPS) {
+ /* There is already an available temp with the right type. */
+ clear_bit(idx, s->free_temps[type].l);
+
+ ts = &s->temps[idx];
ts->temp_allocated = 1;
- ts->temp_local = temp_local;
+ tcg_debug_assert(ts->base_type == type);
+ tcg_debug_assert(ts->kind == kind);
+ return ts;
}
+ } else {
+ tcg_debug_assert(kind == TEMP_TB);
}
-#if defined(CONFIG_DEBUG_TCG)
- s->temps_in_use++;
-#endif
+ switch (type) {
+ case TCG_TYPE_I32:
+ case TCG_TYPE_V64:
+ case TCG_TYPE_V128:
+ case TCG_TYPE_V256:
+ n = 1;
+ break;
+ case TCG_TYPE_I64:
+ n = 64 / TCG_TARGET_REG_BITS;
+ break;
+ case TCG_TYPE_I128:
+ n = 128 / TCG_TARGET_REG_BITS;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ ts = tcg_temp_alloc(s);
+ ts->base_type = type;
+ ts->temp_allocated = 1;
+ ts->kind = kind;
+
+ if (n == 1) {
+ ts->type = type;
+ } else {
+ ts->type = TCG_TYPE_REG;
+
+ for (int i = 1; i < n; ++i) {
+ TCGTemp *ts2 = tcg_temp_alloc(s);
+
+ tcg_debug_assert(ts2 == ts + i);
+ ts2->base_type = type;
+ ts2->type = TCG_TYPE_REG;
+ ts2->temp_allocated = 1;
+ ts2->temp_subindex = i;
+ ts2->kind = kind;
+ }
+ }
return ts;
}
+TCGv_i32 tcg_temp_new_i32(void)
+{
+ return temp_tcgv_i32(tcg_temp_new_internal(TCG_TYPE_I32, TEMP_TB));
+}
+
+TCGv_i32 tcg_temp_ebb_new_i32(void)
+{
+ return temp_tcgv_i32(tcg_temp_new_internal(TCG_TYPE_I32, TEMP_EBB));
+}
+
+TCGv_i64 tcg_temp_new_i64(void)
+{
+ return temp_tcgv_i64(tcg_temp_new_internal(TCG_TYPE_I64, TEMP_TB));
+}
+
+TCGv_i64 tcg_temp_ebb_new_i64(void)
+{
+ return temp_tcgv_i64(tcg_temp_new_internal(TCG_TYPE_I64, TEMP_EBB));
+}
+
+TCGv_ptr tcg_temp_new_ptr(void)
+{
+ return temp_tcgv_ptr(tcg_temp_new_internal(TCG_TYPE_PTR, TEMP_TB));
+}
+
+TCGv_ptr tcg_temp_ebb_new_ptr(void)
+{
+ return temp_tcgv_ptr(tcg_temp_new_internal(TCG_TYPE_PTR, TEMP_EBB));
+}
+
+TCGv_i128 tcg_temp_new_i128(void)
+{
+ return temp_tcgv_i128(tcg_temp_new_internal(TCG_TYPE_I128, TEMP_TB));
+}
+
+TCGv_i128 tcg_temp_ebb_new_i128(void)
+{
+ return temp_tcgv_i128(tcg_temp_new_internal(TCG_TYPE_I128, TEMP_EBB));
+}
+
TCGv_vec tcg_temp_new_vec(TCGType type)
{
TCGTemp *t;
@@ -1262,7 +1781,7 @@ TCGv_vec tcg_temp_new_vec(TCGType type)
}
#endif
- t = tcg_temp_new_internal(type, 0);
+ t = tcg_temp_new_internal(type, TEMP_EBB);
return temp_tcgv_vec(t);
}
@@ -1273,83 +1792,157 @@ TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
tcg_debug_assert(t->temp_allocated != 0);
- t = tcg_temp_new_internal(t->base_type, 0);
+ t = tcg_temp_new_internal(t->base_type, TEMP_EBB);
return temp_tcgv_vec(t);
}
void tcg_temp_free_internal(TCGTemp *ts)
{
TCGContext *s = tcg_ctx;
- int k, idx;
-#if defined(CONFIG_DEBUG_TCG)
- s->temps_in_use--;
- if (s->temps_in_use < 0) {
- fprintf(stderr, "More temporaries freed than allocated!\n");
+ switch (ts->kind) {
+ case TEMP_CONST:
+ case TEMP_TB:
+ /* Silently ignore free. */
+ break;
+ case TEMP_EBB:
+ tcg_debug_assert(ts->temp_allocated != 0);
+ ts->temp_allocated = 0;
+ set_bit(temp_idx(ts), s->free_temps[ts->base_type].l);
+ break;
+ default:
+ /* It never made sense to free TEMP_FIXED or TEMP_GLOBAL. */
+ g_assert_not_reached();
}
-#endif
-
- tcg_debug_assert(ts->temp_global == 0);
- tcg_debug_assert(ts->temp_allocated != 0);
- ts->temp_allocated = 0;
-
- idx = temp_idx(ts);
- k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
- set_bit(idx, s->free_temps[k].l);
}
-TCGv_i32 tcg_const_i32(int32_t val)
+void tcg_temp_free_i32(TCGv_i32 arg)
{
- TCGv_i32 t0;
- t0 = tcg_temp_new_i32();
- tcg_gen_movi_i32(t0, val);
- return t0;
+ tcg_temp_free_internal(tcgv_i32_temp(arg));
}
-TCGv_i64 tcg_const_i64(int64_t val)
+void tcg_temp_free_i64(TCGv_i64 arg)
{
- TCGv_i64 t0;
- t0 = tcg_temp_new_i64();
- tcg_gen_movi_i64(t0, val);
- return t0;
+ tcg_temp_free_internal(tcgv_i64_temp(arg));
}
-TCGv_i32 tcg_const_local_i32(int32_t val)
+void tcg_temp_free_i128(TCGv_i128 arg)
{
- TCGv_i32 t0;
- t0 = tcg_temp_local_new_i32();
- tcg_gen_movi_i32(t0, val);
- return t0;
+ tcg_temp_free_internal(tcgv_i128_temp(arg));
}
-TCGv_i64 tcg_const_local_i64(int64_t val)
+void tcg_temp_free_ptr(TCGv_ptr arg)
{
- TCGv_i64 t0;
- t0 = tcg_temp_local_new_i64();
- tcg_gen_movi_i64(t0, val);
- return t0;
+ tcg_temp_free_internal(tcgv_ptr_temp(arg));
}
-#if defined(CONFIG_DEBUG_TCG)
-void tcg_clear_temp_count(void)
+void tcg_temp_free_vec(TCGv_vec arg)
{
- TCGContext *s = tcg_ctx;
- s->temps_in_use = 0;
+ tcg_temp_free_internal(tcgv_vec_temp(arg));
}
-int tcg_check_temp_count(void)
+TCGTemp *tcg_constant_internal(TCGType type, int64_t val)
{
TCGContext *s = tcg_ctx;
- if (s->temps_in_use) {
- /* Clear the count so that we don't give another
- * warning immediately next time around.
- */
- s->temps_in_use = 0;
- return 1;
+ GHashTable *h = s->const_table[type];
+ TCGTemp *ts;
+
+ if (h == NULL) {
+ h = g_hash_table_new(g_int64_hash, g_int64_equal);
+ s->const_table[type] = h;
}
- return 0;
+
+ ts = g_hash_table_lookup(h, &val);
+ if (ts == NULL) {
+ int64_t *val_ptr;
+
+ ts = tcg_temp_alloc(s);
+
+ if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
+ TCGTemp *ts2 = tcg_temp_alloc(s);
+
+ tcg_debug_assert(ts2 == ts + 1);
+
+ ts->base_type = TCG_TYPE_I64;
+ ts->type = TCG_TYPE_I32;
+ ts->kind = TEMP_CONST;
+ ts->temp_allocated = 1;
+
+ ts2->base_type = TCG_TYPE_I64;
+ ts2->type = TCG_TYPE_I32;
+ ts2->kind = TEMP_CONST;
+ ts2->temp_allocated = 1;
+ ts2->temp_subindex = 1;
+
+ /*
+ * Retain the full value of the 64-bit constant in the low
+ * part, so that the hash table works. Actual uses will
+ * truncate the value to the low part.
+ */
+ ts[HOST_BIG_ENDIAN].val = val;
+ ts[!HOST_BIG_ENDIAN].val = val >> 32;
+ val_ptr = &ts[HOST_BIG_ENDIAN].val;
+ } else {
+ ts->base_type = type;
+ ts->type = type;
+ ts->kind = TEMP_CONST;
+ ts->temp_allocated = 1;
+ ts->val = val;
+ val_ptr = &ts->val;
+ }
+ g_hash_table_insert(h, val_ptr, ts);
+ }
+
+ return ts;
}
-#endif
+
+TCGv_i32 tcg_constant_i32(int32_t val)
+{
+ return temp_tcgv_i32(tcg_constant_internal(TCG_TYPE_I32, val));
+}
+
+TCGv_i64 tcg_constant_i64(int64_t val)
+{
+ return temp_tcgv_i64(tcg_constant_internal(TCG_TYPE_I64, val));
+}
+
+TCGv_ptr tcg_constant_ptr_int(intptr_t val)
+{
+ return temp_tcgv_ptr(tcg_constant_internal(TCG_TYPE_PTR, val));
+}
+
+TCGv_vec tcg_constant_vec(TCGType type, unsigned vece, int64_t val)
+{
+ val = dup_const(vece, val);
+ return temp_tcgv_vec(tcg_constant_internal(type, val));
+}
+
+TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val)
+{
+ TCGTemp *t = tcgv_vec_temp(match);
+
+ tcg_debug_assert(t->temp_allocated != 0);
+ return tcg_constant_vec(t->base_type, vece, val);
+}
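
/*
 * A minimal sketch of the constant-interning scheme in
 * tcg_constant_internal() above, using the same GLib calls: the hash
 * key points at storage inside the interned object itself, so it stays
 * valid for the lifetime of the table entry.  ConstTemp and intern()
 * are illustrative placeholders; build with
 * `pkg-config --cflags --libs glib-2.0`.
 */
#include <glib.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t val; } ConstTemp;

static GHashTable *const_table;

static ConstTemp *intern(int64_t val)
{
    ConstTemp *ts = g_hash_table_lookup(const_table, &val);

    if (ts == NULL) {
        ts = g_new0(ConstTemp, 1);
        ts->val = val;
        /* The key must outlive the entry: point it into the temp. */
        g_hash_table_insert(const_table, &ts->val, ts);
    }
    return ts;
}

int main(void)
{
    const_table = g_hash_table_new(g_int64_hash, g_int64_equal);
    printf("same temp for equal values: %d\n", intern(42) == intern(42));
    printf("distinct temps otherwise:   %d\n", intern(42) != intern(43));
    return 0;
}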
+
+#ifdef CONFIG_DEBUG_TCG
+size_t temp_idx(TCGTemp *ts)
+{
+ ptrdiff_t n = ts - tcg_ctx->temps;
+ assert(n >= 0 && n < tcg_ctx->nb_temps);
+ return n;
+}
+
+TCGTemp *tcgv_i32_temp(TCGv_i32 v)
+{
+ uintptr_t o = (uintptr_t)v - offsetof(TCGContext, temps);
+
+ assert(o < sizeof(TCGTemp) * tcg_ctx->nb_temps);
+ assert(o % sizeof(TCGTemp) == 0);
+
+ return (void *)tcg_ctx + (uintptr_t)v;
+}
+#endif /* CONFIG_DEBUG_TCG */
/* Return true if OP may appear in the opcode stream.
Test the runtime variable that controls each opcode. */
@@ -1367,19 +1960,31 @@ bool tcg_op_supported(TCGOpcode op)
case INDEX_op_insn_start:
case INDEX_op_exit_tb:
case INDEX_op_goto_tb:
- case INDEX_op_qemu_ld_i32:
- case INDEX_op_qemu_st_i32:
- case INDEX_op_qemu_ld_i64:
- case INDEX_op_qemu_st_i64:
+ case INDEX_op_goto_ptr:
+ case INDEX_op_qemu_ld_a32_i32:
+ case INDEX_op_qemu_ld_a64_i32:
+ case INDEX_op_qemu_st_a32_i32:
+ case INDEX_op_qemu_st_a64_i32:
+ case INDEX_op_qemu_ld_a32_i64:
+ case INDEX_op_qemu_ld_a64_i64:
+ case INDEX_op_qemu_st_a32_i64:
+ case INDEX_op_qemu_st_a64_i64:
return true;
- case INDEX_op_goto_ptr:
- return TCG_TARGET_HAS_goto_ptr;
+ case INDEX_op_qemu_st8_a32_i32:
+ case INDEX_op_qemu_st8_a64_i32:
+ return TCG_TARGET_HAS_qemu_st8_i32;
+
+ case INDEX_op_qemu_ld_a32_i128:
+ case INDEX_op_qemu_ld_a64_i128:
+ case INDEX_op_qemu_st_a32_i128:
+ case INDEX_op_qemu_st_a64_i128:
+ return TCG_TARGET_HAS_qemu_ldst_i128;
case INDEX_op_mov_i32:
- case INDEX_op_movi_i32:
case INDEX_op_setcond_i32:
case INDEX_op_brcond_i32:
+ case INDEX_op_movcond_i32:
case INDEX_op_ld8u_i32:
case INDEX_op_ld8s_i32:
case INDEX_op_ld16u_i32:
@@ -1390,6 +1995,7 @@ bool tcg_op_supported(TCGOpcode op)
case INDEX_op_st_i32:
case INDEX_op_add_i32:
case INDEX_op_sub_i32:
+ case INDEX_op_neg_i32:
case INDEX_op_mul_i32:
case INDEX_op_and_i32:
case INDEX_op_or_i32:
@@ -1399,8 +2005,8 @@ bool tcg_op_supported(TCGOpcode op)
case INDEX_op_sar_i32:
return true;
- case INDEX_op_movcond_i32:
- return TCG_TARGET_HAS_movcond_i32;
+ case INDEX_op_negsetcond_i32:
+ return TCG_TARGET_HAS_negsetcond_i32;
case INDEX_op_div_i32:
case INDEX_op_divu_i32:
return TCG_TARGET_HAS_div_i32;
@@ -1419,6 +2025,8 @@ bool tcg_op_supported(TCGOpcode op)
return TCG_TARGET_HAS_extract_i32;
case INDEX_op_sextract_i32:
return TCG_TARGET_HAS_sextract_i32;
+ case INDEX_op_extract2_i32:
+ return TCG_TARGET_HAS_extract2_i32;
case INDEX_op_add2_i32:
return TCG_TARGET_HAS_add2_i32;
case INDEX_op_sub2_i32:
@@ -1445,8 +2053,6 @@ bool tcg_op_supported(TCGOpcode op)
return TCG_TARGET_HAS_bswap32_i32;
case INDEX_op_not_i32:
return TCG_TARGET_HAS_not_i32;
- case INDEX_op_neg_i32:
- return TCG_TARGET_HAS_neg_i32;
case INDEX_op_andc_i32:
return TCG_TARGET_HAS_andc_i32;
case INDEX_op_orc_i32:
@@ -1469,9 +2075,9 @@ bool tcg_op_supported(TCGOpcode op)
return TCG_TARGET_REG_BITS == 32;
case INDEX_op_mov_i64:
- case INDEX_op_movi_i64:
case INDEX_op_setcond_i64:
case INDEX_op_brcond_i64:
+ case INDEX_op_movcond_i64:
case INDEX_op_ld8u_i64:
case INDEX_op_ld8s_i64:
case INDEX_op_ld16u_i64:
@@ -1485,6 +2091,7 @@ bool tcg_op_supported(TCGOpcode op)
case INDEX_op_st_i64:
case INDEX_op_add_i64:
case INDEX_op_sub_i64:
+ case INDEX_op_neg_i64:
case INDEX_op_mul_i64:
case INDEX_op_and_i64:
case INDEX_op_or_i64:
@@ -1496,8 +2103,8 @@ bool tcg_op_supported(TCGOpcode op)
case INDEX_op_extu_i32_i64:
return TCG_TARGET_REG_BITS == 64;
- case INDEX_op_movcond_i64:
- return TCG_TARGET_HAS_movcond_i64;
+ case INDEX_op_negsetcond_i64:
+ return TCG_TARGET_HAS_negsetcond_i64;
case INDEX_op_div_i64:
case INDEX_op_divu_i64:
return TCG_TARGET_HAS_div_i64;
@@ -1516,10 +2123,11 @@ bool tcg_op_supported(TCGOpcode op)
return TCG_TARGET_HAS_extract_i64;
case INDEX_op_sextract_i64:
return TCG_TARGET_HAS_sextract_i64;
+ case INDEX_op_extract2_i64:
+ return TCG_TARGET_HAS_extract2_i64;
case INDEX_op_extrl_i64_i32:
- return TCG_TARGET_HAS_extrl_i64_i32;
case INDEX_op_extrh_i64_i32:
- return TCG_TARGET_HAS_extrh_i64_i32;
+ return TCG_TARGET_HAS_extr_i64_i32;
case INDEX_op_ext8s_i64:
return TCG_TARGET_HAS_ext8s_i64;
case INDEX_op_ext16s_i64:
@@ -1540,8 +2148,6 @@ bool tcg_op_supported(TCGOpcode op)
return TCG_TARGET_HAS_bswap64_i64;
case INDEX_op_not_i64:
return TCG_TARGET_HAS_not_i64;
- case INDEX_op_neg_i64:
- return TCG_TARGET_HAS_neg_i64;
case INDEX_op_andc_i64:
return TCG_TARGET_HAS_andc_i64;
case INDEX_op_orc_i64:
@@ -1573,7 +2179,7 @@ bool tcg_op_supported(TCGOpcode op)
case INDEX_op_mov_vec:
case INDEX_op_dup_vec:
- case INDEX_op_dupi_vec:
+ case INDEX_op_dupm_vec:
case INDEX_op_ld_vec:
case INDEX_op_st_vec:
case INDEX_op_add_vec:
@@ -1589,10 +2195,18 @@ bool tcg_op_supported(TCGOpcode op)
return have_vec && TCG_TARGET_HAS_not_vec;
case INDEX_op_neg_vec:
return have_vec && TCG_TARGET_HAS_neg_vec;
+ case INDEX_op_abs_vec:
+ return have_vec && TCG_TARGET_HAS_abs_vec;
case INDEX_op_andc_vec:
return have_vec && TCG_TARGET_HAS_andc_vec;
case INDEX_op_orc_vec:
return have_vec && TCG_TARGET_HAS_orc_vec;
+ case INDEX_op_nand_vec:
+ return have_vec && TCG_TARGET_HAS_nand_vec;
+ case INDEX_op_nor_vec:
+ return have_vec && TCG_TARGET_HAS_nor_vec;
+ case INDEX_op_eqv_vec:
+ return have_vec && TCG_TARGET_HAS_eqv_vec;
case INDEX_op_mul_vec:
return have_vec && TCG_TARGET_HAS_mul_vec;
case INDEX_op_shli_vec:
@@ -1607,6 +2221,27 @@ bool tcg_op_supported(TCGOpcode op)
case INDEX_op_shrv_vec:
case INDEX_op_sarv_vec:
return have_vec && TCG_TARGET_HAS_shv_vec;
+ case INDEX_op_rotli_vec:
+ return have_vec && TCG_TARGET_HAS_roti_vec;
+ case INDEX_op_rotls_vec:
+ return have_vec && TCG_TARGET_HAS_rots_vec;
+ case INDEX_op_rotlv_vec:
+ case INDEX_op_rotrv_vec:
+ return have_vec && TCG_TARGET_HAS_rotv_vec;
+ case INDEX_op_ssadd_vec:
+ case INDEX_op_usadd_vec:
+ case INDEX_op_sssub_vec:
+ case INDEX_op_ussub_vec:
+ return have_vec && TCG_TARGET_HAS_sat_vec;
+ case INDEX_op_smin_vec:
+ case INDEX_op_umin_vec:
+ case INDEX_op_smax_vec:
+ case INDEX_op_umax_vec:
+ return have_vec && TCG_TARGET_HAS_minmax_vec;
+ case INDEX_op_bitsel_vec:
+ return have_vec && TCG_TARGET_HAS_bitsel_vec;
+ case INDEX_op_cmpsel_vec:
+ return have_vec && TCG_TARGET_HAS_cmpsel_vec;
default:
tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
@@ -1614,192 +2249,181 @@ bool tcg_op_supported(TCGOpcode op)
}
}
-/* Note: we convert the 64 bit args to 32 bit and do some alignment
- and endian swap. Maybe it would be better to do the alignment
- and endian swap in tcg_reg_alloc_call(). */
-void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
+static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs);
+
+static void tcg_gen_callN(TCGHelperInfo *info, TCGTemp *ret, TCGTemp **args)
{
- int i, real_args, nb_rets, pi;
- unsigned sizemask, flags;
- TCGHelperInfo *info;
+ TCGv_i64 extend_free[MAX_CALL_IARGS];
+ int n_extend = 0;
TCGOp *op;
+ int i, n, pi = 0, total_args;
- info = g_hash_table_lookup(helper_table, (gpointer)func);
- flags = info->flags;
- sizemask = info->sizemask;
-
-#if defined(__sparc__) && !defined(__arch64__) \
- && !defined(CONFIG_TCG_INTERPRETER)
- /* We have 64-bit values in one register, but need to pass as two
- separate parameters. Split them. */
- int orig_sizemask = sizemask;
- int orig_nargs = nargs;
- TCGv_i64 retl, reth;
- TCGTemp *split_args[MAX_OPC_PARAM];
-
- retl = NULL;
- reth = NULL;
- if (sizemask != 0) {
- for (i = real_args = 0; i < nargs; ++i) {
- int is_64bit = sizemask & (1 << (i+1)*2);
- if (is_64bit) {
- TCGv_i64 orig = temp_tcgv_i64(args[i]);
- TCGv_i32 h = tcg_temp_new_i32();
- TCGv_i32 l = tcg_temp_new_i32();
- tcg_gen_extr_i64_i32(l, h, orig);
- split_args[real_args++] = tcgv_i32_temp(h);
- split_args[real_args++] = tcgv_i32_temp(l);
- } else {
- split_args[real_args++] = args[i];
- }
- }
- nargs = real_args;
- args = split_args;
- sizemask = 0;
- }
-#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
- for (i = 0; i < nargs; ++i) {
- int is_64bit = sizemask & (1 << (i+1)*2);
- int is_signed = sizemask & (2 << (i+1)*2);
- if (!is_64bit) {
- TCGv_i64 temp = tcg_temp_new_i64();
- TCGv_i64 orig = temp_tcgv_i64(args[i]);
- if (is_signed) {
- tcg_gen_ext32s_i64(temp, orig);
- } else {
- tcg_gen_ext32u_i64(temp, orig);
- }
- args[i] = tcgv_i64_temp(temp);
- }
+ if (unlikely(g_once_init_enter(HELPER_INFO_INIT(info)))) {
+ init_call_layout(info);
+ g_once_init_leave(HELPER_INFO_INIT(info), HELPER_INFO_INIT_VAL(info));
+ }
+
+ total_args = info->nr_out + info->nr_in + 2;
+ op = tcg_op_alloc(INDEX_op_call, total_args);
+
+#ifdef CONFIG_PLUGIN
+ /* Flag helpers that may affect guest state */
+ if (tcg_ctx->plugin_insn &&
+ !(info->flags & TCG_CALL_PLUGIN) &&
+ !(info->flags & TCG_CALL_NO_SIDE_EFFECTS)) {
+ tcg_ctx->plugin_insn->calls_helpers = true;
}
-#endif /* TCG_TARGET_EXTEND_ARGS */
-
- op = tcg_emit_op(INDEX_op_call);
-
- pi = 0;
- if (ret != NULL) {
-#if defined(__sparc__) && !defined(__arch64__) \
- && !defined(CONFIG_TCG_INTERPRETER)
- if (orig_sizemask & 1) {
- /* The 32-bit ABI is going to return the 64-bit value in
- the %o0/%o1 register pair. Prepare for this by using
- two return temporaries, and reassemble below. */
- retl = tcg_temp_new_i64();
- reth = tcg_temp_new_i64();
- op->args[pi++] = tcgv_i64_arg(reth);
- op->args[pi++] = tcgv_i64_arg(retl);
- nb_rets = 2;
- } else {
- op->args[pi++] = temp_arg(ret);
- nb_rets = 1;
- }
-#else
- if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
-#ifdef HOST_WORDS_BIGENDIAN
- op->args[pi++] = temp_arg(ret + 1);
- op->args[pi++] = temp_arg(ret);
-#else
- op->args[pi++] = temp_arg(ret);
- op->args[pi++] = temp_arg(ret + 1);
#endif
- nb_rets = 2;
- } else {
- op->args[pi++] = temp_arg(ret);
- nb_rets = 1;
+
+ TCGOP_CALLO(op) = n = info->nr_out;
+ switch (n) {
+ case 0:
+ tcg_debug_assert(ret == NULL);
+ break;
+ case 1:
+ tcg_debug_assert(ret != NULL);
+ op->args[pi++] = temp_arg(ret);
+ break;
+ case 2:
+ case 4:
+ tcg_debug_assert(ret != NULL);
+ tcg_debug_assert(ret->base_type == ret->type + ctz32(n));
+ tcg_debug_assert(ret->temp_subindex == 0);
+ for (i = 0; i < n; ++i) {
+ op->args[pi++] = temp_arg(ret + i);
}
-#endif
- } else {
- nb_rets = 0;
- }
- TCGOP_CALLO(op) = nb_rets;
-
- real_args = 0;
- for (i = 0; i < nargs; i++) {
- int is_64bit = sizemask & (1 << (i+1)*2);
- if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
-#ifdef TCG_TARGET_CALL_ALIGN_ARGS
- /* some targets want aligned 64 bit args */
- if (real_args & 1) {
- op->args[pi++] = TCG_CALL_DUMMY_ARG;
- real_args++;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ TCGOP_CALLI(op) = n = info->nr_in;
+ for (i = 0; i < n; i++) {
+ const TCGCallArgumentLoc *loc = &info->in[i];
+ TCGTemp *ts = args[loc->arg_idx] + loc->tmp_subindex;
+
+ switch (loc->kind) {
+ case TCG_CALL_ARG_NORMAL:
+ case TCG_CALL_ARG_BY_REF:
+ case TCG_CALL_ARG_BY_REF_N:
+ op->args[pi++] = temp_arg(ts);
+ break;
+
+ case TCG_CALL_ARG_EXTEND_U:
+ case TCG_CALL_ARG_EXTEND_S:
+ {
+ TCGv_i64 temp = tcg_temp_ebb_new_i64();
+ TCGv_i32 orig = temp_tcgv_i32(ts);
+
+ if (loc->kind == TCG_CALL_ARG_EXTEND_S) {
+ tcg_gen_ext_i32_i64(temp, orig);
+ } else {
+ tcg_gen_extu_i32_i64(temp, orig);
+ }
+ op->args[pi++] = tcgv_i64_arg(temp);
+ extend_free[n_extend++] = temp;
}
-#endif
- /* If stack grows up, then we will be placing successive
- arguments at lower addresses, which means we need to
- reverse the order compared to how we would normally
- treat either big or little-endian. For those arguments
- that will wind up in registers, this still works for
- HPPA (the only current STACK_GROWSUP target) since the
- argument registers are *also* allocated in decreasing
- order. If another such target is added, this logic may
- have to get more complicated to differentiate between
- stack arguments and register arguments. */
-#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
- op->args[pi++] = temp_arg(args[i] + 1);
- op->args[pi++] = temp_arg(args[i]);
-#else
- op->args[pi++] = temp_arg(args[i]);
- op->args[pi++] = temp_arg(args[i] + 1);
-#endif
- real_args += 2;
- continue;
- }
+ break;
- op->args[pi++] = temp_arg(args[i]);
- real_args++;
- }
- op->args[pi++] = (uintptr_t)func;
- op->args[pi++] = flags;
- TCGOP_CALLI(op) = real_args;
-
- /* Make sure the fields didn't overflow. */
- tcg_debug_assert(TCGOP_CALLI(op) == real_args);
- tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
-
-#if defined(__sparc__) && !defined(__arch64__) \
- && !defined(CONFIG_TCG_INTERPRETER)
- /* Free all of the parts we allocated above. */
- for (i = real_args = 0; i < orig_nargs; ++i) {
- int is_64bit = orig_sizemask & (1 << (i+1)*2);
- if (is_64bit) {
- tcg_temp_free_internal(args[real_args++]);
- tcg_temp_free_internal(args[real_args++]);
- } else {
- real_args++;
+ default:
+ g_assert_not_reached();
}
}
- if (orig_sizemask & 1) {
- /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them.
- Note that describing these as TCGv_i64 eliminates an unnecessary
- zero-extension that tcg_gen_concat_i32_i64 would create. */
- tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth);
- tcg_temp_free_i64(retl);
- tcg_temp_free_i64(reth);
- }
-#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
- for (i = 0; i < nargs; ++i) {
- int is_64bit = sizemask & (1 << (i+1)*2);
- if (!is_64bit) {
- tcg_temp_free_internal(args[i]);
- }
+ op->args[pi++] = (uintptr_t)info->func;
+ op->args[pi++] = (uintptr_t)info;
+ tcg_debug_assert(pi == total_args);
+
+ if (tcg_ctx->emit_before_op) {
+ QTAILQ_INSERT_BEFORE(tcg_ctx->emit_before_op, op, link);
+ } else {
+ QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
+ }
+
+ tcg_debug_assert(n_extend < ARRAY_SIZE(extend_free));
+ for (i = 0; i < n_extend; ++i) {
+ tcg_temp_free_i64(extend_free[i]);
}
-#endif /* TCG_TARGET_EXTEND_ARGS */
+}
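
/*
 * The g_once_init_enter/leave pair at the top of tcg_gen_callN() is
 * GLib's standard once-only lazy initialization.  A minimal sketch of
 * the pattern; layout_done and expensive_layout_init() are
 * placeholders for the helper-info initialization above.
 */
#include <glib.h>
#include <stdio.h>

static gsize layout_done;

static void expensive_layout_init(void)
{
    printf("call layout computed once\n");
}

static void emit_call(void)
{
    if (g_once_init_enter(&layout_done)) {
        expensive_layout_init();
        /* Publish a non-zero value; later callers skip the branch. */
        g_once_init_leave(&layout_done, 1);
    }
}

int main(void)
{
    emit_call();
    emit_call();    /* the init function does not run again */
    return 0;
}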
+
+void tcg_gen_call0(TCGHelperInfo *info, TCGTemp *ret)
+{
+ tcg_gen_callN(info, ret, NULL);
+}
+
+void tcg_gen_call1(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1)
+{
+ tcg_gen_callN(info, ret, &t1);
+}
+
+void tcg_gen_call2(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1, TCGTemp *t2)
+{
+ TCGTemp *args[2] = { t1, t2 };
+ tcg_gen_callN(info, ret, args);
+}
+
+void tcg_gen_call3(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
+ TCGTemp *t2, TCGTemp *t3)
+{
+ TCGTemp *args[3] = { t1, t2, t3 };
+ tcg_gen_callN(info, ret, args);
+}
+
+void tcg_gen_call4(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
+ TCGTemp *t2, TCGTemp *t3, TCGTemp *t4)
+{
+ TCGTemp *args[4] = { t1, t2, t3, t4 };
+ tcg_gen_callN(info, ret, args);
+}
+
+void tcg_gen_call5(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
+ TCGTemp *t2, TCGTemp *t3, TCGTemp *t4, TCGTemp *t5)
+{
+ TCGTemp *args[5] = { t1, t2, t3, t4, t5 };
+ tcg_gen_callN(info, ret, args);
+}
+
+void tcg_gen_call6(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1, TCGTemp *t2,
+ TCGTemp *t3, TCGTemp *t4, TCGTemp *t5, TCGTemp *t6)
+{
+ TCGTemp *args[6] = { t1, t2, t3, t4, t5, t6 };
+ tcg_gen_callN(info, ret, args);
+}
+
+void tcg_gen_call7(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
+ TCGTemp *t2, TCGTemp *t3, TCGTemp *t4,
+ TCGTemp *t5, TCGTemp *t6, TCGTemp *t7)
+{
+ TCGTemp *args[7] = { t1, t2, t3, t4, t5, t6, t7 };
+ tcg_gen_callN(info, ret, args);
}
static void tcg_reg_alloc_start(TCGContext *s)
{
int i, n;
- TCGTemp *ts;
- for (i = 0, n = s->nb_globals; i < n; i++) {
- ts = &s->temps[i];
- ts->val_type = (ts->fixed_reg ? TEMP_VAL_REG : TEMP_VAL_MEM);
- }
- for (n = s->nb_temps; i < n; i++) {
- ts = &s->temps[i];
- ts->val_type = (ts->temp_local ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
- ts->mem_allocated = 0;
- ts->fixed_reg = 0;
+ for (i = 0, n = s->nb_temps; i < n; i++) {
+ TCGTemp *ts = &s->temps[i];
+ TCGTempVal val = TEMP_VAL_MEM;
+
+ switch (ts->kind) {
+ case TEMP_CONST:
+ val = TEMP_VAL_CONST;
+ break;
+ case TEMP_FIXED:
+ val = TEMP_VAL_REG;
+ break;
+ case TEMP_GLOBAL:
+ break;
+ case TEMP_EBB:
+ val = TEMP_VAL_DEAD;
+ /* fall through */
+ case TEMP_TB:
+ ts->mem_allocated = 0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ ts->val_type = val;
}
memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
@@ -1810,12 +2434,37 @@ static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
{
int idx = temp_idx(ts);
- if (ts->temp_global) {
+ switch (ts->kind) {
+ case TEMP_FIXED:
+ case TEMP_GLOBAL:
pstrcpy(buf, buf_size, ts->name);
- } else if (ts->temp_local) {
+ break;
+ case TEMP_TB:
snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
- } else {
+ break;
+ case TEMP_EBB:
snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
+ break;
+ case TEMP_CONST:
+ switch (ts->type) {
+ case TCG_TYPE_I32:
+ snprintf(buf, buf_size, "$0x%x", (int32_t)ts->val);
+ break;
+#if TCG_TARGET_REG_BITS > 32
+ case TCG_TYPE_I64:
+ snprintf(buf, buf_size, "$0x%" PRIx64, ts->val);
+ break;
+#endif
+ case TCG_TYPE_V64:
+ case TCG_TYPE_V128:
+ case TCG_TYPE_V256:
+ snprintf(buf, buf_size, "v%d$0x%" PRIx64,
+ 64 << (ts->type - TCG_TYPE_V64), ts->val);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
}
return buf;
}
@@ -1826,19 +2475,6 @@ static char *tcg_get_arg_str(TCGContext *s, char *buf,
return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
}
-/* Find helper name. */
-static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
-{
- const char *ret = NULL;
- if (helper_table) {
- TCGHelperInfo *info = g_hash_table_lookup(helper_table, (gpointer)val);
- if (info) {
- ret = info->name;
- }
- }
- return ret;
-}
-
static const char * const cond_name[] =
{
[TCG_COND_NEVER] = "never",
@@ -1852,10 +2488,12 @@ static const char * const cond_name[] =
[TCG_COND_LTU] = "ltu",
[TCG_COND_GEU] = "geu",
[TCG_COND_LEU] = "leu",
- [TCG_COND_GTU] = "gtu"
+ [TCG_COND_GTU] = "gtu",
+ [TCG_COND_TSTEQ] = "tsteq",
+ [TCG_COND_TSTNE] = "tstne",
};
-static const char * const ldst_name[] =
+static const char * const ldst_name[(MO_BSWAP | MO_SSIZE) + 1] =
{
[MO_UB] = "ub",
[MO_SB] = "sb",
@@ -1863,22 +2501,19 @@ static const char * const ldst_name[] =
[MO_LESW] = "lesw",
[MO_LEUL] = "leul",
[MO_LESL] = "lesl",
- [MO_LEQ] = "leq",
+ [MO_LEUQ] = "leq",
[MO_BEUW] = "beuw",
[MO_BESW] = "besw",
[MO_BEUL] = "beul",
[MO_BESL] = "besl",
- [MO_BEQ] = "beq",
+ [MO_BEUQ] = "beq",
+ [MO_128 + MO_BE] = "beo",
+ [MO_128 + MO_LE] = "leo",
};
static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
-#ifdef ALIGNED_ONLY
[MO_UNALN >> MO_ASHIFT] = "un+",
- [MO_ALIGN >> MO_ASHIFT] = "",
-#else
- [MO_UNALN >> MO_ASHIFT] = "",
[MO_ALIGN >> MO_ASHIFT] = "al+",
-#endif
[MO_ALIGN_2 >> MO_ASHIFT] = "al2+",
[MO_ALIGN_4 >> MO_ASHIFT] = "al4+",
[MO_ALIGN_8 >> MO_ASHIFT] = "al8+",
@@ -1887,6 +2522,23 @@ static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
[MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
};
+static const char * const atom_name[(MO_ATOM_MASK >> MO_ATOM_SHIFT) + 1] = {
+ [MO_ATOM_IFALIGN >> MO_ATOM_SHIFT] = "",
+ [MO_ATOM_IFALIGN_PAIR >> MO_ATOM_SHIFT] = "pair+",
+ [MO_ATOM_WITHIN16 >> MO_ATOM_SHIFT] = "w16+",
+ [MO_ATOM_WITHIN16_PAIR >> MO_ATOM_SHIFT] = "w16p+",
+ [MO_ATOM_SUBALIGN >> MO_ATOM_SHIFT] = "sub+",
+ [MO_ATOM_NONE >> MO_ATOM_SHIFT] = "noat+",
+};
+
+static const char bswap_flag_name[][6] = {
+ [TCG_BSWAP_IZ] = "iz",
+ [TCG_BSWAP_OZ] = "oz",
+ [TCG_BSWAP_OS] = "os",
+ [TCG_BSWAP_IZ | TCG_BSWAP_OZ] = "iz,oz",
+ [TCG_BSWAP_IZ | TCG_BSWAP_OS] = "iz,os",
+};
+
static inline bool tcg_regset_single(TCGRegSet d)
{
return (d & (d - 1)) == 0;
@@ -1901,7 +2553,11 @@ static inline TCGReg tcg_regset_first(TCGRegSet d)
}
}
-static void tcg_dump_ops(TCGContext *s, bool have_prefs)
+/* Return only the number of characters output -- no error return. */
+#define ne_fprintf(...) \
+ ({ int ret_ = fprintf(__VA_ARGS__); ret_ >= 0 ? ret_ : 0; })
+
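/*
 * A small usage sketch of ne_fprintf(): because fprintf returns a
 * negative value on error, clamping to zero lets callers accumulate a
 * column count without checking each call.  The macro relies on GCC/
 * Clang statement expressions, which QEMU already requires; the opcode
 * strings below are made up for illustration.
 */
#include <stdio.h>

#define ne_fprintf(...) \
    ({ int ret_ = fprintf(__VA_ARGS__); ret_ >= 0 ? ret_ : 0; })

int main(void)
{
    int col = 0;

    col += ne_fprintf(stdout, " %s %s,%s,%s", "add_i32", "t2", "t0", "t1");
    /* Pad to column 40 before annotations, as tcg_dump_ops does. */
    while (col < 40) {
        putc(' ', stdout);
        col++;
    }
    ne_fprintf(stdout, "dead: 1 2\n");
    return 0;
}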
+static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
{
char buf[128];
TCGOp *op;
@@ -1917,104 +2573,146 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs)
if (c == INDEX_op_insn_start) {
nb_oargs = 0;
- col += qemu_log("\n ----");
+ col += ne_fprintf(f, "\n ----");
- for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
- target_ulong a;
-#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
- a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
-#else
- a = op->args[i];
-#endif
- col += qemu_log(" " TARGET_FMT_lx, a);
+ for (i = 0, k = s->insn_start_words; i < k; ++i) {
+ col += ne_fprintf(f, " %016" PRIx64,
+ tcg_get_insn_start_param(op, i));
}
} else if (c == INDEX_op_call) {
+ const TCGHelperInfo *info = tcg_call_info(op);
+ void *func = tcg_call_func(op);
+
/* variable number of arguments */
nb_oargs = TCGOP_CALLO(op);
nb_iargs = TCGOP_CALLI(op);
nb_cargs = def->nb_cargs;
- /* function name, flags, out args */
- col += qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
- tcg_find_helper(s, op->args[nb_oargs + nb_iargs]),
- op->args[nb_oargs + nb_iargs + 1], nb_oargs);
+ col += ne_fprintf(f, " %s ", def->name);
+
+ /*
+ * Print the function name from TCGHelperInfo, if available.
+ * Note that plugins have a template function for the info,
+ * but the actual function pointer comes from the plugin.
+ */
+ if (func == info->func) {
+ col += ne_fprintf(f, "%s", info->name);
+ } else {
+ col += ne_fprintf(f, "plugin(%p)", func);
+ }
+
+ col += ne_fprintf(f, ",$0x%x,$%d", info->flags, nb_oargs);
for (i = 0; i < nb_oargs; i++) {
- col += qemu_log(",%s", tcg_get_arg_str(s, buf, sizeof(buf),
- op->args[i]));
+ col += ne_fprintf(f, ",%s", tcg_get_arg_str(s, buf, sizeof(buf),
+ op->args[i]));
}
for (i = 0; i < nb_iargs; i++) {
TCGArg arg = op->args[nb_oargs + i];
- const char *t = "<dummy>";
- if (arg != TCG_CALL_DUMMY_ARG) {
- t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
- }
- col += qemu_log(",%s", t);
+ const char *t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
+ col += ne_fprintf(f, ",%s", t);
}
} else {
- col += qemu_log(" %s ", def->name);
+ col += ne_fprintf(f, " %s ", def->name);
nb_oargs = def->nb_oargs;
nb_iargs = def->nb_iargs;
nb_cargs = def->nb_cargs;
if (def->flags & TCG_OPF_VECTOR) {
- col += qemu_log("v%d,e%d,", 64 << TCGOP_VECL(op),
- 8 << TCGOP_VECE(op));
+ col += ne_fprintf(f, "v%d,e%d,", 64 << TCGOP_VECL(op),
+ 8 << TCGOP_VECE(op));
}
k = 0;
for (i = 0; i < nb_oargs; i++) {
- if (k != 0) {
- col += qemu_log(",");
- }
- col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
- op->args[k++]));
+ const char *sep = k ? "," : "";
+ col += ne_fprintf(f, "%s%s", sep,
+ tcg_get_arg_str(s, buf, sizeof(buf),
+ op->args[k++]));
}
for (i = 0; i < nb_iargs; i++) {
- if (k != 0) {
- col += qemu_log(",");
- }
- col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
- op->args[k++]));
+ const char *sep = k ? "," : "";
+ col += ne_fprintf(f, "%s%s", sep,
+ tcg_get_arg_str(s, buf, sizeof(buf),
+ op->args[k++]));
}
switch (c) {
case INDEX_op_brcond_i32:
case INDEX_op_setcond_i32:
+ case INDEX_op_negsetcond_i32:
case INDEX_op_movcond_i32:
case INDEX_op_brcond2_i32:
case INDEX_op_setcond2_i32:
case INDEX_op_brcond_i64:
case INDEX_op_setcond_i64:
+ case INDEX_op_negsetcond_i64:
case INDEX_op_movcond_i64:
case INDEX_op_cmp_vec:
+ case INDEX_op_cmpsel_vec:
if (op->args[k] < ARRAY_SIZE(cond_name)
&& cond_name[op->args[k]]) {
- col += qemu_log(",%s", cond_name[op->args[k++]]);
+ col += ne_fprintf(f, ",%s", cond_name[op->args[k++]]);
} else {
- col += qemu_log(",$0x%" TCG_PRIlx, op->args[k++]);
+ col += ne_fprintf(f, ",$0x%" TCG_PRIlx, op->args[k++]);
}
i = 1;
break;
- case INDEX_op_qemu_ld_i32:
- case INDEX_op_qemu_st_i32:
- case INDEX_op_qemu_ld_i64:
- case INDEX_op_qemu_st_i64:
+ case INDEX_op_qemu_ld_a32_i32:
+ case INDEX_op_qemu_ld_a64_i32:
+ case INDEX_op_qemu_st_a32_i32:
+ case INDEX_op_qemu_st_a64_i32:
+ case INDEX_op_qemu_st8_a32_i32:
+ case INDEX_op_qemu_st8_a64_i32:
+ case INDEX_op_qemu_ld_a32_i64:
+ case INDEX_op_qemu_ld_a64_i64:
+ case INDEX_op_qemu_st_a32_i64:
+ case INDEX_op_qemu_st_a64_i64:
+ case INDEX_op_qemu_ld_a32_i128:
+ case INDEX_op_qemu_ld_a64_i128:
+ case INDEX_op_qemu_st_a32_i128:
+ case INDEX_op_qemu_st_a64_i128:
{
- TCGMemOpIdx oi = op->args[k++];
- TCGMemOp op = get_memop(oi);
+ const char *s_al, *s_op, *s_at;
+ MemOpIdx oi = op->args[k++];
+ MemOp mop = get_memop(oi);
unsigned ix = get_mmuidx(oi);
- if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
- col += qemu_log(",$0x%x,%u", op, ix);
+ s_al = alignment_name[(mop & MO_AMASK) >> MO_ASHIFT];
+ s_op = ldst_name[mop & (MO_BSWAP | MO_SSIZE)];
+ s_at = atom_name[(mop & MO_ATOM_MASK) >> MO_ATOM_SHIFT];
+ mop &= ~(MO_AMASK | MO_BSWAP | MO_SSIZE | MO_ATOM_MASK);
+
+ /* If all fields are accounted for, print symbolically. */
+ if (!mop && s_al && s_op && s_at) {
+ col += ne_fprintf(f, ",%s%s%s,%u",
+ s_at, s_al, s_op, ix);
} else {
- const char *s_al, *s_op;
- s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
- s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
- col += qemu_log(",%s%s,%u", s_al, s_op, ix);
+ mop = get_memop(oi);
+ col += ne_fprintf(f, ",$0x%x,%u", mop, ix);
}
i = 1;
}
break;
+ case INDEX_op_bswap16_i32:
+ case INDEX_op_bswap16_i64:
+ case INDEX_op_bswap32_i32:
+ case INDEX_op_bswap32_i64:
+ case INDEX_op_bswap64_i64:
+ {
+ TCGArg flags = op->args[k];
+ const char *name = NULL;
+
+ if (flags < ARRAY_SIZE(bswap_flag_name)) {
+ name = bswap_flag_name[flags];
+ }
+ if (name) {
+ col += ne_fprintf(f, ",%s", name);
+ } else {
+ col += ne_fprintf(f, ",$0x%" TCG_PRIlx, flags);
+ }
+ i = k = 1;
+ }
+ break;
default:
i = 0;
break;
@@ -2025,21 +2723,101 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs)
case INDEX_op_brcond_i32:
case INDEX_op_brcond_i64:
case INDEX_op_brcond2_i32:
- col += qemu_log("%s$L%d", k ? "," : "",
- arg_label(op->args[k])->id);
+ col += ne_fprintf(f, "%s$L%d", k ? "," : "",
+ arg_label(op->args[k])->id);
i++, k++;
break;
+ case INDEX_op_mb:
+ {
+ TCGBar membar = op->args[k];
+ const char *b_op, *m_op;
+
+ switch (membar & TCG_BAR_SC) {
+ case 0:
+ b_op = "none";
+ break;
+ case TCG_BAR_LDAQ:
+ b_op = "acq";
+ break;
+ case TCG_BAR_STRL:
+ b_op = "rel";
+ break;
+ case TCG_BAR_SC:
+ b_op = "seq";
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ switch (membar & TCG_MO_ALL) {
+ case 0:
+ m_op = "none";
+ break;
+ case TCG_MO_LD_LD:
+ m_op = "rr";
+ break;
+ case TCG_MO_LD_ST:
+ m_op = "rw";
+ break;
+ case TCG_MO_ST_LD:
+ m_op = "wr";
+ break;
+ case TCG_MO_ST_ST:
+ m_op = "ww";
+ break;
+ case TCG_MO_LD_LD | TCG_MO_LD_ST:
+ m_op = "rr+rw";
+ break;
+ case TCG_MO_LD_LD | TCG_MO_ST_LD:
+ m_op = "rr+wr";
+ break;
+ case TCG_MO_LD_LD | TCG_MO_ST_ST:
+ m_op = "rr+ww";
+ break;
+ case TCG_MO_LD_ST | TCG_MO_ST_LD:
+ m_op = "rw+wr";
+ break;
+ case TCG_MO_LD_ST | TCG_MO_ST_ST:
+ m_op = "rw+ww";
+ break;
+ case TCG_MO_ST_LD | TCG_MO_ST_ST:
+ m_op = "wr+ww";
+ break;
+ case TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_LD:
+ m_op = "rr+rw+wr";
+ break;
+ case TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST:
+ m_op = "rr+rw+ww";
+ break;
+ case TCG_MO_LD_LD | TCG_MO_ST_LD | TCG_MO_ST_ST:
+ m_op = "rr+wr+ww";
+ break;
+ case TCG_MO_LD_ST | TCG_MO_ST_LD | TCG_MO_ST_ST:
+ m_op = "rw+wr+ww";
+ break;
+ case TCG_MO_ALL:
+ m_op = "all";
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ col += ne_fprintf(f, "%s%s:%s", (k ? "," : ""), b_op, m_op);
+ i++, k++;
+ }
+ break;
default:
break;
}
for (; i < nb_cargs; i++, k++) {
- col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", op->args[k]);
+ col += ne_fprintf(f, "%s$0x%" TCG_PRIlx, k ? "," : "",
+ op->args[k]);
}
}
if (have_prefs || op->life) {
for (; col < 40; ++col) {
- putc(' ', qemu_logfile);
+ putc(' ', f);
}
}
@@ -2047,19 +2825,19 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs)
unsigned life = op->life;
if (life & (SYNC_ARG * 3)) {
- qemu_log(" sync:");
+ ne_fprintf(f, " sync:");
for (i = 0; i < 2; ++i) {
if (life & (SYNC_ARG << i)) {
- qemu_log(" %d", i);
+ ne_fprintf(f, " %d", i);
}
}
}
life /= DEAD_ARG;
if (life) {
- qemu_log(" dead:");
+ ne_fprintf(f, " dead:");
for (i = 0; life; ++i, life >>= 1) {
if (life & 1) {
- qemu_log(" %d", i);
+ ne_fprintf(f, " %d", i);
}
}
}
@@ -2067,73 +2845,86 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs)
if (have_prefs) {
for (i = 0; i < nb_oargs; ++i) {
- TCGRegSet set = op->output_pref[i];
+ TCGRegSet set = output_pref(op, i);
if (i == 0) {
- qemu_log(" pref=");
+ ne_fprintf(f, " pref=");
} else {
- qemu_log(",");
+ ne_fprintf(f, ",");
}
if (set == 0) {
- qemu_log("none");
+ ne_fprintf(f, "none");
} else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
- qemu_log("all");
+ ne_fprintf(f, "all");
#ifdef CONFIG_DEBUG_TCG
} else if (tcg_regset_single(set)) {
TCGReg reg = tcg_regset_first(set);
- qemu_log("%s", tcg_target_reg_names[reg]);
+ ne_fprintf(f, "%s", tcg_target_reg_names[reg]);
#endif
} else if (TCG_TARGET_NB_REGS <= 32) {
- qemu_log("%#x", (uint32_t)set);
+ ne_fprintf(f, "0x%x", (uint32_t)set);
} else {
- qemu_log("%#" PRIx64, (uint64_t)set);
+ ne_fprintf(f, "0x%" PRIx64, (uint64_t)set);
}
}
}
- qemu_log("\n");
+ putc('\n', f);
}
}
/* we give more priority to constraints with fewer registers */
static int get_constraint_priority(const TCGOpDef *def, int k)
{
- const TCGArgConstraint *arg_ct;
+ const TCGArgConstraint *arg_ct = &def->args_ct[k];
+ int n = ctpop64(arg_ct->regs);
- int i, n;
- arg_ct = &def->args_ct[k];
- if (arg_ct->ct & TCG_CT_ALIAS) {
- /* an alias is equivalent to a single register */
- n = 1;
- } else {
- if (!(arg_ct->ct & TCG_CT_REG))
- return 0;
- n = 0;
- for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
- if (tcg_regset_test_reg(arg_ct->u.regs, i))
- n++;
- }
+ /*
+ * Sort constraints of a single register first, which includes output
+ * aliases (which must exactly match the input already allocated).
+ */
+ if (n == 1 || arg_ct->oalias) {
+ return INT_MAX;
+ }
+
+ /*
+ * Sort register pairs next, first then second immediately after.
+ * Arbitrarily sort multiple pairs by the index of the first reg;
+ * there shouldn't be many pairs.
+ */
+ switch (arg_ct->pair) {
+ case 1:
+ case 3:
+ return (k + 1) * 2;
+ case 2:
+ return (arg_ct->pair_index + 1) * 2 - 1;
}
- return TCG_TARGET_NB_REGS - n + 1;
+
+ /* Finally, sort by decreasing register count. */
+ assert(n > 1);
+ return -n;
}
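
/*
 * A mock of the sort keys get_constraint_priority() produces, to make
 * the ordering concrete: exact-register and alias constraints sort
 * first (INT_MAX), register pairs next (small positive keys keeping
 * first/second adjacent), and wide register sets last (negative, with
 * more registers sorting later).  MockCt is a placeholder struct, and
 * __builtin_popcountll is the GCC/Clang builtin behind ctpop64.
 */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t regs;      /* allowed register set */
    int pair;           /* 0 none, 1 first, 2 second, 3 aliased second */
    int pair_index;
    int oalias;
} MockCt;

static int priority(const MockCt *ct, int k)
{
    int n = __builtin_popcountll(ct->regs);

    if (n == 1 || ct->oalias) {
        return INT_MAX;                       /* most constrained first */
    }
    switch (ct->pair) {
    case 1:
    case 3:
        return (k + 1) * 2;                   /* first of a pair */
    case 2:
        return (ct->pair_index + 1) * 2 - 1;  /* second, right after it */
    }
    return -n;                                /* wide sets sort last */
}

int main(void)
{
    MockCt single = { .regs = 1ull << 3 };
    MockCt any = { .regs = 0xffff };

    printf("single reg: %d, any of 16 regs: %d\n",
           priority(&single, 0), priority(&any, 1));
    return 0;
}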
/* sort from highest priority to lowest */
static void sort_constraints(TCGOpDef *def, int start, int n)
{
- int i, j, p1, p2, tmp;
+ int i, j;
+ TCGArgConstraint *a = def->args_ct;
- for(i = 0; i < n; i++)
- def->sorted_args[start + i] = start + i;
- if (n <= 1)
+ for (i = 0; i < n; i++) {
+ a[start + i].sort_index = start + i;
+ }
+ if (n <= 1) {
return;
- for(i = 0; i < n - 1; i++) {
- for(j = i + 1; j < n; j++) {
- p1 = get_constraint_priority(def, def->sorted_args[start + i]);
- p2 = get_constraint_priority(def, def->sorted_args[start + j]);
+ }
+ for (i = 0; i < n - 1; i++) {
+ for (j = i + 1; j < n; j++) {
+ int p1 = get_constraint_priority(def, a[start + i].sort_index);
+ int p2 = get_constraint_priority(def, a[start + j].sort_index);
if (p1 < p2) {
- tmp = def->sorted_args[start + i];
- def->sorted_args[start + i] = def->sorted_args[start + j];
- def->sorted_args[start + j] = tmp;
+ int tmp = a[start + i].sort_index;
+ a[start + i].sort_index = a[start + j].sort_index;
+ a[start + j].sort_index = tmp;
}
}
}
@@ -2146,8 +2937,8 @@ static void process_op_defs(TCGContext *s)
for (op = 0; op < NB_OPS; op++) {
TCGOpDef *def = &tcg_op_defs[op];
const TCGTargetOpDef *tdefs;
- TCGType type;
- int i, nb_args;
+ bool saw_alias_pair = false;
+ int i, o, i2, o2, nb_args;
if (def->flags & TCG_OPF_NOT_PRESENT) {
continue;
@@ -2158,79 +2949,220 @@ static void process_op_defs(TCGContext *s)
continue;
}
- tdefs = tcg_target_op_def(op);
- /* Missing TCGTargetOpDef entry. */
- tcg_debug_assert(tdefs != NULL);
+ /*
+ * Macro magic should make it impossible, but double-check that
+ * the array index is in range. Since the signness of an enum
+ * is implementation defined, force the result to unsigned.
+ */
+ unsigned con_set = tcg_target_op_def(op);
+ tcg_debug_assert(con_set < ARRAY_SIZE(constraint_sets));
+ tdefs = &constraint_sets[con_set];
- type = (def->flags & TCG_OPF_64BIT ? TCG_TYPE_I64 : TCG_TYPE_I32);
for (i = 0; i < nb_args; i++) {
const char *ct_str = tdefs->args_ct_str[i];
+ bool input_p = i >= def->nb_oargs;
+
/* Incomplete TCGTargetOpDef entry. */
tcg_debug_assert(ct_str != NULL);
- def->args_ct[i].u.regs = 0;
- def->args_ct[i].ct = 0;
- while (*ct_str != '\0') {
- switch(*ct_str) {
- case '0' ... '9':
- {
- int oarg = *ct_str - '0';
- tcg_debug_assert(ct_str == tdefs->args_ct_str[i]);
- tcg_debug_assert(oarg < def->nb_oargs);
- tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG);
- /* TCG_CT_ALIAS is for the output arguments.
- The input is tagged with TCG_CT_IALIAS. */
- def->args_ct[i] = def->args_ct[oarg];
- def->args_ct[oarg].ct |= TCG_CT_ALIAS;
- def->args_ct[oarg].alias_index = i;
- def->args_ct[i].ct |= TCG_CT_IALIAS;
- def->args_ct[i].alias_index = oarg;
- }
- ct_str++;
- break;
- case '&':
- def->args_ct[i].ct |= TCG_CT_NEWREG;
- ct_str++;
- break;
+ switch (*ct_str) {
+ case '0' ... '9':
+ o = *ct_str - '0';
+ tcg_debug_assert(input_p);
+ tcg_debug_assert(o < def->nb_oargs);
+ tcg_debug_assert(def->args_ct[o].regs != 0);
+ tcg_debug_assert(!def->args_ct[o].oalias);
+ def->args_ct[i] = def->args_ct[o];
+ /* The output sets oalias. */
+ def->args_ct[o].oalias = 1;
+ def->args_ct[o].alias_index = i;
+ /* The input sets ialias. */
+ def->args_ct[i].ialias = 1;
+ def->args_ct[i].alias_index = o;
+ if (def->args_ct[i].pair) {
+ saw_alias_pair = true;
+ }
+ tcg_debug_assert(ct_str[1] == '\0');
+ continue;
+
+ case '&':
+ tcg_debug_assert(!input_p);
+ def->args_ct[i].newreg = true;
+ ct_str++;
+ break;
+
+ case 'p': /* plus */
+ /* Allocate to the register after the previous. */
+ tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
+ o = i - 1;
+ tcg_debug_assert(!def->args_ct[o].pair);
+ tcg_debug_assert(!def->args_ct[o].ct);
+ def->args_ct[i] = (TCGArgConstraint){
+ .pair = 2,
+ .pair_index = o,
+ .regs = def->args_ct[o].regs << 1,
+ .newreg = def->args_ct[o].newreg,
+ };
+ def->args_ct[o].pair = 1;
+ def->args_ct[o].pair_index = i;
+ tcg_debug_assert(ct_str[1] == '\0');
+ continue;
+
+ case 'm': /* minus */
+ /* Allocate to the register before the previous. */
+ tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
+ o = i - 1;
+ tcg_debug_assert(!def->args_ct[o].pair);
+ tcg_debug_assert(!def->args_ct[o].ct);
+ def->args_ct[i] = (TCGArgConstraint){
+ .pair = 1,
+ .pair_index = o,
+ .regs = def->args_ct[o].regs >> 1,
+ .newreg = def->args_ct[o].newreg,
+ };
+ def->args_ct[o].pair = 2;
+ def->args_ct[o].pair_index = i;
+ tcg_debug_assert(ct_str[1] == '\0');
+ continue;
+ }
+
+ do {
+ switch (*ct_str) {
case 'i':
def->args_ct[i].ct |= TCG_CT_CONST;
- ct_str++;
break;
+
+ /* Include all of the target-specific constraints. */
+
+#undef CONST
+#define CONST(CASE, MASK) \
+ case CASE: def->args_ct[i].ct |= MASK; break;
+#define REGS(CASE, MASK) \
+ case CASE: def->args_ct[i].regs |= MASK; break;
+
+#include "tcg-target-con-str.h"
+
+#undef REGS
+#undef CONST
default:
- ct_str = target_parse_constraint(&def->args_ct[i],
- ct_str, type);
+ case '0' ... '9':
+ case '&':
+ case 'p':
+ case 'm':
/* Typo in TCGTargetOpDef constraint. */
- tcg_debug_assert(ct_str != NULL);
+ g_assert_not_reached();
}
- }
+ } while (*++ct_str != '\0');
}
/* TCGTargetOpDef entry with too much information? */
tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
+ /*
+ * Fix up output pairs that are aliased with inputs.
+ * When we created the alias, we copied pair from the output.
+ * There are three cases:
+ * (1a) Pairs of inputs alias pairs of outputs.
+ * (1b) One input aliases the first of a pair of outputs.
+ * (2) One input aliases the second of a pair of outputs.
+ *
+ * Case 1a is handled by making sure that the pair_index'es are
+ * properly updated so that they appear the same as a pair of inputs.
+ *
+ * Case 1b is handled by setting the pair_index of the input to
+ * itself, simply so it doesn't point to an unrelated argument.
+ * Since we don't encounter the "second" during the input allocation
+ * phase, nothing happens with the second half of the input pair.
+ *
+ * Case 2 is handled by setting the second input to pair=3, the
+ * first output to pair=3, and the pair_index'es to match.
+ */
+ if (saw_alias_pair) {
+ for (i = def->nb_oargs; i < nb_args; i++) {
+ /*
+                 * Since [0-9pm] must be alone in the constraint string,
+                 * the only way ialias and pair can both be set is if
+                 * the pair comes from the output alias.
+ */
+ if (!def->args_ct[i].ialias) {
+ continue;
+ }
+ switch (def->args_ct[i].pair) {
+ case 0:
+ break;
+ case 1:
+ o = def->args_ct[i].alias_index;
+ o2 = def->args_ct[o].pair_index;
+ tcg_debug_assert(def->args_ct[o].pair == 1);
+ tcg_debug_assert(def->args_ct[o2].pair == 2);
+ if (def->args_ct[o2].oalias) {
+ /* Case 1a */
+ i2 = def->args_ct[o2].alias_index;
+ tcg_debug_assert(def->args_ct[i2].pair == 2);
+ def->args_ct[i2].pair_index = i;
+ def->args_ct[i].pair_index = i2;
+ } else {
+ /* Case 1b */
+ def->args_ct[i].pair_index = i;
+ }
+ break;
+ case 2:
+ o = def->args_ct[i].alias_index;
+ o2 = def->args_ct[o].pair_index;
+ tcg_debug_assert(def->args_ct[o].pair == 2);
+ tcg_debug_assert(def->args_ct[o2].pair == 1);
+ if (def->args_ct[o2].oalias) {
+ /* Case 1a */
+ i2 = def->args_ct[o2].alias_index;
+ tcg_debug_assert(def->args_ct[i2].pair == 1);
+ def->args_ct[i2].pair_index = i;
+ def->args_ct[i].pair_index = i2;
+ } else {
+ /* Case 2 */
+ def->args_ct[i].pair = 3;
+ def->args_ct[o2].pair = 3;
+ def->args_ct[i].pair_index = o2;
+ def->args_ct[o2].pair_index = i;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+ }
+
    /* sort the constraints (XXX: this is just a heuristic) */
sort_constraints(def, 0, def->nb_oargs);
sort_constraints(def, def->nb_oargs, def->nb_iargs);
}
}
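
/*
 * A toy version of the matching-constraint wiring in process_op_defs()
 * above: an input whose constraint string is a digit must be allocated
 * to the same register as that output.  The Ct struct, wire_alias(),
 * and the one-output/two-input example are illustrative placeholders,
 * not an actual backend constraint set.
 */
#include <assert.h>
#include <stdio.h>

typedef struct {
    int ialias, oalias;
    int alias_index;
} Ct;

static void wire_alias(Ct *args_ct, int nb_oargs, int i, char c)
{
    int o = c - '0';

    assert(i >= nb_oargs && o < nb_oargs);
    args_ct[i] = args_ct[o];     /* inherit the output's register set */
    args_ct[o].oalias = 1;       /* the output records its alias... */
    args_ct[o].alias_index = i;
    args_ct[i].ialias = 1;       /* ...and so does the input */
    args_ct[i].alias_index = o;
}

int main(void)
{
    /* One output, two inputs; the second input is constrained to "0". */
    Ct args_ct[3] = { { 0 } };

    wire_alias(args_ct, 1, 2, '0');
    printf("input 2 aliases output %d\n", args_ct[2].alias_index);
    return 0;
}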
-void tcg_op_remove(TCGContext *s, TCGOp *op)
+static void remove_label_use(TCGOp *op, int idx)
{
- TCGLabel *label;
+ TCGLabel *label = arg_label(op->args[idx]);
+ TCGLabelUse *use;
+ QSIMPLEQ_FOREACH(use, &label->branches, next) {
+ if (use->op == op) {
+ QSIMPLEQ_REMOVE(&label->branches, use, TCGLabelUse, next);
+ return;
+ }
+ }
+ g_assert_not_reached();
+}
+
+void tcg_op_remove(TCGContext *s, TCGOp *op)
+{
switch (op->opc) {
case INDEX_op_br:
- label = arg_label(op->args[0]);
- label->refs--;
+ remove_label_use(op, 0);
break;
case INDEX_op_brcond_i32:
case INDEX_op_brcond_i64:
- label = arg_label(op->args[3]);
- label->refs--;
+ remove_label_use(op, 3);
break;
case INDEX_op_brcond2_i32:
- label = arg_label(op->args[5]);
- label->refs--;
+ remove_label_use(op, 5);
break;
default:
break;
@@ -2239,66 +3171,153 @@ void tcg_op_remove(TCGContext *s, TCGOp *op)
QTAILQ_REMOVE(&s->ops, op, link);
QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
s->nb_ops--;
-
-#ifdef CONFIG_PROFILER
- atomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
-#endif
}
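
/*
 * The patch replaces the old label->refs counter with an explicit list
 * of uses, so branches can be retargeted (see move_label_uses below)
 * and a label is dead exactly when its list is empty.  A minimal
 * sketch of that bookkeeping with a hand-rolled singly linked list;
 * Use, Label, add_use() and remove_use() are placeholders for the
 * QSIMPLEQ-based originals.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct Use {
    int op_id;              /* stand-in for the TCGOp pointer */
    struct Use *next;
} Use;

typedef struct {
    Use *uses;
} Label;

static void add_use(Label *l, int op_id)
{
    Use *u = malloc(sizeof(*u));

    u->op_id = op_id;
    u->next = l->uses;
    l->uses = u;
}

static void remove_use(Label *l, int op_id)
{
    for (Use **p = &l->uses; *p; p = &(*p)->next) {
        if ((*p)->op_id == op_id) {
            Use *dead = *p;
            *p = dead->next;
            free(dead);
            return;
        }
    }
}

int main(void)
{
    Label l = { NULL };

    add_use(&l, 1);
    add_use(&l, 2);
    remove_use(&l, 1);
    printf("label %s\n", l.uses ? "still referenced" : "dead");
    remove_use(&l, 2);
    printf("label %s\n", l.uses ? "still referenced" : "dead");
    return 0;
}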
-static TCGOp *tcg_op_alloc(TCGOpcode opc)
+void tcg_remove_ops_after(TCGOp *op)
{
TCGContext *s = tcg_ctx;
- TCGOp *op;
- if (likely(QTAILQ_EMPTY(&s->free_ops))) {
- op = tcg_malloc(sizeof(TCGOp));
- } else {
- op = QTAILQ_FIRST(&s->free_ops);
- QTAILQ_REMOVE(&s->free_ops, op, link);
+ while (true) {
+ TCGOp *last = tcg_last_op();
+ if (last == op) {
+ return;
+ }
+ tcg_op_remove(s, last);
}
+}
+
+static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs)
+{
+ TCGContext *s = tcg_ctx;
+ TCGOp *op = NULL;
+
+ if (unlikely(!QTAILQ_EMPTY(&s->free_ops))) {
+ QTAILQ_FOREACH(op, &s->free_ops, link) {
+ if (nargs <= op->nargs) {
+ QTAILQ_REMOVE(&s->free_ops, op, link);
+ nargs = op->nargs;
+ goto found;
+ }
+ }
+ }
+
+ /* Most opcodes have 3 or 4 operands: reduce fragmentation. */
+ nargs = MAX(4, nargs);
+ op = tcg_malloc(sizeof(TCGOp) + sizeof(TCGArg) * nargs);
+
+ found:
memset(op, 0, offsetof(TCGOp, link));
op->opc = opc;
- s->nb_ops++;
+ op->nargs = nargs;
+
+ /* Check for bitfield overflow. */
+ tcg_debug_assert(op->nargs == nargs);
+ s->nb_ops++;
return op;
}
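
/*
 * A minimal sketch of the size-aware free list in tcg_op_alloc():
 * a recycled op is reused only if its allocation is large enough, it
 * keeps its original (possibly larger) capacity, and fresh allocations
 * are rounded up to four args to limit fragmentation.  Op, op_alloc()
 * and op_free() are placeholders for the TCGOp originals.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct Op {
    struct Op *next;
    unsigned nargs;                 /* capacity, not current arg count */
    /* a TCGArg-like payload would follow */
} Op;

static Op *free_list;

static Op *op_alloc(unsigned nargs)
{
    Op *op;

    for (Op **p = &free_list; *p; p = &(*p)->next) {
        if (nargs <= (*p)->nargs) {
            op = *p;
            *p = op->next;
            return op;              /* keep the larger recorded capacity */
        }
    }
    nargs = nargs < 4 ? 4 : nargs;  /* round up small requests */
    op = malloc(sizeof(Op) + nargs * sizeof(long));
    op->nargs = nargs;
    return op;
}

static void op_free(Op *op)
{
    op->next = free_list;
    free_list = op;
}

int main(void)
{
    Op *a = op_alloc(3);            /* rounded up to capacity 4 */

    op_free(a);
    printf("reused: %d\n", a == op_alloc(4));
    return 0;
}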
-TCGOp *tcg_emit_op(TCGOpcode opc)
+TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs)
{
- TCGOp *op = tcg_op_alloc(opc);
- QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
+ TCGOp *op = tcg_op_alloc(opc, nargs);
+
+ if (tcg_ctx->emit_before_op) {
+ QTAILQ_INSERT_BEFORE(tcg_ctx->emit_before_op, op, link);
+ } else {
+ QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
+ }
return op;
}
-TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op, TCGOpcode opc)
+TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
+ TCGOpcode opc, unsigned nargs)
{
- TCGOp *new_op = tcg_op_alloc(opc);
+ TCGOp *new_op = tcg_op_alloc(opc, nargs);
QTAILQ_INSERT_BEFORE(old_op, new_op, link);
return new_op;
}
-TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op, TCGOpcode opc)
+TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
+ TCGOpcode opc, unsigned nargs)
{
- TCGOp *new_op = tcg_op_alloc(opc);
+ TCGOp *new_op = tcg_op_alloc(opc, nargs);
QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
return new_op;
}
+static void move_label_uses(TCGLabel *to, TCGLabel *from)
+{
+ TCGLabelUse *u;
+
+ QSIMPLEQ_FOREACH(u, &from->branches, next) {
+ TCGOp *op = u->op;
+ switch (op->opc) {
+ case INDEX_op_br:
+ op->args[0] = label_arg(to);
+ break;
+ case INDEX_op_brcond_i32:
+ case INDEX_op_brcond_i64:
+ op->args[3] = label_arg(to);
+ break;
+ case INDEX_op_brcond2_i32:
+ op->args[5] = label_arg(to);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ QSIMPLEQ_CONCAT(&to->branches, &from->branches);
+}
+
/* Reachability analysis: remove unreachable code. */
-static void reachable_code_pass(TCGContext *s)
+static void __attribute__((noinline))
+reachable_code_pass(TCGContext *s)
{
- TCGOp *op, *op_next;
+ TCGOp *op, *op_next, *op_prev;
bool dead = false;
QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
bool remove = dead;
TCGLabel *label;
- int call_flags;
switch (op->opc) {
case INDEX_op_set_label:
label = arg_label(op->args[0]);
- if (label->refs == 0) {
+
+ /*
+ * Note that the first op in the TB is always a load,
+ * so there is always something before a label.
+ */
+ op_prev = QTAILQ_PREV(op, link);
+
+ /*
+ * If we find two sequential labels, move all branches to
+ * reference the second label and remove the first label.
+ * Do this before branch to next optimization, so that the
+ * middle label is out of the way.
+ */
+ if (op_prev->opc == INDEX_op_set_label) {
+ move_label_uses(label, arg_label(op_prev->args[0]));
+ tcg_op_remove(s, op_prev);
+ op_prev = QTAILQ_PREV(op, link);
+ }
+
+ /*
+ * Optimization can fold conditional branches to unconditional.
+ * If we find a label which is preceded by an unconditional
+ * branch to next, remove the branch. We couldn't do this when
+ * processing the branch because any dead code between the branch
+ * and label had not yet been removed.
+ */
+ if (op_prev->opc == INDEX_op_br &&
+ label == arg_label(op_prev->args[0])) {
+ tcg_op_remove(s, op_prev);
+ /* Fall through means insns become live again. */
+ dead = false;
+ }
+
+ if (QSIMPLEQ_EMPTY(&label->branches)) {
/*
* While there is an occasional backward branch, virtually
* all branches generated by the translators are forward.
@@ -2311,21 +3330,6 @@ static void reachable_code_pass(TCGContext *s)
/* Once we see a label, insns become live again. */
dead = false;
remove = false;
-
- /*
- * Optimization can fold conditional branches to unconditional.
- * If we find a label with one reference which is preceded by
- * an unconditional branch to it, remove both. This needed to
- * wait until the dead code in between them was removed.
- */
- if (label->refs == 1) {
- TCGOp *op_prev = QTAILQ_PREV(op, link);
- if (op_prev->opc == INDEX_op_br &&
- label == arg_label(op_prev->args[0])) {
- tcg_op_remove(s, op_prev);
- remove = true;
- }
- }
}
break;
@@ -2338,8 +3342,7 @@ static void reachable_code_pass(TCGContext *s)
case INDEX_op_call:
/* Notice noreturn helper calls, raising exceptions. */
- call_flags = op->args[TCGOP_CALLO(op) + TCGOP_CALLI(op) + 1];
- if (call_flags & TCG_CALL_NO_RETURN) {
+ if (tcg_call_flags(op) & TCG_CALL_NO_RETURN) {
dead = true;
}
break;
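
/*
 * A compact simulation of the sweep reachable_code_pass() performs
 * over the op list: everything after an unconditional branch or a
 * noreturn helper call is dead until the next label revives the
 * instruction stream.  The op encoding and sweep() are illustrative;
 * the real pass also merges adjacent labels and drops branch-to-next,
 * which this sketch omits.
 */
#include <stdbool.h>
#include <stdio.h>

enum { OP_ADD, OP_BR, OP_CALL_NORETURN, OP_LABEL };

static void sweep(const int *ops, bool *remove, int n)
{
    bool dead = false;

    for (int i = 0; i < n; i++) {
        remove[i] = dead;           /* decided before looking at the op */
        switch (ops[i]) {
        case OP_LABEL:
            remove[i] = false;      /* labels make insns live again */
            dead = false;
            break;
        case OP_BR:
        case OP_CALL_NORETURN:
            dead = true;            /* following insns are unreachable */
            break;
        }
    }
}

int main(void)
{
    const int ops[] = { OP_ADD, OP_BR, OP_ADD, OP_ADD, OP_LABEL, OP_ADD };
    bool remove[6];

    sweep(ops, remove, 6);
    for (int i = 0; i < 6; i++) {
        printf("op %d: %s\n", i, remove[i] ? "removed" : "kept");
    }
    return 0;
}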
@@ -2402,15 +3405,25 @@ static void la_bb_end(TCGContext *s, int ng, int nt)
{
int i;
- for (i = 0; i < ng; ++i) {
- s->temps[i].state = TS_DEAD | TS_MEM;
- la_reset_pref(&s->temps[i]);
- }
- for (i = ng; i < nt; ++i) {
- s->temps[i].state = (s->temps[i].temp_local
- ? TS_DEAD | TS_MEM
- : TS_DEAD);
- la_reset_pref(&s->temps[i]);
+ for (i = 0; i < nt; ++i) {
+ TCGTemp *ts = &s->temps[i];
+ int state;
+
+ switch (ts->kind) {
+ case TEMP_FIXED:
+ case TEMP_GLOBAL:
+ case TEMP_TB:
+ state = TS_DEAD | TS_MEM;
+ break;
+ case TEMP_EBB:
+ case TEMP_CONST:
+ state = TS_DEAD;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ ts->state = state;
+ la_reset_pref(ts);
}
}
@@ -2429,6 +3442,37 @@ static void la_global_sync(TCGContext *s, int ng)
}
}
+/*
+ * Liveness analysis: at a conditional branch, all temps are dead
+ * unless explicitly live across the branch; globals and TB-local
+ * temps must be synced.
+ */
+static void la_bb_sync(TCGContext *s, int ng, int nt)
+{
+ la_global_sync(s, ng);
+
+ for (int i = ng; i < nt; ++i) {
+ TCGTemp *ts = &s->temps[i];
+ int state;
+
+ switch (ts->kind) {
+ case TEMP_TB:
+ state = ts->state;
+ ts->state = state | TS_MEM;
+ if (state != TS_DEAD) {
+ continue;
+ }
+ break;
+ case TEMP_EBB:
+ case TEMP_CONST:
+ continue;
+ default:
+ g_assert_not_reached();
+ }
+ la_reset_pref(&s->temps[i]);
+ }
+}
+
/* liveness analysis: sync globals back to memory and kill. */
static void la_global_kill(TCGContext *s, int ng)
{
@@ -2462,10 +3506,80 @@ static void la_cross_call(TCGContext *s, int nt)
}
}
+/*
+ * Liveness analysis: Verify the lifetime of TEMP_TB, and reduce
+ * to TEMP_EBB, if possible.
+ */
+static void __attribute__((noinline))
+liveness_pass_0(TCGContext *s)
+{
+ void * const multiple_ebb = (void *)(uintptr_t)-1;
+ int nb_temps = s->nb_temps;
+ TCGOp *op, *ebb;
+
+ for (int i = s->nb_globals; i < nb_temps; ++i) {
+ s->temps[i].state_ptr = NULL;
+ }
+
+ /*
+ * Represent each EBB by the op at which it begins. In the case of
+ * the first EBB, this is the first op, otherwise it is a label.
+ * Collect the uses of each TEMP_TB: NULL for unused, EBB for use
+ * within a single EBB, else MULTIPLE_EBB.
+ */
+ ebb = QTAILQ_FIRST(&s->ops);
+ QTAILQ_FOREACH(op, &s->ops, link) {
+ const TCGOpDef *def;
+ int nb_oargs, nb_iargs;
+
+ switch (op->opc) {
+ case INDEX_op_set_label:
+ ebb = op;
+ continue;
+ case INDEX_op_discard:
+ continue;
+ case INDEX_op_call:
+ nb_oargs = TCGOP_CALLO(op);
+ nb_iargs = TCGOP_CALLI(op);
+ break;
+ default:
+ def = &tcg_op_defs[op->opc];
+ nb_oargs = def->nb_oargs;
+ nb_iargs = def->nb_iargs;
+ break;
+ }
+
+ for (int i = 0; i < nb_oargs + nb_iargs; ++i) {
+ TCGTemp *ts = arg_temp(op->args[i]);
+
+ if (ts->kind != TEMP_TB) {
+ continue;
+ }
+ if (ts->state_ptr == NULL) {
+ ts->state_ptr = ebb;
+ } else if (ts->state_ptr != ebb) {
+ ts->state_ptr = multiple_ebb;
+ }
+ }
+ }
+
+ /*
+ * For TEMP_TB that turned out not to be used beyond one EBB,
+ * reduce the liveness to TEMP_EBB.
+ */
+ for (int i = s->nb_globals; i < nb_temps; ++i) {
+ TCGTemp *ts = &s->temps[i];
+ if (ts->kind == TEMP_TB && ts->state_ptr != multiple_ebb) {
+ ts->kind = TEMP_EBB;
+ }
+ }
+}
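
The classification above is a three-state lattice per temp: NULL (unused), the first op of a single EBB (used only in that EBB), or MULTIPLE_EBB. A minimal stand-alone sketch of the state transition, with hypothetical names:

    #include <stdint.h>

    #define MULTIPLE_EBB ((void *)(uintptr_t)-1)

    /* Fold one use of a temp, seen inside the EBB identified by @ebb,
     * into its use summary: the first use records the EBB, and a use
     * from any other EBB demotes the summary to MULTIPLE_EBB. */
    static void note_use(void **use, void *ebb)
    {
        if (*use == NULL) {
            *use = ebb;
        } else if (*use != ebb) {
            *use = MULTIPLE_EBB;
        }
    }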
+
 /* Liveness analysis: update the opc_arg_life array to tell if a
    given input argument is dead. Instructions updating dead
temporaries are removed. */
-static void liveness_pass_1(TCGContext *s)
+static void __attribute__((noinline))
+liveness_pass_1(TCGContext *s)
{
int nb_globals = s->nb_globals;
int nb_temps = s->nb_temps;
@@ -2493,12 +3607,11 @@ static void liveness_pass_1(TCGContext *s)
switch (opc) {
case INDEX_op_call:
{
- int call_flags;
- int nb_call_regs;
+ const TCGHelperInfo *info = tcg_call_info(op);
+ int call_flags = tcg_call_flags(op);
nb_oargs = TCGOP_CALLO(op);
nb_iargs = TCGOP_CALLI(op);
- call_flags = op->args[nb_oargs + nb_iargs + 1];
/* pure functions can be removed if their result is unused */
if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
@@ -2523,11 +3636,11 @@ static void liveness_pass_1(TCGContext *s)
}
ts->state = TS_DEAD;
la_reset_pref(ts);
-
- /* Not used -- it will be tcg_target_call_oarg_regs[i]. */
- op->output_pref[i] = 0;
}
+ /* Not used -- it will be tcg_target_call_oarg_reg(). */
+ memset(op->output_pref, 0, sizeof(op->output_pref));
+
if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
TCG_CALL_NO_READ_GLOBALS))) {
la_global_kill(s, nb_globals);
@@ -2538,7 +3651,7 @@ static void liveness_pass_1(TCGContext *s)
/* Record arguments that die in this helper. */
for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
ts = arg_temp(op->args[i]);
- if (ts && ts->state & TS_DEAD) {
+ if (ts->state & TS_DEAD) {
arg_life |= DEAD_ARG << i;
}
}
@@ -2546,31 +3659,59 @@ static void liveness_pass_1(TCGContext *s)
/* For all live registers, remove call-clobbered prefs. */
la_cross_call(s, nb_temps);
- nb_call_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
+ /*
+ * Input arguments are live for preceding opcodes.
+ *
+ * For those arguments that die, and will be allocated in
+ * registers, clear the register set for that arg, to be
+ * filled in below. For args that will be on the stack,
+ * reset to any available reg. Process arguments in reverse
+ * order so that if a temp is used more than once, the stack
+ * reset to max happens before the register reset to 0.
+ */
+ for (i = nb_iargs - 1; i >= 0; i--) {
+ const TCGCallArgumentLoc *loc = &info->in[i];
+ ts = arg_temp(op->args[nb_oargs + i]);
- /* Input arguments are live for preceding opcodes. */
- for (i = 0; i < nb_iargs; i++) {
- ts = arg_temp(op->args[i + nb_oargs]);
- if (ts && ts->state & TS_DEAD) {
- /* For those arguments that die, and will be allocated
- * in registers, clear the register set for that arg,
- * to be filled in below. For args that will be on
- * the stack, reset to any available reg.
- */
- *la_temp_pref(ts)
- = (i < nb_call_regs ? 0 :
- tcg_target_available_regs[ts->type]);
+ if (ts->state & TS_DEAD) {
+ switch (loc->kind) {
+ case TCG_CALL_ARG_NORMAL:
+ case TCG_CALL_ARG_EXTEND_U:
+ case TCG_CALL_ARG_EXTEND_S:
+ if (arg_slot_reg_p(loc->arg_slot)) {
+ *la_temp_pref(ts) = 0;
+ break;
+ }
+ /* fall through */
+ default:
+ *la_temp_pref(ts) =
+ tcg_target_available_regs[ts->type];
+ break;
+ }
ts->state &= ~TS_DEAD;
}
}
- /* For each input argument, add its input register to prefs.
- If a temp is used once, this produces a single set bit. */
- for (i = 0; i < MIN(nb_call_regs, nb_iargs); i++) {
- ts = arg_temp(op->args[i + nb_oargs]);
- if (ts) {
- tcg_regset_set_reg(*la_temp_pref(ts),
- tcg_target_call_iarg_regs[i]);
+ /*
+ * For each input argument, add its input register to prefs.
+ * If a temp is used once, this produces a single set bit;
+ * if a temp is used multiple times, this produces a set.
+ */
+ for (i = 0; i < nb_iargs; i++) {
+ const TCGCallArgumentLoc *loc = &info->in[i];
+ ts = arg_temp(op->args[nb_oargs + i]);
+
+ switch (loc->kind) {
+ case TCG_CALL_ARG_NORMAL:
+ case TCG_CALL_ARG_EXTEND_U:
+ case TCG_CALL_ARG_EXTEND_S:
+ if (arg_slot_reg_p(loc->arg_slot)) {
+ tcg_regset_set_reg(*la_temp_pref(ts),
+ tcg_target_call_iarg_regs[loc->arg_slot]);
+ }
+ break;
+ default:
+ break;
}
}
}
@@ -2689,7 +3830,9 @@ static void liveness_pass_1(TCGContext *s)
ts = arg_temp(op->args[i]);
/* Remember the preference of the uses that followed. */
- op->output_pref[i] = *la_temp_pref(ts);
+ if (i < ARRAY_SIZE(op->output_pref)) {
+ op->output_pref[i] = *la_temp_pref(ts);
+ }
/* Output args are dead. */
if (ts->state & TS_DEAD) {
@@ -2705,6 +3848,8 @@ static void liveness_pass_1(TCGContext *s)
/* If end of basic block, update. */
if (def->flags & TCG_OPF_BB_EXIT) {
la_func_end(s, nb_globals, nb_temps);
+ } else if (def->flags & TCG_OPF_COND_BRANCH) {
+ la_bb_sync(s, nb_globals, nb_temps);
} else if (def->flags & TCG_OPF_BB_END) {
la_bb_end(s, nb_globals, nb_temps);
} else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
@@ -2755,13 +3900,13 @@ static void liveness_pass_1(TCGContext *s)
pset = la_temp_pref(ts);
set = *pset;
- set &= ct->u.regs;
- if (ct->ct & TCG_CT_IALIAS) {
- set &= op->output_pref[ct->alias_index];
+ set &= ct->regs;
+ if (ct->ialias) {
+ set &= output_pref(op, ct->alias_index);
}
/* If the combination is not possible, restart. */
if (set == 0) {
- set = ct->u.regs;
+ set = ct->regs;
}
*pset = set;
}
@@ -2774,7 +3919,8 @@ static void liveness_pass_1(TCGContext *s)
}
/* Liveness analysis: Convert indirect regs to direct temporaries. */
-static bool liveness_pass_2(TCGContext *s)
+static bool __attribute__((noinline))
+liveness_pass_2(TCGContext *s)
{
int nb_globals = s->nb_globals;
int nb_temps, i;
@@ -2788,6 +3934,8 @@ static bool liveness_pass_2(TCGContext *s)
TCGTemp *dts = tcg_temp_alloc(s);
dts->type = its->type;
dts->base_type = its->base_type;
+ dts->temp_subindex = its->temp_subindex;
+ dts->kind = TEMP_EBB;
its->state_ptr = dts;
} else {
its->state_ptr = NULL;
@@ -2811,13 +3959,16 @@ static bool liveness_pass_2(TCGContext *s)
if (opc == INDEX_op_call) {
nb_oargs = TCGOP_CALLO(op);
nb_iargs = TCGOP_CALLI(op);
- call_flags = op->args[nb_oargs + nb_iargs + 1];
+ call_flags = tcg_call_flags(op);
} else {
nb_iargs = def->nb_iargs;
nb_oargs = def->nb_oargs;
/* Set flags similar to how calls require. */
- if (def->flags & TCG_OPF_BB_END) {
+ if (def->flags & TCG_OPF_COND_BRANCH) {
+ /* Like reading globals: sync_globals */
+ call_flags = TCG_CALL_NO_WRITE_GLOBALS;
+ } else if (def->flags & TCG_OPF_BB_END) {
/* Like writing globals: save_globals */
call_flags = 0;
} else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
@@ -2833,21 +3984,19 @@ static bool liveness_pass_2(TCGContext *s)
/* Make sure that input arguments are available. */
for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
arg_ts = arg_temp(op->args[i]);
- if (arg_ts) {
- dir_ts = arg_ts->state_ptr;
- if (dir_ts && arg_ts->state == TS_DEAD) {
- TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
- ? INDEX_op_ld_i32
- : INDEX_op_ld_i64);
- TCGOp *lop = tcg_op_insert_before(s, op, lopc);
+ dir_ts = arg_ts->state_ptr;
+ if (dir_ts && arg_ts->state == TS_DEAD) {
+ TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
+ ? INDEX_op_ld_i32
+ : INDEX_op_ld_i64);
+ TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);
- lop->args[0] = temp_arg(dir_ts);
- lop->args[1] = temp_arg(arg_ts->mem_base);
- lop->args[2] = arg_ts->mem_offset;
+ lop->args[0] = temp_arg(dir_ts);
+ lop->args[1] = temp_arg(arg_ts->mem_base);
+ lop->args[2] = arg_ts->mem_offset;
- /* Loaded, but synced with memory. */
- arg_ts->state = TS_MEM;
- }
+ /* Loaded, but synced with memory. */
+ arg_ts->state = TS_MEM;
}
}
@@ -2856,14 +4005,12 @@ static bool liveness_pass_2(TCGContext *s)
so that we reload when needed. */
for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
arg_ts = arg_temp(op->args[i]);
- if (arg_ts) {
- dir_ts = arg_ts->state_ptr;
- if (dir_ts) {
- op->args[i] = temp_arg(dir_ts);
- changes = true;
- if (IS_DEAD_ARG(i)) {
- arg_ts->state = TS_DEAD;
- }
+ dir_ts = arg_ts->state_ptr;
+ if (dir_ts) {
+ op->args[i] = temp_arg(dir_ts);
+ changes = true;
+ if (IS_DEAD_ARG(i)) {
+ arg_ts->state = TS_DEAD;
}
}
}
@@ -2891,34 +4038,68 @@ static bool liveness_pass_2(TCGContext *s)
}
/* Outputs become available. */
- for (i = 0; i < nb_oargs; i++) {
- arg_ts = arg_temp(op->args[i]);
+ if (opc == INDEX_op_mov_i32 || opc == INDEX_op_mov_i64) {
+ arg_ts = arg_temp(op->args[0]);
dir_ts = arg_ts->state_ptr;
- if (!dir_ts) {
- continue;
+ if (dir_ts) {
+ op->args[0] = temp_arg(dir_ts);
+ changes = true;
+
+ /* The output is now live and modified. */
+ arg_ts->state = 0;
+
+ if (NEED_SYNC_ARG(0)) {
+ TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
+ ? INDEX_op_st_i32
+ : INDEX_op_st_i64);
+ TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
+ TCGTemp *out_ts = dir_ts;
+
+ if (IS_DEAD_ARG(0)) {
+ out_ts = arg_temp(op->args[1]);
+ arg_ts->state = TS_DEAD;
+ tcg_op_remove(s, op);
+ } else {
+ arg_ts->state = TS_MEM;
+ }
+
+ sop->args[0] = temp_arg(out_ts);
+ sop->args[1] = temp_arg(arg_ts->mem_base);
+ sop->args[2] = arg_ts->mem_offset;
+ } else {
+ tcg_debug_assert(!IS_DEAD_ARG(0));
+ }
}
- op->args[i] = temp_arg(dir_ts);
- changes = true;
+ } else {
+ for (i = 0; i < nb_oargs; i++) {
+ arg_ts = arg_temp(op->args[i]);
+ dir_ts = arg_ts->state_ptr;
+ if (!dir_ts) {
+ continue;
+ }
+ op->args[i] = temp_arg(dir_ts);
+ changes = true;
- /* The output is now live and modified. */
- arg_ts->state = 0;
+ /* The output is now live and modified. */
+ arg_ts->state = 0;
- /* Sync outputs upon their last write. */
- if (NEED_SYNC_ARG(i)) {
- TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
- ? INDEX_op_st_i32
- : INDEX_op_st_i64);
- TCGOp *sop = tcg_op_insert_after(s, op, sopc);
+ /* Sync outputs upon their last write. */
+ if (NEED_SYNC_ARG(i)) {
+ TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
+ ? INDEX_op_st_i32
+ : INDEX_op_st_i64);
+ TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
- sop->args[0] = temp_arg(dir_ts);
- sop->args[1] = temp_arg(arg_ts->mem_base);
- sop->args[2] = arg_ts->mem_offset;
+ sop->args[0] = temp_arg(dir_ts);
+ sop->args[1] = temp_arg(arg_ts->mem_base);
+ sop->args[2] = arg_ts->mem_offset;
- arg_ts->state = TS_MEM;
- }
- /* Drop outputs that are dead. */
- if (IS_DEAD_ARG(i)) {
- arg_ts->state = TS_DEAD;
+ arg_ts->state = TS_MEM;
+ }
+ /* Drop outputs that are dead. */
+ if (IS_DEAD_ARG(i)) {
+ arg_ts->state = TS_DEAD;
+ }
}
}
}
@@ -2926,94 +4107,102 @@ static bool liveness_pass_2(TCGContext *s)
return changes;
}
-#ifdef CONFIG_DEBUG_TCG
-static void dump_regs(TCGContext *s)
+static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
{
- TCGTemp *ts;
- int i;
- char buf[64];
+ intptr_t off;
+ int size, align;
+
+ /* When allocating an object, look at the full type. */
+ size = tcg_type_size(ts->base_type);
+ switch (ts->base_type) {
+ case TCG_TYPE_I32:
+ align = 4;
+ break;
+ case TCG_TYPE_I64:
+ case TCG_TYPE_V64:
+ align = 8;
+ break;
+ case TCG_TYPE_I128:
+ case TCG_TYPE_V128:
+ case TCG_TYPE_V256:
+ /*
+ * Note that we do not require aligned storage for V256,
+ * and that we provide alignment for I128 to match V128,
+ * even if that's above what the host ABI requires.
+ */
+ align = 16;
+ break;
+ default:
+ g_assert_not_reached();
+ }
- for(i = 0; i < s->nb_temps; i++) {
- ts = &s->temps[i];
- printf(" %10s: ", tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
- switch(ts->val_type) {
- case TEMP_VAL_REG:
- printf("%s", tcg_target_reg_names[ts->reg]);
- break;
- case TEMP_VAL_MEM:
- printf("%d(%s)", (int)ts->mem_offset,
- tcg_target_reg_names[ts->mem_base->reg]);
- break;
- case TEMP_VAL_CONST:
- printf("$0x%" TCG_PRIlx, ts->val);
- break;
- case TEMP_VAL_DEAD:
- printf("D");
- break;
- default:
- printf("???");
- break;
- }
- printf("\n");
+ /*
+ * Assume the stack is sufficiently aligned.
+ * This affects e.g. ARM NEON, where we have 8 byte stack alignment
+ * and do not require 16 byte vector alignment. This seems slightly
+ * easier than fully parameterizing the above switch statement.
+ */
+ align = MIN(TCG_TARGET_STACK_ALIGN, align);
+ off = ROUND_UP(s->current_frame_offset, align);
+
+ /* If we've exhausted the stack frame, restart with a smaller TB. */
+ if (off + size > s->frame_end) {
+ tcg_raise_tb_overflow(s);
}
+ s->current_frame_offset = off + size;
+#if defined(__sparc__)
+ off += TCG_TARGET_STACK_BIAS;
+#endif
+
+ /* If the object was subdivided, assign memory to all the parts. */
+ if (ts->base_type != ts->type) {
+ int part_size = tcg_type_size(ts->type);
+ int part_count = size / part_size;
- for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
- if (s->reg_to_temp[i] != NULL) {
- printf("%s: %s\n",
- tcg_target_reg_names[i],
- tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
+ /*
+ * Each part is allocated sequentially in tcg_temp_new_internal.
+ * Jump back to the first part by subtracting the current index.
+ */
+ ts -= ts->temp_subindex;
+ for (int i = 0; i < part_count; ++i) {
+ ts[i].mem_offset = off + i * part_size;
+ ts[i].mem_base = s->frame_temp;
+ ts[i].mem_allocated = 1;
}
+ } else {
+ ts->mem_offset = off;
+ ts->mem_base = s->frame_temp;
+ ts->mem_allocated = 1;
}
}
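
The frame layout above is a plain align-up followed by a bump; a compact sketch of the arithmetic, assuming power-of-two alignments (round_up is a stand-in for the ROUND_UP macro used above):

    #include <assert.h>
    #include <stdint.h>

    /* Align @off up to @align, which must be a power of two. */
    static intptr_t round_up(intptr_t off, intptr_t align)
    {
        assert(align > 0 && (align & (align - 1)) == 0);
        return (off + align - 1) & -align;
    }
    /* e.g. round_up(20, 16) == 32: a V128 spilled when the frame
     * offset is 20 is placed at 32, and the offset bumps to 32 + 16. */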
-static void check_regs(TCGContext *s)
+/* Assign @reg to @ts, and update reg_to_temp[]. */
+static void set_temp_val_reg(TCGContext *s, TCGTemp *ts, TCGReg reg)
{
- int reg;
- int k;
- TCGTemp *ts;
- char buf[64];
-
- for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
- ts = s->reg_to_temp[reg];
- if (ts != NULL) {
- if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
- printf("Inconsistency for register %s:\n",
- tcg_target_reg_names[reg]);
- goto fail;
- }
- }
- }
- for (k = 0; k < s->nb_temps; k++) {
- ts = &s->temps[k];
- if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
- && s->reg_to_temp[ts->reg] != ts) {
- printf("Inconsistency for temp %s:\n",
- tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
- fail:
- printf("reg state:\n");
- dump_regs(s);
- tcg_abort();
+ if (ts->val_type == TEMP_VAL_REG) {
+ TCGReg old = ts->reg;
+ tcg_debug_assert(s->reg_to_temp[old] == ts);
+ if (old == reg) {
+ return;
}
+ s->reg_to_temp[old] = NULL;
}
+ tcg_debug_assert(s->reg_to_temp[reg] == NULL);
+ s->reg_to_temp[reg] = ts;
+ ts->val_type = TEMP_VAL_REG;
+ ts->reg = reg;
}
-#endif
-static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
+/* Assign a non-register value type to @ts, and update reg_to_temp[]. */
+static void set_temp_val_nonreg(TCGContext *s, TCGTemp *ts, TCGTempVal type)
{
-#if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
- /* Sparc64 stack is accessed with offset of 2047 */
- s->current_frame_offset = (s->current_frame_offset +
- (tcg_target_long)sizeof(tcg_target_long) - 1) &
- ~(sizeof(tcg_target_long) - 1);
-#endif
- if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
- s->frame_end) {
- tcg_abort();
+ tcg_debug_assert(type != TEMP_VAL_REG);
+ if (ts->val_type == TEMP_VAL_REG) {
+ TCGReg reg = ts->reg;
+ tcg_debug_assert(s->reg_to_temp[reg] == ts);
+ s->reg_to_temp[reg] = NULL;
}
- ts->mem_offset = s->current_frame_offset;
- ts->mem_base = s->frame_temp;
- ts->mem_allocated = 1;
- s->current_frame_offset += sizeof(tcg_target_long);
+ ts->val_type = type;
}
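
Both helpers exist to keep the two-way mapping consistent: ts->val_type == TEMP_VAL_REG holds exactly when reg_to_temp[ts->reg] == ts. A toy model of that bookkeeping (types and names simplified, not the QEMU API):

    #include <stddef.h>

    enum val_type { VAL_DEAD, VAL_REG, VAL_MEM, VAL_CONST };
    struct temp { enum val_type val_type; int reg; };

    #define NB_REGS 16
    static struct temp *reg_to_temp[NB_REGS];

    static void set_reg(struct temp *ts, int reg)
    {
        if (ts->val_type == VAL_REG) {
            if (ts->reg == reg) {
                return;                  /* nothing to update */
            }
            reg_to_temp[ts->reg] = NULL; /* unlink the old register first */
        }
        reg_to_temp[reg] = ts;           /* caller guarantees @reg is free */
        ts->val_type = VAL_REG;
        ts->reg = reg;
    }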
static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);
@@ -3022,16 +4211,25 @@ static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);
mark it free; otherwise mark it dead. */
static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
{
- if (ts->fixed_reg) {
+ TCGTempVal new_type;
+
+ switch (ts->kind) {
+ case TEMP_FIXED:
return;
+ case TEMP_GLOBAL:
+ case TEMP_TB:
+ new_type = TEMP_VAL_MEM;
+ break;
+ case TEMP_EBB:
+ new_type = free_or_dead < 0 ? TEMP_VAL_MEM : TEMP_VAL_DEAD;
+ break;
+ case TEMP_CONST:
+ new_type = TEMP_VAL_CONST;
+ break;
+ default:
+ g_assert_not_reached();
}
- if (ts->val_type == TEMP_VAL_REG) {
- s->reg_to_temp[ts->reg] = NULL;
- }
- ts->val_type = (free_or_dead < 0
- || ts->temp_local
- || ts->temp_global
- ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
+ set_temp_val_nonreg(s, ts, new_type);
}
/* Mark a temporary as dead. */
@@ -3047,10 +4245,7 @@ static inline void temp_dead(TCGContext *s, TCGTemp *ts)
static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
TCGRegSet preferred_regs, int free_or_dead)
{
- if (ts->fixed_reg) {
- return;
- }
- if (!ts->mem_coherent) {
+ if (!temp_readonly(ts) && !ts->mem_coherent) {
if (!ts->mem_allocated) {
temp_allocate_frame(s, ts);
}
@@ -3078,7 +4273,7 @@ static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
case TEMP_VAL_DEAD:
default:
- tcg_abort();
+ g_assert_not_reached();
}
ts->mem_coherent = 1;
}
@@ -3165,7 +4360,53 @@ static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
}
}
- tcg_abort();
+ g_assert_not_reached();
+}
+
+static TCGReg tcg_reg_alloc_pair(TCGContext *s, TCGRegSet required_regs,
+ TCGRegSet allocated_regs,
+ TCGRegSet preferred_regs, bool rev)
+{
+ int i, j, k, fmin, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
+ TCGRegSet reg_ct[2];
+ const int *order;
+
+ /* Ensure that if I is not in allocated_regs, I+1 is not either. */
+ reg_ct[1] = required_regs & ~(allocated_regs | (allocated_regs >> 1));
+ tcg_debug_assert(reg_ct[1] != 0);
+ reg_ct[0] = reg_ct[1] & preferred_regs;
+
+ order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
+
+ /*
+ * Skip the preferred_regs option if it cannot be satisfied,
+ * or if the preference made no difference.
+ */
+ k = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];
+
+ /*
+ * Minimize the number of flushes by looking for 2 free registers first,
+ * then a single flush, then two flushes.
+ */
+ for (fmin = 2; fmin >= 0; fmin--) {
+ for (j = k; j < 2; j++) {
+ TCGRegSet set = reg_ct[j];
+
+ for (i = 0; i < n; i++) {
+ TCGReg reg = order[i];
+
+ if (tcg_regset_test_reg(set, reg)) {
+ int f = !s->reg_to_temp[reg] + !s->reg_to_temp[reg + 1];
+ if (f >= fmin) {
+ tcg_reg_free(s, reg, allocated_regs);
+ tcg_reg_free(s, reg + 1, allocated_regs);
+ return reg;
+ }
+ }
+ }
+ }
+ }
+ g_assert_not_reached();
}
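
The mask built at the top is the core of the pair search: with one bit per register, or-ing allocated_regs with itself shifted right by one clears bit r whenever r or r+1 is taken, so the surviving bits mark valid pair starts. An isolated illustration with a 32-bit register set:

    #include <stdint.h>

    /* Bits set in the result are registers r such that r satisfies
     * @required and both r and r+1 are free of @allocated. */
    static uint32_t free_pair_starts(uint32_t required, uint32_t allocated)
    {
        return required & ~(allocated | (allocated >> 1));
    }
    /* allocated = 0b00110 (r1, r2 busy): r0 is rejected because r1 is
     * busy, r1 and r2 are busy themselves, so the first usable start
     * with required = all-ones is r3. */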
/* Make sure the temporary is in a register. If needed, allocate the register
@@ -3181,7 +4422,27 @@ static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
case TEMP_VAL_CONST:
reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
preferred_regs, ts->indirect_base);
- tcg_out_movi(s, ts->type, reg, ts->val);
+ if (ts->type <= TCG_TYPE_I64) {
+ tcg_out_movi(s, ts->type, reg, ts->val);
+ } else {
+ uint64_t val = ts->val;
+ MemOp vece = MO_64;
+
+ /*
+ * Find the minimal vector element that matches the constant.
+ * The targets will, in general, have to do this search anyway;
+ * do it generically here.
+ */
+ if (val == dup_const(MO_8, val)) {
+ vece = MO_8;
+ } else if (val == dup_const(MO_16, val)) {
+ vece = MO_16;
+ } else if (val == dup_const(MO_32, val)) {
+ vece = MO_32;
+ }
+
+ tcg_out_dupi_vec(s, ts->type, vece, reg, ts->val);
+ }
ts->mem_coherent = 0;
break;
case TEMP_VAL_MEM:
@@ -3192,11 +4453,9 @@ static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
break;
case TEMP_VAL_DEAD:
default:
- tcg_abort();
+ g_assert_not_reached();
}
- ts->reg = reg;
- ts->val_type = TEMP_VAL_REG;
- s->reg_to_temp[reg] = ts;
+ set_temp_val_reg(s, ts, reg);
}
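
dup_const() replicates the low element of a value across all 64 bits, so the chain of equality tests above finds the smallest element size whose replication reproduces the constant. A self-contained sketch of the replication (the real helper lives in the TCG headers):

    #include <stdint.h>

    static uint64_t dup_const_sketch(unsigned vece, uint64_t val)
    {
        switch (vece) {
        case 0: return 0x0101010101010101ull * (uint8_t)val;  /* MO_8  */
        case 1: return 0x0001000100010001ull * (uint16_t)val; /* MO_16 */
        case 2: return 0x0000000100000001ull * (uint32_t)val; /* MO_32 */
        default: return val;                                  /* MO_64 */
        }
    }
    /* 0xffffffffffffffff already dups from MO_8, while
     * 0x00ff00ff00ff00ff first matches at MO_16, which is then the
     * vece chosen by the search above. */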
/* Save a temporary to memory. 'allocated_regs' is used in case a
@@ -3205,7 +4464,7 @@ static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
{
/* The liveness analysis already ensures that globals are back
 in memory. Keep a tcg_debug_assert for safety. */
- tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
+ tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || temp_readonly(ts));
}
/* save globals to their canonical location and assume they can be
@@ -3230,7 +4489,7 @@ static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
for (i = 0, n = s->nb_globals; i < n; i++) {
TCGTemp *ts = &s->temps[i];
tcg_debug_assert(ts->val_type != TEMP_VAL_REG
- || ts->fixed_reg
+ || ts->kind == TEMP_FIXED
|| ts->mem_coherent);
}
}
@@ -3243,33 +4502,68 @@ static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
for (i = s->nb_globals; i < s->nb_temps; i++) {
TCGTemp *ts = &s->temps[i];
- if (ts->temp_local) {
+
+ switch (ts->kind) {
+ case TEMP_TB:
temp_save(s, ts, allocated_regs);
- } else {
+ break;
+ case TEMP_EBB:
/* The liveness analysis already ensures that temps are dead.
 Keep a tcg_debug_assert for safety. */
tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
+ break;
+ case TEMP_CONST:
+ /* Similarly, we should have freed any allocated register. */
+ tcg_debug_assert(ts->val_type == TEMP_VAL_CONST);
+ break;
+ default:
+ g_assert_not_reached();
}
}
save_globals(s, allocated_regs);
}
+/*
+ * At a conditional branch, we assume all temporaries are dead unless
+ * explicitly live across the branch; all globals and TB-scope
+ * temps are synced to their home location.
+ */
+static void tcg_reg_alloc_cbranch(TCGContext *s, TCGRegSet allocated_regs)
+{
+ sync_globals(s, allocated_regs);
+
+ for (int i = s->nb_globals; i < s->nb_temps; i++) {
+ TCGTemp *ts = &s->temps[i];
+ /*
+ * The liveness analysis already ensures that temps are dead.
+ * Keep tcg_debug_asserts for safety.
+ */
+ switch (ts->kind) {
+ case TEMP_TB:
+ tcg_debug_assert(ts->val_type != TEMP_VAL_REG || ts->mem_coherent);
+ break;
+ case TEMP_EBB:
+ case TEMP_CONST:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+}
+
+/*
+ * Specialized code generation for INDEX_op_mov_* with a constant.
+ */
static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
tcg_target_ulong val, TCGLifeData arg_life,
TCGRegSet preferred_regs)
{
- if (ots->fixed_reg) {
- /* For fixed registers, we do not do any constant propagation. */
- tcg_out_movi(s, ots->type, ots->reg, val);
- return;
- }
+ /* ENV should not be modified. */
+ tcg_debug_assert(!temp_readonly(ots));
/* The movi is not explicitly generated here. */
- if (ots->val_type == TEMP_VAL_REG) {
- s->reg_to_temp[ots->reg] = NULL;
- }
- ots->val_type = TEMP_VAL_CONST;
+ set_temp_val_nonreg(s, ots, TEMP_VAL_CONST);
ots->val = val;
ots->mem_coherent = 0;
if (NEED_SYNC_ARG(0)) {
@@ -3279,26 +4573,25 @@ static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
}
}
-static void tcg_reg_alloc_movi(TCGContext *s, const TCGOp *op)
-{
- TCGTemp *ots = arg_temp(op->args[0]);
- tcg_target_ulong val = op->args[1];
-
- tcg_reg_alloc_do_movi(s, ots, val, op->life, op->output_pref[0]);
-}
-
+/*
+ * Specialized code generation for INDEX_op_mov_*.
+ */
static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
{
const TCGLifeData arg_life = op->life;
TCGRegSet allocated_regs, preferred_regs;
TCGTemp *ts, *ots;
TCGType otype, itype;
+ TCGReg oreg, ireg;
allocated_regs = s->reserved_regs;
- preferred_regs = op->output_pref[0];
+ preferred_regs = output_pref(op, 0);
ots = arg_temp(op->args[0]);
ts = arg_temp(op->args[1]);
+ /* ENV should not be modified. */
+ tcg_debug_assert(!temp_readonly(ots));
+
/* Note that otype != itype for no-op truncation. */
otype = ots->type;
itype = ts->type;
@@ -3321,45 +4614,173 @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
temp_load(s, ts, tcg_target_available_regs[itype],
allocated_regs, preferred_regs);
}
-
tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
- if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
+ ireg = ts->reg;
+
+ if (IS_DEAD_ARG(0)) {
/* mov to a non-saved dead register makes no sense (even with
liveness analysis disabled). */
tcg_debug_assert(NEED_SYNC_ARG(0));
if (!ots->mem_allocated) {
temp_allocate_frame(s, ots);
}
- tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
+ tcg_out_st(s, otype, ireg, ots->mem_base->reg, ots->mem_offset);
if (IS_DEAD_ARG(1)) {
temp_dead(s, ts);
}
temp_dead(s, ots);
+ return;
+ }
+
+ if (IS_DEAD_ARG(1) && ts->kind != TEMP_FIXED) {
+ /*
+ * The mov can be suppressed. Kill input first, so that it
+ * is unlinked from reg_to_temp, then set the output to the
+ * reg that we saved from the input.
+ */
+ temp_dead(s, ts);
+ oreg = ireg;
} else {
- if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
- /* the mov can be suppressed */
- if (ots->val_type == TEMP_VAL_REG) {
- s->reg_to_temp[ots->reg] = NULL;
- }
- ots->reg = ts->reg;
- temp_dead(s, ts);
+ if (ots->val_type == TEMP_VAL_REG) {
+ oreg = ots->reg;
} else {
- if (ots->val_type != TEMP_VAL_REG) {
- /* When allocating a new register, make sure to not spill the
- input one. */
- tcg_regset_set_reg(allocated_regs, ts->reg);
- ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
- allocated_regs, preferred_regs,
- ots->indirect_base);
+ /* Make sure to not spill the input register during allocation. */
+ oreg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
+ allocated_regs | ((TCGRegSet)1 << ireg),
+ preferred_regs, ots->indirect_base);
+ }
+ if (!tcg_out_mov(s, otype, oreg, ireg)) {
+ /*
+ * Cross register class move not supported.
+ * Store the source register into the destination slot
+ * and leave the destination temp as TEMP_VAL_MEM.
+ */
+ assert(!temp_readonly(ots));
+ if (!ts->mem_allocated) {
+ temp_allocate_frame(s, ots);
}
- tcg_out_mov(s, otype, ots->reg, ts->reg);
+ tcg_out_st(s, ts->type, ireg, ots->mem_base->reg, ots->mem_offset);
+ set_temp_val_nonreg(s, ts, TEMP_VAL_MEM);
+ ots->mem_coherent = 1;
+ return;
}
- ots->val_type = TEMP_VAL_REG;
- ots->mem_coherent = 0;
- s->reg_to_temp[ots->reg] = ots;
- if (NEED_SYNC_ARG(0)) {
- temp_sync(s, ots, allocated_regs, 0, 0);
+ }
+ set_temp_val_reg(s, ots, oreg);
+ ots->mem_coherent = 0;
+
+ if (NEED_SYNC_ARG(0)) {
+ temp_sync(s, ots, allocated_regs, 0, 0);
+ }
+}
+
+/*
+ * Specialized code generation for INDEX_op_dup_vec.
+ */
+static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
+{
+ const TCGLifeData arg_life = op->life;
+ TCGRegSet dup_out_regs, dup_in_regs;
+ TCGTemp *its, *ots;
+ TCGType itype, vtype;
+ unsigned vece;
+ int lowpart_ofs;
+ bool ok;
+
+ ots = arg_temp(op->args[0]);
+ its = arg_temp(op->args[1]);
+
+ /* ENV should not be modified. */
+ tcg_debug_assert(!temp_readonly(ots));
+
+ itype = its->type;
+ vece = TCGOP_VECE(op);
+ vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
+
+ if (its->val_type == TEMP_VAL_CONST) {
+ /* Propagate constant via movi -> dupi. */
+ tcg_target_ulong val = its->val;
+ if (IS_DEAD_ARG(1)) {
+ temp_dead(s, its);
}
+ tcg_reg_alloc_do_movi(s, ots, val, arg_life, output_pref(op, 0));
+ return;
+ }
+
+ dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
+ dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs;
+
+ /* Allocate the output register now. */
+ if (ots->val_type != TEMP_VAL_REG) {
+ TCGRegSet allocated_regs = s->reserved_regs;
+ TCGReg oreg;
+
+ if (!IS_DEAD_ARG(1) && its->val_type == TEMP_VAL_REG) {
+ /* Make sure to not spill the input register. */
+ tcg_regset_set_reg(allocated_regs, its->reg);
+ }
+ oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
+ output_pref(op, 0), ots->indirect_base);
+ set_temp_val_reg(s, ots, oreg);
+ }
+
+ switch (its->val_type) {
+ case TEMP_VAL_REG:
+ /*
+ * The dup constraints must be broad, covering all possible VECE.
+ * However, tcg_out_dup_vec() gets to see the VECE and we allow it
+ * to fail, indicating that extra moves are required for that case.
+ */
+ if (tcg_regset_test_reg(dup_in_regs, its->reg)) {
+ if (tcg_out_dup_vec(s, vtype, vece, ots->reg, its->reg)) {
+ goto done;
+ }
+ /* Try again from memory or a vector input register. */
+ }
+ if (!its->mem_coherent) {
+ /*
+ * The input register is not synced, and so an extra store
+ * would be required to use memory. Attempt an integer-vector
+ * register move first. We do not have a TCGRegSet for this.
+ */
+ if (tcg_out_mov(s, itype, ots->reg, its->reg)) {
+ break;
+ }
+ /* Sync the temp back to its slot and load from there. */
+ temp_sync(s, its, s->reserved_regs, 0, 0);
+ }
+ /* fall through */
+
+ case TEMP_VAL_MEM:
+ lowpart_ofs = 0;
+ if (HOST_BIG_ENDIAN) {
+ lowpart_ofs = tcg_type_size(itype) - (1 << vece);
+ }
+ if (tcg_out_dupm_vec(s, vtype, vece, ots->reg, its->mem_base->reg,
+ its->mem_offset + lowpart_ofs)) {
+ goto done;
+ }
+ /* Load the input into the destination vector register. */
+ tcg_out_ld(s, itype, ots->reg, its->mem_base->reg, its->mem_offset);
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ /* We now have a vector input register, so dup must succeed. */
+ ok = tcg_out_dup_vec(s, vtype, vece, ots->reg, ots->reg);
+ tcg_debug_assert(ok);
+
+ done:
+ ots->mem_coherent = 0;
+ if (IS_DEAD_ARG(1)) {
+ temp_dead(s, its);
+ }
+ if (NEED_SYNC_ARG(0)) {
+ temp_sync(s, ots, s->reserved_regs, 0, 0);
+ }
+ if (IS_DEAD_ARG(0)) {
+ temp_dead(s, ots);
}
}
@@ -3376,89 +4797,243 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
TCGTemp *ts;
TCGArg new_args[TCG_MAX_OP_ARGS];
int const_args[TCG_MAX_OP_ARGS];
+ TCGCond op_cond;
nb_oargs = def->nb_oargs;
nb_iargs = def->nb_iargs;
/* copy constants */
- memcpy(new_args + nb_oargs + nb_iargs,
+ memcpy(new_args + nb_oargs + nb_iargs,
op->args + nb_oargs + nb_iargs,
sizeof(TCGArg) * def->nb_cargs);
i_allocated_regs = s->reserved_regs;
o_allocated_regs = s->reserved_regs;
- /* satisfy input constraints */
+ switch (op->opc) {
+ case INDEX_op_brcond_i32:
+ case INDEX_op_brcond_i64:
+ op_cond = op->args[2];
+ break;
+ case INDEX_op_setcond_i32:
+ case INDEX_op_setcond_i64:
+ case INDEX_op_negsetcond_i32:
+ case INDEX_op_negsetcond_i64:
+ case INDEX_op_cmp_vec:
+ op_cond = op->args[3];
+ break;
+ case INDEX_op_brcond2_i32:
+ op_cond = op->args[4];
+ break;
+ case INDEX_op_movcond_i32:
+ case INDEX_op_movcond_i64:
+ case INDEX_op_setcond2_i32:
+ case INDEX_op_cmpsel_vec:
+ op_cond = op->args[5];
+ break;
+ default:
+ /* No condition within opcode. */
+ op_cond = TCG_COND_ALWAYS;
+ break;
+ }
+
+ /* satisfy input constraints */
for (k = 0; k < nb_iargs; k++) {
- TCGRegSet i_preferred_regs, o_preferred_regs;
+ TCGRegSet i_preferred_regs, i_required_regs;
+ bool allocate_new_reg, copyto_new_reg;
+ TCGTemp *ts2;
+ int i1, i2;
- i = def->sorted_args[nb_oargs + k];
+ i = def->args_ct[nb_oargs + k].sort_index;
arg = op->args[i];
arg_ct = &def->args_ct[i];
ts = arg_temp(arg);
if (ts->val_type == TEMP_VAL_CONST
- && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
+ && tcg_target_const_match(ts->val, arg_ct->ct, ts->type,
+ op_cond, TCGOP_VECE(op))) {
/* constant is OK for instruction */
const_args[i] = 1;
new_args[i] = ts->val;
continue;
}
- i_preferred_regs = o_preferred_regs = 0;
- if (arg_ct->ct & TCG_CT_IALIAS) {
- o_preferred_regs = op->output_pref[arg_ct->alias_index];
- if (ts->fixed_reg) {
- /* if fixed register, we must allocate a new register
- if the alias is not the same register */
- if (arg != op->args[arg_ct->alias_index]) {
- goto allocate_in_reg;
+ reg = ts->reg;
+ i_preferred_regs = 0;
+ i_required_regs = arg_ct->regs;
+ allocate_new_reg = false;
+ copyto_new_reg = false;
+
+ switch (arg_ct->pair) {
+ case 0: /* not paired */
+ if (arg_ct->ialias) {
+ i_preferred_regs = output_pref(op, arg_ct->alias_index);
+
+ /*
+ * If the input is readonly, then it cannot also be an
+ * output and aliased to itself. If the input is not
+ * dead after the instruction, we must allocate a new
+ * register and move it.
+ */
+ if (temp_readonly(ts) || !IS_DEAD_ARG(i)
+ || def->args_ct[arg_ct->alias_index].newreg) {
+ allocate_new_reg = true;
+ } else if (ts->val_type == TEMP_VAL_REG) {
+ /*
+ * Check if the current register has already been
+ * allocated for another input.
+ */
+ allocate_new_reg =
+ tcg_regset_test_reg(i_allocated_regs, reg);
+ }
+ }
+ if (!allocate_new_reg) {
+ temp_load(s, ts, i_required_regs, i_allocated_regs,
+ i_preferred_regs);
+ reg = ts->reg;
+ allocate_new_reg = !tcg_regset_test_reg(i_required_regs, reg);
+ }
+ if (allocate_new_reg) {
+ /*
+ * Allocate a new register matching the constraint
+ * and move the temporary register into it.
+ */
+ temp_load(s, ts, tcg_target_available_regs[ts->type],
+ i_allocated_regs, 0);
+ reg = tcg_reg_alloc(s, i_required_regs, i_allocated_regs,
+ i_preferred_regs, ts->indirect_base);
+ copyto_new_reg = true;
+ }
+ break;
+
+ case 1:
+ /* First of an input pair; if i1 == i2, the second is an output. */
+ i1 = i;
+ i2 = arg_ct->pair_index;
+ ts2 = i1 != i2 ? arg_temp(op->args[i2]) : NULL;
+
+ /*
+ * It is easier to default to allocating a new pair
+ * and to identify a few cases where it's not required.
+ */
+ if (arg_ct->ialias) {
+ i_preferred_regs = output_pref(op, arg_ct->alias_index);
+ if (IS_DEAD_ARG(i1) &&
+ IS_DEAD_ARG(i2) &&
+ !temp_readonly(ts) &&
+ ts->val_type == TEMP_VAL_REG &&
+ ts->reg < TCG_TARGET_NB_REGS - 1 &&
+ tcg_regset_test_reg(i_required_regs, reg) &&
+ !tcg_regset_test_reg(i_allocated_regs, reg) &&
+ !tcg_regset_test_reg(i_allocated_regs, reg + 1) &&
+ (ts2
+ ? ts2->val_type == TEMP_VAL_REG &&
+ ts2->reg == reg + 1 &&
+ !temp_readonly(ts2)
+ : s->reg_to_temp[reg + 1] == NULL)) {
+ break;
}
} else {
- /* if the input is aliased to an output and if it is
- not dead after the instruction, we must allocate
- a new register and move it */
- if (!IS_DEAD_ARG(i)) {
- goto allocate_in_reg;
+ /* Without aliasing, the pair must also be an input. */
+ tcg_debug_assert(ts2);
+ if (ts->val_type == TEMP_VAL_REG &&
+ ts2->val_type == TEMP_VAL_REG &&
+ ts2->reg == reg + 1 &&
+ tcg_regset_test_reg(i_required_regs, reg)) {
+ break;
}
+ }
+ reg = tcg_reg_alloc_pair(s, i_required_regs, i_allocated_regs,
+ 0, ts->indirect_base);
+ goto do_pair;
+
+ case 2: /* pair second */
+ reg = new_args[arg_ct->pair_index] + 1;
+ goto do_pair;
+
+ case 3: /* ialias with second output, no first input */
+ tcg_debug_assert(arg_ct->ialias);
+ i_preferred_regs = output_pref(op, arg_ct->alias_index);
+
+ if (IS_DEAD_ARG(i) &&
+ !temp_readonly(ts) &&
+ ts->val_type == TEMP_VAL_REG &&
+ reg > 0 &&
+ s->reg_to_temp[reg - 1] == NULL &&
+ tcg_regset_test_reg(i_required_regs, reg) &&
+ !tcg_regset_test_reg(i_allocated_regs, reg) &&
+ !tcg_regset_test_reg(i_allocated_regs, reg - 1)) {
+ tcg_regset_set_reg(i_allocated_regs, reg - 1);
+ break;
+ }
+ reg = tcg_reg_alloc_pair(s, i_required_regs >> 1,
+ i_allocated_regs, 0,
+ ts->indirect_base);
+ tcg_regset_set_reg(i_allocated_regs, reg);
+ reg += 1;
+ goto do_pair;
+
+ do_pair:
+ /*
+ * If an aliased input is not dead after the instruction,
+ * we must allocate a new register and move it.
+ */
+ if (arg_ct->ialias && (!IS_DEAD_ARG(i) || temp_readonly(ts))) {
+ TCGRegSet t_allocated_regs = i_allocated_regs;
- /* check if the current register has already been allocated
- for another input aliased to an output */
- if (ts->val_type == TEMP_VAL_REG) {
- int k2, i2;
- reg = ts->reg;
- for (k2 = 0 ; k2 < k ; k2++) {
- i2 = def->sorted_args[nb_oargs + k2];
- if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
- reg == new_args[i2]) {
- goto allocate_in_reg;
- }
- }
+ /*
+ * Because of the alias, and the continued life, make sure
+ * that the temp is somewhere *other* than the reg pair,
+ * and we get a copy in reg.
+ */
+ tcg_regset_set_reg(t_allocated_regs, reg);
+ tcg_regset_set_reg(t_allocated_regs, reg + 1);
+ if (ts->val_type == TEMP_VAL_REG && ts->reg == reg) {
+ /* If ts was already in reg, copy it somewhere else. */
+ TCGReg nr;
+ bool ok;
+
+ tcg_debug_assert(ts->kind != TEMP_FIXED);
+ nr = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
+ t_allocated_regs, 0, ts->indirect_base);
+ ok = tcg_out_mov(s, ts->type, nr, reg);
+ tcg_debug_assert(ok);
+
+ set_temp_val_reg(s, ts, nr);
+ } else {
+ temp_load(s, ts, tcg_target_available_regs[ts->type],
+ t_allocated_regs, 0);
+ copyto_new_reg = true;
}
- i_preferred_regs = o_preferred_regs;
+ } else {
+ /* Preferably allocate to reg, otherwise copy. */
+ i_required_regs = (TCGRegSet)1 << reg;
+ temp_load(s, ts, i_required_regs, i_allocated_regs,
+ i_preferred_regs);
+ copyto_new_reg = ts->reg != reg;
}
- }
+ break;
- temp_load(s, ts, arg_ct->u.regs, i_allocated_regs, i_preferred_regs);
- reg = ts->reg;
+ default:
+ g_assert_not_reached();
+ }
- if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
- /* nothing to do : the constraint is satisfied */
- } else {
- allocate_in_reg:
- /* allocate a new register matching the constraint
- and move the temporary register into it */
- temp_load(s, ts, tcg_target_available_regs[ts->type],
- i_allocated_regs, 0);
- reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs,
- o_preferred_regs, ts->indirect_base);
- tcg_out_mov(s, ts->type, reg, ts->reg);
+ if (copyto_new_reg) {
+ if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
+ /*
+ * Cross register class move not supported. Sync the
+ * temp back to its slot and load from there.
+ */
+ temp_sync(s, ts, i_allocated_regs, 0, 0);
+ tcg_out_ld(s, ts->type, reg,
+ ts->mem_base->reg, ts->mem_offset);
+ }
}
new_args[i] = reg;
const_args[i] = 0;
tcg_regset_set_reg(i_allocated_regs, reg);
}
-
+
/* mark dead temporaries and free the associated registers */
for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
if (IS_DEAD_ARG(i)) {
@@ -3466,11 +5041,13 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
}
- if (def->flags & TCG_OPF_BB_END) {
+ if (def->flags & TCG_OPF_COND_BRANCH) {
+ tcg_reg_alloc_cbranch(s, i_allocated_regs);
+ } else if (def->flags & TCG_OPF_BB_END) {
tcg_reg_alloc_bb_end(s, i_allocated_regs);
} else {
if (def->flags & TCG_OPF_CALL_CLOBBER) {
- /* XXX: permit generic clobber register list ? */
+ /* XXX: permit generic clobber register list ? */
for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
tcg_reg_free(s, i, i_allocated_regs);
@@ -3482,63 +5059,123 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
an exception. */
sync_globals(s, i_allocated_regs);
}
-
+
/* satisfy the output constraints */
for(k = 0; k < nb_oargs; k++) {
- i = def->sorted_args[k];
+ i = def->args_ct[k].sort_index;
arg = op->args[i];
arg_ct = &def->args_ct[i];
ts = arg_temp(arg);
- if ((arg_ct->ct & TCG_CT_ALIAS)
- && !const_args[arg_ct->alias_index]) {
- reg = new_args[arg_ct->alias_index];
- } else if (arg_ct->ct & TCG_CT_NEWREG) {
- reg = tcg_reg_alloc(s, arg_ct->u.regs,
- i_allocated_regs | o_allocated_regs,
- op->output_pref[k], ts->indirect_base);
- } else {
- /* if fixed register, we try to use it */
- reg = ts->reg;
- if (ts->fixed_reg &&
- tcg_regset_test_reg(arg_ct->u.regs, reg)) {
- goto oarg_end;
+
+ /* ENV should not be modified. */
+ tcg_debug_assert(!temp_readonly(ts));
+
+ switch (arg_ct->pair) {
+ case 0: /* not paired */
+ if (arg_ct->oalias && !const_args[arg_ct->alias_index]) {
+ reg = new_args[arg_ct->alias_index];
+ } else if (arg_ct->newreg) {
+ reg = tcg_reg_alloc(s, arg_ct->regs,
+ i_allocated_regs | o_allocated_regs,
+ output_pref(op, k), ts->indirect_base);
+ } else {
+ reg = tcg_reg_alloc(s, arg_ct->regs, o_allocated_regs,
+ output_pref(op, k), ts->indirect_base);
}
- reg = tcg_reg_alloc(s, arg_ct->u.regs, o_allocated_regs,
- op->output_pref[k], ts->indirect_base);
- }
- tcg_regset_set_reg(o_allocated_regs, reg);
- /* if a fixed register is used, then a move will be done afterwards */
- if (!ts->fixed_reg) {
- if (ts->val_type == TEMP_VAL_REG) {
- s->reg_to_temp[ts->reg] = NULL;
+ break;
+
+ case 1: /* first of pair */
+ if (arg_ct->oalias) {
+ reg = new_args[arg_ct->alias_index];
+ } else if (arg_ct->newreg) {
+ reg = tcg_reg_alloc_pair(s, arg_ct->regs,
+ i_allocated_regs | o_allocated_regs,
+ output_pref(op, k),
+ ts->indirect_base);
+ } else {
+ reg = tcg_reg_alloc_pair(s, arg_ct->regs, o_allocated_regs,
+ output_pref(op, k),
+ ts->indirect_base);
+ }
+ break;
+
+ case 2: /* second of pair */
+ if (arg_ct->oalias) {
+ reg = new_args[arg_ct->alias_index];
+ } else {
+ reg = new_args[arg_ct->pair_index] + 1;
}
- ts->val_type = TEMP_VAL_REG;
- ts->reg = reg;
- /* temp value is modified, so the value kept in memory is
- potentially not the same */
- ts->mem_coherent = 0;
- s->reg_to_temp[reg] = ts;
+ break;
+
+ case 3: /* first of pair, aliasing with a second input */
+ tcg_debug_assert(!arg_ct->newreg);
+ reg = new_args[arg_ct->pair_index] - 1;
+ break;
+
+ default:
+ g_assert_not_reached();
}
- oarg_end:
+ tcg_regset_set_reg(o_allocated_regs, reg);
+ set_temp_val_reg(s, ts, reg);
+ ts->mem_coherent = 0;
new_args[i] = reg;
}
}
/* emit instruction */
- if (def->flags & TCG_OPF_VECTOR) {
- tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
- new_args, const_args);
- } else {
- tcg_out_op(s, op->opc, new_args, const_args);
+ switch (op->opc) {
+ case INDEX_op_ext8s_i32:
+ tcg_out_ext8s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
+ break;
+ case INDEX_op_ext8s_i64:
+ tcg_out_ext8s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
+ break;
+ case INDEX_op_ext8u_i32:
+ case INDEX_op_ext8u_i64:
+ tcg_out_ext8u(s, new_args[0], new_args[1]);
+ break;
+ case INDEX_op_ext16s_i32:
+ tcg_out_ext16s(s, TCG_TYPE_I32, new_args[0], new_args[1]);
+ break;
+ case INDEX_op_ext16s_i64:
+ tcg_out_ext16s(s, TCG_TYPE_I64, new_args[0], new_args[1]);
+ break;
+ case INDEX_op_ext16u_i32:
+ case INDEX_op_ext16u_i64:
+ tcg_out_ext16u(s, new_args[0], new_args[1]);
+ break;
+ case INDEX_op_ext32s_i64:
+ tcg_out_ext32s(s, new_args[0], new_args[1]);
+ break;
+ case INDEX_op_ext32u_i64:
+ tcg_out_ext32u(s, new_args[0], new_args[1]);
+ break;
+ case INDEX_op_ext_i32_i64:
+ tcg_out_exts_i32_i64(s, new_args[0], new_args[1]);
+ break;
+ case INDEX_op_extu_i32_i64:
+ tcg_out_extu_i32_i64(s, new_args[0], new_args[1]);
+ break;
+ case INDEX_op_extrl_i64_i32:
+ tcg_out_extrl_i64_i32(s, new_args[0], new_args[1]);
+ break;
+ default:
+ if (def->flags & TCG_OPF_VECTOR) {
+ tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
+ new_args, const_args);
+ } else {
+ tcg_out_op(s, op->opc, new_args, const_args);
+ }
+ break;
}
/* move the outputs in the correct register if needed */
for(i = 0; i < nb_oargs; i++) {
ts = arg_temp(op->args[i]);
- reg = new_args[i];
- if (ts->fixed_reg && ts->reg != reg) {
- tcg_out_mov(s, ts->type, ts->reg, reg);
- }
+
+ /* ENV should not be modified. */
+ tcg_debug_assert(!temp_readonly(ts));
+
if (NEED_SYNC_ARG(i)) {
temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
} else if (IS_DEAD_ARG(i)) {
@@ -3547,317 +5184,945 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
}
}
-#ifdef TCG_TARGET_STACK_GROWSUP
-#define STACK_DIR(x) (-(x))
-#else
-#define STACK_DIR(x) (x)
-#endif
-
-static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
+static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
{
- const int nb_oargs = TCGOP_CALLO(op);
- const int nb_iargs = TCGOP_CALLI(op);
const TCGLifeData arg_life = op->life;
- int flags, nb_regs, i;
- TCGReg reg;
- TCGArg arg;
- TCGTemp *ts;
- intptr_t stack_offset;
- size_t call_stack_size;
- tcg_insn_unit *func_addr;
- int allocate_args;
- TCGRegSet allocated_regs;
-
- func_addr = (tcg_insn_unit *)(intptr_t)op->args[nb_oargs + nb_iargs];
- flags = op->args[nb_oargs + nb_iargs + 1];
-
- nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
- if (nb_regs > nb_iargs) {
- nb_regs = nb_iargs;
- }
-
- /* assign stack slots first */
- call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
- call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
- ~(TCG_TARGET_STACK_ALIGN - 1);
- allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
- if (allocate_args) {
- /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
- preallocate call stack */
- tcg_abort();
- }
-
- stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
- for (i = nb_regs; i < nb_iargs; i++) {
- arg = op->args[nb_oargs + i];
-#ifdef TCG_TARGET_STACK_GROWSUP
- stack_offset -= sizeof(tcg_target_long);
-#endif
- if (arg != TCG_CALL_DUMMY_ARG) {
- ts = arg_temp(arg);
- temp_load(s, ts, tcg_target_available_regs[ts->type],
- s->reserved_regs, 0);
- tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
+ TCGTemp *ots, *itsl, *itsh;
+ TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
+
+ /* This opcode is only valid for 32-bit hosts, for 64-bit elements. */
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
+ tcg_debug_assert(TCGOP_VECE(op) == MO_64);
+
+ ots = arg_temp(op->args[0]);
+ itsl = arg_temp(op->args[1]);
+ itsh = arg_temp(op->args[2]);
+
+ /* ENV should not be modified. */
+ tcg_debug_assert(!temp_readonly(ots));
+
+ /* Allocate the output register now. */
+ if (ots->val_type != TEMP_VAL_REG) {
+ TCGRegSet allocated_regs = s->reserved_regs;
+ TCGRegSet dup_out_regs =
+ tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
+ TCGReg oreg;
+
+ /* Make sure to not spill the input registers. */
+ if (!IS_DEAD_ARG(1) && itsl->val_type == TEMP_VAL_REG) {
+ tcg_regset_set_reg(allocated_regs, itsl->reg);
}
-#ifndef TCG_TARGET_STACK_GROWSUP
- stack_offset += sizeof(tcg_target_long);
-#endif
+ if (!IS_DEAD_ARG(2) && itsh->val_type == TEMP_VAL_REG) {
+ tcg_regset_set_reg(allocated_regs, itsh->reg);
+ }
+
+ oreg = tcg_reg_alloc(s, dup_out_regs, allocated_regs,
+ output_pref(op, 0), ots->indirect_base);
+ set_temp_val_reg(s, ots, oreg);
}
-
- /* assign input registers */
- allocated_regs = s->reserved_regs;
- for (i = 0; i < nb_regs; i++) {
- arg = op->args[nb_oargs + i];
- if (arg != TCG_CALL_DUMMY_ARG) {
- ts = arg_temp(arg);
- reg = tcg_target_call_iarg_regs[i];
- if (ts->val_type == TEMP_VAL_REG) {
- if (ts->reg != reg) {
- tcg_reg_free(s, reg, allocated_regs);
- tcg_out_mov(s, ts->type, reg, ts->reg);
- }
- } else {
- TCGRegSet arg_set = 0;
+ /* Promote dup2 of immediates to dupi_vec. */
+ if (itsl->val_type == TEMP_VAL_CONST && itsh->val_type == TEMP_VAL_CONST) {
+ uint64_t val = deposit64(itsl->val, 32, 32, itsh->val);
+ MemOp vece = MO_64;
+
+ if (val == dup_const(MO_8, val)) {
+ vece = MO_8;
+ } else if (val == dup_const(MO_16, val)) {
+ vece = MO_16;
+ } else if (val == dup_const(MO_32, val)) {
+ vece = MO_32;
+ }
+
+ tcg_out_dupi_vec(s, vtype, vece, ots->reg, val);
+ goto done;
+ }
- tcg_reg_free(s, reg, allocated_regs);
- tcg_regset_set_reg(arg_set, reg);
- temp_load(s, ts, arg_set, allocated_regs, 0);
+ /* If the two inputs form one 64-bit value, try dupm_vec. */
+ if (itsl->temp_subindex == HOST_BIG_ENDIAN &&
+ itsh->temp_subindex == !HOST_BIG_ENDIAN &&
+ itsl == itsh + (HOST_BIG_ENDIAN ? 1 : -1)) {
+ TCGTemp *its = itsl - HOST_BIG_ENDIAN;
+
+ temp_sync(s, its + 0, s->reserved_regs, 0, 0);
+ temp_sync(s, its + 1, s->reserved_regs, 0, 0);
+
+ if (tcg_out_dupm_vec(s, vtype, MO_64, ots->reg,
+ its->mem_base->reg, its->mem_offset)) {
+ goto done;
+ }
+ }
+
+ /* Fall back to generic expansion. */
+ return false;
+
+ done:
+ ots->mem_coherent = 0;
+ if (IS_DEAD_ARG(1)) {
+ temp_dead(s, itsl);
+ }
+ if (IS_DEAD_ARG(2)) {
+ temp_dead(s, itsh);
+ }
+ if (NEED_SYNC_ARG(0)) {
+ temp_sync(s, ots, s->reserved_regs, 0, IS_DEAD_ARG(0));
+ } else if (IS_DEAD_ARG(0)) {
+ temp_dead(s, ots);
+ }
+ return true;
+}
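
For this specific use, deposit64(lo, 32, 32, hi) just glues two 32-bit halves into one 64-bit constant; a one-line sketch for reference:

    #include <stdint.h>

    /* Equivalent of deposit64(lo, 32, 32, hi) as used above:
     * bits 0..31 come from @lo, bits 32..63 from @hi. */
    static uint64_t glue32(uint64_t lo, uint64_t hi)
    {
        return (lo & 0xffffffffull) | (hi << 32);
    }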
+
+static void load_arg_reg(TCGContext *s, TCGReg reg, TCGTemp *ts,
+ TCGRegSet allocated_regs)
+{
+ if (ts->val_type == TEMP_VAL_REG) {
+ if (ts->reg != reg) {
+ tcg_reg_free(s, reg, allocated_regs);
+ if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
+ /*
+ * Cross register class move not supported. Sync the
+ * temp back to its slot and load from there.
+ */
+ temp_sync(s, ts, allocated_regs, 0, 0);
+ tcg_out_ld(s, ts->type, reg,
+ ts->mem_base->reg, ts->mem_offset);
}
+ }
+ } else {
+ TCGRegSet arg_set = 0;
+
+ tcg_reg_free(s, reg, allocated_regs);
+ tcg_regset_set_reg(arg_set, reg);
+ temp_load(s, ts, arg_set, allocated_regs, 0);
+ }
+}
- tcg_regset_set_reg(allocated_regs, reg);
+static void load_arg_stk(TCGContext *s, unsigned arg_slot, TCGTemp *ts,
+ TCGRegSet allocated_regs)
+{
+ /*
+ * When the destination is on the stack, load up the temp and store.
+ * If there are many call-saved registers, the temp might live to
+ * see another use; otherwise it'll be discarded.
+ */
+ temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs, 0);
+ tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK,
+ arg_slot_stk_ofs(arg_slot));
+}
+
+static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l,
+ TCGTemp *ts, TCGRegSet *allocated_regs)
+{
+ if (arg_slot_reg_p(l->arg_slot)) {
+ TCGReg reg = tcg_target_call_iarg_regs[l->arg_slot];
+ load_arg_reg(s, reg, ts, *allocated_regs);
+ tcg_regset_set_reg(*allocated_regs, reg);
+ } else {
+ load_arg_stk(s, l->arg_slot, ts, *allocated_regs);
+ }
+}
+
+static void load_arg_ref(TCGContext *s, unsigned arg_slot, TCGReg ref_base,
+ intptr_t ref_off, TCGRegSet *allocated_regs)
+{
+ TCGReg reg;
+
+ if (arg_slot_reg_p(arg_slot)) {
+ reg = tcg_target_call_iarg_regs[arg_slot];
+ tcg_reg_free(s, reg, *allocated_regs);
+ tcg_out_addi_ptr(s, reg, ref_base, ref_off);
+ tcg_regset_set_reg(*allocated_regs, reg);
+ } else {
+ reg = tcg_reg_alloc(s, tcg_target_available_regs[TCG_TYPE_PTR],
+ *allocated_regs, 0, false);
+ tcg_out_addi_ptr(s, reg, ref_base, ref_off);
+ tcg_out_st(s, TCG_TYPE_PTR, reg, TCG_REG_CALL_STACK,
+ arg_slot_stk_ofs(arg_slot));
+ }
+}
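
All three loaders are keyed off a flat slot numbering in which the first slots are the host's integer argument registers and the remainder are stack words. A hedged sketch of the mapping they assume (the real arg_slot_reg_p()/arg_slot_stk_ofs() are defined elsewhere in TCG and also account for the target's stack offset conventions):

    /* @nreg is the number of integer argument registers in the host ABI. */
    static int slot_is_reg(unsigned slot, unsigned nreg)
    {
        return slot < nreg;
    }

    static int slot_stack_offset(unsigned slot, unsigned nreg,
                                 int stack_base, int word_size)
    {
        /* Stack slots are numbered immediately after the register slots. */
        return stack_base + (int)(slot - nreg) * word_size;
    }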
+
+static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
+{
+ const int nb_oargs = TCGOP_CALLO(op);
+ const int nb_iargs = TCGOP_CALLI(op);
+ const TCGLifeData arg_life = op->life;
+ const TCGHelperInfo *info = tcg_call_info(op);
+ TCGRegSet allocated_regs = s->reserved_regs;
+ int i;
+
+ /*
+ * Move inputs into place in reverse order,
+ * so that we place stacked arguments first.
+ */
+ for (i = nb_iargs - 1; i >= 0; --i) {
+ const TCGCallArgumentLoc *loc = &info->in[i];
+ TCGTemp *ts = arg_temp(op->args[nb_oargs + i]);
+
+ switch (loc->kind) {
+ case TCG_CALL_ARG_NORMAL:
+ case TCG_CALL_ARG_EXTEND_U:
+ case TCG_CALL_ARG_EXTEND_S:
+ load_arg_normal(s, loc, ts, &allocated_regs);
+ break;
+ case TCG_CALL_ARG_BY_REF:
+ load_arg_stk(s, loc->ref_slot, ts, allocated_regs);
+ load_arg_ref(s, loc->arg_slot, TCG_REG_CALL_STACK,
+ arg_slot_stk_ofs(loc->ref_slot),
+ &allocated_regs);
+ break;
+ case TCG_CALL_ARG_BY_REF_N:
+ load_arg_stk(s, loc->ref_slot, ts, allocated_regs);
+ break;
+ default:
+ g_assert_not_reached();
}
}
-
- /* mark dead temporaries and free the associated registers */
+
+ /* Mark dead temporaries and free the associated registers. */
for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
if (IS_DEAD_ARG(i)) {
temp_dead(s, arg_temp(op->args[i]));
}
}
-
- /* clobber call registers */
+
+ /* Clobber call registers. */
for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
tcg_reg_free(s, i, allocated_regs);
}
}
- /* Save globals if they might be written by the helper, sync them if
- they might be read. */
- if (flags & TCG_CALL_NO_READ_GLOBALS) {
+ /*
+ * Save globals if they might be written by the helper,
+ * sync them if they might be read.
+ */
+ if (info->flags & TCG_CALL_NO_READ_GLOBALS) {
/* Nothing to do */
- } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
+ } else if (info->flags & TCG_CALL_NO_WRITE_GLOBALS) {
sync_globals(s, allocated_regs);
} else {
save_globals(s, allocated_regs);
}
- tcg_out_call(s, func_addr);
+ /*
+ * If the ABI passes a pointer to the returned struct as the first
+ * argument, load that now. Pass a pointer to the output home slot.
+ */
+ if (info->out_kind == TCG_CALL_RET_BY_REF) {
+ TCGTemp *ts = arg_temp(op->args[0]);
- /* assign output registers and emit moves if needed */
- for(i = 0; i < nb_oargs; i++) {
- arg = op->args[i];
- ts = arg_temp(arg);
- reg = tcg_target_call_oarg_regs[i];
- tcg_debug_assert(s->reg_to_temp[reg] == NULL);
+ if (!ts->mem_allocated) {
+ temp_allocate_frame(s, ts);
+ }
+ load_arg_ref(s, 0, ts->mem_base->reg, ts->mem_offset, &allocated_regs);
+ }
- if (ts->fixed_reg) {
- if (ts->reg != reg) {
- tcg_out_mov(s, ts->type, ts->reg, reg);
- }
- } else {
- if (ts->val_type == TEMP_VAL_REG) {
- s->reg_to_temp[ts->reg] = NULL;
- }
- ts->val_type = TEMP_VAL_REG;
- ts->reg = reg;
+ tcg_out_call(s, tcg_call_func(op), info);
+
+ /* Assign output registers and emit moves if needed. */
+ switch (info->out_kind) {
+ case TCG_CALL_RET_NORMAL:
+ for (i = 0; i < nb_oargs; i++) {
+ TCGTemp *ts = arg_temp(op->args[i]);
+ TCGReg reg = tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, i);
+
+ /* ENV should not be modified. */
+ tcg_debug_assert(!temp_readonly(ts));
+
+ set_temp_val_reg(s, ts, reg);
ts->mem_coherent = 0;
- s->reg_to_temp[reg] = ts;
- if (NEED_SYNC_ARG(i)) {
- temp_sync(s, ts, allocated_regs, 0, IS_DEAD_ARG(i));
- } else if (IS_DEAD_ARG(i)) {
- temp_dead(s, ts);
+ }
+ break;
+
+ case TCG_CALL_RET_BY_VEC:
+ {
+ TCGTemp *ts = arg_temp(op->args[0]);
+
+ tcg_debug_assert(ts->base_type == TCG_TYPE_I128);
+ tcg_debug_assert(ts->temp_subindex == 0);
+ if (!ts->mem_allocated) {
+ temp_allocate_frame(s, ts);
}
+ tcg_out_st(s, TCG_TYPE_V128,
+ tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0),
+ ts->mem_base->reg, ts->mem_offset);
+ }
+ /* fall through to mark all parts in memory */
+
+ case TCG_CALL_RET_BY_REF:
+ /* The callee has performed a write through the reference. */
+ for (i = 0; i < nb_oargs; i++) {
+ TCGTemp *ts = arg_temp(op->args[i]);
+ ts->val_type = TEMP_VAL_MEM;
+ }
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ /* Flush or discard output registers as needed. */
+ for (i = 0; i < nb_oargs; i++) {
+ TCGTemp *ts = arg_temp(op->args[i]);
+ if (NEED_SYNC_ARG(i)) {
+ temp_sync(s, ts, s->reserved_regs, 0, IS_DEAD_ARG(i));
+ } else if (IS_DEAD_ARG(i)) {
+ temp_dead(s, ts);
}
}
}
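
The TCG_CALL_RET_BY_REF path corresponds to the ordinary hidden struct-return convention at the C level; an illustration, with hypothetical names, of what the emitted call amounts to:

    #include <stdint.h>

    typedef struct { uint64_t lo, hi; } i128_home;

    /* The callee receives the address of the result's home slot as a
     * hidden first argument and writes the result through it. */
    static void helper_ret_by_ref(i128_home *ret, uint64_t a, uint64_t b)
    {
        ret->lo = a;
        ret->hi = b;
    }
    /* Afterwards the output temp is valid only in memory, matching the
     * TEMP_VAL_MEM assignment in the switch above. */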
-#ifdef CONFIG_PROFILER
-
-/* avoid copy/paste errors */
-#define PROF_ADD(to, from, field) \
- do { \
- (to)->field += atomic_read(&((from)->field)); \
- } while (0)
-
-#define PROF_MAX(to, from, field) \
- do { \
- typeof((from)->field) val__ = atomic_read(&((from)->field)); \
- if (val__ > (to)->field) { \
- (to)->field = val__; \
- } \
- } while (0)
-
-/* Pass in a zero'ed @prof */
-static inline
-void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
-{
- unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
- unsigned int i;
-
- for (i = 0; i < n_ctxs; i++) {
- TCGContext *s = atomic_read(&tcg_ctxs[i]);
- const TCGProfile *orig = &s->prof;
-
- if (counters) {
- PROF_ADD(prof, orig, cpu_exec_time);
- PROF_ADD(prof, orig, tb_count1);
- PROF_ADD(prof, orig, tb_count);
- PROF_ADD(prof, orig, op_count);
- PROF_MAX(prof, orig, op_count_max);
- PROF_ADD(prof, orig, temp_count);
- PROF_MAX(prof, orig, temp_count_max);
- PROF_ADD(prof, orig, del_op_count);
- PROF_ADD(prof, orig, code_in_len);
- PROF_ADD(prof, orig, code_out_len);
- PROF_ADD(prof, orig, search_out_len);
- PROF_ADD(prof, orig, interm_time);
- PROF_ADD(prof, orig, code_time);
- PROF_ADD(prof, orig, la_time);
- PROF_ADD(prof, orig, opt_time);
- PROF_ADD(prof, orig, restore_count);
- PROF_ADD(prof, orig, restore_time);
+/**
+ * atom_and_align_for_opc:
+ * @s: tcg context
+ * @opc: memory operation code
+ * @host_atom: MO_ATOM_{IFALIGN,WITHIN16,SUBALIGN} for host operations
+ * @allow_two_ops: true if we are prepared to issue two operations
+ *
+ * Return the alignment and atomicity to use for the inline fast path
+ * for the given memory operation. The alignment may be larger than
+ * that specified in @opc, and the correct alignment will be diagnosed
+ * by the slow path helper.
+ *
+ * If @allow_two_ops, the host is prepared to test for 2x alignment,
+ * and issue two loads or stores for subalignment.
+ */
+static TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
+ MemOp host_atom, bool allow_two_ops)
+{
+ MemOp align = get_alignment_bits(opc);
+ MemOp size = opc & MO_SIZE;
+ MemOp half = size ? size - 1 : 0;
+ MemOp atom = opc & MO_ATOM_MASK;
+ MemOp atmax;
+
+ switch (atom) {
+ case MO_ATOM_NONE:
+ /* The operation requires no specific atomicity. */
+ atmax = MO_8;
+ break;
+
+ case MO_ATOM_IFALIGN:
+ atmax = size;
+ break;
+
+ case MO_ATOM_IFALIGN_PAIR:
+ atmax = half;
+ break;
+
+ case MO_ATOM_WITHIN16:
+ atmax = size;
+ if (size == MO_128) {
+ /* Misalignment implies !within16, and therefore no atomicity. */
+ } else if (host_atom != MO_ATOM_WITHIN16) {
+ /* The host does not implement within16, so require alignment. */
+ align = MAX(align, size);
}
- if (table) {
- int i;
+ break;
- for (i = 0; i < NB_OPS; i++) {
- PROF_ADD(prof, orig, table_op_count[i]);
+ case MO_ATOM_WITHIN16_PAIR:
+ atmax = size;
+ /*
+ * Misalignment implies !within16, and therefore half atomicity.
+ * Any host prepared for two operations can implement this with
+ * half alignment.
+ */
+ if (host_atom != MO_ATOM_WITHIN16 && allow_two_ops) {
+ align = MAX(align, half);
+ }
+ break;
+
+ case MO_ATOM_SUBALIGN:
+ atmax = size;
+ if (host_atom != MO_ATOM_SUBALIGN) {
+ /* If unaligned but not odd, there are subobjects up to half. */
+ if (allow_two_ops) {
+ align = MAX(align, half);
+ } else {
+ align = MAX(align, size);
}
}
+ break;
+
+ default:
+ g_assert_not_reached();
}
+
+ return (TCGAtomAlign){ .atom = atmax, .align = align };
}
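
A minimal usage sketch of the query above, assuming a hypothetical backend whose plain loads and stores are MO_ATOM_IFALIGN and which can split a 16-byte access into two 8-byte operations (the function name is illustrative):

    static bool backend_qemu_ld128_inline_ok(TCGContext *s, MemOp opc)
    {
        /* Prepared to issue two operations, so allow_two_ops is true. */
        TCGAtomAlign aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, true);

        /* Two MO_64 operations satisfy any atomicity requirement <= MO_64. */
        return aa.atom <= MO_64;
    }
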
-#undef PROF_ADD
-#undef PROF_MAX
+/*
+ * Similarly for qemu_ld/st slow path helpers.
+ * We must re-implement tcg_gen_callN and tcg_reg_alloc_call simultaneously,
+ * using only the provided backend tcg_out_* functions.
+ */
-static void tcg_profile_snapshot_counters(TCGProfile *prof)
+static int tcg_out_helper_stk_ofs(TCGType type, unsigned slot)
{
- tcg_profile_snapshot(prof, true, false);
+ int ofs = arg_slot_stk_ofs(slot);
+
+ /*
+ * Each stack slot is TCG_TARGET_LONG_BITS. If the host does not
+ * require extension to uint64_t, adjust the address for uint32_t.
+ */
+ if (HOST_BIG_ENDIAN &&
+ TCG_TARGET_REG_BITS == 64 &&
+ type == TCG_TYPE_I32) {
+ ofs += 4;
+ }
+ return ofs;
}
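
The +4 here selects the half of the 64-bit stack slot that actually holds the 32-bit payload. Restated as a standalone sketch, with the host properties passed in explicitly:

    #include <stdbool.h>

    static int i32_ofs_in_slot(int slot_ofs, bool host_big_endian, int reg_bits)
    {
        if (host_big_endian && reg_bits == 64) {
            return slot_ofs + 4;    /* payload in the high-addressed half */
        }
        return slot_ofs;            /* payload at the start of the slot */
    }
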
-static void tcg_profile_snapshot_table(TCGProfile *prof)
+static void tcg_out_helper_load_slots(TCGContext *s,
+ unsigned nmov, TCGMovExtend *mov,
+ const TCGLdstHelperParam *parm)
{
- tcg_profile_snapshot(prof, false, true);
+ unsigned i;
+ TCGReg dst3;
+
+ /*
+ * Start from the end, storing to the stack first.
+ * This frees those registers, so we need not consider overlap.
+ */
+ for (i = nmov; i-- > 0; ) {
+ unsigned slot = mov[i].dst;
+
+ if (arg_slot_reg_p(slot)) {
+ goto found_reg;
+ }
+
+ TCGReg src = mov[i].src;
+ TCGType dst_type = mov[i].dst_type;
+ MemOp dst_mo = dst_type == TCG_TYPE_I32 ? MO_32 : MO_64;
+
+ /* The argument is going onto the stack; extend into scratch. */
+ if ((mov[i].src_ext & MO_SIZE) != dst_mo) {
+ tcg_debug_assert(parm->ntmp != 0);
+ mov[i].dst = src = parm->tmp[0];
+ tcg_out_movext1(s, &mov[i]);
+ }
+
+ tcg_out_st(s, dst_type, src, TCG_REG_CALL_STACK,
+ tcg_out_helper_stk_ofs(dst_type, slot));
+ }
+ return;
+
+ found_reg:
+ /*
+ * The remaining arguments are in registers.
+ * Convert slot numbers to argument registers.
+ */
+ nmov = i + 1;
+ for (i = 0; i < nmov; ++i) {
+ mov[i].dst = tcg_target_call_iarg_regs[mov[i].dst];
+ }
+
+ switch (nmov) {
+ case 4:
+ /* The backend must have provided enough temps for the worst case. */
+ tcg_debug_assert(parm->ntmp >= 2);
+
+ dst3 = mov[3].dst;
+ for (unsigned j = 0; j < 3; ++j) {
+ if (dst3 == mov[j].src) {
+ /*
+ * Conflict. Copy the source to a temporary, perform the
+ * remaining moves, then the extension from our scratch
+ * on the way out.
+ */
+ TCGReg scratch = parm->tmp[1];
+
+ tcg_out_mov(s, mov[3].src_type, scratch, mov[3].src);
+ tcg_out_movext3(s, mov, mov + 1, mov + 2, parm->tmp[0]);
+ tcg_out_movext1_new_src(s, &mov[3], scratch);
+ return;
+ }
+ }
+
+ /* No conflicts: perform this move and continue. */
+ tcg_out_movext1(s, &mov[3]);
+ /* fall through */
+
+ case 3:
+ tcg_out_movext3(s, mov, mov + 1, mov + 2,
+ parm->ntmp ? parm->tmp[0] : -1);
+ break;
+ case 2:
+ tcg_out_movext2(s, mov, mov + 1,
+ parm->ntmp ? parm->tmp[0] : -1);
+ break;
+ case 1:
+ tcg_out_movext1(s, mov);
+ break;
+ default:
+ g_assert_not_reached();
+ }
}
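
A concrete trace of the case-4 conflict path, using illustrative register numbers, for the parallel assignment r0 <- r1, r1 <- r2, r2 <- r3, r3 <- r0:

    /*
     * dst3 = r3, and mov[2].src == r3:             conflict detected.
     * tcg_out_mov(s, ..., tmp[1], r0):             save r0 before it is
     *                                              clobbered by mov[0].
     * tcg_out_movext3(s, mov, mov+1, mov+2, tmp[0]):
     *                                              r0 <- r1, r1 <- r2, r2 <- r3.
     * tcg_out_movext1_new_src(s, &mov[3], tmp[1]): r3 <- saved copy of r0.
     */
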
-void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
+static void tcg_out_helper_load_imm(TCGContext *s, unsigned slot,
+ TCGType type, tcg_target_long imm,
+ const TCGLdstHelperParam *parm)
{
- TCGProfile prof = {};
- int i;
-
- tcg_profile_snapshot_table(&prof);
- for (i = 0; i < NB_OPS; i++) {
- cpu_fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name,
- prof.table_op_count[i]);
+ if (arg_slot_reg_p(slot)) {
+ tcg_out_movi(s, type, tcg_target_call_iarg_regs[slot], imm);
+ } else {
+ int ofs = tcg_out_helper_stk_ofs(type, slot);
+ if (!tcg_out_sti(s, type, imm, TCG_REG_CALL_STACK, ofs)) {
+ tcg_debug_assert(parm->ntmp != 0);
+ tcg_out_movi(s, type, parm->tmp[0], imm);
+ tcg_out_st(s, type, parm->tmp[0], TCG_REG_CALL_STACK, ofs);
+ }
}
}
-int64_t tcg_cpu_exec_time(void)
+static void tcg_out_helper_load_common_args(TCGContext *s,
+ const TCGLabelQemuLdst *ldst,
+ const TCGLdstHelperParam *parm,
+ const TCGHelperInfo *info,
+ unsigned next_arg)
{
- unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
- unsigned int i;
- int64_t ret = 0;
+ TCGMovExtend ptr_mov = {
+ .dst_type = TCG_TYPE_PTR,
+ .src_type = TCG_TYPE_PTR,
+ .src_ext = sizeof(void *) == 4 ? MO_32 : MO_64
+ };
+ const TCGCallArgumentLoc *loc = &info->in[0];
+ TCGType type;
+ unsigned slot;
+ tcg_target_ulong imm;
+
+ /*
+ * Handle env, which is always first.
+ */
+ ptr_mov.dst = loc->arg_slot;
+ ptr_mov.src = TCG_AREG0;
+ tcg_out_helper_load_slots(s, 1, &ptr_mov, parm);
+
+ /*
+ * Handle oi.
+ */
+ imm = ldst->oi;
+ loc = &info->in[next_arg];
+ type = TCG_TYPE_I32;
+ switch (loc->kind) {
+ case TCG_CALL_ARG_NORMAL:
+ break;
+ case TCG_CALL_ARG_EXTEND_U:
+ case TCG_CALL_ARG_EXTEND_S:
+ /* No extension required for MemOpIdx. */
+ tcg_debug_assert(imm <= INT32_MAX);
+ type = TCG_TYPE_REG;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_out_helper_load_imm(s, loc->arg_slot, type, imm, parm);
+ next_arg++;
- for (i = 0; i < n_ctxs; i++) {
- const TCGContext *s = atomic_read(&tcg_ctxs[i]);
- const TCGProfile *prof = &s->prof;
+ /*
+ * Handle ra.
+ */
+ loc = &info->in[next_arg];
+ slot = loc->arg_slot;
+ if (parm->ra_gen) {
+ int arg_reg = -1;
+ TCGReg ra_reg;
+
+ if (arg_slot_reg_p(slot)) {
+ arg_reg = tcg_target_call_iarg_regs[slot];
+ }
+ ra_reg = parm->ra_gen(s, ldst, arg_reg);
- ret += atomic_read(&prof->cpu_exec_time);
+ ptr_mov.dst = slot;
+ ptr_mov.src = ra_reg;
+ tcg_out_helper_load_slots(s, 1, &ptr_mov, parm);
+ } else {
+ imm = (uintptr_t)ldst->raddr;
+ tcg_out_helper_load_imm(s, slot, TCG_TYPE_PTR, imm, parm);
}
- return ret;
}
-#else
-void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
+
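
The slots being filled track the common MMU helper signature: env first, then the (possibly split) guest address, then oi, then the return address. For reference, the 8-bit load helper declared in tcg/tcg-ldst.h has this shape (exact parameter names may differ):

    tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
                                     MemOpIdx oi, uintptr_t retaddr);
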
+static unsigned tcg_out_helper_add_mov(TCGMovExtend *mov,
+ const TCGCallArgumentLoc *loc,
+ TCGType dst_type, TCGType src_type,
+ TCGReg lo, TCGReg hi)
{
- cpu_fprintf(f, "[TCG profiler not compiled]\n");
+ MemOp reg_mo;
+
+ if (dst_type <= TCG_TYPE_REG) {
+ MemOp src_ext;
+
+ switch (loc->kind) {
+ case TCG_CALL_ARG_NORMAL:
+ src_ext = src_type == TCG_TYPE_I32 ? MO_32 : MO_64;
+ break;
+ case TCG_CALL_ARG_EXTEND_U:
+ dst_type = TCG_TYPE_REG;
+ src_ext = MO_UL;
+ break;
+ case TCG_CALL_ARG_EXTEND_S:
+ dst_type = TCG_TYPE_REG;
+ src_ext = MO_SL;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ mov[0].dst = loc->arg_slot;
+ mov[0].dst_type = dst_type;
+ mov[0].src = lo;
+ mov[0].src_type = src_type;
+ mov[0].src_ext = src_ext;
+ return 1;
+ }
+
+ if (TCG_TARGET_REG_BITS == 32) {
+ assert(dst_type == TCG_TYPE_I64);
+ reg_mo = MO_32;
+ } else {
+ assert(dst_type == TCG_TYPE_I128);
+ reg_mo = MO_64;
+ }
+
+ mov[0].dst = loc[HOST_BIG_ENDIAN].arg_slot;
+ mov[0].src = lo;
+ mov[0].dst_type = TCG_TYPE_REG;
+ mov[0].src_type = TCG_TYPE_REG;
+ mov[0].src_ext = reg_mo;
+
+ mov[1].dst = loc[!HOST_BIG_ENDIAN].arg_slot;
+ mov[1].src = hi;
+ mov[1].dst_type = TCG_TYPE_REG;
+ mov[1].src_type = TCG_TYPE_REG;
+ mov[1].src_ext = reg_mo;
+
+ return 2;
}
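
For the split case, the two-entry mov[] batch on a 32-bit host passing an I64 value in registers r4 (low) and r5 (high) looks as follows; the slot numbers are assumed for illustration:

    /*
     * With loc[0].arg_slot = 2 and loc[1].arg_slot = 3:
     *   little-endian host:  mov[0] = { .dst = 2, .src = r4 }
     *                        mov[1] = { .dst = 3, .src = r5 }
     *   big-endian host:     mov[0] = { .dst = 3, .src = r4 }
     *                        mov[1] = { .dst = 2, .src = r5 }
     */
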
-int64_t tcg_cpu_exec_time(void)
+static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
+ const TCGLdstHelperParam *parm)
{
- error_report("%s: TCG profiler not compiled", __func__);
- exit(EXIT_FAILURE);
-}
-#endif
+ const TCGHelperInfo *info;
+ const TCGCallArgumentLoc *loc;
+ TCGMovExtend mov[2];
+ unsigned next_arg, nmov;
+ MemOp mop = get_memop(ldst->oi);
+
+ switch (mop & MO_SIZE) {
+ case MO_8:
+ case MO_16:
+ case MO_32:
+ info = &info_helper_ld32_mmu;
+ break;
+ case MO_64:
+ info = &info_helper_ld64_mmu;
+ break;
+ case MO_128:
+ info = &info_helper_ld128_mmu;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ /* Defer env argument. */
+ next_arg = 1;
-int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
-{
-#ifdef CONFIG_PROFILER
- TCGProfile *prof = &s->prof;
-#endif
- int i, num_insns;
- TCGOp *op;
+ loc = &info->in[next_arg];
+ if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
+ /*
+ * 32-bit host with 32-bit guest: zero-extend the guest address
+ * to 64-bits for the helper by storing the low part, then
+ * load a zero for the high part.
+ */
+ tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN,
+ TCG_TYPE_I32, TCG_TYPE_I32,
+ ldst->addrlo_reg, -1);
+ tcg_out_helper_load_slots(s, 1, mov, parm);
+
+ tcg_out_helper_load_imm(s, loc[!HOST_BIG_ENDIAN].arg_slot,
+ TCG_TYPE_I32, 0, parm);
+ next_arg += 2;
+ } else {
+ nmov = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
+ ldst->addrlo_reg, ldst->addrhi_reg);
+ tcg_out_helper_load_slots(s, nmov, mov, parm);
+ next_arg += nmov;
+ }
-#ifdef CONFIG_PROFILER
- {
- int n = 0;
+ switch (info->out_kind) {
+ case TCG_CALL_RET_NORMAL:
+ case TCG_CALL_RET_BY_VEC:
+ break;
+ case TCG_CALL_RET_BY_REF:
+ /*
+ * The return reference is in the first argument slot.
+ * We need memory in which to return: re-use the top of stack.
+ */
+ {
+ int ofs_slot0 = TCG_TARGET_CALL_STACK_OFFSET;
- QTAILQ_FOREACH(op, &s->ops, link) {
- n++;
+ if (arg_slot_reg_p(0)) {
+ tcg_out_addi_ptr(s, tcg_target_call_iarg_regs[0],
+ TCG_REG_CALL_STACK, ofs_slot0);
+ } else {
+ tcg_debug_assert(parm->ntmp != 0);
+ tcg_out_addi_ptr(s, parm->tmp[0],
+ TCG_REG_CALL_STACK, ofs_slot0);
+ tcg_out_st(s, TCG_TYPE_PTR, parm->tmp[0],
+ TCG_REG_CALL_STACK, ofs_slot0);
+ }
}
- atomic_set(&prof->op_count, prof->op_count + n);
- if (n > prof->op_count_max) {
- atomic_set(&prof->op_count_max, n);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg);
+}
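
With this in place, a backend's qemu_ld slow path reduces to three steps; a minimal sketch, where the scratch register and the absence of an ra_gen hook are assumptions about the backend:

    static const TCGLdstHelperParam ldst_helper_param = {
        .ntmp = 1, .tmp = { TCG_REG_TMP }   /* hypothetical scratch reg */
    };

    static void emit_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
    {
        tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
        /* Emit the call to the matching helper_ld*_mmu here. */
        tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);
    }
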
+
+static void tcg_out_ld_helper_ret(TCGContext *s, const TCGLabelQemuLdst *ldst,
+ bool load_sign,
+ const TCGLdstHelperParam *parm)
+{
+ MemOp mop = get_memop(ldst->oi);
+ TCGMovExtend mov[2];
+ int ofs_slot0;
+
+ switch (ldst->type) {
+ case TCG_TYPE_I64:
+ if (TCG_TARGET_REG_BITS == 32) {
+ break;
}
+ /* fall through */
+
+ case TCG_TYPE_I32:
+ mov[0].dst = ldst->datalo_reg;
+ mov[0].src = tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, 0);
+ mov[0].dst_type = ldst->type;
+ mov[0].src_type = TCG_TYPE_REG;
+
+ /*
+ * If load_sign, then we allowed the helper to perform the
+ * appropriate sign extension to tcg_target_ulong, and all
+ * we need now is a plain move.
+ *
+ * If not, then we expect the relevant extension
+ * instruction to be no more expensive than a move, and
+ * we thus save the icache etc by only using one of two
+ * helper functions.
+ */
+ if (load_sign || !(mop & MO_SIGN)) {
+ if (TCG_TARGET_REG_BITS == 32 || ldst->type == TCG_TYPE_I32) {
+ mov[0].src_ext = MO_32;
+ } else {
+ mov[0].src_ext = MO_64;
+ }
+ } else {
+ mov[0].src_ext = mop & MO_SSIZE;
+ }
+ tcg_out_movext1(s, mov);
+ return;
- n = s->nb_temps;
- atomic_set(&prof->temp_count, prof->temp_count + n);
- if (n > prof->temp_count_max) {
- atomic_set(&prof->temp_count_max, n);
+ case TCG_TYPE_I128:
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
+ ofs_slot0 = TCG_TARGET_CALL_STACK_OFFSET;
+ switch (TCG_TARGET_CALL_RET_I128) {
+ case TCG_CALL_RET_NORMAL:
+ break;
+ case TCG_CALL_RET_BY_VEC:
+ tcg_out_st(s, TCG_TYPE_V128,
+ tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0),
+ TCG_REG_CALL_STACK, ofs_slot0);
+ /* fall through */
+ case TCG_CALL_RET_BY_REF:
+ tcg_out_ld(s, TCG_TYPE_I64, ldst->datalo_reg,
+ TCG_REG_CALL_STACK, ofs_slot0 + 8 * HOST_BIG_ENDIAN);
+ tcg_out_ld(s, TCG_TYPE_I64, ldst->datahi_reg,
+ TCG_REG_CALL_STACK, ofs_slot0 + 8 * !HOST_BIG_ENDIAN);
+ return;
+ default:
+ g_assert_not_reached();
}
+ break;
+
+ default:
+ g_assert_not_reached();
}
-#endif
-#ifdef DEBUG_DISAS
+ mov[0].dst = ldst->datalo_reg;
+ mov[0].src =
+ tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, HOST_BIG_ENDIAN);
+ mov[0].dst_type = TCG_TYPE_REG;
+ mov[0].src_type = TCG_TYPE_REG;
+ mov[0].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64;
+
+ mov[1].dst = ldst->datahi_reg;
+ mov[1].src =
+ tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, !HOST_BIG_ENDIAN);
+ mov[1].dst_type = TCG_TYPE_REG;
+ mov[1].src_type = TCG_TYPE_REG;
+ mov[1].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64;
+
+ tcg_out_movext2(s, mov, mov + 1, parm->ntmp ? parm->tmp[0] : -1);
+}
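
One instance of the sign-extension decision above: a signed 16-bit load (MO_SW) into an I64 result on a 64-bit host, with load_sign false, resolves to a single extension:

    /*
     * load_sign == false and (mop & MO_SIGN) != 0, therefore
     *   mov[0].src_ext = mop & MO_SSIZE = MO_SW,
     * and tcg_out_movext1() emits one 16->64 sign-extension from the
     * helper's return register into ldst->datalo_reg.
     */
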
+
+static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
+ const TCGLdstHelperParam *parm)
+{
+ const TCGHelperInfo *info;
+ const TCGCallArgumentLoc *loc;
+ TCGMovExtend mov[4];
+ TCGType data_type;
+ unsigned next_arg, nmov, n;
+ MemOp mop = get_memop(ldst->oi);
+
+ switch (mop & MO_SIZE) {
+ case MO_8:
+ case MO_16:
+ case MO_32:
+ info = &info_helper_st32_mmu;
+ data_type = TCG_TYPE_I32;
+ break;
+ case MO_64:
+ info = &info_helper_st64_mmu;
+ data_type = TCG_TYPE_I64;
+ break;
+ case MO_128:
+ info = &info_helper_st128_mmu;
+ data_type = TCG_TYPE_I128;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /* Defer env argument. */
+ next_arg = 1;
+ nmov = 0;
+
+ /* Handle addr argument. */
+ loc = &info->in[next_arg];
+ if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
+ /*
+ * 32-bit host with 32-bit guest: zero-extend the guest address
+ * to 64-bits for the helper by storing the low part. Later,
+ * after we have processed the register inputs, we will load a
+ * zero for the high part.
+ */
+ tcg_out_helper_add_mov(mov, loc + HOST_BIG_ENDIAN,
+ TCG_TYPE_I32, TCG_TYPE_I32,
+ ldst->addrlo_reg, -1);
+ next_arg += 2;
+ nmov += 1;
+ } else {
+ n = tcg_out_helper_add_mov(mov, loc, TCG_TYPE_I64, s->addr_type,
+ ldst->addrlo_reg, ldst->addrhi_reg);
+ next_arg += n;
+ nmov += n;
+ }
+
+ /* Handle data argument. */
+ loc = &info->in[next_arg];
+ switch (loc->kind) {
+ case TCG_CALL_ARG_NORMAL:
+ case TCG_CALL_ARG_EXTEND_U:
+ case TCG_CALL_ARG_EXTEND_S:
+ n = tcg_out_helper_add_mov(mov + nmov, loc, data_type, ldst->type,
+ ldst->datalo_reg, ldst->datahi_reg);
+ next_arg += n;
+ nmov += n;
+ tcg_out_helper_load_slots(s, nmov, mov, parm);
+ break;
+
+ case TCG_CALL_ARG_BY_REF:
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
+ tcg_debug_assert(data_type == TCG_TYPE_I128);
+ tcg_out_st(s, TCG_TYPE_I64,
+ HOST_BIG_ENDIAN ? ldst->datahi_reg : ldst->datalo_reg,
+ TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc[0].ref_slot));
+ tcg_out_st(s, TCG_TYPE_I64,
+ HOST_BIG_ENDIAN ? ldst->datalo_reg : ldst->datahi_reg,
+ TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc[1].ref_slot));
+
+ tcg_out_helper_load_slots(s, nmov, mov, parm);
+
+ if (arg_slot_reg_p(loc->arg_slot)) {
+ tcg_out_addi_ptr(s, tcg_target_call_iarg_regs[loc->arg_slot],
+ TCG_REG_CALL_STACK,
+ arg_slot_stk_ofs(loc->ref_slot));
+ } else {
+ tcg_debug_assert(parm->ntmp != 0);
+ tcg_out_addi_ptr(s, parm->tmp[0], TCG_REG_CALL_STACK,
+ arg_slot_stk_ofs(loc->ref_slot));
+ tcg_out_st(s, TCG_TYPE_PTR, parm->tmp[0],
+ TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc->arg_slot));
+ }
+ next_arg += 2;
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ if (TCG_TARGET_REG_BITS == 32 && s->addr_type == TCG_TYPE_I32) {
+ /* Zero extend the address by loading a zero for the high part. */
+ loc = &info->in[1 + !HOST_BIG_ENDIAN];
+ tcg_out_helper_load_imm(s, loc->arg_slot, TCG_TYPE_I32, 0, parm);
+ }
+
+ tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg);
+}
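
The zero-extension of the guest address is deliberately split around the register batch:

    /*
     * Ordering in the 32-bit-guest case (sketch):
     *   1. mov[] collects the low address word and the data words;
     *   2. tcg_out_helper_load_slots() drains the whole batch, resolving
     *      any register overlap;
     *   3. only then is the high address slot written with constant 0,
     *      so the zero cannot clobber a register a pending move reads.
     */
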
+
+int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
+{
+ int i, start_words, num_insns;
+ TCGOp *op;
+
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
- && qemu_log_in_addr_range(tb->pc))) {
- qemu_log_lock();
- qemu_log("OP:\n");
- tcg_dump_ops(s, false);
- qemu_log("\n");
- qemu_log_unlock();
+ && qemu_log_in_addr_range(pc_start))) {
+ FILE *logfile = qemu_log_trylock();
+ if (logfile) {
+ fprintf(logfile, "OP:\n");
+ tcg_dump_ops(s, logfile, false);
+ fprintf(logfile, "\n");
+ qemu_log_unlock(logfile);
+ }
}
-#endif
-#ifdef CONFIG_PROFILER
- atomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
+#ifdef CONFIG_DEBUG_TCG
+ /* Ensure all labels referenced have been emitted. */
+ {
+ TCGLabel *l;
+ bool error = false;
+
+ QSIMPLEQ_FOREACH(l, &s->labels, next) {
+ if (unlikely(!l->present) && !QSIMPLEQ_EMPTY(&l->branches)) {
+ qemu_log_mask(CPU_LOG_TB_OP,
+ "$L%d referenced but not present.\n", l->id);
+ error = true;
+ }
+ }
+ assert(!error);
+ }
#endif
-#ifdef USE_TCG_OPTIMIZATIONS
tcg_optimize(s);
-#endif
-
-#ifdef CONFIG_PROFILER
- atomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
- atomic_set(&prof->la_time, prof->la_time - profile_getclock());
-#endif
reachable_code_pass(s);
+ liveness_pass_0(s);
liveness_pass_1(s);
if (s->nb_indirects > 0) {
-#ifdef DEBUG_DISAS
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
- && qemu_log_in_addr_range(tb->pc))) {
- qemu_log_lock();
- qemu_log("OP before indirect lowering:\n");
- tcg_dump_ops(s, false);
- qemu_log("\n");
- qemu_log_unlock();
+ && qemu_log_in_addr_range(pc_start))) {
+ FILE *logfile = qemu_log_trylock();
+ if (logfile) {
+ fprintf(logfile, "OP before indirect lowering:\n");
+ tcg_dump_ops(s, logfile, false);
+ fprintf(logfile, "\n");
+ qemu_log_unlock(logfile);
+ }
}
-#endif
+
/* Replace indirect temps with direct temps. */
if (liveness_pass_2(s)) {
/* If changes were made, re-run liveness. */
@@ -3865,25 +6130,32 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
}
}
-#ifdef CONFIG_PROFILER
- atomic_set(&prof->la_time, prof->la_time + profile_getclock());
-#endif
-
-#ifdef DEBUG_DISAS
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
- && qemu_log_in_addr_range(tb->pc))) {
- qemu_log_lock();
- qemu_log("OP after optimization and liveness analysis:\n");
- tcg_dump_ops(s, true);
- qemu_log("\n");
- qemu_log_unlock();
+ && qemu_log_in_addr_range(pc_start))) {
+ FILE *logfile = qemu_log_trylock();
+ if (logfile) {
+ fprintf(logfile, "OP after optimization and liveness analysis:\n");
+ tcg_dump_ops(s, logfile, true);
+ fprintf(logfile, "\n");
+ qemu_log_unlock(logfile);
+ }
}
-#endif
+
+ /* Initialize goto_tb jump offsets. */
+ tb->jmp_reset_offset[0] = TB_JMP_OFFSET_INVALID;
+ tb->jmp_reset_offset[1] = TB_JMP_OFFSET_INVALID;
+ tb->jmp_insn_offset[0] = TB_JMP_OFFSET_INVALID;
+ tb->jmp_insn_offset[1] = TB_JMP_OFFSET_INVALID;
tcg_reg_alloc_start(s);
- s->code_buf = tb->tc.ptr;
- s->code_ptr = tb->tc.ptr;
+ /*
+ * Reset the buffer pointers when restarting after overflow.
+ * TODO: Move this into translate-all.c with the rest of the
+ * buffer management. Having only this done here is confusing.
+ */
+ s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr);
+ s->code_ptr = s->code_buf;
#ifdef TCG_TARGET_NEED_LDST_LABELS
QSIMPLEQ_INIT(&s->ldst_labels);
@@ -3892,24 +6164,24 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
s->pool_labels = NULL;
#endif
+ start_words = s->insn_start_words;
+ s->gen_insn_data =
+ tcg_malloc(sizeof(uint64_t) * s->gen_tb->icount * start_words);
+
+ tcg_out_tb_start(s);
+
num_insns = -1;
QTAILQ_FOREACH(op, &s->ops, link) {
TCGOpcode opc = op->opc;
-#ifdef CONFIG_PROFILER
- atomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
-#endif
-
switch (opc) {
case INDEX_op_mov_i32:
case INDEX_op_mov_i64:
case INDEX_op_mov_vec:
tcg_reg_alloc_mov(s, op);
break;
- case INDEX_op_movi_i32:
- case INDEX_op_movi_i64:
- case INDEX_op_dupi_vec:
- tcg_reg_alloc_movi(s, op);
+ case INDEX_op_dup_vec:
+ tcg_reg_alloc_dup(s, op);
break;
case INDEX_op_insn_start:
if (num_insns >= 0) {
@@ -3919,14 +6191,9 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
assert(s->gen_insn_end_off[num_insns] == off);
}
num_insns++;
- for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
- target_ulong a;
-#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
- a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
-#else
- a = op->args[i];
-#endif
- s->gen_insn_data[num_insns][i] = a;
+ for (i = 0; i < start_words; ++i) {
+ s->gen_insn_data[num_insns * start_words + i] =
+ tcg_get_insn_start_param(op, i);
}
break;
case INDEX_op_discard:
@@ -3934,11 +6201,22 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
break;
case INDEX_op_set_label:
tcg_reg_alloc_bb_end(s, s->reserved_regs);
- tcg_out_label(s, arg_label(op->args[0]), s->code_ptr);
+ tcg_out_label(s, arg_label(op->args[0]));
break;
case INDEX_op_call:
tcg_reg_alloc_call(s, op);
break;
+ case INDEX_op_exit_tb:
+ tcg_out_exit_tb(s, op->args[0]);
+ break;
+ case INDEX_op_goto_tb:
+ tcg_out_goto_tb(s, op->args[0]);
+ break;
+ case INDEX_op_dup2_vec:
+ if (tcg_reg_alloc_dup2(s, op)) {
+ break;
+ }
+ /* fall through */
default:
/* Sanity check that we've not introduced any unhandled opcodes. */
tcg_debug_assert(tcg_op_supported(opc));
@@ -3948,9 +6226,6 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
tcg_reg_alloc_op(s, op);
break;
}
-#ifdef CONFIG_DEBUG_TCG
- check_regs(s);
-#endif
/* Test for (pending) buffer overflow. The assumption is that any
one operation beginning below the high water mark cannot overrun
the buffer completely. Thus we can test for overflow after
@@ -3958,92 +6233,41 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
return -1;
}
+ /* Test for TB overflow, as seen by gen_insn_end_off. */
+ if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
+ return -2;
+ }
}
- tcg_debug_assert(num_insns >= 0);
+ tcg_debug_assert(num_insns + 1 == s->gen_tb->icount);
s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
/* Generate TB finalization at the end of block */
#ifdef TCG_TARGET_NEED_LDST_LABELS
- if (!tcg_out_ldst_finalize(s)) {
- return -1;
+ i = tcg_out_ldst_finalize(s);
+ if (i < 0) {
+ return i;
}
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
- if (!tcg_out_pool_finalize(s)) {
- return -1;
+ i = tcg_out_pool_finalize(s);
+ if (i < 0) {
+ return i;
}
#endif
+ if (!tcg_resolve_relocs(s)) {
+ return -2;
+ }
+#ifndef CONFIG_TCG_INTERPRETER
/* flush instruction cache */
- flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
+ flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
+ (uintptr_t)s->code_buf,
+ tcg_ptr_byte_diff(s->code_ptr, s->code_buf));
+#endif
return tcg_current_code_size(s);
}
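
Both negative returns form a retry contract with the caller; a simplified sketch of the caller side (condensed from the tb_gen_code flow in accel/tcg/translate-all.c):

    int gen_code_size = tcg_gen_code(tcg_ctx, tb, pc);
    if (unlikely(gen_code_size < 0)) {
        if (gen_code_size == -1) {
            /* Overflow of code_gen_buffer: restart in a fresh region. */
        } else {
            /* Host code exceeded 64KiB (-2): regenerate the TB with
               fewer guest instructions so gen_insn_end_off still fits. */
        }
    }
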
-#ifdef CONFIG_PROFILER
-void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
-{
- TCGProfile prof = {};
- const TCGProfile *s;
- int64_t tb_count;
- int64_t tb_div_count;
- int64_t tot;
-
- tcg_profile_snapshot_counters(&prof);
- s = &prof;
- tb_count = s->tb_count;
- tb_div_count = tb_count ? tb_count : 1;
- tot = s->interm_time + s->code_time;
-
- cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
- tot, tot / 2.4e9);
- cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
- tb_count, s->tb_count1 - tb_count,
- (double)(s->tb_count1 - s->tb_count)
- / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
- cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
- (double)s->op_count / tb_div_count, s->op_count_max);
- cpu_fprintf(f, "deleted ops/TB %0.2f\n",
- (double)s->del_op_count / tb_div_count);
- cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n",
- (double)s->temp_count / tb_div_count, s->temp_count_max);
- cpu_fprintf(f, "avg host code/TB %0.1f\n",
- (double)s->code_out_len / tb_div_count);
- cpu_fprintf(f, "avg search data/TB %0.1f\n",
- (double)s->search_out_len / tb_div_count);
-
- cpu_fprintf(f, "cycles/op %0.1f\n",
- s->op_count ? (double)tot / s->op_count : 0);
- cpu_fprintf(f, "cycles/in byte %0.1f\n",
- s->code_in_len ? (double)tot / s->code_in_len : 0);
- cpu_fprintf(f, "cycles/out byte %0.1f\n",
- s->code_out_len ? (double)tot / s->code_out_len : 0);
- cpu_fprintf(f, "cycles/search byte %0.1f\n",
- s->search_out_len ? (double)tot / s->search_out_len : 0);
- if (tot == 0) {
- tot = 1;
- }
- cpu_fprintf(f, " gen_interm time %0.1f%%\n",
- (double)s->interm_time / tot * 100.0);
- cpu_fprintf(f, " gen_code time %0.1f%%\n",
- (double)s->code_time / tot * 100.0);
- cpu_fprintf(f, "optim./code time %0.1f%%\n",
- (double)s->opt_time / (s->code_time ? s->code_time : 1)
- * 100.0);
- cpu_fprintf(f, "liveness/code time %0.1f%%\n",
- (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
- cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
- s->restore_count);
- cpu_fprintf(f, " avg cycles %0.1f\n",
- s->restore_count ? (double)s->restore_time / s->restore_count : 0);
-}
-#else
-void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
-{
- cpu_fprintf(f, "[TCG profiler not compiled]\n");
-}
-#endif
-
#ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things:
@@ -4102,7 +6326,7 @@ static int find_string(const char *strtab, const char *str)
}
}
-static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
+static void tcg_register_jit_int(const void *buf_ptr, size_t buf_size,
const void *debug_frame,
size_t debug_frame_size)
{
@@ -4282,7 +6506,8 @@ static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
/* Enable this block to be able to debug the ELF image file creation.
One can use readelf, objdump, or other inspection utilities. */
{
- FILE *f = fopen("/tmp/qemu.jit", "w+b");
+ g_autofree char *jit = g_strdup_printf("%s/qemu.jit", g_get_tmp_dir());
+ FILE *f = fopen(jit, "w+b");
if (f) {
if (fwrite(img, img_size, 1, f) != img_size) {
/* Avoid stupid unused return value warning for fwrite. */
@@ -4304,13 +6529,13 @@ static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
/* No support for the feature. Provide the entry point expected by exec.c,
and implement the internal function we declared earlier. */
-static void tcg_register_jit_int(void *buf, size_t size,
+static void tcg_register_jit_int(const void *buf, size_t size,
const void *debug_frame,
size_t debug_frame_size)
{
}
-void tcg_register_jit(void *buf, size_t buf_size)
+void tcg_register_jit(const void *buf, size_t buf_size)
{
}
#endif /* ELF_HOST_MACHINE */