author     Richard Henderson <rth@twiddle.net>       2011-09-08 09:23:11 +0100
committer  malc <av1474@comtv.ru>                    2011-09-08 18:10:16 +0400
commit     8278604134e36f7f96612c2fda3b615787e5b692 (patch)
tree       14a8607111a4526f5f71bdf6eb2f6906614e3b78 /target-i386
parent     c82dc29a9112f34e0a51cad9a412cf6d9d05dfb2 (diff)
target-i386: Compute all flag data inside %cl != 0 test.
The (x << (cl - 1)) quantity is only used if CL != 0.  Move the computation
of that quantity nearer its use.  This avoids the creation of undefined TCG
operations when the constant propagation optimization proves that CL == 0,
and thus CL - 1 is outside the range [0, wordsize).

Signed-off-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: malc <av1474@comtv.ru>
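
For illustration only (not part of the commit message): a minimal, self-contained
C sketch of the hazard the patch avoids.  The helper name and structure below are
hypothetical; in the real code the shift is a TCG operation rather than a C shift,
but the constraint is the same: the shift amount must stay inside [0, wordsize).

/* Hypothetical standalone illustration, not QEMU code.  The guest count "cl"
 * has already been masked to [0, 31], so when cl == 0 the value cl - 1 would
 * be 0xffffffff -- an out-of-range shift amount.  Computing the flag input
 * (x << (cl - 1)) only inside the cl != 0 path keeps the shift well defined. */
#include <stdint.h>
#include <stdio.h>

static uint32_t shl_flag_src(uint32_t x, uint32_t cl)
{
    uint32_t cc_src = 0;        /* flag input; unused when cl == 0 */

    if (cl != 0) {
        /* cl - 1 is guaranteed to be in [0, 31] here. */
        cc_src = x << (cl - 1);
    }
    return cc_src;
}

int main(void)
{
    printf("%#x\n", shl_flag_src(0x1234, 4));  /* prints 0x91a0 */
    printf("%#x\n", shl_flag_src(0x1234, 0));  /* prints 0 (flags untouched) */
    return 0;
}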
Diffstat (limited to 'target-i386')
-rw-r--r--  target-i386/translate.c | 72
1 file changed, 43 insertions(+), 29 deletions(-)
diff --git a/target-i386/translate.c b/target-i386/translate.c
index ccef381be8..b9667628a0 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -1406,70 +1406,84 @@ static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
 {
     target_ulong mask;
     int shift_label;
-    TCGv t0, t1;
+    TCGv t0, t1, t2;
 
-    if (ot == OT_QUAD)
+    if (ot == OT_QUAD) {
         mask = 0x3f;
-    else
+    } else {
         mask = 0x1f;
+    }
 
     /* load */
-    if (op1 == OR_TMP0)
+    if (op1 == OR_TMP0) {
         gen_op_ld_T0_A0(ot + s->mem_index);
-    else
+    } else {
         gen_op_mov_TN_reg(ot, 0, op1);
+    }
 
-    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
+    t0 = tcg_temp_local_new();
+    t1 = tcg_temp_local_new();
+    t2 = tcg_temp_local_new();
 
-    tcg_gen_addi_tl(cpu_tmp5, cpu_T[1], -1);
+    tcg_gen_andi_tl(t2, cpu_T[1], mask);
 
     if (is_right) {
         if (is_arith) {
             gen_exts(ot, cpu_T[0]);
-            tcg_gen_sar_tl(cpu_T3, cpu_T[0], cpu_tmp5);
-            tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+            tcg_gen_mov_tl(t0, cpu_T[0]);
+            tcg_gen_sar_tl(cpu_T[0], cpu_T[0], t2);
         } else {
             gen_extu(ot, cpu_T[0]);
-            tcg_gen_shr_tl(cpu_T3, cpu_T[0], cpu_tmp5);
-            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+            tcg_gen_mov_tl(t0, cpu_T[0]);
+            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], t2);
         }
     } else {
-        tcg_gen_shl_tl(cpu_T3, cpu_T[0], cpu_tmp5);
-        tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
+        tcg_gen_mov_tl(t0, cpu_T[0]);
+        tcg_gen_shl_tl(cpu_T[0], cpu_T[0], t2);
     }
 
     /* store */
-    if (op1 == OR_TMP0)
+    if (op1 == OR_TMP0) {
         gen_op_st_T0_A0(ot + s->mem_index);
-    else
+    } else {
         gen_op_mov_reg_T0(ot, op1);
-
+    }
+
     /* update eflags if non zero shift */
-    if (s->cc_op != CC_OP_DYNAMIC)
+    if (s->cc_op != CC_OP_DYNAMIC) {
         gen_op_set_cc_op(s->cc_op);
+    }
 
-    /* XXX: inefficient */
-    t0 = tcg_temp_local_new();
-    t1 = tcg_temp_local_new();
-
-    tcg_gen_mov_tl(t0, cpu_T[0]);
-    tcg_gen_mov_tl(t1, cpu_T3);
+    tcg_gen_mov_tl(t1, cpu_T[0]);
 
     shift_label = gen_new_label();
-    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[1], 0, shift_label);
+    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, shift_label);
 
-    tcg_gen_mov_tl(cpu_cc_src, t1);
-    tcg_gen_mov_tl(cpu_cc_dst, t0);
-    if (is_right)
+    tcg_gen_addi_tl(t2, t2, -1);
+    tcg_gen_mov_tl(cpu_cc_dst, t1);
+
+    if (is_right) {
+        if (is_arith) {
+            tcg_gen_sar_tl(cpu_cc_src, t0, t2);
+        } else {
+            tcg_gen_shr_tl(cpu_cc_src, t0, t2);
+        }
+    } else {
+        tcg_gen_shl_tl(cpu_cc_src, t0, t2);
+    }
+
+    if (is_right) {
         tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
-    else
+    } else {
         tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
-
+    }
+
     gen_set_label(shift_label);
     s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
 
     tcg_temp_free(t0);
     tcg_temp_free(t1);
+    tcg_temp_free(t2);
 }
 
 static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,