author     Richard Henderson <rth@twiddle.net>      2009-12-11 11:58:29 -0800
committer  Aurelien Jarno <aurelien@aurel32.net>    2009-12-13 21:36:21 +0100
commit     ffec44f14f5387d1d9f17323165041aee4540625
tree       dbd091dfeb8009e43c2becdd6e33f22c2de440be /target-alpha
parent     14ab16342990d7400da5d889f3594bd498fa2e31
target-alpha: Expand msk*h inline.
Signed-off-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
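
Note on the expansion below (not part of the patch): the old helpers built the byte-level zap mask as (byte_mask << (rb & 7)) >> 8, while the inline TCG version shifts the expanded 64-bit mask right by 64 - 8*(rb & 7), splitting that shift into (63 - 8*(rb & 7)) plus a constant 1 so that rb<2:0> == 0 still yields an all-zero mask. The standalone C sketch below checks that the two formulations agree; zapnot_mask_sketch() is a local stand-in assumed to mirror what QEMU's zapnot_mask() computes, i.e. expanding each set bit of the byte mask into a full byte of ones.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for QEMU's zapnot_mask(): expand an 8-bit byte mask into a
   64-bit bit mask, bit i of 'lit' selecting all eight bits of byte i.  */
static uint64_t zapnot_mask_sketch(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; i++) {
        if (lit & (1 << i)) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

int main(void)
{
    /* Byte masks used by MSKWH, MSKLH, MSKQH respectively.  */
    static const uint8_t byte_masks[] = { 0x03, 0x0f, 0xff };
    size_t m;
    int rb;

    for (m = 0; m < sizeof(byte_masks) / sizeof(byte_masks[0]); m++) {
        for (rb = 0; rb < 8; rb++) {
            /* Helper form: shift the byte mask left, keep bits <15:8>,
               then expand to a 64-bit zap mask.  */
            uint64_t old_mask =
                zapnot_mask_sketch((byte_masks[m] << rb) >> 8);

            /* Inline form: expand first, then shift right by 64 - 8*rb,
               split as (~(rb * 8) & 63) followed by a constant 1 shift
               so that rb == 0 still produces an all-zero mask.  */
            uint64_t new_mask = zapnot_mask_sketch(byte_masks[m]);
            new_mask >>= (~(rb * 8)) & 0x3f;
            new_mask >>= 1;

            assert(old_mask == new_mask);
        }
    }
    printf("msk*h mask equivalence holds for all rb<2:0> values\n");
    return 0;
}

With those assumptions, the asserts should hold for all eight values of rb<2:0> and for the 0x03/0x0f/0xff byte masks used by MSKWH, MSKLH and MSKQH.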
Diffstat (limited to 'target-alpha')
-rw-r--r--  target-alpha/helper.h    |  3
-rw-r--r--  target-alpha/op_helper.c | 15
-rw-r--r--  target-alpha/translate.c | 63
3 files changed, 49 insertions(+), 32 deletions(-)
diff --git a/target-alpha/helper.h b/target-alpha/helper.h
index 8db73b0df4..a545c5cd4c 100644
--- a/target-alpha/helper.h
+++ b/target-alpha/helper.h
@@ -19,11 +19,8 @@ DEF_HELPER_1(cttz, i64, i64)
DEF_HELPER_2(zap, i64, i64, i64)
DEF_HELPER_2(zapnot, i64, i64, i64)
-DEF_HELPER_2(mskwh, i64, i64, i64)
DEF_HELPER_2(inswh, i64, i64, i64)
-DEF_HELPER_2(msklh, i64, i64, i64)
DEF_HELPER_2(inslh, i64, i64, i64)
-DEF_HELPER_2(mskqh, i64, i64, i64)
DEF_HELPER_2(insqh, i64, i64, i64)
DEF_HELPER_2(cmpbge, i64, i64, i64)
diff --git a/target-alpha/op_helper.c b/target-alpha/op_helper.c
index 591adbdc8f..b6ec0e8003 100644
--- a/target-alpha/op_helper.c
+++ b/target-alpha/op_helper.c
@@ -185,33 +185,18 @@ uint64_t helper_zapnot(uint64_t val, uint64_t mask)
return byte_zap(val, ~mask);
}
-uint64_t helper_mskwh(uint64_t val, uint64_t mask)
-{
- return byte_zap(val, (0x03 << (mask & 7)) >> 8);
-}
-
uint64_t helper_inswh(uint64_t val, uint64_t mask)
{
val >>= 64 - ((mask & 7) * 8);
return byte_zap(val, ~((0x03 << (mask & 7)) >> 8));
}
-uint64_t helper_msklh(uint64_t val, uint64_t mask)
-{
- return byte_zap(val, (0x0F << (mask & 7)) >> 8);
-}
-
uint64_t helper_inslh(uint64_t val, uint64_t mask)
{
val >>= 64 - ((mask & 7) * 8);
return byte_zap(val, ~((0x0F << (mask & 7)) >> 8));
}
-uint64_t helper_mskqh(uint64_t val, uint64_t mask)
-{
- return byte_zap(val, (0xFF << (mask & 7)) >> 8);
-}
-
uint64_t helper_insqh(uint64_t val, uint64_t mask)
{
val >>= 64 - ((mask & 7) * 8);
diff --git a/target-alpha/translate.c b/target-alpha/translate.c
index ebab95d65d..1dc344821a 100644
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -579,8 +579,8 @@ static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
/* EXTWH, EXTLH, EXTQH */
-static inline void gen_ext_h(int ra, int rb, int rc, int islit,
- uint8_t lit, uint8_t byte_mask)
+static void gen_ext_h(int ra, int rb, int rc, int islit,
+ uint8_t lit, uint8_t byte_mask)
{
if (unlikely(rc == 31))
return;
@@ -604,8 +604,8 @@ static inline void gen_ext_h(int ra, int rb, int rc, int islit,
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
-static inline void gen_ext_l(int ra, int rb, int rc, int islit,
- uint8_t lit, uint8_t byte_mask)
+static void gen_ext_l(int ra, int rb, int rc, int islit,
+ uint8_t lit, uint8_t byte_mask)
{
if (unlikely(rc == 31))
return;
@@ -626,8 +626,8 @@ static inline void gen_ext_l(int ra, int rb, int rc, int islit,
}
/* INSBL, INSWL, INSLL, INSQL */
-static inline void gen_ins_l(int ra, int rb, int rc, int islit,
- uint8_t lit, uint8_t byte_mask)
+static void gen_ins_l(int ra, int rb, int rc, int islit,
+ uint8_t lit, uint8_t byte_mask)
{
if (unlikely(rc == 31))
return;
@@ -655,9 +655,47 @@ static inline void gen_ins_l(int ra, int rb, int rc, int islit,
}
}
+/* MSKWH, MSKLH, MSKQH */
+static void gen_msk_h(int ra, int rb, int rc, int islit,
+ uint8_t lit, uint8_t byte_mask)
+{
+ if (unlikely(rc == 31))
+ return;
+ else if (unlikely(ra == 31))
+ tcg_gen_movi_i64(cpu_ir[rc], 0);
+ else if (islit) {
+ gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
+ } else {
+ TCGv shift = tcg_temp_new();
+ TCGv mask = tcg_temp_new();
+
+ /* The instruction description is as above, where the byte_mask
+ is shifted left, and then we extract bits <15:8>. This can be
+ emulated with a right-shift on the expanded byte mask. This
+ requires extra care because for an input <2:0> == 0 we need a
+ shift of 64 bits in order to generate a zero. This is done by
+ splitting the shift into two parts, the variable shift - 1
+ followed by a constant 1 shift. The code we expand below is
+ equivalent to ~((B & 7) * 8) & 63. */
+
+ tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
+ tcg_gen_shli_i64(shift, shift, 3);
+ tcg_gen_not_i64(shift, shift);
+ tcg_gen_andi_i64(shift, shift, 0x3f);
+ tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
+ tcg_gen_shr_i64(mask, mask, shift);
+ tcg_gen_shri_i64(mask, mask, 1);
+
+ tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
+
+ tcg_temp_free(mask);
+ tcg_temp_free(shift);
+ }
+}
+
/* MSKBL, MSKWL, MSKLL, MSKQL */
-static inline void gen_msk_l(int ra, int rb, int rc, int islit,
- uint8_t lit, uint8_t byte_mask)
+static void gen_msk_l(int ra, int rb, int rc, int islit,
+ uint8_t lit, uint8_t byte_mask)
{
if (unlikely(rc == 31))
return;
@@ -712,11 +750,8 @@ ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
-ARITH3(mskwh)
ARITH3(inswh)
-ARITH3(msklh)
ARITH3(inslh)
-ARITH3(mskqh)
ARITH3(insqh)
ARITH3(umulh)
ARITH3(mullv)
@@ -1440,7 +1475,7 @@ static inline int translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x52:
/* MSKWH */
- gen_mskwh(ra, rb, rc, islit, lit);
+ gen_msk_h(ra, rb, rc, islit, lit, 0x03);
break;
case 0x57:
/* INSWH */
@@ -1452,7 +1487,7 @@ static inline int translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x62:
/* MSKLH */
- gen_msklh(ra, rb, rc, islit, lit);
+ gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
break;
case 0x67:
/* INSLH */
@@ -1464,7 +1499,7 @@ static inline int translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x72:
/* MSKQH */
- gen_mskqh(ra, rb, rc, islit, lit);
+ gen_msk_h(ra, rb, rc, islit, lit, 0xff);
break;
case 0x77:
/* INSQH */