author    Richard Henderson <rth@twiddle.net>    2012-03-24 09:51:09 -0700
committer Blue Swirl <blauwirbel@gmail.com>      2012-03-24 17:07:27 +0000
commit    4a58aedff479e02c33ba74d752f34944751ba28b (patch)
tree      b832e27a11d78a8ff81958c694bcd41dcc764c71 /target-alpha
parent    b9f0923eb782b92a85657092b625d96b0af26e2e (diff)
target-alpha: Move floating-point helpers to fpu_helper.c.
Signed-off-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Blue Swirl <blauwirbel@gmail.com>
Diffstat (limited to 'target-alpha')
-rw-r--r--  target-alpha/fpu_helper.c  846
-rw-r--r--  target-alpha/helper.h      106
-rw-r--r--  target-alpha/op_helper.c   812
-rw-r--r--  target-alpha/translate.c   130
4 files changed, 966 insertions, 928 deletions
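
The whole patch follows one pattern: each floating-point helper gains an explicit CPUAlphaState *env as its first argument (an extra "env" slot in its DEF_HELPER declaration), so the code moved into fpu_helper.c no longer relies on a global env and can hand GETPC() to the arithmetic-exception path. A condensed sketch of the before/after shape of one helper, taken from the hunks below:

    /* Before (op_helper.c): env is implicit/global. */
    DEF_HELPER_FLAGS_2(adds, TCG_CALL_CONST, i64, i64, i64)
    uint64_t helper_adds(uint64_t a, uint64_t b);

    /* After (fpu_helper.c): env is the explicit first argument. */
    DEF_HELPER_FLAGS_3(adds, TCG_CALL_CONST, i64, env, i64, i64)
    uint64_t helper_adds(CPUAlphaState *env, uint64_t a, uint64_t b);

    /* translate.c threads cpu_env through the generated call,
       directly or via the gen_ieee_arith3 helper pointer. */
    gen_helper_adds(cpu_fir[rc], cpu_env, va, vb);
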
diff --git a/target-alpha/fpu_helper.c b/target-alpha/fpu_helper.c
new file mode 100644
index 0000000000..d38521b6d8
--- /dev/null
+++ b/target-alpha/fpu_helper.c
@@ -0,0 +1,846 @@
+/*
+ * Helpers for floating point instructions.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "helper.h"
+#include "softfloat.h"
+
+#define FP_STATUS (env->fp_status)
+
+
+void helper_setroundmode(CPUAlphaState *env, uint32_t val)
+{
+ set_float_rounding_mode(val, &FP_STATUS);
+}
+
+void helper_setflushzero(CPUAlphaState *env, uint32_t val)
+{
+ set_flush_to_zero(val, &FP_STATUS);
+}
+
+void helper_fp_exc_clear(CPUAlphaState *env)
+{
+ set_float_exception_flags(0, &FP_STATUS);
+}
+
+uint32_t helper_fp_exc_get(CPUAlphaState *env)
+{
+ return get_float_exception_flags(&FP_STATUS);
+}
+
+static inline void inline_fp_exc_raise(CPUAlphaState *env, void *retaddr,
+ uint32_t exc, uint32_t regno)
+{
+ if (exc) {
+ uint32_t hw_exc = 0;
+
+ if (exc & float_flag_invalid) {
+ hw_exc |= EXC_M_INV;
+ }
+ if (exc & float_flag_divbyzero) {
+ hw_exc |= EXC_M_DZE;
+ }
+ if (exc & float_flag_overflow) {
+ hw_exc |= EXC_M_FOV;
+ }
+ if (exc & float_flag_underflow) {
+ hw_exc |= EXC_M_UNF;
+ }
+ if (exc & float_flag_inexact) {
+ hw_exc |= EXC_M_INE;
+ }
+
+ arith_excp(env, retaddr, hw_exc, 1ull << regno);
+ }
+}
+
+/* Raise exceptions for ieee fp insns without software completion.
+ In that case there are no exceptions that don't trap; the mask
+ doesn't apply. */
+void helper_fp_exc_raise(CPUAlphaState *env, uint32_t exc, uint32_t regno)
+{
+ inline_fp_exc_raise(env, GETPC(), exc, regno);
+}
+
+/* Raise exceptions for ieee fp insns with software completion. */
+void helper_fp_exc_raise_s(CPUAlphaState *env, uint32_t exc, uint32_t regno)
+{
+ if (exc) {
+ env->fpcr_exc_status |= exc;
+ exc &= ~env->fpcr_exc_mask;
+ inline_fp_exc_raise(env, GETPC(), exc, regno);
+ }
+}
+
+/* Input remapping without software completion. Handle denormal-map-to-zero
+ and trap for all other non-finite numbers. */
+uint64_t helper_ieee_input(CPUAlphaState *env, uint64_t val)
+{
+ uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
+ uint64_t frac = val & 0xfffffffffffffull;
+
+ if (exp == 0) {
+ if (frac != 0) {
+ /* If DNZ is set flush denormals to zero on input. */
+ if (env->fpcr_dnz) {
+ val &= 1ull << 63;
+ } else {
+ arith_excp(env, GETPC(), EXC_M_UNF, 0);
+ }
+ }
+ } else if (exp == 0x7ff) {
+ /* Infinity or NaN. */
+ /* ??? I'm not sure these exception bit flags are correct. I do
+ know that the Linux kernel, at least, doesn't rely on them and
+ just emulates the insn to figure out what exception to use. */
+ arith_excp(env, GETPC(), frac ? EXC_M_INV : EXC_M_FOV, 0);
+ }
+ return val;
+}
+
+/* Similar, but does not trap for infinities. Used for comparisons. */
+uint64_t helper_ieee_input_cmp(CPUAlphaState *env, uint64_t val)
+{
+ uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
+ uint64_t frac = val & 0xfffffffffffffull;
+
+ if (exp == 0) {
+ if (frac != 0) {
+ /* If DNZ is set flush denormals to zero on input. */
+ if (env->fpcr_dnz) {
+ val &= 1ull << 63;
+ } else {
+ arith_excp(env, GETPC(), EXC_M_UNF, 0);
+ }
+ }
+ } else if (exp == 0x7ff && frac) {
+ /* NaN. */
+ arith_excp(env, GETPC(), EXC_M_INV, 0);
+ }
+ return val;
+}
+
+/* Input remapping with software completion enabled. All we have to do
+ is handle denormal-map-to-zero; all other inputs get exceptions as
+ needed from the actual operation. */
+uint64_t helper_ieee_input_s(CPUAlphaState *env, uint64_t val)
+{
+ if (env->fpcr_dnz) {
+ uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
+ if (exp == 0) {
+ val &= 1ull << 63;
+ }
+ }
+ return val;
+}
+
+/* F floating (VAX) */
+static uint64_t float32_to_f(float32 fa)
+{
+ uint64_t r, exp, mant, sig;
+ CPU_FloatU a;
+
+ a.f = fa;
+ sig = ((uint64_t)a.l & 0x80000000) << 32;
+ exp = (a.l >> 23) & 0xff;
+ mant = ((uint64_t)a.l & 0x007fffff) << 29;
+
+ if (exp == 255) {
+ /* NaN or infinity */
+ r = 1; /* VAX dirty zero */
+ } else if (exp == 0) {
+ if (mant == 0) {
+ /* Zero */
+ r = 0;
+ } else {
+ /* Denormalized */
+ r = sig | ((exp + 1) << 52) | mant;
+ }
+ } else {
+ if (exp >= 253) {
+ /* Overflow */
+ r = 1; /* VAX dirty zero */
+ } else {
+ r = sig | ((exp + 2) << 52);
+ }
+ }
+
+ return r;
+}
+
+static float32 f_to_float32(CPUAlphaState *env, void *retaddr, uint64_t a)
+{
+ uint32_t exp, mant_sig;
+ CPU_FloatU r;
+
+ exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
+ mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);
+
+ if (unlikely(!exp && mant_sig)) {
+ /* Reserved operands / Dirty zero */
+ dynamic_excp(env, retaddr, EXCP_OPCDEC, 0);
+ }
+
+ if (exp < 3) {
+ /* Underflow */
+ r.l = 0;
+ } else {
+ r.l = ((exp - 2) << 23) | mant_sig;
+ }
+
+ return r.f;
+}
+
+uint32_t helper_f_to_memory(uint64_t a)
+{
+ uint32_t r;
+ r = (a & 0x00001fffe0000000ull) >> 13;
+ r |= (a & 0x07ffe00000000000ull) >> 45;
+ r |= (a & 0xc000000000000000ull) >> 48;
+ return r;
+}
+
+uint64_t helper_memory_to_f(uint32_t a)
+{
+ uint64_t r;
+ r = ((uint64_t)(a & 0x0000c000)) << 48;
+ r |= ((uint64_t)(a & 0x003fffff)) << 45;
+ r |= ((uint64_t)(a & 0xffff0000)) << 13;
+ if (!(a & 0x00004000)) {
+ r |= 0x7ll << 59;
+ }
+ return r;
+}
+
+/* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong. We should
+ either implement VAX arithmetic properly or just signal invalid opcode. */
+
+uint64_t helper_addf(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = f_to_float32(env, GETPC(), a);
+ fb = f_to_float32(env, GETPC(), b);
+ fr = float32_add(fa, fb, &FP_STATUS);
+ return float32_to_f(fr);
+}
+
+uint64_t helper_subf(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = f_to_float32(env, GETPC(), a);
+ fb = f_to_float32(env, GETPC(), b);
+ fr = float32_sub(fa, fb, &FP_STATUS);
+ return float32_to_f(fr);
+}
+
+uint64_t helper_mulf(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = f_to_float32(env, GETPC(), a);
+ fb = f_to_float32(env, GETPC(), b);
+ fr = float32_mul(fa, fb, &FP_STATUS);
+ return float32_to_f(fr);
+}
+
+uint64_t helper_divf(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = f_to_float32(env, GETPC(), a);
+ fb = f_to_float32(env, GETPC(), b);
+ fr = float32_div(fa, fb, &FP_STATUS);
+ return float32_to_f(fr);
+}
+
+uint64_t helper_sqrtf(CPUAlphaState *env, uint64_t t)
+{
+ float32 ft, fr;
+
+ ft = f_to_float32(env, GETPC(), t);
+ fr = float32_sqrt(ft, &FP_STATUS);
+ return float32_to_f(fr);
+}
+
+
+/* G floating (VAX) */
+static uint64_t float64_to_g(float64 fa)
+{
+ uint64_t r, exp, mant, sig;
+ CPU_DoubleU a;
+
+ a.d = fa;
+ sig = a.ll & 0x8000000000000000ull;
+ exp = (a.ll >> 52) & 0x7ff;
+ mant = a.ll & 0x000fffffffffffffull;
+
+ if (exp == 2047) {
+ /* NaN or infinity */
+ r = 1; /* VAX dirty zero */
+ } else if (exp == 0) {
+ if (mant == 0) {
+ /* Zero */
+ r = 0;
+ } else {
+ /* Denormalized */
+ r = sig | ((exp + 1) << 52) | mant;
+ }
+ } else {
+ if (exp >= 2045) {
+ /* Overflow */
+ r = 1; /* VAX dirty zero */
+ } else {
+ r = sig | ((exp + 2) << 52);
+ }
+ }
+
+ return r;
+}
+
+static float64 g_to_float64(CPUAlphaState *env, void *retaddr, uint64_t a)
+{
+ uint64_t exp, mant_sig;
+ CPU_DoubleU r;
+
+ exp = (a >> 52) & 0x7ff;
+ mant_sig = a & 0x800fffffffffffffull;
+
+ if (!exp && mant_sig) {
+ /* Reserved operands / Dirty zero */
+ dynamic_excp(env, retaddr, EXCP_OPCDEC, 0);
+ }
+
+ if (exp < 3) {
+ /* Underflow */
+ r.ll = 0;
+ } else {
+ r.ll = ((exp - 2) << 52) | mant_sig;
+ }
+
+ return r.d;
+}
+
+uint64_t helper_g_to_memory(uint64_t a)
+{
+ uint64_t r;
+ r = (a & 0x000000000000ffffull) << 48;
+ r |= (a & 0x00000000ffff0000ull) << 16;
+ r |= (a & 0x0000ffff00000000ull) >> 16;
+ r |= (a & 0xffff000000000000ull) >> 48;
+ return r;
+}
+
+uint64_t helper_memory_to_g(uint64_t a)
+{
+ uint64_t r;
+ r = (a & 0x000000000000ffffull) << 48;
+ r |= (a & 0x00000000ffff0000ull) << 16;
+ r |= (a & 0x0000ffff00000000ull) >> 16;
+ r |= (a & 0xffff000000000000ull) >> 48;
+ return r;
+}
+
+uint64_t helper_addg(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fb = g_to_float64(env, GETPC(), b);
+ fr = float64_add(fa, fb, &FP_STATUS);
+ return float64_to_g(fr);
+}
+
+uint64_t helper_subg(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fb = g_to_float64(env, GETPC(), b);
+ fr = float64_sub(fa, fb, &FP_STATUS);
+ return float64_to_g(fr);
+}
+
+uint64_t helper_mulg(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fb = g_to_float64(env, GETPC(), b);
+ fr = float64_mul(fa, fb, &FP_STATUS);
+ return float64_to_g(fr);
+}
+
+uint64_t helper_divg(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fb = g_to_float64(env, GETPC(), b);
+ fr = float64_div(fa, fb, &FP_STATUS);
+ return float64_to_g(fr);
+}
+
+uint64_t helper_sqrtg(CPUAlphaState *env, uint64_t a)
+{
+ float64 fa, fr;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fr = float64_sqrt(fa, &FP_STATUS);
+ return float64_to_g(fr);
+}
+
+
+/* S floating (single) */
+
+/* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg. */
+static inline uint64_t float32_to_s_int(uint32_t fi)
+{
+ uint32_t frac = fi & 0x7fffff;
+ uint32_t sign = fi >> 31;
+ uint32_t exp_msb = (fi >> 30) & 1;
+ uint32_t exp_low = (fi >> 23) & 0x7f;
+ uint32_t exp;
+
+ exp = (exp_msb << 10) | exp_low;
+ if (exp_msb) {
+ if (exp_low == 0x7f) {
+ exp = 0x7ff;
+ }
+ } else {
+ if (exp_low != 0x00) {
+ exp |= 0x380;
+ }
+ }
+
+ return (((uint64_t)sign << 63)
+ | ((uint64_t)exp << 52)
+ | ((uint64_t)frac << 29));
+}
+
+static inline uint64_t float32_to_s(float32 fa)
+{
+ CPU_FloatU a;
+ a.f = fa;
+ return float32_to_s_int(a.l);
+}
+
+static inline uint32_t s_to_float32_int(uint64_t a)
+{
+ return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
+}
+
+static inline float32 s_to_float32(uint64_t a)
+{
+ CPU_FloatU r;
+ r.l = s_to_float32_int(a);
+ return r.f;
+}
+
+uint32_t helper_s_to_memory(uint64_t a)
+{
+ return s_to_float32_int(a);
+}
+
+uint64_t helper_memory_to_s(uint32_t a)
+{
+ return float32_to_s_int(a);
+}
+
+uint64_t helper_adds(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = s_to_float32(a);
+ fb = s_to_float32(b);
+ fr = float32_add(fa, fb, &FP_STATUS);
+ return float32_to_s(fr);
+}
+
+uint64_t helper_subs(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = s_to_float32(a);
+ fb = s_to_float32(b);
+ fr = float32_sub(fa, fb, &FP_STATUS);
+ return float32_to_s(fr);
+}
+
+uint64_t helper_muls(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = s_to_float32(a);
+ fb = s_to_float32(b);
+ fr = float32_mul(fa, fb, &FP_STATUS);
+ return float32_to_s(fr);
+}
+
+uint64_t helper_divs(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = s_to_float32(a);
+ fb = s_to_float32(b);
+ fr = float32_div(fa, fb, &FP_STATUS);
+ return float32_to_s(fr);
+}
+
+uint64_t helper_sqrts(CPUAlphaState *env, uint64_t a)
+{
+ float32 fa, fr;
+
+ fa = s_to_float32(a);
+ fr = float32_sqrt(fa, &FP_STATUS);
+ return float32_to_s(fr);
+}
+
+
+/* T floating (double) */
+static inline float64 t_to_float64(uint64_t a)
+{
+ /* Memory format is the same as float64 */
+ CPU_DoubleU r;
+ r.ll = a;
+ return r.d;
+}
+
+static inline uint64_t float64_to_t(float64 fa)
+{
+ /* Memory format is the same as float64 */
+ CPU_DoubleU r;
+ r.d = fa;
+ return r.ll;
+}
+
+uint64_t helper_addt(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+ fr = float64_add(fa, fb, &FP_STATUS);
+ return float64_to_t(fr);
+}
+
+uint64_t helper_subt(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+ fr = float64_sub(fa, fb, &FP_STATUS);
+ return float64_to_t(fr);
+}
+
+uint64_t helper_mult(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+ fr = float64_mul(fa, fb, &FP_STATUS);
+ return float64_to_t(fr);
+}
+
+uint64_t helper_divt(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+ fr = float64_div(fa, fb, &FP_STATUS);
+ return float64_to_t(fr);
+}
+
+uint64_t helper_sqrtt(CPUAlphaState *env, uint64_t a)
+{
+ float64 fa, fr;
+
+ fa = t_to_float64(a);
+ fr = float64_sqrt(fa, &FP_STATUS);
+ return float64_to_t(fr);
+}
+
+/* Comparisons */
+uint64_t helper_cmptun(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+
+ if (float64_unordered_quiet(fa, fb, &FP_STATUS)) {
+ return 0x4000000000000000ULL;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_cmpteq(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+
+ if (float64_eq_quiet(fa, fb, &FP_STATUS)) {
+ return 0x4000000000000000ULL;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_cmptle(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+
+ if (float64_le(fa, fb, &FP_STATUS)) {
+ return 0x4000000000000000ULL;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_cmptlt(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+
+ if (float64_lt(fa, fb, &FP_STATUS)) {
+ return 0x4000000000000000ULL;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_cmpgeq(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fb = g_to_float64(env, GETPC(), b);
+
+ if (float64_eq_quiet(fa, fb, &FP_STATUS)) {
+ return 0x4000000000000000ULL;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_cmpgle(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fb = g_to_float64(env, GETPC(), b);
+
+ if (float64_le(fa, fb, &FP_STATUS)) {
+ return 0x4000000000000000ULL;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_cmpglt(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fb = g_to_float64(env, GETPC(), b);
+
+ if (float64_lt(fa, fb, &FP_STATUS)) {
+ return 0x4000000000000000ULL;
+ } else {
+ return 0;
+ }
+}
+
+/* Floating point format conversion */
+uint64_t helper_cvtts(CPUAlphaState *env, uint64_t a)
+{
+ float64 fa;
+ float32 fr;
+
+ fa = t_to_float64(a);
+ fr = float64_to_float32(fa, &FP_STATUS);
+ return float32_to_s(fr);
+}
+
+uint64_t helper_cvtst(CPUAlphaState *env, uint64_t a)
+{
+ float32 fa;
+ float64 fr;
+
+ fa = s_to_float32(a);
+ fr = float32_to_float64(fa, &FP_STATUS);
+ return float64_to_t(fr);
+}
+
+uint64_t helper_cvtqs(CPUAlphaState *env, uint64_t a)
+{
+ float32 fr = int64_to_float32(a, &FP_STATUS);
+ return float32_to_s(fr);
+}
+
+/* Implement float64 to uint64 conversion without saturation -- we must
+ supply the truncated result. This behaviour is used by the compiler
+ to get unsigned conversion for free with the same instruction.
+
+ The VI flag is set when overflow or inexact exceptions should be raised. */
+
+static inline uint64_t inline_cvttq(CPUAlphaState *env, uint64_t a,
+ int roundmode, int VI)
+{
+ uint64_t frac, ret = 0;
+ uint32_t exp, sign, exc = 0;
+ int shift;
+
+ sign = (a >> 63);
+ exp = (uint32_t)(a >> 52) & 0x7ff;
+ frac = a & 0xfffffffffffffull;
+
+ if (exp == 0) {
+ if (unlikely(frac != 0)) {
+ goto do_underflow;
+ }
+ } else if (exp == 0x7ff) {
+ exc = (frac ? float_flag_invalid : VI ? float_flag_overflow : 0);
+ } else {
+ /* Restore implicit bit. */
+ frac |= 0x10000000000000ull;
+
+ shift = exp - 1023 - 52;
+ if (shift >= 0) {
+ /* In this case the number is so large that we must shift
+ the fraction left. There is no rounding to do. */
+ if (shift < 63) {
+ ret = frac << shift;
+ if (VI && (ret >> shift) != frac) {
+ exc = float_flag_overflow;
+ }
+ }
+ } else {
+ uint64_t round;
+
+ /* In this case the number is smaller than the fraction as
+ represented by the 52 bit number. Here we must think
+ about rounding the result. Handle this by shifting the
+ fractional part of the number into the high bits of ROUND.
+ This will let us efficiently handle round-to-nearest. */
+ shift = -shift;
+ if (shift < 63) {
+ ret = frac >> shift;
+ round = frac << (64 - shift);
+ } else {
+ /* The exponent is so small we shift out everything.
+ Leave a sticky bit for proper rounding below. */
+ do_underflow:
+ round = 1;
+ }
+
+ if (round) {
+ exc = (VI ? float_flag_inexact : 0);
+ switch (roundmode) {
+ case float_round_nearest_even:
+ if (round == (1ull << 63)) {
+ /* Fraction is exactly 0.5; round to even. */
+ ret += (ret & 1);
+ } else if (round > (1ull << 63)) {
+ ret += 1;
+ }
+ break;
+ case float_round_to_zero:
+ break;
+ case float_round_up:
+ ret += 1 - sign;
+ break;
+ case float_round_down:
+ ret += sign;
+ break;
+ }
+ }
+ }
+ if (sign) {
+ ret = -ret;
+ }
+ }
+ if (unlikely(exc)) {
+ float_raise(exc, &FP_STATUS);
+ }
+
+ return ret;
+}
+
+uint64_t helper_cvttq(CPUAlphaState *env, uint64_t a)
+{
+ return inline_cvttq(env, a, FP_STATUS.float_rounding_mode, 1);
+}
+
+uint64_t helper_cvttq_c(CPUAlphaState *env, uint64_t a)
+{
+ return inline_cvttq(env, a, float_round_to_zero, 0);
+}
+
+uint64_t helper_cvttq_svic(CPUAlphaState *env, uint64_t a)
+{
+ return inline_cvttq(env, a, float_round_to_zero, 1);
+}
+
+uint64_t helper_cvtqt(CPUAlphaState *env, uint64_t a)
+{
+ float64 fr = int64_to_float64(a, &FP_STATUS);
+ return float64_to_t(fr);
+}
+
+uint64_t helper_cvtqf(CPUAlphaState *env, uint64_t a)
+{
+ float32 fr = int64_to_float32(a, &FP_STATUS);
+ return float32_to_f(fr);
+}
+
+uint64_t helper_cvtgf(CPUAlphaState *env, uint64_t a)
+{
+ float64 fa;
+ float32 fr;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fr = float64_to_float32(fa, &FP_STATUS);
+ return float32_to_f(fr);
+}
+
+uint64_t helper_cvtgq(CPUAlphaState *env, uint64_t a)
+{
+ float64 fa = g_to_float64(env, GETPC(), a);
+ return float64_to_int64_round_to_zero(fa, &FP_STATUS);
+}
+
+uint64_t helper_cvtqg(CPUAlphaState *env, uint64_t a)
+{
+ float64 fr;
+ fr = int64_to_float64(a, &FP_STATUS);
+ return float64_to_g(fr);
+}
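
The trickiest piece above is inline_cvttq's round-to-nearest-even case: the bits shifted out of the 52-bit fraction are kept left-aligned in `round`, so a value of exactly 1ull << 63 means the discarded part was exactly one half, and anything larger means it was more than one half. A small self-contained sketch of just that rounding step (hypothetical function name, not part of the patch), assuming `ret` and `round` have already been produced as in the helper:

    /* Round-to-nearest-even: `ret` is the truncated magnitude, `round`
       holds the discarded fraction bits left-aligned in 64 bits. */
    static uint64_t round_nearest_even(uint64_t ret, uint64_t round)
    {
        if (round == (1ull << 63)) {
            ret += ret & 1;     /* exactly 0.5: round to the even value */
        } else if (round > (1ull << 63)) {
            ret += 1;           /* above 0.5: round the magnitude up */
        }
        return ret;
    }
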
diff --git a/target-alpha/helper.h b/target-alpha/helper.h
index 91a9e395c5..d36df5fb9e 100644
--- a/target-alpha/helper.h
+++ b/target-alpha/helper.h
@@ -39,65 +39,65 @@ DEF_HELPER_FLAGS_1(store_fpcr, TCG_CALL_CONST, void, i64)
DEF_HELPER_FLAGS_1(f_to_memory, TCG_CALL_CONST | TCG_CALL_PURE, i32, i64)
DEF_HELPER_FLAGS_1(memory_to_f, TCG_CALL_CONST | TCG_CALL_PURE, i64, i32)
-DEF_HELPER_FLAGS_2(addf, TCG_CALL_CONST, i64, i64, i64)
-DEF_HELPER_FLAGS_2(subf, TCG_CALL_CONST, i64, i64, i64)
-DEF_HELPER_FLAGS_2(mulf, TCG_CALL_CONST, i64, i64, i64)
-DEF_HELPER_FLAGS_2(divf, TCG_CALL_CONST, i64, i64, i64)
-DEF_HELPER_FLAGS_1(sqrtf, TCG_CALL_CONST, i64, i64)
+DEF_HELPER_FLAGS_3(addf, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(subf, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(mulf, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(divf, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(sqrtf, TCG_CALL_CONST, i64, env, i64)
DEF_HELPER_FLAGS_1(g_to_memory, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64)
DEF_HELPER_FLAGS_1(memory_to_g, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64)
-DEF_HELPER_FLAGS_2(addg, TCG_CALL_CONST, i64, i64, i64)
-DEF_HELPER_FLAGS_2(subg, TCG_CALL_CONST, i64, i64, i64)
-DEF_HELPER_FLAGS_2(mulg, TCG_CALL_CONST, i64, i64, i64)
-DEF_HELPER_FLAGS_2(divg, TCG_CALL_CONST, i64, i64, i64)
-DEF_HELPER_FLAGS_1(sqrtg, TCG_CALL_CONST, i64, i64)
+DEF_HELPER_FLAGS_3(addg, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(subg, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(mulg, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(divg, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(sqrtg, TCG_CALL_CONST, i64, env, i64)
DEF_HELPER_FLAGS_1(s_to_memory, TCG_CALL_CONST | TCG_CALL_PURE, i32, i64)
DEF_HELPER_FLAGS_1(memory_to_s, TCG_CALL_CONST | TCG_CALL_PURE, i64, i32)
-DEF_HELPER_FLAGS_2(adds, TCG_CALL_CONST, i64, i64, i64)
-DEF_HELPER_FLAGS_2(subs, TCG_CALL_CONST, i64, i64, i64)
-DEF_HELPER_FLAGS_2(muls, TCG_CALL_CONST, i64, i64, i64)
-DEF_HELPER_FLAGS_2(divs, TCG_CALL_CONST, i64, i64, i64)
-DEF_HELPER_FLAGS_1(sqrts, TCG_CALL_CONST, i64, i64)
-
-DEF_HELPER_FLAGS_2(addt, TCG_CALL_CONST, i64, i64, i64)
-DEF_HELPER_FLAGS_2(subt, TCG_CALL_CONST, i64, i64, i64)
-DEF_HELPER_FLAGS_2(mult, TCG_CALL_CONST, i64, i64, i64)
-DEF_HELPER_FLAGS_2(divt, TCG_CALL_CONST, i64, i64, i64)
-DEF_HELPER_FLAGS_1(sqrtt, TCG_CALL_CONST, i64, i64)
-
-DEF_HELPER_FLAGS_2(cmptun, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64)
-DEF_HELPER_FLAGS_2(cmpteq, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64)
-DEF_HELPER_FLAGS_2(cmptle, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64)
-DEF_HELPER_FLAGS_2(cmptlt, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64)
-DEF_HELPER_FLAGS_2(cmpgeq, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64)
-DEF_HELPER_FLAGS_2(cmpgle, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64)
-DEF_HELPER_FLAGS_2(cmpglt, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64)
-
-DEF_HELPER_FLAGS_1(cvtts, TCG_CALL_CONST, i64, i64)
-DEF_HELPER_FLAGS_1(cvtst, TCG_CALL_CONST, i64, i64)
-DEF_HELPER_FLAGS_1(cvtqs, TCG_CALL_CONST, i64, i64)
-DEF_HELPER_FLAGS_1(cvtqt, TCG_CALL_CONST, i64, i64)
-DEF_HELPER_FLAGS_1(cvtqf, TCG_CALL_CONST, i64, i64)
-DEF_HELPER_FLAGS_1(cvtgf, TCG_CALL_CONST, i64, i64)
-DEF_HELPER_FLAGS_1(cvtgq, TCG_CALL_CONST, i64, i64)
-DEF_HELPER_FLAGS_1(cvtqg, TCG_CALL_CONST, i64, i64)
-
-DEF_HELPER_FLAGS_1(cvttq, TCG_CALL_CONST, i64, i64)
-DEF_HELPER_FLAGS_1(cvttq_c, TCG_CALL_CONST, i64, i64)
-DEF_HELPER_FLAGS_1(cvttq_svic, TCG_CALL_CONST, i64, i64)
-
-DEF_HELPER_FLAGS_1(setroundmode, TCG_CALL_CONST, void, i32)
-DEF_HELPER_FLAGS_1(setflushzero, TCG_CALL_CONST, void, i32)
-DEF_HELPER_FLAGS_0(fp_exc_clear, TCG_CALL_CONST, void)
-DEF_HELPER_FLAGS_0(fp_exc_get, TCG_CALL_CONST | TCG_CALL_PURE, i32)
-DEF_HELPER_2(fp_exc_raise, void, i32, i32)
-DEF_HELPER_2(fp_exc_raise_s, void, i32, i32)
-
-DEF_HELPER_1(ieee_input, i64, i64)
-DEF_HELPER_1(ieee_input_cmp, i64, i64)
-DEF_HELPER_1(ieee_input_s, i64, i64)
+DEF_HELPER_FLAGS_3(adds, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(subs, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(muls, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(divs, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(sqrts, TCG_CALL_CONST, i64, env, i64)
+
+DEF_HELPER_FLAGS_3(addt, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(subt, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(mult, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(divt, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(sqrtt, TCG_CALL_CONST, i64, env, i64)
+
+DEF_HELPER_FLAGS_3(cmptun, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(cmpteq, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(cmptle, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(cmptlt, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(cmpgeq, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(cmpgle, TCG_CALL_CONST, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(cmpglt, TCG_CALL_CONST, i64, env, i64, i64)
+
+DEF_HELPER_FLAGS_2(cvtts, TCG_CALL_CONST, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvtst, TCG_CALL_CONST, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvtqs, TCG_CALL_CONST, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvtqt, TCG_CALL_CONST, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvtqf, TCG_CALL_CONST, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvtgf, TCG_CALL_CONST, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvtgq, TCG_CALL_CONST, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvtqg, TCG_CALL_CONST, i64, env, i64)
+
+DEF_HELPER_FLAGS_2(cvttq, TCG_CALL_CONST, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvttq_c, TCG_CALL_CONST, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvttq_svic, TCG_CALL_CONST, i64, env, i64)
+
+DEF_HELPER_FLAGS_2(setroundmode, TCG_CALL_CONST, void, env, i32)
+DEF_HELPER_FLAGS_2(setflushzero, TCG_CALL_CONST, void, env, i32)
+DEF_HELPER_FLAGS_1(fp_exc_clear, TCG_CALL_CONST, void, env)
+DEF_HELPER_FLAGS_1(fp_exc_get, TCG_CALL_CONST | TCG_CALL_PURE, i32, env)
+DEF_HELPER_3(fp_exc_raise, void, env, i32, i32)
+DEF_HELPER_3(fp_exc_raise_s, void, env, i32, i32)
+
+DEF_HELPER_2(ieee_input, i64, env, i64)
+DEF_HELPER_2(ieee_input_cmp, i64, env, i64)
+DEF_HELPER_2(ieee_input_s, i64, env, i64)
#if !defined (CONFIG_USER_ONLY)
DEF_HELPER_1(hw_ret, void, i64)
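
Each DEF_HELPER_FLAGS_N line above declares both the C prototype implemented in fpu_helper.c and the gen_helper_*() emitter used by translate.c, with the "env" type standing for the CPU state pointer. As an illustration of how the new declarations are consumed (the expansion itself is QEMU's generic helper machinery, not part of this patch), the cmpteq entry corresponds roughly to:

    /* Prototype implemented in fpu_helper.c: */
    uint64_t helper_cmpteq(CPUAlphaState *env, uint64_t a, uint64_t b);

    /* Emission from translate.c, passing the TCG env pointer: */
    gen_helper_cmpteq(cpu_fir[rc], cpu_env, va, vb);

The comparison helpers also lose TCG_CALL_PURE here, which is consistent with their now taking the mutable FP status in env as an explicit argument.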
diff --git a/target-alpha/op_helper.c b/target-alpha/op_helper.c
index af5fa824d5..6711d998b9 100644
--- a/target-alpha/op_helper.c
+++ b/target-alpha/op_helper.c
@@ -25,8 +25,6 @@
#include "sysemu.h"
#include "qemu-timer.h"
-#define FP_STATUS (env->fp_status)
-
/*****************************************************************************/
/* Exceptions processing helpers */
@@ -117,816 +115,6 @@ uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
return tl;
}
-/* Floating point helpers */
-
-void helper_setroundmode (uint32_t val)
-{
- set_float_rounding_mode(val, &FP_STATUS);
-}
-
-void helper_setflushzero (uint32_t val)
-{
- set_flush_to_zero(val, &FP_STATUS);
-}
-
-void helper_fp_exc_clear (void)
-{
- set_float_exception_flags(0, &FP_STATUS);
-}
-
-uint32_t helper_fp_exc_get (void)
-{
- return get_float_exception_flags(&FP_STATUS);
-}
-
-/* Raise exceptions for ieee fp insns without software completion.
- In that case there are no exceptions that don't trap; the mask
- doesn't apply. */
-void helper_fp_exc_raise(uint32_t exc, uint32_t regno)
-{
- if (exc) {
- uint32_t hw_exc = 0;
-
- if (exc & float_flag_invalid) {
- hw_exc |= EXC_M_INV;
- }
- if (exc & float_flag_divbyzero) {
- hw_exc |= EXC_M_DZE;
- }
- if (exc & float_flag_overflow) {
- hw_exc |= EXC_M_FOV;
- }
- if (exc & float_flag_underflow) {
- hw_exc |= EXC_M_UNF;
- }
- if (exc & float_flag_inexact) {
- hw_exc |= EXC_M_INE;
- }
-
- arith_excp(env, GETPC(), hw_exc, 1ull << regno);
- }
-}
-
-/* Raise exceptions for ieee fp insns with software completion. */
-void helper_fp_exc_raise_s(uint32_t exc, uint32_t regno)
-{
- if (exc) {
- env->fpcr_exc_status |= exc;
-
- exc &= ~env->fpcr_exc_mask;
- if (exc) {
- helper_fp_exc_raise(exc, regno);
- }
- }
-}
-
-/* Input remapping without software completion. Handle denormal-map-to-zero
- and trap for all other non-finite numbers. */
-uint64_t helper_ieee_input(uint64_t val)
-{
- uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
- uint64_t frac = val & 0xfffffffffffffull;
-
- if (exp == 0) {
- if (frac != 0) {
- /* If DNZ is set flush denormals to zero on input. */
- if (env->fpcr_dnz) {
- val &= 1ull << 63;
- } else {
- arith_excp(env, GETPC(), EXC_M_UNF, 0);
- }
- }
- } else if (exp == 0x7ff) {
- /* Infinity or NaN. */
- /* ??? I'm not sure these exception bit flags are correct. I do
- know that the Linux kernel, at least, doesn't rely on them and
- just emulates the insn to figure out what exception to use. */
- arith_excp(env, GETPC(), frac ? EXC_M_INV : EXC_M_FOV, 0);
- }
- return val;
-}
-
-/* Similar, but does not trap for infinities. Used for comparisons. */
-uint64_t helper_ieee_input_cmp(uint64_t val)
-{
- uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
- uint64_t frac = val & 0xfffffffffffffull;
-
- if (exp == 0) {
- if (frac != 0) {
- /* If DNZ is set flush denormals to zero on input. */
- if (env->fpcr_dnz) {
- val &= 1ull << 63;
- } else {
- arith_excp(env, GETPC(), EXC_M_UNF, 0);
- }
- }
- } else if (exp == 0x7ff && frac) {
- /* NaN. */
- arith_excp(env, GETPC(), EXC_M_INV, 0);
- }
- return val;
-}
-
-/* Input remapping with software completion enabled. All we have to do
- is handle denormal-map-to-zero; all other inputs get exceptions as
- needed from the actual operation. */
-uint64_t helper_ieee_input_s(uint64_t val)
-{
- if (env->fpcr_dnz) {
- uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
- if (exp == 0) {
- val &= 1ull << 63;
- }
- }
- return val;
-}
-
-/* F floating (VAX) */
-static inline uint64_t float32_to_f(float32 fa)
-{
- uint64_t r, exp, mant, sig;
- CPU_FloatU a;
-
- a.f = fa;
- sig = ((uint64_t)a.l & 0x80000000) << 32;
- exp = (a.l >> 23) & 0xff;
- mant = ((uint64_t)a.l & 0x007fffff) << 29;
-
- if (exp == 255) {
- /* NaN or infinity */
- r = 1; /* VAX dirty zero */
- } else if (exp == 0) {
- if (mant == 0) {
- /* Zero */
- r = 0;
- } else {
- /* Denormalized */
- r = sig | ((exp + 1) << 52) | mant;
- }
- } else {
- if (exp >= 253) {
- /* Overflow */
- r = 1; /* VAX dirty zero */
- } else {
- r = sig | ((exp + 2) << 52);
- }
- }
-
- return r;
-}
-
-static inline float32 f_to_float32(uint64_t a)
-{
- uint32_t exp, mant_sig;
- CPU_FloatU r;
-
- exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
- mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);
-
- if (unlikely(!exp && mant_sig)) {
- /* Reserved operands / Dirty zero */
- dynamic_excp(env, GETPC(), EXCP_OPCDEC, 0);
- }
-
- if (exp < 3) {
- /* Underflow */
- r.l = 0;
- } else {
- r.l = ((exp - 2) << 23) | mant_sig;
- }
-
- return r.f;
-}
-
-uint32_t helper_f_to_memory (uint64_t a)
-{
- uint32_t r;
- r = (a & 0x00001fffe0000000ull) >> 13;
- r |= (a & 0x07ffe00000000000ull) >> 45;
- r |= (a & 0xc000000000000000ull) >> 48;
- return r;
-}
-
-uint64_t helper_memory_to_f (uint32_t a)
-{
- uint64_t r;
- r = ((uint64_t)(a & 0x0000c000)) << 48;
- r |= ((uint64_t)(a & 0x003fffff)) << 45;
- r |= ((uint64_t)(a & 0xffff0000)) << 13;
- if (!(a & 0x00004000))
- r |= 0x7ll << 59;
- return r;
-}
-
-/* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong. We should
- either implement VAX arithmetic properly or just signal invalid opcode. */
-
-uint64_t helper_addf (uint64_t a, uint64_t b)
-{
- float32 fa, fb, fr;
-
- fa = f_to_float32(a);
- fb = f_to_float32(b);
- fr = float32_add(fa, fb, &FP_STATUS);
- return float32_to_f(fr);
-}
-
-uint64_t helper_subf (uint64_t a, uint64_t b)
-{
- float32 fa, fb, fr;
-
- fa = f_to_float32(a);
- fb = f_to_float32(b);
- fr = float32_sub(fa, fb, &FP_STATUS);
- return float32_to_f(fr);
-}
-
-uint64_t helper_mulf (uint64_t a, uint64_t b)
-{
- float32 fa, fb, fr;
-
- fa = f_to_float32(a);
- fb = f_to_float32(b);
- fr = float32_mul(fa, fb, &FP_STATUS);
- return float32_to_f(fr);
-}
-
-uint64_t helper_divf (uint64_t a, uint64_t b)
-{
- float32 fa, fb, fr;
-
- fa = f_to_float32(a);
- fb = f_to_float32(b);
- fr = float32_div(fa, fb, &FP_STATUS);
- return float32_to_f(fr);
-}
-
-uint64_t helper_sqrtf (uint64_t t)
-{
- float32 ft, fr;
-
- ft = f_to_float32(t);
- fr = float32_sqrt(ft, &FP_STATUS);
- return float32_to_f(fr);
-}
-
-
-/* G floating (VAX) */
-static inline uint64_t float64_to_g(float64 fa)
-{
- uint64_t r, exp, mant, sig;
- CPU_DoubleU a;
-
- a.d = fa;
- sig = a.ll & 0x8000000000000000ull;
- exp = (a.ll >> 52) & 0x7ff;
- mant = a.ll & 0x000fffffffffffffull;
-
- if (exp == 2047) {
- /* NaN or infinity */
- r = 1; /* VAX dirty zero */
- } else if (exp == 0) {
- if (mant == 0) {
- /* Zero */
- r = 0;
- } else {
- /* Denormalized */
- r = sig | ((exp + 1) << 52) | mant;
- }
- } else {
- if (exp >= 2045) {
- /* Overflow */
- r = 1; /* VAX dirty zero */
- } else {
- r = sig | ((exp + 2) << 52);
- }
- }
-
- return r;
-}
-
-static inline float64 g_to_float64(uint64_t a)
-{
- uint64_t exp, mant_sig;
- CPU_DoubleU r;
-
- exp = (a >> 52) & 0x7ff;
- mant_sig = a & 0x800fffffffffffffull;
-
- if (!exp && mant_sig) {
- /* Reserved operands / Dirty zero */
- dynamic_excp(env, GETPC(), EXCP_OPCDEC, 0);
- }
-
- if (exp < 3) {
- /* Underflow */
- r.ll = 0;
- } else {
- r.ll = ((exp - 2) << 52) | mant_sig;
- }
-
- return r.d;
-}
-
-uint64_t helper_g_to_memory (uint64_t a)
-{
- uint64_t r;
- r = (a & 0x000000000000ffffull) << 48;
- r |= (a & 0x00000000ffff0000ull) << 16;
- r |= (a & 0x0000ffff00000000ull) >> 16;
- r |= (a & 0xffff000000000000ull) >> 48;
- return r;
-}
-
-uint64_t helper_memory_to_g (uint64_t a)
-{
- uint64_t r;
- r = (a & 0x000000000000ffffull) << 48;
- r |= (a & 0x00000000ffff0000ull) << 16;
- r |= (a & 0x0000ffff00000000ull) >> 16;
- r |= (a & 0xffff000000000000ull) >> 48;
- return r;
-}
-
-uint64_t helper_addg (uint64_t a, uint64_t b)
-{
- float64 fa, fb, fr;
-
- fa = g_to_float64(a);
- fb = g_to_float64(b);
- fr = float64_add(fa, fb, &FP_STATUS);
- return float64_to_g(fr);
-}
-
-uint64_t helper_subg (uint64_t a, uint64_t b)
-{
- float64 fa, fb, fr;
-
- fa = g_to_float64(a);
- fb = g_to_float64(b);
- fr = float64_sub(fa, fb, &FP_STATUS);
- return float64_to_g(fr);
-}
-
-uint64_t helper_mulg (uint64_t a, uint64_t b)
-{
- float64 fa, fb, fr;
-
- fa = g_to_float64(a);
- fb = g_to_float64(b);
- fr = float64_mul(fa, fb, &FP_STATUS);
- return float64_to_g(fr);
-}
-
-uint64_t helper_divg (uint64_t a, uint64_t b)
-{
- float64 fa, fb, fr;
-
- fa = g_to_float64(a);
- fb = g_to_float64(b);
- fr = float64_div(fa, fb, &FP_STATUS);
- return float64_to_g(fr);
-}
-
-uint64_t helper_sqrtg (uint64_t a)
-{
- float64 fa, fr;
-
- fa = g_to_float64(a);
- fr = float64_sqrt(fa, &FP_STATUS);
- return float64_to_g(fr);
-}
-
-
-/* S floating (single) */
-
-/* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg. */
-static inline uint64_t float32_to_s_int(uint32_t fi)
-{
- uint32_t frac = fi & 0x7fffff;
- uint32_t sign = fi >> 31;
- uint32_t exp_msb = (fi >> 30) & 1;
- uint32_t exp_low = (fi >> 23) & 0x7f;
- uint32_t exp;
-
- exp = (exp_msb << 10) | exp_low;
- if (exp_msb) {
- if (exp_low == 0x7f)
- exp = 0x7ff;
- } else {
- if (exp_low != 0x00)
- exp |= 0x380;
- }
-
- return (((uint64_t)sign << 63)
- | ((uint64_t)exp << 52)
- | ((uint64_t)frac << 29));
-}
-
-static inline uint64_t float32_to_s(float32 fa)
-{
- CPU_FloatU a;
- a.f = fa;
- return float32_to_s_int(a.l);
-}
-
-static inline uint32_t s_to_float32_int(uint64_t a)
-{
- return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
-}
-
-static inline float32 s_to_float32(uint64_t a)
-{
- CPU_FloatU r;
- r.l = s_to_float32_int(a);
- return r.f;
-}
-
-uint32_t helper_s_to_memory (uint64_t a)
-{
- return s_to_float32_int(a);
-}
-
-uint64_t helper_memory_to_s (uint32_t a)
-{
- return float32_to_s_int(a);
-}
-
-uint64_t helper_adds (uint64_t a, uint64_t b)
-{
- float32 fa, fb, fr;
-
- fa = s_to_float32(a);
- fb = s_to_float32(b);
- fr = float32_add(fa, fb, &FP_STATUS);
- return float32_to_s(fr);
-}
-
-uint64_t helper_subs (uint64_t a, uint64_t b)
-{
- float32 fa, fb, fr;
-
- fa = s_to_float32(a);
- fb = s_to_float32(b);
- fr = float32_sub(fa, fb, &FP_STATUS);
- return float32_to_s(fr);
-}
-
-uint64_t helper_muls (uint64_t a, uint64_t b)
-{
- float32 fa, fb, fr;
-
- fa = s_to_float32(a);
- fb = s_to_float32(b);
- fr = float32_mul(fa, fb, &FP_STATUS);
- return float32_to_s(fr);
-}
-
-uint64_t helper_divs (uint64_t a, uint64_t b)
-{
- float32 fa, fb, fr;
-
- fa = s_to_float32(a);
- fb = s_to_float32(b);
- fr = float32_div(fa, fb, &FP_STATUS);
- return float32_to_s(fr);
-}
-
-uint64_t helper_sqrts (uint64_t a)
-{
- float32 fa, fr;
-
- fa = s_to_float32(a);
- fr = float32_sqrt(fa, &FP_STATUS);
- return float32_to_s(fr);
-}
-
-
-/* T floating (double) */
-static inline float64 t_to_float64(uint64_t a)
-{
- /* Memory format is the same as float64 */
- CPU_DoubleU r;
- r.ll = a;
- return r.d;
-}
-
-static inline uint64_t float64_to_t(float64 fa)
-{
- /* Memory format is the same as float64 */
- CPU_DoubleU r;
- r.d = fa;
- return r.ll;
-}
-
-uint64_t helper_addt (uint64_t a, uint64_t b)
-{
- float64 fa, fb, fr;
-
- fa = t_to_float64(a);
- fb = t_to_float64(b);
- fr = float64_add(fa, fb, &FP_STATUS);
- return float64_to_t(fr);
-}
-
-uint64_t helper_subt (uint64_t a, uint64_t b)
-{
- float64 fa, fb, fr;
-
- fa = t_to_float64(a);
- fb = t_to_float64(b);
- fr = float64_sub(fa, fb, &FP_STATUS);
- return float64_to_t(fr);
-}
-
-uint64_t helper_mult (uint64_t a, uint64_t b)
-{
- float64 fa, fb, fr;
-
- fa = t_to_float64(a);
- fb = t_to_float64(b);
- fr = float64_mul(fa, fb, &FP_STATUS);
- return float64_to_t(fr);
-}
-
-uint64_t helper_divt (uint64_t a, uint64_t b)
-{
- float64 fa, fb, fr;
-
- fa = t_to_float64(a);
- fb = t_to_float64(b);
- fr = float64_div(fa, fb, &FP_STATUS);
- return float64_to_t(fr);
-}
-
-uint64_t helper_sqrtt (uint64_t a)
-{
- float64 fa, fr;
-
- fa = t_to_float64(a);
- fr = float64_sqrt(fa, &FP_STATUS);
- return float64_to_t(fr);
-}
-
-/* Comparisons */
-uint64_t helper_cmptun (uint64_t a, uint64_t b)
-{
- float64 fa, fb;
-
- fa = t_to_float64(a);
- fb = t_to_float64(b);
-
- if (float64_unordered_quiet(fa, fb, &FP_STATUS)) {
- return 0x4000000000000000ULL;
- } else {
- return 0;
- }
-}
-
-uint64_t helper_cmpteq(uint64_t a, uint64_t b)
-{
- float64 fa, fb;
-
- fa = t_to_float64(a);
- fb = t_to_float64(b);
-
- if (float64_eq_quiet(fa, fb, &FP_STATUS))
- return 0x4000000000000000ULL;
- else
- return 0;
-}
-
-uint64_t helper_cmptle(uint64_t a, uint64_t b)
-{
- float64 fa, fb;
-
- fa = t_to_float64(a);
- fb = t_to_float64(b);
-
- if (float64_le(fa, fb, &FP_STATUS))
- return 0x4000000000000000ULL;
- else
- return 0;
-}
-
-uint64_t helper_cmptlt(uint64_t a, uint64_t b)
-{
- float64 fa, fb;
-
- fa = t_to_float64(a);
- fb = t_to_float64(b);
-
- if (float64_lt(fa, fb, &FP_STATUS))
- return 0x4000000000000000ULL;
- else
- return 0;
-}
-
-uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
-{
- float64 fa, fb;
-
- fa = g_to_float64(a);
- fb = g_to_float64(b);
-
- if (float64_eq_quiet(fa, fb, &FP_STATUS))
- return 0x4000000000000000ULL;
- else
- return 0;
-}
-
-uint64_t helper_cmpgle(uint64_t a, uint64_t b)
-{
- float64 fa, fb;
-
- fa = g_to_float64(a);
- fb = g_to_float64(b);
-
- if (float64_le(fa, fb, &FP_STATUS))
- return 0x4000000000000000ULL;
- else
- return 0;
-}
-
-uint64_t helper_cmpglt(uint64_t a, uint64_t b)
-{
- float64 fa, fb;
-
- fa = g_to_float64(a);
- fb = g_to_float64(b);
-
- if (float64_lt(fa, fb, &FP_STATUS))
- return 0x4000000000000000ULL;
- else
- return 0;
-}
-
-/* Floating point format conversion */
-uint64_t helper_cvtts (uint64_t a)
-{
- float64 fa;
- float32 fr;
-
- fa = t_to_float64(a);
- fr = float64_to_float32(fa, &FP_STATUS);
- return float32_to_s(fr);
-}
-
-uint64_t helper_cvtst (uint64_t a)
-{
- float32 fa;
- float64 fr;
-
- fa = s_to_float32(a);
- fr = float32_to_float64(fa, &FP_STATUS);
- return float64_to_t(fr);
-}
-
-uint64_t helper_cvtqs (uint64_t a)
-{
- float32 fr = int64_to_float32(a, &FP_STATUS);
- return float32_to_s(fr);
-}
-
-/* Implement float64 to uint64 conversion without saturation -- we must
- supply the truncated result. This behaviour is used by the compiler
- to get unsigned conversion for free with the same instruction.
-
- The VI flag is set when overflow or inexact exceptions should be raised. */
-
-static inline uint64_t helper_cvttq_internal(uint64_t a, int roundmode, int VI)
-{
- uint64_t frac, ret = 0;
- uint32_t exp, sign, exc = 0;
- int shift;
-
- sign = (a >> 63);
- exp = (uint32_t)(a >> 52) & 0x7ff;
- frac = a & 0xfffffffffffffull;
-
- if (exp == 0) {
- if (unlikely(frac != 0)) {
- goto do_underflow;
- }
- } else if (exp == 0x7ff) {
- exc = (frac ? float_flag_invalid : VI ? float_flag_overflow : 0);
- } else {
- /* Restore implicit bit. */
- frac |= 0x10000000000000ull;
-
- shift = exp - 1023 - 52;
- if (shift >= 0) {
- /* In this case the number is so large that we must shift
- the fraction left. There is no rounding to do. */
- if (shift < 63) {
- ret = frac << shift;
- if (VI && (ret >> shift) != frac) {
- exc = float_flag_overflow;
- }
- }
- } else {
- uint64_t round;
-
- /* In this case the number is smaller than the fraction as
- represented by the 52 bit number. Here we must think
- about rounding the result. Handle this by shifting the
- fractional part of the number into the high bits of ROUND.
- This will let us efficiently handle round-to-nearest. */
- shift = -shift;
- if (shift < 63) {
- ret = frac >> shift;
- round = frac << (64 - shift);
- } else {
- /* The exponent is so small we shift out everything.
- Leave a sticky bit for proper rounding below. */
- do_underflow:
- round = 1;
- }
-
- if (round) {
- exc = (VI ? float_flag_inexact : 0);
- switch (roundmode) {
- case float_round_nearest_even:
- if (round == (1ull << 63)) {
- /* Fraction is exactly 0.5; round to even. */
- ret += (ret & 1);
- } else if (round > (1ull << 63)) {
- ret += 1;
- }
- break;
- case float_round_to_zero:
- break;
- case float_round_up:
- ret += 1 - sign;
- break;
- case float_round_down:
- ret += sign;
- break;
- }
- }
- }
- if (sign) {
- ret = -ret;
- }
- }
- if (unlikely(exc)) {
- float_raise(exc, &FP_STATUS);
- }
-
- return ret;
-}
-
-uint64_t helper_cvttq(uint64_t a)
-{
- return helper_cvttq_internal(a, FP_STATUS.float_rounding_mode, 1);
-}
-
-uint64_t helper_cvttq_c(uint64_t a)
-{
- return helper_cvttq_internal(a, float_round_to_zero, 0);
-}
-
-uint64_t helper_cvttq_svic(uint64_t a)
-{
- return helper_cvttq_internal(a, float_round_to_zero, 1);
-}
-
-uint64_t helper_cvtqt (uint64_t a)
-{
- float64 fr = int64_to_float64(a, &FP_STATUS);
- return float64_to_t(fr);
-}
-
-uint64_t helper_cvtqf (uint64_t a)
-{
- float32 fr = int64_to_float32(a, &FP_STATUS);
- return float32_to_f(fr);
-}
-
-uint64_t helper_cvtgf (uint64_t a)
-{
- float64 fa;
- float32 fr;
-
- fa = g_to_float64(a);
- fr = float64_to_float32(fa, &FP_STATUS);
- return float32_to_f(fr);
-}
-
-uint64_t helper_cvtgq (uint64_t a)
-{
- float64 fa = g_to_float64(a);
- return float64_to_int64_round_to_zero(fa, &FP_STATUS);
-}
-
-uint64_t helper_cvtqg (uint64_t a)
-{
- float64 fr;
- fr = int64_to_float64(a, &FP_STATUS);
- return float64_to_g(fr);
-}
-
/* PALcode support special instructions */
#if !defined (CONFIG_USER_ONLY)
void helper_hw_ret (uint64_t a)
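
In the translate.c hunks that follow, the generic emitters (gen_ieee_arith2, gen_ieee_intcvt, gen_ieee_arith3, gen_ieee_compare) change their helper-pointer types from void (*)(TCGv, TCGv) and void (*)(TCGv, TCGv, TCGv) to versions with a TCGv_ptr slot, because at translation time the env argument is the TCGv_ptr cpu_env rather than a CPUAlphaState *. A condensed sketch of the new shape, as it appears in those hunks:

    /* Helper pointer now takes (dest, env, src)... */
    static void gen_ieee_arith2(DisasContext *ctx,
                                void (*helper)(TCGv, TCGv_ptr, TCGv),
                                int rb, int rc, int fn11);

    /* ...and is invoked with cpu_env threaded through: */
    helper(cpu_fir[rc], cpu_env, vb);
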
diff --git a/target-alpha/translate.c b/target-alpha/translate.c
index 0c06b55a19..a9b97a3188 100644
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -611,7 +611,8 @@ static void gen_qual_roundmode(DisasContext *ctx, int fn11)
tcg_gen_movi_i32(tmp, float_round_down);
break;
case QUAL_RM_D:
- tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUAlphaState, fpcr_dyn_round));
+ tcg_gen_ld8u_i32(tmp, cpu_env,
+ offsetof(CPUAlphaState, fpcr_dyn_round));
break;
}
@@ -641,7 +642,8 @@ static void gen_qual_flushzero(DisasContext *ctx, int fn11)
tmp = tcg_temp_new_i32();
if (fn11) {
/* Underflow is enabled, use the FPCR setting. */
- tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUAlphaState, fpcr_flush_to_zero));
+ tcg_gen_ld8u_i32(tmp, cpu_env,
+ offsetof(CPUAlphaState, fpcr_flush_to_zero));
} else {
/* Underflow is disabled, force flush-to-zero. */
tcg_gen_movi_i32(tmp, 1);
@@ -663,11 +665,11 @@ static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
if (reg == 31) {
tcg_gen_movi_i64(val, 0);
} else if (fn11 & QUAL_S) {
- gen_helper_ieee_input_s(val, cpu_fir[reg]);
+ gen_helper_ieee_input_s(val, cpu_env, cpu_fir[reg]);
} else if (is_cmp) {
- gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
+ gen_helper_ieee_input_cmp(val, cpu_env, cpu_fir[reg]);
} else {
- gen_helper_ieee_input(val, cpu_fir[reg]);
+ gen_helper_ieee_input(val, cpu_env, cpu_fir[reg]);
}
return val;
}
@@ -680,7 +682,7 @@ static void gen_fp_exc_clear(void)
offsetof(CPUAlphaState, fp_status.float_exception_flags));
tcg_temp_free_i32(zero);
#else
- gen_helper_fp_exc_clear();
+ gen_helper_fp_exc_clear(cpu_env);
#endif
}
@@ -698,7 +700,7 @@ static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
tcg_gen_ld8u_i32(exc, cpu_env,
offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
- gen_helper_fp_exc_get(exc);
+ gen_helper_fp_exc_get(exc, cpu_env);
#endif
if (ignore) {
@@ -713,9 +715,9 @@ static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
reg = tcg_const_i32(rc + 32);
if (fn11 & QUAL_S) {
- gen_helper_fp_exc_raise_s(exc, reg);
+ gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
} else {
- gen_helper_fp_exc_raise(exc, reg);
+ gen_helper_fp_exc_raise(cpu_env, exc, reg);
}
tcg_temp_free_i32(reg);
@@ -784,20 +786,20 @@ static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
gen_fcvtql(rb, rc);
}
-#define FARITH2(name) \
-static inline void glue(gen_f, name)(int rb, int rc) \
-{ \
- if (unlikely(rc == 31)) { \
- return; \
- } \
- if (rb != 31) { \
- gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
- } else { \
- TCGv tmp = tcg_const_i64(0); \
- gen_helper_ ## name (cpu_fir[rc], tmp); \
- tcg_temp_free(tmp); \
- } \
-}
+#define FARITH2(name) \
+ static inline void glue(gen_f, name)(int rb, int rc) \
+ { \
+ if (unlikely(rc == 31)) { \
+ return; \
+ } \
+ if (rb != 31) { \
+ gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]); \
+ } else { \
+ TCGv tmp = tcg_const_i64(0); \
+ gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp); \
+ tcg_temp_free(tmp); \
+ } \
+ }
/* ??? VAX instruction qualifiers ignored. */
FARITH2(sqrtf)
@@ -807,7 +809,8 @@ FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)
-static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
+static void gen_ieee_arith2(DisasContext *ctx,
+ void (*helper)(TCGv, TCGv_ptr, TCGv),
int rb, int rc, int fn11)
{
TCGv vb;
@@ -823,7 +826,7 @@ static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
gen_fp_exc_clear();
vb = gen_ieee_input(rb, fn11, 0);
- helper(cpu_fir[rc], vb);
+ helper(cpu_fir[rc], cpu_env, vb);
tcg_temp_free(vb);
gen_fp_exc_raise(rc, fn11);
@@ -859,18 +862,18 @@ static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
also do not have integer overflow enabled. Special case that. */
switch (fn11) {
case QUAL_RM_C:
- gen_helper_cvttq_c(cpu_fir[rc], vb);
+ gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
break;
case QUAL_V | QUAL_RM_C:
case QUAL_S | QUAL_V | QUAL_RM_C:
ignore = float_flag_inexact;
/* FALLTHRU */
case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
- gen_helper_cvttq_svic(cpu_fir[rc], vb);
+ gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
break;
default:
gen_qual_roundmode(ctx, fn11);
- gen_helper_cvttq(cpu_fir[rc], vb);
+ gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
break;
@@ -880,7 +883,8 @@ static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
-static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
+static void gen_ieee_intcvt(DisasContext *ctx,
+ void (*helper)(TCGv, TCGv_ptr, TCGv),
int rb, int rc, int fn11)
{
TCGv vb;
@@ -904,10 +908,10 @@ static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
inexact handling is requested. */
if (fn11 & QUAL_I) {
gen_fp_exc_clear();
- helper(cpu_fir[rc], vb);
+ helper(cpu_fir[rc], cpu_env, vb);
gen_fp_exc_raise(rc, fn11);
} else {
- helper(cpu_fir[rc], vb);
+ helper(cpu_fir[rc], cpu_env, vb);
}
if (rb == 31) {
@@ -999,34 +1003,34 @@ static inline void gen_fcpyse(int ra, int rb, int rc)
gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
-#define FARITH3(name) \
-static inline void glue(gen_f, name)(int ra, int rb, int rc) \
-{ \
- TCGv va, vb; \
- \
- if (unlikely(rc == 31)) { \
- return; \
- } \
- if (ra == 31) { \
- va = tcg_const_i64(0); \
- } else { \
- va = cpu_fir[ra]; \
- } \
- if (rb == 31) { \
- vb = tcg_const_i64(0); \
- } else { \
- vb = cpu_fir[rb]; \
- } \
- \
- gen_helper_ ## name (cpu_fir[rc], va, vb); \
- \
- if (ra == 31) { \
- tcg_temp_free(va); \
- } \
- if (rb == 31) { \
- tcg_temp_free(vb); \
- } \
-}
+#define FARITH3(name) \
+ static inline void glue(gen_f, name)(int ra, int rb, int rc) \
+ { \
+ TCGv va, vb; \
+ \
+ if (unlikely(rc == 31)) { \
+ return; \
+ } \
+ if (ra == 31) { \
+ va = tcg_const_i64(0); \
+ } else { \
+ va = cpu_fir[ra]; \
+ } \
+ if (rb == 31) { \
+ vb = tcg_const_i64(0); \
+ } else { \
+ vb = cpu_fir[rb]; \
+ } \
+ \
+ gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb); \
+ \
+ if (ra == 31) { \
+ tcg_temp_free(va); \
+ } \
+ if (rb == 31) { \
+ tcg_temp_free(vb); \
+ } \
+ }
/* ??? VAX instruction qualifiers ignored. */
FARITH3(addf)
@@ -1042,7 +1046,7 @@ FARITH3(cmpglt)
FARITH3(cmpgle)
static void gen_ieee_arith3(DisasContext *ctx,
- void (*helper)(TCGv, TCGv, TCGv),
+ void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
int ra, int rb, int rc, int fn11)
{
TCGv va, vb;
@@ -1059,7 +1063,7 @@ static void gen_ieee_arith3(DisasContext *ctx,
va = gen_ieee_input(ra, fn11, 0);
vb = gen_ieee_input(rb, fn11, 0);
- helper(cpu_fir[rc], va, vb);
+ helper(cpu_fir[rc], cpu_env, va, vb);
tcg_temp_free(va);
tcg_temp_free(vb);
@@ -1082,7 +1086,7 @@ IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
- void (*helper)(TCGv, TCGv, TCGv),
+ void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
int ra, int rb, int rc, int fn11)
{
TCGv va, vb;
@@ -1097,7 +1101,7 @@ static void gen_ieee_compare(DisasContext *ctx,
va = gen_ieee_input(ra, fn11, 1);
vb = gen_ieee_input(rb, fn11, 1);
- helper(cpu_fir[rc], va, vb);
+ helper(cpu_fir[rc], cpu_env, va, vb);
tcg_temp_free(va);
tcg_temp_free(vb);