about | summary | refs | log | tree | commit | diff
path: root/target-arm/translate-a64.c
diff options
context:
space:
mode:
author    Peter Maydell <peter.maydell@linaro.org>  2014-04-15 19:18:39 +0100
committer Peter Maydell <peter.maydell@linaro.org>  2014-04-17 21:34:03 +0100
commit    8c6afa6ab158467d1938cc92022135bc7a872006 (patch)
tree      828b37b0fba9f42d50862e612e65350d32b6170c /target-arm/translate-a64.c
parent    00892383c9f5f663230921c6cf6b6d3a8a61b45b (diff)
target-arm: A64: Correctly fault FP/Neon if CPACR.FPEN set
For the A64 instruction set, the only FP/Neon disable trap is the CPACR FPEN bits, which may indicate "enabled", "disabled" or "disabled for EL0". Add a bit to the AArch64 tb flags indicating whether FP/Neon access is currently enabled and make the decoder emit code to raise exceptions on use of FP/Neon insns if it is not.

We use a new flag in DisasContext rather than borrowing the existing vfp_enabled flag because the A32/T32 decoder is going to need both.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Acked-by: Peter Crosthwaite <peter.crosthwaite@xilinx.com>
---
I'm aware this is a rather hard to review patch; sorry. I have done an exhaustive check that we have fp access checks in all code paths with the aid of the assertions added in the next patch plus the code-coverage hack patch I posted to the list earlier.

This patch is correct as of 09e037354 ("target-arm: A64: Add saturating accumulate ops (USQADD/SUQADD)"), which was the last of the Neon insns to be added, so assuming no refactoring of the code it should be fine.
Diffstat (limited to 'target-arm/translate-a64.c')
-rw-r--r--  target-arm/translate-a64.c  300
1 file changed, 295 insertions, 5 deletions
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index 210df2d6b4..2f67af3e6c 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -890,6 +890,23 @@ static void do_vec_ld(DisasContext *s, int destidx, int element,
tcg_temp_free_i64(tcg_tmp);
}
+/* Check that FP/Neon access is enabled. If it is, return
+ * true. If not, emit code to generate an appropriate exception,
+ * and return false; the caller should not emit any code for
+ * the instruction. Note that this check must happen after all
+ * unallocated-encoding checks (otherwise the syndrome information
+ * for the resulting exception will be incorrect).
+ */
+static inline bool fp_access_check(DisasContext *s)
+{
+ if (s->cpacr_fpen) {
+ return true;
+ }
+
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false));
+ return false;
+}
+
/*
* This utility function is for doing register extension with an
* optional shift. You will likely want to pass a temporary for the
@@ -1728,6 +1745,9 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
return;
}
size = 2 + opc;
+ if (!fp_access_check(s)) {
+ return;
+ }
} else {
if (opc == 3) {
/* PRFM (literal) : prefetch */
@@ -1837,6 +1857,10 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
break;
}
+ if (is_vector && !fp_access_check(s)) {
+ return;
+ }
+
offset <<= size;
if (rn == 31) {
@@ -1930,6 +1954,9 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
return;
}
is_store = ((opc & 1) == 0);
+ if (!fp_access_check(s)) {
+ return;
+ }
} else {
if (size == 3 && opc == 2) {
/* PRFM - prefetch */
@@ -2050,6 +2077,9 @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn)
return;
}
is_store = !extract32(opc, 0, 1);
+ if (!fp_access_check(s)) {
+ return;
+ }
} else {
if (size == 3 && opc == 2) {
/* PRFM - prefetch */
@@ -2130,6 +2160,9 @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn)
return;
}
is_store = !extract32(opc, 0, 1);
+ if (!fp_access_check(s)) {
+ return;
+ }
} else {
if (size == 3 && opc == 2) {
/* PRFM - prefetch */
@@ -2272,6 +2305,10 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
if (rn == 31) {
gen_check_sp_alignment(s);
}
@@ -2398,6 +2435,10 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
g_assert_not_reached();
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
ebytes = 1 << scale;
if (rn == 31) {
@@ -3874,6 +3915,10 @@ static void disas_fp_compare(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
handle_fp_compare(s, type, rn, rm, opc & 1, opc & 2);
}
@@ -3902,6 +3947,10 @@ static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
if (cond < 0x0e) { /* not always */
int label_match = gen_new_label();
label_continue = gen_new_label();
@@ -3958,6 +4007,10 @@ static void disas_fp_csel(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
if (cond < 0x0e) { /* not always */
int label_match = gen_new_label();
label_continue = gen_new_label();
@@ -4175,6 +4228,10 @@ static void disas_fp_1src(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
break;
}
@@ -4184,9 +4241,17 @@ static void disas_fp_1src(DisasContext *s, uint32_t insn)
/* 32-to-32 and 64-to-64 ops */
switch (type) {
case 0:
+ if (!fp_access_check(s)) {
+ return;
+ }
+
handle_fp_1src_single(s, opcode, rd, rn);
break;
case 1:
+ if (!fp_access_check(s)) {
+ return;
+ }
+
handle_fp_1src_double(s, opcode, rd, rn);
break;
default:
@@ -4326,9 +4391,15 @@ static void disas_fp_2src(DisasContext *s, uint32_t insn)
switch (type) {
case 0:
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_fp_2src_single(s, opcode, rd, rn, rm);
break;
case 1:
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_fp_2src_double(s, opcode, rd, rn, rm);
break;
default:
@@ -4430,9 +4501,15 @@ static void disas_fp_3src(DisasContext *s, uint32_t insn)
switch (type) {
case 0:
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
break;
case 1:
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
break;
default:
@@ -4459,6 +4536,10 @@ static void disas_fp_imm(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
/* The imm8 encodes the sign bit, enough bits to represent
* an exponent in the range 01....1xx to 10....0xx,
* and the most significant 4 bits of the mantissa; see
@@ -4645,6 +4726,10 @@ static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
}
@@ -4744,6 +4829,9 @@ static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
break;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_fmov(s, rd, rn, type, itof);
} else {
/* actual FP conversions */
@@ -4754,6 +4842,9 @@ static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
}
}
@@ -4854,6 +4945,10 @@ static void disas_simd_ext(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
tcg_resh = tcg_temp_new_i64();
tcg_resl = tcg_temp_new_i64();
@@ -4924,6 +5019,10 @@ static void disas_simd_tb(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
/* This does a table lookup: for every byte element in the input
* we index into a table formed from up to four vector registers,
* and then the output is the result of the lookups. Our helper
@@ -4994,6 +5093,10 @@ static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
tcg_resl = tcg_const_i64(0);
tcg_resh = tcg_const_i64(0);
tcg_res = tcg_temp_new_i64();
@@ -5127,6 +5230,10 @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
esize = 8 << size;
elements = (is_q ? 128 : 64) / esize;
@@ -5259,6 +5366,10 @@ static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
index = imm5 >> (size + 1);
tmp = tcg_temp_new_i64();
@@ -5293,6 +5404,10 @@ static void handle_simd_dupes(DisasContext *s, int rd, int rn,
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
index = imm5 >> (size + 1);
/* This instruction just extracts the specified element and
@@ -5325,6 +5440,11 @@ static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
unallocated_encoding(s);
return;
}
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
for (i = 0; i < elements; i++) {
write_vec_element(s, cpu_reg(s, rn), rd, i, size);
}
@@ -5354,6 +5474,11 @@ static void handle_simd_inse(DisasContext *s, int rd, int rn,
unallocated_encoding(s);
return;
}
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
dst_index = extract32(imm5, 1+size, 5);
src_index = extract32(imm4, size, 4);
@@ -5386,6 +5511,10 @@ static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
idx = extract32(imm5, 1 + size, 4 - size);
write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
}
@@ -5423,6 +5552,11 @@ static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
return;
}
}
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
element = extract32(imm5, 1+size, 4);
tcg_rd = cpu_reg(s, rd);
@@ -5515,6 +5649,10 @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
/* See AdvSIMDExpandImm() in ARM ARM */
switch (cmode_3_1) {
case 0: /* Replicate(Zeros(24):imm8, 2) */
@@ -5663,6 +5801,10 @@ static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
TCGV_UNUSED_PTR(fpst);
break;
case 0xc: /* FMAXNMP */
@@ -5675,6 +5817,10 @@ static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
size = extract32(size, 0, 1) ? 3 : 2;
fpst = get_fpstatus_ptr();
break;
@@ -5893,6 +6039,10 @@ static void handle_scalar_simd_shri(DisasContext *s,
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
switch (opcode) {
case 0x02: /* SSRA / USRA (accumulate) */
accumulate = true;
@@ -5950,6 +6100,10 @@ static void handle_scalar_simd_shli(DisasContext *s, bool insert,
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
tcg_rn = read_fp_dreg(s, rn);
tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
@@ -6005,6 +6159,10 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
if (is_u_shift) {
narrowfn = unsigned_narrow_fns[size];
} else {
@@ -6087,6 +6245,10 @@ static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
}
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
if (size == 3) {
TCGv_i64 tcg_shift = tcg_const_i64(shift);
static NeonGenTwo64OpEnvFn * const fns[2][2] = {
@@ -6247,6 +6409,11 @@ static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
return;
}
}
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
/* immh == 0 would be a failure of the decode logic */
g_assert(immh);
@@ -6275,6 +6442,10 @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
assert(!(is_scalar && is_q));
tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
@@ -6438,6 +6609,10 @@ static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
if (size == 2) {
TCGv_i64 tcg_op1 = tcg_temp_new_i64();
TCGv_i64 tcg_op2 = tcg_temp_new_i64();
@@ -6822,6 +6997,10 @@ static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
return;
}
@@ -6854,6 +7033,10 @@ static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
tcg_rd = tcg_temp_new_i64();
if (size == 3) {
@@ -7057,7 +7240,13 @@ static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
int size, int rn, int rd)
{
bool is_double = (size == 3);
- TCGv_ptr fpst = get_fpstatus_ptr();
+ TCGv_ptr fpst;
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ fpst = get_fpstatus_ptr();
if (is_double) {
TCGv_i64 tcg_op = tcg_temp_new_i64();
@@ -7464,6 +7653,9 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
switch (opcode) {
case 0x3: /* USQADD / SUQADD*/
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_2misc_satacc(s, true, u, false, size, rn, rd);
return;
case 0x7: /* SQABS / SQNEG */
@@ -7493,6 +7685,9 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
return;
case 0xc ... 0xf:
@@ -7515,12 +7710,18 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
case 0x5d: /* UCVTF */
{
bool is_signed = (opcode == 0x1d);
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
return;
}
case 0x3d: /* FRECPE */
case 0x3f: /* FRECPX */
case 0x7d: /* FRSQRTE */
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
return;
case 0x1a: /* FCVTNS */
@@ -7545,6 +7746,9 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
return;
default:
@@ -7557,6 +7761,10 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
if (is_fcvt) {
tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
@@ -7660,6 +7868,10 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
switch (opcode) {
case 0x02: /* SSRA / USRA (accumulate) */
accumulate = true;
@@ -7731,6 +7943,10 @@ static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
for (i = 0; i < elements; i++) {
read_vec_element(s, tcg_rn, rn, i, size);
if (insert) {
@@ -7766,6 +7982,10 @@ static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
/* For the LL variants the store is larger than the load,
* so if rd == rn we would overwrite parts of our input.
* So load everything right now and use shifts in the main loop.
@@ -7800,6 +8020,10 @@ static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
tcg_rn = tcg_temp_new_i64();
tcg_rd = tcg_temp_new_i64();
tcg_final = tcg_temp_new_i64();
@@ -8296,6 +8520,9 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
break;
case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
@@ -8305,6 +8532,9 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
break;
case 14: /* PMULL, PMULL2 */
@@ -8317,6 +8547,9 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_pmull_64(s, is_q, rd, rn, rm);
return;
}
@@ -8342,6 +8575,10 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
return;
}
is_widening:
+ if (!fp_access_check(s)) {
+ return;
+ }
+
handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
break;
default:
@@ -8360,11 +8597,15 @@ static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
int size = extract32(insn, 22, 2);
bool is_u = extract32(insn, 29, 1);
bool is_q = extract32(insn, 30, 1);
- TCGv_i64 tcg_op1 = tcg_temp_new_i64();
- TCGv_i64 tcg_op2 = tcg_temp_new_i64();
- TCGv_i64 tcg_res[2];
+ TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
int pass;
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ tcg_op1 = tcg_temp_new_i64();
+ tcg_op2 = tcg_temp_new_i64();
tcg_res[0] = tcg_temp_new_i64();
tcg_res[1] = tcg_temp_new_i64();
@@ -8467,6 +8708,10 @@ static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
TCGV_UNUSED_PTR(fpst);
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
/* These operations work on the concatenated rm:rn, with each pair of
* adjacent elements being operated on to produce an element in the result.
*/
@@ -8659,6 +8904,10 @@ static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
case 0x5f: /* FDIV */
case 0x7a: /* FABD */
case 0x7c: /* FCMGT */
+ if (!fp_access_check(s)) {
+ return;
+ }
+
handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
return;
default:
@@ -8713,6 +8962,10 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
break;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
if (size == 3) {
for (pass = 0; pass < (is_q ? 2 : 1); pass++) {
TCGv_i64 tcg_op1 = tcg_temp_new_i64();
@@ -9077,6 +9330,10 @@ static void handle_rev(DisasContext *s, int opcode, bool u,
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
if (size == 0) {
/* Special case bytes, use bswap op on each group of elements */
int groups = dsize / (8 << grp_size);
@@ -9279,6 +9536,10 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
return;
case 0x4: /* CLS, CLZ */
@@ -9293,6 +9554,9 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
return;
case 0x13: /* SHLL, SHLL2 */
@@ -9300,6 +9564,9 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_shll(s, is_q, size, rn, rd);
return;
case 0xa: /* CMLT */
@@ -9321,6 +9588,9 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
return;
case 0x7: /* SQABS, SQNEG */
@@ -9356,6 +9626,9 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
return;
}
@@ -9414,6 +9687,9 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
return;
case 0x56: /* FCVTXN, FCVTXN2 */
@@ -9426,9 +9702,15 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
/* handle_2misc_narrow does a 2*size -> size operation, but these
* instructions encode the source size rather than dest size.
*/
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
return;
case 0x17: /* FCVTL, FCVTL2 */
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_2misc_widening(s, opcode, is_q, size, rn, rd);
return;
case 0x18: /* FRINTN */
@@ -9473,6 +9755,10 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
if (need_fpstatus) {
tcg_fpstatus = get_fpstatus_ptr();
} else {
@@ -9836,6 +10122,10 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
}
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
if (is_fp) {
fpst = get_fpstatus_ptr();
} else {
@@ -10334,7 +10624,7 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
#if !defined(CONFIG_USER_ONLY)
dc->user = (ARM_TBFLAG_AA64_EL(tb->flags) == 0);
#endif
- dc->vfp_enabled = 0;
+ dc->cpacr_fpen = ARM_TBFLAG_AA64_FPEN(tb->flags);
dc->vec_len = 0;
dc->vec_stride = 0;
dc->cp_regs = cpu->cp_regs;