author     Peter Maydell <peter.maydell@linaro.org>  2021-05-20 16:28:33 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2021-06-03 16:43:25 +0100
commit     69049bcf5114f7af379b4f3cccfb0b213b30f88a (patch)
tree       05efee656738c1b961f7301ac4a9fd9d4cb88b9a /target
parent     7df6a1ffdbdcaf98fa57747dc79216ac089e6215 (diff)
target/arm: Update feature checks for insns which are "MVE or FP"
Some v8M instructions are present if either the floating point extension or MVE is implemented. Update our implementation of them to check for MVE as well as for FP.

This is all the insns which use CheckDecodeFaults(ExtType_MveOrFp) or CheckDecodeFaults(ExtType_MveOrDpFp) in their pseudocode, which are essentially the loads and stores, moves and sysreg accesses, except for VMOV_reg_sp and VMOV_reg_dp, which we handle in subsequent patches because they need a refactor to provide a place to put the new MVE check.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210520152840.24453-3-peter.maydell@linaro.org
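The change follows one pattern throughout: each affected trans_ handler used to refuse to decode unless the relevant FP feature was present, and it now also accepts the insn when MVE is implemented. A minimal sketch of that shape, using the feature-test helpers visible in the diff below (trans_example and arg_example are placeholder names, not code from this patch):

static bool trans_example(DisasContext *s, arg_example *a)
{
    /*
     * Previously: if (!dc_isar_feature(aa32_fpsp_v2, s)) { return false; }
     * The insn is now also accepted when MVE is present, matching the
     * pseudocode's CheckDecodeFaults(ExtType_MveOrFp) check.
     */
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    /* ... access checks and the actual translation follow ... */
    return true;
}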
Diffstat (limited to 'target')
-rw-r--r--  target/arm/translate-vfp.c  48
1 file changed, 29 insertions(+), 19 deletions(-)
diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c
index 3da84f30a0..2202f8985d 100644
--- a/target/arm/translate-vfp.c
+++ b/target/arm/translate-vfp.c
@@ -543,11 +543,16 @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
     /* VMOV scalar to general purpose register */
     TCGv_i32 tmp;
 
-    /* SIZE == MO_32 is a VFP instruction; otherwise NEON. */
-    if (a->size == MO_32
-        ? !dc_isar_feature(aa32_fpsp_v2, s)
-        : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
-        return false;
+    /*
+     * SIZE == MO_32 is a VFP instruction; otherwise NEON. MVE has
+     * all sizes, whether the CPU has fp or not.
+     */
+    if (!dc_isar_feature(aa32_mve, s)) {
+        if (a->size == MO_32
+            ? !dc_isar_feature(aa32_fpsp_v2, s)
+            : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
+            return false;
+        }
     }
 
     /* UNDEF accesses to D16-D31 if they don't exist */
@@ -571,11 +576,16 @@ static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
     /* VMOV general purpose register to scalar */
     TCGv_i32 tmp;
 
-    /* SIZE == MO_32 is a VFP instruction; otherwise NEON. */
-    if (a->size == MO_32
-        ? !dc_isar_feature(aa32_fpsp_v2, s)
-        : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
-        return false;
+    /*
+     * SIZE == MO_32 is a VFP instruction; otherwise NEON. MVE has
+     * all sizes, whether the CPU has fp or not.
+     */
+    if (!dc_isar_feature(aa32_mve, s)) {
+        if (a->size == MO_32
+            ? !dc_isar_feature(aa32_fpsp_v2, s)
+            : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
+            return false;
+        }
     }
 
     /* UNDEF accesses to D16-D31 if they don't exist */
@@ -671,7 +681,7 @@ typedef enum FPSysRegCheckResult {
 
 static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
 {
-    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
+    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
         return FPSysRegCheckFailed;
     }
 
@@ -1254,7 +1264,7 @@ static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
 {
     TCGv_i32 tmp;
 
-    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
+    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
         return false;
     }
 
@@ -1287,7 +1297,7 @@ static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
 {
     TCGv_i32 tmp;
 
-    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
+    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
         return false;
     }
 
@@ -1329,7 +1339,7 @@ static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
      * floating point register. Note that this does not require support
      * for double precision arithmetic.
      */
-    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
+    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
         return false;
     }
 
@@ -1368,7 +1378,7 @@ static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a)
     uint32_t offset;
     TCGv_i32 addr, tmp;
 
-    if (!dc_isar_feature(aa32_fp16_arith, s)) {
+    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
         return false;
     }
 
@@ -1403,7 +1413,7 @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
     uint32_t offset;
     TCGv_i32 addr, tmp;
 
-    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
+    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
         return false;
     }
 
@@ -1439,7 +1449,7 @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
     TCGv_i64 tmp;
 
     /* Note that this does not require support for double arithmetic. */
-    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
+    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
         return false;
     }
 
@@ -1479,7 +1489,7 @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
     TCGv_i32 addr, tmp;
     int i, n;
 
-    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
+    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
         return false;
     }
 
@@ -1557,7 +1567,7 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
     int i, n;
 
     /* Note that this does not require support for double arithmetic. */
-    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
+    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
         return false;
     }
 
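Every converted check above gates on the same "scalar FP or MVE" condition. If one wanted to make that explicit, the repeated test could be factored into a helper; this is purely illustrative and not part of the patch, and dc_has_mve_or_fp is an invented name:

/* Hypothetical helper, not in the patch: true if the CPU has scalar FP or MVE. */
static bool dc_has_mve_or_fp(DisasContext *s)
{
    return dc_isar_feature(aa32_fpsp_v2, s) || dc_isar_feature(aa32_mve, s);
}

Each of the updated conditions is then equivalent to:

    if (!dc_has_mve_or_fp(s)) {
        return false;
    }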