author    Peter Maydell <peter.maydell@linaro.org>  2021-06-17 13:16:27 +0100
committer Peter Maydell <peter.maydell@linaro.org>  2021-06-21 17:12:52 +0100
commit    3e6d6e88d915d0fea1fbbf530801e2aec9df2d0d
tree      c95e37b1375a866ac7332b4379833437dabba0ca
parent    952aaa96b13150a7e655f8b48fea2259cc01fee2
target/arm: Implement MVE VADDV
Implement the MVE VADDV insn, which performs an addition across
vector lanes.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210617121628.20116-44-peter.maydell@linaro.org
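[Editor's note, not part of the patch: as orientation, a rough scalar model of what an unsigned VADDV.32 computes. Each word lane of Qm is added into a 32-bit scalar, and with the A bit set the sum additionally accumulates into the existing Rda value; predication and beat-wise execution are ignored here, and the function name is purely illustrative.]

/* Hypothetical reference model of unsigned VADDV.32 (illustration only) */
#include <stdint.h>

static uint32_t vaddv_u32_model(const uint32_t q[4], uint32_t rda, int accumulate)
{
    uint32_t sum = accumulate ? rda : 0;   /* A=1: start from Rda; A=0: start from 0 */
    for (int lane = 0; lane < 4; lane++) {
        sum += q[lane];                    /* wraps modulo 2^32, like the helper below */
    }
    return sum;
}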
 target/arm/helper-mve.h    |  7 +++++++
 target/arm/mve.decode      |  2 ++
 target/arm/mve_helper.c    | 24 ++++++++++++++++++++++++
 target/arm/translate-mve.c | 43 +++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 76 insertions(+), 0 deletions(-)
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index 161308b67e..4bbb9b3ae2 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -348,3 +348,10 @@
DEF_HELPER_FLAGS_4(mve_vrmlaldavhuw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
DEF_HELPER_FLAGS_4(mve_vrmlsldavhsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
DEF_HELPER_FLAGS_4(mve_vrmlsldavhxsw, TCG_CALL_NO_WG, i64, env, ptr, ptr, i64)
+
+DEF_HELPER_FLAGS_3(mve_vaddvsb, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vaddvub, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vaddvsh, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vaddvuh, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vaddvsw, TCG_CALL_NO_WG, i32, env, ptr, i32)
+DEF_HELPER_FLAGS_3(mve_vaddvuw, TCG_CALL_NO_WG, i32, env, ptr, i32)
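[Editor's note, a reading aid rather than patch text: each DEF_HELPER_FLAGS_3 line declares a three-argument helper returning i32, and TCG_CALL_NO_WG, as I understand it, tells TCG the helper does not write TCG globals. The first declaration corresponds to a C prototype roughly like the following, with 'i32' mapping to uint32_t, 'env' to CPUARMState * and 'ptr' to void *.]

/* Approximate prototype generated for DEF_HELPER_FLAGS_3(mve_vaddvsb, ...) */
uint32_t helper_mve_vaddvsb(CPUARMState *env, void *vm, uint32_t ra);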
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index 695097dcca..d9ece7be5d 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -252,6 +252,8 @@ VBRSR 1111 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
+# Vector add across vector
+VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rdalo
# Predicate operations
%mask_22_13 22:1 13:3
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index 9839d3e64a..81534f6166 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -1134,3 +1134,27 @@
DO_LDAVH(vrmlaldavhuw, 4, uint32_t, false, int128_add, int128_add, int128_make64)
DO_LDAVH(vrmlsldavhsw, 4, int32_t, false, int128_add, int128_sub, int128_makes64)
DO_LDAVH(vrmlsldavhxsw, 4, int32_t, true, int128_add, int128_sub, int128_makes64)
+
+/* Vector add across vector */
+#define DO_VADDV(OP, ESIZE, TYPE)                               \
+    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
+                                    uint32_t ra)                \
+    {                                                           \
+        uint16_t mask = mve_element_mask(env);                  \
+        unsigned e;                                             \
+        TYPE *m = vm;                                           \
+        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) {      \
+            if (mask & 1) {                                     \
+                ra += m[H##ESIZE(e)];                           \
+            }                                                   \
+        }                                                       \
+        mve_advance_vpt(env);                                   \
+        return ra;                                              \
+    }
+
+DO_VADDV(vaddvsb, 1, uint8_t)
+DO_VADDV(vaddvsh, 2, uint16_t)
+DO_VADDV(vaddvsw, 4, uint32_t)
+DO_VADDV(vaddvub, 1, uint8_t)
+DO_VADDV(vaddvuh, 2, uint16_t)
+DO_VADDV(vaddvuw, 4, uint32_t)
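[Editor's note, not part of the patch: to make the macro above easier to follow, here is roughly what one instantiation expands to. HELPER() and H1() are existing QEMU macros for helper naming and host-endian element addressing; the expansion below is my own gloss.]

/* Approximate expansion of DO_VADDV(vaddvub, 1, uint8_t) */
uint32_t helper_mve_vaddvub(CPUARMState *env, void *vm, uint32_t ra)
{
    uint16_t mask = mve_element_mask(env);  /* per-lane predicate/beat mask */
    unsigned e;
    uint8_t *m = vm;
    for (e = 0; e < 16 / 1; e++, mask >>= 1) {
        if (mask & 1) {                     /* only predicated-in lanes contribute */
            ra += m[H1(e)];
        }
    }
    mve_advance_vpt(env);                   /* advance the VPT/ECI state */
    return ra;
}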
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index 73c15f4133..04d84e8846 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -33,6 +33,7 @@ typedef void MVEGenOneOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
+typedef void MVEGenVADDVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32);
/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
static inline long mve_qreg_offset(unsigned reg)
@@ -743,3 +744,45 @@ static bool trans_VPST(DisasContext *s, arg_VPST *a)
mve_update_and_store_eci(s);
return true;
}
+
+static bool trans_VADDV(DisasContext *s, arg_VADDV *a)
+{
+    /* VADDV: vector add across vector */
+    static MVEGenVADDVFn * const fns[4][2] = {
+        { gen_helper_mve_vaddvsb, gen_helper_mve_vaddvub },
+        { gen_helper_mve_vaddvsh, gen_helper_mve_vaddvuh },
+        { gen_helper_mve_vaddvsw, gen_helper_mve_vaddvuw },
+        { NULL, NULL }
+    };
+    TCGv_ptr qm;
+    TCGv_i32 rda;
+
+    if (!dc_isar_feature(aa32_mve, s) || a->size == 3) {
+        return false;
+    }
+    if (!mve_eci_check(s) || !vfp_access_check(s)) {
+        return true;
+    }
+
+    /*
+     * This insn is subject to beat-wise execution. Partial execution
+     * of an A=0 (no-accumulate) insn which does not execute the first
+     * beat must start with the current value of Rda, not zero.
+     */
+    if (a->a || mve_skip_first_beat(s)) {
+        /* Accumulate input from Rda */
+        rda = load_reg(s, a->rda);
+    } else {
+        /* Accumulate starting at zero */
+        rda = tcg_const_i32(0);
+    }
+
+    qm = mve_qreg_ptr(a->qm);
+    fns[a->size][a->u](rda, cpu_env, qm, rda);
+    store_reg(s, a->rda, rda);
+    tcg_temp_free_ptr(qm);
+
+    mve_update_eci(s);
+    return true;
+}
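[Editor's note, a standalone sketch rather than QEMU code: a worked illustration of the Rda-initialisation rule in the comment above. If an A=0 VADDV is interrupted after some beats, Rda already holds a partial sum, so resumption must keep adding to it rather than restart from zero; the function name and lane split are purely illustrative.]

/* Hypothetical model: resuming an interrupted A=0 VADDV.32 where the
 * first two of four word lanes were already summed into Rda. */
#include <stdint.h>

static uint32_t resume_vaddv_u32(uint32_t rda_partial, const uint32_t lanes[4])
{
    for (int e = 2; e < 4; e++) {   /* only the not-yet-executed beats */
        rda_partial += lanes[e];
    }
    return rda_partial;             /* same result as an uninterrupted VADDV */
}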