author     Peter Maydell <peter.maydell@linaro.org>    2023-09-01 16:44:31 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2023-09-01 16:45:01 +0100
commit     6257c5ea13e9040e18e8fe664d3a00746c3b65c4
tree       9a24b0f2f53cd4f9a8ae1ff6c9fd88d1f8a09b7b
parent     a3ae17d2f0d1b5dde9f6a14bc272097ede930cf7
target/arm: Implement the SETG* instructions
The FEAT_MOPS SETG* instructions are very similar to the SET* instructions, but as well as setting memory contents they also set the MTE tags. They are architecturally required to operate on tag-granule aligned regions only.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
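As a rough model of the net architectural effect described above (a sketch, not QEMU code: the side tag array and setg_model() are illustrative assumptions):

    #include <stdint.h>
    #include <string.h>

    #define TAG_GRANULE 16

    /* Hypothetical side store for the model: one 4-bit allocation tag
     * per 16-byte granule (illustration only). */
    static unsigned char tags[1024];

    /* Net effect of a complete SETGP/SETGM/SETGE sequence: fill the
     * region with the data byte and set every granule's tag. In
     * hardware the tag comes from bits [59:56] of the destination
     * pointer; both address and size must be tag-granule aligned. */
    static void setg_model(unsigned char *buf, uint64_t size,
                           uint8_t data, unsigned tag)
    {
        memset(buf, data, size);
        for (uint64_t off = 0; off < size; off += TAG_GRANULE) {
            tags[off / TAG_GRANULE] = tag & 0xf;
        }
    }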
-rw-r--r--  target/arm/internals.h         | 10
-rw-r--r--  target/arm/tcg/a64.decode      |  5
-rw-r--r--  target/arm/tcg/helper-a64.c    | 92
-rw-r--r--  target/arm/tcg/mte_helper.c    | 40
-rw-r--r--  target/arm/tcg/translate-a64.c | 18
5 files changed, 157 insertions(+), 8 deletions(-)
diff --git a/target/arm/internals.h b/target/arm/internals.h
index a70a7fd50f..642f77df29 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1300,6 +1300,16 @@ uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
void mte_check_fail(CPUARMState *env, uint32_t desc,
uint64_t dirty_ptr, uintptr_t ra);
+/**
+ * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
+ * @env: CPU env
+ * @dirty_ptr: Start address of memory region (dirty pointer)
+ * @size: length of region (guaranteed not to cross a page boundary)
+ * @desc: MTEDESC descriptor word
+ */
+void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
+ uint32_t desc);
+
static inline int allocation_tag_from_addr(uint64_t ptr)
{
return extract64(ptr, 56, 4);
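For reference, the allocation_tag_from_addr() shown in context above is a plain field extract; a standalone equivalent of its extract64(ptr, 56, 4):

    #include <stdint.h>

    /* The MTE allocation tag lives in address bits [59:56]. */
    static inline int tag_from_addr(uint64_t ptr)
    {
        return (int)((ptr >> 56) & 0xf);
    }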
diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index 8cddc207a6..46caeb59fe 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -569,3 +569,8 @@ STZ2G 11011001 11 1 ......... 11 ..... ..... @ldst_tag p=0 w=1
SETP 00 011001110 ..... 00 . . 01 ..... ..... @set
SETM 00 011001110 ..... 01 . . 01 ..... ..... @set
SETE 00 011001110 ..... 10 . . 01 ..... ..... @set
+
+# Like SET, but also setting MTE tags
+SETGP 00 011101110 ..... 00 . . 01 ..... ..... @set
+SETGM 00 011101110 ..... 01 . . 01 ..... ..... @set
+SETGE 00 011101110 ..... 10 . . 01 ..... ..... @set
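Comparing the patterns, the SETG* encodings are the SET* encodings with one extra opcode bit set (011101110 vs 011001110 in bits [29:21], i.e. instruction bit 26). A minimal sketch of that distinction, assuming the word is already known to be in the SET*/SETG* family (decodetree does the real dispatch, not this):

    #include <stdbool.h>
    #include <stdint.h>

    /* Derived from the decode patterns above: bit 26 selects the
     * tag-setting variants. Only valid for a word already matched
     * as a SET or SETG encoding. */
    static inline bool insn_is_setg(uint32_t insn)
    {
        return (insn >> 26) & 1;
    }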
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
index f2f008687e..81f3bf05a4 100644
--- a/target/arm/tcg/helper-a64.c
+++ b/target/arm/tcg/helper-a64.c
@@ -1079,6 +1079,54 @@ static uint64_t set_step(CPUARMState *env, uint64_t toaddr,
return setsize;
}
+/*
+ * Similar, but setting tags. The architecture requires us to do this
+ * in 16-byte chunks. SETG accesses are not tag checked; they set
+ * the tags.
+ */
+static uint64_t set_step_tags(CPUARMState *env, uint64_t toaddr,
+ uint64_t setsize, uint32_t data, int memidx,
+ uint32_t *mtedesc, uintptr_t ra)
+{
+ void *mem;
+ uint64_t cleanaddr;
+
+ setsize = MIN(setsize, TARGET_PAGE_ALIGN(toaddr + 1) - toaddr);
+
+ cleanaddr = useronly_clean_ptr(toaddr);
+ /*
+ * Trapless lookup: returns NULL for invalid page, I/O,
+ * watchpoints, clean pages, etc.
+ */
+ mem = tlb_vaddr_to_host(env, cleanaddr, MMU_DATA_STORE, memidx);
+
+#ifndef CONFIG_USER_ONLY
+ if (unlikely(!mem)) {
+ /*
+ * Slow-path: just do one 16-byte write, as two 8-byte
+ * stores. This handles the watchpoint, invalid page, etc,
+ * cases correctly.
+ * The architecture requires that we do 16 bytes at a time,
+ * and we know both ptr and size are 16 byte aligned.
+ * For clean code pages, the next iteration will see
+ * the page dirty and will use the fast path.
+ */
+ uint64_t repldata = data * 0x0101010101010101ULL;
+ cpu_stq_mmuidx_ra(env, toaddr, repldata, memidx, ra);
+ cpu_stq_mmuidx_ra(env, toaddr + 8, repldata, memidx, ra);
+ mte_mops_set_tags(env, toaddr, 16, *mtedesc);
+ return 16;
+ }
+#endif
+ /* Easy case: just memset the host memory */
+ memset(mem, data, setsize);
+ mte_mops_set_tags(env, toaddr, setsize, *mtedesc);
+ return setsize;
+}
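The data * 0x0101010101010101ULL in the slow path is the usual multiply-to-replicate idiom; a self-contained check of what it computes:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Multiplying a byte by 0x0101010101010101 copies it into all
         * eight byte lanes of a 64-bit word. */
        uint8_t data = 0xAB;
        uint64_t repldata = data * 0x0101010101010101ULL;
        assert(repldata == 0xABABABABABABABABULL);
        return 0;
    }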
+
+typedef uint64_t StepFn(CPUARMState *env, uint64_t toaddr,
+ uint64_t setsize, uint32_t data,
+ int memidx, uint32_t *mtedesc, uintptr_t ra);
+
/* Extract register numbers from a MOPS exception syndrome value */
static int mops_destreg(uint32_t syndrome)
{
@@ -1095,6 +1143,11 @@ static int mops_sizereg(uint32_t syndrome)
return extract32(syndrome, 0, 5);
}
+static bool mops_is_setg(uint32_t syndrome)
+{
+ return extract32(syndrome, 23, 1);
+}
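The syndrome word doubles as the helper's argument block: the register numbers and the new SETG flag are plain bitfields. A standalone sketch of the layout assumed by mops_destreg()/mops_srcreg()/mops_sizereg()/mops_is_setg() (the destreg/srcreg positions follow the matching syn_mop() packing and should be treated as assumptions here):

    #include <stdbool.h>
    #include <stdint.h>

    /* Assumed syndrome layout: sizereg in [4:0], srcreg in [9:5],
     * destreg in [14:10], the is-SETG flag in bit 23. */
    static inline unsigned syn_field(uint32_t syn, int pos, int len)
    {
        return (syn >> pos) & ((1u << len) - 1);
    }

    static inline int  syn_destreg(uint32_t syn) { return syn_field(syn, 10, 5); }
    static inline int  syn_srcreg(uint32_t syn)  { return syn_field(syn, 5, 5); }
    static inline int  syn_sizereg(uint32_t syn) { return syn_field(syn, 0, 5); }
    static inline bool syn_is_setg(uint32_t syn) { return syn_field(syn, 23, 1); }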
+
/*
* Return true if TCMA and TBI bits mean we need to do MTE checks.
* We only need to do this once per MOPS insn, not for every page.
@@ -1113,6 +1166,18 @@ static bool mte_checks_needed(uint64_t ptr, uint32_t desc)
return !tcma_check(desc, bit55, allocation_tag_from_addr(ptr));
}
+/* Take an exception if the SETG addr/size are not granule aligned */
+static void check_setg_alignment(CPUARMState *env, uint64_t ptr, uint64_t size,
+ uint32_t memidx, uintptr_t ra)
+{
+ if ((size != 0 && !QEMU_IS_ALIGNED(ptr, TAG_GRANULE)) ||
+ !QEMU_IS_ALIGNED(size, TAG_GRANULE)) {
+ arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
+ memidx, ra);
+ }
+}
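A self-contained restatement of the predicate this function enforces (IS_ALIGNED stands in for QEMU's QEMU_IS_ALIGNED macro; TAG_GRANULE is 16):

    #include <stdbool.h>
    #include <stdint.h>

    #define TAG_GRANULE 16
    #define IS_ALIGNED(v, n) (((v) % (n)) == 0)

    /* A zero-length set is permitted at any address; otherwise both
     * the start address and the length must be tag-granule aligned. */
    static bool setg_operands_aligned(uint64_t ptr, uint64_t size)
    {
        return !((size != 0 && !IS_ALIGNED(ptr, TAG_GRANULE)) ||
                 !IS_ALIGNED(size, TAG_GRANULE));
    }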
+
/*
* For the Memory Set operation, our implementation chooses
* always to use "option A", where we update Xd to the final
@@ -1130,17 +1195,26 @@ void HELPER(setp)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
int rd = mops_destreg(syndrome);
int rs = mops_srcreg(syndrome);
int rn = mops_sizereg(syndrome);
+ bool set_tags = mops_is_setg(syndrome);
uint8_t data = env->xregs[rs];
uint32_t memidx = FIELD_EX32(mtedesc, MTEDESC, MIDX);
uint64_t toaddr = env->xregs[rd];
uint64_t setsize = env->xregs[rn];
uint64_t stagesetsize, step;
uintptr_t ra = GETPC();
+ StepFn *stepfn = set_tags ? set_step_tags : set_step;
check_mops_enabled(env, ra);
if (setsize > INT64_MAX) {
setsize = INT64_MAX;
+ if (set_tags) {
+ setsize &= ~0xf;
+ }
+ }
+
+ if (unlikely(set_tags)) {
+ check_setg_alignment(env, toaddr, setsize, memidx, ra);
}
if (!mte_checks_needed(toaddr, mtedesc)) {
@@ -1151,7 +1225,7 @@ void HELPER(setp)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
while (stagesetsize) {
env->xregs[rd] = toaddr;
env->xregs[rn] = setsize;
- step = set_step(env, toaddr, stagesetsize, data, memidx, &mtedesc, ra);
+ step = stepfn(env, toaddr, stagesetsize, data, memidx, &mtedesc, ra);
toaddr += step;
setsize -= step;
stagesetsize -= step;
@@ -1175,12 +1249,14 @@ void HELPER(setm)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
int rd = mops_destreg(syndrome);
int rs = mops_srcreg(syndrome);
int rn = mops_sizereg(syndrome);
+ bool set_tags = mops_is_setg(syndrome);
uint8_t data = env->xregs[rs];
uint64_t toaddr = env->xregs[rd] + env->xregs[rn];
uint64_t setsize = -env->xregs[rn];
uint32_t memidx = FIELD_EX32(mtedesc, MTEDESC, MIDX);
uint64_t step;
uintptr_t ra = GETPC();
+ StepFn *stepfn = set_tags ? set_step_tags : set_step;
check_mops_enabled(env, ra);
@@ -1208,13 +1284,17 @@ void HELPER(setm)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
mops_mismatch_exception_target_el(env), ra);
}
+ if (unlikely(set_tags)) {
+ check_setg_alignment(env, toaddr, setsize, memidx, ra);
+ }
+
if (!mte_checks_needed(toaddr, mtedesc)) {
mtedesc = 0;
}
/* Do the actual memset */
while (setsize >= TARGET_PAGE_SIZE) {
- step = set_step(env, toaddr, setsize, data, memidx, &mtedesc, ra);
+ step = stepfn(env, toaddr, setsize, data, memidx, &mtedesc, ra);
toaddr += step;
setsize -= step;
env->xregs[rn] = -setsize;
@@ -1231,12 +1311,14 @@ void HELPER(sete)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
int rd = mops_destreg(syndrome);
int rs = mops_srcreg(syndrome);
int rn = mops_sizereg(syndrome);
+ bool set_tags = mops_is_setg(syndrome);
uint8_t data = env->xregs[rs];
uint64_t toaddr = env->xregs[rd] + env->xregs[rn];
uint64_t setsize = -env->xregs[rn];
uint32_t memidx = FIELD_EX32(mtedesc, MTEDESC, MIDX);
uint64_t step;
uintptr_t ra = GETPC();
+ StepFn *stepfn = set_tags ? set_step_tags : set_step;
check_mops_enabled(env, ra);
@@ -1266,13 +1348,17 @@ void HELPER(sete)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
mops_mismatch_exception_target_el(env), ra);
}
+ if (unlikely(set_tags)) {
+ check_setg_alignment(env, toaddr, setsize, memidx, ra);
+ }
+
if (!mte_checks_needed(toaddr, mtedesc)) {
mtedesc = 0;
}
/* Do the actual memset */
while (setsize > 0) {
- step = set_step(env, toaddr, setsize, data, memidx, &mtedesc, ra);
+ step = stepfn(env, toaddr, setsize, data, memidx, &mtedesc, ra);
toaddr += step;
setsize -= step;
env->xregs[rn] = -setsize;
diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
index 0065012be1..d934f89beb 100644
--- a/target/arm/tcg/mte_helper.c
+++ b/target/arm/tcg/mte_helper.c
@@ -1040,3 +1040,43 @@ uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
return n * TAG_GRANULE - (ptr - tag_first);
}
}
+
+void mte_mops_set_tags(CPUARMState *env, uint64_t ptr, uint64_t size,
+ uint32_t desc)
+{
+ int mmu_idx, tag_count;
+ uint64_t ptr_tag;
+ void *mem;
+
+ if (!desc) {
+ /* Tags not actually enabled */
+ return;
+ }
+
+ mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
+ /* True probe: this will never fault */
+ mem = allocation_tag_mem_probe(env, mmu_idx, ptr, MMU_DATA_STORE, size,
+ MMU_DATA_STORE, true, 0);
+ if (!mem) {
+ return;
+ }
+
+ /*
+ * We know that ptr and size are both TAG_GRANULE aligned; store
+ * the tag from the pointer value into the tag memory.
+ */
+ ptr_tag = allocation_tag_from_addr(ptr);
+ tag_count = size / TAG_GRANULE;
+ if (ptr & TAG_GRANULE) {
+ /* Not 2*TAG_GRANULE-aligned: store tag to first nibble */
+ store_tag1_parallel(TAG_GRANULE, mem, ptr_tag);
+ mem++;
+ tag_count--;
+ }
+ memset(mem, ptr_tag | (ptr_tag << 4), tag_count / 2);
+ if (tag_count & 1) {
+ /* Final trailing unaligned nibble */
+ mem += tag_count / 2;
+ store_tag1_parallel(0, mem, ptr_tag);
+ }
+}
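Tag memory packs two 4-bit tags per byte, which is why the function handles an odd leading or trailing granule with store_tag1_parallel() and memset()s the aligned middle with the tag replicated into both nibbles. A small standalone illustration of that middle step:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* Eight tag bytes cover 16 granules (256 bytes of data). */
        unsigned char tagmem[8];
        unsigned ptr_tag = 0x3;   /* tag from address bits [59:56] */

        /* Replicate the 4-bit tag into both nibbles of each byte. */
        memset(tagmem, ptr_tag | (ptr_tag << 4), sizeof(tagmem));
        printf("tagmem[0] = 0x%02x\n", tagmem[0]);   /* prints 0x33 */
        return 0;
    }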
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 9d66135170..6e2392b3ba 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -3954,11 +3954,16 @@ TRANS_FEAT(STZ2G, aa64_mte_insn_reg, do_STG, a, true, true)
typedef void SetFn(TCGv_env, TCGv_i32, TCGv_i32);
-static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue, SetFn fn)
+static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue,
+ bool is_setg, SetFn fn)
{
int memidx;
uint32_t syndrome, desc = 0;
+ if (is_setg && !dc_isar_feature(aa64_mte, s)) {
+ return false;
+ }
+
/*
* UNPREDICTABLE cases: we choose to UNDEF, which allows
* us to pull this check before the CheckMOPSEnabled() test
@@ -3975,7 +3980,7 @@ static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue, SetFn fn)
* We pass option_a == true, matching our implementation;
* we pass wrong_option == false: helper function may set that bit.
*/
- syndrome = syn_mop(true, false, (a->nontemp << 1) | a->unpriv,
+ syndrome = syn_mop(true, is_setg, (a->nontemp << 1) | a->unpriv,
is_epilogue, false, true, a->rd, a->rs, a->rn);
if (s->ata) {
@@ -3997,9 +4002,12 @@ static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue, SetFn fn)
return true;
}
-TRANS_FEAT(SETP, aa64_mops, do_SET, a, false, gen_helper_setp)
-TRANS_FEAT(SETM, aa64_mops, do_SET, a, false, gen_helper_setm)
-TRANS_FEAT(SETE, aa64_mops, do_SET, a, true, gen_helper_sete)
+TRANS_FEAT(SETP, aa64_mops, do_SET, a, false, false, gen_helper_setp)
+TRANS_FEAT(SETM, aa64_mops, do_SET, a, false, false, gen_helper_setm)
+TRANS_FEAT(SETE, aa64_mops, do_SET, a, true, false, gen_helper_sete)
+TRANS_FEAT(SETGP, aa64_mops, do_SET, a, false, true, gen_helper_setp)
+TRANS_FEAT(SETGM, aa64_mops, do_SET, a, false, true, gen_helper_setm)
+TRANS_FEAT(SETGE, aa64_mops, do_SET, a, true, true, gen_helper_sete)
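For readers unfamiliar with the TRANS_FEAT() idiom: each line generates the decodetree trans_*() callback, gating on the feature test and forwarding the extra is_setg flag. Roughly what the SETGP line expands to inside this file (a sketch of the macro expansion, not standalone code):

    /* Approximate expansion of
     *   TRANS_FEAT(SETGP, aa64_mops, do_SET, a, false, true, gen_helper_setp)
     * using the surrounding file's DisasContext and arg_set types. */
    static bool trans_SETGP(DisasContext *s, arg_set *a)
    {
        return dc_isar_feature(aa64_mops, s) &&
               do_SET(s, a, false, true, gen_helper_setp);
    }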
typedef void ArithTwoOp(TCGv_i64, TCGv_i64, TCGv_i64);