author    Alexander Gilday <alexander.gilday@arm.com>    2018-04-16 17:42:00 +0100
committer Alexander Gilday <alexander.gilday@arm.com>    2018-04-17 08:52:36 +0100
commit    4e5bad9e4915ba673bfe016dbdced31fe3cb7687 (patch)
tree      6eaebdab0d5b98479841386a6ea96309fd28bc30 /test
parent    4e52d4db0b9629aa57ecd354afb31bc814eef5b7 (diff)
Add support for compare and swap in memory instructions.
Part of Armv8.1 Large System Extensions. Includes:
 - CAS, CASA, CASL, CASAL - Compare and swap word or doubleword in memory.
 - CASB, CASAB, CASLB, CASALB - Compare and swap byte in memory.
 - CASH, CASAH, CASLH, CASALH - Compare and swap halfword in memory.
 - CASP, CASPA, CASPL, CASPAL - Compare and swap pair of words or doublewords in memory.

Change-Id: I1b55b4f53a987c455d1cbc96210856ebeb9f63bf
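These tests exercise the architectural compare-and-swap behaviour: the value loaded from memory is compared with the first register operand, the new value is stored only when they match, and the comparand register is always overwritten with the value read. A minimal C++ sketch of that behaviour (the ModelCas helper below is illustrative only and not part of this patch):

    template <typename T>
    T ModelCas(T* addr, T expected, T desired) {
      T old = *addr;        // The comparand register always receives the loaded value.
      if (old == expected) {
        *addr = desired;    // The new value is stored only on a successful compare.
      }
      return old;
    }

In the word-sized test below, w2 matches the initial 0x01234567 in data2, so 0xffffffff is swapped in, while w1 holds 0x76543210 and leaves data1 unchanged. The byte and halfword variants apply the same rule to the low 8 or 16 bits only, and the pair variants compare and store two registers at once.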
Diffstat (limited to 'test')
-rw-r--r--  test/aarch64/test-assembler-aarch64.cc  415
-rw-r--r--  test/aarch64/test-disasm-aarch64.cc      64
2 files changed, 479 insertions, 0 deletions
diff --git a/test/aarch64/test-assembler-aarch64.cc b/test/aarch64/test-assembler-aarch64.cc
index 9cbe2f64..a93319a8 100644
--- a/test/aarch64/test-assembler-aarch64.cc
+++ b/test/aarch64/test-assembler-aarch64.cc
@@ -16362,6 +16362,421 @@ TEST(ldaxr_stlxr_fail) {
}
#endif
+TEST(cas_casa_casl_casal_w) {
+ uint64_t data1[] = {0x01234567, 0};
+ uint64_t data2[] = {0x01234567, 0};
+ uint64_t data3[] = {0x01234567, 0};
+ uint64_t data4[] = {0x01234567, 0};
+ uint64_t data5[] = {0x01234567, 0};
+ uint64_t data6[] = {0x01234567, 0};
+ uint64_t data7[] = {0x01234567, 0};
+ uint64_t data8[] = {0x01234567, 0};
+
+ uint64_t* data1_aligned = AlignUp(data1, kXRegSizeInBytes * 2);
+ uint64_t* data2_aligned = AlignUp(data2, kXRegSizeInBytes * 2);
+ uint64_t* data3_aligned = AlignUp(data3, kXRegSizeInBytes * 2);
+ uint64_t* data4_aligned = AlignUp(data4, kXRegSizeInBytes * 2);
+ uint64_t* data5_aligned = AlignUp(data5, kXRegSizeInBytes * 2);
+ uint64_t* data6_aligned = AlignUp(data6, kXRegSizeInBytes * 2);
+ uint64_t* data7_aligned = AlignUp(data7, kXRegSizeInBytes * 2);
+ uint64_t* data8_aligned = AlignUp(data8, kXRegSizeInBytes * 2);
+
+ SETUP();
+ START();
+
+ __ Mov(x21, reinterpret_cast<uintptr_t>(data1_aligned));
+ __ Mov(x22, reinterpret_cast<uintptr_t>(data2_aligned));
+ __ Mov(x23, reinterpret_cast<uintptr_t>(data3_aligned));
+ __ Mov(x24, reinterpret_cast<uintptr_t>(data4_aligned));
+ __ Mov(x25, reinterpret_cast<uintptr_t>(data5_aligned));
+ __ Mov(x26, reinterpret_cast<uintptr_t>(data6_aligned));
+ __ Mov(x27, reinterpret_cast<uintptr_t>(data7_aligned));
+ __ Mov(x28, reinterpret_cast<uintptr_t>(data8_aligned));
+
+ __ Mov(x0, 0xffffffff);
+
+ __ Mov(x1, 0x76543210);
+ __ Mov(x2, 0x01234567);
+ __ Mov(x3, 0x76543210);
+ __ Mov(x4, 0x01234567);
+ __ Mov(x5, 0x76543210);
+ __ Mov(x6, 0x01234567);
+ __ Mov(x7, 0x76543210);
+ __ Mov(x8, 0x01234567);
+
+ __ Cas(w1, w0, MemOperand(x21));
+ __ Cas(w2, w0, MemOperand(x22));
+ __ Casa(w3, w0, MemOperand(x23));
+ __ Casa(w4, w0, MemOperand(x24));
+ __ Casl(w5, w0, MemOperand(x25));
+ __ Casl(w6, w0, MemOperand(x26));
+ __ Casal(w7, w0, MemOperand(x27));
+ __ Casal(w8, w0, MemOperand(x28));
+
+ END();
+
+// TODO: test on real hardware when available
+#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
+ RUN();
+
+ ASSERT_EQUAL_64(0x01234567, x1);
+ ASSERT_EQUAL_64(0x01234567, x2);
+ ASSERT_EQUAL_64(0x01234567, x3);
+ ASSERT_EQUAL_64(0x01234567, x4);
+ ASSERT_EQUAL_64(0x01234567, x5);
+ ASSERT_EQUAL_64(0x01234567, x6);
+ ASSERT_EQUAL_64(0x01234567, x7);
+ ASSERT_EQUAL_64(0x01234567, x8);
+
+ ASSERT_EQUAL_64(0x01234567, data1[0]);
+ ASSERT_EQUAL_64(0xffffffff, data2[0]);
+ ASSERT_EQUAL_64(0x01234567, data3[0]);
+ ASSERT_EQUAL_64(0xffffffff, data4[0]);
+ ASSERT_EQUAL_64(0x01234567, data5[0]);
+ ASSERT_EQUAL_64(0xffffffff, data6[0]);
+ ASSERT_EQUAL_64(0x01234567, data7[0]);
+ ASSERT_EQUAL_64(0xffffffff, data8[0]);
+#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
+
+ TEARDOWN();
+}
+
+TEST(cas_casa_casl_casal_x) {
+ uint64_t data1[] = {0x0123456789abcdef, 0};
+ uint64_t data2[] = {0x0123456789abcdef, 0};
+ uint64_t data3[] = {0x0123456789abcdef, 0};
+ uint64_t data4[] = {0x0123456789abcdef, 0};
+ uint64_t data5[] = {0x0123456789abcdef, 0};
+ uint64_t data6[] = {0x0123456789abcdef, 0};
+ uint64_t data7[] = {0x0123456789abcdef, 0};
+ uint64_t data8[] = {0x0123456789abcdef, 0};
+
+ uint64_t* data1_aligned = AlignUp(data1, kXRegSizeInBytes * 2);
+ uint64_t* data2_aligned = AlignUp(data2, kXRegSizeInBytes * 2);
+ uint64_t* data3_aligned = AlignUp(data3, kXRegSizeInBytes * 2);
+ uint64_t* data4_aligned = AlignUp(data4, kXRegSizeInBytes * 2);
+ uint64_t* data5_aligned = AlignUp(data5, kXRegSizeInBytes * 2);
+ uint64_t* data6_aligned = AlignUp(data6, kXRegSizeInBytes * 2);
+ uint64_t* data7_aligned = AlignUp(data7, kXRegSizeInBytes * 2);
+ uint64_t* data8_aligned = AlignUp(data8, kXRegSizeInBytes * 2);
+
+ SETUP();
+ START();
+
+ __ Mov(x21, reinterpret_cast<uintptr_t>(data1_aligned));
+ __ Mov(x22, reinterpret_cast<uintptr_t>(data2_aligned));
+ __ Mov(x23, reinterpret_cast<uintptr_t>(data3_aligned));
+ __ Mov(x24, reinterpret_cast<uintptr_t>(data4_aligned));
+ __ Mov(x25, reinterpret_cast<uintptr_t>(data5_aligned));
+ __ Mov(x26, reinterpret_cast<uintptr_t>(data6_aligned));
+ __ Mov(x27, reinterpret_cast<uintptr_t>(data7_aligned));
+ __ Mov(x28, reinterpret_cast<uintptr_t>(data8_aligned));
+
+ __ Mov(x0, 0xffffffffffffffff);
+
+ __ Mov(x1, 0xfedcba9876543210);
+ __ Mov(x2, 0x0123456789abcdef);
+ __ Mov(x3, 0xfedcba9876543210);
+ __ Mov(x4, 0x0123456789abcdef);
+ __ Mov(x5, 0xfedcba9876543210);
+ __ Mov(x6, 0x0123456789abcdef);
+ __ Mov(x7, 0xfedcba9876543210);
+ __ Mov(x8, 0x0123456789abcdef);
+
+ __ Cas(x1, x0, MemOperand(x21));
+ __ Cas(x2, x0, MemOperand(x22));
+ __ Casa(x3, x0, MemOperand(x23));
+ __ Casa(x4, x0, MemOperand(x24));
+ __ Casl(x5, x0, MemOperand(x25));
+ __ Casl(x6, x0, MemOperand(x26));
+ __ Casal(x7, x0, MemOperand(x27));
+ __ Casal(x8, x0, MemOperand(x28));
+
+ END();
+
+// TODO: test on real hardware when available
+#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
+ RUN();
+
+ ASSERT_EQUAL_64(0x0123456789abcdef, x1);
+ ASSERT_EQUAL_64(0x0123456789abcdef, x2);
+ ASSERT_EQUAL_64(0x0123456789abcdef, x3);
+ ASSERT_EQUAL_64(0x0123456789abcdef, x4);
+ ASSERT_EQUAL_64(0x0123456789abcdef, x5);
+ ASSERT_EQUAL_64(0x0123456789abcdef, x6);
+ ASSERT_EQUAL_64(0x0123456789abcdef, x7);
+ ASSERT_EQUAL_64(0x0123456789abcdef, x8);
+
+ ASSERT_EQUAL_64(0x0123456789abcdef, data1[0]);
+ ASSERT_EQUAL_64(0xffffffffffffffff, data2[0]);
+ ASSERT_EQUAL_64(0x0123456789abcdef, data3[0]);
+ ASSERT_EQUAL_64(0xffffffffffffffff, data4[0]);
+ ASSERT_EQUAL_64(0x0123456789abcdef, data5[0]);
+ ASSERT_EQUAL_64(0xffffffffffffffff, data6[0]);
+ ASSERT_EQUAL_64(0x0123456789abcdef, data7[0]);
+ ASSERT_EQUAL_64(0xffffffffffffffff, data8[0]);
+#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
+
+ TEARDOWN();
+}
+
+TEST(casb_casab_caslb_casalb) {
+ uint64_t data1[] = {0x01234567, 0};
+ uint64_t data2[] = {0x01234567, 0};
+ uint64_t data3[] = {0x01234567, 0};
+ uint64_t data4[] = {0x01234567, 0};
+ uint64_t data5[] = {0x01234567, 0};
+ uint64_t data6[] = {0x01234567, 0};
+ uint64_t data7[] = {0x01234567, 0};
+ uint64_t data8[] = {0x01234567, 0};
+
+ uint64_t* data1_aligned = AlignUp(data1, kXRegSizeInBytes * 2);
+ uint64_t* data2_aligned = AlignUp(data2, kXRegSizeInBytes * 2);
+ uint64_t* data3_aligned = AlignUp(data3, kXRegSizeInBytes * 2);
+ uint64_t* data4_aligned = AlignUp(data4, kXRegSizeInBytes * 2);
+ uint64_t* data5_aligned = AlignUp(data5, kXRegSizeInBytes * 2);
+ uint64_t* data6_aligned = AlignUp(data6, kXRegSizeInBytes * 2);
+ uint64_t* data7_aligned = AlignUp(data7, kXRegSizeInBytes * 2);
+ uint64_t* data8_aligned = AlignUp(data8, kXRegSizeInBytes * 2);
+
+ SETUP();
+ START();
+
+ __ Mov(x21, reinterpret_cast<uintptr_t>(data1_aligned));
+ __ Mov(x22, reinterpret_cast<uintptr_t>(data2_aligned));
+ __ Mov(x23, reinterpret_cast<uintptr_t>(data3_aligned));
+ __ Mov(x24, reinterpret_cast<uintptr_t>(data4_aligned));
+ __ Mov(x25, reinterpret_cast<uintptr_t>(data5_aligned));
+ __ Mov(x26, reinterpret_cast<uintptr_t>(data6_aligned));
+ __ Mov(x27, reinterpret_cast<uintptr_t>(data7_aligned));
+ __ Mov(x28, reinterpret_cast<uintptr_t>(data8_aligned));
+
+ __ Mov(x0, 0xffffffff);
+
+ __ Mov(x1, 0x76543210);
+ __ Mov(x2, 0x01234567);
+ __ Mov(x3, 0x76543210);
+ __ Mov(x4, 0x01234567);
+ __ Mov(x5, 0x76543210);
+ __ Mov(x6, 0x01234567);
+ __ Mov(x7, 0x76543210);
+ __ Mov(x8, 0x01234567);
+
+ __ Casb(w1, w0, MemOperand(x21));
+ __ Casb(w2, w0, MemOperand(x22));
+ __ Casab(w3, w0, MemOperand(x23));
+ __ Casab(w4, w0, MemOperand(x24));
+ __ Caslb(w5, w0, MemOperand(x25));
+ __ Caslb(w6, w0, MemOperand(x26));
+ __ Casalb(w7, w0, MemOperand(x27));
+ __ Casalb(w8, w0, MemOperand(x28));
+
+ END();
+
+// TODO: test on real hardware when available
+#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
+ RUN();
+
+ ASSERT_EQUAL_64(0x00000067, x1);
+ ASSERT_EQUAL_64(0x00000067, x2);
+ ASSERT_EQUAL_64(0x00000067, x3);
+ ASSERT_EQUAL_64(0x00000067, x4);
+ ASSERT_EQUAL_64(0x00000067, x5);
+ ASSERT_EQUAL_64(0x00000067, x6);
+ ASSERT_EQUAL_64(0x00000067, x7);
+ ASSERT_EQUAL_64(0x00000067, x8);
+
+ ASSERT_EQUAL_64(0x01234567, data1[0]);
+ ASSERT_EQUAL_64(0x012345ff, data2[0]);
+ ASSERT_EQUAL_64(0x01234567, data3[0]);
+ ASSERT_EQUAL_64(0x012345ff, data4[0]);
+ ASSERT_EQUAL_64(0x01234567, data5[0]);
+ ASSERT_EQUAL_64(0x012345ff, data6[0]);
+ ASSERT_EQUAL_64(0x01234567, data7[0]);
+ ASSERT_EQUAL_64(0x012345ff, data8[0]);
+#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
+
+ TEARDOWN();
+}
+
+TEST(cash_casah_caslh_casalh) {
+ uint64_t data1[] = {0x01234567, 0};
+ uint64_t data2[] = {0x01234567, 0};
+ uint64_t data3[] = {0x01234567, 0};
+ uint64_t data4[] = {0x01234567, 0};
+ uint64_t data5[] = {0x01234567, 0};
+ uint64_t data6[] = {0x01234567, 0};
+ uint64_t data7[] = {0x01234567, 0};
+ uint64_t data8[] = {0x01234567, 0};
+
+ uint64_t* data1_aligned = AlignUp(data1, kXRegSizeInBytes * 2);
+ uint64_t* data2_aligned = AlignUp(data2, kXRegSizeInBytes * 2);
+ uint64_t* data3_aligned = AlignUp(data3, kXRegSizeInBytes * 2);
+ uint64_t* data4_aligned = AlignUp(data4, kXRegSizeInBytes * 2);
+ uint64_t* data5_aligned = AlignUp(data5, kXRegSizeInBytes * 2);
+ uint64_t* data6_aligned = AlignUp(data6, kXRegSizeInBytes * 2);
+ uint64_t* data7_aligned = AlignUp(data7, kXRegSizeInBytes * 2);
+ uint64_t* data8_aligned = AlignUp(data8, kXRegSizeInBytes * 2);
+
+ SETUP();
+ START();
+
+ __ Mov(x21, reinterpret_cast<uintptr_t>(data1_aligned));
+ __ Mov(x22, reinterpret_cast<uintptr_t>(data2_aligned));
+ __ Mov(x23, reinterpret_cast<uintptr_t>(data3_aligned));
+ __ Mov(x24, reinterpret_cast<uintptr_t>(data4_aligned));
+ __ Mov(x25, reinterpret_cast<uintptr_t>(data5_aligned));
+ __ Mov(x26, reinterpret_cast<uintptr_t>(data6_aligned));
+ __ Mov(x27, reinterpret_cast<uintptr_t>(data7_aligned));
+ __ Mov(x28, reinterpret_cast<uintptr_t>(data8_aligned));
+
+ __ Mov(x0, 0xffffffff);
+
+ __ Mov(x1, 0x76543210);
+ __ Mov(x2, 0x01234567);
+ __ Mov(x3, 0x76543210);
+ __ Mov(x4, 0x01234567);
+ __ Mov(x5, 0x76543210);
+ __ Mov(x6, 0x01234567);
+ __ Mov(x7, 0x76543210);
+ __ Mov(x8, 0x01234567);
+
+ __ Cash(w1, w0, MemOperand(x21));
+ __ Cash(w2, w0, MemOperand(x22));
+ __ Casah(w3, w0, MemOperand(x23));
+ __ Casah(w4, w0, MemOperand(x24));
+ __ Caslh(w5, w0, MemOperand(x25));
+ __ Caslh(w6, w0, MemOperand(x26));
+ __ Casalh(w7, w0, MemOperand(x27));
+ __ Casalh(w8, w0, MemOperand(x28));
+
+ END();
+
+// TODO: test on real hardware when available
+#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
+ RUN();
+
+ ASSERT_EQUAL_64(0x00004567, x1);
+ ASSERT_EQUAL_64(0x00004567, x2);
+ ASSERT_EQUAL_64(0x00004567, x3);
+ ASSERT_EQUAL_64(0x00004567, x4);
+ ASSERT_EQUAL_64(0x00004567, x5);
+ ASSERT_EQUAL_64(0x00004567, x6);
+ ASSERT_EQUAL_64(0x00004567, x7);
+ ASSERT_EQUAL_64(0x00004567, x8);
+
+ ASSERT_EQUAL_64(0x01234567, data1[0]);
+ ASSERT_EQUAL_64(0x0123ffff, data2[0]);
+ ASSERT_EQUAL_64(0x01234567, data3[0]);
+ ASSERT_EQUAL_64(0x0123ffff, data4[0]);
+ ASSERT_EQUAL_64(0x01234567, data5[0]);
+ ASSERT_EQUAL_64(0x0123ffff, data6[0]);
+ ASSERT_EQUAL_64(0x01234567, data7[0]);
+ ASSERT_EQUAL_64(0x0123ffff, data8[0]);
+#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
+
+ TEARDOWN();
+}
+
+TEST(casp_caspa_caspl_caspal) {
+ uint64_t data1[] = {0x89abcdef01234567, 0};
+ uint64_t data2[] = {0x89abcdef01234567, 0};
+ uint64_t data3[] = {0x89abcdef01234567, 0};
+ uint64_t data4[] = {0x89abcdef01234567, 0};
+ uint64_t data5[] = {0x89abcdef01234567, 0};
+ uint64_t data6[] = {0x89abcdef01234567, 0};
+ uint64_t data7[] = {0x89abcdef01234567, 0};
+ uint64_t data8[] = {0x89abcdef01234567, 0};
+
+ uint64_t* data1_aligned = AlignUp(data1, kXRegSizeInBytes * 2);
+ uint64_t* data2_aligned = AlignUp(data2, kXRegSizeInBytes * 2);
+ uint64_t* data3_aligned = AlignUp(data3, kXRegSizeInBytes * 2);
+ uint64_t* data4_aligned = AlignUp(data4, kXRegSizeInBytes * 2);
+ uint64_t* data5_aligned = AlignUp(data5, kXRegSizeInBytes * 2);
+ uint64_t* data6_aligned = AlignUp(data6, kXRegSizeInBytes * 2);
+ uint64_t* data7_aligned = AlignUp(data7, kXRegSizeInBytes * 2);
+ uint64_t* data8_aligned = AlignUp(data8, kXRegSizeInBytes * 2);
+
+ SETUP();
+ START();
+
+ __ Mov(x21, reinterpret_cast<uintptr_t>(data1_aligned));
+ __ Mov(x22, reinterpret_cast<uintptr_t>(data2_aligned));
+ __ Mov(x23, reinterpret_cast<uintptr_t>(data3_aligned));
+ __ Mov(x24, reinterpret_cast<uintptr_t>(data4_aligned));
+ __ Mov(x25, reinterpret_cast<uintptr_t>(data5_aligned));
+ __ Mov(x26, reinterpret_cast<uintptr_t>(data6_aligned));
+ __ Mov(x27, reinterpret_cast<uintptr_t>(data7_aligned));
+ __ Mov(x28, reinterpret_cast<uintptr_t>(data8_aligned));
+
+ __ Mov(x0, 0xffffffff);
+ __ Mov(x1, 0xffffffff);
+
+ __ Mov(x2, 0x76543210);
+ __ Mov(x3, 0xfedcba98);
+ __ Mov(x4, 0x89abcdef);
+ __ Mov(x5, 0x01234567);
+
+ __ Mov(x6, 0x76543210);
+ __ Mov(x7, 0xfedcba98);
+ __ Mov(x8, 0x89abcdef);
+ __ Mov(x9, 0x01234567);
+
+ __ Mov(x10, 0x76543210);
+ __ Mov(x11, 0xfedcba98);
+ __ Mov(x12, 0x89abcdef);
+ __ Mov(x13, 0x01234567);
+
+ __ Mov(x14, 0x76543210);
+ __ Mov(x15, 0xfedcba98);
+ __ Mov(x16, 0x89abcdef);
+ __ Mov(x17, 0x01234567);
+
+ __ Casp(w2, w3, w0, w1, MemOperand(x21));
+ __ Casp(w4, w5, w0, w1, MemOperand(x22));
+ __ Caspa(w6, w7, w0, w1, MemOperand(x23));
+ __ Caspa(w8, w9, w0, w1, MemOperand(x24));
+ __ Caspl(w10, w11, w0, w1, MemOperand(x25));
+ __ Caspl(w12, w13, w0, w1, MemOperand(x26));
+ __ Caspal(w14, w15, w0, w1, MemOperand(x27));
+ __ Caspal(w16, w17, w0, w1, MemOperand(x28));
+
+ END();
+
+// TODO: test on real hardware when available
+#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
+ RUN();
+
+ ASSERT_EQUAL_64(0x89abcdef, x2);
+ ASSERT_EQUAL_64(0x01234567, x3);
+ ASSERT_EQUAL_64(0x89abcdef, x4);
+ ASSERT_EQUAL_64(0x01234567, x5);
+ ASSERT_EQUAL_64(0x89abcdef, x6);
+ ASSERT_EQUAL_64(0x01234567, x7);
+ ASSERT_EQUAL_64(0x89abcdef, x8);
+ ASSERT_EQUAL_64(0x01234567, x9);
+ ASSERT_EQUAL_64(0x89abcdef, x10);
+ ASSERT_EQUAL_64(0x01234567, x11);
+ ASSERT_EQUAL_64(0x89abcdef, x12);
+ ASSERT_EQUAL_64(0x01234567, x13);
+ ASSERT_EQUAL_64(0x89abcdef, x14);
+ ASSERT_EQUAL_64(0x01234567, x15);
+ ASSERT_EQUAL_64(0x89abcdef, x16);
+ ASSERT_EQUAL_64(0x01234567, x17);
+
+ ASSERT_EQUAL_64(0x89abcdef01234567, data1[0]);
+ ASSERT_EQUAL_64(0xffffffffffffffff, data2[0]);
+ ASSERT_EQUAL_64(0x89abcdef01234567, data3[0]);
+ ASSERT_EQUAL_64(0xffffffffffffffff, data4[0]);
+ ASSERT_EQUAL_64(0x89abcdef01234567, data5[0]);
+ ASSERT_EQUAL_64(0xffffffffffffffff, data6[0]);
+ ASSERT_EQUAL_64(0x89abcdef01234567, data7[0]);
+ ASSERT_EQUAL_64(0xffffffffffffffff, data8[0]);
+#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
+
+ TEARDOWN();
+}
+
TEST(load_store_tagged_immediate_offset) {
uint64_t tags[] = {0x00, 0x1, 0x55, 0xff};
diff --git a/test/aarch64/test-disasm-aarch64.cc b/test/aarch64/test-disasm-aarch64.cc
index 616e3555..fefaffe9 100644
--- a/test/aarch64/test-disasm-aarch64.cc
+++ b/test/aarch64/test-disasm-aarch64.cc
@@ -1861,6 +1861,70 @@ TEST(load_store_exclusive) {
COMPARE(ldar(x22, MemOperand(x23)), "ldar x22, [x23]");
COMPARE(ldar(x24, MemOperand(sp)), "ldar x24, [sp]");
+ COMPARE(cas(w25, w26, MemOperand(x27)), "cas w25, w26, [x27]");
+ COMPARE(cas(w28, w29, MemOperand(sp)), "cas w28, w29, [sp]");
+ COMPARE(cas(x30, x0, MemOperand(x1)), "cas x30, x0, [x1]");
+ COMPARE(cas(x2, x3, MemOperand(sp)), "cas x2, x3, [sp]");
+ COMPARE(casa(w4, w5, MemOperand(x6)), "casa w4, w5, [x6]");
+ COMPARE(casa(w7, w8, MemOperand(sp)), "casa w7, w8, [sp]");
+ COMPARE(casa(x9, x10, MemOperand(x11)), "casa x9, x10, [x11]");
+ COMPARE(casa(x12, x13, MemOperand(sp)), "casa x12, x13, [sp]");
+ COMPARE(casl(w14, w15, MemOperand(x16)), "casl w14, w15, [x16]");
+ COMPARE(casl(w17, w18, MemOperand(sp)), "casl w17, w18, [sp]");
+ COMPARE(casl(x19, x20, MemOperand(x21)), "casl x19, x20, [x21]");
+ COMPARE(casl(x22, x23, MemOperand(sp)), "casl x22, x23, [sp]");
+ COMPARE(casal(w24, w25, MemOperand(x26)), "casal w24, w25, [x26]");
+ COMPARE(casal(w27, w28, MemOperand(sp)), "casal w27, w28, [sp]");
+ COMPARE(casal(x29, x30, MemOperand(x0)), "casal x29, x30, [x0]");
+ COMPARE(casal(x1, x2, MemOperand(sp)), "casal x1, x2, [sp]");
+ COMPARE(casb(w3, w4, MemOperand(x5)), "casb w3, w4, [x5]");
+ COMPARE(casb(w6, w7, MemOperand(sp)), "casb w6, w7, [sp]");
+ COMPARE(casab(w8, w9, MemOperand(x10)), "casab w8, w9, [x10]");
+ COMPARE(casab(w11, w12, MemOperand(sp)), "casab w11, w12, [sp]");
+ COMPARE(caslb(w13, w14, MemOperand(x15)), "caslb w13, w14, [x15]");
+ COMPARE(caslb(w16, w17, MemOperand(sp)), "caslb w16, w17, [sp]");
+ COMPARE(casalb(w18, w19, MemOperand(x20)), "casalb w18, w19, [x20]");
+ COMPARE(casalb(w21, w22, MemOperand(sp)), "casalb w21, w22, [sp]");
+ COMPARE(cash(w23, w24, MemOperand(x25)), "cash w23, w24, [x25]");
+ COMPARE(cash(w26, w27, MemOperand(sp)), "cash w26, w27, [sp]");
+ COMPARE(casah(w28, w29, MemOperand(x30)), "casah w28, w29, [x30]");
+ COMPARE(casah(w0, w1, MemOperand(sp)), "casah w0, w1, [sp]");
+ COMPARE(caslh(w2, w3, MemOperand(x4)), "caslh w2, w3, [x4]");
+ COMPARE(caslh(w5, w6, MemOperand(sp)), "caslh w5, w6, [sp]");
+ COMPARE(casalh(w7, w8, MemOperand(x9)), "casalh w7, w8, [x9]");
+ COMPARE(casalh(w10, w11, MemOperand(sp)), "casalh w10, w11, [sp]");
+ COMPARE(casp(w12, w13, w14, w15, MemOperand(x16)),
+ "casp w12, w13, w14, w15, [x16]");
+ COMPARE(casp(w18, w19, w20, w21, MemOperand(sp)),
+ "casp w18, w19, w20, w21, [sp]");
+ COMPARE(casp(x22, x23, x24, x25, MemOperand(x26)),
+ "casp x22, x23, x24, x25, [x26]");
+ COMPARE(casp(x28, x29, x0, x1, MemOperand(sp)),
+ "casp x28, x29, x0, x1, [sp]");
+ COMPARE(caspa(w2, w3, w4, w5, MemOperand(x6)), "caspa w2, w3, w4, w5, [x6]");
+ COMPARE(caspa(w8, w9, w10, w11, MemOperand(sp)),
+ "caspa w8, w9, w10, w11, [sp]");
+ COMPARE(caspa(x12, x13, x14, x15, MemOperand(x16)),
+ "caspa x12, x13, x14, x15, [x16]");
+ COMPARE(caspa(x18, x19, x20, x21, MemOperand(sp)),
+ "caspa x18, x19, x20, x21, [sp]");
+ COMPARE(caspl(w22, w23, w24, w25, MemOperand(x26)),
+ "caspl w22, w23, w24, w25, [x26]");
+ COMPARE(caspl(w28, w29, w0, w1, MemOperand(sp)),
+ "caspl w28, w29, w0, w1, [sp]");
+ COMPARE(caspl(x2, x3, x4, x5, MemOperand(x6)), "caspl x2, x3, x4, x5, [x6]");
+ COMPARE(caspl(x8, x9, x10, x11, MemOperand(sp)),
+ "caspl x8, x9, x10, x11, [sp]");
+ COMPARE(caspal(w12, w13, w14, w15, MemOperand(x16)),
+ "caspal w12, w13, w14, w15, [x16]");
+ COMPARE(caspal(w18, w19, w20, w21, MemOperand(sp)),
+ "caspal w18, w19, w20, w21, [sp]");
+ COMPARE(caspal(x22, x23, x24, x25, MemOperand(x26)),
+ "caspal x22, x23, x24, x25, [x26]");
+ COMPARE(caspal(x28, x29, x0, x1, MemOperand(sp)),
+ "caspal x28, x29, x0, x1, [sp]");
+
+
CLEANUP();
}