Make MOPS tests tolerant of implementation options.

Most MOPS sequences can be implemented using one of two architectural
options, "A" or "B", which produce slightly different flag values. Some
`cpy` sequences also permit either forward or backward iteration, with
different register results in each case.

The tolerant checks rely on a new overload of `Equal64` that accepts a
list of reference values and passes if the register matches any one of
them.

In addition, renumber registers to avoid the use of `x16` in a couple of
tests: it is a MacroAssembler scratch register, so future MacroAssembler
changes could silently clobber it.
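
For example, each sequence's flags are now captured with `Mrs` and checked
against the list of permitted values; a representative excerpt from the
updated `mops_set` test:

    __ Setp(x1, x2, x3);
    __ Setm(x1, x2, x3);
    __ Sete(x1, x2, x3);
    __ Mrs(x20, NZCV);  // Capture flags for later checking.

    // Option A leaves NZCV clear; option B sets C.
    std::vector<uint64_t> allowed_flags = {NoFlag, CFlag};
    ASSERT_EQUAL_64(allowed_flags, x20);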
diff --git a/test/aarch64/test-assembler-aarch64.cc b/test/aarch64/test-assembler-aarch64.cc
index 1ced2fe..9da3f12 100644
--- a/test/aarch64/test-assembler-aarch64.cc
+++ b/test/aarch64/test-assembler-aarch64.cc
@@ -14357,23 +14357,36 @@
   __ Setp(x1, x2, x3);
   __ Setm(x1, x2, x3);
   __ Sete(x1, x2, x3);
+  __ Mrs(x20, NZCV);
 
   // x2 is now zero, so this should do nothing.
   __ Setp(x1, x2, x3);
   __ Setm(x1, x2, x3);
   __ Sete(x1, x2, x3);
+  __ Mrs(x21, NZCV);
 
   // Set dst[15] to zero using the masm helper.
   __ Add(x1, x0, 15);
   __ Mov(x2, 1);
   __ Set(x1, x2, xzr);
+  __ Mrs(x22, NZCV);
 
   // Load dst for comparison.
   __ Ldp(x10, x11, MemOperand(x0));
   END();
 
   if (CAN_RUN()) {
+    // Permitted results:
+    //            NZCV    Xd                Xn
+    //  Option A: ....    end of buffer     0
+    //  Option B: ..C.    end of buffer     0
+
+    std::vector<uint64_t> allowed_flags = {NoFlag, CFlag};
+
     RUN();
+    ASSERT_EQUAL_64(allowed_flags, x20);
+    ASSERT_EQUAL_64(allowed_flags, x21);
+    ASSERT_EQUAL_64(allowed_flags, x22);
     ASSERT_EQUAL_64(dst_addr + 16, x1);
     ASSERT_EQUAL_64(0, x2);
     ASSERT_EQUAL_64(0x1234aa, x3);
@@ -14397,11 +14410,20 @@
   __ Mov(x2, 16);
   __ Mov(x3, 0x42);
   __ Setn(x1, x2, x3);
+  __ Mrs(x20, NZCV);
   __ Ldp(x10, x11, MemOperand(x0));
   END();
 
   if (CAN_RUN()) {
+    // Permitted results:
+    //            NZCV    Xd                Xn
+    //  Option A: ....    end of buffer     0
+    //  Option B: ..C.    end of buffer     0
+
+    std::vector<uint64_t> allowed_flags = {NoFlag, CFlag};
+
     RUN();
+    ASSERT_EQUAL_64(allowed_flags, x20);
     ASSERT_EQUAL_64(dst_addr + 16, x1);
     ASSERT_EQUAL_64(0, x2);
     ASSERT_EQUAL_64(0x42, x3);
@@ -14413,10 +14435,10 @@
 TEST(mops_setg) {
   SETUP_WITH_FEATURES(CPUFeatures::kMOPS, CPUFeatures::kMTE);
 
-  uint8_t* dst_addr = nullptr;
+  uint8_t* dst = nullptr;
 #ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
   const int dst_size = 32;
-  dst_addr = reinterpret_cast<uint8_t*>(
+  dst = reinterpret_cast<uint8_t*>(
       simulator.Mmap(NULL,
                      dst_size * sizeof(uint8_t),
                      PROT_READ | PROT_WRITE | PROT_MTE,
@@ -14424,32 +14446,47 @@
                      -1,
                      0));
 
-  VIXL_ASSERT(dst_addr != nullptr);
-  uint8_t* untagged_ptr = AddressUntag(dst_addr);
+  VIXL_ASSERT(dst != nullptr);
+  uint8_t* untagged_ptr = AddressUntag(dst);
   memset(untagged_ptr, 0xc9, dst_size);
 #else
 // TODO: Port the memory allocation to work on MTE supported platform natively.
 // Note that `CAN_RUN` prevents running in MTE-unsupported environments.
 #endif
 
+  uintptr_t dst_addr = reinterpret_cast<uintptr_t>(dst);
+  uint64_t tag_mask = 0xf0ff'ffff'ffff'ffff;  // Clears MTE tag bits [59:56].
+
   START();
-  __ Mov(x0, reinterpret_cast<uint64_t>(dst_addr));
+  __ Mov(x0, dst_addr);
   __ Gmi(x2, x0, xzr);
   __ Irg(x1, x0, x2);  // Choose new tag for setg destination.
   __ Mov(x2, 16);
   __ Mov(x3, 0x42);
   __ Setg(x1, x2, x3);
+  __ Mrs(x20, NZCV);
 
   __ Ubfx(x4, x1, 56, 4);  // Extract new tag.
   __ Bfi(x0, x4, 56, 4);   // Tag dst_addr so set region can be loaded.
   __ Ldp(x10, x11, MemOperand(x0));
 
-  __ Mov(x0, reinterpret_cast<uint64_t>(dst_addr));
+  __ Mov(x0, dst_addr);
   __ Ldp(x12, x13, MemOperand(x0, 16));  // Unset region has original tag.
+
+  __ And(x1, x1, tag_mask);  // Strip tag for repeatable checks.
   END();
 
   if (CAN_RUN()) {
+    // Permitted results:
+    //            NZCV    Xd                Xn
+    //  Option A: ....    end of buffer     0
+    //  Option B: ..C.    end of buffer     0
+
+    std::vector<uint64_t> allowed_flags = {NoFlag, CFlag};
+
     RUN();
+    ASSERT_EQUAL_64(allowed_flags, x20);
+    ASSERT_EQUAL_64((dst_addr & tag_mask) + 16, x1);
     ASSERT_EQUAL_64(0, x2);
     ASSERT_EQUAL_64(0x42, x3);
     ASSERT_EQUAL_64(0x4242'4242'4242'4242, x10);
@@ -14459,7 +14496,7 @@
   }
 
 #ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
-  simulator.Munmap(dst_addr, dst_size, PROT_MTE);
+  simulator.Munmap(dst, dst_size, PROT_MTE);
 #endif
 }
 
@@ -14477,38 +14514,73 @@
   __ Mov(x0, buf_addr);
 
   // Copy first eight bytes into second eight.
-  __ Mov(x2, x0);     // src = &buf[0]
-  __ Add(x3, x0, 8);  // dst = &buf[8]
-  __ Mov(x4, 8);      // count = 8
-  __ Cpyp(x3, x2, x4);
-  __ Cpym(x3, x2, x4);
-  __ Cpye(x3, x2, x4);
+  __ Mov(x1, x0);     // src = &buf[0]
+  __ Add(x2, x0, 8);  // dst = &buf[8]
+  __ Mov(x3, 8);      // count = 8
+  __ Cpyp(x2, x1, x3);
+  __ Cpym(x2, x1, x3);
+  __ Cpye(x2, x1, x3);
   __ Ldp(x10, x11, MemOperand(x0));
   __ Mrs(x20, NZCV);
 
-  // Copy first eight bytes to overlapping offset, causing reverse copy.
-  __ Mov(x5, x0);     // src = &buf[0]
-  __ Add(x6, x0, 4);  // dst = &buf[4]
-  __ Mov(x7, 8);      // count = 8
-  __ Cpy(x6, x5, x7);
+  // Copy first eight bytes to overlapping offset, forcing a backwards copy.
+  __ Mov(x4, x0);     // src = &buf[0]
+  __ Add(x5, x0, 4);  // dst = &buf[4]
+  __ Mov(x6, 8);      // count = 8
+  __ Cpy(x5, x4, x6);
   __ Ldp(x12, x13, MemOperand(x0));
+  __ Mrs(x21, NZCV);
+
+  // Copy last eight bytes to overlapping offset, forcing a forwards copy.
+  __ Add(x7, x0, 8);  // src = &buf[8]
+  __ Add(x8, x0, 6);  // dst = &buf[6]
+  __ Mov(x9, 8);      // count = 8
+  __ Cpy(x8, x7, x9);
+  __ Ldp(x14, x15, MemOperand(x0));
+  __ Mrs(x22, NZCV);
   END();
 
   if (CAN_RUN()) {
+    // Permitted results:
+    //                        NZCV    Xs/Xd               Xn
+    //  Option A (forwards) : ....    ends of buffers     0
+    //  Option A (backwards): ....    starts of buffers   0
+    //  Option B (forwards) : ..C.    ends of buffers     0
+    //  Option B (backwards): N.C.    starts of buffers   0
+
+    std::vector<uint64_t> allowed_backwards_flags = {NoFlag, NCFlag};
+    std::vector<uint64_t> allowed_forwards_flags = {NoFlag, CFlag};
+
     RUN();
-    ASSERT_EQUAL_64(buf_addr + 8, x2);
-    ASSERT_EQUAL_64(buf_addr + 16, x3);
-    ASSERT_EQUAL_64(0, x4);
+    // IMPLEMENTATION DEFINED direction
+    if (static_cast<uintptr_t>(core.xreg(1)) > buf_addr) {
+      // Forwards
+      ASSERT_EQUAL_64(buf_addr + 8, x1);
+      ASSERT_EQUAL_64(buf_addr + 16, x2);
+      ASSERT_EQUAL_64(allowed_forwards_flags, x20);
+    } else {
+      // Backwards
+      ASSERT_EQUAL_64(buf_addr, x1);
+      ASSERT_EQUAL_64(buf_addr + 8, x2);
+      ASSERT_EQUAL_64(allowed_backwards_flags, x20);
+    }
+    ASSERT_EQUAL_64(0, x3);  // Xn
     ASSERT_EQUAL_64(0x0706'0504'0302'0100, x10);
     ASSERT_EQUAL_64(0x0706'0504'0302'0100, x11);
-    ASSERT_EQUAL_64(CFlag, x20);
 
-    ASSERT_EQUAL_64(buf_addr, x5);
-    ASSERT_EQUAL_64(buf_addr + 4, x6);
-    ASSERT_EQUAL_64(0, x7);
+    ASSERT_EQUAL_64(buf_addr, x4);      // Xs
+    ASSERT_EQUAL_64(buf_addr + 4, x5);  // Xd
+    ASSERT_EQUAL_64(0, x6);             // Xn
     ASSERT_EQUAL_64(0x0302'0100'0302'0100, x12);
     ASSERT_EQUAL_64(0x0706'0504'0706'0504, x13);
-    ASSERT_EQUAL_NZCV(NCFlag);
+    ASSERT_EQUAL_64(allowed_backwards_flags, x21);
+
+    ASSERT_EQUAL_64(buf_addr + 16, x7);  // Xs
+    ASSERT_EQUAL_64(buf_addr + 14, x8);  // Xd
+    ASSERT_EQUAL_64(0, x9);              // Xn
+    ASSERT_EQUAL_64(0x0504'0100'0302'0100, x14);
+    ASSERT_EQUAL_64(0x0706'0706'0504'0706, x15);
+    ASSERT_EQUAL_64(allowed_forwards_flags, x22);
   }
 }
 
@@ -14528,44 +14600,61 @@
   START();
   __ Mov(x0, buf_addr);
 
-  __ Add(x2, x0, 1);  // src = &buf[1]
-  __ Mov(x3, x0);     // dst = &buf[0]
-  __ Mov(x4, 15);     // count = 15
-  __ Cpyn(x3, x2, x4);
+  __ Add(x1, x0, 1);  // src = &buf[1]
+  __ Mov(x2, x0);     // dst = &buf[0]
+  __ Mov(x3, 15);     // count = 15
+  __ Cpyn(x2, x1, x3);
   __ Ldp(x10, x11, MemOperand(x0));
+  __ Mrs(x20, NZCV);
 
-  __ Add(x5, x0, 1);  // src = &buf[1]
-  __ Mov(x6, x0);     // dst = &buf[0]
-  __ Mov(x4, 15);     // count = 15
-  __ Cpyrn(x6, x5, x4);
+  __ Add(x4, x0, 1);  // src = &buf[1]
+  __ Mov(x5, x0);     // dst = &buf[0]
+  __ Mov(x6, 15);     // count = 15
+  __ Cpyrn(x5, x4, x6);
   __ Ldp(x12, x13, MemOperand(x0));
+  __ Mrs(x21, NZCV);
 
   __ Add(x7, x0, 1);  // src = &buf[1]
   __ Mov(x8, x0);     // dst = &buf[0]
-  __ Mov(x4, 15);     // count = 15
-  __ Cpywn(x8, x7, x4);
+  __ Mov(x9, 15);     // count = 15
+  __ Cpywn(x8, x7, x9);
   __ Ldp(x14, x15, MemOperand(x0));
+  __ Mrs(x22, NZCV);
   END();
 
   if (CAN_RUN()) {
+    // Permitted results:
+    //                        NZCV    Xs/Xd               Xn
+    //  Option A (forwards) : ....    ends of buffers     0
+    //  Option A (backwards): ....    starts of buffers   0
+    //  Option B (forwards) : ..C.    ends of buffers     0
+    //  Option B (backwards): N.C.    starts of buffers   0
+    //
+    // All cases overlap to force a forwards copy.
+
+    std::vector<uint64_t> allowed_forwards_flags = {NoFlag, CFlag};
+
     RUN();
-    ASSERT_EQUAL_64(buf_addr + 16, x2);
-    ASSERT_EQUAL_64(buf_addr + 15, x3);
+    ASSERT_EQUAL_64(buf_addr + 16, x1);  // Xs
+    ASSERT_EQUAL_64(buf_addr + 15, x2);  // Xd
+    ASSERT_EQUAL_64(0, x3);              // Xn
+    ASSERT_EQUAL_64(allowed_forwards_flags, x20);
     ASSERT_EQUAL_64(0x0807'0605'0403'0201, x10);
     ASSERT_EQUAL_64(0x0f0f'0e0d'0c0b'0a09, x11);
 
-    ASSERT_EQUAL_64(buf_addr + 16, x5);
-    ASSERT_EQUAL_64(buf_addr + 15, x6);
+    ASSERT_EQUAL_64(buf_addr + 16, x4);  // Xs
+    ASSERT_EQUAL_64(buf_addr + 15, x5);  // Xd
+    ASSERT_EQUAL_64(0, x6);              // Xn
+    ASSERT_EQUAL_64(allowed_forwards_flags, x21);
     ASSERT_EQUAL_64(0x0908'0706'0504'0302, x12);
     ASSERT_EQUAL_64(0x0f0f'0f0e'0d0c'0b0a, x13);
 
-    ASSERT_EQUAL_64(buf_addr + 16, x7);
-    ASSERT_EQUAL_64(buf_addr + 15, x8);
+    ASSERT_EQUAL_64(buf_addr + 16, x7);  // Xs
+    ASSERT_EQUAL_64(buf_addr + 15, x8);  // Xd
+    ASSERT_EQUAL_64(0, x9);              // Xn
+    ASSERT_EQUAL_64(allowed_forwards_flags, x22);
     ASSERT_EQUAL_64(0x0a09'0807'0605'0403, x14);
     ASSERT_EQUAL_64(0x0f0f'0f0f'0e0d'0c0b, x15);
-
-    ASSERT_EQUAL_64(0, x4);
-    ASSERT_EQUAL_NZCV(CFlag);
   }
 }
 
@@ -14579,46 +14668,79 @@
     buf[i] = i;
   }
 
-  // This test matches the cpy variant above, but using cpyf will result in a
-  // different answer for the overlapping copy.
+  // As `mops_cpy`, but `cpyf` always copies forwards, so it is only usable
+  // for non-overlapping buffers, or for overlapping buffers where the source
+  // address is greater than the destination address.
+
   START();
   __ Mov(x0, buf_addr);
 
-  // Copy first eight bytes into second eight.
-  __ Mov(x2, x0);     // src = &buf[0]
-  __ Add(x3, x0, 8);  // dst = &buf[8]
-  __ Mov(x4, 8);      // count = 8
-  __ Cpyf(x3, x2, x4);
+  // Copy first eight bytes into second eight, without overlap.
+  __ Mov(x1, x0);     // src = &buf[0]
+  __ Add(x2, x0, 8);  // dst = &buf[8]
+  __ Mov(x3, 8);      // count = 8
+  __ Cpyfp(x2, x1, x3);
+  __ Cpyfm(x2, x1, x3);
+  __ Cpyfe(x2, x1, x3);
   __ Ldp(x10, x11, MemOperand(x0));
   __ Mrs(x20, NZCV);
 
-  // Copy first eight bytes to overlapping offset.
-  __ Mov(x5, x0);     // src = &buf[0]
-  __ Add(x6, x0, 4);  // dst = &buf[4]
-  __ Mov(x7, 8);      // count = 8
-  __ Cpyf(x6, x5, x7);
+  // Copy last eight bytes to overlapping offset where src > dst.
+  __ Add(x4, x0, 8);  // src = &buf[8]
+  __ Add(x5, x0, 6);  // dst = &buf[6]
+  __ Mov(x6, 8);      // count = 8
+  __ Cpyf(x5, x4, x6);
   __ Ldp(x12, x13, MemOperand(x0));
+  __ Mrs(x21, NZCV);
+
+  // Copy first eight bytes to overlapping offset where src < dst.
+  __ Mov(x7, x0);     // src = &buf[0]
+  __ Add(x8, x0, 4);  // dst = &buf[4]
+  __ Mov(x9, 8);      // count = 8
+  __ Cpyf(x8, x7, x9);
+  // The only testable results are the first and last four bytes, which are
+  // not written at all.
+  __ Ldr(w14, MemOperand(x0));
+  __ Ldr(w15, MemOperand(x0, 12));
+  __ Mrs(x22, NZCV);
+
   END();
 
   if (CAN_RUN()) {
+    // Permitted results:
+    //            NZCV    Xs/Xd               Xn
+    //  Option A: ....    ends of buffers     0
+    //  Option B: ..C.    ends of buffers     0
+
+    std::vector<uint64_t> allowed_forwards_flags = {NoFlag, CFlag};
+
     RUN();
-    ASSERT_EQUAL_64(buf_addr + 8, x2);
-    ASSERT_EQUAL_64(buf_addr + 16, x3);
-    ASSERT_EQUAL_64(0, x4);
+
+    // No overlap.
+    ASSERT_EQUAL_64(buf_addr + 8, x1);   // Xs
+    ASSERT_EQUAL_64(buf_addr + 16, x2);  // Xd
+    ASSERT_EQUAL_64(0, x3);              // Xn
+    ASSERT_EQUAL_64(allowed_forwards_flags, x20);
     ASSERT_EQUAL_64(0x0706'0504'0302'0100, x10);
     ASSERT_EQUAL_64(0x0706'0504'0302'0100, x11);
-    ASSERT_EQUAL_64(CFlag, x20);
 
-    ASSERT_EQUAL_64(buf_addr + 8, x5);
-    ASSERT_EQUAL_64(buf_addr + 12, x6);
-    ASSERT_EQUAL_64(0, x7);
-    ASSERT_EQUAL_NZCV(CFlag);
+    // Overlap, src > dst.
+    ASSERT_EQUAL_64(buf_addr + 16, x4);  // Xs
+    ASSERT_EQUAL_64(buf_addr + 14, x5);  // Xd
+    ASSERT_EQUAL_64(0, x6);              // Xn
+    ASSERT_EQUAL_64(0x0100'0504'0302'0100, x12);
+    ASSERT_EQUAL_64(0x0706'0706'0504'0302, x13);
+    ASSERT_EQUAL_64(allowed_forwards_flags, x21);
 
-    // These results are not architecturally defined. They may change if the
-    // simulator is implemented in a different, but still architecturally
-    // correct, way.
-    ASSERT_EQUAL_64(0x0302'0100'0302'0100, x12);
-    ASSERT_EQUAL_64(0x0706'0504'0302'0100, x13);
+    // Overlap, src < dst.
+    ASSERT_EQUAL_64(buf_addr + 8, x7);   // Xs
+    ASSERT_EQUAL_64(buf_addr + 12, x8);  // Xd
+    ASSERT_EQUAL_64(0, x9);              // Xn
+    // We can only reliably test that the operation didn't write outside the
+    // specified region.
+    ASSERT_EQUAL_32(0x0302'0100, w14);
+    ASSERT_EQUAL_32(0x0706'0706, w15);
+    ASSERT_EQUAL_64(allowed_forwards_flags, x22);
   }
 }
 
@@ -14638,44 +14760,57 @@
   START();
   __ Mov(x0, buf_addr);
 
-  __ Add(x2, x0, 1);  // src = &buf[1]
-  __ Mov(x3, x0);     // dst = &buf[0]
-  __ Mov(x4, 15);     // count = 15
-  __ Cpyfn(x3, x2, x4);
+  __ Add(x1, x0, 1);  // src = &buf[1]
+  __ Mov(x2, x0);     // dst = &buf[0]
+  __ Mov(x3, 15);     // count = 15
+  __ Cpyfn(x2, x1, x3);
   __ Ldp(x10, x11, MemOperand(x0));
+  __ Mrs(x20, NZCV);
 
-  __ Add(x5, x0, 1);  // src = &buf[1]
-  __ Mov(x6, x0);     // dst = &buf[0]
-  __ Mov(x4, 15);     // count = 15
-  __ Cpyfrn(x6, x5, x4);
+  __ Add(x4, x0, 1);  // src = &buf[1]
+  __ Mov(x5, x0);     // dst = &buf[0]
+  __ Mov(x6, 15);     // count = 15
+  __ Cpyfrn(x5, x4, x6);
   __ Ldp(x12, x13, MemOperand(x0));
+  __ Mrs(x21, NZCV);
 
   __ Add(x7, x0, 1);  // src = &buf[1]
   __ Mov(x8, x0);     // dst = &buf[0]
-  __ Mov(x4, 15);     // count = 15
-  __ Cpyfwn(x8, x7, x4);
+  __ Mov(x9, 15);     // count = 15
+  __ Cpyfwn(x8, x7, x9);
   __ Ldp(x14, x15, MemOperand(x0));
+  __ Mrs(x22, NZCV);
   END();
 
   if (CAN_RUN()) {
+    // Permitted results:
+    //            NZCV    Xs/Xd               Xn
+    //  Option A: ....    ends of buffers     0
+    //  Option B: ..C.    ends of buffers     0
+
+    std::vector<uint64_t> allowed_flags = {NoFlag, CFlag};
+
     RUN();
-    ASSERT_EQUAL_64(buf_addr + 16, x2);
-    ASSERT_EQUAL_64(buf_addr + 15, x3);
+    ASSERT_EQUAL_64(buf_addr + 16, x1);  // Xs
+    ASSERT_EQUAL_64(buf_addr + 15, x2);  // Xd
+    ASSERT_EQUAL_64(0, x3);              // Xn
+    ASSERT_EQUAL_64(allowed_flags, x20);
     ASSERT_EQUAL_64(0x0807'0605'0403'0201, x10);
     ASSERT_EQUAL_64(0x0f0f'0e0d'0c0b'0a09, x11);
 
-    ASSERT_EQUAL_64(buf_addr + 16, x5);
-    ASSERT_EQUAL_64(buf_addr + 15, x6);
+    ASSERT_EQUAL_64(buf_addr + 16, x4);  // Xs
+    ASSERT_EQUAL_64(buf_addr + 15, x5);  // Xd
+    ASSERT_EQUAL_64(0, x6);              // Xn
+    ASSERT_EQUAL_64(allowed_flags, x21);
     ASSERT_EQUAL_64(0x0908'0706'0504'0302, x12);
     ASSERT_EQUAL_64(0x0f0f'0f0e'0d0c'0b0a, x13);
 
-    ASSERT_EQUAL_64(buf_addr + 16, x7);
-    ASSERT_EQUAL_64(buf_addr + 15, x8);
+    ASSERT_EQUAL_64(buf_addr + 16, x7);  // Xs
+    ASSERT_EQUAL_64(buf_addr + 15, x8);  // Xd
+    ASSERT_EQUAL_64(0, x9);              // Xn
+    ASSERT_EQUAL_64(allowed_flags, x22);
     ASSERT_EQUAL_64(0x0a09'0807'0605'0403, x14);
     ASSERT_EQUAL_64(0x0f0f'0f0f'0e0d'0c0b, x15);
-
-    ASSERT_EQUAL_64(0, x4);
-    ASSERT_EQUAL_NZCV(CFlag);
   }
 }
 
diff --git a/test/aarch64/test-utils-aarch64.cc b/test/aarch64/test-utils-aarch64.cc
index 398ed4f..c23f4e8 100644
--- a/test/aarch64/test-utils-aarch64.cc
+++ b/test/aarch64/test-utils-aarch64.cc
@@ -89,6 +89,34 @@
 }
 
 
+bool Equal64(std::vector<uint64_t> reference_list,
+             const RegisterDump*,
+             uint64_t result,
+             ExpectedResult option) {
+  // Check whether `result` matches any value in `reference_list`.
+  bool found = false;
+  for (uint64_t reference : reference_list) {
+    if (result == reference) found = true;
+  }
+  switch (option) {
+    case kExpectEqual:
+      if (found) return true;
+      printf("Expected a result in (\n");
+      break;
+    case kExpectNotEqual:
+      if (!found) return true;
+      printf("Expected a result not in (\n");
+      break;
+  }
+  // Report the failing comparison.
+  for (uint64_t reference : reference_list) {
+    printf("  0x%016" PRIx64 ",\n", reference);
+  }
+  printf(")\t Found 0x%016" PRIx64 "\n", result);
+  return false;
+}
+
+
 bool Equal128(QRegisterValue expected,
               const RegisterDump*,
               QRegisterValue result) {
@@ -200,6 +228,16 @@
 }
 
 
+bool Equal64(std::vector<uint64_t> reference_list,
+             const RegisterDump* core,
+             const Register& reg,
+             ExpectedResult option) {
+  VIXL_ASSERT(reg.Is64Bits());
+  uint64_t result = core->xreg(reg.GetCode());
+  return Equal64(reference_list, core, result, option);
+}
+
+
 bool NotEqual64(uint64_t reference,
                 const RegisterDump* core,
                 const Register& reg) {
diff --git a/test/aarch64/test-utils-aarch64.h b/test/aarch64/test-utils-aarch64.h
index 9cf9154..40a5aa5 100644
--- a/test/aarch64/test-utils-aarch64.h
+++ b/test/aarch64/test-utils-aarch64.h
@@ -345,6 +345,10 @@
              const RegisterDump*,
              uint64_t result,
              ExpectedResult option = kExpectEqual);
+bool Equal64(std::vector<uint64_t> reference_list,
+             const RegisterDump*,
+             uint64_t result,
+             ExpectedResult option = kExpectEqual);
 bool Equal128(QRegisterValue expected,
               const RegisterDump*,
               QRegisterValue result);
@@ -358,6 +362,10 @@
              const RegisterDump* core,
              const Register& reg,
              ExpectedResult option = kExpectEqual);
+bool Equal64(std::vector<uint64_t> reference_list,
+             const RegisterDump* core,
+             const Register& reg,
+             ExpectedResult option = kExpectEqual);
 bool Equal64(uint64_t expected,
              const RegisterDump* core,
              const VRegister& vreg);