Diffstat (limited to 'test.c')
-rw-r--r--    test.c    1243
1 file changed, 1243 insertions, 0 deletions
diff --git a/test.c b/test.c
new file mode 100644
index 0000000..65a18f0
--- /dev/null
+++ b/test.c
@@ -0,0 +1,1243 @@
+// Copyright 2014, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdint.h>
+#include <inttypes.h>
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+#include <signal.h>
+#include <time.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sched.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include "test.h"
+
+
+#define CHECK(condition) assert(condition)
+
+
+// -----------------------------------------------------------------------------
+// Test utilities.
+// -----------------------------------------------------------------------------
+
+// Helpers for manipulating and checking tagged addresses.
+
+#ifndef DISABLE_TAGS
+static int const kTagShift = 56;
+static int const kTagCount = 256;
+static uint64_t const kTagMask = UINT64_C(0xff00000000000000);
+#else
+// With tags disabled, just pretend that there is only one tag (with value 0).
+static int const kTagShift = 0;
+static int const kTagCount = 1;
+static uint64_t const kTagMask = UINT64_C(0x0000000000000000);
+#endif
+
+
+// Return 'pointer' tagged with the specified tag.
+static void * get_tagged_address(void * pointer, uint8_t tag) {
+ CHECK(tag < kTagCount);
+ uint64_t mask = (uint64_t)tag << kTagShift;
+ uint64_t address = (uint64_t)pointer & ~kTagMask;
+ return (void*)(address | mask);
+}
+
+
+// Return the tag of 'pointer'.
+static uint8_t get_tag(void * pointer) {
+ return ((uint64_t)pointer & kTagMask) >> kTagShift;
+}
+
+
+// Check that 'pointer' has the specified tag.
+static void check_tag(void * pointer, uint8_t tag) {
+ CHECK(tag < kTagCount);
+ CHECK(get_tag(pointer) == tag);
+}
+
+
+// Return 'pointer' with a tag of 0.
+// Some build configurations don't use this function; the 'unused' attribute
+// silences a corresponding GCC warning.
+static __attribute__((unused)) void * get_clear_address(void * pointer) {
+ return get_tagged_address(pointer, 0);
+}
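+
+
+// A short illustration of how these helpers compose (the concrete address
+// below is hypothetical). With kTagShift == 56, tagging a pointer such as
+// 0x0000007f12345678 with tag 0x2a yields 0x2a00007f12345678. Under AArch64
+// top-byte-ignore (TBI), data accesses ignore bits 63:56, so both pointers
+// reference the same memory:
+//
+//   uint64_t value = 42;
+//   uint64_t * tagged = get_tagged_address(&value, 0x2a);
+//   CHECK(*tagged == 42);                              // Same storage as 'value'.
+//   CHECK(get_tag(tagged) == 0x2a);                    // Tag is recoverable.
+//   CHECK(get_clear_address(tagged) == (void*)&value); // Clearing restores it.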
+
+
+// Signal-handling helpers.
+static volatile sig_atomic_t signal_counter;
+
+static void setup_signal(int sig_num,
+ void (*handler)(int, siginfo_t*, void*),
+ timer_t* timer_id) {
+ int ret;
+ struct sigevent event;
+ struct sigaction act;
+ struct itimerspec timer_specs;
+
+ memset(&event, 0, sizeof(struct sigevent));
+ memset(&act, 0, sizeof(struct sigaction));
+ memset(&timer_specs, 0, sizeof(struct itimerspec));
+
+ // Call signal_handler when receiving a signal.
+ act.sa_sigaction = handler;
+ sigemptyset(&act.sa_mask);
+ act.sa_flags = SA_SIGINFO;
+
+ ret = sigaction(sig_num, &act, NULL);
+ CHECK(ret == 0);
+
+ event.sigev_notify = SIGEV_SIGNAL;
+ event.sigev_signo = sig_num;
+
+  // Arm the timer: first expiry after one second, then every 10ms thereafter.
+ timer_specs.it_value.tv_sec = 1;
+ timer_specs.it_interval.tv_nsec = 10000000;
+
+ ret = timer_create(CLOCK_REALTIME, &event, timer_id);
+ CHECK(ret == 0);
+
+  ret = timer_settime(*timer_id, 0, &timer_specs, NULL);
+  CHECK(ret == 0);
+}
+
+
+static void teardown_signal(timer_t* timer_id) {
+ int ret = timer_delete(*timer_id);
+ CHECK(ret == 0);
+}
+
+
+// -----------------------------------------------------------------------------
+// Tests.
+// -----------------------------------------------------------------------------
+
+
+// Read from a value using tagged addresses.
+static void test_read(void) {
+ uint64_t val = 0xbadbeef;
+ for (int tag = 0; tag < kTagCount; tag++) {
+ uint64_t * tagged = get_tagged_address(&val, tag);
+ val = tag;
+ CHECK(*tagged == tag);
+ check_tag(tagged, tag);
+ }
+}
+
+
+// Write to a value using tagged addresses.
+static void test_write(void) {
+ uint64_t val = 0xbadbeef;
+ for (int tag = 0; tag < kTagCount; tag++) {
+ uint64_t * tagged = get_tagged_address(&val, tag);
+ *tagged = tag;
+ CHECK(val == tag);
+ check_tag(tagged, tag);
+ }
+}
+
+
+// Read from sequential addresses in a heap buffer with varying tags.
+static void test_sequential_read_heap(void) {
+  uint8_t * data = calloc(kTagCount, sizeof(uint8_t));
+  CHECK(data != NULL);
+ for (int tag = 0; tag < kTagCount; tag++) {
+ uint8_t * tagged = get_tagged_address(data, tag);
+ data[tag] = tag;
+ CHECK(tagged[tag] == tag);
+ check_tag(tagged, tag);
+ }
+ free(data);
+}
+
+
+// Write to sequential addresses in a heap buffer with varying tags.
+static void test_sequential_write_heap(void) {
+  uint8_t * data = calloc(kTagCount, sizeof(uint8_t));
+  CHECK(data != NULL);
+ for (int tag = 0; tag < kTagCount; tag++) {
+ uint8_t * tagged = get_tagged_address(data, tag);
+ tagged[tag] = tag;
+ CHECK(data[tag] == tag);
+ check_tag(tagged, tag);
+ }
+ free(data);
+}
+
+
+// Read from sequential addresses in a stack buffer with varying tags.
+static void test_sequential_read_stack(void) {
+ uint8_t data[kTagCount];
+ for (int tag = 0; tag < kTagCount; tag++) {
+ uint8_t * tagged = get_tagged_address(data, tag);
+ data[tag] = tag;
+ CHECK(tagged[tag] == tag);
+ check_tag(tagged, tag);
+ }
+}
+
+
+// Write to sequential addresses in a stack buffer with varying tags.
+static void test_sequential_write_stack(void) {
+ uint8_t data[kTagCount];
+ for (int tag = 0; tag < kTagCount; tag++) {
+ uint8_t * tagged = get_tagged_address(data, tag);
+ tagged[tag] = tag;
+ CHECK(data[tag] == tag);
+ check_tag(tagged, tag);
+ }
+}
+
+
+// Access a large heap buffer (spanning multiple pages) using tagged addresses.
+static void test_large_heap(void) {
+ int count = 10000;
+  uint64_t * data = calloc(count, sizeof(uint64_t));
+  CHECK(data != NULL);
+
+ for (int tag = 0; tag < kTagCount; tag++) {
+ uint64_t * tagged_address = get_tagged_address(data, tag);
+ for (int i = 0; i < count; i++) {
+ tagged_address[i] = i;
+ CHECK(data[i] == i);
+ check_tag(tagged_address, tag);
+ }
+ }
+ for (int tag = 0; tag < kTagCount; tag++) {
+ uint64_t * tagged_address = get_tagged_address(data, tag);
+ for (int i = 0; i < count; i++) {
+ CHECK(tagged_address[i] == i);
+ check_tag(tagged_address, tag);
+ }
+ }
+ free(data);
+}
+
+
+// Access a large stack buffer (spanning multiple pages) using tagged addresses.
+static void test_large_stack(void) {
+ uint64_t data[10000];
+ int count = sizeof(data)/sizeof(data[0]);
+
+ for (int tag = 0; tag < kTagCount; tag++) {
+ uint64_t * tagged_address = get_tagged_address(data, tag);
+ for (int i = 0; i < count; i++) {
+ tagged_address[i] = i;
+ CHECK(data[i] == i);
+ check_tag(tagged_address, tag);
+ }
+ }
+ for (int tag = 0; tag < kTagCount; tag++) {
+ uint64_t * tagged_address = get_tagged_address(data, tag);
+ for (int i = 0; i < count; i++) {
+ CHECK(tagged_address[i] == i);
+ check_tag(tagged_address, tag);
+ }
+ }
+}
+
+
+// Test that accesses work with tagged addresses with concurrent signal
+// handlers. This checks that the process state is properly preserved when a
+// signal is received.
+static void handler_runtests(int sig_num, siginfo_t * siginfo, void * ucontext) {
+ test_read();
+ test_write();
+ signal_counter++;
+}
+
+static void test_signal(void) {
+ int watchdog = 1000000;
+ timer_t timer_id;
+ setup_signal(SIGINT, handler_runtests, &timer_id);
+  // Stop once at least a hundred signals have been handled.
+ while (signal_counter <= 100) {
+ usleep(1);
+ test_read();
+ test_write();
+ CHECK(--watchdog > 0);
+ }
+ teardown_signal(&timer_id);
+}
+
+
+// Test that signal handlers can themselves access tagged addresses.
+static uint64_t global_value;
+
+static void handler_tag_pointer(int sig_num, siginfo_t* siginfo, void* ucontext) {
+ static int tag = 0;
+ uint64_t * tagged = get_tagged_address(&global_value, tag);
+ *tagged = tag;
+ if (++tag >= kTagCount) tag = 0;
+ signal_counter++;
+}
+
+static void test_signal_handler(void) {
+ int watchdog = 1000000;
+ timer_t timer_id;
+ global_value = 0xbadbeef;
+ setup_signal(SIGTERM, handler_tag_pointer, &timer_id);
+ // Wait for the signal handler to execute at least kTagCount times.
+ while (signal_counter < kTagCount) {
+ usleep(1);
+ CHECK(--watchdog > 0);
+ }
+ teardown_signal(&timer_id);
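+  // Each handler invocation stored its current tag value into global_value,
+  // so whatever was written last must be a valid tag.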
+ CHECK(global_value < kTagCount);
+}
+
+
+// Test that memory allocated explicitly with MAP_PRIVATE can be accessed using
+// tagged addresses.
+static void test_mmap_private_data(void) {
+ int const count = 100;
+ uint64_t * data = (uint64_t*) mmap(NULL,
+ count * sizeof(uint64_t),
+ PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE,
+ -1,
+ 0);
+ CHECK(data != MAP_FAILED);
+ for (int tag = 0; tag < kTagCount; tag++) {
+ uint64_t * tagged_address = get_tagged_address(data, tag);
+ for (int i = 0; i < count; i++) {
+ tagged_address[i] = i;
+ CHECK(data[i] == i);
+ }
+ check_tag(tagged_address, tag);
+ }
+ for (int tag = 0; tag < kTagCount; tag++) {
+ uint64_t * tagged_address = get_tagged_address(data, tag);
+ for (int i = 0; i < count; i++) {
+ CHECK(tagged_address[i] == i);
+ }
+ check_tag(tagged_address, tag);
+ }
+
+ CHECK(munmap(data, count * sizeof(uint64_t)) == 0);
+}
+
+
+// Test that memory allocated explicitly with MAP_SHARED can be accessed using
+// tagged addresses.
+static void test_mmap_shared_data(void) {
+ int const count = 100;
+ uint64_t * data = (uint64_t*) mmap(NULL,
+ count * sizeof(uint64_t),
+ PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_SHARED,
+ -1,
+ 0);
+ CHECK(data != MAP_FAILED);
+ for (int tag = 0; tag < kTagCount; tag++) {
+ uint64_t * tagged_address = get_tagged_address(data, tag);
+ for (int i = 0; i < count; i++) {
+ tagged_address[i] = i;
+ CHECK(data[i] == i);
+ }
+ check_tag(tagged_address, tag);
+ }
+ for (int tag = 0; tag < kTagCount; tag++) {
+ uint64_t * tagged_address = get_tagged_address(data, tag);
+ for (int i = 0; i < count; i++) {
+ CHECK(tagged_address[i] == i);
+ }
+ check_tag(tagged_address, tag);
+ }
+
+ CHECK(munmap(data, count * sizeof(uint64_t)) == 0);
+}
+
+
+// Test that a large memory buffer (spanning multiple pages) allocated
+// explicitly with MAP_PRIVATE can be accessed using tagged addresses.
+static void test_large_mmap_private_data(void) {
+ int const count = 10000;
+ uint64_t * data = (uint64_t*) mmap(NULL,
+ count * sizeof(uint64_t),
+ PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE,
+ -1,
+ 0);
+ CHECK(data != MAP_FAILED);
+
+ // The resulting address should never be tagged.
+ check_tag(data, 0);
+
+ for (int tag = 0; tag < kTagCount; tag++) {
+ uint64_t * tagged_address = get_tagged_address(data, tag);
+ for (int i = 0; i < count; i++) {
+ tagged_address[i] = i;
+ CHECK(data[i] == i);
+ }
+ check_tag(tagged_address, tag);
+ }
+ for (int tag = 0; tag < kTagCount; tag++) {
+ uint64_t * tagged_address = get_tagged_address(data, tag);
+ for (int i = 0; i < count; i++) {
+ CHECK(tagged_address[i] == i);
+ }
+ check_tag(tagged_address, tag);
+ }
+
+ CHECK(munmap(data, count * sizeof(uint64_t)) == 0);
+}
+
+
+// Test that a large memory buffer (spanning multiple pages) allocated
+// explicitly with MAP_SHARED can be accessed using tagged addresses.
+static void test_large_mmap_shared_data(void) {
+ int const count = 10000;
+ uint64_t * data = (uint64_t*) mmap(NULL,
+ count * sizeof(uint64_t),
+ PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_SHARED,
+ -1,
+ 0);
+ CHECK(data != MAP_FAILED);
+
+ // The resulting address should never be tagged.
+ check_tag(data, 0);
+
+ for (int tag = 0; tag < kTagCount; tag++) {
+ uint64_t * tagged_address = get_tagged_address(data, tag);
+ for (int i = 0; i < count; i++) {
+ tagged_address[i] = i;
+ CHECK(data[i] == i);
+ }
+ check_tag(tagged_address, tag);
+ }
+ for (int tag = 0; tag < kTagCount; tag++) {
+ uint64_t * tagged_address = get_tagged_address(data, tag);
+ for (int i = 0; i < count; i++) {
+ CHECK(tagged_address[i] == i);
+ }
+ check_tag(tagged_address, tag);
+ }
+
+ CHECK(munmap(data, count * sizeof(uint64_t)) == 0);
+}
+
+
+// Test that memory allocated explicitly with MAP_SHARED can be accessed using
+// tagged addresses from multiple threads.
+static void test_multithreaded_mmap_shared_data(void) {
+ int watchdog = 1000000;
+ uint64_t * data = (uint64_t*) mmap(NULL,
+ kTagCount * kTagCount * sizeof(uint64_t),
+ PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_SHARED,
+ -1,
+ 0);
+ CHECK(data != MAP_FAILED);
+
+  for (int i = 0; i < kTagCount * kTagCount; i++) {
+ data[i] = 0xbadbeef;
+ }
+
+  pid_t pid = fork();
+  CHECK(pid >= 0);
+
+ // Write from the parent and check that the child observes the values.
+ for (int parent_tag = 0; parent_tag < kTagCount; parent_tag++) {
+ for (int child_tag = 0; child_tag < kTagCount; child_tag++) {
+ // Write (and read) a value that is unique to this iteration.
+ int index = (child_tag * kTagCount) + parent_tag;
+ uint64_t expected = UINT64_C(0x1111111100000000) + index;
+
+ if (pid != 0) {
+ // ---- Parent ----
+ // Write data to tagged addresses.
+ uint64_t volatile * tagged = get_tagged_address(data, parent_tag);
+ tagged[index] = expected;
+ __sync_synchronize();
+ } else {
+ // ---- Child ----
+ // Read data from a tagged address.
+ uint64_t volatile * tagged = get_tagged_address(data, child_tag);
+ while (tagged[index] != expected) {
+ CHECK(--watchdog > 0);
+ sched_yield();
+ __sync_synchronize();
+ }
+ }
+ }
+ }
+
+ // Do the same in reverse: write from the child.
+ for (int parent_tag = 0; parent_tag < kTagCount; parent_tag++) {
+ for (int child_tag = 0; child_tag < kTagCount; child_tag++) {
+ // Write (and read) a value that is unique to this iteration.
+ int index = (child_tag * kTagCount) + parent_tag;
+ uint64_t expected = UINT64_C(0x2222222200000000) + index;
+
+ if (pid != 0) {
+ // ---- Parent ----
+ // Read data from a tagged address.
+ uint64_t volatile * tagged = get_tagged_address(data, parent_tag);
+ while (tagged[index] != expected) {
+ CHECK(--watchdog > 0);
+ sched_yield();
+ __sync_synchronize();
+ }
+ } else {
+ // ---- Child ----
+ // Write data to tagged addresses.
+ uint64_t volatile * tagged = get_tagged_address(data, child_tag);
+ tagged[index] = expected;
+ __sync_synchronize();
+ }
+ }
+ }
+
+ if (pid != 0) {
+ // ---- Parent ----
+ int stat = 1;
+ CHECK(waitpid(pid, &stat, 0) == pid);
+ CHECK(stat == 0);
+ } else {
+ // ---- Child ----
+ exit(EXIT_SUCCESS);
+ }
+
+  CHECK(munmap(data, kTagCount * kTagCount * sizeof(uint64_t)) == 0);
+}
+
+
+#ifndef DISABLE_ASM
+
+
+static void test_load_store(void) {
+ uint64_t data_from[3] = {0xbadbeef, 0xbeefbad, 0xbeefbeef};
+ uint64_t data_to[3] = {0, 0, 0};
+ uint64_t* tagged_address_from_in = NULL;
+ uint64_t* tagged_address_from_out = NULL;
+ uint64_t* tagged_address_to_in = NULL;
+ uint64_t* tagged_address_to_out = NULL;
+ for (int tag = 0; tag < kTagCount; tag++) {
+ tagged_address_from_in = get_tagged_address(data_from, tag);
+ tagged_address_to_in = get_tagged_address(data_to, tag);
+ asm volatile (
+ "ldp x10, x11, [%x[adr_from]] \n\t"
+ "stp x10, x11, [%x[adr_to]] \n\t"
+ "ldr x10, [%x[adr_from], #16] \n\t"
+ "str x10, [%x[adr_to], #16] \n\t"
+ : [adr_from] "=&r" (tagged_address_from_out),
+ [adr_to] "=&r" (tagged_address_to_out)
+ : "[adr_from]" (tagged_address_from_in),
+ "[adr_to]" (tagged_address_to_in)
+ : "x10",
+ "x11"
+ );
+ // Check that the tag is still present.
+ check_tag(tagged_address_from_out, tag);
+ check_tag(tagged_address_to_out, tag);
+ for (int i = 0; i < 3; i++) {
+ CHECK(data_to[i] == data_from[i]);
+ CHECK(tagged_address_to_in[i] == tagged_address_from_in[i]);
+ CHECK(tagged_address_to_out[i] == tagged_address_from_out[i]);
+ data_to[i] = 0;
+ }
+ }
+}
+
+
+static void test_load_store_preindex(void) {
+ uint64_t data_from[6] = {0xbadbeef,
+ 0xbeefbad,
+ 0xbeefbeef,
+ 0x123456789,
+ 0x987654321,
+ 0xabcdef};
+ uint64_t data_to[6] = {0, 0, 0, 0, 0, 0};
+ uint64_t* tagged_address_from_in = NULL;
+ uint64_t* tagged_address_from_out = NULL;
+ uint64_t* tagged_address_to_in = NULL;
+ uint64_t* tagged_address_to_out = NULL;
+ for (int tag = 0; tag < kTagCount; tag++) {
+ tagged_address_from_in = get_tagged_address(data_from, tag);
+ tagged_address_to_in = get_tagged_address(data_to, tag);
+ asm volatile (
+ "add %x[adr_from], %x[adr_from], #8 \n\t"
+ "add %x[adr_to], %x[adr_to], #8 \n\t"
+ "ldp x10, x11, [%x[adr_from], #-8]! \n\t"
+ "stp x10, x11, [%x[adr_to], #-8]! \n\t"
+
+ "add %x[adr_from], %x[adr_from], #32 \n\t"
+ "add %x[adr_to], %x[adr_to], #32 \n\t"
+ "ldr x10, [%x[adr_from], #-16]! \n\t"
+ "str x10, [%x[adr_to], #-16]! \n\t"
+
+ "ldp x10, x11, [%x[adr_from], #8]! \n\t"
+ "stp x10, x11, [%x[adr_to], #8]! \n\t"
+ "ldr x10, [%x[adr_from], #16]! \n\t"
+ "str x10, [%x[adr_to], #16]! \n\t"
+ "sub %x[adr_from], %x[adr_from], #40 \n\t"
+ "sub %x[adr_to], %x[adr_to], #40 \n\t"
+ : [adr_from] "=&r" (tagged_address_from_out),
+ [adr_to] "=&r" (tagged_address_to_out)
+ : "[adr_from]" (tagged_address_from_in),
+ "[adr_to]" (tagged_address_to_in)
+ : "x10",
+ "x11"
+ );
+ // Check that the tag is still present.
+ check_tag(tagged_address_from_out, tag);
+ check_tag(tagged_address_to_out, tag);
+ for (int i = 0; i < 6; i++) {
+ CHECK(data_to[i] == data_from[i]);
+ CHECK(tagged_address_to_in[i] == tagged_address_from_in[i]);
+ CHECK(tagged_address_to_out[i] == tagged_address_from_out[i]);
+ data_to[i] = 0;
+ }
+ }
+}
+
+
+static void test_load_store_postindex(void) {
+ uint64_t data_from[6] = {0xbadbeef,
+ 0xbeefbad,
+ 0xbeefbeef,
+ 0x123456789,
+ 0x987654321,
+ 0xabcdef};
+ uint64_t data_to[6] = {0, 0, 0, 0, 0, 0};
+ uint64_t* tagged_address_from_in = NULL;
+ uint64_t* tagged_address_from_out = NULL;
+ uint64_t* tagged_address_to_in = NULL;
+ uint64_t* tagged_address_to_out = NULL;
+ for (int tag = 0; tag < kTagCount; tag++) {
+ tagged_address_from_in = get_tagged_address(data_from, tag);
+ tagged_address_to_in = get_tagged_address(data_to, tag);
+ asm volatile (
+ "ldp x10, x11, [%x[adr_from]], #16 \n\t"
+ "stp x10, x11, [%x[adr_to]], #16 \n\t"
+ "ldr x10, [%x[adr_from]], #8 \n\t"
+ "str x10, [%x[adr_to]], #8 \n\t"
+
+ "add %x[adr_from], %x[adr_from], #16 \n\t"
+ "add %x[adr_to], %x[adr_to], #16 \n\t"
+ "ldr x10, [%x[adr_from]], #-16 \n\t"
+ "str x10, [%x[adr_to]], #-16 \n\t"
+ "ldp x10, x11, [%x[adr_from]], #-24 \n\t"
+ "stp x10, x11, [%x[adr_to]], #-24 \n\t"
+ : [adr_from] "=&r" (tagged_address_from_out),
+ [adr_to] "=&r" (tagged_address_to_out)
+ : "[adr_from]" (tagged_address_from_in),
+ "[adr_to]" (tagged_address_to_in)
+ : "x10",
+ "x11"
+ );
+ // Check that the tag is still present.
+ check_tag(tagged_address_from_out, tag);
+ check_tag(tagged_address_to_out, tag);
+ for (int i = 0; i < 6; i++) {
+ CHECK(data_to[i] == data_from[i]);
+ CHECK(tagged_address_to_in[i] == tagged_address_from_in[i]);
+ CHECK(tagged_address_to_out[i] == tagged_address_from_out[i]);
+ data_to[i] = 0;
+ }
+ }
+}
+
+
+static void test_load_store_register_offset(void) {
+ uint8_t data_from[kTagCount];
+ uint8_t* tagged_data_from_in = NULL;
+ uint8_t* tagged_data_from_out = NULL;
+ uint8_t data_to[kTagCount];
+ uint8_t* tagged_data_to_in = NULL;
+ uint8_t* tagged_data_to_out = NULL;
+ for (int tag1 = 0; tag1 < kTagCount; tag1++) {
+ for (int tag2 = 0; tag2 < kTagCount; tag2++) {
+ data_from[tag2] = tag2;
+ data_to[tag2] = 0xbb;
+ }
+ tagged_data_from_in = get_tagged_address(data_from, tag1);
+ tagged_data_to_in = get_tagged_address(data_to, tag1);
+ uint32_t counter = kTagCount;
+ asm volatile (
+ "0: \n\t"
+ "sub %w[cnt], %w[cnt], #8 \n\t"
+#ifdef DISABLE_TAGS
+ "mov w10, %w[cnt] \n\t"
+#else
+ "orr x10, %x[cnt], %x[cnt], lsl 54 \n\t"
+#endif
+ "ldr x11, [%x[data_from], x10] \n\t"
+ "str x11, [%x[data_to], x10] \n\t"
+ "cbnz %w[cnt], 0b \n\t"
+ : [cnt] "+&r" (counter),
+ [data_from] "=&r" (tagged_data_from_out),
+ [data_to] "=&r" (tagged_data_to_out)
+ : "[data_from]" (tagged_data_from_in),
+ "[data_to]" (tagged_data_to_in)
+ : "x10",
+ "x11"
+ );
+ check_tag(tagged_data_from_out, tag1);
+ check_tag(tagged_data_to_out, tag1);
+ for (int tag2 = 0; tag2 < kTagCount; tag2++) {
+ CHECK(tagged_data_from_out[tag2] == tag2);
+ CHECK(tagged_data_to_out[tag2] == tag2);
+ CHECK(data_to[tag2] == tag2);
+ }
+ }
+}
+
+
+static void test_load_store_s(void) {
+ uint32_t data_from[3] = {0xbadbeef,
+ 0xbeefbad,
+ 0xbeefbeef};
+ uint32_t data_to[3] = {0, 0, 0};
+ uint32_t* tagged_address_from_in = NULL;
+ uint32_t* tagged_address_from_out = NULL;
+ uint32_t* tagged_address_to_in = NULL;
+ uint32_t* tagged_address_to_out = NULL;
+ for (int tag = 0; tag < kTagCount; tag++) {
+ tagged_address_from_in = get_tagged_address(data_from, tag);
+ tagged_address_to_in = get_tagged_address(data_to, tag);
+ asm volatile (
+ "ldp s16, s17, [%x[adr_from]] \n\t"
+ "stp s16, s17, [%x[adr_to]] \n\t"
+ "ldr s16, [%x[adr_from], #8] \n\t"
+ "str s16, [%x[adr_to], #8] \n\t"
+ : [adr_from] "=&r" (tagged_address_from_out),
+ [adr_to] "=&r" (tagged_address_to_out)
+ : "[adr_from]" (tagged_address_from_in),
+ "[adr_to]" (tagged_address_to_in)
+ : "s16",
+ "s17"
+ );
+ // Check that the tag is still present.
+ check_tag(tagged_address_from_out, tag);
+ check_tag(tagged_address_to_out, tag);
+ for (int i = 0; i < 3; i++) {
+ CHECK(tagged_address_to_in[i] == tagged_address_from_in[i]);
+ CHECK(tagged_address_to_out[i] == tagged_address_from_out[i]);
+ CHECK(data_to[i] == data_from[i]);
+ data_to[i] = 0;
+ }
+ }
+}
+
+
+static void test_load_store_d(void) {
+ uint64_t data_from[3] = {0xbadbeef,
+ 0xbeefbad,
+ 0xbeefbeef};
+ uint64_t data_to[3] = {0, 0, 0};
+ uint64_t* tagged_address_from_in = NULL;
+ uint64_t* tagged_address_from_out = NULL;
+ uint64_t* tagged_address_to_in = NULL;
+ uint64_t* tagged_address_to_out = NULL;
+ for (int tag = 0; tag < kTagCount; tag++) {
+ tagged_address_from_in = get_tagged_address(data_from, tag);
+ tagged_address_to_in = get_tagged_address(data_to, tag);
+ asm volatile (
+ "ldp d16, d17, [%x[adr_from]] \n\t"
+ "stp d16, d17, [%x[adr_to]] \n\t"
+ "ldr d16, [%x[adr_from], #16] \n\t"
+ "str d16, [%x[adr_to], #16] \n\t"
+ : [adr_from] "=&r" (tagged_address_from_out),
+ [adr_to] "=&r" (tagged_address_to_out)
+ : "[adr_from]" (tagged_address_from_in),
+ "[adr_to]" (tagged_address_to_in)
+ : "d16",
+ "d17"
+ );
+ // Check that the tag is still present.
+ check_tag(tagged_address_from_out, tag);
+ check_tag(tagged_address_to_out, tag);
+ for (int i = 0; i < 3; i++) {
+ CHECK(tagged_address_to_in[i] == tagged_address_from_in[i]);
+ CHECK(tagged_address_to_out[i] == tagged_address_from_out[i]);
+ CHECK(data_to[i] == data_from[i]);
+ data_to[i] = 0;
+ }
+ }
+}
+
+
+static void test_load_store_q(void) {
+ uint64_t data_from[6] = {0xbadbeef,
+ 0xbeefbad,
+ 0xbeefbeef,
+ 0xbadbad,
+ 0x123456789,
+ 0x987654321};
+ uint64_t data_to[6] = {0, 0, 0, 0, 0, 0};
+ uint64_t* tagged_address_from_in = NULL;
+ uint64_t* tagged_address_from_out = NULL;
+ uint64_t* tagged_address_to_in = NULL;
+ uint64_t* tagged_address_to_out = NULL;
+ for (int tag = 0; tag < kTagCount; tag++) {
+ tagged_address_from_in = get_tagged_address(data_from, tag);
+ tagged_address_to_in = get_tagged_address(data_to, tag);
+ asm volatile (
+ "ldp q16, q17, [%x[adr_from]] \n\t"
+ "stp q16, q17, [%x[adr_to]] \n\t"
+ "ldr q16, [%x[adr_from], #32] \n\t"
+ "str q16, [%x[adr_to], #32] \n\t"
+ : [adr_from] "=&r" (tagged_address_from_out),
+ [adr_to] "=&r" (tagged_address_to_out)
+ : "[adr_from]" (tagged_address_from_in),
+ "[adr_to]" (tagged_address_to_in)
+ : "q16",
+ "q17"
+ );
+ // Check that the tag is still present.
+ check_tag(tagged_address_from_out, tag);
+ check_tag(tagged_address_to_out, tag);
+ for (int i = 0; i < 6; i++) {
+ CHECK(tagged_address_to_in[i] == tagged_address_from_in[i]);
+ CHECK(tagged_address_to_out[i] == tagged_address_from_out[i]);
+ CHECK(data_to[i] == data_from[i]);
+ data_to[i] = 0;
+ }
+ }
+}
+
+
+static void test_load_store_neon(void) {
+ uint64_t data_from[10] = {0xbadbeef,
+ 0xbeefbad,
+ 0xbeefbeef,
+ 0xbadbad,
+ 0xabcddcba,
+ 0xdcbaabcd,
+ 0x543212345,
+ 0x123456789,
+ 0x987654321,
+ 0x123454321};
+ uint64_t data_to[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ uint64_t* tagged_address_from_in = NULL;
+ uint64_t* tagged_address_from_out = NULL;
+ uint64_t* tagged_address_to_in = NULL;
+ uint64_t* tagged_address_to_out = NULL;
+ for (int tag = 0; tag < kTagCount; tag++) {
+ tagged_address_from_in = get_tagged_address(data_from, tag);
+ tagged_address_to_in = get_tagged_address(data_to, tag);
+ asm volatile (
+ "ld1 {v16.D}[0], [%x[adr_from]], #8 \n\t"
+ "st1 {v16.D}[0], [%x[adr_to]], #8 \n\t"
+ "ld2 {v16.D, v17.D}[0], [%x[adr_from]], #16 \n\t"
+ "st2 {v16.D, v17.D}[0], [%x[adr_to]], #16 \n\t"
+ "ld3 {v16.D, v17.D, v18.D}[0], [%x[adr_from]], #24 \n\t"
+ "st3 {v16.D, v17.D, v18.D}[0], [%x[adr_to]], #24 \n\t"
+ "ld4 {v16.D, v17.D, v18.D, v19.D}[0], [%x[adr_from]], #32 \n\t"
+ "st4 {v16.D, v17.D, v18.D, v19.D}[0], [%x[adr_to]], #32 \n\t"
+ "sub %x[adr_to], %x[adr_to], #80 \n\t"
+ "sub %x[adr_from], %x[adr_from], #80 \n\t"
+ : [adr_from] "=&r" (tagged_address_from_out),
+ [adr_to] "=&r" (tagged_address_to_out)
+ : "[adr_from]" (tagged_address_from_in),
+ "[adr_to]" (tagged_address_to_in)
+ : "v16",
+ "v17",
+ "v18",
+ "v19"
+ );
+ // Check that the tag is still present.
+ check_tag(tagged_address_from_out, tag);
+ check_tag(tagged_address_to_out, tag);
+ for (int i = 0; i < 10; i++) {
+ CHECK(tagged_address_to_in[i] == tagged_address_from_in[i]);
+ CHECK(tagged_address_to_out[i] == tagged_address_from_out[i]);
+ CHECK(data_to[i] == data_from[i]);
+ data_to[i] = 0;
+ }
+ }
+}
+
+
+static void test_load_store_neon_ld1_register_offset(void) {
+  // Allocate four uint16_t (8 bytes) per tag; each ld1/st1 below transfers
+  // one such group per iteration.
+ const unsigned int data_size = kTagCount * 4;
+ uint16_t data_from[data_size];
+ uint16_t* tagged_data_from_in = NULL;
+ uint16_t* tagged_data_from_out = NULL;
+ uint16_t data_to[data_size];
+ uint16_t* tagged_data_to_in = NULL;
+ uint16_t* tagged_data_to_out = NULL;
+ for (int tag1 = 0; tag1 < kTagCount; tag1++) {
+ for(unsigned int i = 0; i < data_size; i++) {
+ data_from[i] = i;
+ data_to[i] = 0xbeef;
+ }
+ tagged_data_from_out = data_from;
+ tagged_data_to_out = data_to;
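+    // The inner loop walks the buffers with a post-index register whose top
+    // byte holds tag2, so each step adds tag2 on top of the freshly applied
+    // tag1; the resulting tag (and check_tag's uint8_t parameter) wraps
+    // modulo kTagCount.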
+ for (int tag2 = 0; tag2 < kTagCount; tag2++) {
+      // Apply tag1 to the addresses left (untagged) by the previous
+      // iteration.
+ tagged_data_from_in = get_tagged_address(tagged_data_from_out, tag1);
+ tagged_data_to_in = get_tagged_address(tagged_data_to_out, tag1);
+ asm volatile (
+ "mov x10, #8 \n\t"
+#ifndef DISABLE_TAGS
+ "orr x10, x10, %x[tag], lsl 56 \n\t"
+#endif
+ "ld1 {v16.D}[0], [%x[data_from]], x10 \n\t"
+ "st1 {v16.D}[0], [%x[data_to]], x10 \n\t"
+ : [data_from] "=&r" (tagged_data_from_out),
+ [data_to] "=&r" (tagged_data_to_out)
+ : "[data_from]" (tagged_data_from_in),
+#ifndef DISABLE_TAGS
+ [tag] "r" (tag2),
+#endif
+ "[data_to]" (tagged_data_to_in)
+ : "v16",
+ "x10"
+ );
+ check_tag(tagged_data_from_out, tag1 + tag2);
+ check_tag(tagged_data_to_out, tag1 + tag2);
+ // Clear the tags.
+ tagged_data_from_out = get_clear_address(tagged_data_from_out);
+ tagged_data_to_out = get_clear_address(tagged_data_to_out);
+ }
+ // Move data_out back to the first element.
+ tagged_data_from_out-=data_size;
+ tagged_data_to_out-=data_size;
+ for(unsigned int i = 0; i < data_size; i++) {
+ CHECK(tagged_data_from_out[i] == i);
+ CHECK(tagged_data_to_out[i] == i);
+ CHECK(data_to[i] == i);
+ }
+ }
+}
+
+
+static void test_load_store_neon_ld2_register_offset(void) {
+  // Allocate eight uint16_t (16 bytes) per tag; each ld2/st2 below transfers
+  // one such group per iteration.
+ const unsigned int data_size = kTagCount * 8;
+ uint16_t data_from[data_size];
+ uint16_t* tagged_data_from_in = NULL;
+ uint16_t* tagged_data_from_out = NULL;
+ uint16_t data_to[data_size];
+ uint16_t* tagged_data_to_in = NULL;
+ uint16_t* tagged_data_to_out = NULL;
+ for (int tag1 = 0; tag1 < kTagCount; tag1++) {
+ for(unsigned int i = 0; i < data_size; i++) {
+ data_from[i] = i;
+ data_to[i] = 0xbeef;
+ }
+ tagged_data_from_out = data_from;
+ tagged_data_to_out = data_to;
+ for (int tag2 = 0; tag2 < kTagCount; tag2++) {
+      // Apply tag1 to the addresses left (untagged) by the previous
+      // iteration.
+ tagged_data_from_in = get_tagged_address(tagged_data_from_out, tag1);
+ tagged_data_to_in = get_tagged_address(tagged_data_to_out, tag1);
+ asm volatile (
+ "mov x10, #16 \n\t"
+#ifndef DISABLE_TAGS
+ "orr x10, x10, %x[tag], lsl 56 \n\t"
+#endif
+ "ld2 {v16.D, v17.D}[0], [%x[data_from]], x10 \n\t"
+ "st2 {v16.D, v17.D}[0], [%x[data_to]], x10 \n\t"
+ : [data_from] "=&r" (tagged_data_from_out),
+ [data_to] "=&r" (tagged_data_to_out)
+ : "[data_from]" (tagged_data_from_in),
+#ifndef DISABLE_TAGS
+ [tag] "r" (tag2),
+#endif
+ "[data_to]" (tagged_data_to_in)
+ : "v16",
+ "v17",
+ "x10"
+ );
+ check_tag(tagged_data_from_out, tag1 + tag2);
+ check_tag(tagged_data_to_out, tag1 + tag2);
+ // Clear the tags.
+ tagged_data_from_out = get_clear_address(tagged_data_from_out);
+ tagged_data_to_out = get_clear_address(tagged_data_to_out);
+ }
+ // Move data_out back to the first element.
+ tagged_data_from_out-=data_size;
+ tagged_data_to_out-=data_size;
+ for(unsigned int i = 0; i < data_size; i++) {
+ CHECK(tagged_data_from_out[i] == i);
+ CHECK(tagged_data_to_out[i] == i);
+ CHECK(data_to[i] == i);
+ }
+ }
+}
+
+
+static void test_load_store_neon_ld3_register_offset(void) {
+  // Allocate twelve uint16_t (24 bytes) per tag; each ld3/st3 below transfers
+  // one such group per iteration.
+ const unsigned int data_size = kTagCount * 12;
+ uint16_t data_from[data_size];
+ uint16_t* tagged_data_from_in = NULL;
+ uint16_t* tagged_data_from_out = NULL;
+ uint16_t data_to[data_size];
+ uint16_t* tagged_data_to_in = NULL;
+ uint16_t* tagged_data_to_out = NULL;
+ for (int tag1 = 0; tag1 < kTagCount; tag1++) {
+ for(unsigned int i = 0; i < data_size; i++) {
+ data_from[i] = i;
+ data_to[i] = 0xbeef;
+ }
+ tagged_data_from_out = data_from;
+ tagged_data_to_out = data_to;
+ for (int tag2 = 0; tag2 < kTagCount; tag2++) {
+      // Apply tag1 to the addresses left (untagged) by the previous
+      // iteration.
+ tagged_data_from_in = get_tagged_address(tagged_data_from_out, tag1);
+ tagged_data_to_in = get_tagged_address(tagged_data_to_out, tag1);
+ asm volatile (
+ "mov x10, #24 \n\t"
+#ifndef DISABLE_TAGS
+ "orr x10, x10, %x[tag], lsl 56 \n\t"
+#endif
+ "ld3 {v16.D, v17.D, v18.D}[0], [%x[data_from]], x10 \n\t"
+ "st3 {v16.D, v17.D, v18.D}[0], [%x[data_to]], x10 \n\t"
+ : [data_from] "=&r" (tagged_data_from_out),
+ [data_to] "=&r" (tagged_data_to_out)
+ : "[data_from]" (tagged_data_from_in),
+#ifndef DISABLE_TAGS
+ [tag] "r" (tag2),
+#endif
+ "[data_to]" (tagged_data_to_in)
+ : "v16",
+ "v17",
+ "v18",
+ "x10"
+ );
+ check_tag(tagged_data_from_out, tag1 + tag2);
+ check_tag(tagged_data_to_out, tag1 + tag2);
+ // Clear the tags.
+ tagged_data_from_out = get_clear_address(tagged_data_from_out);
+ tagged_data_to_out = get_clear_address(tagged_data_to_out);
+ }
+ // Move data_out back to the first element.
+ tagged_data_from_out-=data_size;
+ tagged_data_to_out-=data_size;
+ for(unsigned int i = 0; i < data_size; i++) {
+ CHECK(tagged_data_from_out[i] == i);
+ CHECK(tagged_data_to_out[i] == i);
+ CHECK(data_to[i] == i);
+ }
+ }
+}
+
+
+static void test_load_store_neon_ld4_register_offset(void) {
+  // Allocate sixteen uint16_t (32 bytes) per tag; each ld4/st4 below
+  // transfers one such group per iteration.
+ const unsigned int data_size = kTagCount * 16;
+ uint16_t data_from[data_size];
+ uint16_t* tagged_data_from_in = NULL;
+ uint16_t* tagged_data_from_out = NULL;
+ uint16_t data_to[data_size];
+ uint16_t* tagged_data_to_in = NULL;
+ uint16_t* tagged_data_to_out = NULL;
+ for (int tag1 = 0; tag1 < kTagCount; tag1++) {
+ for(unsigned int i = 0; i < data_size; i++) {
+ data_from[i] = i;
+ data_to[i] = 0xbeef;
+ }
+ tagged_data_from_out = data_from;
+ tagged_data_to_out = data_to;
+ for (int tag2 = 0; tag2 < kTagCount; tag2++) {
+      // Apply tag1 to the addresses left (untagged) by the previous
+      // iteration.
+ tagged_data_from_in = get_tagged_address(tagged_data_from_out, tag1);
+ tagged_data_to_in = get_tagged_address(tagged_data_to_out, tag1);
+ asm volatile (
+ "mov x10, #32 \n\t"
+#ifndef DISABLE_TAGS
+ "orr x10, x10, %x[tag], lsl 56 \n\t"
+#endif
+ "ld4 {v16.D, v17.D, v18.D, v19.D}[0], [%x[data_from]], x10 \n\t"
+ "st4 {v16.D, v17.D, v18.D, v19.D}[0], [%x[data_to]], x10 \n\t"
+ : [data_from] "=&r" (tagged_data_from_out),
+ [data_to] "=&r" (tagged_data_to_out)
+ : "[data_from]" (tagged_data_from_in),
+#ifndef DISABLE_TAGS
+ [tag] "r" (tag2),
+#endif
+ "[data_to]" (tagged_data_to_in)
+ : "v16",
+ "v17",
+ "v18",
+ "v19",
+ "x10"
+ );
+ check_tag(tagged_data_from_out, tag1 + tag2);
+ check_tag(tagged_data_to_out, tag1 + tag2);
+ // Clear the tags.
+ tagged_data_from_out = get_clear_address(tagged_data_from_out);
+ tagged_data_to_out = get_clear_address(tagged_data_to_out);
+ }
+ // Move data_out back to the first element.
+ tagged_data_from_out-=data_size;
+ tagged_data_to_out-=data_size;
+ for(unsigned int i = 0; i < data_size; i++) {
+ CHECK(tagged_data_from_out[i] == i);
+ CHECK(tagged_data_to_out[i] == i);
+ CHECK(data_to[i] == i);
+ }
+ }
+}
+
+
+static void test_executable_memory(void) {
+ int ret;
+ int number_of_instructions = 4;
+ uint32_t* code = NULL;
+ uint64_t data;
+ uint64_t* data_adr = &data;
+ uint32_t* tagged_address_write = NULL;
+ uint32_t* tagged_address_exec = NULL;
+ code = (uint32_t*) mmap(NULL,
+ number_of_instructions * sizeof(uint32_t),
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_PRIVATE | MAP_ANONYMOUS,
+ -1,
+ 0);
+ CHECK(code != MAP_FAILED);
+ for (int tag1 = 0; tag1 < kTagCount; tag1++) {
+ // Write the following code with tagged addresses.
+ // add x0, x0, #1
+ // add x0, x0, #1
+ // add x0, x0, #1
+ // ret
+ tagged_address_write = get_tagged_address(code, tag1);
+ tagged_address_write[0] = 0x91000400;
+ tagged_address_write[1] = 0x91000400;
+ tagged_address_write[2] = 0x91000400;
+ tagged_address_write[3] = 0xd65f03c0;
+ __clear_cache((char*)tagged_address_write,
+ (char*)(&tagged_address_write[4]));
+ for (int tag2 = 0; tag2 < kTagCount; tag2++) {
+ data = 0;
+ // Branching to a tagged address should work (though the tag is cleared
+ // before the PC is written).
+ tagged_address_exec = get_tagged_address(code, tag2);
+ asm volatile(
+ "ldr x0, [%x[data]] \n\t"
+ "blr %x[code] \n\t"
+ "str x0, [%x[data]] \n\t"
+ : [data] "+&r" (data_adr)
+ : [code] "r" (tagged_address_exec)
+ : "x0", "x30"
+ );
+ CHECK(data == 3);
+ }
+ }
+ ret = munmap(code, number_of_instructions * sizeof(uint32_t));
+ CHECK(ret == 0);
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// Public interface.
+// -----------------------------------------------------------------------------
+
+
+#define MAX_TEST_COUNT 50
+struct {
+ int count;
+ struct {
+ char const * name;
+ void (*function)(void);
+ } tests[MAX_TEST_COUNT];
+} manifest;
+
+
+static void add_test_description(char const * name,
+ void (*function)(void)) {
+ CHECK(manifest.count < MAX_TEST_COUNT);
+ manifest.tests[manifest.count].name = name;
+ manifest.tests[manifest.count].function = function;
+ manifest.count++;
+}
+
+
+void test_init() {
+#define ADD_TEST_DESCRIPTION(name) add_test_description(#name, test_##name);
+
+ ADD_TEST_DESCRIPTION(read);
+ ADD_TEST_DESCRIPTION(write);
+ ADD_TEST_DESCRIPTION(sequential_read_heap);
+ ADD_TEST_DESCRIPTION(sequential_write_heap);
+ ADD_TEST_DESCRIPTION(sequential_read_stack);
+ ADD_TEST_DESCRIPTION(sequential_write_stack);
+ ADD_TEST_DESCRIPTION(large_heap);
+ ADD_TEST_DESCRIPTION(large_stack);
+ ADD_TEST_DESCRIPTION(signal);
+ ADD_TEST_DESCRIPTION(signal_handler);
+ ADD_TEST_DESCRIPTION(mmap_private_data);
+ ADD_TEST_DESCRIPTION(mmap_shared_data);
+ ADD_TEST_DESCRIPTION(large_mmap_private_data);
+ ADD_TEST_DESCRIPTION(large_mmap_shared_data);
+ ADD_TEST_DESCRIPTION(multithreaded_mmap_shared_data);
+#ifndef DISABLE_ASM
+ ADD_TEST_DESCRIPTION(load_store);
+ ADD_TEST_DESCRIPTION(load_store_preindex);
+ ADD_TEST_DESCRIPTION(load_store_postindex);
+ ADD_TEST_DESCRIPTION(load_store_register_offset);
+ ADD_TEST_DESCRIPTION(load_store_s);
+ ADD_TEST_DESCRIPTION(load_store_d);
+ ADD_TEST_DESCRIPTION(load_store_q);
+ ADD_TEST_DESCRIPTION(load_store_neon);
+ ADD_TEST_DESCRIPTION(load_store_neon_ld1_register_offset);
+ ADD_TEST_DESCRIPTION(load_store_neon_ld2_register_offset);
+ ADD_TEST_DESCRIPTION(load_store_neon_ld3_register_offset);
+ ADD_TEST_DESCRIPTION(load_store_neon_ld4_register_offset);
+ ADD_TEST_DESCRIPTION(executable_memory);
+#endif
+
+#undef ADD_TEST_DESCRIPTION
+}
+
+
+int test_run_by_name(char const * name) {
+ for (int i = 0; i < manifest.count; i++) {
+ if (strcmp(manifest.tests[i].name, name) == 0) {
+ manifest.tests[i].function();
+ return 0;
+ }
+ }
+ return -1;
+}
+
+
+void test_print_list() {
+ for (int i = 0; i < manifest.count; i++) {
+ puts(manifest.tests[i].name);
+ }
+}
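+
+
+// A minimal standalone driver sketch (illustrative only; the real driver is
+// expected to live in another translation unit that includes test.h). The
+// TEST_STANDALONE_DRIVER macro is hypothetical and not part of the harness.
+#ifdef TEST_STANDALONE_DRIVER
+int main(int argc, char ** argv) {
+  test_init();
+  if (argc < 2) {
+    // With no argument, list the available tests.
+    test_print_list();
+    return EXIT_SUCCESS;
+  }
+  // Otherwise run each named test; test_run_by_name returns -1 if unknown.
+  for (int i = 1; i < argc; i++) {
+    if (test_run_by_name(argv[i]) != 0) {
+      fprintf(stderr, "Unknown test: %s\n", argv[i]);
+      return EXIT_FAILURE;
+    }
+  }
+  return EXIT_SUCCESS;
+}
+#endif  // TEST_STANDALONE_DRIVER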
+
+