// Copyright 2013, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h>

#include "cctest.h"
#include "test-utils-a64.h"
#include "a64/macro-assembler-a64.h"
#include "a64/simulator-a64.h"
#include "a64/debugger-a64.h"
#include "a64/disasm-a64.h"
#include "a64/cpu-a64.h"

namespace vixl {

// Test infrastructure.
//
// Tests are functions which accept no parameters and have no return values.
// The testing code should not perform an explicit return once completed. For
// example, to test the mov immediate instruction a very simple test would be:
//
// TEST(mov_x0_one) {
//   SETUP();
//
//   START();
//   __ mov(x0, Operand(1));
//   END();
//
//   RUN();
//
//   ASSERT_EQUAL_64(1, x0);
//
//   TEARDOWN();
// }
//
// Within a START ... END block all registers but sp can be modified. sp has to
// be explicitly saved/restored. The END() macro replaces the function return
// so it may appear multiple times in a test if the test has multiple exit
// points.
//
// Once the test has been run, all integer and floating point registers, as
// well as the flags, are accessible through a RegisterDump instance. See
// test-utils-a64.cc for more information on RegisterDump.
//
// We provide some helper asserts to handle common cases:
//
//   ASSERT_EQUAL_32(int32_t, int32_t)
//   ASSERT_EQUAL_FP32(float, float)
//   ASSERT_EQUAL_32(int32_t, W register)
//   ASSERT_EQUAL_FP32(float, S register)
//   ASSERT_EQUAL_64(int64_t, int64_t)
//   ASSERT_EQUAL_FP64(double, double)
//   ASSERT_EQUAL_64(int64_t, X register)
//   ASSERT_EQUAL_64(X register, X register)
//   ASSERT_EQUAL_FP64(double, D register)
//
// e.g. ASSERT_EQUAL_FP64(0.5, d30);
//
// If more advanced computation is required before the assert, access the
// RegisterDump named core directly:
//
//   ASSERT_EQUAL_64(0x1234, core->reg_x0() & 0xffff);
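//
// Tests that need to check the status flags follow the same pattern. As a
// minimal sketch (TEST(ands) below is the complete version; the name
// ands_minimal is illustrative only):
//
// TEST(ands_minimal) {
//   SETUP();
//
//   START();
//   __ Mov(w1, 0xf00000ff);
//   __ Ands(w0, w1, Operand(w1));
//   END();
//
//   RUN();
//
//   ASSERT_EQUAL_NZCV(NFlag);
//   ASSERT_EQUAL_64(0xf00000ff, x0);
//
//   TEARDOWN();
// }
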
#define __ masm.
#define TEST(name) TEST_(ASM_##name)

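// Default size, in bytes, of the code buffer allocated for each test.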
#define BUF_SIZE (4096)

#ifdef USE_SIMULATOR
// Run tests with the simulator.

#define SETUP() \
  MacroAssembler masm(BUF_SIZE); \
  SETUP_COMMON()

#define SETUP_CUSTOM(size, pic) \
  byte* buf = new byte[size + BUF_SIZE]; \
  MacroAssembler masm(buf, size + BUF_SIZE, pic); \
  SETUP_COMMON()

#define SETUP_COMMON() \
  Decoder decoder; \
  Simulator* simulator = NULL; \
  if (Cctest::run_debugger()) { \
    simulator = new Debugger(&decoder); \
  } else { \
    simulator = new Simulator(&decoder); \
    simulator->set_disasm_trace(Cctest::trace_sim()); \
  } \
  simulator->set_coloured_trace(Cctest::coloured_trace()); \
  simulator->set_instruction_stats(Cctest::instruction_stats()); \
  RegisterDump core

// This is a convenience macro to avoid creating a scope for every assembler
// function call. It will still assert that the buffer has not been exceeded.
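// For example, TEST(mov) below calls ALLOW_ASM() so that it can mix raw
// assembler calls such as `__ movz(...)` with the usual macro-assembler calls.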
#define ALLOW_ASM() \
  CodeBufferCheckScope guard(&masm, masm.BufferCapacity())

#define START() \
  masm.Reset(); \
  simulator->ResetState(); \
  __ PushCalleeSavedRegisters(); \
  if (Cctest::run_debugger()) { \
    if (Cctest::trace_reg()) { \
      __ Trace(LOG_STATE, TRACE_ENABLE); \
    } \
    if (Cctest::trace_sim()) { \
      __ Trace(LOG_DISASM, TRACE_ENABLE); \
    } \
  } \
  if (Cctest::instruction_stats()) { \
    __ EnableInstrumentation(); \
  }

#define END() \
  if (Cctest::instruction_stats()) { \
    __ DisableInstrumentation(); \
  } \
  if (Cctest::run_debugger()) { \
    __ Trace(LOG_ALL, TRACE_DISABLE); \
  } \
  core.Dump(&masm); \
  __ PopCalleeSavedRegisters(); \
  __ Ret(); \
  masm.FinalizeCode()

#define RUN() \
  simulator->RunFrom(masm.GetStartAddress<Instruction*>())

#define TEARDOWN() TEARDOWN_COMMON()

#define TEARDOWN_CUSTOM() \
  delete[] buf; \
  TEARDOWN_COMMON()

#define TEARDOWN_COMMON() \
  delete simulator;

#else  // ifdef USE_SIMULATOR.
// Run the test on real hardware or models.
#define SETUP() \
  MacroAssembler masm(BUF_SIZE); \
  SETUP_COMMON()

#define SETUP_CUSTOM(size, pic) \
  byte* buf = new byte[size + BUF_SIZE]; \
  MacroAssembler masm(buf, size + BUF_SIZE, pic); \
  SETUP_COMMON()

#define SETUP_COMMON() \
  RegisterDump core; \
  CPU::SetUp()

// This is a convenience macro to avoid creating a scope for every assembler
// function call. It will still assert that the buffer has not been exceeded.
#define ALLOW_ASM() \
  CodeBufferCheckScope guard(&masm, masm.BufferCapacity())

#define START() \
  masm.Reset(); \
  __ PushCalleeSavedRegisters()

#define END() \
  core.Dump(&masm); \
  __ PopCalleeSavedRegisters(); \
  __ Ret(); \
  masm.FinalizeCode()

#define RUN() \
  { \
    byte* buffer_start = masm.GetStartAddress<byte*>(); \
    size_t buffer_length = masm.CursorOffset(); \
    void (*test_function)(void); \
    \
    CPU::EnsureIAndDCacheCoherency(buffer_start, buffer_length); \
    VIXL_STATIC_ASSERT(sizeof(buffer_start) == sizeof(test_function)); \
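    /* Copy the code address into the function pointer with memcpy; a direct \
       cast between data and function pointers is not portable C++. */ \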
    memcpy(&test_function, &buffer_start, sizeof(buffer_start)); \
    test_function(); \
  }

#define TEARDOWN()

#define TEARDOWN_CUSTOM() \
  delete[] buf;

#endif  // ifdef USE_SIMULATOR.

#define ASSERT_EQUAL_NZCV(expected) \
  assert(EqualNzcv(expected, core.flags_nzcv()))

#define ASSERT_EQUAL_REGISTERS(expected) \
  assert(EqualRegisters(&expected, &core))

#define ASSERT_EQUAL_32(expected, result) \
  assert(Equal32(static_cast<uint32_t>(expected), &core, result))

#define ASSERT_EQUAL_FP32(expected, result) \
  assert(EqualFP32(expected, &core, result))

#define ASSERT_EQUAL_64(expected, result) \
  assert(Equal64(expected, &core, result))

#define ASSERT_EQUAL_FP64(expected, result) \
  assert(EqualFP64(expected, &core, result))

#define ASSERT_LITERAL_POOL_SIZE(expected) \
  assert((expected) == (__ LiteralPoolSize()))

237
238TEST(stack_ops) {
239 SETUP();
240
241 START();
242 // save sp.
243 __ Mov(x29, sp);
244
245 // Set the sp to a known value.
246 __ Mov(sp, 0x1004);
247 __ Mov(x0, sp);
248
249 // Add immediate to the sp, and move the result to a normal register.
armvixlb0c8ae22014-03-21 14:03:59 +0000250 __ Add(sp, sp, 0x50);
armvixlad96eda2013-06-14 11:42:37 +0100251 __ Mov(x1, sp);
252
253 // Add extended to the sp, and move the result to a normal register.
254 __ Mov(x17, 0xfff);
255 __ Add(sp, sp, Operand(x17, SXTB));
256 __ Mov(x2, sp);
257
258 // Create an sp using a logical instruction, and move to normal register.
armvixlb0c8ae22014-03-21 14:03:59 +0000259 __ Orr(sp, xzr, 0x1fff);
armvixlad96eda2013-06-14 11:42:37 +0100260 __ Mov(x3, sp);
261
262 // Write wsp using a logical instruction.
armvixlb0c8ae22014-03-21 14:03:59 +0000263 __ Orr(wsp, wzr, 0xfffffff8);
armvixlad96eda2013-06-14 11:42:37 +0100264 __ Mov(x4, sp);
265
266 // Write sp, and read back wsp.
armvixlb0c8ae22014-03-21 14:03:59 +0000267 __ Orr(sp, xzr, 0xfffffff8);
armvixlad96eda2013-06-14 11:42:37 +0100268 __ Mov(w5, wsp);
269
270 // restore sp.
271 __ Mov(sp, x29);
272 END();
273
274 RUN();
275
276 ASSERT_EQUAL_64(0x1004, x0);
277 ASSERT_EQUAL_64(0x1054, x1);
278 ASSERT_EQUAL_64(0x1053, x2);
279 ASSERT_EQUAL_64(0x1fff, x3);
280 ASSERT_EQUAL_64(0xfffffff8, x4);
281 ASSERT_EQUAL_64(0xfffffff8, x5);
282
283 TEARDOWN();
284}
285
286
287TEST(mvn) {
288 SETUP();
289
290 START();
291 __ Mvn(w0, 0xfff);
292 __ Mvn(x1, 0xfff);
293 __ Mvn(w2, Operand(w0, LSL, 1));
294 __ Mvn(x3, Operand(x1, LSL, 2));
295 __ Mvn(w4, Operand(w0, LSR, 3));
296 __ Mvn(x5, Operand(x1, LSR, 4));
297 __ Mvn(w6, Operand(w0, ASR, 11));
298 __ Mvn(x7, Operand(x1, ASR, 12));
299 __ Mvn(w8, Operand(w0, ROR, 13));
300 __ Mvn(x9, Operand(x1, ROR, 14));
301 __ Mvn(w10, Operand(w2, UXTB));
302 __ Mvn(x11, Operand(x2, SXTB, 1));
303 __ Mvn(w12, Operand(w2, UXTH, 2));
304 __ Mvn(x13, Operand(x2, SXTH, 3));
305 __ Mvn(x14, Operand(w2, UXTW, 4));
306 __ Mvn(x15, Operand(w2, SXTW, 4));
307 END();
308
309 RUN();
310
311 ASSERT_EQUAL_64(0xfffff000, x0);
armvixlb0c8ae22014-03-21 14:03:59 +0000312 ASSERT_EQUAL_64(0xfffffffffffff000, x1);
armvixlad96eda2013-06-14 11:42:37 +0100313 ASSERT_EQUAL_64(0x00001fff, x2);
armvixlb0c8ae22014-03-21 14:03:59 +0000314 ASSERT_EQUAL_64(0x0000000000003fff, x3);
armvixlad96eda2013-06-14 11:42:37 +0100315 ASSERT_EQUAL_64(0xe00001ff, x4);
armvixlb0c8ae22014-03-21 14:03:59 +0000316 ASSERT_EQUAL_64(0xf0000000000000ff, x5);
armvixlad96eda2013-06-14 11:42:37 +0100317 ASSERT_EQUAL_64(0x00000001, x6);
armvixlb0c8ae22014-03-21 14:03:59 +0000318 ASSERT_EQUAL_64(0x0000000000000000, x7);
armvixlad96eda2013-06-14 11:42:37 +0100319 ASSERT_EQUAL_64(0x7ff80000, x8);
armvixlb0c8ae22014-03-21 14:03:59 +0000320 ASSERT_EQUAL_64(0x3ffc000000000000, x9);
armvixlad96eda2013-06-14 11:42:37 +0100321 ASSERT_EQUAL_64(0xffffff00, x10);
armvixlb0c8ae22014-03-21 14:03:59 +0000322 ASSERT_EQUAL_64(0x0000000000000001, x11);
armvixlad96eda2013-06-14 11:42:37 +0100323 ASSERT_EQUAL_64(0xffff8003, x12);
armvixlb0c8ae22014-03-21 14:03:59 +0000324 ASSERT_EQUAL_64(0xffffffffffff0007, x13);
325 ASSERT_EQUAL_64(0xfffffffffffe000f, x14);
326 ASSERT_EQUAL_64(0xfffffffffffe000f, x15);
armvixlad96eda2013-06-14 11:42:37 +0100327
328 TEARDOWN();
329}
330
331
armvixlf37fdc02014-02-05 13:22:16 +0000332TEST(mov_imm_w) {
333 SETUP();
334
335 START();
armvixlb0c8ae22014-03-21 14:03:59 +0000336 __ Mov(w0, 0xffffffff);
337 __ Mov(w1, 0xffff1234);
338 __ Mov(w2, 0x1234ffff);
339 __ Mov(w3, 0x00000000);
340 __ Mov(w4, 0x00001234);
341 __ Mov(w5, 0x12340000);
342 __ Mov(w6, 0x12345678);
armvixl4a102ba2014-07-14 09:02:40 +0100343 __ Mov(w7, (int32_t)0x80000000);
344 __ Mov(w8, (int32_t)0xffff0000);
345 __ Mov(w9, kWMinInt);
armvixlf37fdc02014-02-05 13:22:16 +0000346 END();
347
348 RUN();
349
armvixlb0c8ae22014-03-21 14:03:59 +0000350 ASSERT_EQUAL_64(0xffffffff, x0);
351 ASSERT_EQUAL_64(0xffff1234, x1);
352 ASSERT_EQUAL_64(0x1234ffff, x2);
353 ASSERT_EQUAL_64(0x00000000, x3);
354 ASSERT_EQUAL_64(0x00001234, x4);
355 ASSERT_EQUAL_64(0x12340000, x5);
356 ASSERT_EQUAL_64(0x12345678, x6);
armvixl4a102ba2014-07-14 09:02:40 +0100357 ASSERT_EQUAL_64(0x80000000, x7);
358 ASSERT_EQUAL_64(0xffff0000, x8);
359 ASSERT_EQUAL_32(kWMinInt, w9);
armvixlf37fdc02014-02-05 13:22:16 +0000360
361 TEARDOWN();
362}
363
364
365TEST(mov_imm_x) {
366 SETUP();
367
368 START();
armvixlb0c8ae22014-03-21 14:03:59 +0000369 __ Mov(x0, 0xffffffffffffffff);
370 __ Mov(x1, 0xffffffffffff1234);
371 __ Mov(x2, 0xffffffff12345678);
372 __ Mov(x3, 0xffff1234ffff5678);
373 __ Mov(x4, 0x1234ffffffff5678);
374 __ Mov(x5, 0x1234ffff5678ffff);
375 __ Mov(x6, 0x12345678ffffffff);
376 __ Mov(x7, 0x1234ffffffffffff);
377 __ Mov(x8, 0x123456789abcffff);
378 __ Mov(x9, 0x12345678ffff9abc);
379 __ Mov(x10, 0x1234ffff56789abc);
380 __ Mov(x11, 0xffff123456789abc);
381 __ Mov(x12, 0x0000000000000000);
382 __ Mov(x13, 0x0000000000001234);
383 __ Mov(x14, 0x0000000012345678);
384 __ Mov(x15, 0x0000123400005678);
385 __ Mov(x18, 0x1234000000005678);
386 __ Mov(x19, 0x1234000056780000);
387 __ Mov(x20, 0x1234567800000000);
388 __ Mov(x21, 0x1234000000000000);
389 __ Mov(x22, 0x123456789abc0000);
390 __ Mov(x23, 0x1234567800009abc);
391 __ Mov(x24, 0x1234000056789abc);
392 __ Mov(x25, 0x0000123456789abc);
393 __ Mov(x26, 0x123456789abcdef0);
394 __ Mov(x27, 0xffff000000000001);
395 __ Mov(x28, 0x8000ffff00000000);
armvixlf37fdc02014-02-05 13:22:16 +0000396 END();
397
398 RUN();
399
armvixlb0c8ae22014-03-21 14:03:59 +0000400 ASSERT_EQUAL_64(0xffffffffffff1234, x1);
401 ASSERT_EQUAL_64(0xffffffff12345678, x2);
402 ASSERT_EQUAL_64(0xffff1234ffff5678, x3);
403 ASSERT_EQUAL_64(0x1234ffffffff5678, x4);
404 ASSERT_EQUAL_64(0x1234ffff5678ffff, x5);
405 ASSERT_EQUAL_64(0x12345678ffffffff, x6);
406 ASSERT_EQUAL_64(0x1234ffffffffffff, x7);
407 ASSERT_EQUAL_64(0x123456789abcffff, x8);
408 ASSERT_EQUAL_64(0x12345678ffff9abc, x9);
409 ASSERT_EQUAL_64(0x1234ffff56789abc, x10);
410 ASSERT_EQUAL_64(0xffff123456789abc, x11);
411 ASSERT_EQUAL_64(0x0000000000000000, x12);
412 ASSERT_EQUAL_64(0x0000000000001234, x13);
413 ASSERT_EQUAL_64(0x0000000012345678, x14);
414 ASSERT_EQUAL_64(0x0000123400005678, x15);
415 ASSERT_EQUAL_64(0x1234000000005678, x18);
416 ASSERT_EQUAL_64(0x1234000056780000, x19);
417 ASSERT_EQUAL_64(0x1234567800000000, x20);
418 ASSERT_EQUAL_64(0x1234000000000000, x21);
419 ASSERT_EQUAL_64(0x123456789abc0000, x22);
420 ASSERT_EQUAL_64(0x1234567800009abc, x23);
421 ASSERT_EQUAL_64(0x1234000056789abc, x24);
422 ASSERT_EQUAL_64(0x0000123456789abc, x25);
423 ASSERT_EQUAL_64(0x123456789abcdef0, x26);
424 ASSERT_EQUAL_64(0xffff000000000001, x27);
425 ASSERT_EQUAL_64(0x8000ffff00000000, x28);
armvixlf37fdc02014-02-05 13:22:16 +0000426
427
428 TEARDOWN();
429}
430
431
armvixlad96eda2013-06-14 11:42:37 +0100432TEST(mov) {
433 SETUP();
armvixlc68cb642014-09-25 18:49:30 +0100434 ALLOW_ASM();
armvixlad96eda2013-06-14 11:42:37 +0100435
436 START();
armvixlb0c8ae22014-03-21 14:03:59 +0000437 __ Mov(x0, 0xffffffffffffffff);
438 __ Mov(x1, 0xffffffffffffffff);
439 __ Mov(x2, 0xffffffffffffffff);
440 __ Mov(x3, 0xffffffffffffffff);
armvixlad96eda2013-06-14 11:42:37 +0100441
armvixlb0c8ae22014-03-21 14:03:59 +0000442 __ Mov(x0, 0x0123456789abcdef);
armvixlad96eda2013-06-14 11:42:37 +0100443
armvixlb0c8ae22014-03-21 14:03:59 +0000444 __ movz(x1, UINT64_C(0xabcd) << 16);
445 __ movk(x2, UINT64_C(0xabcd) << 32);
446 __ movn(x3, UINT64_C(0xabcd) << 48);
armvixlad96eda2013-06-14 11:42:37 +0100447
armvixlb0c8ae22014-03-21 14:03:59 +0000448 __ Mov(x4, 0x0123456789abcdef);
armvixlad96eda2013-06-14 11:42:37 +0100449 __ Mov(x5, x4);
450
451 __ Mov(w6, -1);
452
453 // Test that moves back to the same register have the desired effect. This
454 // is a no-op for X registers, and a truncation for W registers.
armvixlb0c8ae22014-03-21 14:03:59 +0000455 __ Mov(x7, 0x0123456789abcdef);
armvixlad96eda2013-06-14 11:42:37 +0100456 __ Mov(x7, x7);
armvixlb0c8ae22014-03-21 14:03:59 +0000457 __ Mov(x8, 0x0123456789abcdef);
armvixlad96eda2013-06-14 11:42:37 +0100458 __ Mov(w8, w8);
armvixlb0c8ae22014-03-21 14:03:59 +0000459 __ Mov(x9, 0x0123456789abcdef);
armvixlad96eda2013-06-14 11:42:37 +0100460 __ Mov(x9, Operand(x9));
armvixlb0c8ae22014-03-21 14:03:59 +0000461 __ Mov(x10, 0x0123456789abcdef);
armvixlad96eda2013-06-14 11:42:37 +0100462 __ Mov(w10, Operand(w10));
463
464 __ Mov(w11, 0xfff);
465 __ Mov(x12, 0xfff);
466 __ Mov(w13, Operand(w11, LSL, 1));
467 __ Mov(x14, Operand(x12, LSL, 2));
468 __ Mov(w15, Operand(w11, LSR, 3));
469 __ Mov(x18, Operand(x12, LSR, 4));
470 __ Mov(w19, Operand(w11, ASR, 11));
471 __ Mov(x20, Operand(x12, ASR, 12));
472 __ Mov(w21, Operand(w11, ROR, 13));
473 __ Mov(x22, Operand(x12, ROR, 14));
474 __ Mov(w23, Operand(w13, UXTB));
475 __ Mov(x24, Operand(x13, SXTB, 1));
476 __ Mov(w25, Operand(w13, UXTH, 2));
477 __ Mov(x26, Operand(x13, SXTH, 3));
478 __ Mov(x27, Operand(w13, UXTW, 4));
armvixlf37fdc02014-02-05 13:22:16 +0000479
armvixlb0c8ae22014-03-21 14:03:59 +0000480 __ Mov(x28, 0x0123456789abcdef);
armvixlf37fdc02014-02-05 13:22:16 +0000481 __ Mov(w28, w28, kDiscardForSameWReg);
armvixlad96eda2013-06-14 11:42:37 +0100482 END();
483
484 RUN();
485
armvixlb0c8ae22014-03-21 14:03:59 +0000486 ASSERT_EQUAL_64(0x0123456789abcdef, x0);
487 ASSERT_EQUAL_64(0x00000000abcd0000, x1);
488 ASSERT_EQUAL_64(0xffffabcdffffffff, x2);
489 ASSERT_EQUAL_64(0x5432ffffffffffff, x3);
armvixlad96eda2013-06-14 11:42:37 +0100490 ASSERT_EQUAL_64(x4, x5);
491 ASSERT_EQUAL_32(-1, w6);
armvixlb0c8ae22014-03-21 14:03:59 +0000492 ASSERT_EQUAL_64(0x0123456789abcdef, x7);
493 ASSERT_EQUAL_32(0x89abcdef, w8);
494 ASSERT_EQUAL_64(0x0123456789abcdef, x9);
495 ASSERT_EQUAL_32(0x89abcdef, w10);
armvixlad96eda2013-06-14 11:42:37 +0100496 ASSERT_EQUAL_64(0x00000fff, x11);
armvixlb0c8ae22014-03-21 14:03:59 +0000497 ASSERT_EQUAL_64(0x0000000000000fff, x12);
armvixlad96eda2013-06-14 11:42:37 +0100498 ASSERT_EQUAL_64(0x00001ffe, x13);
armvixlb0c8ae22014-03-21 14:03:59 +0000499 ASSERT_EQUAL_64(0x0000000000003ffc, x14);
armvixlad96eda2013-06-14 11:42:37 +0100500 ASSERT_EQUAL_64(0x000001ff, x15);
armvixlb0c8ae22014-03-21 14:03:59 +0000501 ASSERT_EQUAL_64(0x00000000000000ff, x18);
armvixlad96eda2013-06-14 11:42:37 +0100502 ASSERT_EQUAL_64(0x00000001, x19);
armvixlb0c8ae22014-03-21 14:03:59 +0000503 ASSERT_EQUAL_64(0x0000000000000000, x20);
armvixlad96eda2013-06-14 11:42:37 +0100504 ASSERT_EQUAL_64(0x7ff80000, x21);
armvixlb0c8ae22014-03-21 14:03:59 +0000505 ASSERT_EQUAL_64(0x3ffc000000000000, x22);
armvixlad96eda2013-06-14 11:42:37 +0100506 ASSERT_EQUAL_64(0x000000fe, x23);
armvixlb0c8ae22014-03-21 14:03:59 +0000507 ASSERT_EQUAL_64(0xfffffffffffffffc, x24);
armvixlad96eda2013-06-14 11:42:37 +0100508 ASSERT_EQUAL_64(0x00007ff8, x25);
armvixlb0c8ae22014-03-21 14:03:59 +0000509 ASSERT_EQUAL_64(0x000000000000fff0, x26);
510 ASSERT_EQUAL_64(0x000000000001ffe0, x27);
511 ASSERT_EQUAL_64(0x0123456789abcdef, x28);
armvixlad96eda2013-06-14 11:42:37 +0100512
513 TEARDOWN();
514}
515
516
517TEST(orr) {
518 SETUP();
519
520 START();
521 __ Mov(x0, 0xf0f0);
522 __ Mov(x1, 0xf00000ff);
523
524 __ Orr(x2, x0, Operand(x1));
525 __ Orr(w3, w0, Operand(w1, LSL, 28));
526 __ Orr(x4, x0, Operand(x1, LSL, 32));
527 __ Orr(x5, x0, Operand(x1, LSR, 4));
528 __ Orr(w6, w0, Operand(w1, ASR, 4));
529 __ Orr(x7, x0, Operand(x1, ASR, 4));
530 __ Orr(w8, w0, Operand(w1, ROR, 12));
531 __ Orr(x9, x0, Operand(x1, ROR, 12));
armvixlb0c8ae22014-03-21 14:03:59 +0000532 __ Orr(w10, w0, 0xf);
533 __ Orr(x11, x0, 0xf0000000f0000000);
armvixlad96eda2013-06-14 11:42:37 +0100534 END();
535
536 RUN();
537
armvixlb0c8ae22014-03-21 14:03:59 +0000538 ASSERT_EQUAL_64(0x00000000f000f0ff, x2);
armvixlad96eda2013-06-14 11:42:37 +0100539 ASSERT_EQUAL_64(0xf000f0f0, x3);
armvixlb0c8ae22014-03-21 14:03:59 +0000540 ASSERT_EQUAL_64(0xf00000ff0000f0f0, x4);
541 ASSERT_EQUAL_64(0x000000000f00f0ff, x5);
armvixlad96eda2013-06-14 11:42:37 +0100542 ASSERT_EQUAL_64(0xff00f0ff, x6);
armvixlb0c8ae22014-03-21 14:03:59 +0000543 ASSERT_EQUAL_64(0x000000000f00f0ff, x7);
armvixlad96eda2013-06-14 11:42:37 +0100544 ASSERT_EQUAL_64(0x0ffff0f0, x8);
armvixlb0c8ae22014-03-21 14:03:59 +0000545 ASSERT_EQUAL_64(0x0ff00000000ff0f0, x9);
546 ASSERT_EQUAL_64(0x0000f0ff, x10);
547 ASSERT_EQUAL_64(0xf0000000f000f0f0, x11);
armvixlad96eda2013-06-14 11:42:37 +0100548
549 TEARDOWN();
550}
551
552
553TEST(orr_extend) {
554 SETUP();
555
556 START();
557 __ Mov(x0, 1);
armvixlb0c8ae22014-03-21 14:03:59 +0000558 __ Mov(x1, 0x8000000080008080);
armvixlad96eda2013-06-14 11:42:37 +0100559 __ Orr(w6, w0, Operand(w1, UXTB));
560 __ Orr(x7, x0, Operand(x1, UXTH, 1));
561 __ Orr(w8, w0, Operand(w1, UXTW, 2));
562 __ Orr(x9, x0, Operand(x1, UXTX, 3));
563 __ Orr(w10, w0, Operand(w1, SXTB));
564 __ Orr(x11, x0, Operand(x1, SXTH, 1));
565 __ Orr(x12, x0, Operand(x1, SXTW, 2));
566 __ Orr(x13, x0, Operand(x1, SXTX, 3));
567 END();
568
569 RUN();
570
571 ASSERT_EQUAL_64(0x00000081, x6);
armvixlb0c8ae22014-03-21 14:03:59 +0000572 ASSERT_EQUAL_64(0x0000000000010101, x7);
armvixlad96eda2013-06-14 11:42:37 +0100573 ASSERT_EQUAL_64(0x00020201, x8);
armvixlb0c8ae22014-03-21 14:03:59 +0000574 ASSERT_EQUAL_64(0x0000000400040401, x9);
575 ASSERT_EQUAL_64(0xffffff81, x10);
576 ASSERT_EQUAL_64(0xffffffffffff0101, x11);
577 ASSERT_EQUAL_64(0xfffffffe00020201, x12);
578 ASSERT_EQUAL_64(0x0000000400040401, x13);
armvixlad96eda2013-06-14 11:42:37 +0100579
580 TEARDOWN();
581}
582
583
584TEST(bitwise_wide_imm) {
585 SETUP();
586
587 START();
588 __ Mov(x0, 0);
armvixlb0c8ae22014-03-21 14:03:59 +0000589 __ Mov(x1, 0xf0f0f0f0f0f0f0f0);
armvixlad96eda2013-06-14 11:42:37 +0100590
armvixlb0c8ae22014-03-21 14:03:59 +0000591 __ Orr(x10, x0, 0x1234567890abcdef);
592 __ Orr(w11, w1, 0x90abcdef);
armvixl4a102ba2014-07-14 09:02:40 +0100593
594 __ Orr(w12, w0, kWMinInt);
595 __ Eor(w13, w0, kWMinInt);
armvixlad96eda2013-06-14 11:42:37 +0100596 END();
597
598 RUN();
599
600 ASSERT_EQUAL_64(0, x0);
armvixlb0c8ae22014-03-21 14:03:59 +0000601 ASSERT_EQUAL_64(0xf0f0f0f0f0f0f0f0, x1);
602 ASSERT_EQUAL_64(0x1234567890abcdef, x10);
603 ASSERT_EQUAL_64(0x00000000f0fbfdff, x11);
armvixl4a102ba2014-07-14 09:02:40 +0100604 ASSERT_EQUAL_32(kWMinInt, w12);
605 ASSERT_EQUAL_32(kWMinInt, w13);
armvixlad96eda2013-06-14 11:42:37 +0100606
607 TEARDOWN();
608}
609
610
611TEST(orn) {
612 SETUP();
613
614 START();
615 __ Mov(x0, 0xf0f0);
616 __ Mov(x1, 0xf00000ff);
617
618 __ Orn(x2, x0, Operand(x1));
619 __ Orn(w3, w0, Operand(w1, LSL, 4));
620 __ Orn(x4, x0, Operand(x1, LSL, 4));
621 __ Orn(x5, x0, Operand(x1, LSR, 1));
622 __ Orn(w6, w0, Operand(w1, ASR, 1));
623 __ Orn(x7, x0, Operand(x1, ASR, 1));
624 __ Orn(w8, w0, Operand(w1, ROR, 16));
625 __ Orn(x9, x0, Operand(x1, ROR, 16));
armvixlb0c8ae22014-03-21 14:03:59 +0000626 __ Orn(w10, w0, 0x0000ffff);
627 __ Orn(x11, x0, 0x0000ffff0000ffff);
armvixlad96eda2013-06-14 11:42:37 +0100628 END();
629
630 RUN();
631
armvixlb0c8ae22014-03-21 14:03:59 +0000632 ASSERT_EQUAL_64(0xffffffff0ffffff0, x2);
armvixlad96eda2013-06-14 11:42:37 +0100633 ASSERT_EQUAL_64(0xfffff0ff, x3);
armvixlb0c8ae22014-03-21 14:03:59 +0000634 ASSERT_EQUAL_64(0xfffffff0fffff0ff, x4);
635 ASSERT_EQUAL_64(0xffffffff87fffff0, x5);
armvixlad96eda2013-06-14 11:42:37 +0100636 ASSERT_EQUAL_64(0x07fffff0, x6);
armvixlb0c8ae22014-03-21 14:03:59 +0000637 ASSERT_EQUAL_64(0xffffffff87fffff0, x7);
armvixlad96eda2013-06-14 11:42:37 +0100638 ASSERT_EQUAL_64(0xff00ffff, x8);
armvixlb0c8ae22014-03-21 14:03:59 +0000639 ASSERT_EQUAL_64(0xff00ffffffffffff, x9);
armvixlad96eda2013-06-14 11:42:37 +0100640 ASSERT_EQUAL_64(0xfffff0f0, x10);
armvixlb0c8ae22014-03-21 14:03:59 +0000641 ASSERT_EQUAL_64(0xffff0000fffff0f0, x11);
armvixlad96eda2013-06-14 11:42:37 +0100642
643 TEARDOWN();
644}
645
646
647TEST(orn_extend) {
648 SETUP();
649
650 START();
651 __ Mov(x0, 1);
armvixlb0c8ae22014-03-21 14:03:59 +0000652 __ Mov(x1, 0x8000000080008081);
armvixlad96eda2013-06-14 11:42:37 +0100653 __ Orn(w6, w0, Operand(w1, UXTB));
654 __ Orn(x7, x0, Operand(x1, UXTH, 1));
655 __ Orn(w8, w0, Operand(w1, UXTW, 2));
656 __ Orn(x9, x0, Operand(x1, UXTX, 3));
657 __ Orn(w10, w0, Operand(w1, SXTB));
658 __ Orn(x11, x0, Operand(x1, SXTH, 1));
659 __ Orn(x12, x0, Operand(x1, SXTW, 2));
660 __ Orn(x13, x0, Operand(x1, SXTX, 3));
661 END();
662
663 RUN();
664
665 ASSERT_EQUAL_64(0xffffff7f, x6);
armvixlb0c8ae22014-03-21 14:03:59 +0000666 ASSERT_EQUAL_64(0xfffffffffffefefd, x7);
armvixlad96eda2013-06-14 11:42:37 +0100667 ASSERT_EQUAL_64(0xfffdfdfb, x8);
armvixlb0c8ae22014-03-21 14:03:59 +0000668 ASSERT_EQUAL_64(0xfffffffbfffbfbf7, x9);
armvixlad96eda2013-06-14 11:42:37 +0100669 ASSERT_EQUAL_64(0x0000007f, x10);
armvixlb0c8ae22014-03-21 14:03:59 +0000670 ASSERT_EQUAL_64(0x000000000000fefd, x11);
671 ASSERT_EQUAL_64(0x00000001fffdfdfb, x12);
672 ASSERT_EQUAL_64(0xfffffffbfffbfbf7, x13);
armvixlad96eda2013-06-14 11:42:37 +0100673
674 TEARDOWN();
675}
676
677
678TEST(and_) {
679 SETUP();
680
681 START();
682 __ Mov(x0, 0xfff0);
683 __ Mov(x1, 0xf00000ff);
684
685 __ And(x2, x0, Operand(x1));
686 __ And(w3, w0, Operand(w1, LSL, 4));
687 __ And(x4, x0, Operand(x1, LSL, 4));
688 __ And(x5, x0, Operand(x1, LSR, 1));
689 __ And(w6, w0, Operand(w1, ASR, 20));
690 __ And(x7, x0, Operand(x1, ASR, 20));
691 __ And(w8, w0, Operand(w1, ROR, 28));
692 __ And(x9, x0, Operand(x1, ROR, 28));
693 __ And(w10, w0, Operand(0xff00));
694 __ And(x11, x0, Operand(0xff));
695 END();
696
697 RUN();
698
699 ASSERT_EQUAL_64(0x000000f0, x2);
700 ASSERT_EQUAL_64(0x00000ff0, x3);
701 ASSERT_EQUAL_64(0x00000ff0, x4);
702 ASSERT_EQUAL_64(0x00000070, x5);
703 ASSERT_EQUAL_64(0x0000ff00, x6);
704 ASSERT_EQUAL_64(0x00000f00, x7);
705 ASSERT_EQUAL_64(0x00000ff0, x8);
706 ASSERT_EQUAL_64(0x00000000, x9);
707 ASSERT_EQUAL_64(0x0000ff00, x10);
708 ASSERT_EQUAL_64(0x000000f0, x11);
709
710 TEARDOWN();
711}
712
713
714TEST(and_extend) {
715 SETUP();
716
717 START();
armvixlb0c8ae22014-03-21 14:03:59 +0000718 __ Mov(x0, 0xffffffffffffffff);
719 __ Mov(x1, 0x8000000080008081);
armvixlad96eda2013-06-14 11:42:37 +0100720 __ And(w6, w0, Operand(w1, UXTB));
721 __ And(x7, x0, Operand(x1, UXTH, 1));
722 __ And(w8, w0, Operand(w1, UXTW, 2));
723 __ And(x9, x0, Operand(x1, UXTX, 3));
724 __ And(w10, w0, Operand(w1, SXTB));
725 __ And(x11, x0, Operand(x1, SXTH, 1));
726 __ And(x12, x0, Operand(x1, SXTW, 2));
727 __ And(x13, x0, Operand(x1, SXTX, 3));
728 END();
729
730 RUN();
731
732 ASSERT_EQUAL_64(0x00000081, x6);
armvixlb0c8ae22014-03-21 14:03:59 +0000733 ASSERT_EQUAL_64(0x0000000000010102, x7);
armvixlad96eda2013-06-14 11:42:37 +0100734 ASSERT_EQUAL_64(0x00020204, x8);
armvixlb0c8ae22014-03-21 14:03:59 +0000735 ASSERT_EQUAL_64(0x0000000400040408, x9);
armvixlad96eda2013-06-14 11:42:37 +0100736 ASSERT_EQUAL_64(0xffffff81, x10);
armvixlb0c8ae22014-03-21 14:03:59 +0000737 ASSERT_EQUAL_64(0xffffffffffff0102, x11);
738 ASSERT_EQUAL_64(0xfffffffe00020204, x12);
739 ASSERT_EQUAL_64(0x0000000400040408, x13);
armvixlad96eda2013-06-14 11:42:37 +0100740
741 TEARDOWN();
742}
743
744
745TEST(ands) {
746 SETUP();
747
748 START();
749 __ Mov(x1, 0xf00000ff);
armvixlf37fdc02014-02-05 13:22:16 +0000750 __ Ands(w0, w1, Operand(w1));
armvixlad96eda2013-06-14 11:42:37 +0100751 END();
752
753 RUN();
754
755 ASSERT_EQUAL_NZCV(NFlag);
756 ASSERT_EQUAL_64(0xf00000ff, x0);
757
758 START();
759 __ Mov(x0, 0xfff0);
760 __ Mov(x1, 0xf00000ff);
armvixlf37fdc02014-02-05 13:22:16 +0000761 __ Ands(w0, w0, Operand(w1, LSR, 4));
armvixlad96eda2013-06-14 11:42:37 +0100762 END();
763
764 RUN();
765
766 ASSERT_EQUAL_NZCV(ZFlag);
767 ASSERT_EQUAL_64(0x00000000, x0);
768
769 START();
armvixlb0c8ae22014-03-21 14:03:59 +0000770 __ Mov(x0, 0x8000000000000000);
armvixlad96eda2013-06-14 11:42:37 +0100771 __ Mov(x1, 0x00000001);
armvixlf37fdc02014-02-05 13:22:16 +0000772 __ Ands(x0, x0, Operand(x1, ROR, 1));
armvixlad96eda2013-06-14 11:42:37 +0100773 END();
774
775 RUN();
776
777 ASSERT_EQUAL_NZCV(NFlag);
armvixlb0c8ae22014-03-21 14:03:59 +0000778 ASSERT_EQUAL_64(0x8000000000000000, x0);
armvixlad96eda2013-06-14 11:42:37 +0100779
780 START();
781 __ Mov(x0, 0xfff0);
armvixlf37fdc02014-02-05 13:22:16 +0000782 __ Ands(w0, w0, Operand(0xf));
armvixlad96eda2013-06-14 11:42:37 +0100783 END();
784
785 RUN();
786
787 ASSERT_EQUAL_NZCV(ZFlag);
788 ASSERT_EQUAL_64(0x00000000, x0);
789
790 START();
791 __ Mov(x0, 0xff000000);
armvixlf37fdc02014-02-05 13:22:16 +0000792 __ Ands(w0, w0, Operand(0x80000000));
armvixlad96eda2013-06-14 11:42:37 +0100793 END();
794
795 RUN();
796
797 ASSERT_EQUAL_NZCV(NFlag);
798 ASSERT_EQUAL_64(0x80000000, x0);
799
800 TEARDOWN();
801}
802
803
804TEST(bic) {
805 SETUP();
806
807 START();
808 __ Mov(x0, 0xfff0);
809 __ Mov(x1, 0xf00000ff);
810
811 __ Bic(x2, x0, Operand(x1));
812 __ Bic(w3, w0, Operand(w1, LSL, 4));
813 __ Bic(x4, x0, Operand(x1, LSL, 4));
814 __ Bic(x5, x0, Operand(x1, LSR, 1));
815 __ Bic(w6, w0, Operand(w1, ASR, 20));
816 __ Bic(x7, x0, Operand(x1, ASR, 20));
817 __ Bic(w8, w0, Operand(w1, ROR, 28));
818 __ Bic(x9, x0, Operand(x1, ROR, 24));
819 __ Bic(x10, x0, Operand(0x1f));
820 __ Bic(x11, x0, Operand(0x100));
821
822 // Test bic into sp when the constant cannot be encoded in the immediate
823 // field.
824 // Use x20 to preserve sp. We check for the result via x21 because the
825 // test infrastructure requires that sp be restored to its original value.
826 __ Mov(x20, sp);
827 __ Mov(x0, 0xffffff);
828 __ Bic(sp, x0, Operand(0xabcdef));
829 __ Mov(x21, sp);
830 __ Mov(sp, x20);
831 END();
832
833 RUN();
834
835 ASSERT_EQUAL_64(0x0000ff00, x2);
836 ASSERT_EQUAL_64(0x0000f000, x3);
837 ASSERT_EQUAL_64(0x0000f000, x4);
838 ASSERT_EQUAL_64(0x0000ff80, x5);
839 ASSERT_EQUAL_64(0x000000f0, x6);
840 ASSERT_EQUAL_64(0x0000f0f0, x7);
841 ASSERT_EQUAL_64(0x0000f000, x8);
842 ASSERT_EQUAL_64(0x0000ff00, x9);
843 ASSERT_EQUAL_64(0x0000ffe0, x10);
844 ASSERT_EQUAL_64(0x0000fef0, x11);
845
846 ASSERT_EQUAL_64(0x543210, x21);
847
848 TEARDOWN();
849}
850
851
852TEST(bic_extend) {
853 SETUP();
854
855 START();
armvixlb0c8ae22014-03-21 14:03:59 +0000856 __ Mov(x0, 0xffffffffffffffff);
857 __ Mov(x1, 0x8000000080008081);
armvixlad96eda2013-06-14 11:42:37 +0100858 __ Bic(w6, w0, Operand(w1, UXTB));
859 __ Bic(x7, x0, Operand(x1, UXTH, 1));
860 __ Bic(w8, w0, Operand(w1, UXTW, 2));
861 __ Bic(x9, x0, Operand(x1, UXTX, 3));
862 __ Bic(w10, w0, Operand(w1, SXTB));
863 __ Bic(x11, x0, Operand(x1, SXTH, 1));
864 __ Bic(x12, x0, Operand(x1, SXTW, 2));
865 __ Bic(x13, x0, Operand(x1, SXTX, 3));
866 END();
867
868 RUN();
869
870 ASSERT_EQUAL_64(0xffffff7e, x6);
armvixlb0c8ae22014-03-21 14:03:59 +0000871 ASSERT_EQUAL_64(0xfffffffffffefefd, x7);
armvixlad96eda2013-06-14 11:42:37 +0100872 ASSERT_EQUAL_64(0xfffdfdfb, x8);
armvixlb0c8ae22014-03-21 14:03:59 +0000873 ASSERT_EQUAL_64(0xfffffffbfffbfbf7, x9);
armvixlad96eda2013-06-14 11:42:37 +0100874 ASSERT_EQUAL_64(0x0000007e, x10);
armvixlb0c8ae22014-03-21 14:03:59 +0000875 ASSERT_EQUAL_64(0x000000000000fefd, x11);
876 ASSERT_EQUAL_64(0x00000001fffdfdfb, x12);
877 ASSERT_EQUAL_64(0xfffffffbfffbfbf7, x13);
armvixlad96eda2013-06-14 11:42:37 +0100878
879 TEARDOWN();
880}
881
882
883TEST(bics) {
884 SETUP();
885
886 START();
887 __ Mov(x1, 0xffff);
armvixlf37fdc02014-02-05 13:22:16 +0000888 __ Bics(w0, w1, Operand(w1));
armvixlad96eda2013-06-14 11:42:37 +0100889 END();
890
891 RUN();
892
893 ASSERT_EQUAL_NZCV(ZFlag);
894 ASSERT_EQUAL_64(0x00000000, x0);
895
896 START();
897 __ Mov(x0, 0xffffffff);
armvixlf37fdc02014-02-05 13:22:16 +0000898 __ Bics(w0, w0, Operand(w0, LSR, 1));
armvixlad96eda2013-06-14 11:42:37 +0100899 END();
900
901 RUN();
902
903 ASSERT_EQUAL_NZCV(NFlag);
904 ASSERT_EQUAL_64(0x80000000, x0);
905
906 START();
armvixlb0c8ae22014-03-21 14:03:59 +0000907 __ Mov(x0, 0x8000000000000000);
armvixlad96eda2013-06-14 11:42:37 +0100908 __ Mov(x1, 0x00000001);
armvixlf37fdc02014-02-05 13:22:16 +0000909 __ Bics(x0, x0, Operand(x1, ROR, 1));
armvixlad96eda2013-06-14 11:42:37 +0100910 END();
911
912 RUN();
913
914 ASSERT_EQUAL_NZCV(ZFlag);
915 ASSERT_EQUAL_64(0x00000000, x0);
916
917 START();
armvixlb0c8ae22014-03-21 14:03:59 +0000918 __ Mov(x0, 0xffffffffffffffff);
919 __ Bics(x0, x0, 0x7fffffffffffffff);
armvixlad96eda2013-06-14 11:42:37 +0100920 END();
921
922 RUN();
923
924 ASSERT_EQUAL_NZCV(NFlag);
armvixlb0c8ae22014-03-21 14:03:59 +0000925 ASSERT_EQUAL_64(0x8000000000000000, x0);
armvixlad96eda2013-06-14 11:42:37 +0100926
927 START();
928 __ Mov(w0, 0xffff0000);
armvixlb0c8ae22014-03-21 14:03:59 +0000929 __ Bics(w0, w0, 0xfffffff0);
armvixlad96eda2013-06-14 11:42:37 +0100930 END();
931
932 RUN();
933
934 ASSERT_EQUAL_NZCV(ZFlag);
935 ASSERT_EQUAL_64(0x00000000, x0);
936
937 TEARDOWN();
938}
939
940
941TEST(eor) {
942 SETUP();
943
944 START();
945 __ Mov(x0, 0xfff0);
946 __ Mov(x1, 0xf00000ff);
947
948 __ Eor(x2, x0, Operand(x1));
949 __ Eor(w3, w0, Operand(w1, LSL, 4));
950 __ Eor(x4, x0, Operand(x1, LSL, 4));
951 __ Eor(x5, x0, Operand(x1, LSR, 1));
952 __ Eor(w6, w0, Operand(w1, ASR, 20));
953 __ Eor(x7, x0, Operand(x1, ASR, 20));
954 __ Eor(w8, w0, Operand(w1, ROR, 28));
955 __ Eor(x9, x0, Operand(x1, ROR, 28));
armvixlb0c8ae22014-03-21 14:03:59 +0000956 __ Eor(w10, w0, 0xff00ff00);
957 __ Eor(x11, x0, 0xff00ff00ff00ff00);
armvixlad96eda2013-06-14 11:42:37 +0100958 END();
959
960 RUN();
961
armvixlb0c8ae22014-03-21 14:03:59 +0000962 ASSERT_EQUAL_64(0x00000000f000ff0f, x2);
armvixlad96eda2013-06-14 11:42:37 +0100963 ASSERT_EQUAL_64(0x0000f000, x3);
armvixlb0c8ae22014-03-21 14:03:59 +0000964 ASSERT_EQUAL_64(0x0000000f0000f000, x4);
965 ASSERT_EQUAL_64(0x000000007800ff8f, x5);
armvixlad96eda2013-06-14 11:42:37 +0100966 ASSERT_EQUAL_64(0xffff00f0, x6);
armvixlb0c8ae22014-03-21 14:03:59 +0000967 ASSERT_EQUAL_64(0x000000000000f0f0, x7);
armvixlad96eda2013-06-14 11:42:37 +0100968 ASSERT_EQUAL_64(0x0000f00f, x8);
armvixlb0c8ae22014-03-21 14:03:59 +0000969 ASSERT_EQUAL_64(0x00000ff00000ffff, x9);
armvixlad96eda2013-06-14 11:42:37 +0100970 ASSERT_EQUAL_64(0xff0000f0, x10);
armvixlb0c8ae22014-03-21 14:03:59 +0000971 ASSERT_EQUAL_64(0xff00ff00ff0000f0, x11);
armvixlad96eda2013-06-14 11:42:37 +0100972
973 TEARDOWN();
974}
975
976TEST(eor_extend) {
977 SETUP();
978
979 START();
armvixlb0c8ae22014-03-21 14:03:59 +0000980 __ Mov(x0, 0x1111111111111111);
981 __ Mov(x1, 0x8000000080008081);
armvixlad96eda2013-06-14 11:42:37 +0100982 __ Eor(w6, w0, Operand(w1, UXTB));
983 __ Eor(x7, x0, Operand(x1, UXTH, 1));
984 __ Eor(w8, w0, Operand(w1, UXTW, 2));
985 __ Eor(x9, x0, Operand(x1, UXTX, 3));
986 __ Eor(w10, w0, Operand(w1, SXTB));
987 __ Eor(x11, x0, Operand(x1, SXTH, 1));
988 __ Eor(x12, x0, Operand(x1, SXTW, 2));
989 __ Eor(x13, x0, Operand(x1, SXTX, 3));
990 END();
991
992 RUN();
993
994 ASSERT_EQUAL_64(0x11111190, x6);
armvixlb0c8ae22014-03-21 14:03:59 +0000995 ASSERT_EQUAL_64(0x1111111111101013, x7);
armvixlad96eda2013-06-14 11:42:37 +0100996 ASSERT_EQUAL_64(0x11131315, x8);
armvixlb0c8ae22014-03-21 14:03:59 +0000997 ASSERT_EQUAL_64(0x1111111511151519, x9);
armvixlad96eda2013-06-14 11:42:37 +0100998 ASSERT_EQUAL_64(0xeeeeee90, x10);
armvixlb0c8ae22014-03-21 14:03:59 +0000999 ASSERT_EQUAL_64(0xeeeeeeeeeeee1013, x11);
1000 ASSERT_EQUAL_64(0xeeeeeeef11131315, x12);
1001 ASSERT_EQUAL_64(0x1111111511151519, x13);
armvixlad96eda2013-06-14 11:42:37 +01001002
1003 TEARDOWN();
1004}
1005
1006
1007TEST(eon) {
1008 SETUP();
1009
1010 START();
1011 __ Mov(x0, 0xfff0);
1012 __ Mov(x1, 0xf00000ff);
1013
1014 __ Eon(x2, x0, Operand(x1));
1015 __ Eon(w3, w0, Operand(w1, LSL, 4));
1016 __ Eon(x4, x0, Operand(x1, LSL, 4));
1017 __ Eon(x5, x0, Operand(x1, LSR, 1));
1018 __ Eon(w6, w0, Operand(w1, ASR, 20));
1019 __ Eon(x7, x0, Operand(x1, ASR, 20));
1020 __ Eon(w8, w0, Operand(w1, ROR, 28));
1021 __ Eon(x9, x0, Operand(x1, ROR, 28));
armvixlb0c8ae22014-03-21 14:03:59 +00001022 __ Eon(w10, w0, 0x03c003c0);
1023 __ Eon(x11, x0, 0x0000100000001000);
armvixlad96eda2013-06-14 11:42:37 +01001024 END();
1025
1026 RUN();
1027
armvixlb0c8ae22014-03-21 14:03:59 +00001028 ASSERT_EQUAL_64(0xffffffff0fff00f0, x2);
armvixlad96eda2013-06-14 11:42:37 +01001029 ASSERT_EQUAL_64(0xffff0fff, x3);
armvixlb0c8ae22014-03-21 14:03:59 +00001030 ASSERT_EQUAL_64(0xfffffff0ffff0fff, x4);
1031 ASSERT_EQUAL_64(0xffffffff87ff0070, x5);
armvixlad96eda2013-06-14 11:42:37 +01001032 ASSERT_EQUAL_64(0x0000ff0f, x6);
armvixlb0c8ae22014-03-21 14:03:59 +00001033 ASSERT_EQUAL_64(0xffffffffffff0f0f, x7);
armvixlad96eda2013-06-14 11:42:37 +01001034 ASSERT_EQUAL_64(0xffff0ff0, x8);
armvixlb0c8ae22014-03-21 14:03:59 +00001035 ASSERT_EQUAL_64(0xfffff00fffff0000, x9);
armvixlad96eda2013-06-14 11:42:37 +01001036 ASSERT_EQUAL_64(0xfc3f03cf, x10);
armvixlb0c8ae22014-03-21 14:03:59 +00001037 ASSERT_EQUAL_64(0xffffefffffff100f, x11);
armvixlad96eda2013-06-14 11:42:37 +01001038
1039 TEARDOWN();
1040}
1041
1042
1043TEST(eon_extend) {
1044 SETUP();
1045
1046 START();
armvixlb0c8ae22014-03-21 14:03:59 +00001047 __ Mov(x0, 0x1111111111111111);
1048 __ Mov(x1, 0x8000000080008081);
armvixlad96eda2013-06-14 11:42:37 +01001049 __ Eon(w6, w0, Operand(w1, UXTB));
1050 __ Eon(x7, x0, Operand(x1, UXTH, 1));
1051 __ Eon(w8, w0, Operand(w1, UXTW, 2));
1052 __ Eon(x9, x0, Operand(x1, UXTX, 3));
1053 __ Eon(w10, w0, Operand(w1, SXTB));
1054 __ Eon(x11, x0, Operand(x1, SXTH, 1));
1055 __ Eon(x12, x0, Operand(x1, SXTW, 2));
1056 __ Eon(x13, x0, Operand(x1, SXTX, 3));
1057 END();
1058
1059 RUN();
1060
1061 ASSERT_EQUAL_64(0xeeeeee6f, x6);
armvixlb0c8ae22014-03-21 14:03:59 +00001062 ASSERT_EQUAL_64(0xeeeeeeeeeeefefec, x7);
armvixlad96eda2013-06-14 11:42:37 +01001063 ASSERT_EQUAL_64(0xeeececea, x8);
armvixlb0c8ae22014-03-21 14:03:59 +00001064 ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6, x9);
armvixlad96eda2013-06-14 11:42:37 +01001065 ASSERT_EQUAL_64(0x1111116f, x10);
armvixlb0c8ae22014-03-21 14:03:59 +00001066 ASSERT_EQUAL_64(0x111111111111efec, x11);
1067 ASSERT_EQUAL_64(0x11111110eeececea, x12);
1068 ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6, x13);
armvixlad96eda2013-06-14 11:42:37 +01001069
1070 TEARDOWN();
1071}
1072
1073
1074TEST(mul) {
1075 SETUP();
1076
1077 START();
1078 __ Mov(x16, 0);
1079 __ Mov(x17, 1);
1080 __ Mov(x18, 0xffffffff);
armvixlb0c8ae22014-03-21 14:03:59 +00001081 __ Mov(x19, 0xffffffffffffffff);
armvixlad96eda2013-06-14 11:42:37 +01001082
1083 __ Mul(w0, w16, w16);
1084 __ Mul(w1, w16, w17);
1085 __ Mul(w2, w17, w18);
1086 __ Mul(w3, w18, w19);
1087 __ Mul(x4, x16, x16);
1088 __ Mul(x5, x17, x18);
1089 __ Mul(x6, x18, x19);
1090 __ Mul(x7, x19, x19);
1091 __ Smull(x8, w17, w18);
1092 __ Smull(x9, w18, w18);
1093 __ Smull(x10, w19, w19);
1094 __ Mneg(w11, w16, w16);
1095 __ Mneg(w12, w16, w17);
1096 __ Mneg(w13, w17, w18);
1097 __ Mneg(w14, w18, w19);
1098 __ Mneg(x20, x16, x16);
1099 __ Mneg(x21, x17, x18);
1100 __ Mneg(x22, x18, x19);
1101 __ Mneg(x23, x19, x19);
1102 END();
1103
1104 RUN();
1105
1106 ASSERT_EQUAL_64(0, x0);
1107 ASSERT_EQUAL_64(0, x1);
1108 ASSERT_EQUAL_64(0xffffffff, x2);
1109 ASSERT_EQUAL_64(1, x3);
1110 ASSERT_EQUAL_64(0, x4);
1111 ASSERT_EQUAL_64(0xffffffff, x5);
armvixlb0c8ae22014-03-21 14:03:59 +00001112 ASSERT_EQUAL_64(0xffffffff00000001, x6);
armvixlad96eda2013-06-14 11:42:37 +01001113 ASSERT_EQUAL_64(1, x7);
armvixlb0c8ae22014-03-21 14:03:59 +00001114 ASSERT_EQUAL_64(0xffffffffffffffff, x8);
armvixlad96eda2013-06-14 11:42:37 +01001115 ASSERT_EQUAL_64(1, x9);
1116 ASSERT_EQUAL_64(1, x10);
1117 ASSERT_EQUAL_64(0, x11);
1118 ASSERT_EQUAL_64(0, x12);
1119 ASSERT_EQUAL_64(1, x13);
1120 ASSERT_EQUAL_64(0xffffffff, x14);
1121 ASSERT_EQUAL_64(0, x20);
armvixlb0c8ae22014-03-21 14:03:59 +00001122 ASSERT_EQUAL_64(0xffffffff00000001, x21);
armvixlad96eda2013-06-14 11:42:37 +01001123 ASSERT_EQUAL_64(0xffffffff, x22);
armvixlb0c8ae22014-03-21 14:03:59 +00001124 ASSERT_EQUAL_64(0xffffffffffffffff, x23);
armvixlad96eda2013-06-14 11:42:37 +01001125
1126 TEARDOWN();
1127}
1128
1129
armvixlf37fdc02014-02-05 13:22:16 +00001130static void SmullHelper(int64_t expected, int64_t a, int64_t b) {
1131 SETUP();
1132 START();
1133 __ Mov(w0, a);
1134 __ Mov(w1, b);
1135 __ Smull(x2, w0, w1);
1136 END();
1137 RUN();
1138 ASSERT_EQUAL_64(expected, x2);
1139 TEARDOWN();
1140}
1141
1142
1143TEST(smull) {
1144 SmullHelper(0, 0, 0);
1145 SmullHelper(1, 1, 1);
1146 SmullHelper(-1, -1, 1);
1147 SmullHelper(1, -1, -1);
1148 SmullHelper(0xffffffff80000000, 0x80000000, 1);
1149 SmullHelper(0x0000000080000000, 0x00010000, 0x00008000);
1150}
1151
1152
armvixlad96eda2013-06-14 11:42:37 +01001153TEST(madd) {
1154 SETUP();
1155
1156 START();
1157 __ Mov(x16, 0);
1158 __ Mov(x17, 1);
1159 __ Mov(x18, 0xffffffff);
armvixlb0c8ae22014-03-21 14:03:59 +00001160 __ Mov(x19, 0xffffffffffffffff);
armvixlad96eda2013-06-14 11:42:37 +01001161
1162 __ Madd(w0, w16, w16, w16);
1163 __ Madd(w1, w16, w16, w17);
1164 __ Madd(w2, w16, w16, w18);
1165 __ Madd(w3, w16, w16, w19);
1166 __ Madd(w4, w16, w17, w17);
1167 __ Madd(w5, w17, w17, w18);
1168 __ Madd(w6, w17, w17, w19);
1169 __ Madd(w7, w17, w18, w16);
1170 __ Madd(w8, w17, w18, w18);
1171 __ Madd(w9, w18, w18, w17);
1172 __ Madd(w10, w18, w19, w18);
1173 __ Madd(w11, w19, w19, w19);
1174
1175 __ Madd(x12, x16, x16, x16);
1176 __ Madd(x13, x16, x16, x17);
1177 __ Madd(x14, x16, x16, x18);
1178 __ Madd(x15, x16, x16, x19);
1179 __ Madd(x20, x16, x17, x17);
1180 __ Madd(x21, x17, x17, x18);
1181 __ Madd(x22, x17, x17, x19);
1182 __ Madd(x23, x17, x18, x16);
1183 __ Madd(x24, x17, x18, x18);
1184 __ Madd(x25, x18, x18, x17);
1185 __ Madd(x26, x18, x19, x18);
1186 __ Madd(x27, x19, x19, x19);
1187
1188 END();
1189
1190 RUN();
1191
1192 ASSERT_EQUAL_64(0, x0);
1193 ASSERT_EQUAL_64(1, x1);
1194 ASSERT_EQUAL_64(0xffffffff, x2);
1195 ASSERT_EQUAL_64(0xffffffff, x3);
1196 ASSERT_EQUAL_64(1, x4);
1197 ASSERT_EQUAL_64(0, x5);
1198 ASSERT_EQUAL_64(0, x6);
1199 ASSERT_EQUAL_64(0xffffffff, x7);
1200 ASSERT_EQUAL_64(0xfffffffe, x8);
1201 ASSERT_EQUAL_64(2, x9);
1202 ASSERT_EQUAL_64(0, x10);
1203 ASSERT_EQUAL_64(0, x11);
1204
1205 ASSERT_EQUAL_64(0, x12);
1206 ASSERT_EQUAL_64(1, x13);
armvixlb0c8ae22014-03-21 14:03:59 +00001207 ASSERT_EQUAL_64(0x00000000ffffffff, x14);
armvixlad96eda2013-06-14 11:42:37 +01001208 ASSERT_EQUAL_64(0xffffffffffffffff, x15);
1209 ASSERT_EQUAL_64(1, x20);
armvixlb0c8ae22014-03-21 14:03:59 +00001210 ASSERT_EQUAL_64(0x0000000100000000, x21);
armvixlad96eda2013-06-14 11:42:37 +01001211 ASSERT_EQUAL_64(0, x22);
armvixlb0c8ae22014-03-21 14:03:59 +00001212 ASSERT_EQUAL_64(0x00000000ffffffff, x23);
1213 ASSERT_EQUAL_64(0x00000001fffffffe, x24);
1214 ASSERT_EQUAL_64(0xfffffffe00000002, x25);
armvixlad96eda2013-06-14 11:42:37 +01001215 ASSERT_EQUAL_64(0, x26);
1216 ASSERT_EQUAL_64(0, x27);
1217
1218 TEARDOWN();
1219}
1220
1221
1222TEST(msub) {
1223 SETUP();
1224
1225 START();
1226 __ Mov(x16, 0);
1227 __ Mov(x17, 1);
1228 __ Mov(x18, 0xffffffff);
armvixlb0c8ae22014-03-21 14:03:59 +00001229 __ Mov(x19, 0xffffffffffffffff);
armvixlad96eda2013-06-14 11:42:37 +01001230
1231 __ Msub(w0, w16, w16, w16);
1232 __ Msub(w1, w16, w16, w17);
1233 __ Msub(w2, w16, w16, w18);
1234 __ Msub(w3, w16, w16, w19);
1235 __ Msub(w4, w16, w17, w17);
1236 __ Msub(w5, w17, w17, w18);
1237 __ Msub(w6, w17, w17, w19);
1238 __ Msub(w7, w17, w18, w16);
1239 __ Msub(w8, w17, w18, w18);
1240 __ Msub(w9, w18, w18, w17);
1241 __ Msub(w10, w18, w19, w18);
1242 __ Msub(w11, w19, w19, w19);
1243
1244 __ Msub(x12, x16, x16, x16);
1245 __ Msub(x13, x16, x16, x17);
1246 __ Msub(x14, x16, x16, x18);
1247 __ Msub(x15, x16, x16, x19);
1248 __ Msub(x20, x16, x17, x17);
1249 __ Msub(x21, x17, x17, x18);
1250 __ Msub(x22, x17, x17, x19);
1251 __ Msub(x23, x17, x18, x16);
1252 __ Msub(x24, x17, x18, x18);
1253 __ Msub(x25, x18, x18, x17);
1254 __ Msub(x26, x18, x19, x18);
1255 __ Msub(x27, x19, x19, x19);
1256
1257 END();
1258
1259 RUN();
1260
1261 ASSERT_EQUAL_64(0, x0);
1262 ASSERT_EQUAL_64(1, x1);
1263 ASSERT_EQUAL_64(0xffffffff, x2);
1264 ASSERT_EQUAL_64(0xffffffff, x3);
1265 ASSERT_EQUAL_64(1, x4);
1266 ASSERT_EQUAL_64(0xfffffffe, x5);
1267 ASSERT_EQUAL_64(0xfffffffe, x6);
1268 ASSERT_EQUAL_64(1, x7);
1269 ASSERT_EQUAL_64(0, x8);
1270 ASSERT_EQUAL_64(0, x9);
1271 ASSERT_EQUAL_64(0xfffffffe, x10);
1272 ASSERT_EQUAL_64(0xfffffffe, x11);
1273
1274 ASSERT_EQUAL_64(0, x12);
1275 ASSERT_EQUAL_64(1, x13);
armvixlb0c8ae22014-03-21 14:03:59 +00001276 ASSERT_EQUAL_64(0x00000000ffffffff, x14);
1277 ASSERT_EQUAL_64(0xffffffffffffffff, x15);
armvixlad96eda2013-06-14 11:42:37 +01001278 ASSERT_EQUAL_64(1, x20);
armvixlb0c8ae22014-03-21 14:03:59 +00001279 ASSERT_EQUAL_64(0x00000000fffffffe, x21);
1280 ASSERT_EQUAL_64(0xfffffffffffffffe, x22);
1281 ASSERT_EQUAL_64(0xffffffff00000001, x23);
armvixlad96eda2013-06-14 11:42:37 +01001282 ASSERT_EQUAL_64(0, x24);
armvixlb0c8ae22014-03-21 14:03:59 +00001283 ASSERT_EQUAL_64(0x0000000200000000, x25);
1284 ASSERT_EQUAL_64(0x00000001fffffffe, x26);
1285 ASSERT_EQUAL_64(0xfffffffffffffffe, x27);
armvixlad96eda2013-06-14 11:42:37 +01001286
1287 TEARDOWN();
1288}
1289
1290
1291TEST(smulh) {
1292 SETUP();
1293
1294 START();
1295 __ Mov(x20, 0);
1296 __ Mov(x21, 1);
armvixlb0c8ae22014-03-21 14:03:59 +00001297 __ Mov(x22, 0x0000000100000000);
1298 __ Mov(x23, 0x0000000012345678);
1299 __ Mov(x24, 0x0123456789abcdef);
1300 __ Mov(x25, 0x0000000200000000);
1301 __ Mov(x26, 0x8000000000000000);
1302 __ Mov(x27, 0xffffffffffffffff);
1303 __ Mov(x28, 0x5555555555555555);
1304 __ Mov(x29, 0xaaaaaaaaaaaaaaaa);
armvixlad96eda2013-06-14 11:42:37 +01001305
1306 __ Smulh(x0, x20, x24);
1307 __ Smulh(x1, x21, x24);
1308 __ Smulh(x2, x22, x23);
1309 __ Smulh(x3, x22, x24);
1310 __ Smulh(x4, x24, x25);
1311 __ Smulh(x5, x23, x27);
1312 __ Smulh(x6, x26, x26);
1313 __ Smulh(x7, x26, x27);
1314 __ Smulh(x8, x27, x27);
1315 __ Smulh(x9, x28, x28);
1316 __ Smulh(x10, x28, x29);
1317 __ Smulh(x11, x29, x29);
1318 END();
1319
1320 RUN();
1321
1322 ASSERT_EQUAL_64(0, x0);
1323 ASSERT_EQUAL_64(0, x1);
1324 ASSERT_EQUAL_64(0, x2);
armvixlb0c8ae22014-03-21 14:03:59 +00001325 ASSERT_EQUAL_64(0x0000000001234567, x3);
1326 ASSERT_EQUAL_64(0x0000000002468acf, x4);
1327 ASSERT_EQUAL_64(0xffffffffffffffff, x5);
1328 ASSERT_EQUAL_64(0x4000000000000000, x6);
armvixlad96eda2013-06-14 11:42:37 +01001329 ASSERT_EQUAL_64(0, x7);
1330 ASSERT_EQUAL_64(0, x8);
armvixlb0c8ae22014-03-21 14:03:59 +00001331 ASSERT_EQUAL_64(0x1c71c71c71c71c71, x9);
1332 ASSERT_EQUAL_64(0xe38e38e38e38e38e, x10);
1333 ASSERT_EQUAL_64(0x1c71c71c71c71c72, x11);
armvixlad96eda2013-06-14 11:42:37 +01001334
1335 TEARDOWN();
1336}
1337
1338
1339TEST(smaddl_umaddl) {
1340 SETUP();
1341
1342 START();
1343 __ Mov(x17, 1);
armvixlb0c8ae22014-03-21 14:03:59 +00001344 __ Mov(x18, 0x00000000ffffffff);
1345 __ Mov(x19, 0xffffffffffffffff);
armvixlad96eda2013-06-14 11:42:37 +01001346 __ Mov(x20, 4);
armvixlb0c8ae22014-03-21 14:03:59 +00001347 __ Mov(x21, 0x0000000200000000);
armvixlad96eda2013-06-14 11:42:37 +01001348
1349 __ Smaddl(x9, w17, w18, x20);
1350 __ Smaddl(x10, w18, w18, x20);
1351 __ Smaddl(x11, w19, w19, x20);
1352 __ Smaddl(x12, w19, w19, x21);
1353 __ Umaddl(x13, w17, w18, x20);
1354 __ Umaddl(x14, w18, w18, x20);
1355 __ Umaddl(x15, w19, w19, x20);
1356 __ Umaddl(x22, w19, w19, x21);
1357 END();
1358
1359 RUN();
1360
1361 ASSERT_EQUAL_64(3, x9);
1362 ASSERT_EQUAL_64(5, x10);
1363 ASSERT_EQUAL_64(5, x11);
armvixlb0c8ae22014-03-21 14:03:59 +00001364 ASSERT_EQUAL_64(0x0000000200000001, x12);
1365 ASSERT_EQUAL_64(0x0000000100000003, x13);
1366 ASSERT_EQUAL_64(0xfffffffe00000005, x14);
1367 ASSERT_EQUAL_64(0xfffffffe00000005, x15);
1368 ASSERT_EQUAL_64(1, x22);
armvixlad96eda2013-06-14 11:42:37 +01001369
1370 TEARDOWN();
1371}
1372
1373
1374TEST(smsubl_umsubl) {
1375 SETUP();
1376
1377 START();
1378 __ Mov(x17, 1);
armvixlb0c8ae22014-03-21 14:03:59 +00001379 __ Mov(x18, 0x00000000ffffffff);
1380 __ Mov(x19, 0xffffffffffffffff);
armvixlad96eda2013-06-14 11:42:37 +01001381 __ Mov(x20, 4);
armvixlb0c8ae22014-03-21 14:03:59 +00001382 __ Mov(x21, 0x0000000200000000);
armvixlad96eda2013-06-14 11:42:37 +01001383
1384 __ Smsubl(x9, w17, w18, x20);
1385 __ Smsubl(x10, w18, w18, x20);
1386 __ Smsubl(x11, w19, w19, x20);
1387 __ Smsubl(x12, w19, w19, x21);
1388 __ Umsubl(x13, w17, w18, x20);
1389 __ Umsubl(x14, w18, w18, x20);
1390 __ Umsubl(x15, w19, w19, x20);
1391 __ Umsubl(x22, w19, w19, x21);
1392 END();
1393
1394 RUN();
1395
1396 ASSERT_EQUAL_64(5, x9);
1397 ASSERT_EQUAL_64(3, x10);
1398 ASSERT_EQUAL_64(3, x11);
armvixlb0c8ae22014-03-21 14:03:59 +00001399 ASSERT_EQUAL_64(0x00000001ffffffff, x12);
1400 ASSERT_EQUAL_64(0xffffffff00000005, x13);
1401 ASSERT_EQUAL_64(0x0000000200000003, x14);
1402 ASSERT_EQUAL_64(0x0000000200000003, x15);
1403 ASSERT_EQUAL_64(0x00000003ffffffff, x22);
armvixlad96eda2013-06-14 11:42:37 +01001404
1405 TEARDOWN();
1406}
1407
1408
1409TEST(div) {
1410 SETUP();
1411
1412 START();
1413 __ Mov(x16, 1);
1414 __ Mov(x17, 0xffffffff);
armvixlb0c8ae22014-03-21 14:03:59 +00001415 __ Mov(x18, 0xffffffffffffffff);
armvixlad96eda2013-06-14 11:42:37 +01001416 __ Mov(x19, 0x80000000);
armvixlb0c8ae22014-03-21 14:03:59 +00001417 __ Mov(x20, 0x8000000000000000);
armvixlad96eda2013-06-14 11:42:37 +01001418 __ Mov(x21, 2);
1419
1420 __ Udiv(w0, w16, w16);
1421 __ Udiv(w1, w17, w16);
1422 __ Sdiv(w2, w16, w16);
1423 __ Sdiv(w3, w16, w17);
1424 __ Sdiv(w4, w17, w18);
1425
1426 __ Udiv(x5, x16, x16);
1427 __ Udiv(x6, x17, x18);
1428 __ Sdiv(x7, x16, x16);
1429 __ Sdiv(x8, x16, x17);
1430 __ Sdiv(x9, x17, x18);
1431
1432 __ Udiv(w10, w19, w21);
1433 __ Sdiv(w11, w19, w21);
1434 __ Udiv(x12, x19, x21);
1435 __ Sdiv(x13, x19, x21);
1436 __ Udiv(x14, x20, x21);
1437 __ Sdiv(x15, x20, x21);
armvixlf37fdc02014-02-05 13:22:16 +00001438
1439 __ Udiv(w22, w19, w17);
1440 __ Sdiv(w23, w19, w17);
1441 __ Udiv(x24, x20, x18);
1442 __ Sdiv(x25, x20, x18);
1443
1444 __ Udiv(x26, x16, x21);
1445 __ Sdiv(x27, x16, x21);
1446 __ Udiv(x28, x18, x21);
1447 __ Sdiv(x29, x18, x21);
1448
1449 __ Mov(x17, 0);
1450 __ Udiv(w18, w16, w17);
1451 __ Sdiv(w19, w16, w17);
1452 __ Udiv(x20, x16, x17);
1453 __ Sdiv(x21, x16, x17);
armvixlad96eda2013-06-14 11:42:37 +01001454 END();
1455
1456 RUN();
1457
1458 ASSERT_EQUAL_64(1, x0);
1459 ASSERT_EQUAL_64(0xffffffff, x1);
1460 ASSERT_EQUAL_64(1, x2);
1461 ASSERT_EQUAL_64(0xffffffff, x3);
1462 ASSERT_EQUAL_64(1, x4);
1463 ASSERT_EQUAL_64(1, x5);
1464 ASSERT_EQUAL_64(0, x6);
1465 ASSERT_EQUAL_64(1, x7);
1466 ASSERT_EQUAL_64(0, x8);
armvixlb0c8ae22014-03-21 14:03:59 +00001467 ASSERT_EQUAL_64(0xffffffff00000001, x9);
armvixlad96eda2013-06-14 11:42:37 +01001468 ASSERT_EQUAL_64(0x40000000, x10);
1469 ASSERT_EQUAL_64(0xC0000000, x11);
armvixlb0c8ae22014-03-21 14:03:59 +00001470 ASSERT_EQUAL_64(0x0000000040000000, x12);
1471 ASSERT_EQUAL_64(0x0000000040000000, x13);
1472 ASSERT_EQUAL_64(0x4000000000000000, x14);
1473 ASSERT_EQUAL_64(0xC000000000000000, x15);
armvixlf37fdc02014-02-05 13:22:16 +00001474 ASSERT_EQUAL_64(0, x22);
1475 ASSERT_EQUAL_64(0x80000000, x23);
1476 ASSERT_EQUAL_64(0, x24);
armvixlb0c8ae22014-03-21 14:03:59 +00001477 ASSERT_EQUAL_64(0x8000000000000000, x25);
armvixlf37fdc02014-02-05 13:22:16 +00001478 ASSERT_EQUAL_64(0, x26);
1479 ASSERT_EQUAL_64(0, x27);
armvixlb0c8ae22014-03-21 14:03:59 +00001480 ASSERT_EQUAL_64(0x7fffffffffffffff, x28);
armvixlf37fdc02014-02-05 13:22:16 +00001481 ASSERT_EQUAL_64(0, x29);
1482 ASSERT_EQUAL_64(0, x18);
1483 ASSERT_EQUAL_64(0, x19);
1484 ASSERT_EQUAL_64(0, x20);
1485 ASSERT_EQUAL_64(0, x21);
armvixlad96eda2013-06-14 11:42:37 +01001486
1487 TEARDOWN();
1488}
1489
1490
1491TEST(rbit_rev) {
1492 SETUP();
1493
1494 START();
armvixlb0c8ae22014-03-21 14:03:59 +00001495 __ Mov(x24, 0xfedcba9876543210);
armvixlad96eda2013-06-14 11:42:37 +01001496 __ Rbit(w0, w24);
1497 __ Rbit(x1, x24);
1498 __ Rev16(w2, w24);
1499 __ Rev16(x3, x24);
1500 __ Rev(w4, w24);
1501 __ Rev32(x5, x24);
1502 __ Rev(x6, x24);
1503 END();
1504
1505 RUN();
1506
1507 ASSERT_EQUAL_64(0x084c2a6e, x0);
armvixlb0c8ae22014-03-21 14:03:59 +00001508 ASSERT_EQUAL_64(0x084c2a6e195d3b7f, x1);
armvixlad96eda2013-06-14 11:42:37 +01001509 ASSERT_EQUAL_64(0x54761032, x2);
armvixlb0c8ae22014-03-21 14:03:59 +00001510 ASSERT_EQUAL_64(0xdcfe98ba54761032, x3);
armvixlad96eda2013-06-14 11:42:37 +01001511 ASSERT_EQUAL_64(0x10325476, x4);
armvixlb0c8ae22014-03-21 14:03:59 +00001512 ASSERT_EQUAL_64(0x98badcfe10325476, x5);
1513 ASSERT_EQUAL_64(0x1032547698badcfe, x6);
armvixlad96eda2013-06-14 11:42:37 +01001514
1515 TEARDOWN();
1516}
1517
1518
1519TEST(clz_cls) {
1520 SETUP();
1521
1522 START();
armvixlb0c8ae22014-03-21 14:03:59 +00001523 __ Mov(x24, 0x0008000000800000);
1524 __ Mov(x25, 0xff800000fff80000);
armvixlad96eda2013-06-14 11:42:37 +01001525 __ Mov(x26, 0);
1526 __ Clz(w0, w24);
1527 __ Clz(x1, x24);
1528 __ Clz(w2, w25);
1529 __ Clz(x3, x25);
1530 __ Clz(w4, w26);
1531 __ Clz(x5, x26);
1532 __ Cls(w6, w24);
1533 __ Cls(x7, x24);
1534 __ Cls(w8, w25);
1535 __ Cls(x9, x25);
1536 __ Cls(w10, w26);
1537 __ Cls(x11, x26);
1538 END();
1539
1540 RUN();
1541
1542 ASSERT_EQUAL_64(8, x0);
1543 ASSERT_EQUAL_64(12, x1);
1544 ASSERT_EQUAL_64(0, x2);
1545 ASSERT_EQUAL_64(0, x3);
1546 ASSERT_EQUAL_64(32, x4);
1547 ASSERT_EQUAL_64(64, x5);
1548 ASSERT_EQUAL_64(7, x6);
1549 ASSERT_EQUAL_64(11, x7);
1550 ASSERT_EQUAL_64(12, x8);
1551 ASSERT_EQUAL_64(8, x9);
1552 ASSERT_EQUAL_64(31, x10);
1553 ASSERT_EQUAL_64(63, x11);
1554
1555 TEARDOWN();
1556}
1557
1558
1559TEST(label) {
1560 SETUP();
1561
1562 Label label_1, label_2, label_3, label_4;
1563
1564 START();
1565 __ Mov(x0, 0x1);
1566 __ Mov(x1, 0x0);
1567 __ Mov(x22, lr); // Save lr.
1568
1569 __ B(&label_1);
1570 __ B(&label_1);
1571 __ B(&label_1); // Multiple branches to the same label.
1572 __ Mov(x0, 0x0);
1573 __ Bind(&label_2);
1574 __ B(&label_3); // Forward branch.
1575 __ Mov(x0, 0x0);
1576 __ Bind(&label_1);
1577 __ B(&label_2); // Backward branch.
1578 __ Mov(x0, 0x0);
1579 __ Bind(&label_3);
1580 __ Bl(&label_4);
1581 END();
1582
1583 __ Bind(&label_4);
1584 __ Mov(x1, 0x1);
1585 __ Mov(lr, x22);
1586 END();
1587
1588 RUN();
1589
1590 ASSERT_EQUAL_64(0x1, x0);
1591 ASSERT_EQUAL_64(0x1, x1);
1592
1593 TEARDOWN();
1594}
1595
1596
armvixlc68cb642014-09-25 18:49:30 +01001597TEST(label_2) {
1598 SETUP();
1599
1600 Label label_1, label_2, label_3;
1601 Label first_jump_to_3;
1602
1603 START();
1604 __ Mov(x0, 0x0);
1605
1606 __ B(&label_1);
1607 ptrdiff_t offset_2 = masm.CursorOffset();
1608 __ Orr(x0, x0, 1 << 1);
1609 __ B(&label_3);
1610 ptrdiff_t offset_1 = masm.CursorOffset();
1611 __ Orr(x0, x0, 1 << 0);
1612 __ B(&label_2);
1613 ptrdiff_t offset_3 = masm.CursorOffset();
1614 __ Tbz(x0, 2, &first_jump_to_3);
1615 __ Orr(x0, x0, 1 << 3);
1616 __ Bind(&first_jump_to_3);
1617 __ Orr(x0, x0, 1 << 2);
1618 __ Tbz(x0, 3, &label_3);
1619
1620 // Labels 1, 2, and 3 are bound before the current buffer offset. Branches to
1621 // label_1 and label_2 branch respectively forward and backward. Branches to
1622 // label 3 include both forward and backward branches.
1623 masm.BindToOffset(&label_1, offset_1);
1624 masm.BindToOffset(&label_2, offset_2);
1625 masm.BindToOffset(&label_3, offset_3);
1626
1627 END();
1628
1629 RUN();
1630
1631 ASSERT_EQUAL_64(0xf, x0);
1632
1633 TEARDOWN();
1634}
1635
1636
armvixlad96eda2013-06-14 11:42:37 +01001637TEST(adr) {
1638 SETUP();
1639
1640 Label label_1, label_2, label_3, label_4;
1641
1642 START();
1643 __ Mov(x0, 0x0); // Set to non-zero to indicate failure.
1644 __ Adr(x1, &label_3); // Set to zero to indicate success.
1645
1646 __ Adr(x2, &label_1); // Multiple forward references to the same label.
1647 __ Adr(x3, &label_1);
1648 __ Adr(x4, &label_1);
1649
1650 __ Bind(&label_2);
1651 __ Eor(x5, x2, Operand(x3)); // Ensure that x2,x3 and x4 are identical.
1652 __ Eor(x6, x2, Operand(x4));
1653 __ Orr(x0, x0, Operand(x5));
1654 __ Orr(x0, x0, Operand(x6));
1655 __ Br(x2); // label_1, label_3
1656
1657 __ Bind(&label_3);
1658 __ Adr(x2, &label_3); // Self-reference (offset 0).
1659 __ Eor(x1, x1, Operand(x2));
1660 __ Adr(x2, &label_4); // Simple forward reference.
1661 __ Br(x2); // label_4
1662
1663 __ Bind(&label_1);
1664 __ Adr(x2, &label_3); // Multiple reverse references to the same label.
1665 __ Adr(x3, &label_3);
1666 __ Adr(x4, &label_3);
1667 __ Adr(x5, &label_2); // Simple reverse reference.
1668 __ Br(x5); // label_2
1669
1670 __ Bind(&label_4);
1671 END();
1672
1673 RUN();
1674
1675 ASSERT_EQUAL_64(0x0, x0);
1676 ASSERT_EQUAL_64(0x0, x1);
1677
1678 TEARDOWN();
1679}
1680
1681
armvixl4a102ba2014-07-14 09:02:40 +01001682// Simple adrp tests: check that labels are linked and handled properly.
1683// This is similar to the adr test, but all the adrp instructions are put on the
1684// same page so that they return the same value.
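// Adrp computes the base address of the 4KB page containing its target: the
// low 12 bits of the result are always zero, so adrp instructions emitted on
// the same page and targeting the same label all produce the same value.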
1685TEST(adrp) {
1686 Label start;
1687 Label label_1, label_2, label_3;
1688
1689 SETUP_CUSTOM(2 * kPageSize, PageOffsetDependentCode);
1690 START();
1691
1692 // Waste space until the start of a page.
armvixlc68cb642014-09-25 18:49:30 +01001693 {
1694 InstructionAccurateScope scope(&masm,
1695 kPageSize / kInstructionSize,
1696 InstructionAccurateScope::kMaximumSize);
armvixl4a102ba2014-07-14 09:02:40 +01001697 const uintptr_t kPageOffsetMask = kPageSize - 1;
armvixlc68cb642014-09-25 18:49:30 +01001698 while ((masm.GetCursorAddress<uintptr_t>() & kPageOffsetMask) != 0) {
armvixl4a102ba2014-07-14 09:02:40 +01001699 __ b(&start);
1700 }
1701 __ bind(&start);
1702 }
1703
1704 // Simple forward reference.
1705 __ Adrp(x0, &label_2);
1706
1707 __ Bind(&label_1);
1708
1709 // Multiple forward references to the same label.
1710 __ Adrp(x1, &label_3);
1711 __ Adrp(x2, &label_3);
1712 __ Adrp(x3, &label_3);
1713
1714 __ Bind(&label_2);
1715
1716 // Self-reference (offset 0).
1717 __ Adrp(x4, &label_2);
1718
1719 __ Bind(&label_3);
1720
1721 // Simple reverse reference.
1722 __ Adrp(x5, &label_1);
1723
1724 // Multiple reverse references to the same label.
1725 __ Adrp(x6, &label_2);
1726 __ Adrp(x7, &label_2);
1727 __ Adrp(x8, &label_2);
1728
1729 VIXL_ASSERT(masm.SizeOfCodeGeneratedSince(&start) < kPageSize);
1730 END();
1731 RUN();
1732
1733 uint64_t expected = reinterpret_cast<uint64_t>(
1734 AlignDown(masm.GetLabelAddress<uint64_t*>(&start), kPageSize));
1735 ASSERT_EQUAL_64(expected, x0);
1736 ASSERT_EQUAL_64(expected, x1);
1737 ASSERT_EQUAL_64(expected, x2);
1738 ASSERT_EQUAL_64(expected, x3);
1739 ASSERT_EQUAL_64(expected, x4);
1740 ASSERT_EQUAL_64(expected, x5);
1741 ASSERT_EQUAL_64(expected, x6);
1742 ASSERT_EQUAL_64(expected, x7);
1743 ASSERT_EQUAL_64(expected, x8);
1744
armvixlc68cb642014-09-25 18:49:30 +01001745 TEARDOWN_CUSTOM();
armvixl4a102ba2014-07-14 09:02:40 +01001746}
1747
1748
1749static void AdrpPageBoundaryHelper(unsigned offset_into_page) {
1750 VIXL_ASSERT(offset_into_page < kPageSize);
1751 VIXL_ASSERT((offset_into_page % kInstructionSize) == 0);
1752
1753 const uintptr_t kPageOffsetMask = kPageSize - 1;
1754
1755 // The test label is always bound on page 0. Adrp instructions are generated
1756 // on pages from kStartPage to kEndPage (inclusive).
1757 const int kStartPage = -16;
1758 const int kEndPage = 16;
armvixlc68cb642014-09-25 18:49:30 +01001759 const int kMaxCodeSize = (kEndPage - kStartPage + 2) * kPageSize;
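  // The '+ 2' presumably covers the up-to-one-page alignment padding emitted
  // before the first full page, in addition to the (kEndPage - kStartPage + 1)
  // pages of adrp code.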
armvixl4a102ba2014-07-14 09:02:40 +01001760
armvixlc68cb642014-09-25 18:49:30 +01001761 SETUP_CUSTOM(kMaxCodeSize, PageOffsetDependentCode);
armvixl4a102ba2014-07-14 09:02:40 +01001762 START();
1763
armvixl4a102ba2014-07-14 09:02:40 +01001764 Label test;
armvixlc68cb642014-09-25 18:49:30 +01001765 Label start;
armvixl4a102ba2014-07-14 09:02:40 +01001766
armvixlc68cb642014-09-25 18:49:30 +01001767 {
1768 InstructionAccurateScope scope(&masm,
1769 kMaxCodeSize / kInstructionSize,
1770 InstructionAccurateScope::kMaximumSize);
1771 // Initialize NZCV with `eq` flags.
1772 __ cmp(wzr, wzr);
armvixl4a102ba2014-07-14 09:02:40 +01001773 // Waste space until the start of a page.
armvixlc68cb642014-09-25 18:49:30 +01001774 while ((masm.GetCursorAddress<uintptr_t>() & kPageOffsetMask) != 0) {
armvixl4a102ba2014-07-14 09:02:40 +01001775 __ b(&start);
1776 }
1777
1778 // The first page.
1779 VIXL_STATIC_ASSERT(kStartPage < 0);
armvixlc68cb642014-09-25 18:49:30 +01001780 {
1781 InstructionAccurateScope scope_page(&masm, kPageSize / kInstructionSize);
armvixl4a102ba2014-07-14 09:02:40 +01001782 __ bind(&start);
1783 __ adrp(x0, &test);
1784 __ adrp(x1, &test);
1785 for (size_t i = 2; i < (kPageSize / kInstructionSize); i += 2) {
1786 __ ccmp(x0, x1, NoFlag, eq);
1787 __ adrp(x1, &test);
1788 }
1789 }
1790
1791 // Subsequent pages.
1792 VIXL_STATIC_ASSERT(kEndPage >= 0);
1793 for (int page = (kStartPage + 1); page <= kEndPage; page++) {
1794 InstructionAccurateScope scope_page(&masm, kPageSize / kInstructionSize);
1795 if (page == 0) {
1796 for (size_t i = 0; i < (kPageSize / kInstructionSize);) {
1797 if (i++ == (offset_into_page / kInstructionSize)) __ bind(&test);
1798 __ ccmp(x0, x1, NoFlag, eq);
1799 if (i++ == (offset_into_page / kInstructionSize)) __ bind(&test);
1800 __ adrp(x1, &test);
1801 }
1802 } else {
1803 for (size_t i = 0; i < (kPageSize / kInstructionSize); i += 2) {
1804 __ ccmp(x0, x1, NoFlag, eq);
1805 __ adrp(x1, &test);
1806 }
1807 }
1808 }
armvixl4a102ba2014-07-14 09:02:40 +01001809 }
1810
armvixlc68cb642014-09-25 18:49:30 +01001811 // Every adrp instruction pointed to the same label (`test`), so they should
1812 // all have produced the same result.
1813
armvixl4a102ba2014-07-14 09:02:40 +01001814 END();
1815 RUN();
1816
1817 uintptr_t expected =
1818 AlignDown(masm.GetLabelAddress<uintptr_t>(&test), kPageSize);
1819 ASSERT_EQUAL_64(expected, x0);
1820 ASSERT_EQUAL_64(expected, x1);
1821 ASSERT_EQUAL_NZCV(ZCFlag);
1822
armvixlc68cb642014-09-25 18:49:30 +01001823 TEARDOWN_CUSTOM();
armvixl4a102ba2014-07-14 09:02:40 +01001824}
1825
1826
1827// Test that labels are correctly referenced by adrp across page boundaries.
1828TEST(adrp_page_boundaries) {
1829 VIXL_STATIC_ASSERT(kPageSize == 4096);
1830 AdrpPageBoundaryHelper(kInstructionSize * 0);
1831 AdrpPageBoundaryHelper(kInstructionSize * 1);
1832 AdrpPageBoundaryHelper(kInstructionSize * 512);
1833 AdrpPageBoundaryHelper(kInstructionSize * 1022);
1834 AdrpPageBoundaryHelper(kInstructionSize * 1023);
1835}
1836
1837
1838static void AdrpOffsetHelper(int64_t imm21) {
1839 const size_t kPageOffsetMask = kPageSize - 1;
armvixlc68cb642014-09-25 18:49:30 +01001840 const int kMaxCodeSize = 2 * kPageSize;
armvixl4a102ba2014-07-14 09:02:40 +01001841
armvixlc68cb642014-09-25 18:49:30 +01001842 SETUP_CUSTOM(kMaxCodeSize, PageOffsetDependentCode);
armvixl4a102ba2014-07-14 09:02:40 +01001843 START();
1844
armvixl4a102ba2014-07-14 09:02:40 +01001845 Label page;
armvixlc68cb642014-09-25 18:49:30 +01001846
1847 {
1848 InstructionAccurateScope scope(&masm,
1849 kMaxCodeSize / kInstructionSize,
1850 InstructionAccurateScope::kMaximumSize);
1851 // Initialize NZCV with `eq` flags.
1852 __ cmp(wzr, wzr);
armvixl4a102ba2014-07-14 09:02:40 +01001853 // Waste space until the start of a page.
armvixlc68cb642014-09-25 18:49:30 +01001854 while ((masm.GetCursorAddress<uintptr_t>() & kPageOffsetMask) != 0) {
armvixl4a102ba2014-07-14 09:02:40 +01001855 __ b(&page);
1856 }
1857 __ bind(&page);
1858
armvixlc68cb642014-09-25 18:49:30 +01001859 {
1860 InstructionAccurateScope scope_page(&masm, kPageSize / kInstructionSize);
armvixl4a102ba2014-07-14 09:02:40 +01001861 // Every adrp instruction on this page should return the same value.
1862 __ adrp(x0, imm21);
1863 __ adrp(x1, imm21);
1864 for (size_t i = 2; i < kPageSize / kInstructionSize; i += 2) {
1865 __ ccmp(x0, x1, NoFlag, eq);
1866 __ adrp(x1, imm21);
1867 }
1868 }
1869 }
1870
1871 END();
1872 RUN();
1873
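  // adrp adds (imm21 * kPageSize) to the page-aligned PC. The code above is
  // arranged so that `page` sits exactly on a page boundary, so the expected
  // result is simply that base plus the scaled immediate.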
1874 uintptr_t expected =
1875 masm.GetLabelAddress<uintptr_t>(&page) + (kPageSize * imm21);
1876 ASSERT_EQUAL_64(expected, x0);
1877 ASSERT_EQUAL_64(expected, x1);
1878 ASSERT_EQUAL_NZCV(ZCFlag);
1879
armvixlc68cb642014-09-25 18:49:30 +01001880 TEARDOWN_CUSTOM();
armvixl4a102ba2014-07-14 09:02:40 +01001881}
1882
1883
1884// Check that adrp produces the correct result for a specific offset.
1885TEST(adrp_offset) {
1886 AdrpOffsetHelper(0);
1887 AdrpOffsetHelper(1);
1888 AdrpOffsetHelper(-1);
1889 AdrpOffsetHelper(4);
1890 AdrpOffsetHelper(-4);
1891 AdrpOffsetHelper(0x000fffff);
1892 AdrpOffsetHelper(-0x000fffff);
1893 AdrpOffsetHelper(-0x00100000);
1894}
1895
1896
armvixlad96eda2013-06-14 11:42:37 +01001897TEST(branch_cond) {
1898 SETUP();
armvixlc68cb642014-09-25 18:49:30 +01001899 ALLOW_ASM();
armvixlad96eda2013-06-14 11:42:37 +01001900
1901 Label wrong;
1902
1903 START();
1904 __ Mov(x0, 0x1);
1905 __ Mov(x1, 0x1);
armvixlb0c8ae22014-03-21 14:03:59 +00001906 __ Mov(x2, 0x8000000000000000);
armvixlad96eda2013-06-14 11:42:37 +01001907
1908 // For each 'cmp' instruction below, none of the condition codes tested against
1909 // 'wrong' should be satisfied; any other condition code would branch.
1910
armvixl578645f2013-08-15 17:21:42 +01001911 __ Cmp(x1, 0);
armvixlad96eda2013-06-14 11:42:37 +01001912 __ B(&wrong, eq);
1913 __ B(&wrong, lo);
1914 __ B(&wrong, mi);
1915 __ B(&wrong, vs);
1916 __ B(&wrong, ls);
1917 __ B(&wrong, lt);
1918 __ B(&wrong, le);
1919 Label ok_1;
1920 __ B(&ok_1, ne);
1921 __ Mov(x0, 0x0);
1922 __ Bind(&ok_1);
1923
armvixl578645f2013-08-15 17:21:42 +01001924 __ Cmp(x1, 1);
armvixlad96eda2013-06-14 11:42:37 +01001925 __ B(&wrong, ne);
1926 __ B(&wrong, lo);
1927 __ B(&wrong, mi);
1928 __ B(&wrong, vs);
1929 __ B(&wrong, hi);
1930 __ B(&wrong, lt);
1931 __ B(&wrong, gt);
1932 Label ok_2;
1933 __ B(&ok_2, pl);
1934 __ Mov(x0, 0x0);
1935 __ Bind(&ok_2);
1936
armvixl578645f2013-08-15 17:21:42 +01001937 __ Cmp(x1, 2);
armvixlad96eda2013-06-14 11:42:37 +01001938 __ B(&wrong, eq);
1939 __ B(&wrong, hs);
1940 __ B(&wrong, pl);
1941 __ B(&wrong, vs);
1942 __ B(&wrong, hi);
1943 __ B(&wrong, ge);
1944 __ B(&wrong, gt);
1945 Label ok_3;
1946 __ B(&ok_3, vc);
1947 __ Mov(x0, 0x0);
1948 __ Bind(&ok_3);
1949
armvixl578645f2013-08-15 17:21:42 +01001950 __ Cmp(x2, 1);
armvixlad96eda2013-06-14 11:42:37 +01001951 __ B(&wrong, eq);
1952 __ B(&wrong, lo);
1953 __ B(&wrong, mi);
1954 __ B(&wrong, vc);
1955 __ B(&wrong, ls);
1956 __ B(&wrong, ge);
1957 __ B(&wrong, gt);
1958 Label ok_4;
1959 __ B(&ok_4, le);
1960 __ Mov(x0, 0x0);
1961 __ Bind(&ok_4);
armvixl578645f2013-08-15 17:21:42 +01001962
armvixlc68cb642014-09-25 18:49:30 +01001963 // The MacroAssembler does not allow al as a branch condition.
armvixl578645f2013-08-15 17:21:42 +01001964 Label ok_5;
1965 __ b(&ok_5, al);
1966 __ Mov(x0, 0x0);
1967 __ Bind(&ok_5);
1968
armvixlc68cb642014-09-25 18:49:30 +01001969 // The MacroAssembler does not allow nv as a branch condition.
armvixl578645f2013-08-15 17:21:42 +01001970 Label ok_6;
1971 __ b(&ok_6, nv);
1972 __ Mov(x0, 0x0);
1973 __ Bind(&ok_6);
1974
armvixlad96eda2013-06-14 11:42:37 +01001975 END();
1976
1977 __ Bind(&wrong);
1978 __ Mov(x0, 0x0);
1979 END();
1980
1981 RUN();
1982
1983 ASSERT_EQUAL_64(0x1, x0);
1984
1985 TEARDOWN();
1986}
1987
1988
1989TEST(branch_to_reg) {
1990 SETUP();
1991
1992 // Test br.
1993 Label fn1, after_fn1;
1994
1995 START();
1996 __ Mov(x29, lr);
1997
1998 __ Mov(x1, 0);
1999 __ B(&after_fn1);
2000
2001 __ Bind(&fn1);
2002 __ Mov(x0, lr);
2003 __ Mov(x1, 42);
2004 __ Br(x0);
2005
2006 __ Bind(&after_fn1);
2007 __ Bl(&fn1);
2008
2009 // Test blr.
2010 Label fn2, after_fn2;
2011
2012 __ Mov(x2, 0);
2013 __ B(&after_fn2);
2014
2015 __ Bind(&fn2);
2016 __ Mov(x0, lr);
2017 __ Mov(x2, 84);
2018 __ Blr(x0);
2019
2020 __ Bind(&after_fn2);
2021 __ Bl(&fn2);
2022 __ Mov(x3, lr);
2023
2024 __ Mov(lr, x29);
2025 END();
2026
2027 RUN();
2028
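  // x0 holds the return address captured inside fn2 (the address of the
  // 'Mov(x3, lr)' instruction), while x3 holds the address of the final
  // 'Bl(&fn2)'; the two therefore differ by exactly one instruction.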
2029 ASSERT_EQUAL_64(core.xreg(3) + kInstructionSize, x0);
2030 ASSERT_EQUAL_64(42, x1);
2031 ASSERT_EQUAL_64(84, x2);
2032
2033 TEARDOWN();
2034}
2035
2036
2037TEST(compare_branch) {
2038 SETUP();
2039
2040 START();
2041 __ Mov(x0, 0);
2042 __ Mov(x1, 0);
2043 __ Mov(x2, 0);
2044 __ Mov(x3, 0);
2045 __ Mov(x4, 0);
2046 __ Mov(x5, 0);
2047 __ Mov(x16, 0);
2048 __ Mov(x17, 42);
2049
2050 Label zt, zt_end;
2051 __ Cbz(w16, &zt);
2052 __ B(&zt_end);
2053 __ Bind(&zt);
2054 __ Mov(x0, 1);
2055 __ Bind(&zt_end);
2056
2057 Label zf, zf_end;
2058 __ Cbz(x17, &zf);
2059 __ B(&zf_end);
2060 __ Bind(&zf);
2061 __ Mov(x1, 1);
2062 __ Bind(&zf_end);
2063
2064 Label nzt, nzt_end;
2065 __ Cbnz(w17, &nzt);
2066 __ B(&nzt_end);
2067 __ Bind(&nzt);
2068 __ Mov(x2, 1);
2069 __ Bind(&nzt_end);
2070
2071 Label nzf, nzf_end;
2072 __ Cbnz(x16, &nzf);
2073 __ B(&nzf_end);
2074 __ Bind(&nzf);
2075 __ Mov(x3, 1);
2076 __ Bind(&nzf_end);
2077
armvixlb0c8ae22014-03-21 14:03:59 +00002078 __ Mov(x18, 0xffffffff00000000);
armvixlad96eda2013-06-14 11:42:37 +01002079
2080 Label a, a_end;
2081 __ Cbz(w18, &a);
2082 __ B(&a_end);
2083 __ Bind(&a);
2084 __ Mov(x4, 1);
2085 __ Bind(&a_end);
2086
2087 Label b, b_end;
2088 __ Cbnz(w18, &b);
2089 __ B(&b_end);
2090 __ Bind(&b);
2091 __ Mov(x5, 1);
2092 __ Bind(&b_end);
2093
2094 END();
2095
2096 RUN();
2097
2098 ASSERT_EQUAL_64(1, x0);
2099 ASSERT_EQUAL_64(0, x1);
2100 ASSERT_EQUAL_64(1, x2);
2101 ASSERT_EQUAL_64(0, x3);
2102 ASSERT_EQUAL_64(1, x4);
2103 ASSERT_EQUAL_64(0, x5);
2104
2105 TEARDOWN();
2106}
2107
2108
2109TEST(test_branch) {
2110 SETUP();
2111
2112 START();
2113 __ Mov(x0, 0);
2114 __ Mov(x1, 0);
2115 __ Mov(x2, 0);
2116 __ Mov(x3, 0);
armvixlb0c8ae22014-03-21 14:03:59 +00002117 __ Mov(x16, 0xaaaaaaaaaaaaaaaa);
armvixlad96eda2013-06-14 11:42:37 +01002118
2119 Label bz, bz_end;
armvixlf37fdc02014-02-05 13:22:16 +00002120 __ Tbz(w16, 0, &bz);
armvixlad96eda2013-06-14 11:42:37 +01002121 __ B(&bz_end);
2122 __ Bind(&bz);
2123 __ Mov(x0, 1);
2124 __ Bind(&bz_end);
2125
2126 Label bo, bo_end;
2127 __ Tbz(x16, 63, &bo);
2128 __ B(&bo_end);
2129 __ Bind(&bo);
2130 __ Mov(x1, 1);
2131 __ Bind(&bo_end);
2132
2133 Label nbz, nbz_end;
2134 __ Tbnz(x16, 61, &nbz);
2135 __ B(&nbz_end);
2136 __ Bind(&nbz);
2137 __ Mov(x2, 1);
2138 __ Bind(&nbz_end);
2139
2140 Label nbo, nbo_end;
armvixlf37fdc02014-02-05 13:22:16 +00002141 __ Tbnz(w16, 2, &nbo);
armvixlad96eda2013-06-14 11:42:37 +01002142 __ B(&nbo_end);
2143 __ Bind(&nbo);
2144 __ Mov(x3, 1);
2145 __ Bind(&nbo_end);
2146 END();
2147
2148 RUN();
2149
2150 ASSERT_EQUAL_64(1, x0);
2151 ASSERT_EQUAL_64(0, x1);
2152 ASSERT_EQUAL_64(1, x2);
2153 ASSERT_EQUAL_64(0, x3);
2154
2155 TEARDOWN();
2156}
2157
2158
armvixlb0c8ae22014-03-21 14:03:59 +00002159TEST(branch_type) {
2160 SETUP();
2161
2162 Label fail, done;
2163
2164 START();
2165 __ Mov(x0, 0x0);
2166 __ Mov(x10, 0x7);
2167 __ Mov(x11, 0x0);
2168
2169 // Test non taken branches.
2170 __ Cmp(x10, 0x7);
2171 __ B(&fail, ne);
2172 __ B(&fail, never);
2173 __ B(&fail, reg_zero, x10);
2174 __ B(&fail, reg_not_zero, x11);
2175 __ B(&fail, reg_bit_clear, x10, 0);
2176 __ B(&fail, reg_bit_set, x10, 3);
2177
2178 // Test taken branches.
2179 Label l1, l2, l3, l4, l5;
2180 __ Cmp(x10, 0x7);
2181 __ B(&l1, eq);
2182 __ B(&fail);
2183 __ Bind(&l1);
2184 __ B(&l2, always);
2185 __ B(&fail);
2186 __ Bind(&l2);
2187 __ B(&l3, reg_not_zero, x10);
2188 __ B(&fail);
2189 __ Bind(&l3);
2190 __ B(&l4, reg_bit_clear, x10, 15);
2191 __ B(&fail);
2192 __ Bind(&l4);
2193 __ B(&l5, reg_bit_set, x10, 1);
2194 __ B(&fail);
2195 __ Bind(&l5);
2196
2197 __ B(&done);
2198
2199 __ Bind(&fail);
2200 __ Mov(x0, 0x1);
2201
2202 __ Bind(&done);
2203
2204 END();
2205
2206 RUN();
2207
2208 ASSERT_EQUAL_64(0x0, x0);
2209
2210 TEARDOWN();
2211}
2212
2213
armvixlad96eda2013-06-14 11:42:37 +01002214TEST(ldr_str_offset) {
2215 SETUP();
2216
armvixlb0c8ae22014-03-21 14:03:59 +00002217 uint64_t src[2] = {0xfedcba9876543210, 0x0123456789abcdef};
armvixlad96eda2013-06-14 11:42:37 +01002218 uint64_t dst[5] = {0, 0, 0, 0, 0};
2219 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2220 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2221
2222 START();
2223 __ Mov(x17, src_base);
2224 __ Mov(x18, dst_base);
2225 __ Ldr(w0, MemOperand(x17));
2226 __ Str(w0, MemOperand(x18));
2227 __ Ldr(w1, MemOperand(x17, 4));
2228 __ Str(w1, MemOperand(x18, 12));
2229 __ Ldr(x2, MemOperand(x17, 8));
2230 __ Str(x2, MemOperand(x18, 16));
2231 __ Ldrb(w3, MemOperand(x17, 1));
2232 __ Strb(w3, MemOperand(x18, 25));
2233 __ Ldrh(w4, MemOperand(x17, 2));
2234 __ Strh(w4, MemOperand(x18, 33));
2235 END();
2236
2237 RUN();
2238
2239 ASSERT_EQUAL_64(0x76543210, x0);
2240 ASSERT_EQUAL_64(0x76543210, dst[0]);
2241 ASSERT_EQUAL_64(0xfedcba98, x1);
armvixlb0c8ae22014-03-21 14:03:59 +00002242 ASSERT_EQUAL_64(0xfedcba9800000000, dst[1]);
2243 ASSERT_EQUAL_64(0x0123456789abcdef, x2);
2244 ASSERT_EQUAL_64(0x0123456789abcdef, dst[2]);
armvixlad96eda2013-06-14 11:42:37 +01002245 ASSERT_EQUAL_64(0x32, x3);
2246 ASSERT_EQUAL_64(0x3200, dst[3]);
2247 ASSERT_EQUAL_64(0x7654, x4);
2248 ASSERT_EQUAL_64(0x765400, dst[4]);
2249 ASSERT_EQUAL_64(src_base, x17);
2250 ASSERT_EQUAL_64(dst_base, x18);
2251
2252 TEARDOWN();
2253}
2254
2255
2256TEST(ldr_str_wide) {
2257 SETUP();
2258
2259 uint32_t src[8192];
2260 uint32_t dst[8192];
2261 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2262 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2263 memset(src, 0xaa, 8192 * sizeof(src[0]));
2264 memset(dst, 0xaa, 8192 * sizeof(dst[0]));
2265 src[0] = 0;
2266 src[6144] = 6144;
2267 src[8191] = 8191;
2268
2269 START();
2270 __ Mov(x22, src_base);
2271 __ Mov(x23, dst_base);
2272 __ Mov(x24, src_base);
2273 __ Mov(x25, dst_base);
2274 __ Mov(x26, src_base);
2275 __ Mov(x27, dst_base);
2276
2277 __ Ldr(w0, MemOperand(x22, 8191 * sizeof(src[0])));
2278 __ Str(w0, MemOperand(x23, 8191 * sizeof(dst[0])));
2279 __ Ldr(w1, MemOperand(x24, 4096 * sizeof(src[0]), PostIndex));
2280 __ Str(w1, MemOperand(x25, 4096 * sizeof(dst[0]), PostIndex));
2281 __ Ldr(w2, MemOperand(x26, 6144 * sizeof(src[0]), PreIndex));
2282 __ Str(w2, MemOperand(x27, 6144 * sizeof(dst[0]), PreIndex));
2283 END();
2284
2285 RUN();
2286
2287 ASSERT_EQUAL_32(8191, w0);
2288 ASSERT_EQUAL_32(8191, dst[8191]);
2289 ASSERT_EQUAL_64(src_base, x22);
2290 ASSERT_EQUAL_64(dst_base, x23);
2291 ASSERT_EQUAL_32(0, w1);
2292 ASSERT_EQUAL_32(0, dst[0]);
2293 ASSERT_EQUAL_64(src_base + 4096 * sizeof(src[0]), x24);
2294 ASSERT_EQUAL_64(dst_base + 4096 * sizeof(dst[0]), x25);
2295 ASSERT_EQUAL_32(6144, w2);
2296 ASSERT_EQUAL_32(6144, dst[6144]);
2297 ASSERT_EQUAL_64(src_base + 6144 * sizeof(src[0]), x26);
2298 ASSERT_EQUAL_64(dst_base + 6144 * sizeof(dst[0]), x27);
2299
2300 TEARDOWN();
2301}
2302
2303
2304TEST(ldr_str_preindex) {
2305 SETUP();
2306
armvixlb0c8ae22014-03-21 14:03:59 +00002307 uint64_t src[2] = {0xfedcba9876543210, 0x0123456789abcdef};
armvixlad96eda2013-06-14 11:42:37 +01002308 uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
2309 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2310 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2311
2312 START();
2313 __ Mov(x17, src_base);
2314 __ Mov(x18, dst_base);
2315 __ Mov(x19, src_base);
2316 __ Mov(x20, dst_base);
2317 __ Mov(x21, src_base + 16);
2318 __ Mov(x22, dst_base + 40);
2319 __ Mov(x23, src_base);
2320 __ Mov(x24, dst_base);
2321 __ Mov(x25, src_base);
2322 __ Mov(x26, dst_base);
2323 __ Ldr(w0, MemOperand(x17, 4, PreIndex));
2324 __ Str(w0, MemOperand(x18, 12, PreIndex));
2325 __ Ldr(x1, MemOperand(x19, 8, PreIndex));
2326 __ Str(x1, MemOperand(x20, 16, PreIndex));
2327 __ Ldr(w2, MemOperand(x21, -4, PreIndex));
2328 __ Str(w2, MemOperand(x22, -4, PreIndex));
2329 __ Ldrb(w3, MemOperand(x23, 1, PreIndex));
2330 __ Strb(w3, MemOperand(x24, 25, PreIndex));
2331 __ Ldrh(w4, MemOperand(x25, 3, PreIndex));
2332 __ Strh(w4, MemOperand(x26, 41, PreIndex));
2333 END();
2334
2335 RUN();
2336
2337 ASSERT_EQUAL_64(0xfedcba98, x0);
armvixlb0c8ae22014-03-21 14:03:59 +00002338 ASSERT_EQUAL_64(0xfedcba9800000000, dst[1]);
2339 ASSERT_EQUAL_64(0x0123456789abcdef, x1);
2340 ASSERT_EQUAL_64(0x0123456789abcdef, dst[2]);
armvixlad96eda2013-06-14 11:42:37 +01002341 ASSERT_EQUAL_64(0x01234567, x2);
armvixlb0c8ae22014-03-21 14:03:59 +00002342 ASSERT_EQUAL_64(0x0123456700000000, dst[4]);
armvixlad96eda2013-06-14 11:42:37 +01002343 ASSERT_EQUAL_64(0x32, x3);
2344 ASSERT_EQUAL_64(0x3200, dst[3]);
2345 ASSERT_EQUAL_64(0x9876, x4);
2346 ASSERT_EQUAL_64(0x987600, dst[5]);
2347 ASSERT_EQUAL_64(src_base + 4, x17);
2348 ASSERT_EQUAL_64(dst_base + 12, x18);
2349 ASSERT_EQUAL_64(src_base + 8, x19);
2350 ASSERT_EQUAL_64(dst_base + 16, x20);
2351 ASSERT_EQUAL_64(src_base + 12, x21);
2352 ASSERT_EQUAL_64(dst_base + 36, x22);
2353 ASSERT_EQUAL_64(src_base + 1, x23);
2354 ASSERT_EQUAL_64(dst_base + 25, x24);
2355 ASSERT_EQUAL_64(src_base + 3, x25);
2356 ASSERT_EQUAL_64(dst_base + 41, x26);
2357
2358 TEARDOWN();
2359}
2360
2361
2362TEST(ldr_str_postindex) {
2363 SETUP();
2364
armvixlb0c8ae22014-03-21 14:03:59 +00002365 uint64_t src[2] = {0xfedcba9876543210, 0x0123456789abcdef};
armvixlad96eda2013-06-14 11:42:37 +01002366 uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
2367 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2368 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2369
2370 START();
2371 __ Mov(x17, src_base + 4);
2372 __ Mov(x18, dst_base + 12);
2373 __ Mov(x19, src_base + 8);
2374 __ Mov(x20, dst_base + 16);
2375 __ Mov(x21, src_base + 8);
2376 __ Mov(x22, dst_base + 32);
2377 __ Mov(x23, src_base + 1);
2378 __ Mov(x24, dst_base + 25);
2379 __ Mov(x25, src_base + 3);
2380 __ Mov(x26, dst_base + 41);
2381 __ Ldr(w0, MemOperand(x17, 4, PostIndex));
2382 __ Str(w0, MemOperand(x18, 12, PostIndex));
2383 __ Ldr(x1, MemOperand(x19, 8, PostIndex));
2384 __ Str(x1, MemOperand(x20, 16, PostIndex));
2385 __ Ldr(x2, MemOperand(x21, -8, PostIndex));
2386 __ Str(x2, MemOperand(x22, -32, PostIndex));
2387 __ Ldrb(w3, MemOperand(x23, 1, PostIndex));
2388 __ Strb(w3, MemOperand(x24, 5, PostIndex));
2389 __ Ldrh(w4, MemOperand(x25, -3, PostIndex));
2390 __ Strh(w4, MemOperand(x26, -41, PostIndex));
2391 END();
2392
2393 RUN();
2394
2395 ASSERT_EQUAL_64(0xfedcba98, x0);
armvixlb0c8ae22014-03-21 14:03:59 +00002396 ASSERT_EQUAL_64(0xfedcba9800000000, dst[1]);
2397 ASSERT_EQUAL_64(0x0123456789abcdef, x1);
2398 ASSERT_EQUAL_64(0x0123456789abcdef, dst[2]);
2399 ASSERT_EQUAL_64(0x0123456789abcdef, x2);
2400 ASSERT_EQUAL_64(0x0123456789abcdef, dst[4]);
armvixlad96eda2013-06-14 11:42:37 +01002401 ASSERT_EQUAL_64(0x32, x3);
2402 ASSERT_EQUAL_64(0x3200, dst[3]);
2403 ASSERT_EQUAL_64(0x9876, x4);
2404 ASSERT_EQUAL_64(0x987600, dst[5]);
2405 ASSERT_EQUAL_64(src_base + 8, x17);
2406 ASSERT_EQUAL_64(dst_base + 24, x18);
2407 ASSERT_EQUAL_64(src_base + 16, x19);
2408 ASSERT_EQUAL_64(dst_base + 32, x20);
2409 ASSERT_EQUAL_64(src_base, x21);
2410 ASSERT_EQUAL_64(dst_base, x22);
2411 ASSERT_EQUAL_64(src_base + 2, x23);
2412 ASSERT_EQUAL_64(dst_base + 30, x24);
2413 ASSERT_EQUAL_64(src_base, x25);
2414 ASSERT_EQUAL_64(dst_base, x26);
2415
2416 TEARDOWN();
2417}
2418
2419
2420TEST(ldr_str_largeindex) {
2421 SETUP();
2422
2423 // This value won't fit in the immediate offset field of ldr/str instructions.
2424 int largeoffset = 0xabcdef;
2425
2426 int64_t data[3] = { 0x1122334455667788, 0, 0 };
armvixlb0c8ae22014-03-21 14:03:59 +00002427 uint64_t base_addr = reinterpret_cast<uintptr_t>(data);
2428 uint64_t drifted_addr = base_addr - largeoffset;
armvixlad96eda2013-06-14 11:42:37 +01002429
2430 // This test checks that we can use large immediate offsets with the
2431 // PreIndex and PostIndex addressing modes of the MacroAssembler
2432 // Ldr/Str instructions.
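  // A note on what is being exercised: the hardware pre/post-index forms only
  // encode a 9-bit signed immediate (-256 to +255 bytes), so for an offset
  // like 0xabcdef the MacroAssembler is expected to materialise the offset in
  // a temporary register and perform the base update separately.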
2433
2434 START();
armvixlad96eda2013-06-14 11:42:37 +01002435 __ Mov(x19, drifted_addr);
armvixlb0c8ae22014-03-21 14:03:59 +00002436 __ Ldr(x0, MemOperand(x19, largeoffset, PreIndex));
armvixlad96eda2013-06-14 11:42:37 +01002437
armvixlb0c8ae22014-03-21 14:03:59 +00002438 __ Mov(x20, base_addr);
2439 __ Ldr(x1, MemOperand(x20, largeoffset, PostIndex));
2440
2441 __ Mov(x21, drifted_addr);
2442 __ Str(x0, MemOperand(x21, largeoffset + 8, PreIndex));
2443
2444 __ Mov(x22, base_addr + 16);
2445 __ Str(x0, MemOperand(x22, largeoffset, PostIndex));
armvixlad96eda2013-06-14 11:42:37 +01002446 END();
2447
2448 RUN();
2449
2450 ASSERT_EQUAL_64(0x1122334455667788, data[0]);
2451 ASSERT_EQUAL_64(0x1122334455667788, data[1]);
2452 ASSERT_EQUAL_64(0x1122334455667788, data[2]);
2453 ASSERT_EQUAL_64(0x1122334455667788, x0);
2454 ASSERT_EQUAL_64(0x1122334455667788, x1);
2455
armvixlb0c8ae22014-03-21 14:03:59 +00002456 ASSERT_EQUAL_64(base_addr, x19);
2457 ASSERT_EQUAL_64(base_addr + largeoffset, x20);
2458 ASSERT_EQUAL_64(base_addr + 8, x21);
2459 ASSERT_EQUAL_64(base_addr + 16 + largeoffset, x22);
armvixlad96eda2013-06-14 11:42:37 +01002460
2461 TEARDOWN();
2462}
2463
2464
2465TEST(load_signed) {
2466 SETUP();
2467
2468 uint32_t src[2] = {0x80008080, 0x7fff7f7f};
2469 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2470
2471 START();
2472 __ Mov(x24, src_base);
2473 __ Ldrsb(w0, MemOperand(x24));
2474 __ Ldrsb(w1, MemOperand(x24, 4));
2475 __ Ldrsh(w2, MemOperand(x24));
2476 __ Ldrsh(w3, MemOperand(x24, 4));
2477 __ Ldrsb(x4, MemOperand(x24));
2478 __ Ldrsb(x5, MemOperand(x24, 4));
2479 __ Ldrsh(x6, MemOperand(x24));
2480 __ Ldrsh(x7, MemOperand(x24, 4));
2481 __ Ldrsw(x8, MemOperand(x24));
2482 __ Ldrsw(x9, MemOperand(x24, 4));
2483 END();
2484
2485 RUN();
2486
2487 ASSERT_EQUAL_64(0xffffff80, x0);
2488 ASSERT_EQUAL_64(0x0000007f, x1);
2489 ASSERT_EQUAL_64(0xffff8080, x2);
2490 ASSERT_EQUAL_64(0x00007f7f, x3);
armvixlb0c8ae22014-03-21 14:03:59 +00002491 ASSERT_EQUAL_64(0xffffffffffffff80, x4);
2492 ASSERT_EQUAL_64(0x000000000000007f, x5);
2493 ASSERT_EQUAL_64(0xffffffffffff8080, x6);
2494 ASSERT_EQUAL_64(0x0000000000007f7f, x7);
2495 ASSERT_EQUAL_64(0xffffffff80008080, x8);
2496 ASSERT_EQUAL_64(0x000000007fff7f7f, x9);
armvixlad96eda2013-06-14 11:42:37 +01002497
2498 TEARDOWN();
2499}
2500
2501
2502TEST(load_store_regoffset) {
2503 SETUP();
2504
2505 uint32_t src[3] = {1, 2, 3};
2506 uint32_t dst[4] = {0, 0, 0, 0};
2507 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2508 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2509
2510 START();
2511 __ Mov(x16, src_base);
2512 __ Mov(x17, dst_base);
2513 __ Mov(x18, src_base + 3 * sizeof(src[0]));
2514 __ Mov(x19, dst_base + 3 * sizeof(dst[0]));
2515 __ Mov(x20, dst_base + 4 * sizeof(dst[0]));
2516 __ Mov(x24, 0);
2517 __ Mov(x25, 4);
2518 __ Mov(x26, -4);
2519 __ Mov(x27, 0xfffffffc); // 32-bit -4.
2520 __ Mov(x28, 0xfffffffe); // 32-bit -2.
2521 __ Mov(x29, 0xffffffff); // 32-bit -1.
2522
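  // SXTW sign-extends the 32-bit offset register to 64 bits before it is
  // added to the base; the optional shift amount (2 below) then scales the
  // offset by the access size, so x28 (-2) addresses two words below x18.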
2523 __ Ldr(w0, MemOperand(x16, x24));
2524 __ Ldr(x1, MemOperand(x16, x25));
2525 __ Ldr(w2, MemOperand(x18, x26));
2526 __ Ldr(w3, MemOperand(x18, x27, SXTW));
2527 __ Ldr(w4, MemOperand(x18, x28, SXTW, 2));
2528 __ Str(w0, MemOperand(x17, x24));
2529 __ Str(x1, MemOperand(x17, x25));
2530 __ Str(w2, MemOperand(x20, x29, SXTW, 2));
2531 END();
2532
2533 RUN();
2534
2535 ASSERT_EQUAL_64(1, x0);
armvixlb0c8ae22014-03-21 14:03:59 +00002536 ASSERT_EQUAL_64(0x0000000300000002, x1);
armvixlad96eda2013-06-14 11:42:37 +01002537 ASSERT_EQUAL_64(3, x2);
2538 ASSERT_EQUAL_64(3, x3);
2539 ASSERT_EQUAL_64(2, x4);
2540 ASSERT_EQUAL_32(1, dst[0]);
2541 ASSERT_EQUAL_32(2, dst[1]);
2542 ASSERT_EQUAL_32(3, dst[2]);
2543 ASSERT_EQUAL_32(3, dst[3]);
2544
2545 TEARDOWN();
2546}
2547
2548
2549TEST(load_store_float) {
2550 SETUP();
2551
2552 float src[3] = {1.0, 2.0, 3.0};
2553 float dst[3] = {0.0, 0.0, 0.0};
2554 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2555 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2556
2557 START();
2558 __ Mov(x17, src_base);
2559 __ Mov(x18, dst_base);
2560 __ Mov(x19, src_base);
2561 __ Mov(x20, dst_base);
2562 __ Mov(x21, src_base);
2563 __ Mov(x22, dst_base);
2564 __ Ldr(s0, MemOperand(x17, sizeof(src[0])));
2565 __ Str(s0, MemOperand(x18, sizeof(dst[0]), PostIndex));
2566 __ Ldr(s1, MemOperand(x19, sizeof(src[0]), PostIndex));
2567 __ Str(s1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
2568 __ Ldr(s2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
2569 __ Str(s2, MemOperand(x22, sizeof(dst[0])));
2570 END();
2571
2572 RUN();
2573
2574 ASSERT_EQUAL_FP32(2.0, s0);
2575 ASSERT_EQUAL_FP32(2.0, dst[0]);
2576 ASSERT_EQUAL_FP32(1.0, s1);
2577 ASSERT_EQUAL_FP32(1.0, dst[2]);
2578 ASSERT_EQUAL_FP32(3.0, s2);
2579 ASSERT_EQUAL_FP32(3.0, dst[1]);
2580 ASSERT_EQUAL_64(src_base, x17);
2581 ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
2582 ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
2583 ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
2584 ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
2585 ASSERT_EQUAL_64(dst_base, x22);
2586
2587 TEARDOWN();
2588}
2589
2590
2591TEST(load_store_double) {
2592 SETUP();
2593
2594 double src[3] = {1.0, 2.0, 3.0};
2595 double dst[3] = {0.0, 0.0, 0.0};
2596 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2597 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2598
2599 START();
2600 __ Mov(x17, src_base);
2601 __ Mov(x18, dst_base);
2602 __ Mov(x19, src_base);
2603 __ Mov(x20, dst_base);
2604 __ Mov(x21, src_base);
2605 __ Mov(x22, dst_base);
2606 __ Ldr(d0, MemOperand(x17, sizeof(src[0])));
2607 __ Str(d0, MemOperand(x18, sizeof(dst[0]), PostIndex));
2608 __ Ldr(d1, MemOperand(x19, sizeof(src[0]), PostIndex));
2609 __ Str(d1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
2610 __ Ldr(d2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
2611 __ Str(d2, MemOperand(x22, sizeof(dst[0])));
2612 END();
2613
2614 RUN();
2615
2616 ASSERT_EQUAL_FP64(2.0, d0);
2617 ASSERT_EQUAL_FP64(2.0, dst[0]);
2618 ASSERT_EQUAL_FP64(1.0, d1);
2619 ASSERT_EQUAL_FP64(1.0, dst[2]);
2620 ASSERT_EQUAL_FP64(3.0, d2);
2621 ASSERT_EQUAL_FP64(3.0, dst[1]);
2622 ASSERT_EQUAL_64(src_base, x17);
2623 ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
2624 ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
2625 ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
2626 ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
2627 ASSERT_EQUAL_64(dst_base, x22);
2628
2629 TEARDOWN();
2630}
2631
2632
2633TEST(ldp_stp_float) {
2634 SETUP();
2635
2636 float src[2] = {1.0, 2.0};
2637 float dst[3] = {0.0, 0.0, 0.0};
2638 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2639 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2640
2641 START();
2642 __ Mov(x16, src_base);
2643 __ Mov(x17, dst_base);
2644 __ Ldp(s31, s0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
2645 __ Stp(s0, s31, MemOperand(x17, sizeof(dst[1]), PreIndex));
2646 END();
2647
2648 RUN();
2649
2650 ASSERT_EQUAL_FP32(1.0, s31);
2651 ASSERT_EQUAL_FP32(2.0, s0);
2652 ASSERT_EQUAL_FP32(0.0, dst[0]);
2653 ASSERT_EQUAL_FP32(2.0, dst[1]);
2654 ASSERT_EQUAL_FP32(1.0, dst[2]);
2655 ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
2656 ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);
2657
2658 TEARDOWN();
2659}
2660
2661
2662TEST(ldp_stp_double) {
2663 SETUP();
2664
2665 double src[2] = {1.0, 2.0};
2666 double dst[3] = {0.0, 0.0, 0.0};
2667 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2668 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2669
2670 START();
2671 __ Mov(x16, src_base);
2672 __ Mov(x17, dst_base);
2673 __ Ldp(d31, d0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
2674 __ Stp(d0, d31, MemOperand(x17, sizeof(dst[1]), PreIndex));
2675 END();
2676
2677 RUN();
2678
2679 ASSERT_EQUAL_FP64(1.0, d31);
2680 ASSERT_EQUAL_FP64(2.0, d0);
2681 ASSERT_EQUAL_FP64(0.0, dst[0]);
2682 ASSERT_EQUAL_FP64(2.0, dst[1]);
2683 ASSERT_EQUAL_FP64(1.0, dst[2]);
2684 ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
2685 ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);
2686
2687 TEARDOWN();
2688}
2689
2690
2691TEST(ldp_stp_offset) {
2692 SETUP();
2693
armvixlb0c8ae22014-03-21 14:03:59 +00002694 uint64_t src[3] = {0x0011223344556677, 0x8899aabbccddeeff,
2695 0xffeeddccbbaa9988};
armvixlad96eda2013-06-14 11:42:37 +01002696 uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
2697 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2698 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2699
2700 START();
2701 __ Mov(x16, src_base);
2702 __ Mov(x17, dst_base);
2703 __ Mov(x18, src_base + 24);
2704 __ Mov(x19, dst_base + 56);
2705 __ Ldp(w0, w1, MemOperand(x16));
2706 __ Ldp(w2, w3, MemOperand(x16, 4));
2707 __ Ldp(x4, x5, MemOperand(x16, 8));
2708 __ Ldp(w6, w7, MemOperand(x18, -12));
2709 __ Ldp(x8, x9, MemOperand(x18, -16));
2710 __ Stp(w0, w1, MemOperand(x17));
2711 __ Stp(w2, w3, MemOperand(x17, 8));
2712 __ Stp(x4, x5, MemOperand(x17, 16));
2713 __ Stp(w6, w7, MemOperand(x19, -24));
2714 __ Stp(x8, x9, MemOperand(x19, -16));
2715 END();
2716
2717 RUN();
2718
2719 ASSERT_EQUAL_64(0x44556677, x0);
2720 ASSERT_EQUAL_64(0x00112233, x1);
armvixlb0c8ae22014-03-21 14:03:59 +00002721 ASSERT_EQUAL_64(0x0011223344556677, dst[0]);
armvixlad96eda2013-06-14 11:42:37 +01002722 ASSERT_EQUAL_64(0x00112233, x2);
2723 ASSERT_EQUAL_64(0xccddeeff, x3);
armvixlb0c8ae22014-03-21 14:03:59 +00002724 ASSERT_EQUAL_64(0xccddeeff00112233, dst[1]);
2725 ASSERT_EQUAL_64(0x8899aabbccddeeff, x4);
2726 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[2]);
2727 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x5);
2728 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[3]);
armvixlad96eda2013-06-14 11:42:37 +01002729 ASSERT_EQUAL_64(0x8899aabb, x6);
2730 ASSERT_EQUAL_64(0xbbaa9988, x7);
armvixlb0c8ae22014-03-21 14:03:59 +00002731 ASSERT_EQUAL_64(0xbbaa99888899aabb, dst[4]);
2732 ASSERT_EQUAL_64(0x8899aabbccddeeff, x8);
2733 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[5]);
2734 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x9);
2735 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[6]);
armvixlad96eda2013-06-14 11:42:37 +01002736 ASSERT_EQUAL_64(src_base, x16);
2737 ASSERT_EQUAL_64(dst_base, x17);
2738 ASSERT_EQUAL_64(src_base + 24, x18);
2739 ASSERT_EQUAL_64(dst_base + 56, x19);
2740
2741 TEARDOWN();
2742}
2743
2744
armvixlc68cb642014-09-25 18:49:30 +01002745TEST(ldp_stp_offset_wide) {
2746 SETUP();
2747
2748 uint64_t src[3] = {0x0011223344556677, 0x8899aabbccddeeff,
2749 0xffeeddccbbaa9988};
2750 uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
2751 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2752 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2753 // Move base too far from the array to force multiple instructions
2754 // to be emitted.
2755 const int64_t base_offset = 1024;
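  // Ldp/Stp immediates are a signed 7-bit field scaled by the register size,
  // so a 1024-byte offset is out of range; the MacroAssembler presumably has
  // to compute the address with additional instructions first.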
2756
2757 START();
2758 __ Mov(x20, src_base - base_offset);
2759 __ Mov(x21, dst_base - base_offset);
2760 __ Mov(x18, src_base + base_offset + 24);
2761 __ Mov(x19, dst_base + base_offset + 56);
2762 __ Ldp(w0, w1, MemOperand(x20, base_offset));
2763 __ Ldp(w2, w3, MemOperand(x20, base_offset + 4));
2764 __ Ldp(x4, x5, MemOperand(x20, base_offset + 8));
2765 __ Ldp(w6, w7, MemOperand(x18, -12 - base_offset));
2766 __ Ldp(x8, x9, MemOperand(x18, -16 - base_offset));
2767 __ Stp(w0, w1, MemOperand(x21, base_offset));
2768 __ Stp(w2, w3, MemOperand(x21, base_offset + 8));
2769 __ Stp(x4, x5, MemOperand(x21, base_offset + 16));
2770 __ Stp(w6, w7, MemOperand(x19, -24 - base_offset));
2771 __ Stp(x8, x9, MemOperand(x19, -16 - base_offset));
2772 END();
2773
2774 RUN();
2775
2776 ASSERT_EQUAL_64(0x44556677, x0);
2777 ASSERT_EQUAL_64(0x00112233, x1);
2778 ASSERT_EQUAL_64(0x0011223344556677, dst[0]);
2779 ASSERT_EQUAL_64(0x00112233, x2);
2780 ASSERT_EQUAL_64(0xccddeeff, x3);
2781 ASSERT_EQUAL_64(0xccddeeff00112233, dst[1]);
2782 ASSERT_EQUAL_64(0x8899aabbccddeeff, x4);
2783 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[2]);
2784 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x5);
2785 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[3]);
2786 ASSERT_EQUAL_64(0x8899aabb, x6);
2787 ASSERT_EQUAL_64(0xbbaa9988, x7);
2788 ASSERT_EQUAL_64(0xbbaa99888899aabb, dst[4]);
2789 ASSERT_EQUAL_64(0x8899aabbccddeeff, x8);
2790 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[5]);
2791 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x9);
2792 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[6]);
2793 ASSERT_EQUAL_64(src_base - base_offset, x20);
2794 ASSERT_EQUAL_64(dst_base - base_offset, x21);
2795 ASSERT_EQUAL_64(src_base + base_offset + 24, x18);
2796 ASSERT_EQUAL_64(dst_base + base_offset + 56, x19);
2797
2798 TEARDOWN();
2799}
2800
2801
armvixlad96eda2013-06-14 11:42:37 +01002802TEST(ldnp_stnp_offset) {
2803 SETUP();
2804
armvixlb0c8ae22014-03-21 14:03:59 +00002805 uint64_t src[3] = {0x0011223344556677, 0x8899aabbccddeeff,
2806 0xffeeddccbbaa9988};
armvixlad96eda2013-06-14 11:42:37 +01002807 uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
2808 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2809 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2810
2811 START();
2812 __ Mov(x16, src_base);
2813 __ Mov(x17, dst_base);
2814 __ Mov(x18, src_base + 24);
2815 __ Mov(x19, dst_base + 56);
2816 __ Ldnp(w0, w1, MemOperand(x16));
2817 __ Ldnp(w2, w3, MemOperand(x16, 4));
2818 __ Ldnp(x4, x5, MemOperand(x16, 8));
2819 __ Ldnp(w6, w7, MemOperand(x18, -12));
2820 __ Ldnp(x8, x9, MemOperand(x18, -16));
2821 __ Stnp(w0, w1, MemOperand(x17));
2822 __ Stnp(w2, w3, MemOperand(x17, 8));
2823 __ Stnp(x4, x5, MemOperand(x17, 16));
2824 __ Stnp(w6, w7, MemOperand(x19, -24));
2825 __ Stnp(x8, x9, MemOperand(x19, -16));
2826 END();
2827
2828 RUN();
2829
2830 ASSERT_EQUAL_64(0x44556677, x0);
2831 ASSERT_EQUAL_64(0x00112233, x1);
armvixlb0c8ae22014-03-21 14:03:59 +00002832 ASSERT_EQUAL_64(0x0011223344556677, dst[0]);
armvixlad96eda2013-06-14 11:42:37 +01002833 ASSERT_EQUAL_64(0x00112233, x2);
2834 ASSERT_EQUAL_64(0xccddeeff, x3);
armvixlb0c8ae22014-03-21 14:03:59 +00002835 ASSERT_EQUAL_64(0xccddeeff00112233, dst[1]);
2836 ASSERT_EQUAL_64(0x8899aabbccddeeff, x4);
2837 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[2]);
2838 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x5);
2839 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[3]);
armvixlad96eda2013-06-14 11:42:37 +01002840 ASSERT_EQUAL_64(0x8899aabb, x6);
2841 ASSERT_EQUAL_64(0xbbaa9988, x7);
armvixlb0c8ae22014-03-21 14:03:59 +00002842 ASSERT_EQUAL_64(0xbbaa99888899aabb, dst[4]);
2843 ASSERT_EQUAL_64(0x8899aabbccddeeff, x8);
2844 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[5]);
2845 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x9);
2846 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[6]);
armvixlad96eda2013-06-14 11:42:37 +01002847 ASSERT_EQUAL_64(src_base, x16);
2848 ASSERT_EQUAL_64(dst_base, x17);
2849 ASSERT_EQUAL_64(src_base + 24, x18);
2850 ASSERT_EQUAL_64(dst_base + 56, x19);
2851
2852 TEARDOWN();
2853}
2854
2855
2856TEST(ldp_stp_preindex) {
2857 SETUP();
2858
armvixlb0c8ae22014-03-21 14:03:59 +00002859 uint64_t src[3] = {0x0011223344556677, 0x8899aabbccddeeff,
2860 0xffeeddccbbaa9988};
armvixlad96eda2013-06-14 11:42:37 +01002861 uint64_t dst[5] = {0, 0, 0, 0, 0};
2862 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2863 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2864
2865 START();
2866 __ Mov(x16, src_base);
2867 __ Mov(x17, dst_base);
2868 __ Mov(x18, dst_base + 16);
2869 __ Ldp(w0, w1, MemOperand(x16, 4, PreIndex));
2870 __ Mov(x19, x16);
2871 __ Ldp(w2, w3, MemOperand(x16, -4, PreIndex));
2872 __ Stp(w2, w3, MemOperand(x17, 4, PreIndex));
2873 __ Mov(x20, x17);
2874 __ Stp(w0, w1, MemOperand(x17, -4, PreIndex));
2875 __ Ldp(x4, x5, MemOperand(x16, 8, PreIndex));
2876 __ Mov(x21, x16);
2877 __ Ldp(x6, x7, MemOperand(x16, -8, PreIndex));
2878 __ Stp(x7, x6, MemOperand(x18, 8, PreIndex));
2879 __ Mov(x22, x18);
2880 __ Stp(x5, x4, MemOperand(x18, -8, PreIndex));
2881 END();
2882
2883 RUN();
2884
2885 ASSERT_EQUAL_64(0x00112233, x0);
2886 ASSERT_EQUAL_64(0xccddeeff, x1);
2887 ASSERT_EQUAL_64(0x44556677, x2);
2888 ASSERT_EQUAL_64(0x00112233, x3);
armvixlb0c8ae22014-03-21 14:03:59 +00002889 ASSERT_EQUAL_64(0xccddeeff00112233, dst[0]);
2890 ASSERT_EQUAL_64(0x0000000000112233, dst[1]);
2891 ASSERT_EQUAL_64(0x8899aabbccddeeff, x4);
2892 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x5);
2893 ASSERT_EQUAL_64(0x0011223344556677, x6);
2894 ASSERT_EQUAL_64(0x8899aabbccddeeff, x7);
2895 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[2]);
2896 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[3]);
2897 ASSERT_EQUAL_64(0x0011223344556677, dst[4]);
armvixlad96eda2013-06-14 11:42:37 +01002898 ASSERT_EQUAL_64(src_base, x16);
2899 ASSERT_EQUAL_64(dst_base, x17);
2900 ASSERT_EQUAL_64(dst_base + 16, x18);
2901 ASSERT_EQUAL_64(src_base + 4, x19);
2902 ASSERT_EQUAL_64(dst_base + 4, x20);
2903 ASSERT_EQUAL_64(src_base + 8, x21);
2904 ASSERT_EQUAL_64(dst_base + 24, x22);
2905
2906 TEARDOWN();
2907}
2908
2909
armvixlc68cb642014-09-25 18:49:30 +01002910TEST(ldp_stp_preindex_wide) {
2911 SETUP();
2912
2913 uint64_t src[3] = {0x0011223344556677, 0x8899aabbccddeeff,
2914 0xffeeddccbbaa9988};
2915 uint64_t dst[5] = {0, 0, 0, 0, 0};
2916 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2917 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2918 // Move base too far from the array to force multiple instructions
2919 // to be emitted.
2920 const int64_t base_offset = 1024;
2921
2922 START();
2923 __ Mov(x24, src_base - base_offset);
2924 __ Mov(x25, dst_base + base_offset);
2925 __ Mov(x18, dst_base + base_offset + 16);
2926 __ Ldp(w0, w1, MemOperand(x24, base_offset + 4, PreIndex));
2927 __ Mov(x19, x24);
2928 __ Mov(x24, src_base - base_offset + 4);
2929 __ Ldp(w2, w3, MemOperand(x24, base_offset - 4, PreIndex));
2930 __ Stp(w2, w3, MemOperand(x25, 4 - base_offset, PreIndex));
2931 __ Mov(x20, x25);
2932 __ Mov(x25, dst_base + base_offset + 4);
2933 __ Mov(x24, src_base - base_offset);
2934 __ Stp(w0, w1, MemOperand(x25, -4 - base_offset, PreIndex));
2935 __ Ldp(x4, x5, MemOperand(x24, base_offset + 8, PreIndex));
2936 __ Mov(x21, x24);
2937 __ Mov(x24, src_base - base_offset + 8);
2938 __ Ldp(x6, x7, MemOperand(x24, base_offset - 8, PreIndex));
2939 __ Stp(x7, x6, MemOperand(x18, 8 - base_offset, PreIndex));
2940 __ Mov(x22, x18);
2941 __ Mov(x18, dst_base + base_offset + 16 + 8);
2942 __ Stp(x5, x4, MemOperand(x18, -8 - base_offset, PreIndex));
2943 END();
2944
2945 RUN();
2946
2947 ASSERT_EQUAL_64(0x00112233, x0);
2948 ASSERT_EQUAL_64(0xccddeeff, x1);
2949 ASSERT_EQUAL_64(0x44556677, x2);
2950 ASSERT_EQUAL_64(0x00112233, x3);
2951 ASSERT_EQUAL_64(0xccddeeff00112233, dst[0]);
2952 ASSERT_EQUAL_64(0x0000000000112233, dst[1]);
2953 ASSERT_EQUAL_64(0x8899aabbccddeeff, x4);
2954 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x5);
2955 ASSERT_EQUAL_64(0x0011223344556677, x6);
2956 ASSERT_EQUAL_64(0x8899aabbccddeeff, x7);
2957 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[2]);
2958 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[3]);
2959 ASSERT_EQUAL_64(0x0011223344556677, dst[4]);
2960 ASSERT_EQUAL_64(src_base, x24);
2961 ASSERT_EQUAL_64(dst_base, x25);
2962 ASSERT_EQUAL_64(dst_base + 16, x18);
2963 ASSERT_EQUAL_64(src_base + 4, x19);
2964 ASSERT_EQUAL_64(dst_base + 4, x20);
2965 ASSERT_EQUAL_64(src_base + 8, x21);
2966 ASSERT_EQUAL_64(dst_base + 24, x22);
2967
2968 TEARDOWN();
2969}
2970
2971
armvixlad96eda2013-06-14 11:42:37 +01002972TEST(ldp_stp_postindex) {
2973 SETUP();
2974
armvixlb0c8ae22014-03-21 14:03:59 +00002975 uint64_t src[4] = {0x0011223344556677, 0x8899aabbccddeeff,
2976 0xffeeddccbbaa9988, 0x7766554433221100};
armvixlad96eda2013-06-14 11:42:37 +01002977 uint64_t dst[5] = {0, 0, 0, 0, 0};
2978 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2979 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2980
2981 START();
2982 __ Mov(x16, src_base);
2983 __ Mov(x17, dst_base);
2984 __ Mov(x18, dst_base + 16);
2985 __ Ldp(w0, w1, MemOperand(x16, 4, PostIndex));
2986 __ Mov(x19, x16);
2987 __ Ldp(w2, w3, MemOperand(x16, -4, PostIndex));
2988 __ Stp(w2, w3, MemOperand(x17, 4, PostIndex));
2989 __ Mov(x20, x17);
2990 __ Stp(w0, w1, MemOperand(x17, -4, PostIndex));
2991 __ Ldp(x4, x5, MemOperand(x16, 8, PostIndex));
2992 __ Mov(x21, x16);
2993 __ Ldp(x6, x7, MemOperand(x16, -8, PostIndex));
2994 __ Stp(x7, x6, MemOperand(x18, 8, PostIndex));
2995 __ Mov(x22, x18);
2996 __ Stp(x5, x4, MemOperand(x18, -8, PostIndex));
2997 END();
2998
2999 RUN();
3000
3001 ASSERT_EQUAL_64(0x44556677, x0);
3002 ASSERT_EQUAL_64(0x00112233, x1);
3003 ASSERT_EQUAL_64(0x00112233, x2);
3004 ASSERT_EQUAL_64(0xccddeeff, x3);
armvixlb0c8ae22014-03-21 14:03:59 +00003005 ASSERT_EQUAL_64(0x4455667700112233, dst[0]);
3006 ASSERT_EQUAL_64(0x0000000000112233, dst[1]);
3007 ASSERT_EQUAL_64(0x0011223344556677, x4);
3008 ASSERT_EQUAL_64(0x8899aabbccddeeff, x5);
3009 ASSERT_EQUAL_64(0x8899aabbccddeeff, x6);
3010 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x7);
3011 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[2]);
3012 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[3]);
3013 ASSERT_EQUAL_64(0x0011223344556677, dst[4]);
armvixlad96eda2013-06-14 11:42:37 +01003014 ASSERT_EQUAL_64(src_base, x16);
3015 ASSERT_EQUAL_64(dst_base, x17);
3016 ASSERT_EQUAL_64(dst_base + 16, x18);
3017 ASSERT_EQUAL_64(src_base + 4, x19);
3018 ASSERT_EQUAL_64(dst_base + 4, x20);
3019 ASSERT_EQUAL_64(src_base + 8, x21);
3020 ASSERT_EQUAL_64(dst_base + 24, x22);
3021
3022 TEARDOWN();
3023}
3024
3025
armvixlc68cb642014-09-25 18:49:30 +01003026TEST(ldp_stp_postindex_wide) {
3027 SETUP();
3028
3029 uint64_t src[4] = {0x0011223344556677, 0x8899aabbccddeeff,
3030 0xffeeddccbbaa9988, 0x7766554433221100};
3031 uint64_t dst[5] = {0, 0, 0, 0, 0};
3032 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3033 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
3034 // Move base too far from the array to force multiple instructions
3035 // to be emitted.
3036 const int64_t base_offset = 1024;
3037
3038 START();
3039 __ Mov(x24, src_base);
3040 __ Mov(x25, dst_base);
3041 __ Mov(x18, dst_base + 16);
3042 __ Ldp(w0, w1, MemOperand(x24, base_offset + 4, PostIndex));
3043 __ Mov(x19, x24);
3044 __ Sub(x24, x24, base_offset);
3045 __ Ldp(w2, w3, MemOperand(x24, base_offset - 4, PostIndex));
3046 __ Stp(w2, w3, MemOperand(x25, 4 - base_offset, PostIndex));
3047 __ Mov(x20, x25);
3048 __ Sub(x24, x24, base_offset);
3049 __ Add(x25, x25, base_offset);
3050 __ Stp(w0, w1, MemOperand(x25, -4 - base_offset, PostIndex));
3051 __ Ldp(x4, x5, MemOperand(x24, base_offset + 8, PostIndex));
3052 __ Mov(x21, x24);
3053 __ Sub(x24, x24, base_offset);
3054 __ Ldp(x6, x7, MemOperand(x24, base_offset - 8, PostIndex));
3055 __ Stp(x7, x6, MemOperand(x18, 8 - base_offset, PostIndex));
3056 __ Mov(x22, x18);
3057 __ Add(x18, x18, base_offset);
3058 __ Stp(x5, x4, MemOperand(x18, -8 - base_offset, PostIndex));
3059 END();
3060
3061 RUN();
3062
3063 ASSERT_EQUAL_64(0x44556677, x0);
3064 ASSERT_EQUAL_64(0x00112233, x1);
3065 ASSERT_EQUAL_64(0x00112233, x2);
3066 ASSERT_EQUAL_64(0xccddeeff, x3);
3067 ASSERT_EQUAL_64(0x4455667700112233, dst[0]);
3068 ASSERT_EQUAL_64(0x0000000000112233, dst[1]);
3069 ASSERT_EQUAL_64(0x0011223344556677, x4);
3070 ASSERT_EQUAL_64(0x8899aabbccddeeff, x5);
3071 ASSERT_EQUAL_64(0x8899aabbccddeeff, x6);
3072 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x7);
3073 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[2]);
3074 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[3]);
3075 ASSERT_EQUAL_64(0x0011223344556677, dst[4]);
3076 ASSERT_EQUAL_64(src_base + base_offset, x24);
3077 ASSERT_EQUAL_64(dst_base - base_offset, x25);
3078 ASSERT_EQUAL_64(dst_base - base_offset + 16, x18);
3079 ASSERT_EQUAL_64(src_base + base_offset + 4, x19);
3080 ASSERT_EQUAL_64(dst_base - base_offset + 4, x20);
3081 ASSERT_EQUAL_64(src_base + base_offset + 8, x21);
3082 ASSERT_EQUAL_64(dst_base - base_offset + 24, x22);
3083
3084 TEARDOWN();
3085}
3086
3087
armvixlad96eda2013-06-14 11:42:37 +01003088TEST(ldp_sign_extend) {
3089 SETUP();
3090
3091 uint32_t src[2] = {0x80000000, 0x7fffffff};
3092 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3093
3094 START();
3095 __ Mov(x24, src_base);
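  // Ldpsw loads a pair of 32-bit words and sign-extends each one into its
  // 64-bit destination register, hence the 0xffffffff prefix expected for the
  // negative value below.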
3096 __ Ldpsw(x0, x1, MemOperand(x24));
3097 END();
3098
3099 RUN();
3100
armvixlb0c8ae22014-03-21 14:03:59 +00003101 ASSERT_EQUAL_64(0xffffffff80000000, x0);
3102 ASSERT_EQUAL_64(0x000000007fffffff, x1);
armvixlad96eda2013-06-14 11:42:37 +01003103
3104 TEARDOWN();
3105}
3106
3107
3108TEST(ldur_stur) {
3109 SETUP();
3110
armvixlb0c8ae22014-03-21 14:03:59 +00003111 int64_t src[2] = {0x0123456789abcdef, 0x0123456789abcdef};
armvixlad96eda2013-06-14 11:42:37 +01003112 int64_t dst[5] = {0, 0, 0, 0, 0};
3113 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3114 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
3115
3116 START();
3117 __ Mov(x17, src_base);
3118 __ Mov(x18, dst_base);
3119 __ Mov(x19, src_base + 16);
3120 __ Mov(x20, dst_base + 32);
3121 __ Mov(x21, dst_base + 40);
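  // The offsets below are not multiples of the access size, so they cannot be
  // encoded by the scaled ldr/str immediate forms; the MacroAssembler is
  // expected to fall back to the unscaled ldur/stur encodings, which take a
  // 9-bit signed byte offset.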
3122 __ Ldr(w0, MemOperand(x17, 1));
3123 __ Str(w0, MemOperand(x18, 2));
3124 __ Ldr(x1, MemOperand(x17, 3));
3125 __ Str(x1, MemOperand(x18, 9));
3126 __ Ldr(w2, MemOperand(x19, -9));
3127 __ Str(w2, MemOperand(x20, -5));
3128 __ Ldrb(w3, MemOperand(x19, -1));
3129 __ Strb(w3, MemOperand(x21, -1));
3130 END();
3131
3132 RUN();
3133
3134 ASSERT_EQUAL_64(0x6789abcd, x0);
armvixlb0c8ae22014-03-21 14:03:59 +00003135 ASSERT_EQUAL_64(0x00006789abcd0000, dst[0]);
3136 ASSERT_EQUAL_64(0xabcdef0123456789, x1);
3137 ASSERT_EQUAL_64(0xcdef012345678900, dst[1]);
armvixlad96eda2013-06-14 11:42:37 +01003138 ASSERT_EQUAL_64(0x000000ab, dst[2]);
3139 ASSERT_EQUAL_64(0xabcdef01, x2);
armvixlb0c8ae22014-03-21 14:03:59 +00003140 ASSERT_EQUAL_64(0x00abcdef01000000, dst[3]);
armvixlad96eda2013-06-14 11:42:37 +01003141 ASSERT_EQUAL_64(0x00000001, x3);
armvixlb0c8ae22014-03-21 14:03:59 +00003142 ASSERT_EQUAL_64(0x0100000000000000, dst[4]);
armvixlad96eda2013-06-14 11:42:37 +01003143 ASSERT_EQUAL_64(src_base, x17);
3144 ASSERT_EQUAL_64(dst_base, x18);
3145 ASSERT_EQUAL_64(src_base + 16, x19);
3146 ASSERT_EQUAL_64(dst_base + 32, x20);
3147
3148 TEARDOWN();
3149}
3150
3151
3152TEST(ldr_literal) {
3153 SETUP();
3154
3155 START();
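  // Each of these loads takes a value rather than an address: the constant is
  // placed in a literal pool and loaded with a pc-relative ldr (literal)
  // instruction.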
armvixlb0c8ae22014-03-21 14:03:59 +00003156 __ Ldr(x2, 0x1234567890abcdef);
armvixlad96eda2013-06-14 11:42:37 +01003157 __ Ldr(w3, 0xfedcba09);
armvixlc68cb642014-09-25 18:49:30 +01003158 __ Ldrsw(x4, 0x7fffffff);
3159 __ Ldrsw(x5, 0x80000000);
armvixlad96eda2013-06-14 11:42:37 +01003160 __ Ldr(d13, 1.234);
3161 __ Ldr(s25, 2.5);
3162 END();
3163
3164 RUN();
3165
armvixlb0c8ae22014-03-21 14:03:59 +00003166 ASSERT_EQUAL_64(0x1234567890abcdef, x2);
armvixlad96eda2013-06-14 11:42:37 +01003167 ASSERT_EQUAL_64(0xfedcba09, x3);
armvixlc68cb642014-09-25 18:49:30 +01003168 ASSERT_EQUAL_64(0x7fffffff, x4);
3169 ASSERT_EQUAL_64(0xffffffff80000000, x5);
armvixlad96eda2013-06-14 11:42:37 +01003170 ASSERT_EQUAL_FP64(1.234, d13);
3171 ASSERT_EQUAL_FP32(2.5, s25);
3172
3173 TEARDOWN();
3174}
3175
3176
armvixlc68cb642014-09-25 18:49:30 +01003177TEST(ldr_literal_range) {
3178 SETUP();
armvixlad96eda2013-06-14 11:42:37 +01003179
3180 START();
armvixlc68cb642014-09-25 18:49:30 +01003181 // Make sure the pool is empty.
3182 masm.EmitLiteralPool(LiteralPool::kBranchRequired);
armvixlad96eda2013-06-14 11:42:37 +01003183 ASSERT_LITERAL_POOL_SIZE(0);
3184
armvixlc68cb642014-09-25 18:49:30 +01003185 // Create some literal pool entries.
armvixlb0c8ae22014-03-21 14:03:59 +00003186 __ Ldr(x0, 0x1234567890abcdef);
armvixlad96eda2013-06-14 11:42:37 +01003187 __ Ldr(w1, 0xfedcba09);
armvixlc68cb642014-09-25 18:49:30 +01003188 __ Ldrsw(x2, 0x7fffffff);
3189 __ Ldrsw(x3, 0x80000000);
armvixlad96eda2013-06-14 11:42:37 +01003190 __ Ldr(d0, 1.234);
3191 __ Ldr(s1, 2.5);
armvixlc68cb642014-09-25 18:49:30 +01003192 ASSERT_LITERAL_POOL_SIZE(32);
armvixlad96eda2013-06-14 11:42:37 +01003193
armvixlc68cb642014-09-25 18:49:30 +01003194 // Emit more code than the maximum literal load range to ensure that the pool
3195 // gets emitted.
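  // (An ldr (literal) instruction can only reach approximately +/-1MB from
  // the PC, which is presumably what bounds kMaxLoadLiteralRange.)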
3196 const ptrdiff_t offset = masm.CursorOffset();
3197 while ((masm.CursorOffset() - offset) < (2 * kMaxLoadLiteralRange)) {
armvixlad96eda2013-06-14 11:42:37 +01003198 __ Nop();
armvixlad96eda2013-06-14 11:42:37 +01003199 }
3200
armvixlc68cb642014-09-25 18:49:30 +01003201 // The pool should have been emitted.
armvixlad96eda2013-06-14 11:42:37 +01003202 ASSERT_LITERAL_POOL_SIZE(0);
3203
3204 // These loads should be after the pool (and will require a new one).
armvixlb0c8ae22014-03-21 14:03:59 +00003205 __ Ldr(x4, 0x34567890abcdef12);
armvixlad96eda2013-06-14 11:42:37 +01003206 __ Ldr(w5, 0xdcba09fe);
armvixlc68cb642014-09-25 18:49:30 +01003207 __ Ldrsw(x6, 0x7fffffff);
3208 __ Ldrsw(x7, 0x80000000);
armvixlad96eda2013-06-14 11:42:37 +01003209 __ Ldr(d4, 123.4);
3210 __ Ldr(s5, 250.0);
armvixlc68cb642014-09-25 18:49:30 +01003211 ASSERT_LITERAL_POOL_SIZE(32);
armvixlad96eda2013-06-14 11:42:37 +01003212 END();
3213
3214 RUN();
3215
3216 // Check that the literals loaded correctly.
armvixlb0c8ae22014-03-21 14:03:59 +00003217 ASSERT_EQUAL_64(0x1234567890abcdef, x0);
armvixlad96eda2013-06-14 11:42:37 +01003218 ASSERT_EQUAL_64(0xfedcba09, x1);
armvixlc68cb642014-09-25 18:49:30 +01003219 ASSERT_EQUAL_64(0x7fffffff, x2);
3220 ASSERT_EQUAL_64(0xffffffff80000000, x3);
armvixlad96eda2013-06-14 11:42:37 +01003221 ASSERT_EQUAL_FP64(1.234, d0);
3222 ASSERT_EQUAL_FP32(2.5, s1);
armvixlb0c8ae22014-03-21 14:03:59 +00003223 ASSERT_EQUAL_64(0x34567890abcdef12, x4);
armvixlad96eda2013-06-14 11:42:37 +01003224 ASSERT_EQUAL_64(0xdcba09fe, x5);
armvixlc68cb642014-09-25 18:49:30 +01003225 ASSERT_EQUAL_64(0x7fffffff, x6);
3226 ASSERT_EQUAL_64(0xffffffff80000000, x7);
armvixlad96eda2013-06-14 11:42:37 +01003227 ASSERT_EQUAL_FP64(123.4, d4);
3228 ASSERT_EQUAL_FP32(250.0, s5);
3229
3230 TEARDOWN();
3231}
3232
3233
armvixlc68cb642014-09-25 18:49:30 +01003234template <typename T>
3235void LoadIntValueHelper(T values[], int card) {
3236 SETUP();
3237
3238 const bool is_32bits = (sizeof(T) == 4);
3239 const Register& tgt1 = is_32bits ? w1 : x1;
3240 const Register& tgt2 = is_32bits ? w2 : x2;
3241
3242 START();
3243 __ Mov(x0, 0);
3244
3245 // If any of the values differs, x0 will be set to one.
3246 for (int i = 0; i < card; ++i) {
3247 __ Mov(tgt1, values[i]);
3248 __ Ldr(tgt2, values[i]);
3249 __ Cmp(tgt1, tgt2);
3250 __ Cset(x0, ne);
3251 }
3252 END();
3253
3254 RUN();
3255
3256 // If one of the values differs, the trace can be used to identify which one.
3257 ASSERT_EQUAL_64(0, x0);
3258
3259 TEARDOWN();
armvixlad96eda2013-06-14 11:42:37 +01003260}
3261
3262
armvixlc68cb642014-09-25 18:49:30 +01003263TEST(ldr_literal_values_x) {
3264 static const uint64_t kValues[] = {
3265 0x8000000000000000, 0x7fffffffffffffff, 0x0000000000000000,
3266 0xffffffffffffffff, 0x00ff00ff00ff00ff, 0x1234567890abcdef
3267 };
3268
3269 LoadIntValueHelper(kValues, sizeof(kValues) / sizeof(kValues[0]));
armvixlad96eda2013-06-14 11:42:37 +01003270}
3271
3272
armvixlc68cb642014-09-25 18:49:30 +01003273TEST(ldr_literal_values_w) {
3274 static const uint32_t kValues[] = {
3275 0x80000000, 0x7fffffff, 0x00000000, 0xffffffff, 0x00ff00ff, 0x12345678,
3276 0x90abcdef
3277 };
3278
3279 LoadIntValueHelper(kValues, sizeof(kValues) / sizeof(kValues[0]));
armvixlad96eda2013-06-14 11:42:37 +01003280}
3281
3282
armvixlc68cb642014-09-25 18:49:30 +01003283template <typename T>
3284void LoadFPValueHelper(T values[], int card) {
3285 SETUP();
3286
3287 const bool is_32bits = (sizeof(T) == 4);
3288 const FPRegister& fp_tgt = is_32bits ? s2 : d2;
3289 const Register& tgt1 = is_32bits ? w1 : x1;
3290 const Register& tgt2 = is_32bits ? w2 : x2;
3291
3292 START();
3293 __ Mov(x0, 0);
3294
3295 // If any of the values differs, x0 will be set to one.
3296 for (int i = 0; i < card; ++i) {
3297 __ Mov(tgt1, is_32bits ? float_to_rawbits(values[i])
3298 : double_to_rawbits(values[i]));
3299 __ Ldr(fp_tgt, values[i]);
3300 __ Fmov(tgt2, fp_tgt);
3301 __ Cmp(tgt1, tgt2);
3302 __ Cset(x0, ne);
3303 }
3304 END();
3305
3306 RUN();
3307
3308 // If one of the values differs, the trace can be used to identify which one.
3309 ASSERT_EQUAL_64(0, x0);
3310
3311 TEARDOWN();
3312}
3313
3314TEST(ldr_literal_values_d) {
3315 static const double kValues[] = {
3316 -0.0, 0.0, -1.0, 1.0, -1e10, 1e10
3317 };
3318
3319 LoadFPValueHelper(kValues, sizeof(kValues) / sizeof(kValues[0]));
armvixlad96eda2013-06-14 11:42:37 +01003320}
3321
3322
armvixlc68cb642014-09-25 18:49:30 +01003323TEST(ldr_literal_values_s) {
3324 static const float kValues[] = {
3325 -0.0, 0.0, -1.0, 1.0, -1e10, 1e10
3326 };
3327
3328 LoadFPValueHelper(kValues, sizeof(kValues) / sizeof(kValues[0]));
armvixlad96eda2013-06-14 11:42:37 +01003329}
3330
3331
armvixlc68cb642014-09-25 18:49:30 +01003332TEST(ldr_literal_custom) {
3333 // The MacroAssembler always emits pools after the instructions using them;
3334 // this test manually emits a pool first and then uses its entries.
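  // 'place()' emits each literal's value at the current buffer position; the
  // raw ldr/ldrsw calls below then generate pc-relative literal loads that
  // refer back to those placed values.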
3335 SETUP();
3336 ALLOW_ASM();
3337
3338 Label end_of_pool;
3339 Literal<uint64_t> literal_x(0x1234567890abcdef);
3340 Literal<uint32_t> literal_w(0xfedcba09);
3341 Literal<uint32_t> literal_sx(0x80000000);
3342 Literal<double> literal_d(1.234);
3343 Literal<float> literal_s(2.5);
3344
3345 START();
3346 // Manually generate a pool.
3347 __ B(&end_of_pool);
3348 __ place(&literal_x);
3349 __ place(&literal_w);
3350 __ place(&literal_sx);
3351 __ place(&literal_d);
3352 __ place(&literal_s);
3353 __ Bind(&end_of_pool);
3354
3355 // Now load the entries.
3356 __ ldr(x2, &literal_x);
3357 __ ldr(w3, &literal_w);
3358 __ ldrsw(x5, &literal_sx);
3359 __ ldr(d13, &literal_d);
3360 __ ldr(s25, &literal_s);
3361 END();
3362
3363 RUN();
3364
3365 ASSERT_EQUAL_64(0x1234567890abcdef, x2);
3366 ASSERT_EQUAL_64(0xfedcba09, x3);
3367 ASSERT_EQUAL_64(0xffffffff80000000, x5);
3368 ASSERT_EQUAL_FP64(1.234, d13);
3369 ASSERT_EQUAL_FP32(2.5, s25);
3370
3371 TEARDOWN();
armvixlad96eda2013-06-14 11:42:37 +01003372}
3373
3374
3375TEST(add_sub_imm) {
3376 SETUP();
3377
3378 START();
3379 __ Mov(x0, 0x0);
3380 __ Mov(x1, 0x1111);
armvixlb0c8ae22014-03-21 14:03:59 +00003381 __ Mov(x2, 0xffffffffffffffff);
3382 __ Mov(x3, 0x8000000000000000);
armvixlad96eda2013-06-14 11:42:37 +01003383
3384 __ Add(x10, x0, Operand(0x123));
3385 __ Add(x11, x1, Operand(0x122000));
3386 __ Add(x12, x0, Operand(0xabc << 12));
3387 __ Add(x13, x2, Operand(1));
3388
3389 __ Add(w14, w0, Operand(0x123));
3390 __ Add(w15, w1, Operand(0x122000));
3391 __ Add(w16, w0, Operand(0xabc << 12));
3392 __ Add(w17, w2, Operand(1));
3393
3394 __ Sub(x20, x0, Operand(0x1));
3395 __ Sub(x21, x1, Operand(0x111));
3396 __ Sub(x22, x1, Operand(0x1 << 12));
3397 __ Sub(x23, x3, Operand(1));
3398
3399 __ Sub(w24, w0, Operand(0x1));
3400 __ Sub(w25, w1, Operand(0x111));
3401 __ Sub(w26, w1, Operand(0x1 << 12));
3402 __ Sub(w27, w3, Operand(1));
3403 END();
3404
3405 RUN();
3406
3407 ASSERT_EQUAL_64(0x123, x10);
3408 ASSERT_EQUAL_64(0x123111, x11);
3409 ASSERT_EQUAL_64(0xabc000, x12);
3410 ASSERT_EQUAL_64(0x0, x13);
3411
3412 ASSERT_EQUAL_32(0x123, w14);
3413 ASSERT_EQUAL_32(0x123111, w15);
3414 ASSERT_EQUAL_32(0xabc000, w16);
3415 ASSERT_EQUAL_32(0x0, w17);
3416
armvixlb0c8ae22014-03-21 14:03:59 +00003417 ASSERT_EQUAL_64(0xffffffffffffffff, x20);
armvixlad96eda2013-06-14 11:42:37 +01003418 ASSERT_EQUAL_64(0x1000, x21);
3419 ASSERT_EQUAL_64(0x111, x22);
armvixlb0c8ae22014-03-21 14:03:59 +00003420 ASSERT_EQUAL_64(0x7fffffffffffffff, x23);
armvixlad96eda2013-06-14 11:42:37 +01003421
3422 ASSERT_EQUAL_32(0xffffffff, w24);
3423 ASSERT_EQUAL_32(0x1000, w25);
3424 ASSERT_EQUAL_32(0x111, w26);
3425 ASSERT_EQUAL_32(0xffffffff, w27);
3426
3427 TEARDOWN();
3428}
3429
3430
3431TEST(add_sub_wide_imm) {
3432 SETUP();
3433
3434 START();
3435 __ Mov(x0, 0x0);
3436 __ Mov(x1, 0x1);
3437
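  // These immediates are too wide for the add/sub immediate encoding, so the
  // MacroAssembler has to materialise them in a temporary register first.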
armvixlb0c8ae22014-03-21 14:03:59 +00003438 __ Add(x10, x0, Operand(0x1234567890abcdef));
armvixlad96eda2013-06-14 11:42:37 +01003439 __ Add(x11, x1, Operand(0xffffffff));
3440
3441 __ Add(w12, w0, Operand(0x12345678));
3442 __ Add(w13, w1, Operand(0xffffffff));
3443
armvixl4a102ba2014-07-14 09:02:40 +01003444 __ Add(w18, w0, Operand(kWMinInt));
3445 __ Sub(w19, w0, Operand(kWMinInt));
armvixlad96eda2013-06-14 11:42:37 +01003446
armvixl4a102ba2014-07-14 09:02:40 +01003447 __ Sub(x20, x0, Operand(0x1234567890abcdef));
armvixlad96eda2013-06-14 11:42:37 +01003448 __ Sub(w21, w0, Operand(0x12345678));
armvixl4a102ba2014-07-14 09:02:40 +01003449
armvixlad96eda2013-06-14 11:42:37 +01003450 END();
3451
3452 RUN();
3453
armvixlb0c8ae22014-03-21 14:03:59 +00003454 ASSERT_EQUAL_64(0x1234567890abcdef, x10);
3455 ASSERT_EQUAL_64(0x100000000, x11);
armvixlad96eda2013-06-14 11:42:37 +01003456
3457 ASSERT_EQUAL_32(0x12345678, w12);
3458 ASSERT_EQUAL_64(0x0, x13);
3459
armvixl4a102ba2014-07-14 09:02:40 +01003460 ASSERT_EQUAL_32(kWMinInt, w18);
3461 ASSERT_EQUAL_32(kWMinInt, w19);
armvixlad96eda2013-06-14 11:42:37 +01003462
armvixl4a102ba2014-07-14 09:02:40 +01003463 ASSERT_EQUAL_64(-0x1234567890abcdef, x20);
armvixlad96eda2013-06-14 11:42:37 +01003464 ASSERT_EQUAL_32(-0x12345678, w21);
3465
3466 TEARDOWN();
3467}
3468
3469
3470TEST(add_sub_shifted) {
3471 SETUP();
3472
3473 START();
3474 __ Mov(x0, 0);
armvixlb0c8ae22014-03-21 14:03:59 +00003475 __ Mov(x1, 0x0123456789abcdef);
3476 __ Mov(x2, 0xfedcba9876543210);
3477 __ Mov(x3, 0xffffffffffffffff);
armvixlad96eda2013-06-14 11:42:37 +01003478
3479 __ Add(x10, x1, Operand(x2));
3480 __ Add(x11, x0, Operand(x1, LSL, 8));
3481 __ Add(x12, x0, Operand(x1, LSR, 8));
3482 __ Add(x13, x0, Operand(x1, ASR, 8));
3483 __ Add(x14, x0, Operand(x2, ASR, 8));
3484 __ Add(w15, w0, Operand(w1, ASR, 8));
3485 __ Add(w18, w3, Operand(w1, ROR, 8));
3486 __ Add(x19, x3, Operand(x1, ROR, 8));
3487
3488 __ Sub(x20, x3, Operand(x2));
3489 __ Sub(x21, x3, Operand(x1, LSL, 8));
3490 __ Sub(x22, x3, Operand(x1, LSR, 8));
3491 __ Sub(x23, x3, Operand(x1, ASR, 8));
3492 __ Sub(x24, x3, Operand(x2, ASR, 8));
3493 __ Sub(w25, w3, Operand(w1, ASR, 8));
3494 __ Sub(w26, w3, Operand(w1, ROR, 8));
3495 __ Sub(x27, x3, Operand(x1, ROR, 8));
3496 END();
3497
3498 RUN();
3499
armvixlb0c8ae22014-03-21 14:03:59 +00003500 ASSERT_EQUAL_64(0xffffffffffffffff, x10);
3501 ASSERT_EQUAL_64(0x23456789abcdef00, x11);
3502 ASSERT_EQUAL_64(0x000123456789abcd, x12);
3503 ASSERT_EQUAL_64(0x000123456789abcd, x13);
3504 ASSERT_EQUAL_64(0xfffedcba98765432, x14);
armvixlad96eda2013-06-14 11:42:37 +01003505 ASSERT_EQUAL_64(0xff89abcd, x15);
3506 ASSERT_EQUAL_64(0xef89abcc, x18);
armvixlb0c8ae22014-03-21 14:03:59 +00003507 ASSERT_EQUAL_64(0xef0123456789abcc, x19);
armvixlad96eda2013-06-14 11:42:37 +01003508
armvixlb0c8ae22014-03-21 14:03:59 +00003509 ASSERT_EQUAL_64(0x0123456789abcdef, x20);
3510 ASSERT_EQUAL_64(0xdcba9876543210ff, x21);
3511 ASSERT_EQUAL_64(0xfffedcba98765432, x22);
3512 ASSERT_EQUAL_64(0xfffedcba98765432, x23);
3513 ASSERT_EQUAL_64(0x000123456789abcd, x24);
armvixlad96eda2013-06-14 11:42:37 +01003514 ASSERT_EQUAL_64(0x00765432, x25);
3515 ASSERT_EQUAL_64(0x10765432, x26);
armvixlb0c8ae22014-03-21 14:03:59 +00003516 ASSERT_EQUAL_64(0x10fedcba98765432, x27);
armvixlad96eda2013-06-14 11:42:37 +01003517
3518 TEARDOWN();
3519}
3520
3521
3522TEST(add_sub_extended) {
3523 SETUP();
3524
3525 START();
3526 __ Mov(x0, 0);
armvixlb0c8ae22014-03-21 14:03:59 +00003527 __ Mov(x1, 0x0123456789abcdef);
3528 __ Mov(x2, 0xfedcba9876543210);
armvixlad96eda2013-06-14 11:42:37 +01003529 __ Mov(w3, 0x80);
3530
3531 __ Add(x10, x0, Operand(x1, UXTB, 0));
3532 __ Add(x11, x0, Operand(x1, UXTB, 1));
3533 __ Add(x12, x0, Operand(x1, UXTH, 2));
3534 __ Add(x13, x0, Operand(x1, UXTW, 4));
3535
3536 __ Add(x14, x0, Operand(x1, SXTB, 0));
3537 __ Add(x15, x0, Operand(x1, SXTB, 1));
3538 __ Add(x16, x0, Operand(x1, SXTH, 2));
3539 __ Add(x17, x0, Operand(x1, SXTW, 3));
3540 __ Add(x18, x0, Operand(x2, SXTB, 0));
3541 __ Add(x19, x0, Operand(x2, SXTB, 1));
3542 __ Add(x20, x0, Operand(x2, SXTH, 2));
3543 __ Add(x21, x0, Operand(x2, SXTW, 3));
3544
3545 __ Add(x22, x1, Operand(x2, SXTB, 1));
3546 __ Sub(x23, x1, Operand(x2, SXTB, 1));
3547
3548 __ Add(w24, w1, Operand(w2, UXTB, 2));
3549 __ Add(w25, w0, Operand(w1, SXTB, 0));
3550 __ Add(w26, w0, Operand(w1, SXTB, 1));
3551 __ Add(w27, w2, Operand(w1, SXTW, 3));
3552
3553 __ Add(w28, w0, Operand(w1, SXTW, 3));
3554 __ Add(x29, x0, Operand(w1, SXTW, 3));
3555
3556 __ Sub(x30, x0, Operand(w3, SXTB, 1));
3557 END();
3558
3559 RUN();
3560
armvixlb0c8ae22014-03-21 14:03:59 +00003561 ASSERT_EQUAL_64(0xef, x10);
3562 ASSERT_EQUAL_64(0x1de, x11);
3563 ASSERT_EQUAL_64(0x337bc, x12);
3564 ASSERT_EQUAL_64(0x89abcdef0, x13);
armvixlad96eda2013-06-14 11:42:37 +01003565
armvixlb0c8ae22014-03-21 14:03:59 +00003566 ASSERT_EQUAL_64(0xffffffffffffffef, x14);
3567 ASSERT_EQUAL_64(0xffffffffffffffde, x15);
3568 ASSERT_EQUAL_64(0xffffffffffff37bc, x16);
3569 ASSERT_EQUAL_64(0xfffffffc4d5e6f78, x17);
3570 ASSERT_EQUAL_64(0x10, x18);
3571 ASSERT_EQUAL_64(0x20, x19);
3572 ASSERT_EQUAL_64(0xc840, x20);
3573 ASSERT_EQUAL_64(0x3b2a19080, x21);
armvixlad96eda2013-06-14 11:42:37 +01003574
armvixlb0c8ae22014-03-21 14:03:59 +00003575 ASSERT_EQUAL_64(0x0123456789abce0f, x22);
3576 ASSERT_EQUAL_64(0x0123456789abcdcf, x23);
armvixlad96eda2013-06-14 11:42:37 +01003577
3578 ASSERT_EQUAL_32(0x89abce2f, w24);
3579 ASSERT_EQUAL_32(0xffffffef, w25);
3580 ASSERT_EQUAL_32(0xffffffde, w26);
3581 ASSERT_EQUAL_32(0xc3b2a188, w27);
3582
3583 ASSERT_EQUAL_32(0x4d5e6f78, w28);
armvixlb0c8ae22014-03-21 14:03:59 +00003584 ASSERT_EQUAL_64(0xfffffffc4d5e6f78, x29);
armvixlad96eda2013-06-14 11:42:37 +01003585
3586 ASSERT_EQUAL_64(256, x30);
3587
3588 TEARDOWN();
3589}
3590
3591
3592TEST(add_sub_negative) {
3593 SETUP();
3594
3595 START();
3596 __ Mov(x0, 0);
3597 __ Mov(x1, 4687);
3598 __ Mov(x2, 0x1122334455667788);
3599 __ Mov(w3, 0x11223344);
3600 __ Mov(w4, 400000);
3601
3602 __ Add(x10, x0, -42);
3603 __ Add(x11, x1, -687);
3604 __ Add(x12, x2, -0x88);
3605
3606 __ Sub(x13, x0, -600);
3607 __ Sub(x14, x1, -313);
3608 __ Sub(x15, x2, -0x555);
3609
3610 __ Add(w19, w3, -0x344);
3611 __ Add(w20, w4, -2000);
3612
3613 __ Sub(w21, w3, -0xbc);
3614 __ Sub(w22, w4, -2000);
3615 END();
3616
3617 RUN();
3618
3619 ASSERT_EQUAL_64(-42, x10);
3620 ASSERT_EQUAL_64(4000, x11);
3621 ASSERT_EQUAL_64(0x1122334455667700, x12);
3622
3623 ASSERT_EQUAL_64(600, x13);
3624 ASSERT_EQUAL_64(5000, x14);
3625 ASSERT_EQUAL_64(0x1122334455667cdd, x15);
3626
3627 ASSERT_EQUAL_32(0x11223000, w19);
3628 ASSERT_EQUAL_32(398000, w20);
3629
3630 ASSERT_EQUAL_32(0x11223400, w21);
3631 ASSERT_EQUAL_32(402000, w22);
3632
3633 TEARDOWN();
3634}
3635
3636
armvixlf37fdc02014-02-05 13:22:16 +00003637TEST(add_sub_zero) {
3638 SETUP();
3639
3640 START();
3641 __ Mov(x0, 0);
3642 __ Mov(x1, 0);
3643 __ Mov(x2, 0);
3644
3645 Label blob1;
3646 __ Bind(&blob1);
3647 __ Add(x0, x0, 0);
3648 __ Sub(x1, x1, 0);
3649 __ Sub(x2, x2, xzr);
armvixlb0c8ae22014-03-21 14:03:59 +00003650 VIXL_CHECK(__ SizeOfCodeGeneratedSince(&blob1) == 0);
armvixlf37fdc02014-02-05 13:22:16 +00003651
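  // Adding zero to a W register cannot be elided: the instruction must still
  // zero the upper 32 bits of the corresponding X register.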
3652 Label blob2;
3653 __ Bind(&blob2);
3654 __ Add(w3, w3, 0);
armvixlb0c8ae22014-03-21 14:03:59 +00003655 VIXL_CHECK(__ SizeOfCodeGeneratedSince(&blob2) != 0);
armvixlf37fdc02014-02-05 13:22:16 +00003656
3657 Label blob3;
3658 __ Bind(&blob3);
3659 __ Sub(w3, w3, wzr);
armvixlb0c8ae22014-03-21 14:03:59 +00003660 VIXL_CHECK(__ SizeOfCodeGeneratedSince(&blob3) != 0);
armvixlf37fdc02014-02-05 13:22:16 +00003661
3662 END();
3663
3664 RUN();
3665
3666 ASSERT_EQUAL_64(0, x0);
3667 ASSERT_EQUAL_64(0, x1);
3668 ASSERT_EQUAL_64(0, x2);
3669
3670 TEARDOWN();
3671}
3672
3673
3674TEST(claim_drop_zero) {
3675 SETUP();
3676
3677 START();
3678
3679 Label start;
3680 __ Bind(&start);
3681 __ Claim(Operand(0));
3682 __ Drop(Operand(0));
3683 __ Claim(Operand(xzr));
3684 __ Drop(Operand(xzr));
armvixlb0c8ae22014-03-21 14:03:59 +00003685 VIXL_CHECK(__ SizeOfCodeGeneratedSince(&start) == 0);
armvixlf37fdc02014-02-05 13:22:16 +00003686
3687 END();
3688
3689 RUN();
3690
3691 TEARDOWN();
3692}
3693
3694
armvixlad96eda2013-06-14 11:42:37 +01003695TEST(neg) {
3696 SETUP();
3697
3698 START();
armvixlb0c8ae22014-03-21 14:03:59 +00003699 __ Mov(x0, 0xf123456789abcdef);
armvixlad96eda2013-06-14 11:42:37 +01003700
3701 // Immediate.
3702 __ Neg(x1, 0x123);
3703 __ Neg(w2, 0x123);
3704
3705 // Shifted.
3706 __ Neg(x3, Operand(x0, LSL, 1));
3707 __ Neg(w4, Operand(w0, LSL, 2));
3708 __ Neg(x5, Operand(x0, LSR, 3));
3709 __ Neg(w6, Operand(w0, LSR, 4));
3710 __ Neg(x7, Operand(x0, ASR, 5));
3711 __ Neg(w8, Operand(w0, ASR, 6));
3712
3713 // Extended.
3714 __ Neg(w9, Operand(w0, UXTB));
3715 __ Neg(x10, Operand(x0, SXTB, 1));
3716 __ Neg(w11, Operand(w0, UXTH, 2));
3717 __ Neg(x12, Operand(x0, SXTH, 3));
3718 __ Neg(w13, Operand(w0, UXTW, 4));
3719 __ Neg(x14, Operand(x0, SXTW, 4));
3720 END();
3721
3722 RUN();
3723
armvixlb0c8ae22014-03-21 14:03:59 +00003724 ASSERT_EQUAL_64(0xfffffffffffffedd, x1);
armvixlad96eda2013-06-14 11:42:37 +01003725 ASSERT_EQUAL_64(0xfffffedd, x2);
armvixlb0c8ae22014-03-21 14:03:59 +00003726 ASSERT_EQUAL_64(0x1db97530eca86422, x3);
armvixlad96eda2013-06-14 11:42:37 +01003727 ASSERT_EQUAL_64(0xd950c844, x4);
armvixlb0c8ae22014-03-21 14:03:59 +00003728 ASSERT_EQUAL_64(0xe1db97530eca8643, x5);
armvixlad96eda2013-06-14 11:42:37 +01003729 ASSERT_EQUAL_64(0xf7654322, x6);
armvixlb0c8ae22014-03-21 14:03:59 +00003730 ASSERT_EQUAL_64(0x0076e5d4c3b2a191, x7);
armvixlad96eda2013-06-14 11:42:37 +01003731 ASSERT_EQUAL_64(0x01d950c9, x8);
3732 ASSERT_EQUAL_64(0xffffff11, x9);
armvixlb0c8ae22014-03-21 14:03:59 +00003733 ASSERT_EQUAL_64(0x0000000000000022, x10);
armvixlad96eda2013-06-14 11:42:37 +01003734 ASSERT_EQUAL_64(0xfffcc844, x11);
armvixlb0c8ae22014-03-21 14:03:59 +00003735 ASSERT_EQUAL_64(0x0000000000019088, x12);
armvixlad96eda2013-06-14 11:42:37 +01003736 ASSERT_EQUAL_64(0x65432110, x13);
armvixlb0c8ae22014-03-21 14:03:59 +00003737 ASSERT_EQUAL_64(0x0000000765432110, x14);
armvixlad96eda2013-06-14 11:42:37 +01003738
3739 TEARDOWN();
3740}
3741
3742
3743TEST(adc_sbc_shift) {
3744 SETUP();
3745
3746 START();
3747 __ Mov(x0, 0);
3748 __ Mov(x1, 1);
armvixlb0c8ae22014-03-21 14:03:59 +00003749 __ Mov(x2, 0x0123456789abcdef);
3750 __ Mov(x3, 0xfedcba9876543210);
3751 __ Mov(x4, 0xffffffffffffffff);
armvixlad96eda2013-06-14 11:42:37 +01003752
3753 // Clear the C flag.
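  // (Adding zero cannot produce a carry, so Adds leaves the C flag clear.)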
armvixlf37fdc02014-02-05 13:22:16 +00003754 __ Adds(x0, x0, Operand(0));
armvixlad96eda2013-06-14 11:42:37 +01003755
3756 __ Adc(x5, x2, Operand(x3));
3757 __ Adc(x6, x0, Operand(x1, LSL, 60));
3758 __ Sbc(x7, x4, Operand(x3, LSR, 4));
3759 __ Adc(x8, x2, Operand(x3, ASR, 4));
3760 __ Adc(x9, x2, Operand(x3, ROR, 8));
3761
3762 __ Adc(w10, w2, Operand(w3));
3763 __ Adc(w11, w0, Operand(w1, LSL, 30));
3764 __ Sbc(w12, w4, Operand(w3, LSR, 4));
3765 __ Adc(w13, w2, Operand(w3, ASR, 4));
3766 __ Adc(w14, w2, Operand(w3, ROR, 8));
3767
3768 // Set the C flag.
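  // (Comparing a register with itself subtracts with no borrow, so C is set.)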
3769 __ Cmp(w0, Operand(w0));
3770
3771 __ Adc(x18, x2, Operand(x3));
3772 __ Adc(x19, x0, Operand(x1, LSL, 60));
3773 __ Sbc(x20, x4, Operand(x3, LSR, 4));
3774 __ Adc(x21, x2, Operand(x3, ASR, 4));
3775 __ Adc(x22, x2, Operand(x3, ROR, 8));
3776
3777 __ Adc(w23, w2, Operand(w3));
3778 __ Adc(w24, w0, Operand(w1, LSL, 30));
3779 __ Sbc(w25, w4, Operand(w3, LSR, 4));
3780 __ Adc(w26, w2, Operand(w3, ASR, 4));
3781 __ Adc(w27, w2, Operand(w3, ROR, 8));
3782 END();
3783
3784 RUN();
3785
armvixlb0c8ae22014-03-21 14:03:59 +00003786 ASSERT_EQUAL_64(0xffffffffffffffff, x5);
3787 ASSERT_EQUAL_64(INT64_C(1) << 60, x6);
3788 ASSERT_EQUAL_64(0xf0123456789abcdd, x7);
3789 ASSERT_EQUAL_64(0x0111111111111110, x8);
3790 ASSERT_EQUAL_64(0x1222222222222221, x9);
armvixlad96eda2013-06-14 11:42:37 +01003791
3792 ASSERT_EQUAL_32(0xffffffff, w10);
armvixlb0c8ae22014-03-21 14:03:59 +00003793 ASSERT_EQUAL_32(INT32_C(1) << 30, w11);
armvixlad96eda2013-06-14 11:42:37 +01003794 ASSERT_EQUAL_32(0xf89abcdd, w12);
3795 ASSERT_EQUAL_32(0x91111110, w13);
3796 ASSERT_EQUAL_32(0x9a222221, w14);
3797
armvixlb0c8ae22014-03-21 14:03:59 +00003798 ASSERT_EQUAL_64(0xffffffffffffffff + 1, x18);
3799 ASSERT_EQUAL_64((INT64_C(1) << 60) + 1, x19);
3800 ASSERT_EQUAL_64(0xf0123456789abcdd + 1, x20);
3801 ASSERT_EQUAL_64(0x0111111111111110 + 1, x21);
3802 ASSERT_EQUAL_64(0x1222222222222221 + 1, x22);
armvixlad96eda2013-06-14 11:42:37 +01003803
3804 ASSERT_EQUAL_32(0xffffffff + 1, w23);
armvixlb0c8ae22014-03-21 14:03:59 +00003805 ASSERT_EQUAL_32((INT32_C(1) << 30) + 1, w24);
armvixlad96eda2013-06-14 11:42:37 +01003806 ASSERT_EQUAL_32(0xf89abcdd + 1, w25);
3807 ASSERT_EQUAL_32(0x91111110 + 1, w26);
3808 ASSERT_EQUAL_32(0x9a222221 + 1, w27);
3809
3810 // Check that adc correctly sets the condition flags.
3811 START();
3812 __ Mov(x0, 1);
armvixlb0c8ae22014-03-21 14:03:59 +00003813 __ Mov(x1, 0xffffffffffffffff);
armvixlad96eda2013-06-14 11:42:37 +01003814 // Clear the C flag.
armvixlf37fdc02014-02-05 13:22:16 +00003815 __ Adds(x0, x0, Operand(0));
3816 __ Adcs(x10, x0, Operand(x1));
armvixlad96eda2013-06-14 11:42:37 +01003817 END();
3818
3819 RUN();
3820
3821 ASSERT_EQUAL_NZCV(ZCFlag);
armvixlf37fdc02014-02-05 13:22:16 +00003822 ASSERT_EQUAL_64(0, x10);
armvixlad96eda2013-06-14 11:42:37 +01003823
3824 START();
3825 __ Mov(x0, 1);
armvixlb0c8ae22014-03-21 14:03:59 +00003826 __ Mov(x1, 0x8000000000000000);
armvixlad96eda2013-06-14 11:42:37 +01003827 // Clear the C flag.
armvixlf37fdc02014-02-05 13:22:16 +00003828 __ Adds(x0, x0, Operand(0));
3829 __ Adcs(x10, x0, Operand(x1, ASR, 63));
armvixlad96eda2013-06-14 11:42:37 +01003830 END();
3831
3832 RUN();
3833
3834 ASSERT_EQUAL_NZCV(ZCFlag);
armvixlf37fdc02014-02-05 13:22:16 +00003835 ASSERT_EQUAL_64(0, x10);
armvixlad96eda2013-06-14 11:42:37 +01003836
3837 START();
3838 __ Mov(x0, 0x10);
armvixlb0c8ae22014-03-21 14:03:59 +00003839 __ Mov(x1, 0x07ffffffffffffff);
armvixlad96eda2013-06-14 11:42:37 +01003840 // Clear the C flag.
armvixlf37fdc02014-02-05 13:22:16 +00003841 __ Adds(x0, x0, Operand(0));
3842 __ Adcs(x10, x0, Operand(x1, LSL, 4));
armvixlad96eda2013-06-14 11:42:37 +01003843 END();
3844
3845 RUN();
3846
3847 ASSERT_EQUAL_NZCV(NVFlag);
armvixlb0c8ae22014-03-21 14:03:59 +00003848 ASSERT_EQUAL_64(0x8000000000000000, x10);
armvixlf37fdc02014-02-05 13:22:16 +00003849
3850 // Check that sbc correctly sets the condition flags.
3851 START();
3852 __ Mov(x0, 0);
armvixlb0c8ae22014-03-21 14:03:59 +00003853 __ Mov(x1, 0xffffffffffffffff);
armvixlf37fdc02014-02-05 13:22:16 +00003854 // Clear the C flag.
3855 __ Adds(x0, x0, Operand(0));
3856 __ Sbcs(x10, x0, Operand(x1));
3857 END();
3858
3859 RUN();
3860
3861 ASSERT_EQUAL_NZCV(ZFlag);
3862 ASSERT_EQUAL_64(0, x10);
3863
3864 START();
3865 __ Mov(x0, 1);
armvixlb0c8ae22014-03-21 14:03:59 +00003866 __ Mov(x1, 0xffffffffffffffff);
armvixlf37fdc02014-02-05 13:22:16 +00003867 // Clear the C flag.
3868 __ Adds(x0, x0, Operand(0));
3869 __ Sbcs(x10, x0, Operand(x1, LSR, 1));
3870 END();
3871
3872 RUN();
3873
3874 ASSERT_EQUAL_NZCV(NFlag);
armvixlb0c8ae22014-03-21 14:03:59 +00003875 ASSERT_EQUAL_64(0x8000000000000001, x10);
armvixlf37fdc02014-02-05 13:22:16 +00003876
3877 START();
3878 __ Mov(x0, 0);
3879 // Clear the C flag.
3880 __ Adds(x0, x0, Operand(0));
armvixlb0c8ae22014-03-21 14:03:59 +00003881 __ Sbcs(x10, x0, Operand(0xffffffffffffffff));
armvixlf37fdc02014-02-05 13:22:16 +00003882 END();
3883
3884 RUN();
3885
3886 ASSERT_EQUAL_NZCV(ZFlag);
3887 ASSERT_EQUAL_64(0, x10);
3888
armvixlb0c8ae22014-03-21 14:03:59 +00003889 START();
armvixlf37fdc02014-02-05 13:22:16 +00003890 __ Mov(w0, 0x7fffffff);
3891 // Clear the C flag.
3892 __ Adds(x0, x0, Operand(0));
3893 __ Ngcs(w10, w0);
3894 END();
3895
3896 RUN();
3897
3898 ASSERT_EQUAL_NZCV(NFlag);
3899 ASSERT_EQUAL_64(0x80000000, x10);
3900
3901 START();
3902 // Clear the C flag.
3903 __ Adds(x0, x0, Operand(0));
armvixlb0c8ae22014-03-21 14:03:59 +00003904 __ Ngcs(x10, 0x7fffffffffffffff);
armvixlf37fdc02014-02-05 13:22:16 +00003905 END();
3906
3907 RUN();
3908
3909 ASSERT_EQUAL_NZCV(NFlag);
armvixlb0c8ae22014-03-21 14:03:59 +00003910 ASSERT_EQUAL_64(0x8000000000000000, x10);
armvixlf37fdc02014-02-05 13:22:16 +00003911
armvixlb0c8ae22014-03-21 14:03:59 +00003912 START();
armvixlf37fdc02014-02-05 13:22:16 +00003913 __ Mov(x0, 0);
3914 // Set the C flag.
3915 __ Cmp(x0, Operand(x0));
3916 __ Sbcs(x10, x0, Operand(1));
3917 END();
3918
3919 RUN();
3920
3921 ASSERT_EQUAL_NZCV(NFlag);
armvixlb0c8ae22014-03-21 14:03:59 +00003922 ASSERT_EQUAL_64(0xffffffffffffffff, x10);
armvixlf37fdc02014-02-05 13:22:16 +00003923
armvixlb0c8ae22014-03-21 14:03:59 +00003924 START();
armvixlf37fdc02014-02-05 13:22:16 +00003925 __ Mov(x0, 0);
3926 // Set the C flag.
3927 __ Cmp(x0, Operand(x0));
armvixlb0c8ae22014-03-21 14:03:59 +00003928 __ Ngcs(x10, 0x7fffffffffffffff);
armvixlf37fdc02014-02-05 13:22:16 +00003929 END();
3930
3931 RUN();
3932
3933 ASSERT_EQUAL_NZCV(NFlag);
armvixlb0c8ae22014-03-21 14:03:59 +00003934 ASSERT_EQUAL_64(0x8000000000000001, x10);
armvixlad96eda2013-06-14 11:42:37 +01003935
3936 TEARDOWN();
3937}
3938
3939
3940TEST(adc_sbc_extend) {
3941 SETUP();
3942
3943 START();
3944 // Clear the C flag.
armvixlf37fdc02014-02-05 13:22:16 +00003945 __ Adds(x0, x0, Operand(0));
armvixlad96eda2013-06-14 11:42:37 +01003946
3947 __ Mov(x0, 0);
3948 __ Mov(x1, 1);
armvixlb0c8ae22014-03-21 14:03:59 +00003949 __ Mov(x2, 0x0123456789abcdef);
armvixlad96eda2013-06-14 11:42:37 +01003950
3951 __ Adc(x10, x1, Operand(w2, UXTB, 1));
3952 __ Adc(x11, x1, Operand(x2, SXTH, 2));
3953 __ Sbc(x12, x1, Operand(w2, UXTW, 4));
3954 __ Adc(x13, x1, Operand(x2, UXTX, 4));
3955
3956 __ Adc(w14, w1, Operand(w2, UXTB, 1));
3957 __ Adc(w15, w1, Operand(w2, SXTH, 2));
3958 __ Adc(w9, w1, Operand(w2, UXTW, 4));
3959
3960 // Set the C flag.
3961 __ Cmp(w0, Operand(w0));
3962
3963 __ Adc(x20, x1, Operand(w2, UXTB, 1));
3964 __ Adc(x21, x1, Operand(x2, SXTH, 2));
3965 __ Sbc(x22, x1, Operand(w2, UXTW, 4));
3966 __ Adc(x23, x1, Operand(x2, UXTX, 4));
3967
3968 __ Adc(w24, w1, Operand(w2, UXTB, 1));
3969 __ Adc(w25, w1, Operand(w2, SXTH, 2));
3970 __ Adc(w26, w1, Operand(w2, UXTW, 4));
3971 END();
3972
3973 RUN();
3974
3975 ASSERT_EQUAL_64(0x1df, x10);
armvixlb0c8ae22014-03-21 14:03:59 +00003976 ASSERT_EQUAL_64(0xffffffffffff37bd, x11);
3977 ASSERT_EQUAL_64(0xfffffff765432110, x12);
3978 ASSERT_EQUAL_64(0x123456789abcdef1, x13);
armvixlad96eda2013-06-14 11:42:37 +01003979
3980 ASSERT_EQUAL_32(0x1df, w14);
3981 ASSERT_EQUAL_32(0xffff37bd, w15);
3982 ASSERT_EQUAL_32(0x9abcdef1, w9);
3983
3984 ASSERT_EQUAL_64(0x1df + 1, x20);
armvixlb0c8ae22014-03-21 14:03:59 +00003985 ASSERT_EQUAL_64(0xffffffffffff37bd + 1, x21);
3986 ASSERT_EQUAL_64(0xfffffff765432110 + 1, x22);
3987 ASSERT_EQUAL_64(0x123456789abcdef1 + 1, x23);
armvixlad96eda2013-06-14 11:42:37 +01003988
3989 ASSERT_EQUAL_32(0x1df + 1, w24);
3990 ASSERT_EQUAL_32(0xffff37bd + 1, w25);
3991 ASSERT_EQUAL_32(0x9abcdef1 + 1, w26);
3992
3993 // Check that adc correctly sets the condition flags.
3994 START();
3995 __ Mov(x0, 0xff);
armvixlb0c8ae22014-03-21 14:03:59 +00003996 __ Mov(x1, 0xffffffffffffffff);
armvixlad96eda2013-06-14 11:42:37 +01003997 // Clear the C flag.
armvixlf37fdc02014-02-05 13:22:16 +00003998 __ Adds(x0, x0, Operand(0));
3999 __ Adcs(x10, x0, Operand(x1, SXTX, 1));
armvixlad96eda2013-06-14 11:42:37 +01004000 END();
4001
4002 RUN();
4003
4004 ASSERT_EQUAL_NZCV(CFlag);
4005
4006 START();
armvixlb0c8ae22014-03-21 14:03:59 +00004007 __ Mov(x0, 0x7fffffffffffffff);
armvixlad96eda2013-06-14 11:42:37 +01004008 __ Mov(x1, 1);
4009 // Clear the C flag.
armvixlf37fdc02014-02-05 13:22:16 +00004010 __ Adds(x0, x0, Operand(0));
4011 __ Adcs(x10, x0, Operand(x1, UXTB, 2));
armvixlad96eda2013-06-14 11:42:37 +01004012 END();
4013
4014 RUN();
4015
4016 ASSERT_EQUAL_NZCV(NVFlag);
4017
4018 START();
armvixlb0c8ae22014-03-21 14:03:59 +00004019 __ Mov(x0, 0x7fffffffffffffff);
armvixlad96eda2013-06-14 11:42:37 +01004020 // Clear the C flag.
armvixlf37fdc02014-02-05 13:22:16 +00004021 __ Adds(x0, x0, Operand(0));
4022 __ Adcs(x10, x0, Operand(1));
armvixlad96eda2013-06-14 11:42:37 +01004023 END();
4024
4025 RUN();
4026
4027 ASSERT_EQUAL_NZCV(NVFlag);
4028
4029 TEARDOWN();
4030}
4031
4032
4033TEST(adc_sbc_wide_imm) {
4034 SETUP();
4035
4036 START();
4037 __ Mov(x0, 0);
4038
4039 // Clear the C flag.
armvixlf37fdc02014-02-05 13:22:16 +00004040 __ Adds(x0, x0, Operand(0));
armvixlad96eda2013-06-14 11:42:37 +01004041
armvixlb0c8ae22014-03-21 14:03:59 +00004042 __ Adc(x7, x0, Operand(0x1234567890abcdef));
armvixlad96eda2013-06-14 11:42:37 +01004043 __ Adc(w8, w0, Operand(0xffffffff));
armvixlb0c8ae22014-03-21 14:03:59 +00004044 __ Sbc(x9, x0, Operand(0x1234567890abcdef));
armvixlf37fdc02014-02-05 13:22:16 +00004045 __ Sbc(w10, w0, Operand(0xffffffff));
armvixlb0c8ae22014-03-21 14:03:59 +00004046 __ Ngc(x11, Operand(0xffffffff00000000));
armvixlf37fdc02014-02-05 13:22:16 +00004047 __ Ngc(w12, Operand(0xffff0000));
armvixlad96eda2013-06-14 11:42:37 +01004048
4049 // Set the C flag.
4050 __ Cmp(w0, Operand(w0));
4051
armvixlb0c8ae22014-03-21 14:03:59 +00004052 __ Adc(x18, x0, Operand(0x1234567890abcdef));
armvixlf37fdc02014-02-05 13:22:16 +00004053 __ Adc(w19, w0, Operand(0xffffffff));
armvixlb0c8ae22014-03-21 14:03:59 +00004054 __ Sbc(x20, x0, Operand(0x1234567890abcdef));
armvixlf37fdc02014-02-05 13:22:16 +00004055 __ Sbc(w21, w0, Operand(0xffffffff));
armvixlb0c8ae22014-03-21 14:03:59 +00004056 __ Ngc(x22, Operand(0xffffffff00000000));
armvixlf37fdc02014-02-05 13:22:16 +00004057 __ Ngc(w23, Operand(0xffff0000));
armvixlad96eda2013-06-14 11:42:37 +01004058 END();
4059
4060 RUN();
4061
armvixlb0c8ae22014-03-21 14:03:59 +00004062 ASSERT_EQUAL_64(0x1234567890abcdef, x7);
armvixlad96eda2013-06-14 11:42:37 +01004063 ASSERT_EQUAL_64(0xffffffff, x8);
armvixlb0c8ae22014-03-21 14:03:59 +00004064 ASSERT_EQUAL_64(0xedcba9876f543210, x9);
armvixlf37fdc02014-02-05 13:22:16 +00004065 ASSERT_EQUAL_64(0, x10);
4066 ASSERT_EQUAL_64(0xffffffff, x11);
4067 ASSERT_EQUAL_64(0xffff, x12);
4068
armvixlb0c8ae22014-03-21 14:03:59 +00004069 ASSERT_EQUAL_64(0x1234567890abcdef + 1, x18);
armvixlf37fdc02014-02-05 13:22:16 +00004070 ASSERT_EQUAL_64(0, x19);
armvixlb0c8ae22014-03-21 14:03:59 +00004071 ASSERT_EQUAL_64(0xedcba9876f543211, x20);
armvixlf37fdc02014-02-05 13:22:16 +00004072 ASSERT_EQUAL_64(1, x21);
armvixlb0c8ae22014-03-21 14:03:59 +00004073 ASSERT_EQUAL_64(0x0000000100000000, x22);
4074 ASSERT_EQUAL_64(0x0000000000010000, x23);
armvixlad96eda2013-06-14 11:42:37 +01004075
4076 TEARDOWN();
4077}
4078
4079TEST(flags) {
4080 SETUP();
4081
4082 START();
4083 __ Mov(x0, 0);
armvixlb0c8ae22014-03-21 14:03:59 +00004084 __ Mov(x1, 0x1111111111111111);
armvixlad96eda2013-06-14 11:42:37 +01004085 __ Neg(x10, Operand(x0));
4086 __ Neg(x11, Operand(x1));
4087 __ Neg(w12, Operand(w1));
4088 // Clear the C flag.
armvixlf37fdc02014-02-05 13:22:16 +00004089 __ Adds(x0, x0, Operand(0));
armvixlad96eda2013-06-14 11:42:37 +01004090 __ Ngc(x13, Operand(x0));
4091 // Set the C flag.
4092 __ Cmp(x0, Operand(x0));
4093 __ Ngc(w14, Operand(w0));
4094 END();
4095
4096 RUN();
4097
4098 ASSERT_EQUAL_64(0, x10);
armvixlb0c8ae22014-03-21 14:03:59 +00004099 ASSERT_EQUAL_64(-0x1111111111111111, x11);
armvixlad96eda2013-06-14 11:42:37 +01004100 ASSERT_EQUAL_32(-0x11111111, w12);
armvixlb0c8ae22014-03-21 14:03:59 +00004101 ASSERT_EQUAL_64(-1, x13);
armvixlad96eda2013-06-14 11:42:37 +01004102 ASSERT_EQUAL_32(0, w14);
4103
4104 START();
4105 __ Mov(x0, 0);
4106 __ Cmp(x0, Operand(x0));
4107 END();
4108
4109 RUN();
4110
4111 ASSERT_EQUAL_NZCV(ZCFlag);
4112
4113 START();
4114 __ Mov(w0, 0);
4115 __ Cmp(w0, Operand(w0));
4116 END();
4117
4118 RUN();
4119
4120 ASSERT_EQUAL_NZCV(ZCFlag);
4121
4122 START();
4123 __ Mov(x0, 0);
armvixlb0c8ae22014-03-21 14:03:59 +00004124 __ Mov(x1, 0x1111111111111111);
armvixlad96eda2013-06-14 11:42:37 +01004125 __ Cmp(x0, Operand(x1));
4126 END();
4127
4128 RUN();
4129
4130 ASSERT_EQUAL_NZCV(NFlag);
4131
4132 START();
4133 __ Mov(w0, 0);
4134 __ Mov(w1, 0x11111111);
4135 __ Cmp(w0, Operand(w1));
4136 END();
4137
4138 RUN();
4139
4140 ASSERT_EQUAL_NZCV(NFlag);
4141
4142 START();
armvixlb0c8ae22014-03-21 14:03:59 +00004143 __ Mov(x1, 0x1111111111111111);
armvixlad96eda2013-06-14 11:42:37 +01004144 __ Cmp(x1, Operand(0));
4145 END();
4146
4147 RUN();
4148
4149 ASSERT_EQUAL_NZCV(CFlag);
4150
4151 START();
4152 __ Mov(w1, 0x11111111);
4153 __ Cmp(w1, Operand(0));
4154 END();
4155
4156 RUN();
4157
4158 ASSERT_EQUAL_NZCV(CFlag);
4159
4160 START();
4161 __ Mov(x0, 1);
armvixlb0c8ae22014-03-21 14:03:59 +00004162 __ Mov(x1, 0x7fffffffffffffff);
armvixlad96eda2013-06-14 11:42:37 +01004163 __ Cmn(x1, Operand(x0));
4164 END();
4165
4166 RUN();
4167
4168 ASSERT_EQUAL_NZCV(NVFlag);
4169
4170 START();
4171 __ Mov(w0, 1);
4172 __ Mov(w1, 0x7fffffff);
4173 __ Cmn(w1, Operand(w0));
4174 END();
4175
4176 RUN();
4177
4178 ASSERT_EQUAL_NZCV(NVFlag);
4179
4180 START();
4181 __ Mov(x0, 1);
armvixlb0c8ae22014-03-21 14:03:59 +00004182 __ Mov(x1, 0xffffffffffffffff);
armvixlad96eda2013-06-14 11:42:37 +01004183 __ Cmn(x1, Operand(x0));
4184 END();
4185
4186 RUN();
4187
4188 ASSERT_EQUAL_NZCV(ZCFlag);
4189
4190 START();
4191 __ Mov(w0, 1);
4192 __ Mov(w1, 0xffffffff);
4193 __ Cmn(w1, Operand(w0));
4194 END();
4195
4196 RUN();
4197
4198 ASSERT_EQUAL_NZCV(ZCFlag);
4199
4200 START();
4201 __ Mov(w0, 0);
4202 __ Mov(w1, 1);
4203 // Clear the C flag.
armvixlf37fdc02014-02-05 13:22:16 +00004204 __ Adds(w0, w0, Operand(0));
4205 __ Ngcs(w0, Operand(w1));
armvixlad96eda2013-06-14 11:42:37 +01004206 END();
4207
4208 RUN();
4209
4210 ASSERT_EQUAL_NZCV(NFlag);
4211
4212 START();
4213 __ Mov(w0, 0);
4214 __ Mov(w1, 0);
4215 // Set the C flag.
4216 __ Cmp(w0, Operand(w0));
armvixlf37fdc02014-02-05 13:22:16 +00004217 __ Ngcs(w0, Operand(w1));
armvixlad96eda2013-06-14 11:42:37 +01004218 END();
4219
4220 RUN();
4221
4222 ASSERT_EQUAL_NZCV(ZCFlag);
4223
4224 TEARDOWN();
4225}
4226
4227
4228TEST(cmp_shift) {
4229 SETUP();
4230
4231 START();
4232 __ Mov(x18, 0xf0000000);
armvixlb0c8ae22014-03-21 14:03:59 +00004233 __ Mov(x19, 0xf000000010000000);
4234 __ Mov(x20, 0xf0000000f0000000);
4235 __ Mov(x21, 0x7800000078000000);
4236 __ Mov(x22, 0x3c0000003c000000);
4237 __ Mov(x23, 0x8000000780000000);
4238 __ Mov(x24, 0x0000000f00000000);
4239 __ Mov(x25, 0x00000003c0000000);
4240 __ Mov(x26, 0x8000000780000000);
armvixlad96eda2013-06-14 11:42:37 +01004241 __ Mov(x27, 0xc0000003);
4242
4243 __ Cmp(w20, Operand(w21, LSL, 1));
4244 __ Mrs(x0, NZCV);
4245
4246 __ Cmp(x20, Operand(x22, LSL, 2));
4247 __ Mrs(x1, NZCV);
4248
4249 __ Cmp(w19, Operand(w23, LSR, 3));
4250 __ Mrs(x2, NZCV);
4251
4252 __ Cmp(x18, Operand(x24, LSR, 4));
4253 __ Mrs(x3, NZCV);
4254
4255 __ Cmp(w20, Operand(w25, ASR, 2));
4256 __ Mrs(x4, NZCV);
4257
4258 __ Cmp(x20, Operand(x26, ASR, 3));
4259 __ Mrs(x5, NZCV);
4260
4261 __ Cmp(w27, Operand(w22, ROR, 28));
4262 __ Mrs(x6, NZCV);
4263
4264 __ Cmp(x20, Operand(x21, ROR, 31));
4265 __ Mrs(x7, NZCV);
4266 END();
4267
4268 RUN();
4269
4270 ASSERT_EQUAL_32(ZCFlag, w0);
4271 ASSERT_EQUAL_32(ZCFlag, w1);
4272 ASSERT_EQUAL_32(ZCFlag, w2);
4273 ASSERT_EQUAL_32(ZCFlag, w3);
4274 ASSERT_EQUAL_32(ZCFlag, w4);
4275 ASSERT_EQUAL_32(ZCFlag, w5);
4276 ASSERT_EQUAL_32(ZCFlag, w6);
4277 ASSERT_EQUAL_32(ZCFlag, w7);
4278
4279 TEARDOWN();
4280}
4281
4282
4283TEST(cmp_extend) {
4284 SETUP();
4285
4286 START();
4287 __ Mov(w20, 0x2);
4288 __ Mov(w21, 0x1);
armvixlb0c8ae22014-03-21 14:03:59 +00004289 __ Mov(x22, 0xffffffffffffffff);
armvixlad96eda2013-06-14 11:42:37 +01004290 __ Mov(x23, 0xff);
armvixlb0c8ae22014-03-21 14:03:59 +00004291 __ Mov(x24, 0xfffffffffffffffe);
armvixlad96eda2013-06-14 11:42:37 +01004292 __ Mov(x25, 0xffff);
4293 __ Mov(x26, 0xffffffff);
4294
4295 __ Cmp(w20, Operand(w21, LSL, 1));
4296 __ Mrs(x0, NZCV);
4297
4298 __ Cmp(x22, Operand(x23, SXTB, 0));
4299 __ Mrs(x1, NZCV);
4300
4301 __ Cmp(x24, Operand(x23, SXTB, 1));
4302 __ Mrs(x2, NZCV);
4303
4304 __ Cmp(x24, Operand(x23, UXTB, 1));
4305 __ Mrs(x3, NZCV);
4306
4307 __ Cmp(w22, Operand(w25, UXTH));
4308 __ Mrs(x4, NZCV);
4309
4310 __ Cmp(x22, Operand(x25, SXTH));
4311 __ Mrs(x5, NZCV);
4312
4313 __ Cmp(x22, Operand(x26, UXTW));
4314 __ Mrs(x6, NZCV);
4315
4316 __ Cmp(x24, Operand(x26, SXTW, 1));
4317 __ Mrs(x7, NZCV);
4318 END();
4319
4320 RUN();
4321
4322 ASSERT_EQUAL_32(ZCFlag, w0);
4323 ASSERT_EQUAL_32(ZCFlag, w1);
4324 ASSERT_EQUAL_32(ZCFlag, w2);
4325 ASSERT_EQUAL_32(NCFlag, w3);
4326 ASSERT_EQUAL_32(NCFlag, w4);
4327 ASSERT_EQUAL_32(ZCFlag, w5);
4328 ASSERT_EQUAL_32(NCFlag, w6);
4329 ASSERT_EQUAL_32(ZCFlag, w7);
4330
4331 TEARDOWN();
4332}
4333
4334
4335TEST(ccmp) {
4336 SETUP();
armvixlc68cb642014-09-25 18:49:30 +01004337 ALLOW_ASM();
armvixlad96eda2013-06-14 11:42:37 +01004338
4339 START();
4340 __ Mov(w16, 0);
4341 __ Mov(w17, 1);
armvixl578645f2013-08-15 17:21:42 +01004342 __ Cmp(w16, w16);
4343 __ Ccmp(w16, w17, NCFlag, eq);
armvixlad96eda2013-06-14 11:42:37 +01004344 __ Mrs(x0, NZCV);
4345
armvixl578645f2013-08-15 17:21:42 +01004346 __ Cmp(w16, w16);
4347 __ Ccmp(w16, w17, NCFlag, ne);
armvixlad96eda2013-06-14 11:42:37 +01004348 __ Mrs(x1, NZCV);
4349
armvixl578645f2013-08-15 17:21:42 +01004350 __ Cmp(x16, x16);
4351 __ Ccmn(x16, 2, NZCVFlag, eq);
armvixlad96eda2013-06-14 11:42:37 +01004352 __ Mrs(x2, NZCV);
4353
armvixl578645f2013-08-15 17:21:42 +01004354 __ Cmp(x16, x16);
4355 __ Ccmn(x16, 2, NZCVFlag, ne);
armvixlad96eda2013-06-14 11:42:37 +01004356 __ Mrs(x3, NZCV);
armvixl578645f2013-08-15 17:21:42 +01004357
armvixlc68cb642014-09-25 18:49:30 +01004358 // The MacroAssembler does not allow al as a condition.
armvixl578645f2013-08-15 17:21:42 +01004359 __ ccmp(x16, x16, NZCVFlag, al);
4360 __ Mrs(x4, NZCV);
4361
armvixlc68cb642014-09-25 18:49:30 +01004362 // The MacroAssembler does not allow nv as a condition.
armvixl578645f2013-08-15 17:21:42 +01004363 __ ccmp(x16, x16, NZCVFlag, nv);
4364 __ Mrs(x5, NZCV);
4365
armvixlad96eda2013-06-14 11:42:37 +01004366 END();
4367
4368 RUN();
4369
4370 ASSERT_EQUAL_32(NFlag, w0);
4371 ASSERT_EQUAL_32(NCFlag, w1);
4372 ASSERT_EQUAL_32(NoFlag, w2);
4373 ASSERT_EQUAL_32(NZCVFlag, w3);
armvixl578645f2013-08-15 17:21:42 +01004374 ASSERT_EQUAL_32(ZCFlag, w4);
4375 ASSERT_EQUAL_32(ZCFlag, w5);
armvixlad96eda2013-06-14 11:42:37 +01004376
4377 TEARDOWN();
4378}
4379
4380
4381TEST(ccmp_wide_imm) {
4382 SETUP();
4383
4384 START();
4385 __ Mov(w20, 0);
4386
4387 __ Cmp(w20, Operand(w20));
4388 __ Ccmp(w20, Operand(0x12345678), NZCVFlag, eq);
4389 __ Mrs(x0, NZCV);
4390
4391 __ Cmp(w20, Operand(w20));
armvixlb0c8ae22014-03-21 14:03:59 +00004392 __ Ccmp(x20, Operand(0xffffffffffffffff), NZCVFlag, eq);
armvixlad96eda2013-06-14 11:42:37 +01004393 __ Mrs(x1, NZCV);
4394 END();
4395
4396 RUN();
4397
4398 ASSERT_EQUAL_32(NFlag, w0);
4399 ASSERT_EQUAL_32(NoFlag, w1);
4400
4401 TEARDOWN();
4402}
4403
4404
4405TEST(ccmp_shift_extend) {
4406 SETUP();
4407
4408 START();
4409 __ Mov(w20, 0x2);
4410 __ Mov(w21, 0x1);
armvixlb0c8ae22014-03-21 14:03:59 +00004411 __ Mov(x22, 0xffffffffffffffff);
armvixlad96eda2013-06-14 11:42:37 +01004412 __ Mov(x23, 0xff);
armvixlb0c8ae22014-03-21 14:03:59 +00004413 __ Mov(x24, 0xfffffffffffffffe);
armvixlad96eda2013-06-14 11:42:37 +01004414
4415 __ Cmp(w20, Operand(w20));
4416 __ Ccmp(w20, Operand(w21, LSL, 1), NZCVFlag, eq);
4417 __ Mrs(x0, NZCV);
4418
4419 __ Cmp(w20, Operand(w20));
4420 __ Ccmp(x22, Operand(x23, SXTB, 0), NZCVFlag, eq);
4421 __ Mrs(x1, NZCV);
4422
4423 __ Cmp(w20, Operand(w20));
4424 __ Ccmp(x24, Operand(x23, SXTB, 1), NZCVFlag, eq);
4425 __ Mrs(x2, NZCV);
4426
4427 __ Cmp(w20, Operand(w20));
4428 __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, eq);
4429 __ Mrs(x3, NZCV);
4430
4431 __ Cmp(w20, Operand(w20));
4432 __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, ne);
4433 __ Mrs(x4, NZCV);
4434 END();
4435
4436 RUN();
4437
4438 ASSERT_EQUAL_32(ZCFlag, w0);
4439 ASSERT_EQUAL_32(ZCFlag, w1);
4440 ASSERT_EQUAL_32(ZCFlag, w2);
4441 ASSERT_EQUAL_32(NCFlag, w3);
4442 ASSERT_EQUAL_32(NZCVFlag, w4);
4443
4444 TEARDOWN();
4445}
4446
4447
4448TEST(csel) {
4449 SETUP();
armvixlc68cb642014-09-25 18:49:30 +01004450 ALLOW_ASM();
armvixlad96eda2013-06-14 11:42:37 +01004451
4452 START();
4453 __ Mov(x16, 0);
armvixlb0c8ae22014-03-21 14:03:59 +00004454 __ Mov(x24, 0x0000000f0000000f);
4455 __ Mov(x25, 0x0000001f0000001f);
armvixlad96eda2013-06-14 11:42:37 +01004456
4457 __ Cmp(w16, Operand(0));
4458 __ Csel(w0, w24, w25, eq);
4459 __ Csel(w1, w24, w25, ne);
4460 __ Csinc(w2, w24, w25, mi);
4461 __ Csinc(w3, w24, w25, pl);
4462
armvixlc68cb642014-09-25 18:49:30 +01004463 // The MacroAssembler does not allow al or nv as a condition.
armvixl578645f2013-08-15 17:21:42 +01004464 __ csel(w13, w24, w25, al);
4465 __ csel(x14, x24, x25, nv);
4466
armvixlad96eda2013-06-14 11:42:37 +01004467 __ Cmp(x16, Operand(1));
4468 __ Csinv(x4, x24, x25, gt);
4469 __ Csinv(x5, x24, x25, le);
4470 __ Csneg(x6, x24, x25, hs);
4471 __ Csneg(x7, x24, x25, lo);
4472
4473 __ Cset(w8, ne);
4474 __ Csetm(w9, ne);
4475 __ Cinc(x10, x25, ne);
4476 __ Cinv(x11, x24, ne);
4477 __ Cneg(x12, x24, ne);
armvixl578645f2013-08-15 17:21:42 +01004478
armvixlc68cb642014-09-25 18:49:30 +01004479 // The MacroAssembler does not allow al or nv as a condition.
armvixl578645f2013-08-15 17:21:42 +01004480 __ csel(w15, w24, w25, al);
4481 __ csel(x17, x24, x25, nv);
4482
armvixlad96eda2013-06-14 11:42:37 +01004483 END();
4484
4485 RUN();
4486
4487 ASSERT_EQUAL_64(0x0000000f, x0);
4488 ASSERT_EQUAL_64(0x0000001f, x1);
4489 ASSERT_EQUAL_64(0x00000020, x2);
4490 ASSERT_EQUAL_64(0x0000000f, x3);
armvixlb0c8ae22014-03-21 14:03:59 +00004491 ASSERT_EQUAL_64(0xffffffe0ffffffe0, x4);
4492 ASSERT_EQUAL_64(0x0000000f0000000f, x5);
4493 ASSERT_EQUAL_64(0xffffffe0ffffffe1, x6);
4494 ASSERT_EQUAL_64(0x0000000f0000000f, x7);
armvixlad96eda2013-06-14 11:42:37 +01004495 ASSERT_EQUAL_64(0x00000001, x8);
4496 ASSERT_EQUAL_64(0xffffffff, x9);
armvixlb0c8ae22014-03-21 14:03:59 +00004497 ASSERT_EQUAL_64(0x0000001f00000020, x10);
4498 ASSERT_EQUAL_64(0xfffffff0fffffff0, x11);
4499 ASSERT_EQUAL_64(0xfffffff0fffffff1, x12);
armvixl578645f2013-08-15 17:21:42 +01004500 ASSERT_EQUAL_64(0x0000000f, x13);
armvixlb0c8ae22014-03-21 14:03:59 +00004501 ASSERT_EQUAL_64(0x0000000f0000000f, x14);
armvixl578645f2013-08-15 17:21:42 +01004502 ASSERT_EQUAL_64(0x0000000f, x15);
armvixlb0c8ae22014-03-21 14:03:59 +00004503 ASSERT_EQUAL_64(0x0000000f0000000f, x17);
armvixlad96eda2013-06-14 11:42:37 +01004504
4505 TEARDOWN();
4506}
4507
4508
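// Check Csel when the 'else' operand is an immediate, covering the special
// cases -2, -1, 0, 1 and 2.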
armvixlf37fdc02014-02-05 13:22:16 +00004509TEST(csel_imm) {
4510 SETUP();
4511
4512 START();
4513 __ Mov(x18, 0);
4514 __ Mov(x19, 0x80000000);
armvixlb0c8ae22014-03-21 14:03:59 +00004515 __ Mov(x20, 0x8000000000000000);
armvixlf37fdc02014-02-05 13:22:16 +00004516
4517 __ Cmp(x18, Operand(0));
4518 __ Csel(w0, w19, -2, ne);
4519 __ Csel(w1, w19, -1, ne);
4520 __ Csel(w2, w19, 0, ne);
4521 __ Csel(w3, w19, 1, ne);
4522 __ Csel(w4, w19, 2, ne);
4523 __ Csel(w5, w19, Operand(w19, ASR, 31), ne);
4524 __ Csel(w6, w19, Operand(w19, ROR, 1), ne);
4525 __ Csel(w7, w19, 3, eq);
4526
4527 __ Csel(x8, x20, -2, ne);
4528 __ Csel(x9, x20, -1, ne);
4529 __ Csel(x10, x20, 0, ne);
4530 __ Csel(x11, x20, 1, ne);
4531 __ Csel(x12, x20, 2, ne);
4532 __ Csel(x13, x20, Operand(x20, ASR, 63), ne);
4533 __ Csel(x14, x20, Operand(x20, ROR, 1), ne);
4534 __ Csel(x15, x20, 3, eq);
4535
4536 END();
4537
4538 RUN();
4539
4540 ASSERT_EQUAL_32(-2, w0);
4541 ASSERT_EQUAL_32(-1, w1);
4542 ASSERT_EQUAL_32(0, w2);
4543 ASSERT_EQUAL_32(1, w3);
4544 ASSERT_EQUAL_32(2, w4);
4545 ASSERT_EQUAL_32(-1, w5);
4546 ASSERT_EQUAL_32(0x40000000, w6);
4547 ASSERT_EQUAL_32(0x80000000, w7);
4548
4549 ASSERT_EQUAL_64(-2, x8);
4550 ASSERT_EQUAL_64(-1, x9);
4551 ASSERT_EQUAL_64(0, x10);
4552 ASSERT_EQUAL_64(1, x11);
4553 ASSERT_EQUAL_64(2, x12);
4554 ASSERT_EQUAL_64(-1, x13);
armvixlb0c8ae22014-03-21 14:03:59 +00004555 ASSERT_EQUAL_64(0x4000000000000000, x14);
4556 ASSERT_EQUAL_64(0x8000000000000000, x15);
armvixlf37fdc02014-02-05 13:22:16 +00004557
4558 TEARDOWN();
4559}
4560
4561
armvixlad96eda2013-06-14 11:42:37 +01004562TEST(lslv) {
4563 SETUP();
armvixlc68cb642014-09-25 18:49:30 +01004564 ALLOW_ASM();
armvixlad96eda2013-06-14 11:42:37 +01004565
armvixlb0c8ae22014-03-21 14:03:59 +00004566 uint64_t value = 0x0123456789abcdef;
armvixlad96eda2013-06-14 11:42:37 +01004567 int shift[] = {1, 3, 5, 9, 17, 33};
4568
4569 START();
4570 __ Mov(x0, value);
4571 __ Mov(w1, shift[0]);
4572 __ Mov(w2, shift[1]);
4573 __ Mov(w3, shift[2]);
4574 __ Mov(w4, shift[3]);
4575 __ Mov(w5, shift[4]);
4576 __ Mov(w6, shift[5]);
4577
armvixlc68cb642014-09-25 18:49:30 +01004578 // The MacroAssembler does not allow zr as an argument.
armvixlad96eda2013-06-14 11:42:37 +01004579 __ lslv(x0, x0, xzr);
4580
4581 __ Lsl(x16, x0, x1);
4582 __ Lsl(x17, x0, x2);
4583 __ Lsl(x18, x0, x3);
4584 __ Lsl(x19, x0, x4);
4585 __ Lsl(x20, x0, x5);
4586 __ Lsl(x21, x0, x6);
4587
4588 __ Lsl(w22, w0, w1);
4589 __ Lsl(w23, w0, w2);
4590 __ Lsl(w24, w0, w3);
4591 __ Lsl(w25, w0, w4);
4592 __ Lsl(w26, w0, w5);
4593 __ Lsl(w27, w0, w6);
4594 END();
4595
4596 RUN();
4597
4598 ASSERT_EQUAL_64(value, x0);
4599 ASSERT_EQUAL_64(value << (shift[0] & 63), x16);
4600 ASSERT_EQUAL_64(value << (shift[1] & 63), x17);
4601 ASSERT_EQUAL_64(value << (shift[2] & 63), x18);
4602 ASSERT_EQUAL_64(value << (shift[3] & 63), x19);
4603 ASSERT_EQUAL_64(value << (shift[4] & 63), x20);
4604 ASSERT_EQUAL_64(value << (shift[5] & 63), x21);
4605 ASSERT_EQUAL_32(value << (shift[0] & 31), w22);
4606 ASSERT_EQUAL_32(value << (shift[1] & 31), w23);
4607 ASSERT_EQUAL_32(value << (shift[2] & 31), w24);
4608 ASSERT_EQUAL_32(value << (shift[3] & 31), w25);
4609 ASSERT_EQUAL_32(value << (shift[4] & 31), w26);
4610 ASSERT_EQUAL_32(value << (shift[5] & 31), w27);
4611
4612 TEARDOWN();
4613}
4614
4615
4616TEST(lsrv) {
4617 SETUP();
armvixlc68cb642014-09-25 18:49:30 +01004618 ALLOW_ASM();
armvixlad96eda2013-06-14 11:42:37 +01004619
armvixlb0c8ae22014-03-21 14:03:59 +00004620 uint64_t value = 0x0123456789abcdef;
armvixlad96eda2013-06-14 11:42:37 +01004621 int shift[] = {1, 3, 5, 9, 17, 33};
4622
4623 START();
4624 __ Mov(x0, value);
4625 __ Mov(w1, shift[0]);
4626 __ Mov(w2, shift[1]);
4627 __ Mov(w3, shift[2]);
4628 __ Mov(w4, shift[3]);
4629 __ Mov(w5, shift[4]);
4630 __ Mov(w6, shift[5]);
4631
armvixlc68cb642014-09-25 18:49:30 +01004632 // The MacroAssembler does not allow zr as an argument.
armvixlad96eda2013-06-14 11:42:37 +01004633 __ lsrv(x0, x0, xzr);
4634
4635 __ Lsr(x16, x0, x1);
4636 __ Lsr(x17, x0, x2);
4637 __ Lsr(x18, x0, x3);
4638 __ Lsr(x19, x0, x4);
4639 __ Lsr(x20, x0, x5);
4640 __ Lsr(x21, x0, x6);
4641
4642 __ Lsr(w22, w0, w1);
4643 __ Lsr(w23, w0, w2);
4644 __ Lsr(w24, w0, w3);
4645 __ Lsr(w25, w0, w4);
4646 __ Lsr(w26, w0, w5);
4647 __ Lsr(w27, w0, w6);
4648 END();
4649
4650 RUN();
4651
4652 ASSERT_EQUAL_64(value, x0);
4653 ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
4654 ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
4655 ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
4656 ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
4657 ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
4658 ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
4659
armvixlb0c8ae22014-03-21 14:03:59 +00004660 value &= 0xffffffff;
armvixlad96eda2013-06-14 11:42:37 +01004661 ASSERT_EQUAL_32(value >> (shift[0] & 31), w22);
4662 ASSERT_EQUAL_32(value >> (shift[1] & 31), w23);
4663 ASSERT_EQUAL_32(value >> (shift[2] & 31), w24);
4664 ASSERT_EQUAL_32(value >> (shift[3] & 31), w25);
4665 ASSERT_EQUAL_32(value >> (shift[4] & 31), w26);
4666 ASSERT_EQUAL_32(value >> (shift[5] & 31), w27);
4667
4668 TEARDOWN();
4669}
4670
4671
4672TEST(asrv) {
4673 SETUP();
armvixlc68cb642014-09-25 18:49:30 +01004674 ALLOW_ASM();
armvixlad96eda2013-06-14 11:42:37 +01004675
armvixlb0c8ae22014-03-21 14:03:59 +00004676 int64_t value = 0xfedcba98fedcba98;
armvixlad96eda2013-06-14 11:42:37 +01004677 int shift[] = {1, 3, 5, 9, 17, 33};
4678
4679 START();
4680 __ Mov(x0, value);
4681 __ Mov(w1, shift[0]);
4682 __ Mov(w2, shift[1]);
4683 __ Mov(w3, shift[2]);
4684 __ Mov(w4, shift[3]);
4685 __ Mov(w5, shift[4]);
4686 __ Mov(w6, shift[5]);
4687
armvixlc68cb642014-09-25 18:49:30 +01004688 // The MacroAssembler does not allow zr as an argument.
armvixlad96eda2013-06-14 11:42:37 +01004689 __ asrv(x0, x0, xzr);
4690
4691 __ Asr(x16, x0, x1);
4692 __ Asr(x17, x0, x2);
4693 __ Asr(x18, x0, x3);
4694 __ Asr(x19, x0, x4);
4695 __ Asr(x20, x0, x5);
4696 __ Asr(x21, x0, x6);
4697
4698 __ Asr(w22, w0, w1);
4699 __ Asr(w23, w0, w2);
4700 __ Asr(w24, w0, w3);
4701 __ Asr(w25, w0, w4);
4702 __ Asr(w26, w0, w5);
4703 __ Asr(w27, w0, w6);
4704 END();
4705
4706 RUN();
4707
4708 ASSERT_EQUAL_64(value, x0);
4709 ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
4710 ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
4711 ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
4712 ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
4713 ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
4714 ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
4715
armvixlb0c8ae22014-03-21 14:03:59 +00004716 int32_t value32 = static_cast<int32_t>(value & 0xffffffff);
armvixlad96eda2013-06-14 11:42:37 +01004717 ASSERT_EQUAL_32(value32 >> (shift[0] & 31), w22);
4718 ASSERT_EQUAL_32(value32 >> (shift[1] & 31), w23);
4719 ASSERT_EQUAL_32(value32 >> (shift[2] & 31), w24);
4720 ASSERT_EQUAL_32(value32 >> (shift[3] & 31), w25);
4721 ASSERT_EQUAL_32(value32 >> (shift[4] & 31), w26);
4722 ASSERT_EQUAL_32(value32 >> (shift[5] & 31), w27);
4723
4724 TEARDOWN();
4725}
4726
4727
4728TEST(rorv) {
4729 SETUP();
armvixlc68cb642014-09-25 18:49:30 +01004730 ALLOW_ASM();
armvixlad96eda2013-06-14 11:42:37 +01004731
armvixlb0c8ae22014-03-21 14:03:59 +00004732 uint64_t value = 0x0123456789abcdef;
armvixlad96eda2013-06-14 11:42:37 +01004733 int shift[] = {4, 8, 12, 16, 24, 36};
4734
4735 START();
4736 __ Mov(x0, value);
4737 __ Mov(w1, shift[0]);
4738 __ Mov(w2, shift[1]);
4739 __ Mov(w3, shift[2]);
4740 __ Mov(w4, shift[3]);
4741 __ Mov(w5, shift[4]);
4742 __ Mov(w6, shift[5]);
4743
armvixlc68cb642014-09-25 18:49:30 +01004744 // The MacroAssembler does not allow zr as an argument.
armvixlad96eda2013-06-14 11:42:37 +01004745 __ rorv(x0, x0, xzr);
4746
4747 __ Ror(x16, x0, x1);
4748 __ Ror(x17, x0, x2);
4749 __ Ror(x18, x0, x3);
4750 __ Ror(x19, x0, x4);
4751 __ Ror(x20, x0, x5);
4752 __ Ror(x21, x0, x6);
4753
4754 __ Ror(w22, w0, w1);
4755 __ Ror(w23, w0, w2);
4756 __ Ror(w24, w0, w3);
4757 __ Ror(w25, w0, w4);
4758 __ Ror(w26, w0, w5);
4759 __ Ror(w27, w0, w6);
4760 END();
4761
4762 RUN();
4763
4764 ASSERT_EQUAL_64(value, x0);
armvixlb0c8ae22014-03-21 14:03:59 +00004765 ASSERT_EQUAL_64(0xf0123456789abcde, x16);
4766 ASSERT_EQUAL_64(0xef0123456789abcd, x17);
4767 ASSERT_EQUAL_64(0xdef0123456789abc, x18);
4768 ASSERT_EQUAL_64(0xcdef0123456789ab, x19);
4769 ASSERT_EQUAL_64(0xabcdef0123456789, x20);
4770 ASSERT_EQUAL_64(0x789abcdef0123456, x21);
armvixlad96eda2013-06-14 11:42:37 +01004771 ASSERT_EQUAL_32(0xf89abcde, w22);
4772 ASSERT_EQUAL_32(0xef89abcd, w23);
4773 ASSERT_EQUAL_32(0xdef89abc, w24);
4774 ASSERT_EQUAL_32(0xcdef89ab, w25);
4775 ASSERT_EQUAL_32(0xabcdef89, w26);
4776 ASSERT_EQUAL_32(0xf89abcde, w27);
4777
4778 TEARDOWN();
4779}
4780
4781
4782TEST(bfm) {
4783 SETUP();
armvixlc68cb642014-09-25 18:49:30 +01004784 ALLOW_ASM();
armvixlad96eda2013-06-14 11:42:37 +01004785
4786 START();
armvixlb0c8ae22014-03-21 14:03:59 +00004787 __ Mov(x1, 0x0123456789abcdef);
armvixlad96eda2013-06-14 11:42:37 +01004788
armvixlb0c8ae22014-03-21 14:03:59 +00004789 __ Mov(x10, 0x8888888888888888);
4790 __ Mov(x11, 0x8888888888888888);
4791 __ Mov(x12, 0x8888888888888888);
4792 __ Mov(x13, 0x8888888888888888);
armvixlad96eda2013-06-14 11:42:37 +01004793 __ Mov(w20, 0x88888888);
4794 __ Mov(w21, 0x88888888);
4795
armvixlc68cb642014-09-25 18:49:30 +01004796 // There is no macro instruction for bfm.
armvixlad96eda2013-06-14 11:42:37 +01004797 __ bfm(x10, x1, 16, 31);
4798 __ bfm(x11, x1, 32, 15);
4799
4800 __ bfm(w20, w1, 16, 23);
4801 __ bfm(w21, w1, 24, 15);
4802
4803 // Aliases.
4804 __ Bfi(x12, x1, 16, 8);
4805 __ Bfxil(x13, x1, 16, 8);
4806 END();
4807
4808 RUN();
4809
4810
armvixlb0c8ae22014-03-21 14:03:59 +00004811 ASSERT_EQUAL_64(0x88888888888889ab, x10);
4812 ASSERT_EQUAL_64(0x8888cdef88888888, x11);
armvixlad96eda2013-06-14 11:42:37 +01004813
4814 ASSERT_EQUAL_32(0x888888ab, w20);
4815 ASSERT_EQUAL_32(0x88cdef88, w21);
4816
armvixlb0c8ae22014-03-21 14:03:59 +00004817 ASSERT_EQUAL_64(0x8888888888ef8888, x12);
4818 ASSERT_EQUAL_64(0x88888888888888ab, x13);
armvixlad96eda2013-06-14 11:42:37 +01004819
4820 TEARDOWN();
4821}
4822
4823
4824TEST(sbfm) {
4825 SETUP();
armvixlc68cb642014-09-25 18:49:30 +01004826 ALLOW_ASM();
armvixlad96eda2013-06-14 11:42:37 +01004827
4828 START();
armvixlb0c8ae22014-03-21 14:03:59 +00004829 __ Mov(x1, 0x0123456789abcdef);
4830 __ Mov(x2, 0xfedcba9876543210);
armvixlad96eda2013-06-14 11:42:37 +01004831
armvixlc68cb642014-09-25 18:49:30 +01004832 // There is no macro instruction for sbfm.
armvixlad96eda2013-06-14 11:42:37 +01004833 __ sbfm(x10, x1, 16, 31);
4834 __ sbfm(x11, x1, 32, 15);
4835 __ sbfm(x12, x1, 32, 47);
4836 __ sbfm(x13, x1, 48, 35);
4837
4838 __ sbfm(w14, w1, 16, 23);
4839 __ sbfm(w15, w1, 24, 15);
4840 __ sbfm(w16, w2, 16, 23);
4841 __ sbfm(w17, w2, 24, 15);
4842
4843 // Aliases.
4844 __ Asr(x18, x1, 32);
4845 __ Asr(x19, x2, 32);
4846 __ Sbfiz(x20, x1, 8, 16);
4847 __ Sbfiz(x21, x2, 8, 16);
4848 __ Sbfx(x22, x1, 8, 16);
4849 __ Sbfx(x23, x2, 8, 16);
armvixlf37fdc02014-02-05 13:22:16 +00004850 __ Sxtb(x24, w1);
armvixlad96eda2013-06-14 11:42:37 +01004851 __ Sxtb(x25, x2);
armvixlf37fdc02014-02-05 13:22:16 +00004852 __ Sxth(x26, w1);
armvixlad96eda2013-06-14 11:42:37 +01004853 __ Sxth(x27, x2);
armvixlf37fdc02014-02-05 13:22:16 +00004854 __ Sxtw(x28, w1);
armvixlad96eda2013-06-14 11:42:37 +01004855 __ Sxtw(x29, x2);
4856 END();
4857
4858 RUN();
4859
4860
armvixlb0c8ae22014-03-21 14:03:59 +00004861 ASSERT_EQUAL_64(0xffffffffffff89ab, x10);
4862 ASSERT_EQUAL_64(0xffffcdef00000000, x11);
4863 ASSERT_EQUAL_64(0x0000000000004567, x12);
4864 ASSERT_EQUAL_64(0x000789abcdef0000, x13);
armvixlad96eda2013-06-14 11:42:37 +01004865
4866 ASSERT_EQUAL_32(0xffffffab, w14);
4867 ASSERT_EQUAL_32(0xffcdef00, w15);
armvixlb0c8ae22014-03-21 14:03:59 +00004868 ASSERT_EQUAL_32(0x00000054, w16);
armvixlad96eda2013-06-14 11:42:37 +01004869 ASSERT_EQUAL_32(0x00321000, w17);
4870
armvixlb0c8ae22014-03-21 14:03:59 +00004871 ASSERT_EQUAL_64(0x0000000001234567, x18);
4872 ASSERT_EQUAL_64(0xfffffffffedcba98, x19);
4873 ASSERT_EQUAL_64(0xffffffffffcdef00, x20);
4874 ASSERT_EQUAL_64(0x0000000000321000, x21);
4875 ASSERT_EQUAL_64(0xffffffffffffabcd, x22);
4876 ASSERT_EQUAL_64(0x0000000000005432, x23);
4877 ASSERT_EQUAL_64(0xffffffffffffffef, x24);
4878 ASSERT_EQUAL_64(0x0000000000000010, x25);
4879 ASSERT_EQUAL_64(0xffffffffffffcdef, x26);
4880 ASSERT_EQUAL_64(0x0000000000003210, x27);
4881 ASSERT_EQUAL_64(0xffffffff89abcdef, x28);
4882 ASSERT_EQUAL_64(0x0000000076543210, x29);
armvixlad96eda2013-06-14 11:42:37 +01004883
4884 TEARDOWN();
4885}
4886
4887
4888TEST(ubfm) {
4889 SETUP();
armvixlc68cb642014-09-25 18:49:30 +01004890 ALLOW_ASM();
armvixlad96eda2013-06-14 11:42:37 +01004891
4892 START();
armvixlb0c8ae22014-03-21 14:03:59 +00004893 __ Mov(x1, 0x0123456789abcdef);
4894 __ Mov(x2, 0xfedcba9876543210);
armvixlad96eda2013-06-14 11:42:37 +01004895
armvixlb0c8ae22014-03-21 14:03:59 +00004896 __ Mov(x10, 0x8888888888888888);
4897 __ Mov(x11, 0x8888888888888888);
armvixlad96eda2013-06-14 11:42:37 +01004898
armvixlc68cb642014-09-25 18:49:30 +01004899 // There is no macro instruction for ubfm.
armvixlad96eda2013-06-14 11:42:37 +01004900 __ ubfm(x10, x1, 16, 31);
4901 __ ubfm(x11, x1, 32, 15);
4902 __ ubfm(x12, x1, 32, 47);
4903 __ ubfm(x13, x1, 48, 35);
4904
4905 __ ubfm(w25, w1, 16, 23);
4906 __ ubfm(w26, w1, 24, 15);
4907 __ ubfm(w27, w2, 16, 23);
4908 __ ubfm(w28, w2, 24, 15);
4909
4910 // Aliases.
4911 __ Lsl(x15, x1, 63);
4912 __ Lsl(x16, x1, 0);
4913 __ Lsr(x17, x1, 32);
4914 __ Ubfiz(x18, x1, 8, 16);
4915 __ Ubfx(x19, x1, 8, 16);
4916 __ Uxtb(x20, x1);
4917 __ Uxth(x21, x1);
4918 __ Uxtw(x22, x1);
4919 END();
4920
4921 RUN();
4922
armvixlb0c8ae22014-03-21 14:03:59 +00004923 ASSERT_EQUAL_64(0x00000000000089ab, x10);
4924 ASSERT_EQUAL_64(0x0000cdef00000000, x11);
4925 ASSERT_EQUAL_64(0x0000000000004567, x12);
4926 ASSERT_EQUAL_64(0x000789abcdef0000, x13);
armvixlad96eda2013-06-14 11:42:37 +01004927
4928 ASSERT_EQUAL_32(0x000000ab, w25);
4929 ASSERT_EQUAL_32(0x00cdef00, w26);
armvixlb0c8ae22014-03-21 14:03:59 +00004930 ASSERT_EQUAL_32(0x00000054, w27);
armvixlad96eda2013-06-14 11:42:37 +01004931 ASSERT_EQUAL_32(0x00321000, w28);
4932
armvixlb0c8ae22014-03-21 14:03:59 +00004933 ASSERT_EQUAL_64(0x8000000000000000, x15);
4934 ASSERT_EQUAL_64(0x0123456789abcdef, x16);
4935 ASSERT_EQUAL_64(0x0000000001234567, x17);
4936 ASSERT_EQUAL_64(0x0000000000cdef00, x18);
4937 ASSERT_EQUAL_64(0x000000000000abcd, x19);
4938 ASSERT_EQUAL_64(0x00000000000000ef, x20);
4939 ASSERT_EQUAL_64(0x000000000000cdef, x21);
4940 ASSERT_EQUAL_64(0x0000000089abcdef, x22);
armvixlad96eda2013-06-14 11:42:37 +01004941
4942 TEARDOWN();
4943}
4944
4945
4946TEST(extr) {
4947 SETUP();
4948
4949 START();
armvixlb0c8ae22014-03-21 14:03:59 +00004950 __ Mov(x1, 0x0123456789abcdef);
4951 __ Mov(x2, 0xfedcba9876543210);
armvixlad96eda2013-06-14 11:42:37 +01004952
4953 __ Extr(w10, w1, w2, 0);
4954 __ Extr(w11, w1, w2, 1);
4955 __ Extr(x12, x2, x1, 2);
4956
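  // Ror with an immediate rotate is an alias of Extr with both source
  // registers the same.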
4957 __ Ror(w13, w1, 0);
4958 __ Ror(w14, w2, 17);
4959 __ Ror(w15, w1, 31);
armvixl4a102ba2014-07-14 09:02:40 +01004960 __ Ror(x18, x2, 0);
4961 __ Ror(x19, x2, 1);
4962 __ Ror(x20, x1, 63);
armvixlad96eda2013-06-14 11:42:37 +01004963 END();
4964
4965 RUN();
4966
4967 ASSERT_EQUAL_64(0x76543210, x10);
4968 ASSERT_EQUAL_64(0xbb2a1908, x11);
armvixlb0c8ae22014-03-21 14:03:59 +00004969 ASSERT_EQUAL_64(0x0048d159e26af37b, x12);
armvixlad96eda2013-06-14 11:42:37 +01004970 ASSERT_EQUAL_64(0x89abcdef, x13);
4971 ASSERT_EQUAL_64(0x19083b2a, x14);
4972 ASSERT_EQUAL_64(0x13579bdf, x15);
armvixl4a102ba2014-07-14 09:02:40 +01004973 ASSERT_EQUAL_64(0xfedcba9876543210, x18);
4974 ASSERT_EQUAL_64(0x7f6e5d4c3b2a1908, x19);
4975 ASSERT_EQUAL_64(0x02468acf13579bde, x20);
armvixlad96eda2013-06-14 11:42:37 +01004976
4977 TEARDOWN();
4978}
4979
4980
4981TEST(fmov_imm) {
4982 SETUP();
4983
4984 START();
4985 __ Fmov(s11, 1.0);
4986 __ Fmov(d22, -13.0);
4987 __ Fmov(s1, 255.0);
4988 __ Fmov(d2, 12.34567);
4989 __ Fmov(s3, 0.0);
4990 __ Fmov(d4, 0.0);
4991 __ Fmov(s5, kFP32PositiveInfinity);
4992 __ Fmov(d6, kFP64NegativeInfinity);
4993 END();
4994
4995 RUN();
4996
4997 ASSERT_EQUAL_FP32(1.0, s11);
4998 ASSERT_EQUAL_FP64(-13.0, d22);
4999 ASSERT_EQUAL_FP32(255.0, s1);
5000 ASSERT_EQUAL_FP64(12.34567, d2);
5001 ASSERT_EQUAL_FP32(0.0, s3);
5002 ASSERT_EQUAL_FP64(0.0, d4);
5003 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
5004 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d6);
5005
5006 TEARDOWN();
5007}
5008
5009
5010TEST(fmov_reg) {
5011 SETUP();
5012
5013 START();
5014 __ Fmov(s20, 1.0);
5015 __ Fmov(w10, s20);
5016 __ Fmov(s30, w10);
5017 __ Fmov(s5, s20);
5018 __ Fmov(d1, -13.0);
5019 __ Fmov(x1, d1);
5020 __ Fmov(d2, x1);
5021 __ Fmov(d4, d1);
armvixlb0c8ae22014-03-21 14:03:59 +00005022 __ Fmov(d6, rawbits_to_double(0x0123456789abcdef));
armvixlad96eda2013-06-14 11:42:37 +01005023 __ Fmov(s6, s6);
5024 END();
5025
5026 RUN();
5027
5028 ASSERT_EQUAL_32(float_to_rawbits(1.0), w10);
5029 ASSERT_EQUAL_FP32(1.0, s30);
5030 ASSERT_EQUAL_FP32(1.0, s5);
5031 ASSERT_EQUAL_64(double_to_rawbits(-13.0), x1);
5032 ASSERT_EQUAL_FP64(-13.0, d2);
5033 ASSERT_EQUAL_FP64(-13.0, d4);
5034 ASSERT_EQUAL_FP32(rawbits_to_float(0x89abcdef), s6);
5035
5036 TEARDOWN();
5037}
5038
5039
5040TEST(fadd) {
5041 SETUP();
5042
5043 START();
armvixlb0c8ae22014-03-21 14:03:59 +00005044 __ Fmov(s14, -0.0f);
5045 __ Fmov(s15, kFP32PositiveInfinity);
5046 __ Fmov(s16, kFP32NegativeInfinity);
5047 __ Fmov(s17, 3.25f);
5048 __ Fmov(s18, 1.0f);
5049 __ Fmov(s19, 0.0f);
armvixlad96eda2013-06-14 11:42:37 +01005050
5051 __ Fmov(d26, -0.0);
5052 __ Fmov(d27, kFP64PositiveInfinity);
5053 __ Fmov(d28, kFP64NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00005054 __ Fmov(d29, 0.0);
armvixlad96eda2013-06-14 11:42:37 +01005055 __ Fmov(d30, -2.0);
5056 __ Fmov(d31, 2.25);
5057
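  // Adding infinities of opposite sign produces the default NaN.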
armvixlb0c8ae22014-03-21 14:03:59 +00005058 __ Fadd(s0, s17, s18);
5059 __ Fadd(s1, s18, s19);
5060 __ Fadd(s2, s14, s18);
5061 __ Fadd(s3, s15, s18);
5062 __ Fadd(s4, s16, s18);
5063 __ Fadd(s5, s15, s16);
5064 __ Fadd(s6, s16, s15);
armvixlad96eda2013-06-14 11:42:37 +01005065
armvixlb0c8ae22014-03-21 14:03:59 +00005066 __ Fadd(d7, d30, d31);
5067 __ Fadd(d8, d29, d31);
5068 __ Fadd(d9, d26, d31);
5069 __ Fadd(d10, d27, d31);
5070 __ Fadd(d11, d28, d31);
5071 __ Fadd(d12, d27, d28);
5072 __ Fadd(d13, d28, d27);
armvixlad96eda2013-06-14 11:42:37 +01005073 END();
5074
5075 RUN();
5076
5077 ASSERT_EQUAL_FP32(4.25, s0);
5078 ASSERT_EQUAL_FP32(1.0, s1);
5079 ASSERT_EQUAL_FP32(1.0, s2);
5080 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
5081 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
armvixlb0c8ae22014-03-21 14:03:59 +00005082 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
5083 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
5084 ASSERT_EQUAL_FP64(0.25, d7);
5085 ASSERT_EQUAL_FP64(2.25, d8);
5086 ASSERT_EQUAL_FP64(2.25, d9);
5087 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d10);
5088 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d11);
5089 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
5090 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
armvixlad96eda2013-06-14 11:42:37 +01005091
5092 TEARDOWN();
5093}
5094
5095
5096TEST(fsub) {
5097 SETUP();
5098
5099 START();
armvixlb0c8ae22014-03-21 14:03:59 +00005100 __ Fmov(s14, -0.0f);
5101 __ Fmov(s15, kFP32PositiveInfinity);
5102 __ Fmov(s16, kFP32NegativeInfinity);
5103 __ Fmov(s17, 3.25f);
5104 __ Fmov(s18, 1.0f);
5105 __ Fmov(s19, 0.0f);
armvixlad96eda2013-06-14 11:42:37 +01005106
5107 __ Fmov(d26, -0.0);
5108 __ Fmov(d27, kFP64PositiveInfinity);
5109 __ Fmov(d28, kFP64NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00005110 __ Fmov(d29, 0.0);
armvixlad96eda2013-06-14 11:42:37 +01005111 __ Fmov(d30, -2.0);
5112 __ Fmov(d31, 2.25);
5113
armvixlb0c8ae22014-03-21 14:03:59 +00005114 __ Fsub(s0, s17, s18);
5115 __ Fsub(s1, s18, s19);
5116 __ Fsub(s2, s14, s18);
5117 __ Fsub(s3, s18, s15);
5118 __ Fsub(s4, s18, s16);
5119 __ Fsub(s5, s15, s15);
5120 __ Fsub(s6, s16, s16);
armvixlad96eda2013-06-14 11:42:37 +01005121
armvixlb0c8ae22014-03-21 14:03:59 +00005122 __ Fsub(d7, d30, d31);
5123 __ Fsub(d8, d29, d31);
5124 __ Fsub(d9, d26, d31);
5125 __ Fsub(d10, d31, d27);
5126 __ Fsub(d11, d31, d28);
5127 __ Fsub(d12, d27, d27);
5128 __ Fsub(d13, d28, d28);
armvixlad96eda2013-06-14 11:42:37 +01005129 END();
5130
5131 RUN();
5132
5133 ASSERT_EQUAL_FP32(2.25, s0);
5134 ASSERT_EQUAL_FP32(1.0, s1);
5135 ASSERT_EQUAL_FP32(-1.0, s2);
5136 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
5137 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
armvixlb0c8ae22014-03-21 14:03:59 +00005138 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
5139 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
5140 ASSERT_EQUAL_FP64(-4.25, d7);
5141 ASSERT_EQUAL_FP64(-2.25, d8);
5142 ASSERT_EQUAL_FP64(-2.25, d9);
5143 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
5144 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
5145 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
5146 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
armvixlad96eda2013-06-14 11:42:37 +01005147
5148 TEARDOWN();
5149}
5150
5151
5152TEST(fmul) {
5153 SETUP();
5154
5155 START();
armvixlb0c8ae22014-03-21 14:03:59 +00005156 __ Fmov(s14, -0.0f);
5157 __ Fmov(s15, kFP32PositiveInfinity);
5158 __ Fmov(s16, kFP32NegativeInfinity);
5159 __ Fmov(s17, 3.25f);
5160 __ Fmov(s18, 2.0f);
5161 __ Fmov(s19, 0.0f);
5162 __ Fmov(s20, -2.0f);
armvixlad96eda2013-06-14 11:42:37 +01005163
5164 __ Fmov(d26, -0.0);
5165 __ Fmov(d27, kFP64PositiveInfinity);
5166 __ Fmov(d28, kFP64NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00005167 __ Fmov(d29, 0.0);
armvixlad96eda2013-06-14 11:42:37 +01005168 __ Fmov(d30, -2.0);
5169 __ Fmov(d31, 2.25);
5170
armvixlb0c8ae22014-03-21 14:03:59 +00005171 __ Fmul(s0, s17, s18);
5172 __ Fmul(s1, s18, s19);
5173 __ Fmul(s2, s14, s14);
5174 __ Fmul(s3, s15, s20);
5175 __ Fmul(s4, s16, s20);
5176 __ Fmul(s5, s15, s19);
5177 __ Fmul(s6, s19, s16);
armvixlad96eda2013-06-14 11:42:37 +01005178
armvixlb0c8ae22014-03-21 14:03:59 +00005179 __ Fmul(d7, d30, d31);
5180 __ Fmul(d8, d29, d31);
5181 __ Fmul(d9, d26, d26);
5182 __ Fmul(d10, d27, d30);
5183 __ Fmul(d11, d28, d30);
5184 __ Fmul(d12, d27, d29);
5185 __ Fmul(d13, d29, d28);
armvixlad96eda2013-06-14 11:42:37 +01005186 END();
5187
5188 RUN();
5189
5190 ASSERT_EQUAL_FP32(6.5, s0);
5191 ASSERT_EQUAL_FP32(0.0, s1);
5192 ASSERT_EQUAL_FP32(0.0, s2);
5193 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
5194 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
armvixlb0c8ae22014-03-21 14:03:59 +00005195 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
5196 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
5197 ASSERT_EQUAL_FP64(-4.5, d7);
5198 ASSERT_EQUAL_FP64(0.0, d8);
5199 ASSERT_EQUAL_FP64(0.0, d9);
5200 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
5201 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
5202 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
5203 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
armvixlad96eda2013-06-14 11:42:37 +01005204
5205 TEARDOWN();
5206}
5207
5208
armvixlb0c8ae22014-03-21 14:03:59 +00005209static void FmaddFmsubHelper(double n, double m, double a,
5210 double fmadd, double fmsub,
5211 double fnmadd, double fnmsub) {
armvixlad96eda2013-06-14 11:42:37 +01005212 SETUP();
armvixlad96eda2013-06-14 11:42:37 +01005213 START();
armvixlad96eda2013-06-14 11:42:37 +01005214
armvixlf37fdc02014-02-05 13:22:16 +00005215 __ Fmov(d0, n);
5216 __ Fmov(d1, m);
5217 __ Fmov(d2, a);
5218 __ Fmadd(d28, d0, d1, d2);
5219 __ Fmsub(d29, d0, d1, d2);
5220 __ Fnmadd(d30, d0, d1, d2);
5221 __ Fnmsub(d31, d0, d1, d2);
armvixlad96eda2013-06-14 11:42:37 +01005222
armvixlad96eda2013-06-14 11:42:37 +01005223 END();
armvixlad96eda2013-06-14 11:42:37 +01005224 RUN();
5225
armvixlf37fdc02014-02-05 13:22:16 +00005226 ASSERT_EQUAL_FP64(fmadd, d28);
5227 ASSERT_EQUAL_FP64(fmsub, d29);
armvixlb0c8ae22014-03-21 14:03:59 +00005228 ASSERT_EQUAL_FP64(fnmadd, d30);
5229 ASSERT_EQUAL_FP64(fnmsub, d31);
armvixlad96eda2013-06-14 11:42:37 +01005230
5231 TEARDOWN();
5232}
5233
5234
armvixlf37fdc02014-02-05 13:22:16 +00005235TEST(fmadd_fmsub_double) {
armvixlb0c8ae22014-03-21 14:03:59 +00005236 // It's hard to check the result of fused operations because the only way to
5237 // calculate the result is to use fma, which is what the simulator uses anyway.
5238 // TODO(jbramley): Add tests to check behaviour against a hardware trace.
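  // For reference, the expected values passed to FmaddFmsubHelper below can be
  // sketched in terms of the C library's fused multiply-add (assuming the
  // default round-to-nearest mode):
  //   fmadd:  fma( n, m,  a)   //  a + (n * m)
  //   fmsub:  fma(-n, m,  a)   //  a - (n * m)
  //   fnmadd: fma(-n, m, -a)   // -a - (n * m)
  //   fnmsub: fma( n, m, -a)   // -a + (n * m)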
armvixlf37fdc02014-02-05 13:22:16 +00005239
armvixlb0c8ae22014-03-21 14:03:59 +00005240 // Basic operation.
5241 FmaddFmsubHelper(1.0, 2.0, 3.0, 5.0, 1.0, -5.0, -1.0);
5242 FmaddFmsubHelper(-1.0, 2.0, 3.0, 1.0, 5.0, -1.0, -5.0);
armvixlf37fdc02014-02-05 13:22:16 +00005243
armvixlb0c8ae22014-03-21 14:03:59 +00005244 // Check the sign of exact zeroes.
5245 // n m a fmadd fmsub fnmadd fnmsub
5246 FmaddFmsubHelper(-0.0, +0.0, -0.0, -0.0, +0.0, +0.0, +0.0);
5247 FmaddFmsubHelper(+0.0, +0.0, -0.0, +0.0, -0.0, +0.0, +0.0);
5248 FmaddFmsubHelper(+0.0, +0.0, +0.0, +0.0, +0.0, -0.0, +0.0);
5249 FmaddFmsubHelper(-0.0, +0.0, +0.0, +0.0, +0.0, +0.0, -0.0);
5250 FmaddFmsubHelper(+0.0, -0.0, -0.0, -0.0, +0.0, +0.0, +0.0);
5251 FmaddFmsubHelper(-0.0, -0.0, -0.0, +0.0, -0.0, +0.0, +0.0);
5252 FmaddFmsubHelper(-0.0, -0.0, +0.0, +0.0, +0.0, -0.0, +0.0);
5253 FmaddFmsubHelper(+0.0, -0.0, +0.0, +0.0, +0.0, +0.0, -0.0);
5254
5255 // Check NaN generation.
5256 FmaddFmsubHelper(kFP64PositiveInfinity, 0.0, 42.0,
5257 kFP64DefaultNaN, kFP64DefaultNaN,
5258 kFP64DefaultNaN, kFP64DefaultNaN);
5259 FmaddFmsubHelper(0.0, kFP64PositiveInfinity, 42.0,
5260 kFP64DefaultNaN, kFP64DefaultNaN,
5261 kFP64DefaultNaN, kFP64DefaultNaN);
5262 FmaddFmsubHelper(kFP64PositiveInfinity, 1.0, kFP64PositiveInfinity,
5263 kFP64PositiveInfinity, // inf + ( inf * 1) = inf
5264 kFP64DefaultNaN, // inf + (-inf * 1) = NaN
5265 kFP64NegativeInfinity, // -inf + (-inf * 1) = -inf
5266 kFP64DefaultNaN); // -inf + ( inf * 1) = NaN
5267 FmaddFmsubHelper(kFP64NegativeInfinity, 1.0, kFP64PositiveInfinity,
5268 kFP64DefaultNaN, // inf + (-inf * 1) = NaN
5269 kFP64PositiveInfinity, // inf + ( inf * 1) = inf
5270 kFP64DefaultNaN, // -inf + ( inf * 1) = NaN
5271 kFP64NegativeInfinity); // -inf + (-inf * 1) = -inf
armvixlf37fdc02014-02-05 13:22:16 +00005272}
5273
5274
armvixlb0c8ae22014-03-21 14:03:59 +00005275static void FmaddFmsubHelper(float n, float m, float a,
5276 float fmadd, float fmsub,
5277 float fnmadd, float fnmsub) {
armvixlf37fdc02014-02-05 13:22:16 +00005278 SETUP();
5279 START();
5280
5281 __ Fmov(s0, n);
5282 __ Fmov(s1, m);
5283 __ Fmov(s2, a);
armvixlb0c8ae22014-03-21 14:03:59 +00005284 __ Fmadd(s28, s0, s1, s2);
5285 __ Fmsub(s29, s0, s1, s2);
5286 __ Fnmadd(s30, s0, s1, s2);
5287 __ Fnmsub(s31, s0, s1, s2);
armvixlf37fdc02014-02-05 13:22:16 +00005288
5289 END();
5290 RUN();
5291
armvixlb0c8ae22014-03-21 14:03:59 +00005292 ASSERT_EQUAL_FP32(fmadd, s28);
5293 ASSERT_EQUAL_FP32(fmsub, s29);
5294 ASSERT_EQUAL_FP32(fnmadd, s30);
5295 ASSERT_EQUAL_FP32(fnmsub, s31);
armvixlf37fdc02014-02-05 13:22:16 +00005296
5297 TEARDOWN();
5298}
5299
5300
5301TEST(fmadd_fmsub_float) {
armvixlb0c8ae22014-03-21 14:03:59 +00005302 // It's hard to check the result of fused operations because the only way to
5303 // calculate the result is to use fma, which is what the simulator uses anyway.
5304 // TODO(jbramley): Add tests to check behaviour against a hardware trace.
armvixlf37fdc02014-02-05 13:22:16 +00005305
armvixlb0c8ae22014-03-21 14:03:59 +00005306 // Basic operation.
5307 FmaddFmsubHelper(1.0f, 2.0f, 3.0f, 5.0f, 1.0f, -5.0f, -1.0f);
5308 FmaddFmsubHelper(-1.0f, 2.0f, 3.0f, 1.0f, 5.0f, -1.0f, -5.0f);
armvixlf37fdc02014-02-05 13:22:16 +00005309
armvixlb0c8ae22014-03-21 14:03:59 +00005310 // Check the sign of exact zeroes.
5311 // n m a fmadd fmsub fnmadd fnmsub
5312 FmaddFmsubHelper(-0.0f, +0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f);
5313 FmaddFmsubHelper(+0.0f, +0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f);
5314 FmaddFmsubHelper(+0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f);
5315 FmaddFmsubHelper(-0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f);
5316 FmaddFmsubHelper(+0.0f, -0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f);
5317 FmaddFmsubHelper(-0.0f, -0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f);
5318 FmaddFmsubHelper(-0.0f, -0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f);
5319 FmaddFmsubHelper(+0.0f, -0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f);
5320
5321 // Check NaN generation.
5322 FmaddFmsubHelper(kFP32PositiveInfinity, 0.0f, 42.0f,
5323 kFP32DefaultNaN, kFP32DefaultNaN,
5324 kFP32DefaultNaN, kFP32DefaultNaN);
5325 FmaddFmsubHelper(0.0f, kFP32PositiveInfinity, 42.0f,
5326 kFP32DefaultNaN, kFP32DefaultNaN,
5327 kFP32DefaultNaN, kFP32DefaultNaN);
5328 FmaddFmsubHelper(kFP32PositiveInfinity, 1.0f, kFP32PositiveInfinity,
5329 kFP32PositiveInfinity, // inf + ( inf * 1) = inf
5330 kFP32DefaultNaN, // inf + (-inf * 1) = NaN
5331 kFP32NegativeInfinity, // -inf + (-inf * 1) = -inf
5332 kFP32DefaultNaN); // -inf + ( inf * 1) = NaN
5333 FmaddFmsubHelper(kFP32NegativeInfinity, 1.0f, kFP32PositiveInfinity,
5334 kFP32DefaultNaN, // inf + (-inf * 1) = NaN
5335 kFP32PositiveInfinity, // inf + ( inf * 1) = inf
5336 kFP32DefaultNaN, // -inf + ( inf * 1) = NaN
5337 kFP32NegativeInfinity); // -inf + (-inf * 1) = -inf
armvixlf37fdc02014-02-05 13:22:16 +00005338}
5339
5340
armvixlb0c8ae22014-03-21 14:03:59 +00005341TEST(fmadd_fmsub_double_nans) {
5342 // Make sure that NaN propagation works correctly.
5343 double s1 = rawbits_to_double(0x7ff5555511111111);
5344 double s2 = rawbits_to_double(0x7ff5555522222222);
5345 double sa = rawbits_to_double(0x7ff55555aaaaaaaa);
5346 double q1 = rawbits_to_double(0x7ffaaaaa11111111);
5347 double q2 = rawbits_to_double(0x7ffaaaaa22222222);
5348 double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
5349 VIXL_ASSERT(IsSignallingNaN(s1));
5350 VIXL_ASSERT(IsSignallingNaN(s2));
5351 VIXL_ASSERT(IsSignallingNaN(sa));
5352 VIXL_ASSERT(IsQuietNaN(q1));
5353 VIXL_ASSERT(IsQuietNaN(q2));
5354 VIXL_ASSERT(IsQuietNaN(qa));
armvixlf37fdc02014-02-05 13:22:16 +00005355
armvixlb0c8ae22014-03-21 14:03:59 +00005356 // The input NaNs after passing through ProcessNaN.
5357 double s1_proc = rawbits_to_double(0x7ffd555511111111);
5358 double s2_proc = rawbits_to_double(0x7ffd555522222222);
5359 double sa_proc = rawbits_to_double(0x7ffd5555aaaaaaaa);
5360 double q1_proc = q1;
5361 double q2_proc = q2;
5362 double qa_proc = qa;
5363 VIXL_ASSERT(IsQuietNaN(s1_proc));
5364 VIXL_ASSERT(IsQuietNaN(s2_proc));
5365 VIXL_ASSERT(IsQuietNaN(sa_proc));
5366 VIXL_ASSERT(IsQuietNaN(q1_proc));
5367 VIXL_ASSERT(IsQuietNaN(q2_proc));
5368 VIXL_ASSERT(IsQuietNaN(qa_proc));
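  // For example, the signalling NaN 0x7ff5555511111111 is made quiet by
  // setting the top mantissa bit (OR with 0x0008000000000000), giving
  // 0x7ffd555511111111, while quiet NaNs pass through ProcessNaN unchanged.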
armvixlf37fdc02014-02-05 13:22:16 +00005369
armvixl5799d6c2014-05-01 11:05:00 +01005370 // The negated NaNs, as they would be produced by ARMv8 hardware.
5371 double s1_proc_neg = rawbits_to_double(0xfffd555511111111);
5372 double sa_proc_neg = rawbits_to_double(0xfffd5555aaaaaaaa);
5373 double q1_proc_neg = rawbits_to_double(0xfffaaaaa11111111);
5374 double qa_proc_neg = rawbits_to_double(0xfffaaaaaaaaaaaaa);
5375 VIXL_ASSERT(IsQuietNaN(s1_proc_neg));
5376 VIXL_ASSERT(IsQuietNaN(sa_proc_neg));
5377 VIXL_ASSERT(IsQuietNaN(q1_proc_neg));
5378 VIXL_ASSERT(IsQuietNaN(qa_proc_neg));
5379
armvixlb0c8ae22014-03-21 14:03:59 +00005380 // Quiet NaNs are propagated.
armvixl5799d6c2014-05-01 11:05:00 +01005381 FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
armvixlb0c8ae22014-03-21 14:03:59 +00005382 FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
armvixl5799d6c2014-05-01 11:05:00 +01005383 FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5384 FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
5385 FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5386 FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5387 FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
armvixlf37fdc02014-02-05 13:22:16 +00005388
armvixlb0c8ae22014-03-21 14:03:59 +00005389 // Signalling NaNs are propagated, and made quiet.
armvixl5799d6c2014-05-01 11:05:00 +01005390 FmaddFmsubHelper(s1, 0, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
armvixlb0c8ae22014-03-21 14:03:59 +00005391 FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
armvixl5799d6c2014-05-01 11:05:00 +01005392 FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5393 FmaddFmsubHelper(s1, s2, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
5394 FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5395 FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5396 FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
armvixlf37fdc02014-02-05 13:22:16 +00005397
armvixlb0c8ae22014-03-21 14:03:59 +00005398 // Signalling NaNs take precedence over quiet NaNs.
armvixl5799d6c2014-05-01 11:05:00 +01005399 FmaddFmsubHelper(s1, q2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
armvixlb0c8ae22014-03-21 14:03:59 +00005400 FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
armvixl5799d6c2014-05-01 11:05:00 +01005401 FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5402 FmaddFmsubHelper(s1, s2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
5403 FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5404 FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5405 FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
armvixlf37fdc02014-02-05 13:22:16 +00005406
armvixlb0c8ae22014-03-21 14:03:59 +00005407 // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
5408 FmaddFmsubHelper(0, kFP64PositiveInfinity, qa,
5409 kFP64DefaultNaN, kFP64DefaultNaN,
5410 kFP64DefaultNaN, kFP64DefaultNaN);
5411 FmaddFmsubHelper(kFP64PositiveInfinity, 0, qa,
5412 kFP64DefaultNaN, kFP64DefaultNaN,
5413 kFP64DefaultNaN, kFP64DefaultNaN);
5414 FmaddFmsubHelper(0, kFP64NegativeInfinity, qa,
5415 kFP64DefaultNaN, kFP64DefaultNaN,
5416 kFP64DefaultNaN, kFP64DefaultNaN);
5417 FmaddFmsubHelper(kFP64NegativeInfinity, 0, qa,
5418 kFP64DefaultNaN, kFP64DefaultNaN,
5419 kFP64DefaultNaN, kFP64DefaultNaN);
5420}
armvixlf37fdc02014-02-05 13:22:16 +00005421
armvixlf37fdc02014-02-05 13:22:16 +00005422
armvixlb0c8ae22014-03-21 14:03:59 +00005423TEST(fmadd_fmsub_float_nans) {
5424 // Make sure that NaN propagation works correctly.
5425 float s1 = rawbits_to_float(0x7f951111);
5426 float s2 = rawbits_to_float(0x7f952222);
5427 float sa = rawbits_to_float(0x7f95aaaa);
5428 float q1 = rawbits_to_float(0x7fea1111);
5429 float q2 = rawbits_to_float(0x7fea2222);
5430 float qa = rawbits_to_float(0x7feaaaaa);
5431 VIXL_ASSERT(IsSignallingNaN(s1));
5432 VIXL_ASSERT(IsSignallingNaN(s2));
5433 VIXL_ASSERT(IsSignallingNaN(sa));
5434 VIXL_ASSERT(IsQuietNaN(q1));
5435 VIXL_ASSERT(IsQuietNaN(q2));
5436 VIXL_ASSERT(IsQuietNaN(qa));
armvixlf37fdc02014-02-05 13:22:16 +00005437
armvixlb0c8ae22014-03-21 14:03:59 +00005438 // The input NaNs after passing through ProcessNaN.
5439 float s1_proc = rawbits_to_float(0x7fd51111);
5440 float s2_proc = rawbits_to_float(0x7fd52222);
5441 float sa_proc = rawbits_to_float(0x7fd5aaaa);
5442 float q1_proc = q1;
5443 float q2_proc = q2;
5444 float qa_proc = qa;
5445 VIXL_ASSERT(IsQuietNaN(s1_proc));
5446 VIXL_ASSERT(IsQuietNaN(s2_proc));
5447 VIXL_ASSERT(IsQuietNaN(sa_proc));
5448 VIXL_ASSERT(IsQuietNaN(q1_proc));
5449 VIXL_ASSERT(IsQuietNaN(q2_proc));
5450 VIXL_ASSERT(IsQuietNaN(qa_proc));
5451
armvixl5799d6c2014-05-01 11:05:00 +01005452 // The negated NaNs, as they would be produced by ARMv8 hardware.
5453 float s1_proc_neg = rawbits_to_float(0xffd51111);
5454 float sa_proc_neg = rawbits_to_float(0xffd5aaaa);
5455 float q1_proc_neg = rawbits_to_float(0xffea1111);
5456 float qa_proc_neg = rawbits_to_float(0xffeaaaaa);
5457 VIXL_ASSERT(IsQuietNaN(s1_proc_neg));
5458 VIXL_ASSERT(IsQuietNaN(sa_proc_neg));
5459 VIXL_ASSERT(IsQuietNaN(q1_proc_neg));
5460 VIXL_ASSERT(IsQuietNaN(qa_proc_neg));
5461
armvixlb0c8ae22014-03-21 14:03:59 +00005462 // Quiet NaNs are propagated.
armvixl5799d6c2014-05-01 11:05:00 +01005463 FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
armvixlb0c8ae22014-03-21 14:03:59 +00005464 FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
armvixl5799d6c2014-05-01 11:05:00 +01005465 FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5466 FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
5467 FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5468 FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5469 FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
armvixlb0c8ae22014-03-21 14:03:59 +00005470
5471 // Signalling NaNs are propagated, and made quiet.
armvixl5799d6c2014-05-01 11:05:00 +01005472 FmaddFmsubHelper(s1, 0, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
armvixlb0c8ae22014-03-21 14:03:59 +00005473 FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
armvixl5799d6c2014-05-01 11:05:00 +01005474 FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5475 FmaddFmsubHelper(s1, s2, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
5476 FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5477 FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5478 FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
armvixlb0c8ae22014-03-21 14:03:59 +00005479
5480 // Signalling NaNs take precedence over quiet NaNs.
armvixl5799d6c2014-05-01 11:05:00 +01005481 FmaddFmsubHelper(s1, q2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
armvixlb0c8ae22014-03-21 14:03:59 +00005482 FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
armvixl5799d6c2014-05-01 11:05:00 +01005483 FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5484 FmaddFmsubHelper(s1, s2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
5485 FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5486 FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5487 FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
armvixlb0c8ae22014-03-21 14:03:59 +00005488
5489 // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
5490 FmaddFmsubHelper(0, kFP32PositiveInfinity, qa,
5491 kFP32DefaultNaN, kFP32DefaultNaN,
5492 kFP32DefaultNaN, kFP32DefaultNaN);
5493 FmaddFmsubHelper(kFP32PositiveInfinity, 0, qa,
5494 kFP32DefaultNaN, kFP32DefaultNaN,
5495 kFP32DefaultNaN, kFP32DefaultNaN);
5496 FmaddFmsubHelper(0, kFP32NegativeInfinity, qa,
5497 kFP32DefaultNaN, kFP32DefaultNaN,
5498 kFP32DefaultNaN, kFP32DefaultNaN);
5499 FmaddFmsubHelper(kFP32NegativeInfinity, 0, qa,
5500 kFP32DefaultNaN, kFP32DefaultNaN,
5501 kFP32DefaultNaN, kFP32DefaultNaN);
armvixlf37fdc02014-02-05 13:22:16 +00005502}
5503
5504
armvixlad96eda2013-06-14 11:42:37 +01005505TEST(fdiv) {
5506 SETUP();
5507
5508 START();
armvixlb0c8ae22014-03-21 14:03:59 +00005509 __ Fmov(s14, -0.0f);
5510 __ Fmov(s15, kFP32PositiveInfinity);
5511 __ Fmov(s16, kFP32NegativeInfinity);
5512 __ Fmov(s17, 3.25f);
5513 __ Fmov(s18, 2.0f);
5514 __ Fmov(s19, 2.0f);
5515 __ Fmov(s20, -2.0f);
armvixlad96eda2013-06-14 11:42:37 +01005516
5517 __ Fmov(d26, -0.0);
5518 __ Fmov(d27, kFP64PositiveInfinity);
5519 __ Fmov(d28, kFP64NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00005520 __ Fmov(d29, 0.0);
armvixlad96eda2013-06-14 11:42:37 +01005521 __ Fmov(d30, -2.0);
5522 __ Fmov(d31, 2.25);
5523
armvixlb0c8ae22014-03-21 14:03:59 +00005524 __ Fdiv(s0, s17, s18);
5525 __ Fdiv(s1, s18, s19);
5526 __ Fdiv(s2, s14, s18);
5527 __ Fdiv(s3, s18, s15);
5528 __ Fdiv(s4, s18, s16);
5529 __ Fdiv(s5, s15, s16);
5530 __ Fdiv(s6, s14, s14);
5531
5532 __ Fdiv(d7, d31, d30);
5533 __ Fdiv(d8, d29, d31);
5534 __ Fdiv(d9, d26, d31);
5535 __ Fdiv(d10, d31, d27);
5536 __ Fdiv(d11, d31, d28);
5537 __ Fdiv(d12, d28, d27);
5538 __ Fdiv(d13, d29, d29);
armvixlad96eda2013-06-14 11:42:37 +01005539 END();
5540
5541 RUN();
5542
armvixlb0c8ae22014-03-21 14:03:59 +00005543 ASSERT_EQUAL_FP32(1.625f, s0);
5544 ASSERT_EQUAL_FP32(1.0f, s1);
5545 ASSERT_EQUAL_FP32(-0.0f, s2);
5546 ASSERT_EQUAL_FP32(0.0f, s3);
5547 ASSERT_EQUAL_FP32(-0.0f, s4);
5548 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
5549 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
5550 ASSERT_EQUAL_FP64(-1.125, d7);
armvixlad96eda2013-06-14 11:42:37 +01005551 ASSERT_EQUAL_FP64(0.0, d8);
5552 ASSERT_EQUAL_FP64(-0.0, d9);
armvixlb0c8ae22014-03-21 14:03:59 +00005553 ASSERT_EQUAL_FP64(0.0, d10);
5554 ASSERT_EQUAL_FP64(-0.0, d11);
5555 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
5556 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
armvixlad96eda2013-06-14 11:42:37 +01005557
5558 TEARDOWN();
5559}
5560
5561
armvixlf37fdc02014-02-05 13:22:16 +00005562static float MinMaxHelper(float n,
5563 float m,
5564 bool min,
5565 float quiet_nan_substitute = 0.0) {
armvixlb0c8ae22014-03-21 14:03:59 +00005566 const uint64_t kFP32QuietNaNMask = 0x00400000;
armvixlf37fdc02014-02-05 13:22:16 +00005567 uint32_t raw_n = float_to_rawbits(n);
5568 uint32_t raw_m = float_to_rawbits(m);
armvixlad96eda2013-06-14 11:42:37 +01005569
armvixlf37fdc02014-02-05 13:22:16 +00005570 if (isnan(n) && ((raw_n & kFP32QuietNaNMask) == 0)) {
5571 // n is signalling NaN.
armvixlb0c8ae22014-03-21 14:03:59 +00005572 return rawbits_to_float(raw_n | kFP32QuietNaNMask);
armvixlf37fdc02014-02-05 13:22:16 +00005573 } else if (isnan(m) && ((raw_m & kFP32QuietNaNMask) == 0)) {
5574 // m is signalling NaN.
armvixlb0c8ae22014-03-21 14:03:59 +00005575 return rawbits_to_float(raw_m | kFP32QuietNaNMask);
armvixlf37fdc02014-02-05 13:22:16 +00005576 } else if (quiet_nan_substitute == 0.0) {
5577 if (isnan(n)) {
5578 // n is quiet NaN.
5579 return n;
5580 } else if (isnan(m)) {
5581 // m is quiet NaN.
5582 return m;
5583 }
5584 } else {
5585 // Substitute n or m if one is quiet, but not both.
5586 if (isnan(n) && !isnan(m)) {
5587 // n is quiet NaN: replace with substitute.
5588 n = quiet_nan_substitute;
5589 } else if (!isnan(n) && isnan(m)) {
5590 // m is quiet NaN: replace with substitute.
5591 m = quiet_nan_substitute;
armvixlad96eda2013-06-14 11:42:37 +01005592 }
5593 }
armvixlad96eda2013-06-14 11:42:37 +01005594
armvixlf37fdc02014-02-05 13:22:16 +00005595 if ((n == 0.0) && (m == 0.0) &&
5596 (copysign(1.0, n) != copysign(1.0, m))) {
5597 return min ? -0.0 : 0.0;
5598 }
armvixlad96eda2013-06-14 11:42:37 +01005599
armvixlf37fdc02014-02-05 13:22:16 +00005600 return min ? fminf(n, m) : fmaxf(n, m);
armvixlad96eda2013-06-14 11:42:37 +01005601}
5602
5603
armvixlf37fdc02014-02-05 13:22:16 +00005604static double MinMaxHelper(double n,
5605 double m,
5606 bool min,
5607 double quiet_nan_substitute = 0.0) {
armvixlb0c8ae22014-03-21 14:03:59 +00005608 const uint64_t kFP64QuietNaNMask = 0x0008000000000000;
armvixlf37fdc02014-02-05 13:22:16 +00005609 uint64_t raw_n = double_to_rawbits(n);
5610 uint64_t raw_m = double_to_rawbits(m);
armvixlad96eda2013-06-14 11:42:37 +01005611
armvixlf37fdc02014-02-05 13:22:16 +00005612 if (isnan(n) && ((raw_n & kFP64QuietNaNMask) == 0)) {
5613 // n is signalling NaN.
armvixlb0c8ae22014-03-21 14:03:59 +00005614 return rawbits_to_double(raw_n | kFP64QuietNaNMask);
armvixlf37fdc02014-02-05 13:22:16 +00005615 } else if (isnan(m) && ((raw_m & kFP64QuietNaNMask) == 0)) {
5616 // m is signalling NaN.
armvixlb0c8ae22014-03-21 14:03:59 +00005617 return rawbits_to_double(raw_m | kFP64QuietNaNMask);
armvixlf37fdc02014-02-05 13:22:16 +00005618 } else if (quiet_nan_substitute == 0.0) {
5619 if (isnan(n)) {
5620 // n is quiet NaN.
5621 return n;
5622 } else if (isnan(m)) {
5623 // m is quiet NaN.
5624 return m;
5625 }
5626 } else {
5627 // Substitute n or m if one is quiet, but not both.
5628 if (isnan(n) && !isnan(m)) {
5629 // n is quiet NaN: replace with substitute.
5630 n = quiet_nan_substitute;
5631 } else if (!isnan(n) && isnan(m)) {
5632 // m is quiet NaN: replace with substitute.
5633 m = quiet_nan_substitute;
armvixlad96eda2013-06-14 11:42:37 +01005634 }
5635 }
armvixlf37fdc02014-02-05 13:22:16 +00005636
5637 if ((n == 0.0) && (m == 0.0) &&
5638 (copysign(1.0, n) != copysign(1.0, m))) {
5639 return min ? -0.0 : 0.0;
5640 }
5641
5642 return min ? fmin(n, m) : fmax(n, m);
5643}
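
// When quiet_nan_substitute is non-zero, MinMaxHelper models Fminnm/Fmaxnm: a
// single quiet NaN operand is treated as "missing" and the other operand wins.
// The tests below pass +infinity (for minnm) or -infinity (for maxnm) as the
// substitute so that the non-NaN operand is always selected.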
5644
5645
5646static void FminFmaxDoubleHelper(double n, double m, double min, double max,
5647 double minnm, double maxnm) {
5648 SETUP();
5649
5650 START();
5651 __ Fmov(d0, n);
5652 __ Fmov(d1, m);
5653 __ Fmin(d28, d0, d1);
5654 __ Fmax(d29, d0, d1);
5655 __ Fminnm(d30, d0, d1);
5656 __ Fmaxnm(d31, d0, d1);
armvixlad96eda2013-06-14 11:42:37 +01005657 END();
5658
5659 RUN();
5660
armvixlf37fdc02014-02-05 13:22:16 +00005661 ASSERT_EQUAL_FP64(min, d28);
5662 ASSERT_EQUAL_FP64(max, d29);
5663 ASSERT_EQUAL_FP64(minnm, d30);
5664 ASSERT_EQUAL_FP64(maxnm, d31);
armvixlad96eda2013-06-14 11:42:37 +01005665
5666 TEARDOWN();
5667}
5668
5669
armvixlf37fdc02014-02-05 13:22:16 +00005670TEST(fmax_fmin_d) {
armvixlb0c8ae22014-03-21 14:03:59 +00005671 // Use non-standard NaNs to check that the payload bits are preserved.
5672 double snan = rawbits_to_double(0x7ff5555512345678);
5673 double qnan = rawbits_to_double(0x7ffaaaaa87654321);
5674
5675 double snan_processed = rawbits_to_double(0x7ffd555512345678);
5676 double qnan_processed = qnan;
5677
5678 VIXL_ASSERT(IsSignallingNaN(snan));
5679 VIXL_ASSERT(IsQuietNaN(qnan));
5680 VIXL_ASSERT(IsQuietNaN(snan_processed));
5681 VIXL_ASSERT(IsQuietNaN(qnan_processed));
5682
armvixlf37fdc02014-02-05 13:22:16 +00005683 // Bootstrap tests.
5684 FminFmaxDoubleHelper(0, 0, 0, 0, 0, 0);
5685 FminFmaxDoubleHelper(0, 1, 0, 1, 0, 1);
5686 FminFmaxDoubleHelper(kFP64PositiveInfinity, kFP64NegativeInfinity,
5687 kFP64NegativeInfinity, kFP64PositiveInfinity,
5688 kFP64NegativeInfinity, kFP64PositiveInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00005689 FminFmaxDoubleHelper(snan, 0,
5690 snan_processed, snan_processed,
5691 snan_processed, snan_processed);
5692 FminFmaxDoubleHelper(0, snan,
5693 snan_processed, snan_processed,
5694 snan_processed, snan_processed);
5695 FminFmaxDoubleHelper(qnan, 0,
5696 qnan_processed, qnan_processed,
armvixlf37fdc02014-02-05 13:22:16 +00005697 0, 0);
armvixlb0c8ae22014-03-21 14:03:59 +00005698 FminFmaxDoubleHelper(0, qnan,
5699 qnan_processed, qnan_processed,
5700 0, 0);
5701 FminFmaxDoubleHelper(qnan, snan,
5702 snan_processed, snan_processed,
5703 snan_processed, snan_processed);
5704 FminFmaxDoubleHelper(snan, qnan,
5705 snan_processed, snan_processed,
5706 snan_processed, snan_processed);
armvixlf37fdc02014-02-05 13:22:16 +00005707
5708 // Iterate over all combinations of inputs.
5709 double inputs[] = { DBL_MAX, DBL_MIN, 1.0, 0.0,
5710 -DBL_MAX, -DBL_MIN, -1.0, -0.0,
5711 kFP64PositiveInfinity, kFP64NegativeInfinity,
5712 kFP64QuietNaN, kFP64SignallingNaN };
5713
5714 const int count = sizeof(inputs) / sizeof(inputs[0]);
5715
5716 for (int in = 0; in < count; in++) {
5717 double n = inputs[in];
5718 for (int im = 0; im < count; im++) {
5719 double m = inputs[im];
5720 FminFmaxDoubleHelper(n, m,
5721 MinMaxHelper(n, m, true),
5722 MinMaxHelper(n, m, false),
5723 MinMaxHelper(n, m, true, kFP64PositiveInfinity),
5724 MinMaxHelper(n, m, false, kFP64NegativeInfinity));
5725 }
5726 }
5727}
5728
5729
5730static void FminFmaxFloatHelper(float n, float m, float min, float max,
5731 float minnm, float maxnm) {
5732 SETUP();
5733
5734 START();
armvixlb0c8ae22014-03-21 14:03:59 +00005735 __ Fmov(s0, n);
5736 __ Fmov(s1, m);
armvixlf37fdc02014-02-05 13:22:16 +00005737 __ Fmin(s28, s0, s1);
5738 __ Fmax(s29, s0, s1);
5739 __ Fminnm(s30, s0, s1);
5740 __ Fmaxnm(s31, s0, s1);
5741 END();
5742
5743 RUN();
5744
5745 ASSERT_EQUAL_FP32(min, s28);
5746 ASSERT_EQUAL_FP32(max, s29);
5747 ASSERT_EQUAL_FP32(minnm, s30);
5748 ASSERT_EQUAL_FP32(maxnm, s31);
5749
5750 TEARDOWN();
5751}
5752
5753
5754TEST(fmax_fmin_s) {
armvixlb0c8ae22014-03-21 14:03:59 +00005755 // Use non-standard NaNs to check that the payload bits are preserved.
5756 float snan = rawbits_to_float(0x7f951234);
5757 float qnan = rawbits_to_float(0x7fea8765);
5758
5759 float snan_processed = rawbits_to_float(0x7fd51234);
5760 float qnan_processed = qnan;
5761
5762 VIXL_ASSERT(IsSignallingNaN(snan));
5763 VIXL_ASSERT(IsQuietNaN(qnan));
5764 VIXL_ASSERT(IsQuietNaN(snan_processed));
5765 VIXL_ASSERT(IsQuietNaN(qnan_processed));
5766
armvixlf37fdc02014-02-05 13:22:16 +00005767 // Bootstrap tests.
5768 FminFmaxFloatHelper(0, 0, 0, 0, 0, 0);
5769 FminFmaxFloatHelper(0, 1, 0, 1, 0, 1);
5770 FminFmaxFloatHelper(kFP32PositiveInfinity, kFP32NegativeInfinity,
5771 kFP32NegativeInfinity, kFP32PositiveInfinity,
5772 kFP32NegativeInfinity, kFP32PositiveInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00005773 FminFmaxFloatHelper(snan, 0,
5774 snan_processed, snan_processed,
5775 snan_processed, snan_processed);
5776 FminFmaxFloatHelper(0, snan,
5777 snan_processed, snan_processed,
5778 snan_processed, snan_processed);
5779 FminFmaxFloatHelper(qnan, 0,
5780 qnan_processed, qnan_processed,
armvixlf37fdc02014-02-05 13:22:16 +00005781 0, 0);
armvixlb0c8ae22014-03-21 14:03:59 +00005782 FminFmaxFloatHelper(0, qnan,
5783 qnan_processed, qnan_processed,
5784 0, 0);
5785 FminFmaxFloatHelper(qnan, snan,
5786 snan_processed, snan_processed,
5787 snan_processed, snan_processed);
5788 FminFmaxFloatHelper(snan, qnan,
5789 snan_processed, snan_processed,
5790 snan_processed, snan_processed);
armvixlf37fdc02014-02-05 13:22:16 +00005791
5792 // Iterate over all combinations of inputs.
5793 float inputs[] = { FLT_MAX, FLT_MIN, 1.0, 0.0,
5794 -FLT_MAX, -FLT_MIN, -1.0, -0.0,
5795 kFP32PositiveInfinity, kFP32NegativeInfinity,
5796 kFP32QuietNaN, kFP32SignallingNaN };
5797
5798 const int count = sizeof(inputs) / sizeof(inputs[0]);
5799
5800 for (int in = 0; in < count; in++) {
5801 float n = inputs[in];
5802 for (int im = 0; im < count; im++) {
5803 float m = inputs[im];
5804 FminFmaxFloatHelper(n, m,
5805 MinMaxHelper(n, m, true),
5806 MinMaxHelper(n, m, false),
5807 MinMaxHelper(n, m, true, kFP32PositiveInfinity),
5808 MinMaxHelper(n, m, false, kFP32NegativeInfinity));
5809 }
5810 }
5811}
5812
5813
armvixlad96eda2013-06-14 11:42:37 +01005814TEST(fccmp) {
5815 SETUP();
armvixlc68cb642014-09-25 18:49:30 +01005816 ALLOW_ASM();
armvixlad96eda2013-06-14 11:42:37 +01005817
5818 START();
5819 __ Fmov(s16, 0.0);
5820 __ Fmov(s17, 0.5);
5821 __ Fmov(d18, -0.5);
5822 __ Fmov(d19, -1.0);
5823 __ Mov(x20, 0);
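  // Fccmp(fn, fm, nzcv, cond): if cond holds for the current flags (set here
  // by the preceding Cmp), NZCV is set by the floating-point comparison of fn
  // and fm; otherwise NZCV is set to the literal nzcv argument. The
  // Cmp(x20, 0) before each Fccmp determines which of the two paths is taken.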
5824
armvixl578645f2013-08-15 17:21:42 +01005825 __ Cmp(x20, 0);
armvixlad96eda2013-06-14 11:42:37 +01005826 __ Fccmp(s16, s16, NoFlag, eq);
5827 __ Mrs(x0, NZCV);
5828
armvixl578645f2013-08-15 17:21:42 +01005829 __ Cmp(x20, 0);
armvixlad96eda2013-06-14 11:42:37 +01005830 __ Fccmp(s16, s16, VFlag, ne);
5831 __ Mrs(x1, NZCV);
5832
armvixl578645f2013-08-15 17:21:42 +01005833 __ Cmp(x20, 0);
armvixlad96eda2013-06-14 11:42:37 +01005834 __ Fccmp(s16, s17, CFlag, ge);
5835 __ Mrs(x2, NZCV);
5836
armvixl578645f2013-08-15 17:21:42 +01005837 __ Cmp(x20, 0);
armvixlad96eda2013-06-14 11:42:37 +01005838 __ Fccmp(s16, s17, CVFlag, lt);
5839 __ Mrs(x3, NZCV);
5840
armvixl578645f2013-08-15 17:21:42 +01005841 __ Cmp(x20, 0);
armvixlad96eda2013-06-14 11:42:37 +01005842 __ Fccmp(d18, d18, ZFlag, le);
5843 __ Mrs(x4, NZCV);
5844
armvixl578645f2013-08-15 17:21:42 +01005845 __ Cmp(x20, 0);
armvixlad96eda2013-06-14 11:42:37 +01005846 __ Fccmp(d18, d18, ZVFlag, gt);
5847 __ Mrs(x5, NZCV);
5848
armvixl578645f2013-08-15 17:21:42 +01005849 __ Cmp(x20, 0);
armvixlad96eda2013-06-14 11:42:37 +01005850 __ Fccmp(d18, d19, ZCVFlag, ls);
5851 __ Mrs(x6, NZCV);
5852
armvixl578645f2013-08-15 17:21:42 +01005853 __ Cmp(x20, 0);
armvixlad96eda2013-06-14 11:42:37 +01005854 __ Fccmp(d18, d19, NFlag, hi);
5855 __ Mrs(x7, NZCV);
armvixl578645f2013-08-15 17:21:42 +01005856
armvixlc68cb642014-09-25 18:49:30 +01005857 // The Macro Assembler does not allow al or nv as a condition.
armvixl578645f2013-08-15 17:21:42 +01005858 __ fccmp(s16, s16, NFlag, al);
5859 __ Mrs(x8, NZCV);
5860
5861 __ fccmp(d18, d18, NFlag, nv);
5862 __ Mrs(x9, NZCV);
armvixlad96eda2013-06-14 11:42:37 +01005863 END();
5864
5865 RUN();
5866
5867 ASSERT_EQUAL_32(ZCFlag, w0);
5868 ASSERT_EQUAL_32(VFlag, w1);
5869 ASSERT_EQUAL_32(NFlag, w2);
5870 ASSERT_EQUAL_32(CVFlag, w3);
5871 ASSERT_EQUAL_32(ZCFlag, w4);
5872 ASSERT_EQUAL_32(ZVFlag, w5);
5873 ASSERT_EQUAL_32(CFlag, w6);
5874 ASSERT_EQUAL_32(NFlag, w7);
armvixl578645f2013-08-15 17:21:42 +01005875 ASSERT_EQUAL_32(ZCFlag, w8);
5876 ASSERT_EQUAL_32(ZCFlag, w9);
armvixlad96eda2013-06-14 11:42:37 +01005877
5878 TEARDOWN();
5879}
5880
5881
5882TEST(fcmp) {
5883 SETUP();
5884
5885 START();
armvixlf37fdc02014-02-05 13:22:16 +00005886
5887 // Some of these tests require a floating-point scratch register assigned to
5888 // the macro assembler, but most do not.
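  // (Only a comparison against an immediate of 0.0 can be encoded directly by
  // fcmp; other immediates, such as 255.0 and 12.3456 below, must first be
  // materialised in a scratch FP register, which is why d0 is temporarily made
  // available around those comparisons.)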
armvixlb0c8ae22014-03-21 14:03:59 +00005889 {
5890 UseScratchRegisterScope temps(&masm);
5891 temps.ExcludeAll();
5892 temps.Include(ip0, ip1);
armvixlf37fdc02014-02-05 13:22:16 +00005893
armvixlb0c8ae22014-03-21 14:03:59 +00005894 __ Fmov(s8, 0.0);
5895 __ Fmov(s9, 0.5);
5896 __ Mov(w18, 0x7f800001); // Single precision NaN.
5897 __ Fmov(s18, w18);
armvixlad96eda2013-06-14 11:42:37 +01005898
armvixlb0c8ae22014-03-21 14:03:59 +00005899 __ Fcmp(s8, s8);
5900 __ Mrs(x0, NZCV);
5901 __ Fcmp(s8, s9);
5902 __ Mrs(x1, NZCV);
5903 __ Fcmp(s9, s8);
5904 __ Mrs(x2, NZCV);
5905 __ Fcmp(s8, s18);
5906 __ Mrs(x3, NZCV);
5907 __ Fcmp(s18, s18);
5908 __ Mrs(x4, NZCV);
5909 __ Fcmp(s8, 0.0);
5910 __ Mrs(x5, NZCV);
5911 temps.Include(d0);
5912 __ Fcmp(s8, 255.0);
5913 temps.Exclude(d0);
5914 __ Mrs(x6, NZCV);
armvixlad96eda2013-06-14 11:42:37 +01005915
armvixlb0c8ae22014-03-21 14:03:59 +00005916 __ Fmov(d19, 0.0);
5917 __ Fmov(d20, 0.5);
5918 __ Mov(x21, 0x7ff0000000000001); // Double precision NaN.
5919 __ Fmov(d21, x21);
armvixlad96eda2013-06-14 11:42:37 +01005920
armvixlb0c8ae22014-03-21 14:03:59 +00005921 __ Fcmp(d19, d19);
5922 __ Mrs(x10, NZCV);
5923 __ Fcmp(d19, d20);
5924 __ Mrs(x11, NZCV);
5925 __ Fcmp(d20, d19);
5926 __ Mrs(x12, NZCV);
5927 __ Fcmp(d19, d21);
5928 __ Mrs(x13, NZCV);
5929 __ Fcmp(d21, d21);
5930 __ Mrs(x14, NZCV);
5931 __ Fcmp(d19, 0.0);
5932 __ Mrs(x15, NZCV);
5933 temps.Include(d0);
5934 __ Fcmp(d19, 12.3456);
5935 temps.Exclude(d0);
5936 __ Mrs(x16, NZCV);
5937 }
5938
armvixlad96eda2013-06-14 11:42:37 +01005939 END();
5940
5941 RUN();
5942
5943 ASSERT_EQUAL_32(ZCFlag, w0);
5944 ASSERT_EQUAL_32(NFlag, w1);
5945 ASSERT_EQUAL_32(CFlag, w2);
5946 ASSERT_EQUAL_32(CVFlag, w3);
5947 ASSERT_EQUAL_32(CVFlag, w4);
5948 ASSERT_EQUAL_32(ZCFlag, w5);
5949 ASSERT_EQUAL_32(NFlag, w6);
5950 ASSERT_EQUAL_32(ZCFlag, w10);
5951 ASSERT_EQUAL_32(NFlag, w11);
5952 ASSERT_EQUAL_32(CFlag, w12);
5953 ASSERT_EQUAL_32(CVFlag, w13);
5954 ASSERT_EQUAL_32(CVFlag, w14);
5955 ASSERT_EQUAL_32(ZCFlag, w15);
5956 ASSERT_EQUAL_32(NFlag, w16);
5957
5958 TEARDOWN();
5959}
5960
5961
5962TEST(fcsel) {
5963 SETUP();
armvixlc68cb642014-09-25 18:49:30 +01005964 ALLOW_ASM();
armvixlad96eda2013-06-14 11:42:37 +01005965
5966 START();
5967 __ Mov(x16, 0);
5968 __ Fmov(s16, 1.0);
5969 __ Fmov(s17, 2.0);
5970 __ Fmov(d18, 3.0);
5971 __ Fmov(d19, 4.0);
5972
armvixl578645f2013-08-15 17:21:42 +01005973 __ Cmp(x16, 0);
armvixlad96eda2013-06-14 11:42:37 +01005974 __ Fcsel(s0, s16, s17, eq);
5975 __ Fcsel(s1, s16, s17, ne);
5976 __ Fcsel(d2, d18, d19, eq);
5977 __ Fcsel(d3, d18, d19, ne);
armvixlc68cb642014-09-25 18:49:30 +01005978 // The Macro Assembler does not allow al or nv as a condition.
armvixl578645f2013-08-15 17:21:42 +01005979 __ fcsel(s4, s16, s17, al);
5980 __ fcsel(d5, d18, d19, nv);
armvixlad96eda2013-06-14 11:42:37 +01005981 END();
5982
5983 RUN();
5984
5985 ASSERT_EQUAL_FP32(1.0, s0);
5986 ASSERT_EQUAL_FP32(2.0, s1);
5987 ASSERT_EQUAL_FP64(3.0, d2);
5988 ASSERT_EQUAL_FP64(4.0, d3);
armvixl578645f2013-08-15 17:21:42 +01005989 ASSERT_EQUAL_FP32(1.0, s4);
5990 ASSERT_EQUAL_FP64(3.0, d5);
armvixlad96eda2013-06-14 11:42:37 +01005991
5992 TEARDOWN();
5993}
5994
5995
5996TEST(fneg) {
5997 SETUP();
5998
5999 START();
6000 __ Fmov(s16, 1.0);
6001 __ Fmov(s17, 0.0);
6002 __ Fmov(s18, kFP32PositiveInfinity);
6003 __ Fmov(d19, 1.0);
6004 __ Fmov(d20, 0.0);
6005 __ Fmov(d21, kFP64PositiveInfinity);
6006
6007 __ Fneg(s0, s16);
6008 __ Fneg(s1, s0);
6009 __ Fneg(s2, s17);
6010 __ Fneg(s3, s2);
6011 __ Fneg(s4, s18);
6012 __ Fneg(s5, s4);
6013 __ Fneg(d6, d19);
6014 __ Fneg(d7, d6);
6015 __ Fneg(d8, d20);
6016 __ Fneg(d9, d8);
6017 __ Fneg(d10, d21);
6018 __ Fneg(d11, d10);
6019 END();
6020
6021 RUN();
6022
6023 ASSERT_EQUAL_FP32(-1.0, s0);
6024 ASSERT_EQUAL_FP32(1.0, s1);
6025 ASSERT_EQUAL_FP32(-0.0, s2);
6026 ASSERT_EQUAL_FP32(0.0, s3);
6027 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
6028 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
6029 ASSERT_EQUAL_FP64(-1.0, d6);
6030 ASSERT_EQUAL_FP64(1.0, d7);
6031 ASSERT_EQUAL_FP64(-0.0, d8);
6032 ASSERT_EQUAL_FP64(0.0, d9);
6033 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
6034 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
6035
6036 TEARDOWN();
6037}
6038
6039
6040TEST(fabs) {
6041 SETUP();
6042
6043 START();
6044 __ Fmov(s16, -1.0);
6045 __ Fmov(s17, -0.0);
6046 __ Fmov(s18, kFP32NegativeInfinity);
6047 __ Fmov(d19, -1.0);
6048 __ Fmov(d20, -0.0);
6049 __ Fmov(d21, kFP64NegativeInfinity);
6050
6051 __ Fabs(s0, s16);
6052 __ Fabs(s1, s0);
6053 __ Fabs(s2, s17);
6054 __ Fabs(s3, s18);
6055 __ Fabs(d4, d19);
6056 __ Fabs(d5, d4);
6057 __ Fabs(d6, d20);
6058 __ Fabs(d7, d21);
6059 END();
6060
6061 RUN();
6062
6063 ASSERT_EQUAL_FP32(1.0, s0);
6064 ASSERT_EQUAL_FP32(1.0, s1);
6065 ASSERT_EQUAL_FP32(0.0, s2);
6066 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
6067 ASSERT_EQUAL_FP64(1.0, d4);
6068 ASSERT_EQUAL_FP64(1.0, d5);
6069 ASSERT_EQUAL_FP64(0.0, d6);
6070 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);
6071
6072 TEARDOWN();
6073}
6074
6075
6076TEST(fsqrt) {
6077 SETUP();
6078
6079 START();
6080 __ Fmov(s16, 0.0);
6081 __ Fmov(s17, 1.0);
6082 __ Fmov(s18, 0.25);
6083 __ Fmov(s19, 65536.0);
6084 __ Fmov(s20, -0.0);
6085 __ Fmov(s21, kFP32PositiveInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00006086 __ Fmov(s22, -1.0);
6087 __ Fmov(d23, 0.0);
6088 __ Fmov(d24, 1.0);
6089 __ Fmov(d25, 0.25);
6090 __ Fmov(d26, 4294967296.0);
6091 __ Fmov(d27, -0.0);
6092 __ Fmov(d28, kFP64PositiveInfinity);
6093 __ Fmov(d29, -1.0);
armvixlad96eda2013-06-14 11:42:37 +01006094
6095 __ Fsqrt(s0, s16);
6096 __ Fsqrt(s1, s17);
6097 __ Fsqrt(s2, s18);
6098 __ Fsqrt(s3, s19);
6099 __ Fsqrt(s4, s20);
6100 __ Fsqrt(s5, s21);
armvixlb0c8ae22014-03-21 14:03:59 +00006101 __ Fsqrt(s6, s22);
armvixlad96eda2013-06-14 11:42:37 +01006102 __ Fsqrt(d7, d23);
6103 __ Fsqrt(d8, d24);
6104 __ Fsqrt(d9, d25);
6105 __ Fsqrt(d10, d26);
6106 __ Fsqrt(d11, d27);
armvixlb0c8ae22014-03-21 14:03:59 +00006107 __ Fsqrt(d12, d28);
6108 __ Fsqrt(d13, d29);
armvixlad96eda2013-06-14 11:42:37 +01006109 END();
6110
6111 RUN();
6112
6113 ASSERT_EQUAL_FP32(0.0, s0);
6114 ASSERT_EQUAL_FP32(1.0, s1);
6115 ASSERT_EQUAL_FP32(0.5, s2);
6116 ASSERT_EQUAL_FP32(256.0, s3);
6117 ASSERT_EQUAL_FP32(-0.0, s4);
6118 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
armvixlb0c8ae22014-03-21 14:03:59 +00006119 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
6120 ASSERT_EQUAL_FP64(0.0, d7);
6121 ASSERT_EQUAL_FP64(1.0, d8);
6122 ASSERT_EQUAL_FP64(0.5, d9);
6123 ASSERT_EQUAL_FP64(65536.0, d10);
6124 ASSERT_EQUAL_FP64(-0.0, d11);
6125 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d12);
6126 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
armvixlad96eda2013-06-14 11:42:37 +01006127
6128 TEARDOWN();
6129}
6130
6131
armvixlf37fdc02014-02-05 13:22:16 +00006132TEST(frinta) {
6133 SETUP();
6134
6135 START();
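  // Frinta rounds to the nearest integral value, with ties rounded away from
  // zero: 1.5 -> 2.0, 2.5 -> 3.0, -1.5 -> -2.0 and -2.5 -> -3.0. The sign of
  // zero is preserved, so -0.2 rounds to -0.0.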
6136 __ Fmov(s16, 1.0);
6137 __ Fmov(s17, 1.1);
6138 __ Fmov(s18, 1.5);
6139 __ Fmov(s19, 1.9);
6140 __ Fmov(s20, 2.5);
6141 __ Fmov(s21, -1.5);
6142 __ Fmov(s22, -2.5);
6143 __ Fmov(s23, kFP32PositiveInfinity);
6144 __ Fmov(s24, kFP32NegativeInfinity);
6145 __ Fmov(s25, 0.0);
6146 __ Fmov(s26, -0.0);
armvixl5799d6c2014-05-01 11:05:00 +01006147 __ Fmov(s27, -0.2);
armvixlf37fdc02014-02-05 13:22:16 +00006148
6149 __ Frinta(s0, s16);
6150 __ Frinta(s1, s17);
6151 __ Frinta(s2, s18);
6152 __ Frinta(s3, s19);
6153 __ Frinta(s4, s20);
6154 __ Frinta(s5, s21);
6155 __ Frinta(s6, s22);
6156 __ Frinta(s7, s23);
6157 __ Frinta(s8, s24);
6158 __ Frinta(s9, s25);
6159 __ Frinta(s10, s26);
armvixl5799d6c2014-05-01 11:05:00 +01006160 __ Frinta(s11, s27);
armvixlf37fdc02014-02-05 13:22:16 +00006161
6162 __ Fmov(d16, 1.0);
6163 __ Fmov(d17, 1.1);
6164 __ Fmov(d18, 1.5);
6165 __ Fmov(d19, 1.9);
6166 __ Fmov(d20, 2.5);
6167 __ Fmov(d21, -1.5);
6168 __ Fmov(d22, -2.5);
6169 __ Fmov(d23, kFP32PositiveInfinity);
6170 __ Fmov(d24, kFP32NegativeInfinity);
6171 __ Fmov(d25, 0.0);
6172 __ Fmov(d26, -0.0);
armvixl5799d6c2014-05-01 11:05:00 +01006173 __ Fmov(d27, -0.2);
armvixlf37fdc02014-02-05 13:22:16 +00006174
armvixl5799d6c2014-05-01 11:05:00 +01006175 __ Frinta(d12, d16);
6176 __ Frinta(d13, d17);
6177 __ Frinta(d14, d18);
6178 __ Frinta(d15, d19);
6179 __ Frinta(d16, d20);
6180 __ Frinta(d17, d21);
6181 __ Frinta(d18, d22);
6182 __ Frinta(d19, d23);
6183 __ Frinta(d20, d24);
6184 __ Frinta(d21, d25);
6185 __ Frinta(d22, d26);
6186 __ Frinta(d23, d27);
armvixlf37fdc02014-02-05 13:22:16 +00006187 END();
6188
6189 RUN();
6190
6191 ASSERT_EQUAL_FP32(1.0, s0);
6192 ASSERT_EQUAL_FP32(1.0, s1);
6193 ASSERT_EQUAL_FP32(2.0, s2);
6194 ASSERT_EQUAL_FP32(2.0, s3);
6195 ASSERT_EQUAL_FP32(3.0, s4);
6196 ASSERT_EQUAL_FP32(-2.0, s5);
6197 ASSERT_EQUAL_FP32(-3.0, s6);
6198 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
6199 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
6200 ASSERT_EQUAL_FP32(0.0, s9);
6201 ASSERT_EQUAL_FP32(-0.0, s10);
armvixl5799d6c2014-05-01 11:05:00 +01006202 ASSERT_EQUAL_FP32(-0.0, s11);
armvixlf37fdc02014-02-05 13:22:16 +00006203 ASSERT_EQUAL_FP64(1.0, d12);
armvixl5799d6c2014-05-01 11:05:00 +01006204 ASSERT_EQUAL_FP64(1.0, d13);
armvixlf37fdc02014-02-05 13:22:16 +00006205 ASSERT_EQUAL_FP64(2.0, d14);
armvixl5799d6c2014-05-01 11:05:00 +01006206 ASSERT_EQUAL_FP64(2.0, d15);
6207 ASSERT_EQUAL_FP64(3.0, d16);
6208 ASSERT_EQUAL_FP64(-2.0, d17);
6209 ASSERT_EQUAL_FP64(-3.0, d18);
6210 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d19);
6211 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d20);
6212 ASSERT_EQUAL_FP64(0.0, d21);
6213 ASSERT_EQUAL_FP64(-0.0, d22);
6214 ASSERT_EQUAL_FP64(-0.0, d23);
6215
6216 TEARDOWN();
6217}
6218
6219
6220TEST(frintm) {
6221 SETUP();
6222
6223 START();
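  // Frintm rounds towards minus infinity (floor): 1.9 -> 1.0, 2.5 -> 2.0,
  // -1.5 -> -2.0, -2.5 -> -3.0 and -0.2 -> -1.0.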
6224 __ Fmov(s16, 1.0);
6225 __ Fmov(s17, 1.1);
6226 __ Fmov(s18, 1.5);
6227 __ Fmov(s19, 1.9);
6228 __ Fmov(s20, 2.5);
6229 __ Fmov(s21, -1.5);
6230 __ Fmov(s22, -2.5);
6231 __ Fmov(s23, kFP32PositiveInfinity);
6232 __ Fmov(s24, kFP32NegativeInfinity);
6233 __ Fmov(s25, 0.0);
6234 __ Fmov(s26, -0.0);
6235 __ Fmov(s27, -0.2);
6236
6237 __ Frintm(s0, s16);
6238 __ Frintm(s1, s17);
6239 __ Frintm(s2, s18);
6240 __ Frintm(s3, s19);
6241 __ Frintm(s4, s20);
6242 __ Frintm(s5, s21);
6243 __ Frintm(s6, s22);
6244 __ Frintm(s7, s23);
6245 __ Frintm(s8, s24);
6246 __ Frintm(s9, s25);
6247 __ Frintm(s10, s26);
6248 __ Frintm(s11, s27);
6249
6250 __ Fmov(d16, 1.0);
6251 __ Fmov(d17, 1.1);
6252 __ Fmov(d18, 1.5);
6253 __ Fmov(d19, 1.9);
6254 __ Fmov(d20, 2.5);
6255 __ Fmov(d21, -1.5);
6256 __ Fmov(d22, -2.5);
6257 __ Fmov(d23, kFP32PositiveInfinity);
6258 __ Fmov(d24, kFP32NegativeInfinity);
6259 __ Fmov(d25, 0.0);
6260 __ Fmov(d26, -0.0);
6261 __ Fmov(d27, -0.2);
6262
6263 __ Frintm(d12, d16);
6264 __ Frintm(d13, d17);
6265 __ Frintm(d14, d18);
6266 __ Frintm(d15, d19);
6267 __ Frintm(d16, d20);
6268 __ Frintm(d17, d21);
6269 __ Frintm(d18, d22);
6270 __ Frintm(d19, d23);
6271 __ Frintm(d20, d24);
6272 __ Frintm(d21, d25);
6273 __ Frintm(d22, d26);
6274 __ Frintm(d23, d27);
6275 END();
6276
6277 RUN();
6278
6279 ASSERT_EQUAL_FP32(1.0, s0);
6280 ASSERT_EQUAL_FP32(1.0, s1);
6281 ASSERT_EQUAL_FP32(1.0, s2);
6282 ASSERT_EQUAL_FP32(1.0, s3);
6283 ASSERT_EQUAL_FP32(2.0, s4);
6284 ASSERT_EQUAL_FP32(-2.0, s5);
6285 ASSERT_EQUAL_FP32(-3.0, s6);
6286 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
6287 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
6288 ASSERT_EQUAL_FP32(0.0, s9);
6289 ASSERT_EQUAL_FP32(-0.0, s10);
6290 ASSERT_EQUAL_FP32(-1.0, s11);
6291 ASSERT_EQUAL_FP64(1.0, d12);
6292 ASSERT_EQUAL_FP64(1.0, d13);
6293 ASSERT_EQUAL_FP64(1.0, d14);
6294 ASSERT_EQUAL_FP64(1.0, d15);
6295 ASSERT_EQUAL_FP64(2.0, d16);
6296 ASSERT_EQUAL_FP64(-2.0, d17);
6297 ASSERT_EQUAL_FP64(-3.0, d18);
6298 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d19);
6299 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d20);
6300 ASSERT_EQUAL_FP64(0.0, d21);
6301 ASSERT_EQUAL_FP64(-0.0, d22);
6302 ASSERT_EQUAL_FP64(-1.0, d23);
armvixlf37fdc02014-02-05 13:22:16 +00006303
6304 TEARDOWN();
6305}
6306
6307
armvixlad96eda2013-06-14 11:42:37 +01006308TEST(frintn) {
6309 SETUP();
6310
6311 START();
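  // Frintn rounds to the nearest integral value, with ties rounded to even:
  // 1.5 -> 2.0 but 2.5 -> 2.0, and -1.5 -> -2.0 but -2.5 -> -2.0. The sign of
  // zero is preserved, so -0.2 rounds to -0.0.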
6312 __ Fmov(s16, 1.0);
6313 __ Fmov(s17, 1.1);
6314 __ Fmov(s18, 1.5);
6315 __ Fmov(s19, 1.9);
6316 __ Fmov(s20, 2.5);
6317 __ Fmov(s21, -1.5);
6318 __ Fmov(s22, -2.5);
6319 __ Fmov(s23, kFP32PositiveInfinity);
6320 __ Fmov(s24, kFP32NegativeInfinity);
6321 __ Fmov(s25, 0.0);
6322 __ Fmov(s26, -0.0);
armvixl5799d6c2014-05-01 11:05:00 +01006323 __ Fmov(s27, -0.2);
armvixlad96eda2013-06-14 11:42:37 +01006324
6325 __ Frintn(s0, s16);
6326 __ Frintn(s1, s17);
6327 __ Frintn(s2, s18);
6328 __ Frintn(s3, s19);
6329 __ Frintn(s4, s20);
6330 __ Frintn(s5, s21);
6331 __ Frintn(s6, s22);
6332 __ Frintn(s7, s23);
6333 __ Frintn(s8, s24);
6334 __ Frintn(s9, s25);
6335 __ Frintn(s10, s26);
armvixl5799d6c2014-05-01 11:05:00 +01006336 __ Frintn(s11, s27);
armvixlad96eda2013-06-14 11:42:37 +01006337
6338 __ Fmov(d16, 1.0);
6339 __ Fmov(d17, 1.1);
6340 __ Fmov(d18, 1.5);
6341 __ Fmov(d19, 1.9);
6342 __ Fmov(d20, 2.5);
6343 __ Fmov(d21, -1.5);
6344 __ Fmov(d22, -2.5);
6345 __ Fmov(d23, kFP32PositiveInfinity);
6346 __ Fmov(d24, kFP32NegativeInfinity);
6347 __ Fmov(d25, 0.0);
6348 __ Fmov(d26, -0.0);
armvixl5799d6c2014-05-01 11:05:00 +01006349 __ Fmov(d27, -0.2);
armvixlad96eda2013-06-14 11:42:37 +01006350
armvixl5799d6c2014-05-01 11:05:00 +01006351 __ Frintn(d12, d16);
6352 __ Frintn(d13, d17);
6353 __ Frintn(d14, d18);
6354 __ Frintn(d15, d19);
6355 __ Frintn(d16, d20);
6356 __ Frintn(d17, d21);
6357 __ Frintn(d18, d22);
6358 __ Frintn(d19, d23);
6359 __ Frintn(d20, d24);
6360 __ Frintn(d21, d25);
6361 __ Frintn(d22, d26);
6362 __ Frintn(d23, d27);
armvixlad96eda2013-06-14 11:42:37 +01006363 END();
6364
6365 RUN();
6366
6367 ASSERT_EQUAL_FP32(1.0, s0);
6368 ASSERT_EQUAL_FP32(1.0, s1);
6369 ASSERT_EQUAL_FP32(2.0, s2);
6370 ASSERT_EQUAL_FP32(2.0, s3);
6371 ASSERT_EQUAL_FP32(2.0, s4);
6372 ASSERT_EQUAL_FP32(-2.0, s5);
6373 ASSERT_EQUAL_FP32(-2.0, s6);
6374 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
6375 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
6376 ASSERT_EQUAL_FP32(0.0, s9);
6377 ASSERT_EQUAL_FP32(-0.0, s10);
armvixl5799d6c2014-05-01 11:05:00 +01006378 ASSERT_EQUAL_FP32(-0.0, s11);
armvixlad96eda2013-06-14 11:42:37 +01006379 ASSERT_EQUAL_FP64(1.0, d12);
armvixl5799d6c2014-05-01 11:05:00 +01006380 ASSERT_EQUAL_FP64(1.0, d13);
armvixlad96eda2013-06-14 11:42:37 +01006381 ASSERT_EQUAL_FP64(2.0, d14);
6382 ASSERT_EQUAL_FP64(2.0, d15);
armvixl5799d6c2014-05-01 11:05:00 +01006383 ASSERT_EQUAL_FP64(2.0, d16);
armvixlad96eda2013-06-14 11:42:37 +01006384 ASSERT_EQUAL_FP64(-2.0, d17);
armvixl5799d6c2014-05-01 11:05:00 +01006385 ASSERT_EQUAL_FP64(-2.0, d18);
6386 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d19);
6387 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d20);
6388 ASSERT_EQUAL_FP64(0.0, d21);
6389 ASSERT_EQUAL_FP64(-0.0, d22);
6390 ASSERT_EQUAL_FP64(-0.0, d23);
armvixlad96eda2013-06-14 11:42:37 +01006391
6392 TEARDOWN();
6393}
6394
6395
6396TEST(frintz) {
6397 SETUP();
6398
6399 START();
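  // Frintz rounds towards zero (truncation): 1.9 -> 1.0, 2.5 -> 2.0,
  // -1.5 -> -1.0 and -2.5 -> -2.0.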
6400 __ Fmov(s16, 1.0);
6401 __ Fmov(s17, 1.1);
6402 __ Fmov(s18, 1.5);
6403 __ Fmov(s19, 1.9);
6404 __ Fmov(s20, 2.5);
6405 __ Fmov(s21, -1.5);
6406 __ Fmov(s22, -2.5);
6407 __ Fmov(s23, kFP32PositiveInfinity);
6408 __ Fmov(s24, kFP32NegativeInfinity);
6409 __ Fmov(s25, 0.0);
6410 __ Fmov(s26, -0.0);
6411
6412 __ Frintz(s0, s16);
6413 __ Frintz(s1, s17);
6414 __ Frintz(s2, s18);
6415 __ Frintz(s3, s19);
6416 __ Frintz(s4, s20);
6417 __ Frintz(s5, s21);
6418 __ Frintz(s6, s22);
6419 __ Frintz(s7, s23);
6420 __ Frintz(s8, s24);
6421 __ Frintz(s9, s25);
6422 __ Frintz(s10, s26);
6423
6424 __ Fmov(d16, 1.0);
6425 __ Fmov(d17, 1.1);
6426 __ Fmov(d18, 1.5);
6427 __ Fmov(d19, 1.9);
6428 __ Fmov(d20, 2.5);
6429 __ Fmov(d21, -1.5);
6430 __ Fmov(d22, -2.5);
6431 __ Fmov(d23, kFP32PositiveInfinity);
6432 __ Fmov(d24, kFP32NegativeInfinity);
6433 __ Fmov(d25, 0.0);
6434 __ Fmov(d26, -0.0);
6435
6436 __ Frintz(d11, d16);
6437 __ Frintz(d12, d17);
6438 __ Frintz(d13, d18);
6439 __ Frintz(d14, d19);
6440 __ Frintz(d15, d20);
6441 __ Frintz(d16, d21);
6442 __ Frintz(d17, d22);
6443 __ Frintz(d18, d23);
6444 __ Frintz(d19, d24);
6445 __ Frintz(d20, d25);
6446 __ Frintz(d21, d26);
6447 END();
6448
6449 RUN();
6450
6451 ASSERT_EQUAL_FP32(1.0, s0);
6452 ASSERT_EQUAL_FP32(1.0, s1);
6453 ASSERT_EQUAL_FP32(1.0, s2);
6454 ASSERT_EQUAL_FP32(1.0, s3);
6455 ASSERT_EQUAL_FP32(2.0, s4);
6456 ASSERT_EQUAL_FP32(-1.0, s5);
6457 ASSERT_EQUAL_FP32(-2.0, s6);
6458 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
6459 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
6460 ASSERT_EQUAL_FP32(0.0, s9);
6461 ASSERT_EQUAL_FP32(-0.0, s10);
6462 ASSERT_EQUAL_FP64(1.0, d11);
6463 ASSERT_EQUAL_FP64(1.0, d12);
6464 ASSERT_EQUAL_FP64(1.0, d13);
6465 ASSERT_EQUAL_FP64(1.0, d14);
6466 ASSERT_EQUAL_FP64(2.0, d15);
6467 ASSERT_EQUAL_FP64(-1.0, d16);
6468 ASSERT_EQUAL_FP64(-2.0, d17);
6469 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
6470 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
6471 ASSERT_EQUAL_FP64(0.0, d20);
6472 ASSERT_EQUAL_FP64(-0.0, d21);
6473
6474 TEARDOWN();
6475}
6476
6477
armvixl578645f2013-08-15 17:21:42 +01006478TEST(fcvt_ds) {
armvixlad96eda2013-06-14 11:42:37 +01006479 SETUP();
6480
6481 START();
6482 __ Fmov(s16, 1.0);
6483 __ Fmov(s17, 1.1);
6484 __ Fmov(s18, 1.5);
6485 __ Fmov(s19, 1.9);
6486 __ Fmov(s20, 2.5);
6487 __ Fmov(s21, -1.5);
6488 __ Fmov(s22, -2.5);
6489 __ Fmov(s23, kFP32PositiveInfinity);
6490 __ Fmov(s24, kFP32NegativeInfinity);
6491 __ Fmov(s25, 0.0);
6492 __ Fmov(s26, -0.0);
armvixl578645f2013-08-15 17:21:42 +01006493 __ Fmov(s27, FLT_MAX);
6494 __ Fmov(s28, FLT_MIN);
6495 __ Fmov(s29, rawbits_to_float(0x7fc12345)); // Quiet NaN.
6496 __ Fmov(s30, rawbits_to_float(0x7f812345)); // Signalling NaN.
armvixlad96eda2013-06-14 11:42:37 +01006497
6498 __ Fcvt(d0, s16);
6499 __ Fcvt(d1, s17);
6500 __ Fcvt(d2, s18);
6501 __ Fcvt(d3, s19);
6502 __ Fcvt(d4, s20);
6503 __ Fcvt(d5, s21);
6504 __ Fcvt(d6, s22);
6505 __ Fcvt(d7, s23);
6506 __ Fcvt(d8, s24);
6507 __ Fcvt(d9, s25);
6508 __ Fcvt(d10, s26);
armvixl578645f2013-08-15 17:21:42 +01006509 __ Fcvt(d11, s27);
6510 __ Fcvt(d12, s28);
6511 __ Fcvt(d13, s29);
6512 __ Fcvt(d14, s30);
armvixlad96eda2013-06-14 11:42:37 +01006513 END();
6514
6515 RUN();
6516
6517 ASSERT_EQUAL_FP64(1.0f, d0);
6518 ASSERT_EQUAL_FP64(1.1f, d1);
6519 ASSERT_EQUAL_FP64(1.5f, d2);
6520 ASSERT_EQUAL_FP64(1.9f, d3);
6521 ASSERT_EQUAL_FP64(2.5f, d4);
6522 ASSERT_EQUAL_FP64(-1.5f, d5);
6523 ASSERT_EQUAL_FP64(-2.5f, d6);
6524 ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);
6525 ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8);
6526 ASSERT_EQUAL_FP64(0.0f, d9);
6527 ASSERT_EQUAL_FP64(-0.0f, d10);
armvixl578645f2013-08-15 17:21:42 +01006528 ASSERT_EQUAL_FP64(FLT_MAX, d11);
6529 ASSERT_EQUAL_FP64(FLT_MIN, d12);
6530
6531 // Check that the NaN payload is preserved according to A64 conversion rules:
6532 // - The sign bit is preserved.
6533 // - The top bit of the mantissa is forced to 1 (making it a quiet NaN).
6534 // - The remaining mantissa bits are copied until they run out.
6535 // - The low-order bits that haven't already been assigned are set to 0.
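  // For example, the quiet NaN 0x7fc12345 carries the 23-bit mantissa
  // 0x412345; those bits are copied into the top of the double's 52-bit
  // mantissa and the rest is zero-filled, giving 0x7ff82468a0000000. The
  // signalling NaN 0x7f812345 produces the same result because the top
  // mantissa bit is forced to 1.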
6536 ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d13);
6537 ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d14);
armvixlad96eda2013-06-14 11:42:37 +01006538
6539 TEARDOWN();
6540}
6541
6542
armvixl578645f2013-08-15 17:21:42 +01006543TEST(fcvt_sd) {
armvixl5799d6c2014-05-01 11:05:00 +01006544 // Test simple conversions here. Complex behaviour (such as rounding
6545 // specifics) are tested in the simulator tests.
armvixl578645f2013-08-15 17:21:42 +01006546
armvixl5799d6c2014-05-01 11:05:00 +01006547 SETUP();
armvixl578645f2013-08-15 17:21:42 +01006548
armvixl5799d6c2014-05-01 11:05:00 +01006549 START();
6550 __ Fmov(d16, 1.0);
6551 __ Fmov(d17, 1.1);
6552 __ Fmov(d18, 1.5);
6553 __ Fmov(d19, 1.9);
6554 __ Fmov(d20, 2.5);
6555 __ Fmov(d21, -1.5);
6556 __ Fmov(d22, -2.5);
6557 __ Fmov(d23, kFP32PositiveInfinity);
6558 __ Fmov(d24, kFP32NegativeInfinity);
6559 __ Fmov(d25, 0.0);
6560 __ Fmov(d26, -0.0);
6561 __ Fmov(d27, FLT_MAX);
6562 __ Fmov(d28, FLT_MIN);
6563 __ Fmov(d29, rawbits_to_double(0x7ff82468a0000000)); // Quiet NaN.
6564 __ Fmov(d30, rawbits_to_double(0x7ff02468a0000000)); // Signalling NaN.
armvixl578645f2013-08-15 17:21:42 +01006565
armvixl5799d6c2014-05-01 11:05:00 +01006566 __ Fcvt(s0, d16);
6567 __ Fcvt(s1, d17);
6568 __ Fcvt(s2, d18);
6569 __ Fcvt(s3, d19);
6570 __ Fcvt(s4, d20);
6571 __ Fcvt(s5, d21);
6572 __ Fcvt(s6, d22);
6573 __ Fcvt(s7, d23);
6574 __ Fcvt(s8, d24);
6575 __ Fcvt(s9, d25);
6576 __ Fcvt(s10, d26);
6577 __ Fcvt(s11, d27);
6578 __ Fcvt(s12, d28);
6579 __ Fcvt(s13, d29);
6580 __ Fcvt(s14, d30);
6581 END();
armvixl578645f2013-08-15 17:21:42 +01006582
armvixl5799d6c2014-05-01 11:05:00 +01006583 RUN();
armvixl578645f2013-08-15 17:21:42 +01006584
armvixl5799d6c2014-05-01 11:05:00 +01006585 ASSERT_EQUAL_FP32(1.0f, s0);
6586 ASSERT_EQUAL_FP32(1.1f, s1);
6587 ASSERT_EQUAL_FP32(1.5f, s2);
6588 ASSERT_EQUAL_FP32(1.9f, s3);
6589 ASSERT_EQUAL_FP32(2.5f, s4);
6590 ASSERT_EQUAL_FP32(-1.5f, s5);
6591 ASSERT_EQUAL_FP32(-2.5f, s6);
6592 ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
6593 ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
6594 ASSERT_EQUAL_FP32(0.0f, s9);
6595 ASSERT_EQUAL_FP32(-0.0f, s10);
6596 ASSERT_EQUAL_FP32(FLT_MAX, s11);
6597 ASSERT_EQUAL_FP32(FLT_MIN, s12);
armvixl578645f2013-08-15 17:21:42 +01006598
armvixl5799d6c2014-05-01 11:05:00 +01006599 // Check that the NaN payload is preserved according to A64 conversion rules:
6600 // - The sign bit is preserved.
6601 // - The top bit of the mantissa is forced to 1 (making it a quiet NaN).
6602 // - The remaining mantissa bits are copied until they run out.
6603 // - The low-order bits that haven't already been assigned are set to 0.
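  // Going from double to single there are no spare bits to zero-fill: the top
  // 23 mantissa bits of 0x7ff82468a0000000 are kept (0x412345) and the
  // remaining low-order payload bits are discarded, giving 0x7fc12345.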
6604 ASSERT_EQUAL_FP32(rawbits_to_float(0x7fc12345), s13);
6605 ASSERT_EQUAL_FP32(rawbits_to_float(0x7fc12345), s14);
armvixl578645f2013-08-15 17:21:42 +01006606
armvixl5799d6c2014-05-01 11:05:00 +01006607 TEARDOWN();
armvixl578645f2013-08-15 17:21:42 +01006608}
6609
6610
armvixlf37fdc02014-02-05 13:22:16 +00006611TEST(fcvtas) {
6612 SETUP();
6613
6614 START();
6615 __ Fmov(s0, 1.0);
6616 __ Fmov(s1, 1.1);
6617 __ Fmov(s2, 2.5);
6618 __ Fmov(s3, -2.5);
6619 __ Fmov(s4, kFP32PositiveInfinity);
6620 __ Fmov(s5, kFP32NegativeInfinity);
6621 __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
6622 __ Fneg(s7, s6); // Smallest float > INT32_MIN.
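  // INT32_MAX (0x7fffffff) is not exactly representable as a float; the
  // nearest representable values are 0x7fffff80 (s6) and 2^31. Fcvtas rounds
  // to the nearest integer with ties away from zero and saturates out-of-range
  // results, so the infinities convert to INT32_MAX and INT32_MIN (and to
  // INT64_MAX/INT64_MIN for the X-register conversions).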
6623 __ Fmov(d8, 1.0);
6624 __ Fmov(d9, 1.1);
6625 __ Fmov(d10, 2.5);
6626 __ Fmov(d11, -2.5);
6627 __ Fmov(d12, kFP64PositiveInfinity);
6628 __ Fmov(d13, kFP64NegativeInfinity);
6629 __ Fmov(d14, kWMaxInt - 1);
6630 __ Fmov(d15, kWMinInt + 1);
6631 __ Fmov(s17, 1.1);
6632 __ Fmov(s18, 2.5);
6633 __ Fmov(s19, -2.5);
6634 __ Fmov(s20, kFP32PositiveInfinity);
6635 __ Fmov(s21, kFP32NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00006636 __ Fmov(s22, 0x7fffff8000000000); // Largest float < INT64_MAX.
armvixlf37fdc02014-02-05 13:22:16 +00006637 __ Fneg(s23, s22); // Smallest float > INT64_MIN.
6638 __ Fmov(d24, 1.1);
6639 __ Fmov(d25, 2.5);
6640 __ Fmov(d26, -2.5);
6641 __ Fmov(d27, kFP64PositiveInfinity);
6642 __ Fmov(d28, kFP64NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00006643 __ Fmov(d29, 0x7ffffffffffffc00); // Largest double < INT64_MAX.
armvixlf37fdc02014-02-05 13:22:16 +00006644 __ Fneg(d30, d29); // Smallest double > INT64_MIN.
6645
6646 __ Fcvtas(w0, s0);
6647 __ Fcvtas(w1, s1);
6648 __ Fcvtas(w2, s2);
6649 __ Fcvtas(w3, s3);
6650 __ Fcvtas(w4, s4);
6651 __ Fcvtas(w5, s5);
6652 __ Fcvtas(w6, s6);
6653 __ Fcvtas(w7, s7);
6654 __ Fcvtas(w8, d8);
6655 __ Fcvtas(w9, d9);
6656 __ Fcvtas(w10, d10);
6657 __ Fcvtas(w11, d11);
6658 __ Fcvtas(w12, d12);
6659 __ Fcvtas(w13, d13);
6660 __ Fcvtas(w14, d14);
6661 __ Fcvtas(w15, d15);
6662 __ Fcvtas(x17, s17);
6663 __ Fcvtas(x18, s18);
6664 __ Fcvtas(x19, s19);
6665 __ Fcvtas(x20, s20);
6666 __ Fcvtas(x21, s21);
6667 __ Fcvtas(x22, s22);
6668 __ Fcvtas(x23, s23);
6669 __ Fcvtas(x24, d24);
6670 __ Fcvtas(x25, d25);
6671 __ Fcvtas(x26, d26);
6672 __ Fcvtas(x27, d27);
6673 __ Fcvtas(x28, d28);
6674 __ Fcvtas(x29, d29);
6675 __ Fcvtas(x30, d30);
6676 END();
6677
6678 RUN();
6679
6680 ASSERT_EQUAL_64(1, x0);
6681 ASSERT_EQUAL_64(1, x1);
6682 ASSERT_EQUAL_64(3, x2);
6683 ASSERT_EQUAL_64(0xfffffffd, x3);
6684 ASSERT_EQUAL_64(0x7fffffff, x4);
6685 ASSERT_EQUAL_64(0x80000000, x5);
6686 ASSERT_EQUAL_64(0x7fffff80, x6);
6687 ASSERT_EQUAL_64(0x80000080, x7);
6688 ASSERT_EQUAL_64(1, x8);
6689 ASSERT_EQUAL_64(1, x9);
6690 ASSERT_EQUAL_64(3, x10);
6691 ASSERT_EQUAL_64(0xfffffffd, x11);
6692 ASSERT_EQUAL_64(0x7fffffff, x12);
6693 ASSERT_EQUAL_64(0x80000000, x13);
6694 ASSERT_EQUAL_64(0x7ffffffe, x14);
6695 ASSERT_EQUAL_64(0x80000001, x15);
6696 ASSERT_EQUAL_64(1, x17);
6697 ASSERT_EQUAL_64(3, x18);
armvixlb0c8ae22014-03-21 14:03:59 +00006698 ASSERT_EQUAL_64(0xfffffffffffffffd, x19);
6699 ASSERT_EQUAL_64(0x7fffffffffffffff, x20);
6700 ASSERT_EQUAL_64(0x8000000000000000, x21);
6701 ASSERT_EQUAL_64(0x7fffff8000000000, x22);
6702 ASSERT_EQUAL_64(0x8000008000000000, x23);
armvixlf37fdc02014-02-05 13:22:16 +00006703 ASSERT_EQUAL_64(1, x24);
6704 ASSERT_EQUAL_64(3, x25);
armvixlb0c8ae22014-03-21 14:03:59 +00006705 ASSERT_EQUAL_64(0xfffffffffffffffd, x26);
6706 ASSERT_EQUAL_64(0x7fffffffffffffff, x27);
6707 ASSERT_EQUAL_64(0x8000000000000000, x28);
6708 ASSERT_EQUAL_64(0x7ffffffffffffc00, x29);
6709 ASSERT_EQUAL_64(0x8000000000000400, x30);
armvixlf37fdc02014-02-05 13:22:16 +00006710
6711 TEARDOWN();
6712}
6713
6714
6715TEST(fcvtau) {
6716 SETUP();
6717
6718 START();
6719 __ Fmov(s0, 1.0);
6720 __ Fmov(s1, 1.1);
6721 __ Fmov(s2, 2.5);
6722 __ Fmov(s3, -2.5);
6723 __ Fmov(s4, kFP32PositiveInfinity);
6724 __ Fmov(s5, kFP32NegativeInfinity);
6725 __ Fmov(s6, 0xffffff00); // Largest float < UINT32_MAX.
6726 __ Fmov(d8, 1.0);
6727 __ Fmov(d9, 1.1);
6728 __ Fmov(d10, 2.5);
6729 __ Fmov(d11, -2.5);
6730 __ Fmov(d12, kFP64PositiveInfinity);
6731 __ Fmov(d13, kFP64NegativeInfinity);
6732 __ Fmov(d14, 0xfffffffe);
6733 __ Fmov(s16, 1.0);
6734 __ Fmov(s17, 1.1);
6735 __ Fmov(s18, 2.5);
6736 __ Fmov(s19, -2.5);
6737 __ Fmov(s20, kFP32PositiveInfinity);
6738 __ Fmov(s21, kFP32NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00006739 __ Fmov(s22, 0xffffff0000000000); // Largest float < UINT64_MAX.
armvixlf37fdc02014-02-05 13:22:16 +00006740 __ Fmov(d24, 1.1);
6741 __ Fmov(d25, 2.5);
6742 __ Fmov(d26, -2.5);
6743 __ Fmov(d27, kFP64PositiveInfinity);
6744 __ Fmov(d28, kFP64NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00006745 __ Fmov(d29, 0xfffffffffffff800); // Largest double < UINT64_MAX.
6746 __ Fmov(s30, 0x100000000);
armvixlf37fdc02014-02-05 13:22:16 +00006747
6748 __ Fcvtau(w0, s0);
6749 __ Fcvtau(w1, s1);
6750 __ Fcvtau(w2, s2);
6751 __ Fcvtau(w3, s3);
6752 __ Fcvtau(w4, s4);
6753 __ Fcvtau(w5, s5);
6754 __ Fcvtau(w6, s6);
6755 __ Fcvtau(w8, d8);
6756 __ Fcvtau(w9, d9);
6757 __ Fcvtau(w10, d10);
6758 __ Fcvtau(w11, d11);
6759 __ Fcvtau(w12, d12);
6760 __ Fcvtau(w13, d13);
6761 __ Fcvtau(w14, d14);
6762 __ Fcvtau(w15, d15);
6763 __ Fcvtau(x16, s16);
6764 __ Fcvtau(x17, s17);
6765 __ Fcvtau(x18, s18);
6766 __ Fcvtau(x19, s19);
6767 __ Fcvtau(x20, s20);
6768 __ Fcvtau(x21, s21);
6769 __ Fcvtau(x22, s22);
6770 __ Fcvtau(x24, d24);
6771 __ Fcvtau(x25, d25);
6772 __ Fcvtau(x26, d26);
6773 __ Fcvtau(x27, d27);
6774 __ Fcvtau(x28, d28);
6775 __ Fcvtau(x29, d29);
6776 __ Fcvtau(w30, s30);
6777 END();
6778
6779 RUN();
6780
6781 ASSERT_EQUAL_64(1, x0);
6782 ASSERT_EQUAL_64(1, x1);
6783 ASSERT_EQUAL_64(3, x2);
6784 ASSERT_EQUAL_64(0, x3);
6785 ASSERT_EQUAL_64(0xffffffff, x4);
6786 ASSERT_EQUAL_64(0, x5);
6787 ASSERT_EQUAL_64(0xffffff00, x6);
6788 ASSERT_EQUAL_64(1, x8);
6789 ASSERT_EQUAL_64(1, x9);
6790 ASSERT_EQUAL_64(3, x10);
6791 ASSERT_EQUAL_64(0, x11);
6792 ASSERT_EQUAL_64(0xffffffff, x12);
6793 ASSERT_EQUAL_64(0, x13);
6794 ASSERT_EQUAL_64(0xfffffffe, x14);
6795 ASSERT_EQUAL_64(1, x16);
6796 ASSERT_EQUAL_64(1, x17);
6797 ASSERT_EQUAL_64(3, x18);
6798 ASSERT_EQUAL_64(0, x19);
armvixlb0c8ae22014-03-21 14:03:59 +00006799 ASSERT_EQUAL_64(0xffffffffffffffff, x20);
armvixlf37fdc02014-02-05 13:22:16 +00006800 ASSERT_EQUAL_64(0, x21);
armvixlb0c8ae22014-03-21 14:03:59 +00006801 ASSERT_EQUAL_64(0xffffff0000000000, x22);
armvixlf37fdc02014-02-05 13:22:16 +00006802 ASSERT_EQUAL_64(1, x24);
6803 ASSERT_EQUAL_64(3, x25);
6804 ASSERT_EQUAL_64(0, x26);
armvixlb0c8ae22014-03-21 14:03:59 +00006805 ASSERT_EQUAL_64(0xffffffffffffffff, x27);
armvixlf37fdc02014-02-05 13:22:16 +00006806 ASSERT_EQUAL_64(0, x28);
armvixlb0c8ae22014-03-21 14:03:59 +00006807 ASSERT_EQUAL_64(0xfffffffffffff800, x29);
armvixlf37fdc02014-02-05 13:22:16 +00006808 ASSERT_EQUAL_64(0xffffffff, x30);
6809
6810 TEARDOWN();
6811}
6812
6813
armvixlad96eda2013-06-14 11:42:37 +01006814TEST(fcvtms) {
6815 SETUP();
6816
6817 START();
6818 __ Fmov(s0, 1.0);
6819 __ Fmov(s1, 1.1);
6820 __ Fmov(s2, 1.5);
6821 __ Fmov(s3, -1.5);
6822 __ Fmov(s4, kFP32PositiveInfinity);
6823 __ Fmov(s5, kFP32NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00006824 __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
6825 __ Fneg(s7, s6); // Smallest float > INT32_MIN.
armvixlad96eda2013-06-14 11:42:37 +01006826 __ Fmov(d8, 1.0);
6827 __ Fmov(d9, 1.1);
6828 __ Fmov(d10, 1.5);
6829 __ Fmov(d11, -1.5);
6830 __ Fmov(d12, kFP64PositiveInfinity);
6831 __ Fmov(d13, kFP64NegativeInfinity);
6832 __ Fmov(d14, kWMaxInt - 1);
6833 __ Fmov(d15, kWMinInt + 1);
6834 __ Fmov(s17, 1.1);
6835 __ Fmov(s18, 1.5);
6836 __ Fmov(s19, -1.5);
6837 __ Fmov(s20, kFP32PositiveInfinity);
6838 __ Fmov(s21, kFP32NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00006839 __ Fmov(s22, 0x7fffff8000000000); // Largest float < INT64_MAX.
6840 __ Fneg(s23, s22); // Smallest float > INT64_MIN.
armvixlad96eda2013-06-14 11:42:37 +01006841 __ Fmov(d24, 1.1);
6842 __ Fmov(d25, 1.5);
6843 __ Fmov(d26, -1.5);
6844 __ Fmov(d27, kFP64PositiveInfinity);
6845 __ Fmov(d28, kFP64NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00006846 __ Fmov(d29, 0x7ffffffffffffc00); // Largest double < INT64_MAX.
6847 __ Fneg(d30, d29); // Smallest double > INT64_MIN.
armvixlad96eda2013-06-14 11:42:37 +01006848
6849 __ Fcvtms(w0, s0);
6850 __ Fcvtms(w1, s1);
6851 __ Fcvtms(w2, s2);
6852 __ Fcvtms(w3, s3);
6853 __ Fcvtms(w4, s4);
6854 __ Fcvtms(w5, s5);
6855 __ Fcvtms(w6, s6);
6856 __ Fcvtms(w7, s7);
6857 __ Fcvtms(w8, d8);
6858 __ Fcvtms(w9, d9);
6859 __ Fcvtms(w10, d10);
6860 __ Fcvtms(w11, d11);
6861 __ Fcvtms(w12, d12);
6862 __ Fcvtms(w13, d13);
6863 __ Fcvtms(w14, d14);
6864 __ Fcvtms(w15, d15);
6865 __ Fcvtms(x17, s17);
6866 __ Fcvtms(x18, s18);
6867 __ Fcvtms(x19, s19);
6868 __ Fcvtms(x20, s20);
6869 __ Fcvtms(x21, s21);
6870 __ Fcvtms(x22, s22);
6871 __ Fcvtms(x23, s23);
6872 __ Fcvtms(x24, d24);
6873 __ Fcvtms(x25, d25);
6874 __ Fcvtms(x26, d26);
6875 __ Fcvtms(x27, d27);
6876 __ Fcvtms(x28, d28);
6877 __ Fcvtms(x29, d29);
6878 __ Fcvtms(x30, d30);
6879 END();
6880
6881 RUN();
6882
6883 ASSERT_EQUAL_64(1, x0);
6884 ASSERT_EQUAL_64(1, x1);
6885 ASSERT_EQUAL_64(1, x2);
6886 ASSERT_EQUAL_64(0xfffffffe, x3);
6887 ASSERT_EQUAL_64(0x7fffffff, x4);
6888 ASSERT_EQUAL_64(0x80000000, x5);
6889 ASSERT_EQUAL_64(0x7fffff80, x6);
6890 ASSERT_EQUAL_64(0x80000080, x7);
6891 ASSERT_EQUAL_64(1, x8);
6892 ASSERT_EQUAL_64(1, x9);
6893 ASSERT_EQUAL_64(1, x10);
6894 ASSERT_EQUAL_64(0xfffffffe, x11);
6895 ASSERT_EQUAL_64(0x7fffffff, x12);
6896 ASSERT_EQUAL_64(0x80000000, x13);
6897 ASSERT_EQUAL_64(0x7ffffffe, x14);
6898 ASSERT_EQUAL_64(0x80000001, x15);
6899 ASSERT_EQUAL_64(1, x17);
6900 ASSERT_EQUAL_64(1, x18);
armvixlb0c8ae22014-03-21 14:03:59 +00006901 ASSERT_EQUAL_64(0xfffffffffffffffe, x19);
6902 ASSERT_EQUAL_64(0x7fffffffffffffff, x20);
6903 ASSERT_EQUAL_64(0x8000000000000000, x21);
6904 ASSERT_EQUAL_64(0x7fffff8000000000, x22);
6905 ASSERT_EQUAL_64(0x8000008000000000, x23);
armvixlad96eda2013-06-14 11:42:37 +01006906 ASSERT_EQUAL_64(1, x24);
6907 ASSERT_EQUAL_64(1, x25);
armvixlb0c8ae22014-03-21 14:03:59 +00006908 ASSERT_EQUAL_64(0xfffffffffffffffe, x26);
6909 ASSERT_EQUAL_64(0x7fffffffffffffff, x27);
6910 ASSERT_EQUAL_64(0x8000000000000000, x28);
6911 ASSERT_EQUAL_64(0x7ffffffffffffc00, x29);
6912 ASSERT_EQUAL_64(0x8000000000000400, x30);
armvixlad96eda2013-06-14 11:42:37 +01006913
6914 TEARDOWN();
6915}
6916
6917
6918TEST(fcvtmu) {
6919 SETUP();
6920
6921 START();
6922 __ Fmov(s0, 1.0);
6923 __ Fmov(s1, 1.1);
6924 __ Fmov(s2, 1.5);
6925 __ Fmov(s3, -1.5);
6926 __ Fmov(s4, kFP32PositiveInfinity);
6927 __ Fmov(s5, kFP32NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00006928 __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
6929 __ Fneg(s7, s6); // Smallest float > INT32_MIN.
armvixlad96eda2013-06-14 11:42:37 +01006930 __ Fmov(d8, 1.0);
6931 __ Fmov(d9, 1.1);
6932 __ Fmov(d10, 1.5);
6933 __ Fmov(d11, -1.5);
6934 __ Fmov(d12, kFP64PositiveInfinity);
6935 __ Fmov(d13, kFP64NegativeInfinity);
6936 __ Fmov(d14, kWMaxInt - 1);
6937 __ Fmov(d15, kWMinInt + 1);
6938 __ Fmov(s17, 1.1);
6939 __ Fmov(s18, 1.5);
6940 __ Fmov(s19, -1.5);
6941 __ Fmov(s20, kFP32PositiveInfinity);
6942 __ Fmov(s21, kFP32NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00006943 __ Fmov(s22, 0x7fffff8000000000); // Largest float < INT64_MAX.
6944 __ Fneg(s23, s22); // Smallest float > INT64_MIN.
armvixlad96eda2013-06-14 11:42:37 +01006945 __ Fmov(d24, 1.1);
6946 __ Fmov(d25, 1.5);
6947 __ Fmov(d26, -1.5);
6948 __ Fmov(d27, kFP64PositiveInfinity);
6949 __ Fmov(d28, kFP64NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00006950 __ Fmov(d29, 0x7ffffffffffffc00); // Largest double < INT64_MAX.
6951 __ Fneg(d30, d29); // Smallest double > INT64_MIN.
armvixlad96eda2013-06-14 11:42:37 +01006952
6953 __ Fcvtmu(w0, s0);
6954 __ Fcvtmu(w1, s1);
6955 __ Fcvtmu(w2, s2);
6956 __ Fcvtmu(w3, s3);
6957 __ Fcvtmu(w4, s4);
6958 __ Fcvtmu(w5, s5);
6959 __ Fcvtmu(w6, s6);
6960 __ Fcvtmu(w7, s7);
6961 __ Fcvtmu(w8, d8);
6962 __ Fcvtmu(w9, d9);
6963 __ Fcvtmu(w10, d10);
6964 __ Fcvtmu(w11, d11);
6965 __ Fcvtmu(w12, d12);
6966 __ Fcvtmu(w13, d13);
6967 __ Fcvtmu(w14, d14);
6968 __ Fcvtmu(x17, s17);
6969 __ Fcvtmu(x18, s18);
6970 __ Fcvtmu(x19, s19);
6971 __ Fcvtmu(x20, s20);
6972 __ Fcvtmu(x21, s21);
6973 __ Fcvtmu(x22, s22);
6974 __ Fcvtmu(x23, s23);
6975 __ Fcvtmu(x24, d24);
6976 __ Fcvtmu(x25, d25);
6977 __ Fcvtmu(x26, d26);
6978 __ Fcvtmu(x27, d27);
6979 __ Fcvtmu(x28, d28);
6980 __ Fcvtmu(x29, d29);
6981 __ Fcvtmu(x30, d30);
6982 END();
6983
6984 RUN();
6985
6986 ASSERT_EQUAL_64(1, x0);
6987 ASSERT_EQUAL_64(1, x1);
6988 ASSERT_EQUAL_64(1, x2);
6989 ASSERT_EQUAL_64(0, x3);
6990 ASSERT_EQUAL_64(0xffffffff, x4);
6991 ASSERT_EQUAL_64(0, x5);
6992 ASSERT_EQUAL_64(0x7fffff80, x6);
6993 ASSERT_EQUAL_64(0, x7);
6994 ASSERT_EQUAL_64(1, x8);
6995 ASSERT_EQUAL_64(1, x9);
6996 ASSERT_EQUAL_64(1, x10);
6997 ASSERT_EQUAL_64(0, x11);
6998 ASSERT_EQUAL_64(0xffffffff, x12);
6999 ASSERT_EQUAL_64(0, x13);
7000 ASSERT_EQUAL_64(0x7ffffffe, x14);
7001 ASSERT_EQUAL_64(1, x17);
7002 ASSERT_EQUAL_64(1, x18);
armvixlb0c8ae22014-03-21 14:03:59 +00007003 ASSERT_EQUAL_64(0, x19);
7004 ASSERT_EQUAL_64(0xffffffffffffffff, x20);
7005 ASSERT_EQUAL_64(0, x21);
7006 ASSERT_EQUAL_64(0x7fffff8000000000, x22);
7007 ASSERT_EQUAL_64(0, x23);
armvixlad96eda2013-06-14 11:42:37 +01007008 ASSERT_EQUAL_64(1, x24);
7009 ASSERT_EQUAL_64(1, x25);
armvixlb0c8ae22014-03-21 14:03:59 +00007010 ASSERT_EQUAL_64(0, x26);
7011 ASSERT_EQUAL_64(0xffffffffffffffff, x27);
7012 ASSERT_EQUAL_64(0, x28);
7013 ASSERT_EQUAL_64(0x7ffffffffffffc00, x29);
7014 ASSERT_EQUAL_64(0, x30);
armvixlad96eda2013-06-14 11:42:37 +01007015
7016 TEARDOWN();
7017}
7018
7019
7020TEST(fcvtns) {
7021 SETUP();
7022
7023 START();
7024 __ Fmov(s0, 1.0);
7025 __ Fmov(s1, 1.1);
7026 __ Fmov(s2, 1.5);
7027 __ Fmov(s3, -1.5);
7028 __ Fmov(s4, kFP32PositiveInfinity);
7029 __ Fmov(s5, kFP32NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00007030 __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
7031 __ Fneg(s7, s6); // Smallest float > INT32_MIN.
armvixlad96eda2013-06-14 11:42:37 +01007032 __ Fmov(d8, 1.0);
7033 __ Fmov(d9, 1.1);
7034 __ Fmov(d10, 1.5);
7035 __ Fmov(d11, -1.5);
7036 __ Fmov(d12, kFP64PositiveInfinity);
7037 __ Fmov(d13, kFP64NegativeInfinity);
7038 __ Fmov(d14, kWMaxInt - 1);
7039 __ Fmov(d15, kWMinInt + 1);
7040 __ Fmov(s17, 1.1);
7041 __ Fmov(s18, 1.5);
7042 __ Fmov(s19, -1.5);
7043 __ Fmov(s20, kFP32PositiveInfinity);
7044 __ Fmov(s21, kFP32NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00007045 __ Fmov(s22, 0x7fffff8000000000); // Largest float < INT64_MAX.
7046 __ Fneg(s23, s22); // Smallest float > INT64_MIN.
armvixlad96eda2013-06-14 11:42:37 +01007047 __ Fmov(d24, 1.1);
7048 __ Fmov(d25, 1.5);
7049 __ Fmov(d26, -1.5);
7050 __ Fmov(d27, kFP64PositiveInfinity);
7051 __ Fmov(d28, kFP64NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00007052 __ Fmov(d29, 0x7ffffffffffffc00); // Largest double < INT64_MAX.
7053 __ Fneg(d30, d29); // Smallest double > INT64_MIN.
armvixlad96eda2013-06-14 11:42:37 +01007054
7055 __ Fcvtns(w0, s0);
7056 __ Fcvtns(w1, s1);
7057 __ Fcvtns(w2, s2);
7058 __ Fcvtns(w3, s3);
7059 __ Fcvtns(w4, s4);
7060 __ Fcvtns(w5, s5);
7061 __ Fcvtns(w6, s6);
7062 __ Fcvtns(w7, s7);
7063 __ Fcvtns(w8, d8);
7064 __ Fcvtns(w9, d9);
7065 __ Fcvtns(w10, d10);
7066 __ Fcvtns(w11, d11);
7067 __ Fcvtns(w12, d12);
7068 __ Fcvtns(w13, d13);
7069 __ Fcvtns(w14, d14);
7070 __ Fcvtns(w15, d15);
7071 __ Fcvtns(x17, s17);
7072 __ Fcvtns(x18, s18);
7073 __ Fcvtns(x19, s19);
7074 __ Fcvtns(x20, s20);
7075 __ Fcvtns(x21, s21);
7076 __ Fcvtns(x22, s22);
7077 __ Fcvtns(x23, s23);
7078 __ Fcvtns(x24, d24);
7079 __ Fcvtns(x25, d25);
7080 __ Fcvtns(x26, d26);
7081 __ Fcvtns(x27, d27);
7082 __ Fcvtns(x28, d28);
7083 __ Fcvtns(x29, d29);
7084 __ Fcvtns(x30, d30);
7085 END();
7086
7087 RUN();
7088
7089 ASSERT_EQUAL_64(1, x0);
7090 ASSERT_EQUAL_64(1, x1);
7091 ASSERT_EQUAL_64(2, x2);
7092 ASSERT_EQUAL_64(0xfffffffe, x3);
7093 ASSERT_EQUAL_64(0x7fffffff, x4);
7094 ASSERT_EQUAL_64(0x80000000, x5);
7095 ASSERT_EQUAL_64(0x7fffff80, x6);
7096 ASSERT_EQUAL_64(0x80000080, x7);
7097 ASSERT_EQUAL_64(1, x8);
7098 ASSERT_EQUAL_64(1, x9);
7099 ASSERT_EQUAL_64(2, x10);
7100 ASSERT_EQUAL_64(0xfffffffe, x11);
7101 ASSERT_EQUAL_64(0x7fffffff, x12);
7102 ASSERT_EQUAL_64(0x80000000, x13);
7103 ASSERT_EQUAL_64(0x7ffffffe, x14);
7104 ASSERT_EQUAL_64(0x80000001, x15);
7105 ASSERT_EQUAL_64(1, x17);
7106 ASSERT_EQUAL_64(2, x18);
armvixlb0c8ae22014-03-21 14:03:59 +00007107 ASSERT_EQUAL_64(0xfffffffffffffffe, x19);
7108 ASSERT_EQUAL_64(0x7fffffffffffffff, x20);
7109 ASSERT_EQUAL_64(0x8000000000000000, x21);
7110 ASSERT_EQUAL_64(0x7fffff8000000000, x22);
7111 ASSERT_EQUAL_64(0x8000008000000000, x23);
armvixlad96eda2013-06-14 11:42:37 +01007112 ASSERT_EQUAL_64(1, x24);
7113 ASSERT_EQUAL_64(2, x25);
armvixlb0c8ae22014-03-21 14:03:59 +00007114 ASSERT_EQUAL_64(0xfffffffffffffffe, x26);
7115 ASSERT_EQUAL_64(0x7fffffffffffffff, x27);
7116 ASSERT_EQUAL_64(0x8000000000000000, x28);
7117 ASSERT_EQUAL_64(0x7ffffffffffffc00, x29);
7118 ASSERT_EQUAL_64(0x8000000000000400, x30);
armvixlad96eda2013-06-14 11:42:37 +01007119
7120 TEARDOWN();
7121}
7122
7123
7124TEST(fcvtnu) {
7125 SETUP();
7126
7127 START();
7128 __ Fmov(s0, 1.0);
7129 __ Fmov(s1, 1.1);
7130 __ Fmov(s2, 1.5);
7131 __ Fmov(s3, -1.5);
7132 __ Fmov(s4, kFP32PositiveInfinity);
7133 __ Fmov(s5, kFP32NegativeInfinity);
7134 __ Fmov(s6, 0xffffff00); // Largest float < UINT32_MAX.
7135 __ Fmov(d8, 1.0);
7136 __ Fmov(d9, 1.1);
7137 __ Fmov(d10, 1.5);
7138 __ Fmov(d11, -1.5);
7139 __ Fmov(d12, kFP64PositiveInfinity);
7140 __ Fmov(d13, kFP64NegativeInfinity);
7141 __ Fmov(d14, 0xfffffffe);
7142 __ Fmov(s16, 1.0);
7143 __ Fmov(s17, 1.1);
7144 __ Fmov(s18, 1.5);
7145 __ Fmov(s19, -1.5);
7146 __ Fmov(s20, kFP32PositiveInfinity);
7147 __ Fmov(s21, kFP32NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00007148 __ Fmov(s22, 0xffffff0000000000); // Largest float < UINT64_MAX.
armvixlad96eda2013-06-14 11:42:37 +01007149 __ Fmov(d24, 1.1);
7150 __ Fmov(d25, 1.5);
7151 __ Fmov(d26, -1.5);
7152 __ Fmov(d27, kFP64PositiveInfinity);
7153 __ Fmov(d28, kFP64NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00007154 __ Fmov(d29, 0xfffffffffffff800); // Largest double < UINT64_MAX.
7155 __ Fmov(s30, 0x100000000);
armvixlad96eda2013-06-14 11:42:37 +01007156
7157 __ Fcvtnu(w0, s0);
7158 __ Fcvtnu(w1, s1);
7159 __ Fcvtnu(w2, s2);
7160 __ Fcvtnu(w3, s3);
7161 __ Fcvtnu(w4, s4);
7162 __ Fcvtnu(w5, s5);
7163 __ Fcvtnu(w6, s6);
7164 __ Fcvtnu(w8, d8);
7165 __ Fcvtnu(w9, d9);
7166 __ Fcvtnu(w10, d10);
7167 __ Fcvtnu(w11, d11);
7168 __ Fcvtnu(w12, d12);
7169 __ Fcvtnu(w13, d13);
7170 __ Fcvtnu(w14, d14);
7171 __ Fcvtnu(w15, d15);
7172 __ Fcvtnu(x16, s16);
7173 __ Fcvtnu(x17, s17);
7174 __ Fcvtnu(x18, s18);
7175 __ Fcvtnu(x19, s19);
7176 __ Fcvtnu(x20, s20);
7177 __ Fcvtnu(x21, s21);
7178 __ Fcvtnu(x22, s22);
7179 __ Fcvtnu(x24, d24);
7180 __ Fcvtnu(x25, d25);
7181 __ Fcvtnu(x26, d26);
7182 __ Fcvtnu(x27, d27);
7183 __ Fcvtnu(x28, d28);
7184 __ Fcvtnu(x29, d29);
7185 __ Fcvtnu(w30, s30);
7186 END();
7187
7188 RUN();
7189
7190 ASSERT_EQUAL_64(1, x0);
7191 ASSERT_EQUAL_64(1, x1);
7192 ASSERT_EQUAL_64(2, x2);
7193 ASSERT_EQUAL_64(0, x3);
7194 ASSERT_EQUAL_64(0xffffffff, x4);
7195 ASSERT_EQUAL_64(0, x5);
7196 ASSERT_EQUAL_64(0xffffff00, x6);
7197 ASSERT_EQUAL_64(1, x8);
7198 ASSERT_EQUAL_64(1, x9);
7199 ASSERT_EQUAL_64(2, x10);
7200 ASSERT_EQUAL_64(0, x11);
7201 ASSERT_EQUAL_64(0xffffffff, x12);
7202 ASSERT_EQUAL_64(0, x13);
7203 ASSERT_EQUAL_64(0xfffffffe, x14);
7204 ASSERT_EQUAL_64(1, x16);
7205 ASSERT_EQUAL_64(1, x17);
7206 ASSERT_EQUAL_64(2, x18);
7207 ASSERT_EQUAL_64(0, x19);
armvixlb0c8ae22014-03-21 14:03:59 +00007208 ASSERT_EQUAL_64(0xffffffffffffffff, x20);
armvixlad96eda2013-06-14 11:42:37 +01007209 ASSERT_EQUAL_64(0, x21);
armvixlb0c8ae22014-03-21 14:03:59 +00007210 ASSERT_EQUAL_64(0xffffff0000000000, x22);
armvixlad96eda2013-06-14 11:42:37 +01007211 ASSERT_EQUAL_64(1, x24);
7212 ASSERT_EQUAL_64(2, x25);
7213 ASSERT_EQUAL_64(0, x26);
armvixlb0c8ae22014-03-21 14:03:59 +00007214 ASSERT_EQUAL_64(0xffffffffffffffff, x27);
armvixlad96eda2013-06-14 11:42:37 +01007215 ASSERT_EQUAL_64(0, x28);
armvixlb0c8ae22014-03-21 14:03:59 +00007216 ASSERT_EQUAL_64(0xfffffffffffff800, x29);
armvixlad96eda2013-06-14 11:42:37 +01007217 ASSERT_EQUAL_64(0xffffffff, x30);
7218
7219 TEARDOWN();
7220}
7221
7222
7223TEST(fcvtzs) {
7224 SETUP();
7225
7226 START();
7227 __ Fmov(s0, 1.0);
7228 __ Fmov(s1, 1.1);
7229 __ Fmov(s2, 1.5);
7230 __ Fmov(s3, -1.5);
7231 __ Fmov(s4, kFP32PositiveInfinity);
7232 __ Fmov(s5, kFP32NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00007233 __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
7234 __ Fneg(s7, s6); // Smallest float > INT32_MIN.
armvixlad96eda2013-06-14 11:42:37 +01007235 __ Fmov(d8, 1.0);
7236 __ Fmov(d9, 1.1);
7237 __ Fmov(d10, 1.5);
7238 __ Fmov(d11, -1.5);
7239 __ Fmov(d12, kFP64PositiveInfinity);
7240 __ Fmov(d13, kFP64NegativeInfinity);
7241 __ Fmov(d14, kWMaxInt - 1);
7242 __ Fmov(d15, kWMinInt + 1);
7243 __ Fmov(s17, 1.1);
7244 __ Fmov(s18, 1.5);
7245 __ Fmov(s19, -1.5);
7246 __ Fmov(s20, kFP32PositiveInfinity);
7247 __ Fmov(s21, kFP32NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00007248 __ Fmov(s22, 0x7fffff8000000000); // Largest float < INT64_MAX.
7249 __ Fneg(s23, s22); // Smallest float > INT64_MIN.
armvixlad96eda2013-06-14 11:42:37 +01007250 __ Fmov(d24, 1.1);
7251 __ Fmov(d25, 1.5);
7252 __ Fmov(d26, -1.5);
7253 __ Fmov(d27, kFP64PositiveInfinity);
7254 __ Fmov(d28, kFP64NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00007255 __ Fmov(d29, 0x7ffffffffffffc00); // Largest double < INT64_MAX.
7256 __ Fneg(d30, d29); // Smallest double > INT64_MIN.
armvixlad96eda2013-06-14 11:42:37 +01007257
7258 __ Fcvtzs(w0, s0);
7259 __ Fcvtzs(w1, s1);
7260 __ Fcvtzs(w2, s2);
7261 __ Fcvtzs(w3, s3);
7262 __ Fcvtzs(w4, s4);
7263 __ Fcvtzs(w5, s5);
7264 __ Fcvtzs(w6, s6);
7265 __ Fcvtzs(w7, s7);
7266 __ Fcvtzs(w8, d8);
7267 __ Fcvtzs(w9, d9);
7268 __ Fcvtzs(w10, d10);
7269 __ Fcvtzs(w11, d11);
7270 __ Fcvtzs(w12, d12);
7271 __ Fcvtzs(w13, d13);
7272 __ Fcvtzs(w14, d14);
7273 __ Fcvtzs(w15, d15);
7274 __ Fcvtzs(x17, s17);
7275 __ Fcvtzs(x18, s18);
7276 __ Fcvtzs(x19, s19);
7277 __ Fcvtzs(x20, s20);
7278 __ Fcvtzs(x21, s21);
7279 __ Fcvtzs(x22, s22);
7280 __ Fcvtzs(x23, s23);
7281 __ Fcvtzs(x24, d24);
7282 __ Fcvtzs(x25, d25);
7283 __ Fcvtzs(x26, d26);
7284 __ Fcvtzs(x27, d27);
7285 __ Fcvtzs(x28, d28);
7286 __ Fcvtzs(x29, d29);
7287 __ Fcvtzs(x30, d30);
7288 END();
7289
7290 RUN();
7291
7292 ASSERT_EQUAL_64(1, x0);
7293 ASSERT_EQUAL_64(1, x1);
7294 ASSERT_EQUAL_64(1, x2);
7295 ASSERT_EQUAL_64(0xffffffff, x3);
7296 ASSERT_EQUAL_64(0x7fffffff, x4);
7297 ASSERT_EQUAL_64(0x80000000, x5);
7298 ASSERT_EQUAL_64(0x7fffff80, x6);
7299 ASSERT_EQUAL_64(0x80000080, x7);
7300 ASSERT_EQUAL_64(1, x8);
7301 ASSERT_EQUAL_64(1, x9);
7302 ASSERT_EQUAL_64(1, x10);
7303 ASSERT_EQUAL_64(0xffffffff, x11);
7304 ASSERT_EQUAL_64(0x7fffffff, x12);
7305 ASSERT_EQUAL_64(0x80000000, x13);
7306 ASSERT_EQUAL_64(0x7ffffffe, x14);
7307 ASSERT_EQUAL_64(0x80000001, x15);
7308 ASSERT_EQUAL_64(1, x17);
7309 ASSERT_EQUAL_64(1, x18);
armvixlb0c8ae22014-03-21 14:03:59 +00007310 ASSERT_EQUAL_64(0xffffffffffffffff, x19);
7311 ASSERT_EQUAL_64(0x7fffffffffffffff, x20);
7312 ASSERT_EQUAL_64(0x8000000000000000, x21);
7313 ASSERT_EQUAL_64(0x7fffff8000000000, x22);
7314 ASSERT_EQUAL_64(0x8000008000000000, x23);
armvixlad96eda2013-06-14 11:42:37 +01007315 ASSERT_EQUAL_64(1, x24);
7316 ASSERT_EQUAL_64(1, x25);
armvixlb0c8ae22014-03-21 14:03:59 +00007317 ASSERT_EQUAL_64(0xffffffffffffffff, x26);
7318 ASSERT_EQUAL_64(0x7fffffffffffffff, x27);
7319 ASSERT_EQUAL_64(0x8000000000000000, x28);
7320 ASSERT_EQUAL_64(0x7ffffffffffffc00, x29);
7321 ASSERT_EQUAL_64(0x8000000000000400, x30);
armvixlad96eda2013-06-14 11:42:37 +01007322
7323 TEARDOWN();
7324}
7325

7326TEST(fcvtzu) {
7327 SETUP();
7328
7329 START();
7330 __ Fmov(s0, 1.0);
7331 __ Fmov(s1, 1.1);
7332 __ Fmov(s2, 1.5);
7333 __ Fmov(s3, -1.5);
7334 __ Fmov(s4, kFP32PositiveInfinity);
7335 __ Fmov(s5, kFP32NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00007336 __ Fmov(s6, 0x7fffff80); // Largest float < INT32_MAX.
7337 __ Fneg(s7, s6); // Smallest float > INT32_MIN.
armvixlad96eda2013-06-14 11:42:37 +01007338 __ Fmov(d8, 1.0);
7339 __ Fmov(d9, 1.1);
7340 __ Fmov(d10, 1.5);
7341 __ Fmov(d11, -1.5);
7342 __ Fmov(d12, kFP64PositiveInfinity);
7343 __ Fmov(d13, kFP64NegativeInfinity);
7344 __ Fmov(d14, kWMaxInt - 1);
7345 __ Fmov(d15, kWMinInt + 1);
7346 __ Fmov(s17, 1.1);
7347 __ Fmov(s18, 1.5);
7348 __ Fmov(s19, -1.5);
7349 __ Fmov(s20, kFP32PositiveInfinity);
7350 __ Fmov(s21, kFP32NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00007351 __ Fmov(s22, 0x7fffff8000000000); // Largest float < INT64_MAX.
7352 __ Fneg(s23, s22); // Smallest float > INT64_MIN.
armvixlad96eda2013-06-14 11:42:37 +01007353 __ Fmov(d24, 1.1);
7354 __ Fmov(d25, 1.5);
7355 __ Fmov(d26, -1.5);
7356 __ Fmov(d27, kFP64PositiveInfinity);
7357 __ Fmov(d28, kFP64NegativeInfinity);
armvixlb0c8ae22014-03-21 14:03:59 +00007358 __ Fmov(d29, 0x7ffffffffffffc00); // Largest double < INT64_MAX.
7359 __ Fneg(d30, d29); // Smallest double > INT64_MIN.
armvixlad96eda2013-06-14 11:42:37 +01007360
7361 __ Fcvtzu(w0, s0);
7362 __ Fcvtzu(w1, s1);
7363 __ Fcvtzu(w2, s2);
7364 __ Fcvtzu(w3, s3);
7365 __ Fcvtzu(w4, s4);
7366 __ Fcvtzu(w5, s5);
7367 __ Fcvtzu(w6, s6);
7368 __ Fcvtzu(w7, s7);
7369 __ Fcvtzu(w8, d8);
7370 __ Fcvtzu(w9, d9);
7371 __ Fcvtzu(w10, d10);
7372 __ Fcvtzu(w11, d11);
7373 __ Fcvtzu(w12, d12);
7374 __ Fcvtzu(w13, d13);
7375 __ Fcvtzu(w14, d14);
7376 __ Fcvtzu(x17, s17);
7377 __ Fcvtzu(x18, s18);
7378 __ Fcvtzu(x19, s19);
7379 __ Fcvtzu(x20, s20);
7380 __ Fcvtzu(x21, s21);
7381 __ Fcvtzu(x22, s22);
7382 __ Fcvtzu(x23, s23);
7383 __ Fcvtzu(x24, d24);
7384 __ Fcvtzu(x25, d25);
7385 __ Fcvtzu(x26, d26);
7386 __ Fcvtzu(x27, d27);
7387 __ Fcvtzu(x28, d28);
7388 __ Fcvtzu(x29, d29);
7389 __ Fcvtzu(x30, d30);
7390 END();
7391
7392 RUN();
7393
7394 ASSERT_EQUAL_64(1, x0);
7395 ASSERT_EQUAL_64(1, x1);
7396 ASSERT_EQUAL_64(1, x2);
7397 ASSERT_EQUAL_64(0, x3);
7398 ASSERT_EQUAL_64(0xffffffff, x4);
7399 ASSERT_EQUAL_64(0, x5);
7400 ASSERT_EQUAL_64(0x7fffff80, x6);
7401 ASSERT_EQUAL_64(0, x7);
7402 ASSERT_EQUAL_64(1, x8);
7403 ASSERT_EQUAL_64(1, x9);
7404 ASSERT_EQUAL_64(1, x10);
7405 ASSERT_EQUAL_64(0, x11);
7406 ASSERT_EQUAL_64(0xffffffff, x12);
7407 ASSERT_EQUAL_64(0, x13);
7408 ASSERT_EQUAL_64(0x7ffffffe, x14);
7409 ASSERT_EQUAL_64(1, x17);
7410 ASSERT_EQUAL_64(1, x18);
armvixlb0c8ae22014-03-21 14:03:59 +00007411 ASSERT_EQUAL_64(0, x19);
7412 ASSERT_EQUAL_64(0xffffffffffffffff, x20);
7413 ASSERT_EQUAL_64(0, x21);
7414 ASSERT_EQUAL_64(0x7fffff8000000000, x22);
7415 ASSERT_EQUAL_64(0, x23);
armvixlad96eda2013-06-14 11:42:37 +01007416 ASSERT_EQUAL_64(1, x24);
7417 ASSERT_EQUAL_64(1, x25);
armvixlb0c8ae22014-03-21 14:03:59 +00007418 ASSERT_EQUAL_64(0, x26);
7419 ASSERT_EQUAL_64(0xffffffffffffffff, x27);
7420 ASSERT_EQUAL_64(0, x28);
7421 ASSERT_EQUAL_64(0x7ffffffffffffc00, x29);
7422 ASSERT_EQUAL_64(0, x30);
armvixlad96eda2013-06-14 11:42:37 +01007423
7424 TEARDOWN();
7425}
7426
7427
armvixl578645f2013-08-15 17:21:42 +01007428// Test that scvtf and ucvtf can convert the 64-bit input into the expected
7429// value. All possible values of 'fbits' are tested. The expected value is
7430// modified accordingly in each case.
7431//
7432// The expected value is specified as the bit encoding of the expected double
7433// produced by scvtf (expected_scvtf_bits) as well as ucvtf
7434// (expected_ucvtf_bits).
7435//
7436// Where the input value is representable by int32_t or uint32_t, conversions
7437// from W registers will also be tested.
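//
// For example (our illustration, matching a call made below),
// TestUScvtfHelper(0x0000000000000001, 0x3ff0000000000000, 0x3ff0000000000000)
// checks that the integer 1 converts to 1.0 for both scvtf and ucvtf, and that
// with fbits = 1, 2, ..., 64 the results are 0.5, 0.25, ..., 2^-64.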
7438static void TestUScvtfHelper(uint64_t in,
7439 uint64_t expected_scvtf_bits,
7440 uint64_t expected_ucvtf_bits) {
7441 uint64_t u64 = in;
7442 uint32_t u32 = u64 & 0xffffffff;
7443 int64_t s64 = static_cast<int64_t>(in);
7444 int32_t s32 = s64 & 0x7fffffff;
7445
7446 bool cvtf_s32 = (s64 == s32);
7447 bool cvtf_u32 = (u64 == u32);
7448
7449 double results_scvtf_x[65];
7450 double results_ucvtf_x[65];
7451 double results_scvtf_w[33];
7452 double results_ucvtf_w[33];
7453
armvixlad96eda2013-06-14 11:42:37 +01007454 SETUP();
armvixlad96eda2013-06-14 11:42:37 +01007455 START();
armvixlad96eda2013-06-14 11:42:37 +01007456
armvixlb0c8ae22014-03-21 14:03:59 +00007457 __ Mov(x0, reinterpret_cast<uintptr_t>(results_scvtf_x));
7458 __ Mov(x1, reinterpret_cast<uintptr_t>(results_ucvtf_x));
7459 __ Mov(x2, reinterpret_cast<uintptr_t>(results_scvtf_w));
7460 __ Mov(x3, reinterpret_cast<uintptr_t>(results_ucvtf_w));
armvixl578645f2013-08-15 17:21:42 +01007461
7462 __ Mov(x10, s64);
7463
7464 // Corrupt the top word, in case it is accidentally used during W-register
7465 // conversions.
7466 __ Mov(x11, 0x5555555555555555);
7467 __ Bfi(x11, x10, 0, kWRegSize);
7468
7469 // Test integer conversions.
7470 __ Scvtf(d0, x10);
7471 __ Ucvtf(d1, x10);
7472 __ Scvtf(d2, w11);
7473 __ Ucvtf(d3, w11);
7474 __ Str(d0, MemOperand(x0));
7475 __ Str(d1, MemOperand(x1));
7476 __ Str(d2, MemOperand(x2));
7477 __ Str(d3, MemOperand(x3));
7478
7479 // Test all possible values of fbits.
7480 for (int fbits = 1; fbits <= 32; fbits++) {
7481 __ Scvtf(d0, x10, fbits);
7482 __ Ucvtf(d1, x10, fbits);
7483 __ Scvtf(d2, w11, fbits);
7484 __ Ucvtf(d3, w11, fbits);
7485 __ Str(d0, MemOperand(x0, fbits * kDRegSizeInBytes));
7486 __ Str(d1, MemOperand(x1, fbits * kDRegSizeInBytes));
7487 __ Str(d2, MemOperand(x2, fbits * kDRegSizeInBytes));
7488 __ Str(d3, MemOperand(x3, fbits * kDRegSizeInBytes));
7489 }
7490
7491 // Conversions from W registers can only handle fbits values <= 32, so just
7492 // test conversions from X registers for 32 < fbits <= 64.
7493 for (int fbits = 33; fbits <= 64; fbits++) {
7494 __ Scvtf(d0, x10, fbits);
7495 __ Ucvtf(d1, x10, fbits);
7496 __ Str(d0, MemOperand(x0, fbits * kDRegSizeInBytes));
7497 __ Str(d1, MemOperand(x1, fbits * kDRegSizeInBytes));
7498 }
7499
7500 END();
armvixlad96eda2013-06-14 11:42:37 +01007501 RUN();
7502
armvixl578645f2013-08-15 17:21:42 +01007503 // Check the results.
7504 double expected_scvtf_base = rawbits_to_double(expected_scvtf_bits);
7505 double expected_ucvtf_base = rawbits_to_double(expected_ucvtf_bits);
7506
7507 for (int fbits = 0; fbits <= 32; fbits++) {
7508 double expected_scvtf = expected_scvtf_base / pow(2, fbits);
7509 double expected_ucvtf = expected_ucvtf_base / pow(2, fbits);
7510 ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
7511 ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
7512 if (cvtf_s32) ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_w[fbits]);
7513 if (cvtf_u32) ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_w[fbits]);
7514 }
7515 for (int fbits = 33; fbits <= 64; fbits++) {
7516 double expected_scvtf = expected_scvtf_base / pow(2, fbits);
7517 double expected_ucvtf = expected_ucvtf_base / pow(2, fbits);
7518 ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
7519 ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
7520 }
armvixlad96eda2013-06-14 11:42:37 +01007521
7522 TEARDOWN();
7523}
7524
7525
armvixl578645f2013-08-15 17:21:42 +01007526TEST(scvtf_ucvtf_double) {
7527 // Simple conversions of positive numbers which require no rounding; the
7528 // results should not depend on the rounding mode, and ucvtf and scvtf should
7529 // produce the same result.
7530 TestUScvtfHelper(0x0000000000000000, 0x0000000000000000, 0x0000000000000000);
7531 TestUScvtfHelper(0x0000000000000001, 0x3ff0000000000000, 0x3ff0000000000000);
7532 TestUScvtfHelper(0x0000000040000000, 0x41d0000000000000, 0x41d0000000000000);
7533 TestUScvtfHelper(0x0000000100000000, 0x41f0000000000000, 0x41f0000000000000);
7534 TestUScvtfHelper(0x4000000000000000, 0x43d0000000000000, 0x43d0000000000000);
7535 // Test mantissa extremities.
7536 TestUScvtfHelper(0x4000000000000400, 0x43d0000000000001, 0x43d0000000000001);
7537 // The largest int32_t that fits in a double.
7538 TestUScvtfHelper(0x000000007fffffff, 0x41dfffffffc00000, 0x41dfffffffc00000);
7539 // Values that would be negative if treated as an int32_t.
7540 TestUScvtfHelper(0x00000000ffffffff, 0x41efffffffe00000, 0x41efffffffe00000);
7541 TestUScvtfHelper(0x0000000080000000, 0x41e0000000000000, 0x41e0000000000000);
7542 TestUScvtfHelper(0x0000000080000001, 0x41e0000000200000, 0x41e0000000200000);
7543 // The largest int64_t that fits in a double.
7544 TestUScvtfHelper(0x7ffffffffffffc00, 0x43dfffffffffffff, 0x43dfffffffffffff);
7545 // Check for bit pattern reproduction.
7546 TestUScvtfHelper(0x0123456789abcde0, 0x43723456789abcde, 0x43723456789abcde);
7547 TestUScvtfHelper(0x0000000012345678, 0x41b2345678000000, 0x41b2345678000000);
7548
7549 // Simple conversions of negative int64_t values. These require no rounding,
7550 // and the results should not depend on the rounding mode.
7551 TestUScvtfHelper(0xffffffffc0000000, 0xc1d0000000000000, 0x43effffffff80000);
7552 TestUScvtfHelper(0xffffffff00000000, 0xc1f0000000000000, 0x43efffffffe00000);
7553 TestUScvtfHelper(0xc000000000000000, 0xc3d0000000000000, 0x43e8000000000000);
7554
7555 // Conversions which require rounding.
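  // A worked example of the round-to-nearest-even behaviour exercised here
  // (our explanation, not part of the original comments): near 2^60, adjacent
  // doubles are 2^8 = 0x100 apart, so only the low byte of these inputs is
  // rounded away. 0x...01 rounds down, 0x...81 rounds up, and a tie such as
  // 0x...80 goes to the even mantissa, giving the alternating
  // 0x43b0000000000000 / 0x43b0000000000001 pattern below.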
7556 TestUScvtfHelper(0x1000000000000000, 0x43b0000000000000, 0x43b0000000000000);
7557 TestUScvtfHelper(0x1000000000000001, 0x43b0000000000000, 0x43b0000000000000);
7558 TestUScvtfHelper(0x1000000000000080, 0x43b0000000000000, 0x43b0000000000000);
7559 TestUScvtfHelper(0x1000000000000081, 0x43b0000000000001, 0x43b0000000000001);
7560 TestUScvtfHelper(0x1000000000000100, 0x43b0000000000001, 0x43b0000000000001);
7561 TestUScvtfHelper(0x1000000000000101, 0x43b0000000000001, 0x43b0000000000001);
7562 TestUScvtfHelper(0x1000000000000180, 0x43b0000000000002, 0x43b0000000000002);
7563 TestUScvtfHelper(0x1000000000000181, 0x43b0000000000002, 0x43b0000000000002);
7564 TestUScvtfHelper(0x1000000000000200, 0x43b0000000000002, 0x43b0000000000002);
7565 TestUScvtfHelper(0x1000000000000201, 0x43b0000000000002, 0x43b0000000000002);
7566 TestUScvtfHelper(0x1000000000000280, 0x43b0000000000002, 0x43b0000000000002);
7567 TestUScvtfHelper(0x1000000000000281, 0x43b0000000000003, 0x43b0000000000003);
7568 TestUScvtfHelper(0x1000000000000300, 0x43b0000000000003, 0x43b0000000000003);
7569 // Check rounding of negative int64_t values (and large uint64_t values).
7570 TestUScvtfHelper(0x8000000000000000, 0xc3e0000000000000, 0x43e0000000000000);
7571 TestUScvtfHelper(0x8000000000000001, 0xc3e0000000000000, 0x43e0000000000000);
7572 TestUScvtfHelper(0x8000000000000200, 0xc3e0000000000000, 0x43e0000000000000);
7573 TestUScvtfHelper(0x8000000000000201, 0xc3dfffffffffffff, 0x43e0000000000000);
7574 TestUScvtfHelper(0x8000000000000400, 0xc3dfffffffffffff, 0x43e0000000000000);
7575 TestUScvtfHelper(0x8000000000000401, 0xc3dfffffffffffff, 0x43e0000000000001);
7576 TestUScvtfHelper(0x8000000000000600, 0xc3dffffffffffffe, 0x43e0000000000001);
7577 TestUScvtfHelper(0x8000000000000601, 0xc3dffffffffffffe, 0x43e0000000000001);
7578 TestUScvtfHelper(0x8000000000000800, 0xc3dffffffffffffe, 0x43e0000000000001);
7579 TestUScvtfHelper(0x8000000000000801, 0xc3dffffffffffffe, 0x43e0000000000001);
7580 TestUScvtfHelper(0x8000000000000a00, 0xc3dffffffffffffe, 0x43e0000000000001);
7581 TestUScvtfHelper(0x8000000000000a01, 0xc3dffffffffffffd, 0x43e0000000000001);
7582 TestUScvtfHelper(0x8000000000000c00, 0xc3dffffffffffffd, 0x43e0000000000002);
7583 // Round up to produce a result that's too big for the input to represent.
7584 TestUScvtfHelper(0x7ffffffffffffe00, 0x43e0000000000000, 0x43e0000000000000);
7585 TestUScvtfHelper(0x7fffffffffffffff, 0x43e0000000000000, 0x43e0000000000000);
7586 TestUScvtfHelper(0xfffffffffffffc00, 0xc090000000000000, 0x43f0000000000000);
7587 TestUScvtfHelper(0xffffffffffffffff, 0xbff0000000000000, 0x43f0000000000000);
7588}
7589
7590
7591// The same as TestUScvtfHelper, but convert to floats.
7592static void TestUScvtf32Helper(uint64_t in,
7593 uint32_t expected_scvtf_bits,
7594 uint32_t expected_ucvtf_bits) {
7595 uint64_t u64 = in;
7596 uint32_t u32 = u64 & 0xffffffff;
7597 int64_t s64 = static_cast<int64_t>(in);
7598 int32_t s32 = s64 & 0x7fffffff;
7599
7600 bool cvtf_s32 = (s64 == s32);
7601 bool cvtf_u32 = (u64 == u32);
7602
7603 float results_scvtf_x[65];
7604 float results_ucvtf_x[65];
7605 float results_scvtf_w[33];
7606 float results_ucvtf_w[33];
7607
armvixlad96eda2013-06-14 11:42:37 +01007608 SETUP();
armvixlad96eda2013-06-14 11:42:37 +01007609 START();
armvixlad96eda2013-06-14 11:42:37 +01007610
armvixlb0c8ae22014-03-21 14:03:59 +00007611 __ Mov(x0, reinterpret_cast<uintptr_t>(results_scvtf_x));
7612 __ Mov(x1, reinterpret_cast<uintptr_t>(results_ucvtf_x));
7613 __ Mov(x2, reinterpret_cast<uintptr_t>(results_scvtf_w));
7614 __ Mov(x3, reinterpret_cast<uintptr_t>(results_ucvtf_w));
armvixl578645f2013-08-15 17:21:42 +01007615
7616 __ Mov(x10, s64);
7617
7618 // Corrupt the top word, in case it is accidentally used during W-register
7619 // conversions.
7620 __ Mov(x11, 0x5555555555555555);
7621 __ Bfi(x11, x10, 0, kWRegSize);
7622
7623 // Test integer conversions.
7624 __ Scvtf(s0, x10);
7625 __ Ucvtf(s1, x10);
7626 __ Scvtf(s2, w11);
7627 __ Ucvtf(s3, w11);
7628 __ Str(s0, MemOperand(x0));
7629 __ Str(s1, MemOperand(x1));
7630 __ Str(s2, MemOperand(x2));
7631 __ Str(s3, MemOperand(x3));
7632
7633 // Test all possible values of fbits.
7634 for (int fbits = 1; fbits <= 32; fbits++) {
7635 __ Scvtf(s0, x10, fbits);
7636 __ Ucvtf(s1, x10, fbits);
7637 __ Scvtf(s2, w11, fbits);
7638 __ Ucvtf(s3, w11, fbits);
7639 __ Str(s0, MemOperand(x0, fbits * kSRegSizeInBytes));
7640 __ Str(s1, MemOperand(x1, fbits * kSRegSizeInBytes));
7641 __ Str(s2, MemOperand(x2, fbits * kSRegSizeInBytes));
7642 __ Str(s3, MemOperand(x3, fbits * kSRegSizeInBytes));
7643 }
7644
7645 // Conversions from W registers can only handle fbits values <= 32, so just
7646 // test conversions from X registers for 32 < fbits <= 64.
7647 for (int fbits = 33; fbits <= 64; fbits++) {
7648 __ Scvtf(s0, x10, fbits);
7649 __ Ucvtf(s1, x10, fbits);
7650 __ Str(s0, MemOperand(x0, fbits * kSRegSizeInBytes));
7651 __ Str(s1, MemOperand(x1, fbits * kSRegSizeInBytes));
7652 }
armvixlad96eda2013-06-14 11:42:37 +01007653
7654 END();
armvixlad96eda2013-06-14 11:42:37 +01007655 RUN();
7656
armvixl578645f2013-08-15 17:21:42 +01007657 // Check the results.
7658 float expected_scvtf_base = rawbits_to_float(expected_scvtf_bits);
7659 float expected_ucvtf_base = rawbits_to_float(expected_ucvtf_bits);
armvixlad96eda2013-06-14 11:42:37 +01007660
armvixl578645f2013-08-15 17:21:42 +01007661 for (int fbits = 0; fbits <= 32; fbits++) {
armvixlf37fdc02014-02-05 13:22:16 +00007662 float expected_scvtf = expected_scvtf_base / powf(2, fbits);
7663 float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
armvixl578645f2013-08-15 17:21:42 +01007664 ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
7665 ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
7666 if (cvtf_s32) ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_w[fbits]);
7667 if (cvtf_u32) ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_w[fbits]);
7669 }
7670 for (int fbits = 33; fbits <= 64; fbits++) {
armvixlf37fdc02014-02-05 13:22:16 +00007672 float expected_scvtf = expected_scvtf_base / powf(2, fbits);
7673 float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
armvixl578645f2013-08-15 17:21:42 +01007674 ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
7675 ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
7676 }
armvixlad96eda2013-06-14 11:42:37 +01007677
7678 TEARDOWN();
7679}
7680
7681
armvixl578645f2013-08-15 17:21:42 +01007682TEST(scvtf_ucvtf_float) {
7683 // Simple conversions of positive numbers which require no rounding; the
7684 // results should not depend on the rounding mode, and ucvtf and scvtf should
7685 // produce the same result.
7686 TestUScvtf32Helper(0x0000000000000000, 0x00000000, 0x00000000);
7687 TestUScvtf32Helper(0x0000000000000001, 0x3f800000, 0x3f800000);
7688 TestUScvtf32Helper(0x0000000040000000, 0x4e800000, 0x4e800000);
7689 TestUScvtf32Helper(0x0000000100000000, 0x4f800000, 0x4f800000);
7690 TestUScvtf32Helper(0x4000000000000000, 0x5e800000, 0x5e800000);
7691 // Test mantissa extremities.
7692 TestUScvtf32Helper(0x0000000000800001, 0x4b000001, 0x4b000001);
7693 TestUScvtf32Helper(0x4000008000000000, 0x5e800001, 0x5e800001);
7694 // The largest int32_t that fits in a float.
7695 TestUScvtf32Helper(0x000000007fffff80, 0x4effffff, 0x4effffff);
7696 // Values that would be negative if treated as an int32_t.
7697 TestUScvtf32Helper(0x00000000ffffff00, 0x4f7fffff, 0x4f7fffff);
7698 TestUScvtf32Helper(0x0000000080000000, 0x4f000000, 0x4f000000);
7699 TestUScvtf32Helper(0x0000000080000100, 0x4f000001, 0x4f000001);
7700 // The largest int64_t that fits in a float.
7701 TestUScvtf32Helper(0x7fffff8000000000, 0x5effffff, 0x5effffff);
7702 // Check for bit pattern reproduction.
7703 TestUScvtf32Helper(0x0000000000876543, 0x4b076543, 0x4b076543);
7704
7705 // Simple conversions of negative int64_t values. These require no rounding,
7706 // and the results should not depend on the rounding mode.
7707 TestUScvtf32Helper(0xfffffc0000000000, 0xd4800000, 0x5f7ffffc);
7708 TestUScvtf32Helper(0xc000000000000000, 0xde800000, 0x5f400000);
7709
7710 // Conversions which require rounding.
7711 TestUScvtf32Helper(0x0000800000000000, 0x57000000, 0x57000000);
7712 TestUScvtf32Helper(0x0000800000000001, 0x57000000, 0x57000000);
7713 TestUScvtf32Helper(0x0000800000800000, 0x57000000, 0x57000000);
7714 TestUScvtf32Helper(0x0000800000800001, 0x57000001, 0x57000001);
7715 TestUScvtf32Helper(0x0000800001000000, 0x57000001, 0x57000001);
7716 TestUScvtf32Helper(0x0000800001000001, 0x57000001, 0x57000001);
7717 TestUScvtf32Helper(0x0000800001800000, 0x57000002, 0x57000002);
7718 TestUScvtf32Helper(0x0000800001800001, 0x57000002, 0x57000002);
7719 TestUScvtf32Helper(0x0000800002000000, 0x57000002, 0x57000002);
7720 TestUScvtf32Helper(0x0000800002000001, 0x57000002, 0x57000002);
7721 TestUScvtf32Helper(0x0000800002800000, 0x57000002, 0x57000002);
7722 TestUScvtf32Helper(0x0000800002800001, 0x57000003, 0x57000003);
7723 TestUScvtf32Helper(0x0000800003000000, 0x57000003, 0x57000003);
7724 // Check rounding of negative int64_t values (and large uint64_t values).
7725 TestUScvtf32Helper(0x8000000000000000, 0xdf000000, 0x5f000000);
7726 TestUScvtf32Helper(0x8000000000000001, 0xdf000000, 0x5f000000);
7727 TestUScvtf32Helper(0x8000004000000000, 0xdf000000, 0x5f000000);
7728 TestUScvtf32Helper(0x8000004000000001, 0xdeffffff, 0x5f000000);
7729 TestUScvtf32Helper(0x8000008000000000, 0xdeffffff, 0x5f000000);
7730 TestUScvtf32Helper(0x8000008000000001, 0xdeffffff, 0x5f000001);
7731 TestUScvtf32Helper(0x800000c000000000, 0xdefffffe, 0x5f000001);
7732 TestUScvtf32Helper(0x800000c000000001, 0xdefffffe, 0x5f000001);
7733 TestUScvtf32Helper(0x8000010000000000, 0xdefffffe, 0x5f000001);
7734 TestUScvtf32Helper(0x8000010000000001, 0xdefffffe, 0x5f000001);
7735 TestUScvtf32Helper(0x8000014000000000, 0xdefffffe, 0x5f000001);
7736 TestUScvtf32Helper(0x8000014000000001, 0xdefffffd, 0x5f000001);
7737 TestUScvtf32Helper(0x8000018000000000, 0xdefffffd, 0x5f000002);
7738 // Round up to produce a result that's too big for the input to represent.
7739 TestUScvtf32Helper(0x000000007fffffc0, 0x4f000000, 0x4f000000);
7740 TestUScvtf32Helper(0x000000007fffffff, 0x4f000000, 0x4f000000);
7741 TestUScvtf32Helper(0x00000000ffffff80, 0x4f800000, 0x4f800000);
7742 TestUScvtf32Helper(0x00000000ffffffff, 0x4f800000, 0x4f800000);
7743 TestUScvtf32Helper(0x7fffffc000000000, 0x5f000000, 0x5f000000);
7744 TestUScvtf32Helper(0x7fffffffffffffff, 0x5f000000, 0x5f000000);
7745 TestUScvtf32Helper(0xffffff8000000000, 0xd3000000, 0x5f800000);
7746 TestUScvtf32Helper(0xffffffffffffffff, 0xbf800000, 0x5f800000);
7747}
7748
7749
armvixlad96eda2013-06-14 11:42:37 +01007750TEST(system_mrs) {
7751 SETUP();
7752
7753 START();
7754 __ Mov(w0, 0);
7755 __ Mov(w1, 1);
7756 __ Mov(w2, 0x80000000);
7757
7758 // Set the Z and C flags.
7759 __ Cmp(w0, w0);
7760 __ Mrs(x3, NZCV);
7761
7762 // Set the N flag.
7763 __ Cmp(w0, w1);
7764 __ Mrs(x4, NZCV);
7765
7766 // Set the Z, C and V flags.
armvixlf37fdc02014-02-05 13:22:16 +00007767 __ Adds(w0, w2, w2);
armvixlad96eda2013-06-14 11:42:37 +01007768 __ Mrs(x5, NZCV);
armvixl578645f2013-08-15 17:21:42 +01007769
7770 // Read the default FPCR.
7771 __ Mrs(x6, FPCR);
armvixlad96eda2013-06-14 11:42:37 +01007772 END();
7773
7774 RUN();
7775
armvixl578645f2013-08-15 17:21:42 +01007776 // NZCV
armvixlad96eda2013-06-14 11:42:37 +01007777 ASSERT_EQUAL_32(ZCFlag, w3);
7778 ASSERT_EQUAL_32(NFlag, w4);
7779 ASSERT_EQUAL_32(ZCVFlag, w5);
7780
armvixl578645f2013-08-15 17:21:42 +01007781 // FPCR
7782 // The default FPCR on Linux-based platforms is 0.
7783 ASSERT_EQUAL_32(0, w6);
7784
armvixlad96eda2013-06-14 11:42:37 +01007785 TEARDOWN();
7786}
7787
7788
7789TEST(system_msr) {
armvixl578645f2013-08-15 17:21:42 +01007790 // All FPCR fields that must be implemented: AHP, DN, FZ, RMode
7791 const uint64_t fpcr_core = 0x07c00000;
7792
7793 // All FPCR fields (including fields which may be read-as-zero):
7794 // Stride, Len
7795 // IDE, IXE, UFE, OFE, DZE, IOE
7796 const uint64_t fpcr_all = fpcr_core | 0x00379f00;
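  // Our reading of the FPCR layout (for reference; not taken from this file):
  // RMode occupies bits [23:22], FZ is bit 24, DN is bit 25 and AHP is bit 26,
  // which together form the 0x07c00000 core mask. Len ([18:16]),
  // Stride ([21:20]) and the trap-enable bits (IOE, DZE, OFE, UFE, IXE at
  // [12:8] and IDE at bit 15) account for the additional 0x00379f00.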
7797
armvixlad96eda2013-06-14 11:42:37 +01007798 SETUP();
7799
7800 START();
7801 __ Mov(w0, 0);
7802 __ Mov(w1, 0x7fffffff);
7803
7804 __ Mov(x7, 0);
7805
7806 __ Mov(x10, NVFlag);
7807 __ Cmp(w0, w0); // Set Z and C.
7808 __ Msr(NZCV, x10); // Set N and V.
7809 // The Msr should have overwritten every flag set by the Cmp.
7810 __ Cinc(x7, x7, mi); // N
7811 __ Cinc(x7, x7, ne); // !Z
7812 __ Cinc(x7, x7, lo); // !C
7813 __ Cinc(x7, x7, vs); // V
7814
7815 __ Mov(x10, ZCFlag);
7816 __ Cmn(w1, w1); // Set N and V.
7817 __ Msr(NZCV, x10); // Set Z and C.
7818 // The Msr should have overwritten every flag set by the Cmn.
7819 __ Cinc(x7, x7, pl); // !N
7820 __ Cinc(x7, x7, eq); // Z
7821 __ Cinc(x7, x7, hs); // C
7822 __ Cinc(x7, x7, vc); // !V
7823
armvixl578645f2013-08-15 17:21:42 +01007824 // All core FPCR fields must be writable.
7825 __ Mov(x8, fpcr_core);
7826 __ Msr(FPCR, x8);
7827 __ Mrs(x8, FPCR);
7828
7829 // All FPCR fields, including optional ones. This part of the test doesn't
7830 // achieve much other than ensuring that supported fields can be cleared by
7831 // the next test.
7832 __ Mov(x9, fpcr_all);
7833 __ Msr(FPCR, x9);
7834 __ Mrs(x9, FPCR);
7835 __ And(x9, x9, fpcr_core);
7836
7837 // The undefined bits must ignore writes.
7838 // It's conceivable that a future version of the architecture could use these
7839 // fields (making this test fail), but in the meantime this is a useful test
7840 // for the simulator.
7841 __ Mov(x10, ~fpcr_all);
7842 __ Msr(FPCR, x10);
7843 __ Mrs(x10, FPCR);
7844
armvixlad96eda2013-06-14 11:42:37 +01007845 END();
7846
7847 RUN();
7848
7849 // We should have incremented x7 (from 0) exactly 8 times.
7850 ASSERT_EQUAL_64(8, x7);
7851
armvixl578645f2013-08-15 17:21:42 +01007852 ASSERT_EQUAL_64(fpcr_core, x8);
7853 ASSERT_EQUAL_64(fpcr_core, x9);
7854 ASSERT_EQUAL_64(0, x10);
7855
armvixlad96eda2013-06-14 11:42:37 +01007856 TEARDOWN();
7857}
7858
7859
7860TEST(system_nop) {
7861 SETUP();
7862 RegisterDump before;
7863
7864 START();
7865 before.Dump(&masm);
7866 __ Nop();
7867 END();
7868
7869 RUN();
7870
7871 ASSERT_EQUAL_REGISTERS(before);
7872 ASSERT_EQUAL_NZCV(before.flags_nzcv());
7873
7874 TEARDOWN();
7875}
7876
7877
7878TEST(zero_dest) {
7879 SETUP();
armvixlc68cb642014-09-25 18:49:30 +01007880 ALLOW_ASM();
armvixlad96eda2013-06-14 11:42:37 +01007881 RegisterDump before;
7882
7883 START();
7884 // Preserve the stack pointer, in case we clobber it.
7885 __ Mov(x30, sp);
7886 // Initialize the other registers used in this test.
armvixlb0c8ae22014-03-21 14:03:59 +00007887 uint64_t literal_base = 0x0100001000100101;
armvixlad96eda2013-06-14 11:42:37 +01007888 __ Mov(x0, 0);
7889 __ Mov(x1, literal_base);
7890 for (unsigned i = 2; i < x30.code(); i++) {
7891 __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
7892 }
7893 before.Dump(&masm);
7894
7895 // All of these instructions should be NOPs in these forms, but have
7896 // alternate forms which can write into the stack pointer.
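  // (As we understand the A64 encodings: in the shifted-register forms used
  // here, destination register 31 selects xzr, while the immediate and
  // extended-register variants of these mnemonics treat register 31 as sp.)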
7897 __ add(xzr, x0, x1);
7898 __ add(xzr, x1, xzr);
7899 __ add(xzr, xzr, x1);
7900
7901 __ and_(xzr, x0, x2);
7902 __ and_(xzr, x2, xzr);
7903 __ and_(xzr, xzr, x2);
7904
7905 __ bic(xzr, x0, x3);
7906 __ bic(xzr, x3, xzr);
7907 __ bic(xzr, xzr, x3);
7908
7909 __ eon(xzr, x0, x4);
7910 __ eon(xzr, x4, xzr);
7911 __ eon(xzr, xzr, x4);
7912
7913 __ eor(xzr, x0, x5);
7914 __ eor(xzr, x5, xzr);
7915 __ eor(xzr, xzr, x5);
7916
7917 __ orr(xzr, x0, x6);
7918 __ orr(xzr, x6, xzr);
7919 __ orr(xzr, xzr, x6);
7920
7921 __ sub(xzr, x0, x7);
7922 __ sub(xzr, x7, xzr);
7923 __ sub(xzr, xzr, x7);
7924
7925 // Swap the saved stack pointer with the real one. If sp was written
7926 // during the test, it will show up in x30. This is done because the test
7927 // framework assumes that sp will be valid at the end of the test.
7928 __ Mov(x29, x30);
7929 __ Mov(x30, sp);
7930 __ Mov(sp, x29);
7931 // We used x29 as a scratch register, so reset it to make sure it doesn't
7932 // trigger a test failure.
7933 __ Add(x29, x28, x1);
7934 END();
7935
7936 RUN();
7937
7938 ASSERT_EQUAL_REGISTERS(before);
7939 ASSERT_EQUAL_NZCV(before.flags_nzcv());
7940
7941 TEARDOWN();
7942}
7943
7944
7945TEST(zero_dest_setflags) {
7946 SETUP();
armvixlc68cb642014-09-25 18:49:30 +01007947 ALLOW_ASM();
armvixlad96eda2013-06-14 11:42:37 +01007948 RegisterDump before;
7949
7950 START();
7951 // Preserve the stack pointer, in case we clobber it.
7952 __ Mov(x30, sp);
7953 // Initialize the other registers used in this test.
armvixlb0c8ae22014-03-21 14:03:59 +00007954 uint64_t literal_base = 0x0100001000100101;
armvixlad96eda2013-06-14 11:42:37 +01007955 __ Mov(x0, 0);
7956 __ Mov(x1, literal_base);
7957 for (int i = 2; i < 30; i++) {
7958 __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
7959 }
7960 before.Dump(&masm);
7961
7962 // All of these instructions should only write to the flags in these forms,
7963 // but have alternate forms which can write into the stack pointer.
armvixlf37fdc02014-02-05 13:22:16 +00007964 __ adds(xzr, x0, Operand(x1, UXTX));
7965 __ adds(xzr, x1, Operand(xzr, UXTX));
7966 __ adds(xzr, x1, 1234);
7967 __ adds(xzr, x0, x1);
7968 __ adds(xzr, x1, xzr);
7969 __ adds(xzr, xzr, x1);
armvixlad96eda2013-06-14 11:42:37 +01007970
armvixlf37fdc02014-02-05 13:22:16 +00007971 __ ands(xzr, x2, ~0xf);
7972 __ ands(xzr, xzr, ~0xf);
7973 __ ands(xzr, x0, x2);
7974 __ ands(xzr, x2, xzr);
7975 __ ands(xzr, xzr, x2);
armvixlad96eda2013-06-14 11:42:37 +01007976
armvixlf37fdc02014-02-05 13:22:16 +00007977 __ bics(xzr, x3, ~0xf);
7978 __ bics(xzr, xzr, ~0xf);
7979 __ bics(xzr, x0, x3);
7980 __ bics(xzr, x3, xzr);
7981 __ bics(xzr, xzr, x3);
armvixlad96eda2013-06-14 11:42:37 +01007982
armvixlf37fdc02014-02-05 13:22:16 +00007983 __ subs(xzr, x0, Operand(x3, UXTX));
7984 __ subs(xzr, x3, Operand(xzr, UXTX));
7985 __ subs(xzr, x3, 1234);
7986 __ subs(xzr, x0, x3);
7987 __ subs(xzr, x3, xzr);
7988 __ subs(xzr, xzr, x3);
armvixlad96eda2013-06-14 11:42:37 +01007989
7990 // Swap the saved stack pointer with the real one. If sp was written
7991 // during the test, it will show up in x30. This is done because the test
7992 // framework assumes that sp will be valid at the end of the test.
7993 __ Mov(x29, x30);
7994 __ Mov(x30, sp);
7995 __ Mov(sp, x29);
7996 // We used x29 as a scratch register, so reset it to make sure it doesn't
7997 // trigger a test failure.
7998 __ Add(x29, x28, x1);
7999 END();
8000
8001 RUN();
8002
8003 ASSERT_EQUAL_REGISTERS(before);
8004
8005 TEARDOWN();
8006}
8007
8008
8009TEST(register_bit) {
8010 // No code generation takes place in this test, so no need to setup and
8011 // teardown.
8012
8013 // Simple tests.
armvixlb0c8ae22014-03-21 14:03:59 +00008014 assert(x0.Bit() == (UINT64_C(1) << 0));
8015 assert(x1.Bit() == (UINT64_C(1) << 1));
8016 assert(x10.Bit() == (UINT64_C(1) << 10));
armvixlad96eda2013-06-14 11:42:37 +01008017
8018 // AAPCS64 definitions.
armvixlb0c8ae22014-03-21 14:03:59 +00008019 assert(lr.Bit() == (UINT64_C(1) << kLinkRegCode));
armvixlad96eda2013-06-14 11:42:37 +01008020
8021 // Fixed (hardware) definitions.
armvixlb0c8ae22014-03-21 14:03:59 +00008022 assert(xzr.Bit() == (UINT64_C(1) << kZeroRegCode));
armvixlad96eda2013-06-14 11:42:37 +01008023
8024 // Internal ABI definitions.
armvixlb0c8ae22014-03-21 14:03:59 +00008025 assert(sp.Bit() == (UINT64_C(1) << kSPRegInternalCode));
armvixlad96eda2013-06-14 11:42:37 +01008026 assert(sp.Bit() != xzr.Bit());
8027
8028 // xn.Bit() == wn.Bit() at all times, for the same n.
8029 assert(x0.Bit() == w0.Bit());
8030 assert(x1.Bit() == w1.Bit());
8031 assert(x10.Bit() == w10.Bit());
8032 assert(xzr.Bit() == wzr.Bit());
8033 assert(sp.Bit() == wsp.Bit());
8034}
8035
8036
8037TEST(stack_pointer_override) {
8038 // This test generates some stack maintenance code, but the test only checks
8039 // the reported state.
8040 SETUP();
8041 START();
8042
8043 // The default stack pointer in VIXL is sp.
8044 assert(sp.Is(__ StackPointer()));
8045 __ SetStackPointer(x0);
8046 assert(x0.Is(__ StackPointer()));
8047 __ SetStackPointer(x28);
8048 assert(x28.Is(__ StackPointer()));
8049 __ SetStackPointer(sp);
8050 assert(sp.Is(__ StackPointer()));
8051
8052 END();
8053 RUN();
8054 TEARDOWN();
8055}
8056
8057
8058TEST(peek_poke_simple) {
8059 SETUP();
8060 START();
8061
8062 static const RegList x0_to_x3 = x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit();
8063 static const RegList x10_to_x13 = x10.Bit() | x11.Bit() |
8064 x12.Bit() | x13.Bit();
8065
8066 // The literal base is chosen to have two useful properties:
8067 // * When multiplied by small values (such as a register index), this value
8068 // is clearly readable in the result.
8069 // * The value is not formed from repeating fixed-size smaller values, so it
8070 // can be used to detect endianness-related errors.
armvixlb0c8ae22014-03-21 14:03:59 +00008071 uint64_t literal_base = 0x0100001000100101;
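  // For example, literal_base * 3 is 0x0300003000300303, so the multiplier
  // used for each register can be read straight out of a failing register
  // dump.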
armvixlad96eda2013-06-14 11:42:37 +01008072
8073 // Initialize the registers.
8074 __ Mov(x0, literal_base);
8075 __ Add(x1, x0, x0);
8076 __ Add(x2, x1, x0);
8077 __ Add(x3, x2, x0);
8078
8079 __ Claim(32);
8080
8081 // Simple exchange.
8082 // After this test:
8083 // x0-x3 should be unchanged.
8084 // w10-w13 should contain the lower words of x0-x3.
8085 __ Poke(x0, 0);
8086 __ Poke(x1, 8);
8087 __ Poke(x2, 16);
8088 __ Poke(x3, 24);
8089 Clobber(&masm, x0_to_x3);
8090 __ Peek(x0, 0);
8091 __ Peek(x1, 8);
8092 __ Peek(x2, 16);
8093 __ Peek(x3, 24);
8094
8095 __ Poke(w0, 0);
8096 __ Poke(w1, 4);
8097 __ Poke(w2, 8);
8098 __ Poke(w3, 12);
8099 Clobber(&masm, x10_to_x13);
8100 __ Peek(w10, 0);
8101 __ Peek(w11, 4);
8102 __ Peek(w12, 8);
8103 __ Peek(w13, 12);
8104
8105 __ Drop(32);
8106
8107 END();
8108 RUN();
8109
8110 ASSERT_EQUAL_64(literal_base * 1, x0);
8111 ASSERT_EQUAL_64(literal_base * 2, x1);
8112 ASSERT_EQUAL_64(literal_base * 3, x2);
8113 ASSERT_EQUAL_64(literal_base * 4, x3);
8114
8115 ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
8116 ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
8117 ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
8118 ASSERT_EQUAL_64((literal_base * 4) & 0xffffffff, x13);
8119
8120 TEARDOWN();
8121}
8122
8123
8124TEST(peek_poke_unaligned) {
8125 SETUP();
8126 START();
8127
8128 // The literal base is chosen to have two useful properties:
8129 // * When multiplied by small values (such as a register index), this value
8130 // is clearly readable in the result.
8131 // * The value is not formed from repeating fixed-size smaller values, so it
8132 // can be used to detect endianness-related errors.
armvixlb0c8ae22014-03-21 14:03:59 +00008133 uint64_t literal_base = 0x0100001000100101;
armvixlad96eda2013-06-14 11:42:37 +01008134
8135 // Initialize the registers.
8136 __ Mov(x0, literal_base);
8137 __ Add(x1, x0, x0);
8138 __ Add(x2, x1, x0);
8139 __ Add(x3, x2, x0);
8140 __ Add(x4, x3, x0);
8141 __ Add(x5, x4, x0);
8142 __ Add(x6, x5, x0);
8143
8144 __ Claim(32);
8145
8146 // Unaligned exchanges.
8147 // After this test:
8148 // x0-x6 should be unchanged.
8149 // w10-w12 should contain the lower words of x0-x2.
8150 __ Poke(x0, 1);
8151 Clobber(&masm, x0.Bit());
8152 __ Peek(x0, 1);
8153 __ Poke(x1, 2);
8154 Clobber(&masm, x1.Bit());
8155 __ Peek(x1, 2);
8156 __ Poke(x2, 3);
8157 Clobber(&masm, x2.Bit());
8158 __ Peek(x2, 3);
8159 __ Poke(x3, 4);
8160 Clobber(&masm, x3.Bit());
8161 __ Peek(x3, 4);
8162 __ Poke(x4, 5);
8163 Clobber(&masm, x4.Bit());
8164 __ Peek(x4, 5);
8165 __ Poke(x5, 6);
8166 Clobber(&masm, x5.Bit());
8167 __ Peek(x5, 6);
8168 __ Poke(x6, 7);
8169 Clobber(&masm, x6.Bit());
8170 __ Peek(x6, 7);
8171
8172 __ Poke(w0, 1);
8173 Clobber(&masm, w10.Bit());
8174 __ Peek(w10, 1);
8175 __ Poke(w1, 2);
8176 Clobber(&masm, w11.Bit());
8177 __ Peek(w11, 2);
8178 __ Poke(w2, 3);
8179 Clobber(&masm, w12.Bit());
8180 __ Peek(w12, 3);
8181
8182 __ Drop(32);
8183
8184 END();
8185 RUN();
8186
8187 ASSERT_EQUAL_64(literal_base * 1, x0);
8188 ASSERT_EQUAL_64(literal_base * 2, x1);
8189 ASSERT_EQUAL_64(literal_base * 3, x2);
8190 ASSERT_EQUAL_64(literal_base * 4, x3);
8191 ASSERT_EQUAL_64(literal_base * 5, x4);
8192 ASSERT_EQUAL_64(literal_base * 6, x5);
8193 ASSERT_EQUAL_64(literal_base * 7, x6);
8194
8195 ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
8196 ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
8197 ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
8198
8199 TEARDOWN();
8200}
8201
8202
8203TEST(peek_poke_endianness) {
8204 SETUP();
8205 START();
8206
8207 // The literal base is chosen to have two useful properties:
8208 // * When multiplied by small values (such as a register index), this value
8209 // is clearly readable in the result.
8210 // * The value is not formed from repeating fixed-size smaller values, so it
8211 // can be used to detect endianness-related errors.
armvixlb0c8ae22014-03-21 14:03:59 +00008212 uint64_t literal_base = 0x0100001000100101;
armvixlad96eda2013-06-14 11:42:37 +01008213
8214 // Initialize the registers.
8215 __ Mov(x0, literal_base);
8216 __ Add(x1, x0, x0);
8217
8218 __ Claim(32);
8219
8220 // Endianness tests.
8221 // After this section:
8222 // x4 should match x0[31:0]:x0[63:32]
8223 // w5 should match w1[15:0]:w1[31:16]
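  // (Two identical copies are stored at adjacent stack slots, so a load at an
  // offset halfway into the first copy picks up that copy's high half as the
  // low half of the result on this little-endian target, and the start of the
  // second copy as the high half.)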
8224 __ Poke(x0, 0);
8225 __ Poke(x0, 8);
8226 __ Peek(x4, 4);
8227
8228 __ Poke(w1, 0);
8229 __ Poke(w1, 4);
8230 __ Peek(w5, 2);
8231
8232 __ Drop(32);
8233
8234 END();
8235 RUN();
8236
8237 uint64_t x0_expected = literal_base * 1;
8238 uint64_t x1_expected = literal_base * 2;
8239 uint64_t x4_expected = (x0_expected << 32) | (x0_expected >> 32);
8240 uint64_t x5_expected = ((x1_expected << 16) & 0xffff0000) |
8241 ((x1_expected >> 16) & 0x0000ffff);
8242
8243 ASSERT_EQUAL_64(x0_expected, x0);
8244 ASSERT_EQUAL_64(x1_expected, x1);
8245 ASSERT_EQUAL_64(x4_expected, x4);
8246 ASSERT_EQUAL_64(x5_expected, x5);
8247
8248 TEARDOWN();
8249}
8250
8251
8252TEST(peek_poke_mixed) {
8253 SETUP();
8254 START();
8255
8256 // The literal base is chosen to have two useful properties:
8257 // * When multiplied by small values (such as a register index), this value
8258 // is clearly readable in the result.
8259 // * The value is not formed from repeating fixed-size smaller values, so it
8260 // can be used to detect endianness-related errors.
armvixlb0c8ae22014-03-21 14:03:59 +00008261 uint64_t literal_base = 0x0100001000100101;
armvixlad96eda2013-06-14 11:42:37 +01008262
8263 // Initialize the registers.
8264 __ Mov(x0, literal_base);
8265 __ Add(x1, x0, x0);
8266 __ Add(x2, x1, x0);
8267 __ Add(x3, x2, x0);
8268
8269 __ Claim(32);
8270
8271 // Mix with other stack operations.
8272 // After this section:
8273 // x0-x3 should be unchanged.
8274 // x6 should match x1[31:0]:x0[63:32]
8275 // w7 should match x1[15:0]:x0[63:48]
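  //  (x6 and w7 take these values because the Drop and Claim below shift the
  //  temporary stack pointer, so the later Peek offsets straddle the boundary
  //  between the x0 and x1 slots established by the first two Pokes.)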
8276 __ Poke(x1, 8);
8277 __ Poke(x0, 0);
8278 {
armvixlb0c8ae22014-03-21 14:03:59 +00008279 VIXL_ASSERT(__ StackPointer().Is(sp));
armvixlad96eda2013-06-14 11:42:37 +01008280 __ Mov(x4, __ StackPointer());
8281 __ SetStackPointer(x4);
8282
8283 __ Poke(wzr, 0); // Clobber the space we're about to drop.
8284 __ Drop(4);
8285 __ Peek(x6, 0);
8286 __ Claim(8);
8287 __ Peek(w7, 10);
8288 __ Poke(x3, 28);
8289 __ Poke(xzr, 0); // Clobber the space we're about to drop.
8290 __ Drop(8);
8291 __ Poke(x2, 12);
8292 __ Push(w0);
8293
8294 __ Mov(sp, __ StackPointer());
8295 __ SetStackPointer(sp);
8296 }
8297
8298 __ Pop(x0, x1, x2, x3);
8299
8300 END();
8301 RUN();
8302
8303 uint64_t x0_expected = literal_base * 1;
8304 uint64_t x1_expected = literal_base * 2;
8305 uint64_t x2_expected = literal_base * 3;
8306 uint64_t x3_expected = literal_base * 4;
8307 uint64_t x6_expected = (x1_expected << 32) | (x0_expected >> 32);
8308 uint64_t x7_expected = ((x1_expected << 16) & 0xffff0000) |
8309 ((x0_expected >> 48) & 0x0000ffff);
8310
8311 ASSERT_EQUAL_64(x0_expected, x0);
8312 ASSERT_EQUAL_64(x1_expected, x1);
8313 ASSERT_EQUAL_64(x2_expected, x2);
8314 ASSERT_EQUAL_64(x3_expected, x3);
8315 ASSERT_EQUAL_64(x6_expected, x6);
8316 ASSERT_EQUAL_64(x7_expected, x7);
8317
8318 TEARDOWN();
8319}
8320
8321
armvixlc68cb642014-09-25 18:49:30 +01008322TEST(peek_poke_reglist) {
8323 SETUP();
8324 START();
8325
8326 // The literal base is chosen to have two useful properties:
8327 // * When multiplied by small values (such as a register index), this value
8328 // is clearly readable in the result.
8329 // * The value is not formed from repeating fixed-size smaller values, so it
8330 // can be used to detect endianness-related errors.
8331 uint64_t base = 0x0100001000100101;
8332
8333 // Initialize the registers.
8334 __ Mov(x1, base);
8335 __ Add(x2, x1, x1);
8336 __ Add(x3, x2, x1);
8337 __ Add(x4, x3, x1);
8338
8339 CPURegList list_1(x1, x2, x3, x4);
8340 CPURegList list_2(x11, x12, x13, x14);
8341 int list_1_size = list_1.TotalSizeInBytes();
8342
8343 __ Claim(2 * list_1_size);
8344
8345 __ PokeCPURegList(list_1, 0);
8346 __ PokeXRegList(list_1.list(), list_1_size);
8347 __ PeekCPURegList(list_2, 2 * kXRegSizeInBytes);
8348 __ PeekXRegList(x15.Bit(), kWRegSizeInBytes);
8349 __ PeekWRegList(w16.Bit() | w17.Bit(), 3 * kXRegSizeInBytes);
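  // At this point the claimed area holds two consecutive copies of
  // {x1, x2, x3, x4} (1*base to 4*base) at byte offsets 0 and 32. Peeking
  // list_2 at offset 16 therefore loads 3*base, 4*base, 1*base and 2*base
  // into x11-x14; x15 (peeked at offset 4) straddles the first two slots; and
  // w16/w17 (peeked from offset 24) pick up the low and high words of 4*base.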
8350
8351 __ Drop(2 * list_1_size);
8352
8353
8354 uint64_t base_d = 0x1010010001000010;
8355
8356 // Initialize the registers.
8357 __ Mov(x1, base_d);
8358 __ Add(x2, x1, x1);
8359 __ Add(x3, x2, x1);
8360 __ Add(x4, x3, x1);
8361 __ Fmov(d1, x1);
8362 __ Fmov(d2, x2);
8363 __ Fmov(d3, x3);
8364 __ Fmov(d4, x4);
8365
8366 CPURegList list_d_1(d1, d2, d3, d4);
8367 CPURegList list_d_2(d11, d12, d13, d14);
8368 int list_d_1_size = list_d_1.TotalSizeInBytes();
8369
8370 __ Claim(2 * list_d_1_size);
8371
8372 __ PokeCPURegList(list_d_1, 0);
8373 __ PokeDRegList(list_d_1.list(), list_d_1_size);
8374 __ PeekCPURegList(list_d_2, 2 * kDRegSizeInBytes);
8375 __ PeekDRegList(d15.Bit(), kSRegSizeInBytes);
8376 __ PeekSRegList(s16.Bit() | s17.Bit(), 3 * kDRegSizeInBytes);
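  // The D-register sequence mirrors the X-register layout above, with base_d
  // in place of base and D/S slots in place of X/W slots.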
8377
8378 __ Drop(2 * list_d_1_size);
8379
8380
8381 END();
8382 RUN();
8383
8384 ASSERT_EQUAL_64(3 * base, x11);
8385 ASSERT_EQUAL_64(4 * base, x12);
8386 ASSERT_EQUAL_64(1 * base, x13);
8387 ASSERT_EQUAL_64(2 * base, x14);
8388 ASSERT_EQUAL_64(((1 * base) >> kWRegSize) | ((2 * base) << kWRegSize), x15);
8389 ASSERT_EQUAL_64(2 * base, x14);
8390 ASSERT_EQUAL_32((4 * base) & kWRegMask, w16);
8391 ASSERT_EQUAL_32((4 * base) >> kWRegSize, w17);
8392
8393 ASSERT_EQUAL_FP64(rawbits_to_double(3 * base_d), d11);
8394 ASSERT_EQUAL_FP64(rawbits_to_double(4 * base_d), d12);
8395 ASSERT_EQUAL_FP64(rawbits_to_double(1 * base_d), d13);
8396 ASSERT_EQUAL_FP64(rawbits_to_double(2 * base_d), d14);
8397 ASSERT_EQUAL_FP64(
8398 rawbits_to_double((base_d >> kSRegSize) | ((2 * base_d) << kSRegSize)),
8399 d15);
8400 ASSERT_EQUAL_FP64(rawbits_to_double(2 * base_d), d14);
8401 ASSERT_EQUAL_FP32(rawbits_to_float((4 * base_d) & kSRegMask), s16);
8402 ASSERT_EQUAL_FP32(rawbits_to_float((4 * base_d) >> kSRegSize), s17);
8403
8404 TEARDOWN();
8405}
8406
8407
armvixlad96eda2013-06-14 11:42:37 +01008408// This enum is used only as an argument to the push-pop test helpers.
8409enum PushPopMethod {
8410 // Push or Pop using the Push and Pop methods, with blocks of up to four
8411 // registers. (Smaller blocks will be used if necessary.)
8412 PushPopByFour,
8413
8414 // Use Push<Size>RegList and Pop<Size>RegList to transfer the registers.
8415 PushPopRegList
8416};
8417
8418
8419// The maximum number of registers that can be used by the PushPopXReg* tests,
8420// where a reg_count field is provided.
8421static int const kPushPopXRegMaxRegCount = -1;
8422
8423// Test a simple push-pop pattern:
8424// * Claim <claim> bytes to set the stack alignment.
8425// * Push <reg_count> registers with size <reg_size>.
8426// * Clobber the register contents.
8427// * Pop <reg_count> registers to restore the original contents.
8428// * Drop <claim> bytes to restore the original stack pointer.
8429//
8430// Different push and pop methods can be specified independently to test for
8431// proper word-endian behaviour.
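// For example, PushPopXRegSimpleHelper(4, 8, kWRegSize, PushPopByFour,
// PushPopRegList) claims 8 bytes, pushes four W registers with a single
// Push() call, and pops them back with one PopSizeRegList() call; the tests
// below iterate over all combinations of count, claim and method.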
8432static void PushPopXRegSimpleHelper(int reg_count,
8433 int claim,
8434 int reg_size,
8435 PushPopMethod push_method,
8436 PushPopMethod pop_method) {
8437 SETUP();
8438
8439 START();
8440
8441 // Arbitrarily pick a register to use as a stack pointer.
8442 const Register& stack_pointer = x20;
8443 const RegList allowed = ~stack_pointer.Bit();
8444 if (reg_count == kPushPopXRegMaxRegCount) {
8445 reg_count = CountSetBits(allowed, kNumberOfRegisters);
8446 }
8447 // Work out which registers to use, based on reg_size.
8448 Register r[kNumberOfRegisters];
8449 Register x[kNumberOfRegisters];
8450 RegList list = PopulateRegisterArray(NULL, x, r, reg_size, reg_count,
8451 allowed);
8452
8453 // The literal base is chosen to have two useful properties:
8454 // * When multiplied by small values (such as a register index), this value
8455 // is clearly readable in the result.
8456 // * The value is not formed from repeating fixed-size smaller values, so it
8457 // can be used to detect endianness-related errors.
armvixlb0c8ae22014-03-21 14:03:59 +00008458 uint64_t literal_base = 0x0100001000100101;
armvixlad96eda2013-06-14 11:42:37 +01008459
8460 {
armvixlb0c8ae22014-03-21 14:03:59 +00008461 VIXL_ASSERT(__ StackPointer().Is(sp));
armvixlad96eda2013-06-14 11:42:37 +01008462 __ Mov(stack_pointer, __ StackPointer());
8463 __ SetStackPointer(stack_pointer);
8464
8465 int i;
8466
8467 // Initialize the registers.
8468 for (i = 0; i < reg_count; i++) {
8469 // Always write into the X register, to ensure that the upper word is
8470 // properly ignored by Push when testing W registers.
8471 __ Mov(x[i], literal_base * i);
8472 }
8473
8474 // Claim memory first, as requested.
8475 __ Claim(claim);
8476
8477 switch (push_method) {
8478 case PushPopByFour:
8479 // Push high-numbered registers first (to the highest addresses).
8480 for (i = reg_count; i >= 4; i -= 4) {
8481 __ Push(r[i-1], r[i-2], r[i-3], r[i-4]);
8482 }
8483 // Finish off the leftovers.
8484 switch (i) {
8485 case 3: __ Push(r[2], r[1], r[0]); break;
8486 case 2: __ Push(r[1], r[0]); break;
8487 case 1: __ Push(r[0]); break;
armvixlb0c8ae22014-03-21 14:03:59 +00008488 default: VIXL_ASSERT(i == 0); break;
armvixlad96eda2013-06-14 11:42:37 +01008489 }
8490 break;
8491 case PushPopRegList:
8492 __ PushSizeRegList(list, reg_size);
8493 break;
8494 }
8495
8496 // Clobber all the registers, to ensure that they get repopulated by Pop.
8497 Clobber(&masm, list);
8498
8499 switch (pop_method) {
8500 case PushPopByFour:
8501 // Pop low-numbered registers first (from the lowest addresses).
8502 for (i = 0; i <= (reg_count-4); i += 4) {
8503 __ Pop(r[i], r[i+1], r[i+2], r[i+3]);
8504 }
8505 // Finish off the leftovers.
8506 switch (reg_count - i) {
8507 case 3: __ Pop(r[i], r[i+1], r[i+2]); break;
8508 case 2: __ Pop(r[i], r[i+1]); break;
8509 case 1: __ Pop(r[i]); break;
armvixlb0c8ae22014-03-21 14:03:59 +00008510 default: VIXL_ASSERT(i == reg_count); break;
armvixlad96eda2013-06-14 11:42:37 +01008511 }
8512 break;
8513 case PushPopRegList:
8514 __ PopSizeRegList(list, reg_size);
8515 break;
8516 }
8517
8518 // Drop memory to restore stack_pointer.
8519 __ Drop(claim);
8520
8521 __ Mov(sp, __ StackPointer());
8522 __ SetStackPointer(sp);
8523 }
8524
8525 END();
8526
8527 RUN();
8528
8529 // Check that the register contents were preserved.
8530 // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
8531 // that the upper word was properly cleared by Pop.
armvixlb0c8ae22014-03-21 14:03:59 +00008532 literal_base &= (0xffffffffffffffff >> (64-reg_size));
armvixlad96eda2013-06-14 11:42:37 +01008533 for (int i = 0; i < reg_count; i++) {
8534 if (x[i].Is(xzr)) {
8535 ASSERT_EQUAL_64(0, x[i]);
8536 } else {
8537 ASSERT_EQUAL_64(literal_base * i, x[i]);
8538 }
8539 }
8540
8541 TEARDOWN();
8542}
8543
8544
8545TEST(push_pop_xreg_simple_32) {
8546 for (int claim = 0; claim <= 8; claim++) {
8547 for (int count = 0; count <= 8; count++) {
8548 PushPopXRegSimpleHelper(count, claim, kWRegSize,
8549 PushPopByFour, PushPopByFour);
8550 PushPopXRegSimpleHelper(count, claim, kWRegSize,
8551 PushPopByFour, PushPopRegList);
8552 PushPopXRegSimpleHelper(count, claim, kWRegSize,
8553 PushPopRegList, PushPopByFour);
8554 PushPopXRegSimpleHelper(count, claim, kWRegSize,
8555 PushPopRegList, PushPopRegList);
8556 }
8557 // Test with the maximum number of registers.
8558 PushPopXRegSimpleHelper(kPushPopXRegMaxRegCount,
8559 claim, kWRegSize, PushPopByFour, PushPopByFour);
8560 PushPopXRegSimpleHelper(kPushPopXRegMaxRegCount,
8561 claim, kWRegSize, PushPopByFour, PushPopRegList);
8562 PushPopXRegSimpleHelper(kPushPopXRegMaxRegCount,
8563 claim, kWRegSize, PushPopRegList, PushPopByFour);
8564 PushPopXRegSimpleHelper(kPushPopXRegMaxRegCount,
8565 claim, kWRegSize, PushPopRegList, PushPopRegList);
8566 }
8567}
8568
8569
8570TEST(push_pop_xreg_simple_64) {
8571 for (int claim = 0; claim <= 8; claim++) {
8572 for (int count = 0; count <= 8; count++) {
8573 PushPopXRegSimpleHelper(count, claim, kXRegSize,
8574 PushPopByFour, PushPopByFour);
8575 PushPopXRegSimpleHelper(count, claim, kXRegSize,
8576 PushPopByFour, PushPopRegList);
8577 PushPopXRegSimpleHelper(count, claim, kXRegSize,
8578 PushPopRegList, PushPopByFour);
8579 PushPopXRegSimpleHelper(count, claim, kXRegSize,
8580 PushPopRegList, PushPopRegList);
8581 }
8582 // Test with the maximum number of registers.
8583 PushPopXRegSimpleHelper(kPushPopXRegMaxRegCount,
8584 claim, kXRegSize, PushPopByFour, PushPopByFour);
8585 PushPopXRegSimpleHelper(kPushPopXRegMaxRegCount,
8586 claim, kXRegSize, PushPopByFour, PushPopRegList);
8587 PushPopXRegSimpleHelper(kPushPopXRegMaxRegCount,
8588 claim, kXRegSize, PushPopRegList, PushPopByFour);
8589 PushPopXRegSimpleHelper(kPushPopXRegMaxRegCount,
8590 claim, kXRegSize, PushPopRegList, PushPopRegList);
8591 }
8592}
8593
8594
8595// The maximum number of registers that can be used by the PushPopFPXReg* tests,
8596// where a reg_count field is provided.
8597static int const kPushPopFPXRegMaxRegCount = -1;
8598
8599// Test a simple push-pop pattern:
8600// * Claim <claim> bytes to set the stack alignment.
8601// * Push <reg_count> FP registers with size <reg_size>.
8602// * Clobber the register contents.
8603// * Pop <reg_count> FP registers to restore the original contents.
8604// * Drop <claim> bytes to restore the original stack pointer.
8605//
8606// Different push and pop methods can be specified independently to test for
8607// proper word-endian behaviour.
8608static void PushPopFPXRegSimpleHelper(int reg_count,
8609 int claim,
8610 int reg_size,
8611 PushPopMethod push_method,
8612 PushPopMethod pop_method) {
8613 SETUP();
8614
8615 START();
8616
8617 // We can use any floating-point register. None of them are reserved for
8618 // debug code, for example.
8619 static RegList const allowed = ~0;
8620 if (reg_count == kPushPopFPXRegMaxRegCount) {
8621 reg_count = CountSetBits(allowed, kNumberOfFPRegisters);
8622 }
8623 // Work out which registers to use, based on reg_size.
8624 FPRegister v[kNumberOfRegisters];
8625 FPRegister d[kNumberOfRegisters];
8626 RegList list = PopulateFPRegisterArray(NULL, d, v, reg_size, reg_count,
8627 allowed);
8628
8629 // Arbitrarily pick a register to use as a stack pointer.
8630 const Register& stack_pointer = x10;
8631
8632 // The literal base is chosen to have two useful properties:
8633 // * When multiplied (using an integer) by small values (such as a register
8634 // index), this value is clearly readable in the result.
8635 // * The value is not formed from repeating fixed-size smaller values, so it
8636 // can be used to detect endianness-related errors.
8637 // * It is never a floating-point NaN, and will therefore always compare
8638 // equal to itself.
armvixlb0c8ae22014-03-21 14:03:59 +00008639 uint64_t literal_base = 0x0100001000100101;
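  // (Viewed as a double, literal_base and its small multiples keep the
  // exponent field well below the all-ones pattern, so none of the values
  // pushed here can be a NaN or an infinity; the same holds for the low
  // words used as S registers.)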
armvixlad96eda2013-06-14 11:42:37 +01008640
8641 {
armvixlb0c8ae22014-03-21 14:03:59 +00008642 VIXL_ASSERT(__ StackPointer().Is(sp));
armvixlad96eda2013-06-14 11:42:37 +01008643 __ Mov(stack_pointer, __ StackPointer());
8644 __ SetStackPointer(stack_pointer);
8645
8646 int i;
8647
8648 // Initialize the registers, using X registers to load the literal.
8649 __ Mov(x0, 0);
8650 __ Mov(x1, literal_base);
8651 for (i = 0; i < reg_count; i++) {
8652 // Always write into the D register, to ensure that the upper word is
8653 // properly ignored by Push when testing S registers.
8654 __ Fmov(d[i], x0);
8655 // Calculate the next literal.
8656 __ Add(x0, x0, x1);
8657 }
8658
8659 // Claim memory first, as requested.
8660 __ Claim(claim);
8661
8662 switch (push_method) {
8663 case PushPopByFour:
8664 // Push high-numbered registers first (to the highest addresses).
8665 for (i = reg_count; i >= 4; i -= 4) {
8666 __ Push(v[i-1], v[i-2], v[i-3], v[i-4]);
8667 }
8668 // Finish off the leftovers.
8669 switch (i) {
8670 case 3: __ Push(v[2], v[1], v[0]); break;
8671 case 2: __ Push(v[1], v[0]); break;
8672 case 1: __ Push(v[0]); break;
armvixlb0c8ae22014-03-21 14:03:59 +00008673 default: VIXL_ASSERT(i == 0); break;
armvixlad96eda2013-06-14 11:42:37 +01008674 }
8675 break;
8676 case PushPopRegList:
8677 __ PushSizeRegList(list, reg_size, CPURegister::kFPRegister);
8678 break;
8679 }
8680
8681 // Clobber all the registers, to ensure that they get repopulated by Pop.
8682 ClobberFP(&masm, list);
8683
8684 switch (pop_method) {
8685 case PushPopByFour:
8686 // Pop low-numbered registers first (from the lowest addresses).
8687 for (i = 0; i <= (reg_count-4); i += 4) {
8688 __ Pop(v[i], v[i+1], v[i+2], v[i+3]);
8689 }
8690 // Finish off the leftovers.
8691 switch (reg_count - i) {
8692 case 3: __ Pop(v[i], v[i+1], v[i+2]); break;
8693 case 2: __ Pop(v[i], v[i+1]); break;
8694 case 1: __ Pop(v[i]); break;
armvixlb0c8ae22014-03-21 14:03:59 +00008695 default: VIXL_ASSERT(i == reg_count); break;
armvixlad96eda2013-06-14 11:42:37 +01008696 }
8697 break;
8698 case PushPopRegList:
8699 __ PopSizeRegList(list, reg_size, CPURegister::kFPRegister);
8700 break;
8701 }
8702
8703 // Drop memory to restore the stack pointer.
8704 __ Drop(claim);
8705
8706 __ Mov(sp, __ StackPointer());
8707 __ SetStackPointer(sp);
8708 }
8709
8710 END();
8711
8712 RUN();
8713
8714 // Check that the register contents were preserved.
8715 // Always use ASSERT_EQUAL_FP64, even when testing S registers, so we can
8716 // test that the upper word was properly cleared by Pop.
armvixlb0c8ae22014-03-21 14:03:59 +00008717 literal_base &= (0xffffffffffffffff >> (64-reg_size));
armvixlad96eda2013-06-14 11:42:37 +01008718 for (int i = 0; i < reg_count; i++) {
8719 uint64_t literal = literal_base * i;
8720 double expected;
8721 memcpy(&expected, &literal, sizeof(expected));
8722 ASSERT_EQUAL_FP64(expected, d[i]);
8723 }
8724
8725 TEARDOWN();
8726}
8727
8728
8729TEST(push_pop_fp_xreg_simple_32) {
8730 for (int claim = 0; claim <= 8; claim++) {
8731 for (int count = 0; count <= 8; count++) {
8732 PushPopFPXRegSimpleHelper(count, claim, kSRegSize,
8733 PushPopByFour, PushPopByFour);
8734 PushPopFPXRegSimpleHelper(count, claim, kSRegSize,
8735 PushPopByFour, PushPopRegList);
8736 PushPopFPXRegSimpleHelper(count, claim, kSRegSize,
8737 PushPopRegList, PushPopByFour);
8738 PushPopFPXRegSimpleHelper(count, claim, kSRegSize,
8739 PushPopRegList, PushPopRegList);
8740 }
8741 // Test with the maximum number of registers.
8742 PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount, claim, kSRegSize,
8743 PushPopByFour, PushPopByFour);
8744 PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount, claim, kSRegSize,
8745 PushPopByFour, PushPopRegList);
8746 PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount, claim, kSRegSize,
8747 PushPopRegList, PushPopByFour);
8748 PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount, claim, kSRegSize,
8749 PushPopRegList, PushPopRegList);
8750 }
8751}
8752
8753
8754TEST(push_pop_fp_xreg_simple_64) {
8755 for (int claim = 0; claim <= 8; claim++) {
8756 for (int count = 0; count <= 8; count++) {
8757 PushPopFPXRegSimpleHelper(count, claim, kDRegSize,
8758 PushPopByFour, PushPopByFour);
8759 PushPopFPXRegSimpleHelper(count, claim, kDRegSize,
8760 PushPopByFour, PushPopRegList);
8761 PushPopFPXRegSimpleHelper(count, claim, kDRegSize,
8762 PushPopRegList, PushPopByFour);
8763 PushPopFPXRegSimpleHelper(count, claim, kDRegSize,
8764 PushPopRegList, PushPopRegList);
8765 }
8766 // Test with the maximum number of registers.
8767 PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount, claim, kDRegSize,
8768 PushPopByFour, PushPopByFour);
8769 PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount, claim, kDRegSize,
8770 PushPopByFour, PushPopRegList);
8771 PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount, claim, kDRegSize,
8772 PushPopRegList, PushPopByFour);
8773 PushPopFPXRegSimpleHelper(kPushPopFPXRegMaxRegCount, claim, kDRegSize,
8774 PushPopRegList, PushPopRegList);
8775 }
8776}
8777
8778
8779// Push and pop data using an overlapping combination of Push/Pop and
8780// RegList-based methods.
8781static void PushPopXRegMixedMethodsHelper(int claim, int reg_size) {
8782 SETUP();
8783
8784 // Arbitrarily pick a register to use as a stack pointer.
8785 const Register& stack_pointer = x5;
8786 const RegList allowed = ~stack_pointer.Bit();
8787 // Work out which registers to use, based on reg_size.
8788 Register r[10];
8789 Register x[10];
8790 PopulateRegisterArray(NULL, x, r, reg_size, 10, allowed);
8791
8792 // Calculate some handy register lists.
8793 RegList r0_to_r3 = 0;
8794 for (int i = 0; i <= 3; i++) {
8795 r0_to_r3 |= x[i].Bit();
8796 }
8797 RegList r4_to_r5 = 0;
8798 for (int i = 4; i <= 5; i++) {
8799 r4_to_r5 |= x[i].Bit();
8800 }
8801 RegList r6_to_r9 = 0;
8802 for (int i = 6; i <= 9; i++) {
8803 r6_to_r9 |= x[i].Bit();
8804 }
8805
8806 // The literal base is chosen to have two useful properties:
8807 // * When multiplied by small values (such as a register index), this value
8808 // is clearly readable in the result.
8809 // * The value is not formed from repeating fixed-size smaller values, so it
8810 // can be used to detect endianness-related errors.
armvixlb0c8ae22014-03-21 14:03:59 +00008811 uint64_t literal_base = 0x0100001000100101;
armvixlad96eda2013-06-14 11:42:37 +01008812
8813 START();
8814 {
armvixlb0c8ae22014-03-21 14:03:59 +00008815 VIXL_ASSERT(__ StackPointer().Is(sp));
armvixlad96eda2013-06-14 11:42:37 +01008816 __ Mov(stack_pointer, __ StackPointer());
8817 __ SetStackPointer(stack_pointer);
8818
8819 // Claim memory first, as requested.
8820 __ Claim(claim);
8821
8822 __ Mov(x[3], literal_base * 3);
8823 __ Mov(x[2], literal_base * 2);
8824 __ Mov(x[1], literal_base * 1);
8825 __ Mov(x[0], literal_base * 0);
8826
8827 __ PushSizeRegList(r0_to_r3, reg_size);
8828 __ Push(r[3], r[2]);
8829
8830 Clobber(&masm, r0_to_r3);
8831 __ PopSizeRegList(r0_to_r3, reg_size);
8832
8833 __ Push(r[2], r[1], r[3], r[0]);
8834
8835 Clobber(&masm, r4_to_r5);
8836 __ Pop(r[4], r[5]);
8837 Clobber(&masm, r6_to_r9);
8838 __ Pop(r[6], r[7], r[8], r[9]);
8839
8840 // Drop memory to restore stack_pointer.
8841 __ Drop(claim);
8842
8843 __ Mov(sp, __ StackPointer());
8844 __ SetStackPointer(sp);
8845 }
8846
8847 END();
8848
8849 RUN();
8850
8851 // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
8852 // that the upper word was properly cleared by Pop.
armvixlb0c8ae22014-03-21 14:03:59 +00008853 literal_base &= (0xffffffffffffffff >> (64-reg_size));
armvixlad96eda2013-06-14 11:42:37 +01008854
8855 ASSERT_EQUAL_64(literal_base * 3, x[9]);
8856 ASSERT_EQUAL_64(literal_base * 2, x[8]);
8857 ASSERT_EQUAL_64(literal_base * 0, x[7]);
8858 ASSERT_EQUAL_64(literal_base * 3, x[6]);
8859 ASSERT_EQUAL_64(literal_base * 1, x[5]);
8860 ASSERT_EQUAL_64(literal_base * 2, x[4]);
8861
8862 TEARDOWN();
8863}
8864
8865
8866TEST(push_pop_xreg_mixed_methods_64) {
8867 for (int claim = 0; claim <= 8; claim++) {
8868 PushPopXRegMixedMethodsHelper(claim, kXRegSize);
8869 }
8870}
8871
8872
8873TEST(push_pop_xreg_mixed_methods_32) {
8874 for (int claim = 0; claim <= 8; claim++) {
8875 PushPopXRegMixedMethodsHelper(claim, kWRegSize);
8876 }
8877}
8878
8879
8880// Push and pop data using overlapping X- and W-sized quantities.
8881static void PushPopXRegWXOverlapHelper(int reg_count, int claim) {
8882 SETUP();
8883
8884 // Arbitrarily pick a register to use as a stack pointer.
8885 const Register& stack_pointer = x10;
8886 const RegList allowed = ~stack_pointer.Bit();
8887 if (reg_count == kPushPopXRegMaxRegCount) {
8888 reg_count = CountSetBits(allowed, kNumberOfRegisters);
8889 }
8890 // Work out which registers to use, based on reg_size.
8891 Register w[kNumberOfRegisters];
8892 Register x[kNumberOfRegisters];
8893 RegList list = PopulateRegisterArray(w, x, NULL, 0, reg_count, allowed);
8894
8895 // The number of W-sized slots we expect to pop. When we pop, we alternate
8896 // between W and X registers, so we need reg_count*1.5 W-sized slots.
8897 int const requested_w_slots = reg_count + reg_count / 2;
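  // (For example, reg_count = 7 gives requested_w_slots = 10: the seven pops
  // alternate W, X, W, X, ..., so three X registers and four W registers
  // consume 3 * 2 + 4 = 10 W-sized slots.)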
8898
8899 // Track what _should_ be on the stack, using W-sized slots.
8900 static int const kMaxWSlots = kNumberOfRegisters + kNumberOfRegisters / 2;
8901 uint32_t stack[kMaxWSlots];
8902 for (int i = 0; i < kMaxWSlots; i++) {
8903 stack[i] = 0xdeadbeef;
8904 }
8905
8906 // The literal base is chosen to have two useful properties:
8907 // * When multiplied by small values (such as a register index), this value
8908 // is clearly readable in the result.
8909 // * The value is not formed from repeating fixed-size smaller values, so it
8910 // can be used to detect endianness-related errors.
armvixlb0c8ae22014-03-21 14:03:59 +00008911 static uint64_t const literal_base = 0x0100001000100101;
armvixlad96eda2013-06-14 11:42:37 +01008912 static uint64_t const literal_base_hi = literal_base >> 32;
8913 static uint64_t const literal_base_lo = literal_base & 0xffffffff;
8914 static uint64_t const literal_base_w = literal_base & 0xffffffff;
8915
8916 START();
8917 {
armvixlb0c8ae22014-03-21 14:03:59 +00008918 VIXL_ASSERT(__ StackPointer().Is(sp));
armvixlad96eda2013-06-14 11:42:37 +01008919 __ Mov(stack_pointer, __ StackPointer());
8920 __ SetStackPointer(stack_pointer);
8921
8922 // Initialize the registers.
8923 for (int i = 0; i < reg_count; i++) {
8924 // Always write into the X register, to ensure that the upper word is
8925 // properly ignored by Push when testing W registers.
8926 __ Mov(x[i], literal_base * i);
8927 }
8928
8929 // Claim memory first, as requested.
8930 __ Claim(claim);
8931
8932 // The push-pop pattern is as follows:
8933 // Push: Pop:
8934 // x[0](hi) -> w[0]
8935 // x[0](lo) -> x[1](hi)
8936 // w[1] -> x[1](lo)
8937 // w[1] -> w[2]
8938 // x[2](hi) -> x[2](hi)
8939 // x[2](lo) -> x[2](lo)
8940 // x[2](hi) -> w[3]
8941 // x[2](lo) -> x[4](hi)
8942 // x[2](hi) -> x[4](lo)
8943 // x[2](lo) -> w[5]
8944 // w[3] -> x[5](hi)
8945 // w[3] -> x[6](lo)
8946 // w[3] -> w[7]
8947 // w[3] -> x[8](hi)
8948 // x[4](hi) -> x[8](lo)
8949 // x[4](lo) -> w[9]
8950 // ... pattern continues ...
8951 //
8952 // That is, registers are pushed starting with the lower numbers,
8953 // alternating between x and w registers, and pushing i%4+1 copies of each,
8954 // where i is the register number.
8955 // Registers are popped starting with the higher numbers one-by-one,
8956 // alternating between x and w registers, but only popping one at a time.
8957 //
8958 // This pattern provides a wide variety of alignment effects and overlaps.
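    // For example, with reg_count = 4 (requested_w_slots = 6), the push loop
    // emits x[0] once (2 slots), w[1] twice (2 slots) and x[2] three times
    // (6 slots), then stops with 10 live slots; the 4 excess slots are
    // dropped below, and the pop loop pops x[3], w[2], x[1] and w[0],
    // consuming exactly 6 slots.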
8959
8960 // ---- Push ----
8961
8962 int active_w_slots = 0;
8963 for (int i = 0; active_w_slots < requested_w_slots; i++) {
armvixlb0c8ae22014-03-21 14:03:59 +00008964 VIXL_ASSERT(i < reg_count);
armvixlad96eda2013-06-14 11:42:37 +01008965 // In order to test various arguments to PushMultipleTimes, and to try to
8966 // exercise different alignment and overlap effects, we push each
8967 // register a different number of times.
8968 int times = i % 4 + 1;
8969 if (i & 1) {
8970 // Push odd-numbered registers as W registers.
8971 __ PushMultipleTimes(times, w[i]);
8972 // Fill in the expected stack slots.
8973 for (int j = 0; j < times; j++) {
8974 if (w[i].Is(wzr)) {
8975 // The zero register always writes zeroes.
8976 stack[active_w_slots++] = 0;
8977 } else {
8978 stack[active_w_slots++] = literal_base_w * i;
8979 }
8980 }
8981 } else {
8982 // Push even-numbered registers as X registers.
8983 __ PushMultipleTimes(times, x[i]);
8984 // Fill in the expected stack slots.
8985 for (int j = 0; j < times; j++) {
8986 if (x[i].Is(xzr)) {
8987 // The zero register always writes zeroes.
8988 stack[active_w_slots++] = 0;
8989 stack[active_w_slots++] = 0;
8990 } else {
8991 stack[active_w_slots++] = literal_base_hi * i;
8992 stack[active_w_slots++] = literal_base_lo * i;
8993 }
8994 }
8995 }
8996 }
8997 // Because we were pushing several registers at a time, we probably pushed
8998 // more than we needed to.
8999 if (active_w_slots > requested_w_slots) {
9000 __ Drop((active_w_slots - requested_w_slots) * kWRegSizeInBytes);
9001 // Bump the number of active W-sized slots back to where it should be,
9002 // and fill the empty space with a dummy value.
9003 do {
9004 stack[active_w_slots--] = 0xdeadbeef;
9005 } while (active_w_slots > requested_w_slots);
9006 }
9007
9008 // ---- Pop ----
9009
9010 Clobber(&masm, list);
9011
9012 // If popping an even number of registers, the first one will be X-sized.
9013 // Otherwise, the first one will be W-sized.
9014 bool next_is_64 = !(reg_count & 1);
9015 for (int i = reg_count-1; i >= 0; i--) {
9016 if (next_is_64) {
9017 __ Pop(x[i]);
9018 active_w_slots -= 2;
9019 } else {
9020 __ Pop(w[i]);
9021 active_w_slots -= 1;
9022 }
9023 next_is_64 = !next_is_64;
9024 }
armvixlb0c8ae22014-03-21 14:03:59 +00009025 VIXL_ASSERT(active_w_slots == 0);
armvixlad96eda2013-06-14 11:42:37 +01009026
9027 // Drop memory to restore stack_pointer.
9028 __ Drop(claim);
9029
9030 __ Mov(sp, __ StackPointer());
9031 __ SetStackPointer(sp);
9032 }
9033
9034 END();
9035
9036 RUN();
9037
9038 int slot = 0;
9039 for (int i = 0; i < reg_count; i++) {
9040 // Even-numbered registers were written as W registers.
9041 // Odd-numbered registers were written as X registers.
9042 bool expect_64 = (i & 1);
9043 uint64_t expected;
9044
9045 if (expect_64) {
9046 uint64_t hi = stack[slot++];
9047 uint64_t lo = stack[slot++];
9048 expected = (hi << 32) | lo;
9049 } else {
9050 expected = stack[slot++];
9051 }
9052
9053 // Always use ASSERT_EQUAL_64, even when testing W registers, so we can
9054 // test that the upper word was properly cleared by Pop.
9055 if (x[i].Is(xzr)) {
9056 ASSERT_EQUAL_64(0, x[i]);
9057 } else {
9058 ASSERT_EQUAL_64(expected, x[i]);
9059 }
9060 }
armvixlb0c8ae22014-03-21 14:03:59 +00009061 VIXL_ASSERT(slot == requested_w_slots);
armvixlad96eda2013-06-14 11:42:37 +01009062
9063 TEARDOWN();
9064}
9065
9066
9067TEST(push_pop_xreg_wx_overlap) {
9068 for (int claim = 0; claim <= 8; claim++) {
9069 for (int count = 1; count <= 8; count++) {
9070 PushPopXRegWXOverlapHelper(count, claim);
9071 }
9072 // Test with the maximum number of registers.
9073 PushPopXRegWXOverlapHelper(kPushPopXRegMaxRegCount, claim);
9074 }
9075}
9076
9077
9078TEST(push_pop_sp) {
9079 SETUP();
9080
9081 START();
9082
armvixlb0c8ae22014-03-21 14:03:59 +00009083 VIXL_ASSERT(sp.Is(__ StackPointer()));
armvixlad96eda2013-06-14 11:42:37 +01009084
armvixlb0c8ae22014-03-21 14:03:59 +00009085 __ Mov(x3, 0x3333333333333333);
9086 __ Mov(x2, 0x2222222222222222);
9087 __ Mov(x1, 0x1111111111111111);
9088 __ Mov(x0, 0x0000000000000000);
armvixlad96eda2013-06-14 11:42:37 +01009089 __ Claim(2 * kXRegSizeInBytes);
9090 __ PushXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
9091 __ Push(x3, x2);
9092 __ PopXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
9093 __ Push(x2, x1, x3, x0);
9094 __ Pop(x4, x5);
9095 __ Pop(x6, x7, x8, x9);
9096
9097 __ Claim(2 * kXRegSizeInBytes);
9098 __ PushWRegList(w0.Bit() | w1.Bit() | w2.Bit() | w3.Bit());
9099 __ Push(w3, w1, w2, w0);
9100 __ PopWRegList(w10.Bit() | w11.Bit() | w12.Bit() | w13.Bit());
9101 __ Pop(w14, w15, w16, w17);
9102
9103 __ Claim(2 * kXRegSizeInBytes);
9104 __ Push(w2, w2, w1, w1);
9105 __ Push(x3, x3);
9106 __ Pop(w18, w19, w20, w21);
9107 __ Pop(x22, x23);
9108
9109 __ Claim(2 * kXRegSizeInBytes);
9110 __ PushXRegList(x1.Bit() | x22.Bit());
9111 __ PopXRegList(x24.Bit() | x26.Bit());
9112
9113 __ Claim(2 * kXRegSizeInBytes);
9114 __ PushWRegList(w1.Bit() | w2.Bit() | w4.Bit() | w22.Bit());
9115 __ PopWRegList(w25.Bit() | w27.Bit() | w28.Bit() | w29.Bit());
9116
9117 __ Claim(2 * kXRegSizeInBytes);
9118 __ PushXRegList(0);
9119 __ PopXRegList(0);
9120 __ PushXRegList(0xffffffff);
9121 __ PopXRegList(0xffffffff);
9122 __ Drop(12 * kXRegSizeInBytes);
9123 END();
9124
9125 RUN();
9126
armvixlb0c8ae22014-03-21 14:03:59 +00009127 ASSERT_EQUAL_64(0x1111111111111111, x3);
9128 ASSERT_EQUAL_64(0x0000000000000000, x2);
9129 ASSERT_EQUAL_64(0x3333333333333333, x1);
9130 ASSERT_EQUAL_64(0x2222222222222222, x0);
9131 ASSERT_EQUAL_64(0x3333333333333333, x9);
9132 ASSERT_EQUAL_64(0x2222222222222222, x8);
9133 ASSERT_EQUAL_64(0x0000000000000000, x7);
9134 ASSERT_EQUAL_64(0x3333333333333333, x6);
9135 ASSERT_EQUAL_64(0x1111111111111111, x5);
9136 ASSERT_EQUAL_64(0x2222222222222222, x4);
armvixlad96eda2013-06-14 11:42:37 +01009137
9138 ASSERT_EQUAL_32(0x11111111U, w13);
9139 ASSERT_EQUAL_32(0x33333333U, w12);
9140 ASSERT_EQUAL_32(0x00000000U, w11);
9141 ASSERT_EQUAL_32(0x22222222U, w10);
9142 ASSERT_EQUAL_32(0x11111111U, w17);
9143 ASSERT_EQUAL_32(0x00000000U, w16);
9144 ASSERT_EQUAL_32(0x33333333U, w15);
9145 ASSERT_EQUAL_32(0x22222222U, w14);
9146
9147 ASSERT_EQUAL_32(0x11111111U, w18);
9148 ASSERT_EQUAL_32(0x11111111U, w19);
9149 ASSERT_EQUAL_32(0x11111111U, w20);
9150 ASSERT_EQUAL_32(0x11111111U, w21);
armvixlb0c8ae22014-03-21 14:03:59 +00009151 ASSERT_EQUAL_64(0x3333333333333333, x22);
9152 ASSERT_EQUAL_64(0x0000000000000000, x23);
armvixlad96eda2013-06-14 11:42:37 +01009153
armvixlb0c8ae22014-03-21 14:03:59 +00009154 ASSERT_EQUAL_64(0x3333333333333333, x24);
9155 ASSERT_EQUAL_64(0x3333333333333333, x26);
armvixlad96eda2013-06-14 11:42:37 +01009156
9157 ASSERT_EQUAL_32(0x33333333U, w25);
9158 ASSERT_EQUAL_32(0x00000000U, w27);
9159 ASSERT_EQUAL_32(0x22222222U, w28);
9160 ASSERT_EQUAL_32(0x33333333U, w29);
9161 TEARDOWN();
9162}
9163
9164
9165TEST(noreg) {
9166 // This test doesn't generate any code, but it verifies some invariants
9167 // related to NoReg.
armvixlb0c8ae22014-03-21 14:03:59 +00009168 VIXL_CHECK(NoReg.Is(NoFPReg));
9169 VIXL_CHECK(NoFPReg.Is(NoReg));
9170 VIXL_CHECK(NoReg.Is(NoCPUReg));
9171 VIXL_CHECK(NoCPUReg.Is(NoReg));
9172 VIXL_CHECK(NoFPReg.Is(NoCPUReg));
9173 VIXL_CHECK(NoCPUReg.Is(NoFPReg));
armvixlad96eda2013-06-14 11:42:37 +01009174
armvixlb0c8ae22014-03-21 14:03:59 +00009175 VIXL_CHECK(NoReg.IsNone());
9176 VIXL_CHECK(NoFPReg.IsNone());
9177 VIXL_CHECK(NoCPUReg.IsNone());
armvixlad96eda2013-06-14 11:42:37 +01009178}
9179
9180
9181TEST(isvalid) {
9182 // This test doesn't generate any code, but it verifies some invariants
9183 // related to IsValid().
armvixlb0c8ae22014-03-21 14:03:59 +00009184 VIXL_CHECK(!NoReg.IsValid());
9185 VIXL_CHECK(!NoFPReg.IsValid());
9186 VIXL_CHECK(!NoCPUReg.IsValid());
armvixlad96eda2013-06-14 11:42:37 +01009187
armvixlb0c8ae22014-03-21 14:03:59 +00009188 VIXL_CHECK(x0.IsValid());
9189 VIXL_CHECK(w0.IsValid());
9190 VIXL_CHECK(x30.IsValid());
9191 VIXL_CHECK(w30.IsValid());
9192 VIXL_CHECK(xzr.IsValid());
9193 VIXL_CHECK(wzr.IsValid());
armvixlad96eda2013-06-14 11:42:37 +01009194
armvixlb0c8ae22014-03-21 14:03:59 +00009195 VIXL_CHECK(sp.IsValid());
9196 VIXL_CHECK(wsp.IsValid());
armvixlad96eda2013-06-14 11:42:37 +01009197
armvixlb0c8ae22014-03-21 14:03:59 +00009198 VIXL_CHECK(d0.IsValid());
9199 VIXL_CHECK(s0.IsValid());
9200 VIXL_CHECK(d31.IsValid());
9201 VIXL_CHECK(s31.IsValid());
armvixlad96eda2013-06-14 11:42:37 +01009202
armvixlb0c8ae22014-03-21 14:03:59 +00009203 VIXL_CHECK(x0.IsValidRegister());
9204 VIXL_CHECK(w0.IsValidRegister());
9205 VIXL_CHECK(xzr.IsValidRegister());
9206 VIXL_CHECK(wzr.IsValidRegister());
9207 VIXL_CHECK(sp.IsValidRegister());
9208 VIXL_CHECK(wsp.IsValidRegister());
9209 VIXL_CHECK(!x0.IsValidFPRegister());
9210 VIXL_CHECK(!w0.IsValidFPRegister());
9211 VIXL_CHECK(!xzr.IsValidFPRegister());
9212 VIXL_CHECK(!wzr.IsValidFPRegister());
9213 VIXL_CHECK(!sp.IsValidFPRegister());
9214 VIXL_CHECK(!wsp.IsValidFPRegister());
armvixlad96eda2013-06-14 11:42:37 +01009215
armvixlb0c8ae22014-03-21 14:03:59 +00009216 VIXL_CHECK(d0.IsValidFPRegister());
9217 VIXL_CHECK(s0.IsValidFPRegister());
9218 VIXL_CHECK(!d0.IsValidRegister());
9219 VIXL_CHECK(!s0.IsValidRegister());
armvixlad96eda2013-06-14 11:42:37 +01009220
9221 // Test the same as before, but using CPURegister types. This shouldn't make
9222 // any difference.
armvixlb0c8ae22014-03-21 14:03:59 +00009223 VIXL_CHECK(static_cast<CPURegister>(x0).IsValid());
9224 VIXL_CHECK(static_cast<CPURegister>(w0).IsValid());
9225 VIXL_CHECK(static_cast<CPURegister>(x30).IsValid());
9226 VIXL_CHECK(static_cast<CPURegister>(w30).IsValid());
9227 VIXL_CHECK(static_cast<CPURegister>(xzr).IsValid());
9228 VIXL_CHECK(static_cast<CPURegister>(wzr).IsValid());
armvixlad96eda2013-06-14 11:42:37 +01009229
armvixlb0c8ae22014-03-21 14:03:59 +00009230 VIXL_CHECK(static_cast<CPURegister>(sp).IsValid());
9231 VIXL_CHECK(static_cast<CPURegister>(wsp).IsValid());
armvixlad96eda2013-06-14 11:42:37 +01009232
armvixlb0c8ae22014-03-21 14:03:59 +00009233 VIXL_CHECK(static_cast<CPURegister>(d0).IsValid());
9234 VIXL_CHECK(static_cast<CPURegister>(s0).IsValid());
9235 VIXL_CHECK(static_cast<CPURegister>(d31).IsValid());
9236 VIXL_CHECK(static_cast<CPURegister>(s31).IsValid());
armvixlad96eda2013-06-14 11:42:37 +01009237
armvixlb0c8ae22014-03-21 14:03:59 +00009238 VIXL_CHECK(static_cast<CPURegister>(x0).IsValidRegister());
9239 VIXL_CHECK(static_cast<CPURegister>(w0).IsValidRegister());
9240 VIXL_CHECK(static_cast<CPURegister>(xzr).IsValidRegister());
9241 VIXL_CHECK(static_cast<CPURegister>(wzr).IsValidRegister());
9242 VIXL_CHECK(static_cast<CPURegister>(sp).IsValidRegister());
9243 VIXL_CHECK(static_cast<CPURegister>(wsp).IsValidRegister());
9244 VIXL_CHECK(!static_cast<CPURegister>(x0).IsValidFPRegister());
9245 VIXL_CHECK(!static_cast<CPURegister>(w0).IsValidFPRegister());
9246 VIXL_CHECK(!static_cast<CPURegister>(xzr).IsValidFPRegister());
9247 VIXL_CHECK(!static_cast<CPURegister>(wzr).IsValidFPRegister());
9248 VIXL_CHECK(!static_cast<CPURegister>(sp).IsValidFPRegister());
9249 VIXL_CHECK(!static_cast<CPURegister>(wsp).IsValidFPRegister());
armvixlad96eda2013-06-14 11:42:37 +01009250
armvixlb0c8ae22014-03-21 14:03:59 +00009251 VIXL_CHECK(static_cast<CPURegister>(d0).IsValidFPRegister());
9252 VIXL_CHECK(static_cast<CPURegister>(s0).IsValidFPRegister());
9253 VIXL_CHECK(!static_cast<CPURegister>(d0).IsValidRegister());
9254 VIXL_CHECK(!static_cast<CPURegister>(s0).IsValidRegister());
armvixlad96eda2013-06-14 11:42:37 +01009255}
9256
9257
9258TEST(printf) {
armvixlc68cb642014-09-25 18:49:30 +01009259 SETUP();
armvixlad96eda2013-06-14 11:42:37 +01009260 START();
9261
9262 char const * test_plain_string = "Printf with no arguments.\n";
9263 char const * test_substring = "'This is a substring.'";
9264 RegisterDump before;
9265
9266 // Initialize x29 to the value of the stack pointer. We will use x29 as a
9267 // temporary stack pointer later, and initializing it in this way allows the
9268 // RegisterDump check to pass.
9269 __ Mov(x29, __ StackPointer());
9270
9271 // Test simple integer arguments.
9272 __ Mov(x0, 1234);
9273 __ Mov(x1, 0x1234);
9274
9275 // Test simple floating-point arguments.
9276 __ Fmov(d0, 1.234);
9277
9278 // Test pointer (string) arguments.
9279 __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
9280
9281 // Test the maximum number of arguments, and sign extension.
9282 __ Mov(w3, 0xffffffff);
9283 __ Mov(w4, 0xffffffff);
9284 __ Mov(x5, 0xffffffffffffffff);
9285 __ Mov(x6, 0xffffffffffffffff);
9286 __ Fmov(s1, 1.234);
9287 __ Fmov(s2, 2.345);
9288 __ Fmov(d3, 3.456);
9289 __ Fmov(d4, 4.567);
9290
9291 // Test printing callee-saved registers.
9292 __ Mov(x28, 0x123456789abcdef);
9293 __ Fmov(d10, 42.0);
9294
9295 // Test with three arguments.
9296 __ Mov(x10, 3);
9297 __ Mov(x11, 40);
9298 __ Mov(x12, 500);
9299
armvixl5799d6c2014-05-01 11:05:00 +01009300 // A single character.
9301 __ Mov(w13, 'x');
9302
9303 // Check that we don't clobber any registers.
armvixlad96eda2013-06-14 11:42:37 +01009304 before.Dump(&masm);
9305
9306 __ Printf(test_plain_string); // NOLINT(runtime/printf)
armvixl5799d6c2014-05-01 11:05:00 +01009307 __ Printf("x0: %" PRId64 ", x1: 0x%08" PRIx64 "\n", x0, x1);
9308 __ Printf("w5: %" PRId32 ", x5: %" PRId64"\n", w5, x5);
armvixlad96eda2013-06-14 11:42:37 +01009309 __ Printf("d0: %f\n", d0);
9310 __ Printf("Test %%s: %s\n", x2);
9311 __ Printf("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
9312 "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
9313 w3, w4, x5, x6);
9314 __ Printf("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
armvixl5799d6c2014-05-01 11:05:00 +01009315 __ Printf("0x%" PRIx32 ", 0x%" PRIx64 "\n", w28, x28);
armvixlad96eda2013-06-14 11:42:37 +01009316 __ Printf("%g\n", d10);
armvixl5799d6c2014-05-01 11:05:00 +01009317 __ Printf("%%%%%s%%%c%%\n", x2, w13);
9318
9319 // Print the stack pointer (sp).
9320 __ Printf("StackPointer(sp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
9321 __ StackPointer(), __ StackPointer().W());
armvixlad96eda2013-06-14 11:42:37 +01009322
9323 // Test with a different stack pointer.
9324 const Register old_stack_pointer = __ StackPointer();
armvixl5799d6c2014-05-01 11:05:00 +01009325 __ Mov(x29, old_stack_pointer);
armvixlad96eda2013-06-14 11:42:37 +01009326 __ SetStackPointer(x29);
armvixl5799d6c2014-05-01 11:05:00 +01009327 // Print the stack pointer (not sp).
9328 __ Printf("StackPointer(not sp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
9329 __ StackPointer(), __ StackPointer().W());
9330 __ Mov(old_stack_pointer, __ StackPointer());
armvixlad96eda2013-06-14 11:42:37 +01009331 __ SetStackPointer(old_stack_pointer);
9332
armvixl5799d6c2014-05-01 11:05:00 +01009333 // Test with three arguments.
armvixlad96eda2013-06-14 11:42:37 +01009334 __ Printf("3=%u, 4=%u, 5=%u\n", x10, x11, x12);
9335
armvixl5799d6c2014-05-01 11:05:00 +01009336 // Mixed argument types.
9337 __ Printf("w3: %" PRIu32 ", s1: %f, x5: %" PRIu64 ", d3: %f\n",
9338 w3, s1, x5, d3);
9339 __ Printf("s1: %f, d3: %f, w3: %" PRId32 ", x5: %" PRId64 "\n",
9340 s1, d3, w3, x5);
9341
armvixlad96eda2013-06-14 11:42:37 +01009342 END();
9343 RUN();
9344
9345 // We cannot easily test the output of the Printf sequences, and because
9346 // Printf preserves all registers by default, we can't look at the number of
9347 // bytes that were printed. However, the printf_no_preserve test should check
9348 // that, and here we just test that we didn't clobber any registers.
9349 ASSERT_EQUAL_REGISTERS(before);
9350
9351 TEARDOWN();
armvixlad96eda2013-06-14 11:42:37 +01009352}
9353
9354
9355TEST(printf_no_preserve) {
armvixlad96eda2013-06-14 11:42:37 +01009356 SETUP();
9357 START();
9358
9359 char const * test_plain_string = "Printf with no arguments.\n";
9360 char const * test_substring = "'This is a substring.'";
9361
9362 __ PrintfNoPreserve(test_plain_string);
9363 __ Mov(x19, x0);
9364
9365 // Test simple integer arguments.
9366 __ Mov(x0, 1234);
9367 __ Mov(x1, 0x1234);
9368 __ PrintfNoPreserve("x0: %" PRId64", x1: 0x%08" PRIx64 "\n", x0, x1);
9369 __ Mov(x20, x0);
9370
9371 // Test simple floating-point arguments.
9372 __ Fmov(d0, 1.234);
9373 __ PrintfNoPreserve("d0: %f\n", d0);
9374 __ Mov(x21, x0);
9375
9376 // Test pointer (string) arguments.
9377 __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
9378 __ PrintfNoPreserve("Test %%s: %s\n", x2);
9379 __ Mov(x22, x0);
9380
9381 // Test the maximum number of arguments, and sign extension.
9382 __ Mov(w3, 0xffffffff);
9383 __ Mov(w4, 0xffffffff);
9384 __ Mov(x5, 0xffffffffffffffff);
9385 __ Mov(x6, 0xffffffffffffffff);
9386 __ PrintfNoPreserve("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
9387 "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
9388 w3, w4, x5, x6);
9389 __ Mov(x23, x0);
9390
9391 __ Fmov(s1, 1.234);
9392 __ Fmov(s2, 2.345);
9393 __ Fmov(d3, 3.456);
9394 __ Fmov(d4, 4.567);
9395 __ PrintfNoPreserve("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
9396 __ Mov(x24, x0);
9397
9398 // Test printing callee-saved registers.
9399 __ Mov(x28, 0x123456789abcdef);
armvixl5799d6c2014-05-01 11:05:00 +01009400 __ PrintfNoPreserve("0x%" PRIx32 ", 0x%" PRIx64 "\n", w28, x28);
armvixlad96eda2013-06-14 11:42:37 +01009401 __ Mov(x25, x0);
9402
9403 __ Fmov(d10, 42.0);
9404 __ PrintfNoPreserve("%g\n", d10);
9405 __ Mov(x26, x0);
9406
9407 // Test with a different stack pointer.
9408 const Register old_stack_pointer = __ StackPointer();
9409 __ Mov(x29, old_stack_pointer);
9410 __ SetStackPointer(x29);
armvixl5799d6c2014-05-01 11:05:00 +01009411 // Print the stack pointer (not sp).
9412 __ PrintfNoPreserve(
9413 "StackPointer(not sp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
9414 __ StackPointer(), __ StackPointer().W());
armvixlad96eda2013-06-14 11:42:37 +01009415 __ Mov(x27, x0);
armvixlad96eda2013-06-14 11:42:37 +01009416 __ Mov(old_stack_pointer, __ StackPointer());
9417 __ SetStackPointer(old_stack_pointer);
9418
9419 // Test with three arguments.
9420 __ Mov(x3, 3);
9421 __ Mov(x4, 40);
9422 __ Mov(x5, 500);
9423 __ PrintfNoPreserve("3=%u, 4=%u, 5=%u\n", x3, x4, x5);
9424 __ Mov(x28, x0);
9425
armvixl5799d6c2014-05-01 11:05:00 +01009426 // Mixed argument types.
9427 __ Mov(w3, 0xffffffff);
9428 __ Fmov(s1, 1.234);
9429 __ Mov(x5, 0xffffffffffffffff);
9430 __ Fmov(d3, 3.456);
9431 __ PrintfNoPreserve("w3: %" PRIu32 ", s1: %f, x5: %" PRIu64 ", d3: %f\n",
9432 w3, s1, x5, d3);
9433 __ Mov(x29, x0);
9434
armvixlad96eda2013-06-14 11:42:37 +01009435 END();
9436 RUN();
9437
9438 // We cannot easily test the exact output of the Printf sequences, but we can
9439 // use the return code to check that the string length was correct.
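  // (Each expected value below is the length, including the trailing newline,
  // of the string shown in the comment above the assertion.)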
9440
9441 // Printf with no arguments.
9442 ASSERT_EQUAL_64(strlen(test_plain_string), x19);
9443 // x0: 1234, x1: 0x00001234
9444 ASSERT_EQUAL_64(25, x20);
9445 // d0: 1.234000
9446 ASSERT_EQUAL_64(13, x21);
9447 // Test %s: 'This is a substring.'
9448 ASSERT_EQUAL_64(32, x22);
9449 // w3(uint32): 4294967295
9450 // w4(int32): -1
9451 // x5(uint64): 18446744073709551615
9452 // x6(int64): -1
9453 ASSERT_EQUAL_64(23 + 14 + 33 + 14, x23);
9454 // %f: 1.234000
9455 // %g: 2.345
9456 // %e: 3.456000e+00
9457 // %E: 4.567000E+00
9458 ASSERT_EQUAL_64(13 + 10 + 17 + 17, x24);
armvixl5799d6c2014-05-01 11:05:00 +01009459 // 0x89abcdef, 0x123456789abcdef
9460 ASSERT_EQUAL_64(30, x25);
armvixlad96eda2013-06-14 11:42:37 +01009461 // 42
9462 ASSERT_EQUAL_64(3, x26);
armvixl5799d6c2014-05-01 11:05:00 +01009463 // StackPointer(not sp): 0x00007fb037ae2370, 0x37ae2370
armvixlad96eda2013-06-14 11:42:37 +01009464 // Note: This is an example value, but the field width is fixed here so the
9465 // string length is still predictable.
armvixl5799d6c2014-05-01 11:05:00 +01009466 ASSERT_EQUAL_64(53, x27);
armvixlad96eda2013-06-14 11:42:37 +01009467 // 3=3, 4=40, 5=500
9468 ASSERT_EQUAL_64(17, x28);
armvixl5799d6c2014-05-01 11:05:00 +01009469 // w3: 4294967295, s1: 1.234000, x5: 18446744073709551615, d3: 3.456000
9470 ASSERT_EQUAL_64(69, x29);
armvixlad96eda2013-06-14 11:42:37 +01009471
9472 TEARDOWN();
armvixlad96eda2013-06-14 11:42:37 +01009473}
9474
9475
9476#ifndef USE_SIMULATOR
9477TEST(trace) {
9478 // The Trace helper should not generate any code unless the simulator (or
9479 // debugger) is being used.
9480 SETUP();
9481 START();
9482
9483 Label start;
9484 __ Bind(&start);
9485 __ Trace(LOG_ALL, TRACE_ENABLE);
9486 __ Trace(LOG_ALL, TRACE_DISABLE);
armvixlb0c8ae22014-03-21 14:03:59 +00009487 VIXL_CHECK(__ SizeOfCodeGeneratedSince(&start) == 0);
armvixlad96eda2013-06-14 11:42:37 +01009488
9489 END();
9490 TEARDOWN();
9491}
9492#endif
9493
9494
9495#ifndef USE_SIMULATOR
9496TEST(log) {
9497 // The Log helper should not generate any code unless the simulator (or
9498 // debugger) is being used.
9499 SETUP();
9500 START();
9501
9502 Label start;
9503 __ Bind(&start);
9504 __ Log(LOG_ALL);
armvixlb0c8ae22014-03-21 14:03:59 +00009505 VIXL_CHECK(__ SizeOfCodeGeneratedSince(&start) == 0);
armvixlad96eda2013-06-14 11:42:37 +01009506
9507 END();
9508 TEARDOWN();
9509}
9510#endif
9511
9512
9513TEST(instruction_accurate_scope) {
9514 SETUP();
9515 START();
9516
9517 // By default macro instructions are allowed.
armvixlb0c8ae22014-03-21 14:03:59 +00009518 VIXL_ASSERT(masm.AllowMacroInstructions());
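  // (Note: an InstructionAccurateScope reserves space for the stated number
  // of instructions and, in debug builds, checks that exactly that many are
  // generated; macro instructions are disallowed inside it because they can
  // expand to a variable number of instructions.)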
armvixlad96eda2013-06-14 11:42:37 +01009519 {
armvixlc68cb642014-09-25 18:49:30 +01009520 InstructionAccurateScope scope1(&masm, 2);
armvixlb0c8ae22014-03-21 14:03:59 +00009521 VIXL_ASSERT(!masm.AllowMacroInstructions());
armvixlc68cb642014-09-25 18:49:30 +01009522 __ nop();
armvixlad96eda2013-06-14 11:42:37 +01009523 {
armvixlc68cb642014-09-25 18:49:30 +01009524 InstructionAccurateScope scope2(&masm, 1);
armvixlb0c8ae22014-03-21 14:03:59 +00009525 VIXL_ASSERT(!masm.AllowMacroInstructions());
armvixlc68cb642014-09-25 18:49:30 +01009526 __ nop();
armvixlad96eda2013-06-14 11:42:37 +01009527 }
armvixlb0c8ae22014-03-21 14:03:59 +00009528 VIXL_ASSERT(!masm.AllowMacroInstructions());
armvixlad96eda2013-06-14 11:42:37 +01009529 }
armvixlb0c8ae22014-03-21 14:03:59 +00009530 VIXL_ASSERT(masm.AllowMacroInstructions());
armvixlad96eda2013-06-14 11:42:37 +01009531
9532 {
9533 InstructionAccurateScope scope(&masm, 2);
9534 __ add(x0, x0, x0);
9535 __ sub(x0, x0, x0);
9536 }
9537
9538 END();
9539 RUN();
9540 TEARDOWN();
9541}
9542
9543
9544TEST(blr_lr) {
9545 // A simple test to check that the simulator correctly handles "blr lr".
9546 SETUP();
9547
9548 START();
9549 Label target;
9550 Label end;
9551
9552 __ Mov(x0, 0x0);
9553 __ Adr(lr, &target);
9554
9555 __ Blr(lr);
9556 __ Mov(x0, 0xdeadbeef);
9557 __ B(&end);
9558
9559 __ Bind(&target);
9560 __ Mov(x0, 0xc001c0de);
9561
9562 __ Bind(&end);
9563 END();
9564
9565 RUN();
9566
9567 ASSERT_EQUAL_64(0xc001c0de, x0);
9568
9569 TEARDOWN();
9570}
9571
armvixlf37fdc02014-02-05 13:22:16 +00009572
9573TEST(barriers) {
9574 // Generate all supported barriers. This is just a smoke test.
9575 SETUP();
9576
9577 START();
9578
9579 // DMB
9580 __ Dmb(FullSystem, BarrierAll);
9581 __ Dmb(FullSystem, BarrierReads);
9582 __ Dmb(FullSystem, BarrierWrites);
9583 __ Dmb(FullSystem, BarrierOther);
9584
9585 __ Dmb(InnerShareable, BarrierAll);
9586 __ Dmb(InnerShareable, BarrierReads);
9587 __ Dmb(InnerShareable, BarrierWrites);
9588 __ Dmb(InnerShareable, BarrierOther);
9589
9590 __ Dmb(NonShareable, BarrierAll);
9591 __ Dmb(NonShareable, BarrierReads);
9592 __ Dmb(NonShareable, BarrierWrites);
9593 __ Dmb(NonShareable, BarrierOther);
9594
9595 __ Dmb(OuterShareable, BarrierAll);
9596 __ Dmb(OuterShareable, BarrierReads);
9597 __ Dmb(OuterShareable, BarrierWrites);
9598 __ Dmb(OuterShareable, BarrierOther);
9599
9600 // DSB
9601 __ Dsb(FullSystem, BarrierAll);
9602 __ Dsb(FullSystem, BarrierReads);
9603 __ Dsb(FullSystem, BarrierWrites);
9604 __ Dsb(FullSystem, BarrierOther);
9605
9606 __ Dsb(InnerShareable, BarrierAll);
9607 __ Dsb(InnerShareable, BarrierReads);
9608 __ Dsb(InnerShareable, BarrierWrites);
9609 __ Dsb(InnerShareable, BarrierOther);
9610
9611 __ Dsb(NonShareable, BarrierAll);
9612 __ Dsb(NonShareable, BarrierReads);
9613 __ Dsb(NonShareable, BarrierWrites);
9614 __ Dsb(NonShareable, BarrierOther);
9615
9616 __ Dsb(OuterShareable, BarrierAll);
9617 __ Dsb(OuterShareable, BarrierReads);
9618 __ Dsb(OuterShareable, BarrierWrites);
9619 __ Dsb(OuterShareable, BarrierOther);
9620
9621 // ISB
9622 __ Isb();
9623
9624 END();
9625
9626 RUN();
9627
9628 TEARDOWN();
9629}
9630
armvixlb0c8ae22014-03-21 14:03:59 +00009631
9632TEST(process_nan_double) {
9633 // Make sure that NaN propagation works correctly.
9634 double sn = rawbits_to_double(0x7ff5555511111111);
9635 double qn = rawbits_to_double(0x7ffaaaaa11111111);
9636 VIXL_ASSERT(IsSignallingNaN(sn));
9637 VIXL_ASSERT(IsQuietNaN(qn));
9638
9639 // The input NaNs after passing through ProcessNaN.
9640 double sn_proc = rawbits_to_double(0x7ffd555511111111);
9641 double qn_proc = qn;
9642 VIXL_ASSERT(IsQuietNaN(sn_proc));
9643 VIXL_ASSERT(IsQuietNaN(qn_proc));
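  // (ProcessNaN quietens a signalling NaN by setting the most significant
  // fraction bit, bit 51 for doubles, which is why 0x7ff5555511111111 becomes
  // 0x7ffd555511111111 while the rest of the payload is preserved.)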
9644
9645 SETUP();
9646 START();
9647
9648 // Execute a number of instructions which all use ProcessNaN, and check that
9649 // they all handle the NaN correctly.
9650 __ Fmov(d0, sn);
9651 __ Fmov(d10, qn);
9652
9653 // Operations that always propagate NaNs unchanged, even signalling NaNs.
9654 // - Signalling NaN
9655 __ Fmov(d1, d0);
9656 __ Fabs(d2, d0);
9657 __ Fneg(d3, d0);
9658 // - Quiet NaN
9659 __ Fmov(d11, d10);
9660 __ Fabs(d12, d10);
9661 __ Fneg(d13, d10);
9662
9663 // Operations that use ProcessNaN.
9664 // - Signalling NaN
9665 __ Fsqrt(d4, d0);
9666 __ Frinta(d5, d0);
9667 __ Frintn(d6, d0);
9668 __ Frintz(d7, d0);
9669 // - Quiet NaN
9670 __ Fsqrt(d14, d10);
9671 __ Frinta(d15, d10);
9672 __ Frintn(d16, d10);
9673 __ Frintz(d17, d10);
9674
9675 // The behaviour of fcvt is checked in TEST(fcvt_sd).
9676
9677 END();
9678 RUN();
9679
9680 uint64_t qn_raw = double_to_rawbits(qn);
9681 uint64_t sn_raw = double_to_rawbits(sn);
9682
9683 // - Signalling NaN
9684 ASSERT_EQUAL_FP64(sn, d1);
9685 ASSERT_EQUAL_FP64(rawbits_to_double(sn_raw & ~kDSignMask), d2);
9686 ASSERT_EQUAL_FP64(rawbits_to_double(sn_raw ^ kDSignMask), d3);
9687 // - Quiet NaN
9688 ASSERT_EQUAL_FP64(qn, d11);
9689 ASSERT_EQUAL_FP64(rawbits_to_double(qn_raw & ~kDSignMask), d12);
9690 ASSERT_EQUAL_FP64(rawbits_to_double(qn_raw ^ kDSignMask), d13);
9691
9692 // - Signalling NaN
9693 ASSERT_EQUAL_FP64(sn_proc, d4);
9694 ASSERT_EQUAL_FP64(sn_proc, d5);
9695 ASSERT_EQUAL_FP64(sn_proc, d6);
9696 ASSERT_EQUAL_FP64(sn_proc, d7);
9697 // - Quiet NaN
9698 ASSERT_EQUAL_FP64(qn_proc, d14);
9699 ASSERT_EQUAL_FP64(qn_proc, d15);
9700 ASSERT_EQUAL_FP64(qn_proc, d16);
9701 ASSERT_EQUAL_FP64(qn_proc, d17);
9702
9703 TEARDOWN();
9704}
9705
9706
9707TEST(process_nan_float) {
9708 // Make sure that NaN propagation works correctly.
9709 float sn = rawbits_to_float(0x7f951111);
9710 float qn = rawbits_to_float(0x7fea1111);
9711 VIXL_ASSERT(IsSignallingNaN(sn));
9712 VIXL_ASSERT(IsQuietNaN(qn));
9713
9714 // The input NaNs after passing through ProcessNaN.
9715 float sn_proc = rawbits_to_float(0x7fd51111);
9716 float qn_proc = qn;
9717 VIXL_ASSERT(IsQuietNaN(sn_proc));
9718 VIXL_ASSERT(IsQuietNaN(qn_proc));
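  // (As for doubles, the signalling NaN is quietened by setting the most
  // significant fraction bit, bit 22 for floats: 0x7f951111 becomes
  // 0x7fd51111.)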
9719
9720 SETUP();
9721 START();
9722
9723 // Execute a number of instructions which all use ProcessNaN, and check that
9724 // they all handle the NaN correctly.
9725 __ Fmov(s0, sn);
9726 __ Fmov(s10, qn);
9727
9728 // Operations that always propagate NaNs unchanged, even signalling NaNs.
9729 // - Signalling NaN
9730 __ Fmov(s1, s0);
9731 __ Fabs(s2, s0);
9732 __ Fneg(s3, s0);
9733 // - Quiet NaN
9734 __ Fmov(s11, s10);
9735 __ Fabs(s12, s10);
9736 __ Fneg(s13, s10);
9737
9738 // Operations that use ProcessNaN.
9739 // - Signalling NaN
9740 __ Fsqrt(s4, s0);
9741 __ Frinta(s5, s0);
9742 __ Frintn(s6, s0);
9743 __ Frintz(s7, s0);
9744 // - Quiet NaN
9745 __ Fsqrt(s14, s10);
9746 __ Frinta(s15, s10);
9747 __ Frintn(s16, s10);
9748 __ Frintz(s17, s10);
9749
9750 // The behaviour of fcvt is checked in TEST(fcvt_sd).
9751
9752 END();
9753 RUN();
9754
9755 uint32_t qn_raw = float_to_rawbits(qn);
9756 uint32_t sn_raw = float_to_rawbits(sn);
9757
9758 // - Signalling NaN
9759 ASSERT_EQUAL_FP32(sn, s1);
9760 ASSERT_EQUAL_FP32(rawbits_to_float(sn_raw & ~kSSignMask), s2);
9761 ASSERT_EQUAL_FP32(rawbits_to_float(sn_raw ^ kSSignMask), s3);
9762 // - Quiet NaN
9763 ASSERT_EQUAL_FP32(qn, s11);
9764 ASSERT_EQUAL_FP32(rawbits_to_float(qn_raw & ~kSSignMask), s12);
9765 ASSERT_EQUAL_FP32(rawbits_to_float(qn_raw ^ kSSignMask), s13);
9766
9767 // - Signalling NaN
9768 ASSERT_EQUAL_FP32(sn_proc, s4);
9769 ASSERT_EQUAL_FP32(sn_proc, s5);
9770 ASSERT_EQUAL_FP32(sn_proc, s6);
9771 ASSERT_EQUAL_FP32(sn_proc, s7);
9772 // - Quiet NaN
9773 ASSERT_EQUAL_FP32(qn_proc, s14);
9774 ASSERT_EQUAL_FP32(qn_proc, s15);
9775 ASSERT_EQUAL_FP32(qn_proc, s16);
9776 ASSERT_EQUAL_FP32(qn_proc, s17);
9777
9778 TEARDOWN();
9779}
9780
9781
9782static void ProcessNaNsHelper(double n, double m, double expected) {
9783 VIXL_ASSERT(isnan(n) || isnan(m));
9784 VIXL_ASSERT(isnan(expected));
9785
9786 SETUP();
9787 START();
9788
9789 // Execute a number of instructions which all use ProcessNaNs, and check that
9790 // they all propagate NaNs correctly.
9791 __ Fmov(d0, n);
9792 __ Fmov(d1, m);
9793
9794 __ Fadd(d2, d0, d1);
9795 __ Fsub(d3, d0, d1);
9796 __ Fmul(d4, d0, d1);
9797 __ Fdiv(d5, d0, d1);
9798 __ Fmax(d6, d0, d1);
9799 __ Fmin(d7, d0, d1);
9800
9801 END();
9802 RUN();
9803
9804 ASSERT_EQUAL_FP64(expected, d2);
9805 ASSERT_EQUAL_FP64(expected, d3);
9806 ASSERT_EQUAL_FP64(expected, d4);
9807 ASSERT_EQUAL_FP64(expected, d5);
9808 ASSERT_EQUAL_FP64(expected, d6);
9809 ASSERT_EQUAL_FP64(expected, d7);
9810
9811 TEARDOWN();
9812}
9813
9814
9815TEST(process_nans_double) {
9816 // Make sure that NaN propagation works correctly.
9817 double sn = rawbits_to_double(0x7ff5555511111111);
9818 double sm = rawbits_to_double(0x7ff5555522222222);
9819 double qn = rawbits_to_double(0x7ffaaaaa11111111);
9820 double qm = rawbits_to_double(0x7ffaaaaa22222222);
9821 VIXL_ASSERT(IsSignallingNaN(sn));
9822 VIXL_ASSERT(IsSignallingNaN(sm));
9823 VIXL_ASSERT(IsQuietNaN(qn));
9824 VIXL_ASSERT(IsQuietNaN(qm));
9825
9826 // The input NaNs after passing through ProcessNaN.
9827 double sn_proc = rawbits_to_double(0x7ffd555511111111);
9828 double sm_proc = rawbits_to_double(0x7ffd555522222222);
9829 double qn_proc = qn;
9830 double qm_proc = qm;
9831 VIXL_ASSERT(IsQuietNaN(sn_proc));
9832 VIXL_ASSERT(IsQuietNaN(sm_proc));
9833 VIXL_ASSERT(IsQuietNaN(qn_proc));
9834 VIXL_ASSERT(IsQuietNaN(qm_proc));
9835
9836 // Quiet NaNs are propagated.
9837 ProcessNaNsHelper(qn, 0, qn_proc);
9838 ProcessNaNsHelper(0, qm, qm_proc);
9839 ProcessNaNsHelper(qn, qm, qn_proc);
9840
9841 // Signalling NaNs are propagated, and made quiet.
9842 ProcessNaNsHelper(sn, 0, sn_proc);
9843 ProcessNaNsHelper(0, sm, sm_proc);
9844 ProcessNaNsHelper(sn, sm, sn_proc);
9845
9846 // Signalling NaNs take precedence over quiet NaNs.
9847 ProcessNaNsHelper(sn, qm, sn_proc);
9848 ProcessNaNsHelper(qn, sm, sm_proc);
9849 ProcessNaNsHelper(sn, sm, sn_proc);
9850}
9851
9852
9853static void ProcessNaNsHelper(float n, float m, float expected) {
9854 VIXL_ASSERT(isnan(n) || isnan(m));
9855 VIXL_ASSERT(isnan(expected));
9856
9857 SETUP();
9858 START();
9859
9860 // Execute a number of instructions which all use ProcessNaNs, and check that
9861 // they all propagate NaNs correctly.
9862 __ Fmov(s0, n);
9863 __ Fmov(s1, m);
9864
9865 __ Fadd(s2, s0, s1);
9866 __ Fsub(s3, s0, s1);
9867 __ Fmul(s4, s0, s1);
9868 __ Fdiv(s5, s0, s1);
9869 __ Fmax(s6, s0, s1);
9870 __ Fmin(s7, s0, s1);
9871
9872 END();
9873 RUN();
9874
9875 ASSERT_EQUAL_FP32(expected, s2);
9876 ASSERT_EQUAL_FP32(expected, s3);
9877 ASSERT_EQUAL_FP32(expected, s4);
9878 ASSERT_EQUAL_FP32(expected, s5);
9879 ASSERT_EQUAL_FP32(expected, s6);
9880 ASSERT_EQUAL_FP32(expected, s7);
9881
9882 TEARDOWN();
9883}
9884
9885
9886TEST(process_nans_float) {
9887 // Make sure that NaN propagation works correctly.
9888 float sn = rawbits_to_float(0x7f951111);
9889 float sm = rawbits_to_float(0x7f952222);
9890 float qn = rawbits_to_float(0x7fea1111);
9891 float qm = rawbits_to_float(0x7fea2222);
9892 VIXL_ASSERT(IsSignallingNaN(sn));
9893 VIXL_ASSERT(IsSignallingNaN(sm));
9894 VIXL_ASSERT(IsQuietNaN(qn));
9895 VIXL_ASSERT(IsQuietNaN(qm));
9896
9897 // The input NaNs after passing through ProcessNaN.
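      // As for doubles, ProcessNaN sets the most significant fraction bit (bit 22
      // for floats) of a signalling NaN and leaves quiet NaNs unchanged.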
9898 float sn_proc = rawbits_to_float(0x7fd51111);
9899 float sm_proc = rawbits_to_float(0x7fd52222);
9900 float qn_proc = qn;
9901 float qm_proc = qm;
9902 VIXL_ASSERT(IsQuietNaN(sn_proc));
9903 VIXL_ASSERT(IsQuietNaN(sm_proc));
9904 VIXL_ASSERT(IsQuietNaN(qn_proc));
9905 VIXL_ASSERT(IsQuietNaN(qm_proc));
9906
9907 // Quiet NaNs are propagated.
9908 ProcessNaNsHelper(qn, 0, qn_proc);
9909 ProcessNaNsHelper(0, qm, qm_proc);
9910 ProcessNaNsHelper(qn, qm, qn_proc);
9911
9912 // Signalling NaNs are propagated, and made quiet.
9913 ProcessNaNsHelper(sn, 0, sn_proc);
9914 ProcessNaNsHelper(0, sm, sm_proc);
9915 ProcessNaNsHelper(sn, sm, sn_proc);
9916
9917 // Signalling NaNs take precedence over quiet NaNs.
9918 ProcessNaNsHelper(sn, qm, sn_proc);
9919 ProcessNaNsHelper(qn, sm, sm_proc);
9920 ProcessNaNsHelper(sn, sm, sn_proc);
9921}
9922
9923
9924static void DefaultNaNHelper(float n, float m, float a) {
9925 VIXL_ASSERT(isnan(n) || isnan(m) || isnan(a));
9926
9927 bool test_1op = isnan(n);
9928 bool test_2op = isnan(n) || isnan(m);
9929
9930 SETUP();
9931 START();
9932
9933 // Enable Default-NaN mode in the FPCR.
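      // When FPCR.DN is set, arithmetic operations that return a NaN produce the
      // default NaN instead of propagating an input NaN payload; non-arithmetic
      // operations (Fmov, Fabs, Fneg) still propagate the input NaN, as the checks
      // below confirm.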
9934 __ Mrs(x0, FPCR);
9935 __ Orr(x1, x0, DN_mask);
9936 __ Msr(FPCR, x1);
9937
9938 // Execute a number of instructions which all use ProcessNaNs, and check that
9939 // they all produce the default NaN.
9940 __ Fmov(s0, n);
9941 __ Fmov(s1, m);
9942 __ Fmov(s2, a);
9943
9944 if (test_1op) {
9945 // Operations that always propagate NaNs unchanged, even signalling NaNs.
9946 __ Fmov(s10, s0);
9947 __ Fabs(s11, s0);
9948 __ Fneg(s12, s0);
9949
9950 // Operations that use ProcessNaN.
9951 __ Fsqrt(s13, s0);
9952 __ Frinta(s14, s0);
9953 __ Frintn(s15, s0);
9954 __ Frintz(s16, s0);
9955
9956 // Fcvt usually has special NaN handling, but it respects default-NaN mode.
9957 __ Fcvt(d17, s0);
9958 }
9959
9960 if (test_2op) {
9961 __ Fadd(s18, s0, s1);
9962 __ Fsub(s19, s0, s1);
9963 __ Fmul(s20, s0, s1);
9964 __ Fdiv(s21, s0, s1);
9965 __ Fmax(s22, s0, s1);
9966 __ Fmin(s23, s0, s1);
9967 }
9968
9969 __ Fmadd(s24, s0, s1, s2);
9970 __ Fmsub(s25, s0, s1, s2);
9971 __ Fnmadd(s26, s0, s1, s2);
9972 __ Fnmsub(s27, s0, s1, s2);
9973
9974 // Restore FPCR.
9975 __ Msr(FPCR, x0);
9976
9977 END();
9978 RUN();
9979
9980 if (test_1op) {
9981 uint32_t n_raw = float_to_rawbits(n);
9982 ASSERT_EQUAL_FP32(n, s10);
9983 ASSERT_EQUAL_FP32(rawbits_to_float(n_raw & ~kSSignMask), s11);
9984 ASSERT_EQUAL_FP32(rawbits_to_float(n_raw ^ kSSignMask), s12);
9985 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s13);
9986 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s14);
9987 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s15);
9988 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s16);
9989 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d17);
9990 }
9991
9992 if (test_2op) {
9993 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s18);
9994 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s19);
9995 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s20);
9996 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s21);
9997 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s22);
9998 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s23);
9999 }
10000
10001 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s24);
10002 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s25);
10003 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s26);
10004 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s27);
10005
10006 TEARDOWN();
10007}
10008
10009
10010TEST(default_nan_float) {
10011 float sn = rawbits_to_float(0x7f951111);
10012 float sm = rawbits_to_float(0x7f952222);
10013 float sa = rawbits_to_float(0x7f95aaaa);
10014 float qn = rawbits_to_float(0x7fea1111);
10015 float qm = rawbits_to_float(0x7fea2222);
10016 float qa = rawbits_to_float(0x7feaaaaa);
10017 VIXL_ASSERT(IsSignallingNaN(sn));
10018 VIXL_ASSERT(IsSignallingNaN(sm));
10019 VIXL_ASSERT(IsSignallingNaN(sa));
10020 VIXL_ASSERT(IsQuietNaN(qn));
10021 VIXL_ASSERT(IsQuietNaN(qm));
10022 VIXL_ASSERT(IsQuietNaN(qa));
10023
10024 // - Signalling NaNs
10025 DefaultNaNHelper(sn, 0.0f, 0.0f);
10026 DefaultNaNHelper(0.0f, sm, 0.0f);
10027 DefaultNaNHelper(0.0f, 0.0f, sa);
10028 DefaultNaNHelper(sn, sm, 0.0f);
10029 DefaultNaNHelper(0.0f, sm, sa);
10030 DefaultNaNHelper(sn, 0.0f, sa);
10031 DefaultNaNHelper(sn, sm, sa);
10032 // - Quiet NaNs
10033 DefaultNaNHelper(qn, 0.0f, 0.0f);
10034 DefaultNaNHelper(0.0f, qm, 0.0f);
10035 DefaultNaNHelper(0.0f, 0.0f, qa);
10036 DefaultNaNHelper(qn, qm, 0.0f);
10037 DefaultNaNHelper(0.0f, qm, qa);
10038 DefaultNaNHelper(qn, 0.0f, qa);
10039 DefaultNaNHelper(qn, qm, qa);
10040 // - Mixed NaNs
10041 DefaultNaNHelper(qn, sm, sa);
10042 DefaultNaNHelper(sn, qm, sa);
10043 DefaultNaNHelper(sn, sm, qa);
10044 DefaultNaNHelper(qn, qm, sa);
10045 DefaultNaNHelper(sn, qm, qa);
10046 DefaultNaNHelper(qn, sm, qa);
10047 DefaultNaNHelper(qn, qm, qa);
10048}
10049
10050
10051static void DefaultNaNHelper(double n, double m, double a) {
10052 VIXL_ASSERT(isnan(n) || isnan(m) || isnan(a));
10053
10054 bool test_1op = isnan(n);
10055 bool test_2op = isnan(n) || isnan(m);
10056
10057 SETUP();
10058 START();
10059
10060 // Enable Default-NaN mode in the FPCR.
10061 __ Mrs(x0, FPCR);
10062 __ Orr(x1, x0, DN_mask);
10063 __ Msr(FPCR, x1);
10064
10065 // Execute a number of instructions which all use ProcessNaNs, and check that
10066 // they all produce the default NaN.
10067 __ Fmov(d0, n);
10068 __ Fmov(d1, m);
10069 __ Fmov(d2, a);
10070
10071 if (test_1op) {
10072 // Operations that always propagate NaNs unchanged, even signalling NaNs.
10073 __ Fmov(d10, d0);
10074 __ Fabs(d11, d0);
10075 __ Fneg(d12, d0);
10076
10077 // Operations that use ProcessNaN.
10078 __ Fsqrt(d13, d0);
10079 __ Frinta(d14, d0);
10080 __ Frintn(d15, d0);
10081 __ Frintz(d16, d0);
10082
10083 // Fcvt usually has special NaN handling, but it respects default-NaN mode.
10084 __ Fcvt(s17, d0);
10085 }
10086
10087 if (test_2op) {
10088 __ Fadd(d18, d0, d1);
10089 __ Fsub(d19, d0, d1);
10090 __ Fmul(d20, d0, d1);
10091 __ Fdiv(d21, d0, d1);
10092 __ Fmax(d22, d0, d1);
10093 __ Fmin(d23, d0, d1);
10094 }
10095
10096 __ Fmadd(d24, d0, d1, d2);
10097 __ Fmsub(d25, d0, d1, d2);
10098 __ Fnmadd(d26, d0, d1, d2);
10099 __ Fnmsub(d27, d0, d1, d2);
10100
10101 // Restore FPCR.
10102 __ Msr(FPCR, x0);
10103
10104 END();
10105 RUN();
10106
10107 if (test_1op) {
10108 uint64_t n_raw = double_to_rawbits(n);
10109 ASSERT_EQUAL_FP64(n, d10);
10110 ASSERT_EQUAL_FP64(rawbits_to_double(n_raw & ~kDSignMask), d11);
10111 ASSERT_EQUAL_FP64(rawbits_to_double(n_raw ^ kDSignMask), d12);
10112 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
10113 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d14);
10114 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d15);
10115 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d16);
10116 ASSERT_EQUAL_FP32(kFP32DefaultNaN, s17);
10117 }
10118
10119 if (test_2op) {
10120 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d18);
10121 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d19);
10122 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d20);
10123 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d21);
10124 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d22);
10125 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d23);
10126 }
10127
10128 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d24);
10129 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d25);
10130 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d26);
10131 ASSERT_EQUAL_FP64(kFP64DefaultNaN, d27);
10132
10133 TEARDOWN();
10134}
10135
10136
10137TEST(default_nan_double) {
10138 double sn = rawbits_to_double(0x7ff5555511111111);
10139 double sm = rawbits_to_double(0x7ff5555522222222);
10140 double sa = rawbits_to_double(0x7ff55555aaaaaaaa);
10141 double qn = rawbits_to_double(0x7ffaaaaa11111111);
10142 double qm = rawbits_to_double(0x7ffaaaaa22222222);
10143 double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
10144 VIXL_ASSERT(IsSignallingNaN(sn));
10145 VIXL_ASSERT(IsSignallingNaN(sm));
10146 VIXL_ASSERT(IsSignallingNaN(sa));
10147 VIXL_ASSERT(IsQuietNaN(qn));
10148 VIXL_ASSERT(IsQuietNaN(qm));
10149 VIXL_ASSERT(IsQuietNaN(qa));
10150
10151 // - Signalling NaNs
10152 DefaultNaNHelper(sn, 0.0, 0.0);
10153 DefaultNaNHelper(0.0, sm, 0.0);
10154 DefaultNaNHelper(0.0, 0.0, sa);
10155 DefaultNaNHelper(sn, sm, 0.0);
10156 DefaultNaNHelper(0.0, sm, sa);
10157 DefaultNaNHelper(sn, 0.0, sa);
10158 DefaultNaNHelper(sn, sm, sa);
10159 // - Quiet NaNs
10160 DefaultNaNHelper(qn, 0.0, 0.0);
10161 DefaultNaNHelper(0.0, qm, 0.0);
10162 DefaultNaNHelper(0.0, 0.0, qa);
10163 DefaultNaNHelper(qn, qm, 0.0);
10164 DefaultNaNHelper(0.0, qm, qa);
10165 DefaultNaNHelper(qn, 0.0, qa);
10166 DefaultNaNHelper(qn, qm, qa);
10167 // - Mixed NaNs
10168 DefaultNaNHelper(qn, sm, sa);
10169 DefaultNaNHelper(sn, qm, sa);
10170 DefaultNaNHelper(sn, sm, qa);
10171 DefaultNaNHelper(qn, qm, sa);
10172 DefaultNaNHelper(sn, qm, qa);
10173 DefaultNaNHelper(qn, sm, qa);
10174 DefaultNaNHelper(qn, qm, qa);
10175}
10176
10177
10178TEST(ldar_stlr) {
10179 // The middle value is read, modified, and written. The padding exists only to
10180 // check for over-write.
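      // Ldar/Stlr only add acquire/release ordering constraints; in a single thread
      // they read and write the same data as ordinary loads and stores, which is
      // all this test can check.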
10181 uint8_t b[] = {0, 0x12, 0};
10182 uint16_t h[] = {0, 0x1234, 0};
10183 uint32_t w[] = {0, 0x12345678, 0};
10184 uint64_t x[] = {0, 0x123456789abcdef0, 0};
10185
10186 SETUP();
10187 START();
10188
10189 __ Mov(x10, reinterpret_cast<uintptr_t>(&b[1]));
10190 __ Ldarb(w0, MemOperand(x10));
10191 __ Add(w0, w0, 1);
10192 __ Stlrb(w0, MemOperand(x10));
10193
10194 __ Mov(x10, reinterpret_cast<uintptr_t>(&h[1]));
10195 __ Ldarh(w0, MemOperand(x10));
10196 __ Add(w0, w0, 1);
10197 __ Stlrh(w0, MemOperand(x10));
10198
10199 __ Mov(x10, reinterpret_cast<uintptr_t>(&w[1]));
10200 __ Ldar(w0, MemOperand(x10));
10201 __ Add(w0, w0, 1);
10202 __ Stlr(w0, MemOperand(x10));
10203
10204 __ Mov(x10, reinterpret_cast<uintptr_t>(&x[1]));
10205 __ Ldar(x0, MemOperand(x10));
10206 __ Add(x0, x0, 1);
10207 __ Stlr(x0, MemOperand(x10));
10208
10209 END();
10210 RUN();
10211
10212 ASSERT_EQUAL_32(0x13, b[1]);
10213 ASSERT_EQUAL_32(0x1235, h[1]);
10214 ASSERT_EQUAL_32(0x12345679, w[1]);
10215 ASSERT_EQUAL_64(0x123456789abcdef1, x[1]);
10216
10217 // Check for over-write.
10218 ASSERT_EQUAL_32(0, b[0]);
10219 ASSERT_EQUAL_32(0, b[2]);
10220 ASSERT_EQUAL_32(0, h[0]);
10221 ASSERT_EQUAL_32(0, h[2]);
10222 ASSERT_EQUAL_32(0, w[0]);
10223 ASSERT_EQUAL_32(0, w[2]);
10224  ASSERT_EQUAL_64(0, x[0]);
10225  ASSERT_EQUAL_64(0, x[2]);
10226
10227 TEARDOWN();
10228}
10229
10230
10231TEST(ldxr_stxr) {
10232 // The middle value is read, modified, and written. The padding exists only to
10233 // check for over-write.
10234 uint8_t b[] = {0, 0x12, 0};
10235 uint16_t h[] = {0, 0x1234, 0};
10236 uint32_t w[] = {0, 0x12345678, 0};
10237 uint64_t x[] = {0, 0x123456789abcdef0, 0};
10238
10239 // As above, but get suitably-aligned values for ldxp and stxp.
10240 uint32_t wp_data[] = {0, 0, 0, 0, 0};
10241 uint32_t * wp = AlignUp(wp_data + 1, kWRegSizeInBytes * 2) - 1;
10242 wp[1] = 0x12345678; // wp[1] is 64-bit-aligned.
10243 wp[2] = 0x87654321;
10244 uint64_t xp_data[] = {0, 0, 0, 0, 0};
10245 uint64_t * xp = AlignUp(xp_data + 1, kXRegSizeInBytes * 2) - 1;
10246 xp[1] = 0x123456789abcdef0; // xp[1] is 128-bit-aligned.
10247 xp[2] = 0x0fedcba987654321;
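      // Ldxp/Stxp require the address to be aligned to the size of the pair
      // (twice the register size), hence the over-sized buffers and AlignUp.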
10248
10249 SETUP();
10250 START();
10251
10252 __ Mov(x10, reinterpret_cast<uintptr_t>(&b[1]));
10253 Label try_b;
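      // This is the usual load-exclusive/store-exclusive retry loop: the store
      // writes 0 to its status register (w5) on success and 1 on failure, so we
      // loop until the store succeeds.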
10254 __ Bind(&try_b);
10255 __ Ldxrb(w0, MemOperand(x10));
10256 __ Add(w0, w0, 1);
10257 __ Stxrb(w5, w0, MemOperand(x10));
10258 __ Cbnz(w5, &try_b);
10259
10260 __ Mov(x10, reinterpret_cast<uintptr_t>(&h[1]));
10261 Label try_h;
10262 __ Bind(&try_h);
10263 __ Ldxrh(w0, MemOperand(x10));
10264 __ Add(w0, w0, 1);
10265 __ Stxrh(w5, w0, MemOperand(x10));
10266 __ Cbnz(w5, &try_h);
10267
10268 __ Mov(x10, reinterpret_cast<uintptr_t>(&w[1]));
10269 Label try_w;
10270 __ Bind(&try_w);
10271 __ Ldxr(w0, MemOperand(x10));
10272 __ Add(w0, w0, 1);
10273 __ Stxr(w5, w0, MemOperand(x10));
10274 __ Cbnz(w5, &try_w);
10275
10276 __ Mov(x10, reinterpret_cast<uintptr_t>(&x[1]));
10277 Label try_x;
10278 __ Bind(&try_x);
10279 __ Ldxr(x0, MemOperand(x10));
10280 __ Add(x0, x0, 1);
10281 __ Stxr(w5, x0, MemOperand(x10));
10282 __ Cbnz(w5, &try_x);
10283
10284 __ Mov(x10, reinterpret_cast<uintptr_t>(&wp[1]));
10285 Label try_wp;
10286 __ Bind(&try_wp);
10287 __ Ldxp(w0, w1, MemOperand(x10));
10288 __ Add(w0, w0, 1);
10289 __ Add(w1, w1, 1);
10290 __ Stxp(w5, w0, w1, MemOperand(x10));
10291 __ Cbnz(w5, &try_wp);
10292
10293 __ Mov(x10, reinterpret_cast<uintptr_t>(&xp[1]));
10294 Label try_xp;
10295 __ Bind(&try_xp);
10296 __ Ldxp(x0, x1, MemOperand(x10));
10297 __ Add(x0, x0, 1);
10298 __ Add(x1, x1, 1);
10299 __ Stxp(w5, x0, x1, MemOperand(x10));
10300 __ Cbnz(w5, &try_xp);
10301
10302 END();
10303 RUN();
10304
10305 ASSERT_EQUAL_32(0x13, b[1]);
10306 ASSERT_EQUAL_32(0x1235, h[1]);
10307 ASSERT_EQUAL_32(0x12345679, w[1]);
10308 ASSERT_EQUAL_64(0x123456789abcdef1, x[1]);
10309 ASSERT_EQUAL_32(0x12345679, wp[1]);
10310 ASSERT_EQUAL_32(0x87654322, wp[2]);
10311 ASSERT_EQUAL_64(0x123456789abcdef1, xp[1]);
10312 ASSERT_EQUAL_64(0x0fedcba987654322, xp[2]);
10313
10314 // Check for over-write.
10315 ASSERT_EQUAL_32(0, b[0]);
10316 ASSERT_EQUAL_32(0, b[2]);
10317 ASSERT_EQUAL_32(0, h[0]);
10318 ASSERT_EQUAL_32(0, h[2]);
10319 ASSERT_EQUAL_32(0, w[0]);
10320 ASSERT_EQUAL_32(0, w[2]);
10321 ASSERT_EQUAL_64(0, x[0]);
10322 ASSERT_EQUAL_64(0, x[2]);
10323 ASSERT_EQUAL_32(0, wp[0]);
10324 ASSERT_EQUAL_32(0, wp[3]);
10325 ASSERT_EQUAL_64(0, xp[0]);
10326 ASSERT_EQUAL_64(0, xp[3]);
10327
10328 TEARDOWN();
10329}
10330
10331
10332TEST(ldaxr_stlxr) {
10333 // The middle value is read, modified, and written. The padding exists only to
10334 // check for over-write.
10335 uint8_t b[] = {0, 0x12, 0};
10336 uint16_t h[] = {0, 0x1234, 0};
10337 uint32_t w[] = {0, 0x12345678, 0};
10338 uint64_t x[] = {0, 0x123456789abcdef0, 0};
10339
10340 // As above, but get suitably-aligned values for ldxp and stxp.
10341 uint32_t wp_data[] = {0, 0, 0, 0, 0};
10342 uint32_t * wp = AlignUp(wp_data + 1, kWRegSizeInBytes * 2) - 1;
10343 wp[1] = 0x12345678; // wp[1] is 64-bit-aligned.
10344 wp[2] = 0x87654321;
10345 uint64_t xp_data[] = {0, 0, 0, 0, 0};
10346 uint64_t * xp = AlignUp(xp_data + 1, kXRegSizeInBytes * 2) - 1;
10347 xp[1] = 0x123456789abcdef0; // xp[1] is 128-bit-aligned.
10348 xp[2] = 0x0fedcba987654321;
10349
10350 SETUP();
10351 START();
10352
10353 __ Mov(x10, reinterpret_cast<uintptr_t>(&b[1]));
10354 Label try_b;
10355 __ Bind(&try_b);
10356 __ Ldaxrb(w0, MemOperand(x10));
10357 __ Add(w0, w0, 1);
10358 __ Stlxrb(w5, w0, MemOperand(x10));
10359 __ Cbnz(w5, &try_b);
10360
10361 __ Mov(x10, reinterpret_cast<uintptr_t>(&h[1]));
10362 Label try_h;
10363 __ Bind(&try_h);
10364 __ Ldaxrh(w0, MemOperand(x10));
10365 __ Add(w0, w0, 1);
10366 __ Stlxrh(w5, w0, MemOperand(x10));
10367 __ Cbnz(w5, &try_h);
10368
10369 __ Mov(x10, reinterpret_cast<uintptr_t>(&w[1]));
10370 Label try_w;
10371 __ Bind(&try_w);
10372 __ Ldaxr(w0, MemOperand(x10));
10373 __ Add(w0, w0, 1);
10374 __ Stlxr(w5, w0, MemOperand(x10));
10375 __ Cbnz(w5, &try_w);
10376
10377 __ Mov(x10, reinterpret_cast<uintptr_t>(&x[1]));
10378 Label try_x;
10379 __ Bind(&try_x);
10380 __ Ldaxr(x0, MemOperand(x10));
10381 __ Add(x0, x0, 1);
10382 __ Stlxr(w5, x0, MemOperand(x10));
10383 __ Cbnz(w5, &try_x);
10384
10385 __ Mov(x10, reinterpret_cast<uintptr_t>(&wp[1]));
10386 Label try_wp;
10387 __ Bind(&try_wp);
10388 __ Ldaxp(w0, w1, MemOperand(x10));
10389 __ Add(w0, w0, 1);
10390 __ Add(w1, w1, 1);
10391 __ Stlxp(w5, w0, w1, MemOperand(x10));
10392 __ Cbnz(w5, &try_wp);
10393
10394 __ Mov(x10, reinterpret_cast<uintptr_t>(&xp[1]));
10395 Label try_xp;
10396 __ Bind(&try_xp);
10397 __ Ldaxp(x0, x1, MemOperand(x10));
10398 __ Add(x0, x0, 1);
10399 __ Add(x1, x1, 1);
10400 __ Stlxp(w5, x0, x1, MemOperand(x10));
10401 __ Cbnz(w5, &try_xp);
10402
10403 END();
10404 RUN();
10405
10406 ASSERT_EQUAL_32(0x13, b[1]);
10407 ASSERT_EQUAL_32(0x1235, h[1]);
10408 ASSERT_EQUAL_32(0x12345679, w[1]);
10409 ASSERT_EQUAL_64(0x123456789abcdef1, x[1]);
10410 ASSERT_EQUAL_32(0x12345679, wp[1]);
10411 ASSERT_EQUAL_32(0x87654322, wp[2]);
10412 ASSERT_EQUAL_64(0x123456789abcdef1, xp[1]);
10413 ASSERT_EQUAL_64(0x0fedcba987654322, xp[2]);
10414
10415 // Check for over-write.
10416 ASSERT_EQUAL_32(0, b[0]);
10417 ASSERT_EQUAL_32(0, b[2]);
10418 ASSERT_EQUAL_32(0, h[0]);
10419 ASSERT_EQUAL_32(0, h[2]);
10420 ASSERT_EQUAL_32(0, w[0]);
10421 ASSERT_EQUAL_32(0, w[2]);
10422 ASSERT_EQUAL_64(0, x[0]);
10423 ASSERT_EQUAL_64(0, x[2]);
10424 ASSERT_EQUAL_32(0, wp[0]);
10425 ASSERT_EQUAL_32(0, wp[3]);
10426 ASSERT_EQUAL_64(0, xp[0]);
10427 ASSERT_EQUAL_64(0, xp[3]);
10428
10429 TEARDOWN();
10430}
10431
10432
10433TEST(clrex) {
10434 // This data should never be written.
10435 uint64_t data[] = {0, 0, 0};
10436 uint64_t * data_aligned = AlignUp(data, kXRegSizeInBytes * 2);
10437
10438 SETUP();
10439 START();
10440
10441 __ Mov(x10, reinterpret_cast<uintptr_t>(data_aligned));
10442 __ Mov(w6, 0);
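      // Clrex clears the local exclusive monitor, so each following store-exclusive
      // must fail and write 1 to its status register (w5); w6 accumulates those
      // status values.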
10443
10444 __ Ldxrb(w0, MemOperand(x10));
10445 __ Clrex();
10446 __ Add(w0, w0, 1);
10447 __ Stxrb(w5, w0, MemOperand(x10));
10448 __ Add(w6, w6, w5);
10449
10450 __ Ldxrh(w0, MemOperand(x10));
10451 __ Clrex();
10452 __ Add(w0, w0, 1);
10453 __ Stxrh(w5, w0, MemOperand(x10));
10454 __ Add(w6, w6, w5);
10455
10456 __ Ldxr(w0, MemOperand(x10));
10457 __ Clrex();
10458 __ Add(w0, w0, 1);
10459 __ Stxr(w5, w0, MemOperand(x10));
10460 __ Add(w6, w6, w5);
10461
10462 __ Ldxr(x0, MemOperand(x10));
10463 __ Clrex();
10464 __ Add(x0, x0, 1);
10465 __ Stxr(w5, x0, MemOperand(x10));
10466 __ Add(w6, w6, w5);
10467
10468 __ Ldxp(w0, w1, MemOperand(x10));
10469 __ Clrex();
10470 __ Add(w0, w0, 1);
10471 __ Add(w1, w1, 1);
10472 __ Stxp(w5, w0, w1, MemOperand(x10));
10473 __ Add(w6, w6, w5);
10474
10475 __ Ldxp(x0, x1, MemOperand(x10));
10476 __ Clrex();
10477 __ Add(x0, x0, 1);
10478 __ Add(x1, x1, 1);
10479 __ Stxp(w5, x0, x1, MemOperand(x10));
10480 __ Add(w6, w6, w5);
10481
10482 // Acquire-release variants.
10483
10484 __ Ldaxrb(w0, MemOperand(x10));
10485 __ Clrex();
10486 __ Add(w0, w0, 1);
10487 __ Stlxrb(w5, w0, MemOperand(x10));
10488 __ Add(w6, w6, w5);
10489
10490 __ Ldaxrh(w0, MemOperand(x10));
10491 __ Clrex();
10492 __ Add(w0, w0, 1);
10493 __ Stlxrh(w5, w0, MemOperand(x10));
10494 __ Add(w6, w6, w5);
10495
10496 __ Ldaxr(w0, MemOperand(x10));
10497 __ Clrex();
10498 __ Add(w0, w0, 1);
10499 __ Stlxr(w5, w0, MemOperand(x10));
10500 __ Add(w6, w6, w5);
10501
10502 __ Ldaxr(x0, MemOperand(x10));
10503 __ Clrex();
10504 __ Add(x0, x0, 1);
10505 __ Stlxr(w5, x0, MemOperand(x10));
10506 __ Add(w6, w6, w5);
10507
10508 __ Ldaxp(w0, w1, MemOperand(x10));
10509 __ Clrex();
10510 __ Add(w0, w0, 1);
10511 __ Add(w1, w1, 1);
10512 __ Stlxp(w5, w0, w1, MemOperand(x10));
10513 __ Add(w6, w6, w5);
10514
10515 __ Ldaxp(x0, x1, MemOperand(x10));
10516 __ Clrex();
10517 __ Add(x0, x0, 1);
10518 __ Add(x1, x1, 1);
10519 __ Stlxp(w5, x0, x1, MemOperand(x10));
10520 __ Add(w6, w6, w5);
10521
10522 END();
10523 RUN();
10524
10525 // None of the 12 store-exclusives should have succeeded.
10526 ASSERT_EQUAL_32(12, w6);
10527
10528 ASSERT_EQUAL_64(0, data[0]);
10529 ASSERT_EQUAL_64(0, data[1]);
10530 ASSERT_EQUAL_64(0, data[2]);
10531}
10532
10533
10534#ifdef USE_SIMULATOR
10535// Check that the simulator occasionally makes store-exclusive fail.
10536TEST(ldxr_stxr_fail) {
10537 uint64_t data[] = {0, 0, 0};
10538 uint64_t * data_aligned = AlignUp(data, kXRegSizeInBytes * 2);
10539
10540 // Impose a hard limit on the number of attempts, so the test cannot hang.
10541 static const uint64_t kWatchdog = 10000;
10542 Label done;
10543
10544 SETUP();
10545 START();
10546
10547 __ Mov(x10, reinterpret_cast<uintptr_t>(data_aligned));
10548 __ Mov(x11, kWatchdog);
10549
10550 // This loop is the opposite of what we normally do with ldxr and stxr; we
10551 // keep trying until we fail (or the watchdog counter runs out).
10552 Label try_b;
10553 __ Bind(&try_b);
10554 __ Ldxrb(w0, MemOperand(x10));
10555 __ Stxrb(w5, w0, MemOperand(x10));
10556 // Check the watchdog counter.
10557 __ Sub(x11, x11, 1);
10558 __ Cbz(x11, &done);
10559 // Check the exclusive-store result.
10560 __ Cbz(w5, &try_b);
10561
10562 Label try_h;
10563 __ Bind(&try_h);
10564 __ Ldxrh(w0, MemOperand(x10));
10565 __ Stxrh(w5, w0, MemOperand(x10));
10566 __ Sub(x11, x11, 1);
10567 __ Cbz(x11, &done);
10568 __ Cbz(w5, &try_h);
10569
10570 Label try_w;
10571 __ Bind(&try_w);
10572 __ Ldxr(w0, MemOperand(x10));
10573 __ Stxr(w5, w0, MemOperand(x10));
10574 __ Sub(x11, x11, 1);
10575 __ Cbz(x11, &done);
10576 __ Cbz(w5, &try_w);
10577
10578 Label try_x;
10579 __ Bind(&try_x);
10580 __ Ldxr(x0, MemOperand(x10));
10581 __ Stxr(w5, x0, MemOperand(x10));
10582 __ Sub(x11, x11, 1);
10583 __ Cbz(x11, &done);
10584 __ Cbz(w5, &try_x);
10585
10586 Label try_wp;
10587 __ Bind(&try_wp);
10588 __ Ldxp(w0, w1, MemOperand(x10));
10589 __ Stxp(w5, w0, w1, MemOperand(x10));
10590 __ Sub(x11, x11, 1);
10591 __ Cbz(x11, &done);
10592 __ Cbz(w5, &try_wp);
10593
10594 Label try_xp;
10595 __ Bind(&try_xp);
10596 __ Ldxp(x0, x1, MemOperand(x10));
10597 __ Stxp(w5, x0, x1, MemOperand(x10));
10598 __ Sub(x11, x11, 1);
10599 __ Cbz(x11, &done);
10600 __ Cbz(w5, &try_xp);
10601
10602 __ Bind(&done);
10603 // Trigger an error if x11 (watchdog) is zero.
10604 __ Cmp(x11, 0);
10605 __ Cset(x12, eq);
10606
10607 END();
10608 RUN();
10609
10610 // Check that the watchdog counter didn't run out.
10611 ASSERT_EQUAL_64(0, x12);
10612}
10613#endif
10614
10615
10616#ifdef USE_SIMULATOR
10617// Check that the simulator occasionally makes store-exclusive fail.
10618TEST(ldaxr_stlxr_fail) {
10619 uint64_t data[] = {0, 0, 0};
10620 uint64_t * data_aligned = AlignUp(data, kXRegSizeInBytes * 2);
10621
10622 // Impose a hard limit on the number of attempts, so the test cannot hang.
10623 static const uint64_t kWatchdog = 10000;
10624 Label done;
10625
10626 SETUP();
10627 START();
10628
10629 __ Mov(x10, reinterpret_cast<uintptr_t>(data_aligned));
10630 __ Mov(x11, kWatchdog);
10631
10632 // This loop is the opposite of what we normally do with ldxr and stxr; we
10633 // keep trying until we fail (or the watchdog counter runs out).
10634 Label try_b;
10635 __ Bind(&try_b);
10636  __ Ldaxrb(w0, MemOperand(x10));
10637  __ Stlxrb(w5, w0, MemOperand(x10));
10638 // Check the watchdog counter.
10639 __ Sub(x11, x11, 1);
10640 __ Cbz(x11, &done);
10641 // Check the exclusive-store result.
10642 __ Cbz(w5, &try_b);
10643
10644 Label try_h;
10645 __ Bind(&try_h);
10646 __ Ldaxrh(w0, MemOperand(x10));
10647 __ Stlxrh(w5, w0, MemOperand(x10));
10648 __ Sub(x11, x11, 1);
10649 __ Cbz(x11, &done);
10650 __ Cbz(w5, &try_h);
10651
10652 Label try_w;
10653 __ Bind(&try_w);
10654 __ Ldaxr(w0, MemOperand(x10));
10655 __ Stlxr(w5, w0, MemOperand(x10));
10656 __ Sub(x11, x11, 1);
10657 __ Cbz(x11, &done);
10658 __ Cbz(w5, &try_w);
10659
10660 Label try_x;
10661 __ Bind(&try_x);
10662 __ Ldaxr(x0, MemOperand(x10));
10663 __ Stlxr(w5, x0, MemOperand(x10));
10664 __ Sub(x11, x11, 1);
10665 __ Cbz(x11, &done);
10666 __ Cbz(w5, &try_x);
10667
10668 Label try_wp;
10669 __ Bind(&try_wp);
10670 __ Ldaxp(w0, w1, MemOperand(x10));
10671 __ Stlxp(w5, w0, w1, MemOperand(x10));
10672 __ Sub(x11, x11, 1);
10673 __ Cbz(x11, &done);
10674 __ Cbz(w5, &try_wp);
10675
10676 Label try_xp;
10677 __ Bind(&try_xp);
10678 __ Ldaxp(x0, x1, MemOperand(x10));
10679 __ Stlxp(w5, x0, x1, MemOperand(x10));
10680 __ Sub(x11, x11, 1);
10681 __ Cbz(x11, &done);
10682 __ Cbz(w5, &try_xp);
10683
10684 __ Bind(&done);
10685 // Trigger an error if x11 (watchdog) is zero.
10686 __ Cmp(x11, 0);
10687 __ Cset(x12, eq);
10688
10689 END();
10690 RUN();
10691
10692 // Check that the watchdog counter didn't run out.
10693 ASSERT_EQUAL_64(0, x12);
10694}
10695#endif
10696
10697
10698TEST(load_store_tagged_immediate_offset) {
10699 uint64_t tags[] = { 0x00, 0x1, 0x55, 0xff };
10700 int tag_count = sizeof(tags) / sizeof(tags[0]);
10701
10702 const int kMaxDataLength = 128;
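      // With top-byte-ignore, the top eight bits of a 64-bit pointer act as a tag
      // that is ignored when the memory address is formed; CPU::SetPointerTag
      // inserts one of the tag values above into that byte.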
10703
10704 for (int i = 0; i < tag_count; i++) {
10705 unsigned char src[kMaxDataLength];
10706 uint64_t src_raw = reinterpret_cast<uint64_t>(src);
10707 uint64_t src_tag = tags[i];
10708 uint64_t src_tagged = CPU::SetPointerTag(src_raw, src_tag);
10709
10710 for (int k = 0; k < kMaxDataLength; k++) {
10711 src[k] = k + 1;
10712 }
10713
10714 for (int j = 0; j < tag_count; j++) {
10715 unsigned char dst[kMaxDataLength];
10716 uint64_t dst_raw = reinterpret_cast<uint64_t>(dst);
10717 uint64_t dst_tag = tags[j];
10718 uint64_t dst_tagged = CPU::SetPointerTag(dst_raw, dst_tag);
10719
10720 memset(dst, 0, kMaxDataLength);
10721
10722 SETUP();
10723  ALLOW_ASM();
10724  START();
10725
10726 __ Mov(x0, src_tagged);
10727 __ Mov(x1, dst_tagged);
10728
10729 int offset = 0;
10730
10731 // Scaled-immediate offsets.
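      // Scaled immediate offsets use the unsigned 12-bit offset form, scaled by the
      // access size; the unscaled accesses further down use the ldur/stur forms
      // with a signed 9-bit byte offset. The Require*Offset options pin the
      // assembler to the named encoding.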
10732
10733 __ ldp(x2, x3, MemOperand(x0, offset));
10734 __ stp(x2, x3, MemOperand(x1, offset));
10735 offset += 2 * kXRegSizeInBytes;
10736
10737 __ ldpsw(x2, x3, MemOperand(x0, offset));
10738 __ stp(w2, w3, MemOperand(x1, offset));
10739 offset += 2 * kWRegSizeInBytes;
10740
10741 __ ldp(d0, d1, MemOperand(x0, offset));
10742 __ stp(d0, d1, MemOperand(x1, offset));
10743 offset += 2 * kDRegSizeInBytes;
10744
10745 __ ldp(w2, w3, MemOperand(x0, offset));
10746 __ stp(w2, w3, MemOperand(x1, offset));
10747 offset += 2 * kWRegSizeInBytes;
10748
10749 __ ldp(s0, s1, MemOperand(x0, offset));
10750 __ stp(s0, s1, MemOperand(x1, offset));
10751 offset += 2 * kSRegSizeInBytes;
10752
10753 __ ldr(x2, MemOperand(x0, offset), RequireScaledOffset);
10754 __ str(x2, MemOperand(x1, offset), RequireScaledOffset);
10755 offset += kXRegSizeInBytes;
10756
10757 __ ldr(d0, MemOperand(x0, offset), RequireScaledOffset);
10758 __ str(d0, MemOperand(x1, offset), RequireScaledOffset);
10759 offset += kDRegSizeInBytes;
10760
10761 __ ldr(w2, MemOperand(x0, offset), RequireScaledOffset);
10762 __ str(w2, MemOperand(x1, offset), RequireScaledOffset);
10763 offset += kWRegSizeInBytes;
10764
10765 __ ldr(s0, MemOperand(x0, offset), RequireScaledOffset);
10766 __ str(s0, MemOperand(x1, offset), RequireScaledOffset);
10767 offset += kSRegSizeInBytes;
10768
10769 __ ldrh(w2, MemOperand(x0, offset), RequireScaledOffset);
10770 __ strh(w2, MemOperand(x1, offset), RequireScaledOffset);
10771 offset += 2;
10772
10773 __ ldrsh(w2, MemOperand(x0, offset), RequireScaledOffset);
10774 __ strh(w2, MemOperand(x1, offset), RequireScaledOffset);
10775 offset += 2;
10776
10777 __ ldrb(w2, MemOperand(x0, offset), RequireScaledOffset);
10778 __ strb(w2, MemOperand(x1, offset), RequireScaledOffset);
10779 offset += 1;
10780
10781 __ ldrsb(w2, MemOperand(x0, offset), RequireScaledOffset);
10782 __ strb(w2, MemOperand(x1, offset), RequireScaledOffset);
10783 offset += 1;
10784
10785 // Unscaled-immediate offsets.
10786
10787 __ ldur(x2, MemOperand(x0, offset), RequireUnscaledOffset);
10788 __ stur(x2, MemOperand(x1, offset), RequireUnscaledOffset);
10789 offset += kXRegSizeInBytes;
10790
10791 __ ldur(d0, MemOperand(x0, offset), RequireUnscaledOffset);
10792 __ stur(d0, MemOperand(x1, offset), RequireUnscaledOffset);
10793 offset += kDRegSizeInBytes;
10794
10795 __ ldur(w2, MemOperand(x0, offset), RequireUnscaledOffset);
10796 __ stur(w2, MemOperand(x1, offset), RequireUnscaledOffset);
10797 offset += kWRegSizeInBytes;
10798
10799 __ ldur(s0, MemOperand(x0, offset), RequireUnscaledOffset);
10800 __ stur(s0, MemOperand(x1, offset), RequireUnscaledOffset);
10801 offset += kSRegSizeInBytes;
10802
10803 __ ldurh(w2, MemOperand(x0, offset), RequireUnscaledOffset);
10804 __ sturh(w2, MemOperand(x1, offset), RequireUnscaledOffset);
10805 offset += 2;
10806
10807 __ ldursh(w2, MemOperand(x0, offset), RequireUnscaledOffset);
10808 __ sturh(w2, MemOperand(x1, offset), RequireUnscaledOffset);
10809 offset += 2;
10810
10811 __ ldurb(w2, MemOperand(x0, offset), RequireUnscaledOffset);
10812 __ sturb(w2, MemOperand(x1, offset), RequireUnscaledOffset);
10813 offset += 1;
10814
10815 __ ldursb(w2, MemOperand(x0, offset), RequireUnscaledOffset);
10816 __ sturb(w2, MemOperand(x1, offset), RequireUnscaledOffset);
10817 offset += 1;
10818
10819 // Extract the tag (so we can test that it was preserved correctly).
10820 __ Ubfx(x0, x0, kAddressTagOffset, kAddressTagWidth);
10821 __ Ubfx(x1, x1, kAddressTagOffset, kAddressTagWidth);
10822
10823 VIXL_ASSERT(kMaxDataLength >= offset);
10824
10825 END();
10826 RUN();
10827
10828 ASSERT_EQUAL_64(src_tag, x0);
10829 ASSERT_EQUAL_64(dst_tag, x1);
10830
10831 for (int k = 0; k < offset; k++) {
10832 VIXL_CHECK(src[k] == dst[k]);
10833 }
10834
10835 TEARDOWN();
10836 }
10837 }
10838}
10839
10840
10841TEST(load_store_tagged_immediate_preindex) {
10842 uint64_t tags[] = { 0x00, 0x1, 0x55, 0xff };
10843 int tag_count = sizeof(tags) / sizeof(tags[0]);
10844
10845 const int kMaxDataLength = 128;
10846
10847 for (int i = 0; i < tag_count; i++) {
10848 unsigned char src[kMaxDataLength];
10849 uint64_t src_raw = reinterpret_cast<uint64_t>(src);
10850 uint64_t src_tag = tags[i];
10851 uint64_t src_tagged = CPU::SetPointerTag(src_raw, src_tag);
10852
10853 for (int k = 0; k < kMaxDataLength; k++) {
10854 src[k] = k + 1;
10855 }
10856
10857 for (int j = 0; j < tag_count; j++) {
10858 unsigned char dst[kMaxDataLength];
10859 uint64_t dst_raw = reinterpret_cast<uint64_t>(dst);
10860 uint64_t dst_tag = tags[j];
10861 uint64_t dst_tagged = CPU::SetPointerTag(dst_raw, dst_tag);
10862
10863 for (int k = 0; k < kMaxDataLength; k++) {
10864 dst[k] = 0;
10865 }
10866
10867 SETUP();
10868  ALLOW_ASM();
10869  START();
10870
10871 // Each MemOperand must apply a pre-index equal to the size of the
10872 // previous access.
10873
10874 // Start with a non-zero preindex.
10875 int preindex = 63 * kXRegSizeInBytes;
10876      int data_length = 0;
armvixl4a102ba2014-07-14 09:02:40 +010010877
10878 __ Mov(x0, src_tagged - preindex);
10879 __ Mov(x1, dst_tagged - preindex);
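      // The bases start preindex bytes below the buffers: a pre-indexed MemOperand
      // adds its offset to the base (and writes it back) before the access, so the
      // first accesses land on src[0] and dst[0].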
10880
10881 __ ldp(x2, x3, MemOperand(x0, preindex, PreIndex));
10882 __ stp(x2, x3, MemOperand(x1, preindex, PreIndex));
10883 preindex = 2 * kXRegSizeInBytes;
10884      data_length = preindex;
10885
10886 __ ldpsw(x2, x3, MemOperand(x0, preindex, PreIndex));
10887 __ stp(w2, w3, MemOperand(x1, preindex, PreIndex));
10888 preindex = 2 * kWRegSizeInBytes;
10889 data_length += preindex;
10890
10891 __ ldp(d0, d1, MemOperand(x0, preindex, PreIndex));
10892 __ stp(d0, d1, MemOperand(x1, preindex, PreIndex));
10893 preindex = 2 * kDRegSizeInBytes;
10894 data_length += preindex;
10895
10896 __ ldp(w2, w3, MemOperand(x0, preindex, PreIndex));
10897 __ stp(w2, w3, MemOperand(x1, preindex, PreIndex));
10898 preindex = 2 * kWRegSizeInBytes;
10899 data_length += preindex;
10900
10901 __ ldp(s0, s1, MemOperand(x0, preindex, PreIndex));
10902 __ stp(s0, s1, MemOperand(x1, preindex, PreIndex));
10903 preindex = 2 * kSRegSizeInBytes;
10904 data_length += preindex;
10905
10906 __ ldr(x2, MemOperand(x0, preindex, PreIndex));
10907 __ str(x2, MemOperand(x1, preindex, PreIndex));
10908 preindex = kXRegSizeInBytes;
10909 data_length += preindex;
10910
10911 __ ldr(d0, MemOperand(x0, preindex, PreIndex));
10912 __ str(d0, MemOperand(x1, preindex, PreIndex));
10913 preindex = kDRegSizeInBytes;
10914 data_length += preindex;
10915
10916 __ ldr(w2, MemOperand(x0, preindex, PreIndex));
10917 __ str(w2, MemOperand(x1, preindex, PreIndex));
10918 preindex = kWRegSizeInBytes;
10919 data_length += preindex;
10920
10921 __ ldr(s0, MemOperand(x0, preindex, PreIndex));
10922 __ str(s0, MemOperand(x1, preindex, PreIndex));
10923 preindex = kSRegSizeInBytes;
10924 data_length += preindex;
10925
10926 __ ldrh(w2, MemOperand(x0, preindex, PreIndex));
10927 __ strh(w2, MemOperand(x1, preindex, PreIndex));
10928 preindex = 2;
10929 data_length += preindex;
10930
10931 __ ldrsh(w2, MemOperand(x0, preindex, PreIndex));
10932 __ strh(w2, MemOperand(x1, preindex, PreIndex));
10933 preindex = 2;
10934 data_length += preindex;
10935
10936 __ ldrb(w2, MemOperand(x0, preindex, PreIndex));
10937 __ strb(w2, MemOperand(x1, preindex, PreIndex));
10938 preindex = 1;
10939 data_length += preindex;
10940
10941 __ ldrsb(w2, MemOperand(x0, preindex, PreIndex));
10942 __ strb(w2, MemOperand(x1, preindex, PreIndex));
10943 preindex = 1;
10944 data_length += preindex;
10945
10946 VIXL_ASSERT(kMaxDataLength >= data_length);
10947
10948 END();
10949 RUN();
10950
10951 // Check that the preindex was correctly applied in each operation, and
10952 // that the tag was preserved.
10953 ASSERT_EQUAL_64(src_tagged + data_length - preindex, x0);
10954 ASSERT_EQUAL_64(dst_tagged + data_length - preindex, x1);
10955
10956 for (int k = 0; k < data_length; k++) {
10957 VIXL_CHECK(src[k] == dst[k]);
10958 }
10959
10960 TEARDOWN();
10961 }
10962 }
10963}
10964
10965
10966TEST(load_store_tagged_immediate_postindex) {
10967 uint64_t tags[] = { 0x00, 0x1, 0x55, 0xff };
10968 int tag_count = sizeof(tags) / sizeof(tags[0]);
10969
10970 const int kMaxDataLength = 128;
10971
10972 for (int i = 0; i < tag_count; i++) {
10973 unsigned char src[kMaxDataLength];
10974 uint64_t src_raw = reinterpret_cast<uint64_t>(src);
10975 uint64_t src_tag = tags[i];
10976 uint64_t src_tagged = CPU::SetPointerTag(src_raw, src_tag);
10977
10978 for (int k = 0; k < kMaxDataLength; k++) {
10979 src[k] = k + 1;
10980 }
10981
10982 for (int j = 0; j < tag_count; j++) {
10983 unsigned char dst[kMaxDataLength];
10984 uint64_t dst_raw = reinterpret_cast<uint64_t>(dst);
10985 uint64_t dst_tag = tags[j];
10986 uint64_t dst_tagged = CPU::SetPointerTag(dst_raw, dst_tag);
10987
10988 for (int k = 0; k < kMaxDataLength; k++) {
10989 dst[k] = 0;
10990 }
10991
10992 SETUP();
10993  ALLOW_ASM();
10994  START();
10995
10996  int postindex = 2 * kXRegSizeInBytes;
10997 int data_length = 0;
10998
10999  __ Mov(x0, src_tagged);
11000 __ Mov(x1, dst_tagged);
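      // A post-indexed MemOperand accesses memory at the unmodified base and then
      // advances the base by the offset, so x0 and x1 start at the buffers and walk
      // forward by the size of each access.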
11001
11002  __ ldp(x2, x3, MemOperand(x0, postindex, PostIndex));
11003 __ stp(x2, x3, MemOperand(x1, postindex, PostIndex));
11004  data_length = postindex;
11005
11006 postindex = 2 * kWRegSizeInBytes;
11007 __ ldpsw(x2, x3, MemOperand(x0, postindex, PostIndex));
11008 __ stp(w2, w3, MemOperand(x1, postindex, PostIndex));
11009 data_length += postindex;
11010
11011 postindex = 2 * kDRegSizeInBytes;
11012 __ ldp(d0, d1, MemOperand(x0, postindex, PostIndex));
11013 __ stp(d0, d1, MemOperand(x1, postindex, PostIndex));
11014 data_length += postindex;
11015
11016 postindex = 2 * kWRegSizeInBytes;
11017 __ ldp(w2, w3, MemOperand(x0, postindex, PostIndex));
11018 __ stp(w2, w3, MemOperand(x1, postindex, PostIndex));
11019 data_length += postindex;
11020
11021 postindex = 2 * kSRegSizeInBytes;
11022 __ ldp(s0, s1, MemOperand(x0, postindex, PostIndex));
11023 __ stp(s0, s1, MemOperand(x1, postindex, PostIndex));
11024 data_length += postindex;
11025
11026 postindex = kXRegSizeInBytes;
11027 __ ldr(x2, MemOperand(x0, postindex, PostIndex));
11028 __ str(x2, MemOperand(x1, postindex, PostIndex));
11029 data_length += postindex;
11030
11031 postindex = kDRegSizeInBytes;
11032 __ ldr(d0, MemOperand(x0, postindex, PostIndex));
11033 __ str(d0, MemOperand(x1, postindex, PostIndex));
11034 data_length += postindex;
11035
11036 postindex = kWRegSizeInBytes;
11037 __ ldr(w2, MemOperand(x0, postindex, PostIndex));
11038 __ str(w2, MemOperand(x1, postindex, PostIndex));
11039 data_length += postindex;
11040
11041 postindex = kSRegSizeInBytes;
11042 __ ldr(s0, MemOperand(x0, postindex, PostIndex));
11043 __ str(s0, MemOperand(x1, postindex, PostIndex));
11044 data_length += postindex;
11045
11046 postindex = 2;
11047 __ ldrh(w2, MemOperand(x0, postindex, PostIndex));
11048 __ strh(w2, MemOperand(x1, postindex, PostIndex));
11049 data_length += postindex;
11050
11051 postindex = 2;
11052 __ ldrsh(w2, MemOperand(x0, postindex, PostIndex));
11053 __ strh(w2, MemOperand(x1, postindex, PostIndex));
11054 data_length += postindex;
11055
11056 postindex = 1;
11057 __ ldrb(w2, MemOperand(x0, postindex, PostIndex));
11058 __ strb(w2, MemOperand(x1, postindex, PostIndex));
11059 data_length += postindex;
11060
11061 postindex = 1;
11062 __ ldrsb(w2, MemOperand(x0, postindex, PostIndex));
11063 __ strb(w2, MemOperand(x1, postindex, PostIndex));
11064 data_length += postindex;
11065
11066 VIXL_ASSERT(kMaxDataLength >= data_length);
11067
11068 END();
11069 RUN();
11070
11071 // Check that the postindex was correctly applied in each operation, and
11072 // that the tag was preserved.
11073 ASSERT_EQUAL_64(src_tagged + data_length, x0);
11074 ASSERT_EQUAL_64(dst_tagged + data_length, x1);
11075
11076 for (int k = 0; k < data_length; k++) {
11077 VIXL_CHECK(src[k] == dst[k]);
11078 }
11079
11080 TEARDOWN();
11081 }
11082 }
11083}
11084
11085
11086TEST(load_store_tagged_register_offset) {
11087 uint64_t tags[] = { 0x00, 0x1, 0x55, 0xff };
11088 int tag_count = sizeof(tags) / sizeof(tags[0]);
11089
11090 const int kMaxDataLength = 128;
11091
11092 for (int i = 0; i < tag_count; i++) {
11093 unsigned char src[kMaxDataLength];
11094 uint64_t src_raw = reinterpret_cast<uint64_t>(src);
11095 uint64_t src_tag = tags[i];
11096 uint64_t src_tagged = CPU::SetPointerTag(src_raw, src_tag);
11097
11098 for (int k = 0; k < kMaxDataLength; k++) {
11099 src[k] = k + 1;
11100 }
11101
11102 for (int j = 0; j < tag_count; j++) {
11103 unsigned char dst[kMaxDataLength];
11104 uint64_t dst_raw = reinterpret_cast<uint64_t>(dst);
11105 uint64_t dst_tag = tags[j];
11106 uint64_t dst_tagged = CPU::SetPointerTag(dst_raw, dst_tag);
11107
11108 // Also tag the offset register; the operation should still succeed.
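      // Because the tag byte is ignored when the address is formed, tagging the
      // offset register (as well as the base) must not change which bytes are
      // accessed.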
11109 for (int o = 0; o < tag_count; o++) {
11110 uint64_t offset_base = CPU::SetPointerTag(UINT64_C(0), tags[o]);
11111 int data_length = 0;
11112
11113 for (int k = 0; k < kMaxDataLength; k++) {
11114 dst[k] = 0;
11115 }
11116
11117 SETUP();
11118  ALLOW_ASM();
11119  START();
11120
11121 __ Mov(x0, src_tagged);
11122 __ Mov(x1, dst_tagged);
11123
11124 __ Mov(x10, offset_base + data_length);
11125 __ ldr(x2, MemOperand(x0, x10));
11126 __ str(x2, MemOperand(x1, x10));
11127 data_length += kXRegSizeInBytes;
11128
11129 __ Mov(x10, offset_base + data_length);
11130 __ ldr(d0, MemOperand(x0, x10));
11131 __ str(d0, MemOperand(x1, x10));
11132 data_length += kDRegSizeInBytes;
11133
11134 __ Mov(x10, offset_base + data_length);
11135 __ ldr(w2, MemOperand(x0, x10));
11136 __ str(w2, MemOperand(x1, x10));
11137 data_length += kWRegSizeInBytes;
11138
11139 __ Mov(x10, offset_base + data_length);
11140 __ ldr(s0, MemOperand(x0, x10));
11141 __ str(s0, MemOperand(x1, x10));
11142 data_length += kSRegSizeInBytes;
11143
11144 __ Mov(x10, offset_base + data_length);
11145 __ ldrh(w2, MemOperand(x0, x10));
11146 __ strh(w2, MemOperand(x1, x10));
11147 data_length += 2;
11148
11149 __ Mov(x10, offset_base + data_length);
11150 __ ldrsh(w2, MemOperand(x0, x10));
11151 __ strh(w2, MemOperand(x1, x10));
11152 data_length += 2;
11153
11154 __ Mov(x10, offset_base + data_length);
11155 __ ldrb(w2, MemOperand(x0, x10));
11156 __ strb(w2, MemOperand(x1, x10));
11157 data_length += 1;
11158
11159 __ Mov(x10, offset_base + data_length);
11160 __ ldrsb(w2, MemOperand(x0, x10));
11161 __ strb(w2, MemOperand(x1, x10));
11162 data_length += 1;
11163
11164 VIXL_ASSERT(kMaxDataLength >= data_length);
11165
11166 END();
11167 RUN();
11168
11169      // Check that the loads and stores did not change the base registers, and
11170      // that the tags on the base and offset registers were preserved.
11171 ASSERT_EQUAL_64(src_tagged, x0);
11172 ASSERT_EQUAL_64(dst_tagged, x1);
11173 ASSERT_EQUAL_64(offset_base + data_length - 1, x10);
11174
11175 for (int k = 0; k < data_length; k++) {
11176 VIXL_CHECK(src[k] == dst[k]);
11177 }
11178
11179 TEARDOWN();
11180 }
11181 }
11182 }
11183}
11184
11185
11186TEST(branch_tagged) {
11187 SETUP();
11188 START();
11189
11190 Label loop, loop_entry, done;
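      // x0 holds the branch target. Each pass through the loop bumps the tag in its
      // top byte and branches again, so on success x1 counts one jump per possible
      // tag value (1 << kAddressTagWidth).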
11191 __ Adr(x0, &loop);
11192 __ Mov(x1, 0);
11193 __ B(&loop_entry);
11194
11195 __ Bind(&loop);
11196 __ Add(x1, x1, 1); // Count successful jumps.
11197
11198 // Advance to the next tag, then bail out if we've come back around to tag 0.
11199 __ Add(x0, x0, UINT64_C(1) << kAddressTagOffset);
11200 __ Tst(x0, kAddressTagMask);
11201 __ B(eq, &done);
11202
11203 __ Bind(&loop_entry);
11204 __ Br(x0);
11205
11206 __ Bind(&done);
11207
11208 END();
11209 RUN();
11210
11211 ASSERT_EQUAL_64(1 << kAddressTagWidth, x1);
11212
11213 TEARDOWN();
11214}
11215
11216
11217TEST(branch_and_link_tagged) {
11218 SETUP();
11219 START();
11220
11221 Label loop, loop_entry, done;
11222 __ Adr(x0, &loop);
11223 __ Mov(x1, 0);
11224 __ B(&loop_entry);
11225
11226 __ Bind(&loop);
11227
11228 // Bail out (before counting a successful jump) if lr appears to be tagged.
11229 __ Tst(lr, kAddressTagMask);
11230 __ B(ne, &done);
11231
11232 __ Add(x1, x1, 1); // Count successful jumps.
11233
11234 // Advance to the next tag, then bail out if we've come back around to tag 0.
11235 __ Add(x0, x0, UINT64_C(1) << kAddressTagOffset);
11236 __ Tst(x0, kAddressTagMask);
11237 __ B(eq, &done);
11238
11239 __ Bind(&loop_entry);
11240 __ Blr(x0);
11241
11242 __ Bind(&done);
11243
11244 END();
11245 RUN();
11246
11247 ASSERT_EQUAL_64(1 << kAddressTagWidth, x1);
11248
11249 TEARDOWN();
11250}
11251
11252
11253TEST(branch_tagged_and_adr_adrp) {
11254 SETUP_CUSTOM(BUF_SIZE, PageOffsetDependentCode);
11255 START();
11256
11257 Label loop, loop_entry, done;
11258 __ Adr(x0, &loop);
11259 __ Mov(x1, 0);
11260 __ B(&loop_entry);
11261
11262 __ Bind(&loop);
11263
11264 // Bail out (before counting a successful jump) if `adr x10, ...` is tagged.
11265 __ Adr(x10, &done);
11266 __ Tst(x10, kAddressTagMask);
11267 __ B(ne, &done);
11268
11269 // Bail out (before counting a successful jump) if `adrp x11, ...` is tagged.
11270 __ Adrp(x11, &done);
11271 __ Tst(x11, kAddressTagMask);
11272 __ B(ne, &done);
11273
11274 __ Add(x1, x1, 1); // Count successful iterations.
11275
11276 // Advance to the next tag, then bail out if we've come back around to tag 0.
11277 __ Add(x0, x0, UINT64_C(1) << kAddressTagOffset);
11278 __ Tst(x0, kAddressTagMask);
11279 __ B(eq, &done);
11280
11281 __ Bind(&loop_entry);
11282 __ Br(x0);
11283
11284 __ Bind(&done);
11285
11286 END();
11287 RUN();
11288
11289 ASSERT_EQUAL_64(1 << kAddressTagWidth, x1);
11290
11291 TEARDOWN();
11292}
11293
11294
11295}  // namespace vixl