// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may
//     be used to endorse or promote products derived from this software
//     without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_AARCH64_TEST_UTILS_AARCH64_H_
#define VIXL_AARCH64_TEST_UTILS_AARCH64_H_

#include "test-runner.h"

#include "aarch64/cpu-aarch64.h"
#include "aarch64/disasm-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "aarch64/simulator-aarch64.h"

namespace vixl {
namespace aarch64 {

// Signalling and quiet NaNs in double format, constructed such that the bottom
// 32 bits look like a signalling or quiet NaN (as appropriate) when interpreted
// as a float. These values are not architecturally significant, but they're
// useful in tests for initialising registers.
extern const double kFP64SignallingNaN;
extern const double kFP64QuietNaN;

// Signalling and quiet NaNs in float format.
extern const float kFP32SignallingNaN;
extern const float kFP32QuietNaN;

// Signalling and quiet NaNs in half-precision float format.
extern const Float16 kFP16SignallingNaN;
extern const Float16 kFP16QuietNaN;
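
// For example (an illustrative sketch, not necessarily the exact constants
// used by the implementation): a float whose raw bits are 0x7f800001 is a
// signalling NaN (all-ones exponent, non-zero fraction, top fraction bit
// clear), and 0x7fc00001 is a quiet NaN (top fraction bit set). A double with
// raw bits 0x7ff000007f800001 is itself a signalling NaN *and* its bottom 32
// bits form the float signalling NaN above, which is the property described
// here.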
54
Jacob Bramleyd77a8e42019-02-12 16:52:24 +000055// Vector registers don't naturally fit any C++ native type, so define a class
56// with convenient accessors.
57// Note that this has to be a POD type so that we can use 'offsetof' with it.
58template <int kSizeInBytes>
59struct VectorValue {
60 template <typename T>
Jacob Bramley03c0b512019-02-22 16:42:06 +000061 T GetLane(int lane) const {
Jacob Bramleyd77a8e42019-02-12 16:52:24 +000062 size_t lane_size = sizeof(T);
Jacob Bramley03c0b512019-02-22 16:42:06 +000063 VIXL_CHECK(lane >= 0);
Jacob Bramleyd77a8e42019-02-12 16:52:24 +000064 VIXL_CHECK(kSizeInBytes >= ((lane + 1) * lane_size));
65 T result;
66 memcpy(&result, bytes + (lane * lane_size), lane_size);
67 return result;
68 }
69
70 template <typename T>
71 void SetLane(int lane, T value) {
72 size_t lane_size = sizeof(value);
73 VIXL_CHECK(kSizeInBytes >= ((lane + 1) * lane_size));
74 memcpy(bytes + (lane * lane_size), &value, lane_size);
75 }
76
77 bool Equals(const VectorValue<kSizeInBytes>& other) const {
78 return memcmp(bytes, other.bytes, kSizeInBytes) == 0;
79 }
80
81 uint8_t bytes[kSizeInBytes];
Alexandre Ramesd3832962016-07-04 15:03:43 +010082};
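
// A usage sketch (illustrative only; the values and lane indices are
// arbitrary, not part of the API):
//
//   VectorValue<16> v = {{0}};             // Zero-initialise a Q-sized value.
//   v.SetLane<uint32_t>(0, 0xdeadbeef);    // Write lane 0 as a 32-bit lane.
//   uint32_t s0 = v.GetLane<uint32_t>(0);  // Reads back 0xdeadbeef.
//   uint8_t b0 = v.GetLane<uint8_t>(0);    // 0xef: lanes are little-endian.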

// It would be convenient to make these subclasses, so that we could provide
// constructors and utility methods specific to each register type, but we
// can't do that because it would make the result a non-POD type, and then we
// couldn't use 'offsetof' in RegisterDump::Dump.
typedef VectorValue<kQRegSizeInBytes> QRegisterValue;
typedef VectorValue<kZRegMaxSizeInBytes> ZRegisterValue;
typedef VectorValue<kPRegMaxSizeInBytes> PRegisterValue;

// RegisterDump: Object allowing integer, floating point and flags registers
// to be saved to itself for future reference.
class RegisterDump {
 public:
  RegisterDump() : completed_(false) {
    VIXL_ASSERT(sizeof(dump_.d_[0]) == kDRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.s_[0]) == kSRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.h_[0]) == kHRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.d_[0]) == kXRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.s_[0]) == kWRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.x_[0]) == kXRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.w_[0]) == kWRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.q_[0]) == kQRegSizeInBytes);
  }

  // The Dump method generates code to store a snapshot of the register values.
  // It needs to be able to use the stack temporarily, and requires that the
  // current stack pointer is sp, and is properly aligned.
  //
  // The dumping code is generated through the given MacroAssembler. No
  // registers are corrupted in the process, but the stack is used briefly. The
  // flags will be corrupted during this call.
  void Dump(MacroAssembler* assm);

  // Register accessors.
  inline int32_t wreg(unsigned code) const {
    if (code == kSPRegInternalCode) {
      return wspreg();
    }
    VIXL_ASSERT(RegAliasesMatch(code));
    return dump_.w_[code];
  }

  inline int64_t xreg(unsigned code) const {
    if (code == kSPRegInternalCode) {
      return spreg();
    }
    VIXL_ASSERT(RegAliasesMatch(code));
    return dump_.x_[code];
  }

  // VRegister accessors.
  inline uint16_t hreg_bits(unsigned code) const {
    VIXL_ASSERT(VRegAliasesMatch(code));
    return dump_.h_[code];
  }

  inline uint32_t sreg_bits(unsigned code) const {
    VIXL_ASSERT(VRegAliasesMatch(code));
    return dump_.s_[code];
  }

  inline Float16 hreg(unsigned code) const {
    return RawbitsToFloat16(hreg_bits(code));
  }

  inline float sreg(unsigned code) const {
    return RawbitsToFloat(sreg_bits(code));
  }

  inline uint64_t dreg_bits(unsigned code) const {
    VIXL_ASSERT(VRegAliasesMatch(code));
    return dump_.d_[code];
  }

  inline double dreg(unsigned code) const {
    return RawbitsToDouble(dreg_bits(code));
  }

  inline QRegisterValue qreg(unsigned code) const { return dump_.q_[code]; }

  template <typename T>
  inline T zreg_lane(unsigned code, int lane) const {
    VIXL_ASSERT(VRegAliasesMatch(code));
    VIXL_ASSERT(CPUHas(CPUFeatures::kSVE));
    VIXL_ASSERT(lane < GetSVELaneCount(sizeof(T) * kBitsPerByte));
    return dump_.z_[code].GetLane<T>(lane);
  }

  inline uint64_t zreg_lane(unsigned code,
                            unsigned size_in_bits,
                            int lane) const {
    switch (size_in_bits) {
      case kBRegSize:
        return zreg_lane<uint8_t>(code, lane);
      case kHRegSize:
        return zreg_lane<uint16_t>(code, lane);
      case kSRegSize:
        return zreg_lane<uint32_t>(code, lane);
      case kDRegSize:
        return zreg_lane<uint64_t>(code, lane);
    }
    VIXL_UNREACHABLE();
    return 0;
  }

  inline uint64_t preg_lane(unsigned code,
                            unsigned p_bits_per_lane,
                            int lane) const {
    VIXL_ASSERT(CPUHas(CPUFeatures::kSVE));
    VIXL_ASSERT(lane < GetSVELaneCount(p_bits_per_lane * kZRegBitsPerPRegBit));
    // Load a chunk and extract the necessary bits. The chunk size is arbitrary.
    typedef uint64_t Chunk;
    const size_t kChunkSizeInBits = sizeof(Chunk) * kBitsPerByte;
    VIXL_ASSERT(IsPowerOf2(p_bits_per_lane));
    VIXL_ASSERT(p_bits_per_lane <= kChunkSizeInBits);

    int chunk_index = (lane * p_bits_per_lane) / kChunkSizeInBits;
    int bit_index = (lane * p_bits_per_lane) % kChunkSizeInBits;
    Chunk chunk = dump_.p_[code].GetLane<Chunk>(chunk_index);
    return (chunk >> bit_index) & GetUintMask(p_bits_per_lane);
  }
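
  // A worked example of the arithmetic above (illustrative only): with eight
  // P bits per lane (D-sized Z lanes, since kZRegBitsPerPRegBit is 8) and
  // lane == 9, the lane starts at bit 9 * 8 == 72 of the register, so
  // chunk_index == 1, bit_index == 8, and GetUintMask(8) == 0xff.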

  inline int GetSVELaneCount(int lane_size_in_bits) const {
    VIXL_ASSERT((dump_.vl_ % lane_size_in_bits) == 0);
    uint64_t count = dump_.vl_ / lane_size_in_bits;
    VIXL_ASSERT(count <= INT_MAX);
    return static_cast<int>(count);
  }

  template <typename T>
  inline bool HasSVELane(T reg, int lane) const {
    VIXL_ASSERT(reg.IsZRegister() || reg.IsPRegister());
    return lane < GetSVELaneCount(reg.GetLaneSizeInBits());
  }

  template <typename T>
  inline uint64_t GetSVELane(T reg, int lane) const {
    VIXL_ASSERT(HasSVELane(reg, lane));
    if (reg.IsZRegister()) {
      return zreg_lane(reg.GetCode(), reg.GetLaneSizeInBits(), lane);
    } else if (reg.IsPRegister()) {
      VIXL_ASSERT((reg.GetLaneSizeInBits() % kZRegBitsPerPRegBit) == 0);
      return preg_lane(reg.GetCode(),
                       reg.GetLaneSizeInBits() / kZRegBitsPerPRegBit,
                       lane);
    } else {
      VIXL_ABORT();
    }
  }

  // Stack pointer accessors.
  inline int64_t spreg() const {
    VIXL_ASSERT(SPRegAliasesMatch());
    return dump_.sp_;
  }

  inline int32_t wspreg() const {
    VIXL_ASSERT(SPRegAliasesMatch());
    return static_cast<int32_t>(dump_.wsp_);
  }

  // Flags accessors.
  inline uint32_t flags_nzcv() const {
    VIXL_ASSERT(IsComplete());
    VIXL_ASSERT((dump_.flags_ & ~Flags_mask) == 0);
    return dump_.flags_ & Flags_mask;
  }

  inline bool IsComplete() const { return completed_; }

 private:
  // Indicate whether the dump operation has been completed.
  bool completed_;

  // Check that the lower 32 bits of x<code> exactly match the 32 bits of
  // w<code>. A failure of this test most likely represents a failure in the
  // ::Dump method, or a failure in the simulator.
  bool RegAliasesMatch(unsigned code) const {
    VIXL_ASSERT(IsComplete());
    VIXL_ASSERT(code < kNumberOfRegisters);
    return ((dump_.x_[code] & kWRegMask) == dump_.w_[code]);
  }

  // As RegAliasesMatch, but for the stack pointer.
  bool SPRegAliasesMatch() const {
    VIXL_ASSERT(IsComplete());
    return ((dump_.sp_ & kWRegMask) == dump_.wsp_);
  }

  // As RegAliasesMatch, but for Z and V registers.
  bool VRegAliasesMatch(unsigned code) const {
    VIXL_ASSERT(IsComplete());
    VIXL_ASSERT(code < kNumberOfVRegisters);
    bool match = ((dump_.q_[code].GetLane<uint64_t>(0) == dump_.d_[code]) &&
                  ((dump_.d_[code] & kSRegMask) == dump_.s_[code]) &&
                  ((dump_.s_[code] & kHRegMask) == dump_.h_[code]));
    if (CPUHas(CPUFeatures::kSVE)) {
      bool z_match =
          memcmp(&dump_.q_[code], &dump_.z_[code], kQRegSizeInBytes) == 0;
      match = match && z_match;
    }
    return match;
  }

  // Record the CPUFeatures enabled when Dump was called.
  CPUFeatures dump_cpu_features_;

  // Convenience pass-through for CPU feature checks.
  bool CPUHas(CPUFeatures::Feature feature0,
              CPUFeatures::Feature feature1 = CPUFeatures::kNone,
              CPUFeatures::Feature feature2 = CPUFeatures::kNone,
              CPUFeatures::Feature feature3 = CPUFeatures::kNone) const {
    return dump_cpu_features_.Has(feature0, feature1, feature2, feature3);
  }

  // Store all the dumped elements in a simple struct so the implementation can
  // use offsetof to quickly find the correct field.
  struct dump_t {
    // Core registers.
    uint64_t x_[kNumberOfRegisters];
    uint32_t w_[kNumberOfRegisters];

    // Floating-point registers, as raw bits.
    uint64_t d_[kNumberOfVRegisters];
    uint32_t s_[kNumberOfVRegisters];
    uint16_t h_[kNumberOfVRegisters];

    // Vector registers.
    QRegisterValue q_[kNumberOfVRegisters];
    ZRegisterValue z_[kNumberOfZRegisters];

    PRegisterValue p_[kNumberOfPRegisters];

    // The stack pointer.
    uint64_t sp_;
    uint64_t wsp_;

    // NZCV flags, stored in bits 28 to 31.
    //  bit[31] : Negative
    //  bit[30] : Zero
    //  bit[29] : Carry
    //  bit[28] : oVerflow
    uint64_t flags_;

    // The SVE "VL" (vector length) in bits.
    uint64_t vl_;
  } dump_;
};
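
// A typical test-side usage sketch (illustrative only; SETUP, START, END and
// RUN are assumed to be the test harness macros used alongside this header):
//
//   SETUP();
//   START();
//   __ Mov(x0, 0x123);
//   RegisterDump core;
//   core.Dump(&masm);  // Generates code; requires sp to be valid and aligned.
//   END();
//   RUN();
//   VIXL_CHECK(core.xreg(0) == 0x123);  // Inspect the snapshot after running.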

// Some tests want to check that a value is _not_ equal to a reference value.
// These enum values can be used to control the error reporting behaviour.
enum ExpectedResult { kExpectEqual, kExpectNotEqual };

// The Equal* methods return true if the result matches the reference value.
// They all print an error message to the console if the result is incorrect
// (according to the ExpectedResult argument, or kExpectEqual if it is absent).
//
Alexandre Ramesd3832962016-07-04 15:03:43 +0100340// Some of these methods don't use the RegisterDump argument, but they have to
341// accept them so that they can overload those that take register arguments.
bool Equal32(uint32_t expected, const RegisterDump*, uint32_t result);
bool Equal64(uint64_t reference,
             const RegisterDump*,
             uint64_t result,
             ExpectedResult option = kExpectEqual);
bool Equal128(QRegisterValue expected,
              const RegisterDump*,
              QRegisterValue result);

bool EqualFP16(Float16 expected, const RegisterDump*, uint16_t result);
bool EqualFP32(float expected, const RegisterDump*, float result);
bool EqualFP64(double expected, const RegisterDump*, double result);

bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg);
bool Equal64(uint64_t reference,
             const RegisterDump* core,
             const Register& reg,
             ExpectedResult option = kExpectEqual);
bool Equal64(uint64_t expected,
             const RegisterDump* core,
             const VRegister& vreg);

bool EqualFP16(Float16 expected,
               const RegisterDump* core,
               const VRegister& fpreg);
bool EqualFP32(float expected,
               const RegisterDump* core,
               const VRegister& fpreg);
bool EqualFP64(double expected,
               const RegisterDump* core,
               const VRegister& fpreg);

bool Equal64(const Register& reg0,
             const RegisterDump* core,
             const Register& reg1,
             ExpectedResult option = kExpectEqual);
bool Equal128(uint64_t expected_h,
              uint64_t expected_l,
              const RegisterDump* core,
              const VRegister& reg);

bool EqualNzcv(uint32_t expected, uint32_t result);

bool EqualRegisters(const RegisterDump* a, const RegisterDump* b);

template <typename T0, typename T1>
bool NotEqual64(T0 reference, const RegisterDump* core, T1 result) {
  return !Equal64(reference, core, result, kExpectNotEqual);
}

bool EqualSVELane(uint64_t expected,
                  const RegisterDump* core,
                  const ZRegister& reg,
                  int lane);

bool EqualSVELane(uint64_t expected,
                  const RegisterDump* core,
                  const PRegister& reg,
                  int lane);

// Check that each SVE lane matches the corresponding expected[] value. The
// highest-indexed array element maps to the lowest-numbered lane.
template <typename T, int N, typename R>
bool EqualSVE(const T (&expected)[N],
              const RegisterDump* core,
              const R& reg,
              bool* printed_warning) {
  VIXL_ASSERT(reg.IsZRegister() || reg.IsPRegister());
  VIXL_ASSERT(reg.HasLaneSize());
  // Evaluate and report errors on every lane, rather than just the first.
  bool equal = true;
  for (int lane = 0; lane < N; ++lane) {
    if (!core->HasSVELane(reg, lane)) {
      if (*printed_warning == false) {
        *printed_warning = true;
        printf(
            "Warning: Ignoring SVE lanes beyond VL (%d bytes) "
            "because the CPU does not implement them.\n",
            core->GetSVELaneCount(kBRegSize));
      }
      break;
    }
    // Map the highest-indexed array element to the lowest-numbered lane.
    equal = EqualSVELane(expected[N - lane - 1], core, reg, lane) && equal;
  }
  return equal;
}
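
// A usage sketch (illustrative only; z0 and the values are arbitrary):
//
//   bool printed_warning = false;
//   uint64_t expected[] = {0x2222222222222222, 0x1111111111111111};
//   // Checks lane 1 == 0x2222... and lane 0 == 0x1111..., i.e. the
//   // highest-indexed array element maps to the lowest-numbered lane.
//   VIXL_CHECK(EqualSVE(expected, &core, z0.VnD(), &printed_warning));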

// Check that each SVE lane matches the `expected` value.
template <typename R>
bool EqualSVE(uint64_t expected,
              const RegisterDump* core,
              const R& reg,
              bool* printed_warning) {
  VIXL_ASSERT(reg.IsZRegister() || reg.IsPRegister());
  VIXL_ASSERT(reg.HasLaneSize());
  USE(printed_warning);
  // Evaluate and report errors on every lane, rather than just the first.
  bool equal = true;
  for (int lane = 0; lane < core->GetSVELaneCount(reg.GetLaneSizeInBits());
       ++lane) {
    equal = EqualSVELane(expected, core, reg, lane) && equal;
  }
  return equal;
}

// Check that two Z or P registers are equal.
template <typename R>
bool EqualSVE(const R& expected,
              const RegisterDump* core,
              const R& result,
              bool* printed_warning) {
  VIXL_ASSERT(result.IsZRegister() || result.IsPRegister());
  VIXL_ASSERT(AreSameFormat(expected, result));
  USE(printed_warning);

  // If the lane size is omitted, pick a default.
  if (!result.HasLaneSize()) {
    return EqualSVE(expected.VnB(), core, result.VnB(), printed_warning);
  }

  // Evaluate and report errors on every lane, rather than just the first.
  bool equal = true;
  int lane_size = result.GetLaneSizeInBits();
  for (int lane = 0; lane < core->GetSVELaneCount(lane_size); ++lane) {
    uint64_t expected_lane = core->GetSVELane(expected, lane);
    equal = equal && EqualSVELane(expected_lane, core, result, lane);
  }
  return equal;
}

bool EqualMemory(const void* expected,
                 const void* result,
                 size_t size_in_bytes);

// Populate the w, x and r arrays with registers from the 'allowed' mask. The
// r array will be populated with <reg_size>-sized registers.
//
// This allows for tests which use large, parameterized blocks of registers
// (such as the push and pop tests), but where certain registers must be
// avoided as they are used for other purposes.
//
// Any of w, x, or r can be NULL if they are not required.
//
// The return value is a RegList indicating which registers were allocated.
RegList PopulateRegisterArray(Register* w,
                              Register* x,
                              Register* r,
                              int reg_size,
                              int reg_count,
                              RegList allowed);
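
// A usage sketch (illustrative only; the count and exclusions are arbitrary):
//
//   Register w[10], x[10];
//   // Allocate ten X-sized registers, excluding x16, x17 and x18, and get
//   // matching W views of the same registers.
//   RegList allowed = ~(x16.GetBit() | x17.GetBit() | x18.GetBit());
//   RegList list = PopulateRegisterArray(w, x, NULL, kXRegSize, 10, allowed);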
Alexandre Ramesd3832962016-07-04 15:03:43 +0100493
494// As PopulateRegisterArray, but for floating-point registers.
Jacob Bramleycf93ad52019-04-15 16:00:22 +0100495RegList PopulateVRegisterArray(VRegister* s,
496 VRegister* d,
497 VRegister* v,
498 int reg_size,
499 int reg_count,
500 RegList allowed);
Alexandre Ramesd3832962016-07-04 15:03:43 +0100501
// Overwrite the contents of the specified registers. This enables tests to
// check that register contents are written in cases where it's likely that the
// correct outcome could already be stored in the register.
//
// This always overwrites X-sized registers. If tests are operating on W
// registers, a subsequent write into an aliased W register should clear the
// top word anyway, so clobbering the full X registers should make tests more
// rigorous.
void Clobber(MacroAssembler* masm,
             RegList reg_list,
             uint64_t const value = 0xfedcba9876543210);

// As Clobber, but for FP registers.
void ClobberFP(MacroAssembler* masm,
               RegList reg_list,
               double const value = kFP64SignallingNaN);

// As Clobber, but for a CPURegList with either FP or integer registers. When
// using this method, the clobber value is always the default for the basic
// Clobber or ClobberFP functions.
void Clobber(MacroAssembler* masm, CPURegList reg_list);
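
// A usage sketch (illustrative only): before generating code that is expected
// to set x0 and x1, clobber them so that a stale-but-correct value cannot mask
// a bug in the code under test.
//
//   Clobber(&masm, x0.GetBit() | x1.GetBit());
//   __ Mov(x0, 42);  // The code under test.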

}  // namespace aarch64
}  // namespace vixl

#endif  // VIXL_AARCH64_TEST_UTILS_AARCH64_H_