armvixlad96eda2013-06-14 11:42:37 +01001// Copyright 2013, ARM Limited
2// All rights reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are met:
6//
7// * Redistributions of source code must retain the above copyright notice,
8// this list of conditions and the following disclaimer.
9// * Redistributions in binary form must reproduce the above copyright notice,
10// this list of conditions and the following disclaimer in the documentation
11// and/or other materials provided with the distribution.
12// * Neither the name of ARM Limited nor the names of its contributors may be
13// used to endorse or promote products derived from this software without
14// specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27#include "a64/macro-assembler-a64.h"
28namespace vixl {
29
armvixlc68cb642014-09-25 18:49:30 +010030
31LiteralPool::LiteralPool(Assembler* assm)
32 : assm_(assm), first_use_(-1), monitor_(0) {
33}
34
35
36LiteralPool::~LiteralPool() {
37 VIXL_ASSERT(IsEmpty());
38 VIXL_ASSERT(!IsBlocked());
39}
40
41
42void LiteralPool::Reset() {
43 std::vector<RawLiteral*>::iterator it, end;
44 for (it = entries_.begin(), end = entries_.end(); it != end; ++it) {
45 delete *it;
46 }
47 entries_.clear();
48 first_use_ = -1;
49 monitor_ = 0;
50}
51
52
53size_t LiteralPool::Size() const {
54 size_t size = 0;
55 std::vector<RawLiteral*>::const_iterator it, end;
56 for (it = entries_.begin(), end = entries_.end(); it != end; ++it) {
57 size += (*it)->size();
58 }
59
 60 // Account for the pool header.
61 return size + kInstructionSize;
62}
63
64
65void LiteralPool::Release() {
66 if (--monitor_ == 0) {
67 // Has the literal pool been blocked for too long?
68 VIXL_ASSERT(assm_->CursorOffset() < MaxCursorOffset());
69 }
70}
71
72
73void LiteralPool::CheckEmitFor(size_t amount, EmitOption option) {
74 if (IsEmpty() || IsBlocked()) return;
75
76 ptrdiff_t distance = assm_->CursorOffset() + amount - first_use_;
77 if (distance >= kRecommendedLiteralPoolRange) {
78 Emit(option);
79 }
80}
81
82
83void LiteralPool::Emit(EmitOption option) {
84 // There is an issue if we are asked to emit a blocked or empty pool.
85 VIXL_ASSERT(!IsBlocked());
86 VIXL_ASSERT(!IsEmpty());
87
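// Schematically (an illustrative sketch of the emitted layout):
//   [ b end_of_pool ]                       ; only for kBranchRequired
//   ldr xzr, #<pool size in 32-bit words>   ; pool marker
//   <literal data>
//   end_of_pool: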
88 size_t pool_size = Size();
89 size_t emit_size = pool_size;
90 if (option == kBranchRequired) emit_size += kInstructionSize;
91 Label end_of_pool;
92
93 CodeBufferCheckScope guard(assm_,
94 emit_size,
95 CodeBufferCheckScope::kCheck,
96 CodeBufferCheckScope::kExactSize);
97 if (option == kBranchRequired) assm_->b(&end_of_pool);
98
99 // Marker indicating the size of the literal pool in 32-bit words.
100 VIXL_ASSERT((pool_size % kWRegSizeInBytes) == 0);
101 assm_->ldr(xzr, pool_size / kWRegSizeInBytes);
102
103 // Now populate the literal pool.
104 std::vector<RawLiteral*>::iterator it, end;
105 for (it = entries_.begin(), end = entries_.end(); it != end; ++it) {
106 VIXL_ASSERT((*it)->IsUsed());
107 assm_->place(*it);
108 delete *it;
109 }
110
111 if (option == kBranchRequired) assm_->bind(&end_of_pool);
112
113 entries_.clear();
114 first_use_ = -1;
115}
116
117
118ptrdiff_t LiteralPool::NextCheckOffset() {
119 if (IsEmpty()) {
120 return assm_->CursorOffset() + kRecommendedLiteralPoolRange;
121 }
122
123 VIXL_ASSERT(
124 ((assm_->CursorOffset() - first_use_) < kRecommendedLiteralPoolRange) ||
125 IsBlocked());
126
127 return first_use_ + kRecommendedLiteralPoolRange;
128}
129
130
131EmissionCheckScope::EmissionCheckScope(MacroAssembler* masm, size_t size) {
132 masm->EnsureEmitFor(size);
133#ifdef DEBUG
134 masm_ = masm;
135 masm->Bind(&start_);
136 size_ = size;
137 masm->AcquireBuffer();
138#endif
139}
140
141
142EmissionCheckScope::~EmissionCheckScope() {
143#ifdef DEBUG
144 masm_->ReleaseBuffer();
145 VIXL_ASSERT(masm_->SizeOfCodeGeneratedSince(&start_) <= size_);
146#endif
147}
148
149
150MacroAssembler::MacroAssembler(size_t capacity,
151 PositionIndependentCodeOption pic)
152 : Assembler(capacity, pic),
153#ifdef DEBUG
154 allow_macro_instructions_(true),
155#endif
156 sp_(sp),
157 tmp_list_(ip0, ip1),
158 fptmp_list_(d31),
159 literal_pool_(this) {
160 checkpoint_ = NextCheckOffset();
161}
162
163
164MacroAssembler::MacroAssembler(byte * buffer,
165 size_t capacity,
166 PositionIndependentCodeOption pic)
167 : Assembler(buffer, capacity, pic),
168#ifdef DEBUG
169 allow_macro_instructions_(true),
170#endif
171 sp_(sp),
172 tmp_list_(ip0, ip1),
173 fptmp_list_(d31),
174 literal_pool_(this) {
175 checkpoint_ = NextCheckOffset();
176}
177
178
179MacroAssembler::~MacroAssembler() {
180}
181
182
183void MacroAssembler::Reset() {
184 Assembler::Reset();
185
186 VIXL_ASSERT(!literal_pool_.IsBlocked());
187 literal_pool_.Reset();
188
189 checkpoint_ = NextCheckOffset();
190}
191
192
193void MacroAssembler::FinalizeCode() {
194 if (!literal_pool_.IsEmpty()) literal_pool_.Emit();
195
196 Assembler::FinalizeCode();
197}
198
199
armvixlb0c8ae22014-03-21 14:03:59 +0000200void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
201 VIXL_ASSERT((reg.Is(NoReg) || (type >= kBranchTypeFirstUsingReg)) &&
202 ((bit == -1) || (type >= kBranchTypeFirstUsingBit)));
203 if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
204 B(static_cast<Condition>(type), label);
205 } else {
206 switch (type) {
207 case always: B(label); break;
208 case never: break;
209 case reg_zero: Cbz(reg, label); break;
210 case reg_not_zero: Cbnz(reg, label); break;
211 case reg_bit_clear: Tbz(reg, bit, label); break;
212 case reg_bit_set: Tbnz(reg, bit, label); break;
213 default:
214 VIXL_UNREACHABLE();
215 }
216 }
217}
218
armvixlad96eda2013-06-14 11:42:37 +0100219void MacroAssembler::And(const Register& rd,
220 const Register& rn,
armvixlf37fdc02014-02-05 13:22:16 +0000221 const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000222 VIXL_ASSERT(allow_macro_instructions_);
armvixlf37fdc02014-02-05 13:22:16 +0000223 LogicalMacro(rd, rn, operand, AND);
224}
225
226
227void MacroAssembler::Ands(const Register& rd,
228 const Register& rn,
229 const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000230 VIXL_ASSERT(allow_macro_instructions_);
armvixlf37fdc02014-02-05 13:22:16 +0000231 LogicalMacro(rd, rn, operand, ANDS);
armvixlad96eda2013-06-14 11:42:37 +0100232}
233
234
235void MacroAssembler::Tst(const Register& rn,
236 const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000237 VIXL_ASSERT(allow_macro_instructions_);
armvixlf37fdc02014-02-05 13:22:16 +0000238 Ands(AppropriateZeroRegFor(rn), rn, operand);
armvixlad96eda2013-06-14 11:42:37 +0100239}
240
241
242void MacroAssembler::Bic(const Register& rd,
243 const Register& rn,
armvixlf37fdc02014-02-05 13:22:16 +0000244 const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000245 VIXL_ASSERT(allow_macro_instructions_);
armvixlf37fdc02014-02-05 13:22:16 +0000246 LogicalMacro(rd, rn, operand, BIC);
247}
248
249
250void MacroAssembler::Bics(const Register& rd,
251 const Register& rn,
252 const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000253 VIXL_ASSERT(allow_macro_instructions_);
armvixlf37fdc02014-02-05 13:22:16 +0000254 LogicalMacro(rd, rn, operand, BICS);
armvixlad96eda2013-06-14 11:42:37 +0100255}
256
257
258void MacroAssembler::Orr(const Register& rd,
259 const Register& rn,
260 const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000261 VIXL_ASSERT(allow_macro_instructions_);
armvixlad96eda2013-06-14 11:42:37 +0100262 LogicalMacro(rd, rn, operand, ORR);
263}
264
265
266void MacroAssembler::Orn(const Register& rd,
267 const Register& rn,
268 const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000269 VIXL_ASSERT(allow_macro_instructions_);
armvixlad96eda2013-06-14 11:42:37 +0100270 LogicalMacro(rd, rn, operand, ORN);
271}
272
273
274void MacroAssembler::Eor(const Register& rd,
275 const Register& rn,
276 const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000277 VIXL_ASSERT(allow_macro_instructions_);
armvixlad96eda2013-06-14 11:42:37 +0100278 LogicalMacro(rd, rn, operand, EOR);
279}
280
281
282void MacroAssembler::Eon(const Register& rd,
283 const Register& rn,
284 const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000285 VIXL_ASSERT(allow_macro_instructions_);
armvixlad96eda2013-06-14 11:42:37 +0100286 LogicalMacro(rd, rn, operand, EON);
287}
288
289
290void MacroAssembler::LogicalMacro(const Register& rd,
291 const Register& rn,
292 const Operand& operand,
293 LogicalOp op) {
armvixlc68cb642014-09-25 18:49:30 +0100294 // The worst case for size is logical immediate to sp:
295 // * up to 4 instructions to materialise the constant
296 // * 1 instruction to do the operation
297 // * 1 instruction to move to sp
298 MacroEmissionCheckScope guard(this);
armvixlb0c8ae22014-03-21 14:03:59 +0000299 UseScratchRegisterScope temps(this);
300
armvixlad96eda2013-06-14 11:42:37 +0100301 if (operand.IsImmediate()) {
302 int64_t immediate = operand.immediate();
303 unsigned reg_size = rd.size();
armvixlad96eda2013-06-14 11:42:37 +0100304
305 // If the operation is NOT, invert the operation and immediate.
306 if ((op & NOT) == NOT) {
307 op = static_cast<LogicalOp>(op & ~NOT);
308 immediate = ~immediate;
armvixlad96eda2013-06-14 11:42:37 +0100309 }
310
armvixl4a102ba2014-07-14 09:02:40 +0100311 // Ignore the top 32 bits of an immediate if we're moving to a W register.
312 if (rd.Is32Bits()) {
313 // Check that the top 32 bits are consistent.
314 VIXL_ASSERT(((immediate >> kWRegSize) == 0) ||
315 ((immediate >> kWRegSize) == -1));
316 immediate &= kWRegMask;
317 }
318
319 VIXL_ASSERT(rd.Is64Bits() || is_uint32(immediate));
320
armvixlad96eda2013-06-14 11:42:37 +0100321 // Special cases for all set or all clear immediates.
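// For instance (illustrative): And(x0, x1, 0) reduces to Mov(x0, 0), and
// Orr(x0, x1, 0xffffffffffffffff) reduces to a move of the immediate.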
322 if (immediate == 0) {
323 switch (op) {
324 case AND:
325 Mov(rd, 0);
326 return;
327 case ORR: // Fall through.
328 case EOR:
329 Mov(rd, rn);
330 return;
331 case ANDS: // Fall through.
332 case BICS:
333 break;
334 default:
armvixlb0c8ae22014-03-21 14:03:59 +0000335 VIXL_UNREACHABLE();
armvixlad96eda2013-06-14 11:42:37 +0100336 }
armvixlb0c8ae22014-03-21 14:03:59 +0000337 } else if ((rd.Is64Bits() && (immediate == -1)) ||
338 (rd.Is32Bits() && (immediate == 0xffffffff))) {
armvixlad96eda2013-06-14 11:42:37 +0100339 switch (op) {
340 case AND:
341 Mov(rd, rn);
342 return;
343 case ORR:
344 Mov(rd, immediate);
345 return;
346 case EOR:
347 Mvn(rd, rn);
348 return;
349 case ANDS: // Fall through.
350 case BICS:
351 break;
352 default:
armvixlb0c8ae22014-03-21 14:03:59 +0000353 VIXL_UNREACHABLE();
armvixlad96eda2013-06-14 11:42:37 +0100354 }
355 }
356
357 unsigned n, imm_s, imm_r;
358 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
359 // Immediate can be encoded in the instruction.
360 LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
361 } else {
362 // Immediate can't be encoded: synthesize using move immediate.
armvixlb0c8ae22014-03-21 14:03:59 +0000363 Register temp = temps.AcquireSameSizeAs(rn);
armvixl4a102ba2014-07-14 09:02:40 +0100364 Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
365
armvixlad96eda2013-06-14 11:42:37 +0100366 if (rd.Is(sp)) {
367 // If rd is the stack pointer we cannot use it as the destination
368 // register so we use the temp register as an intermediate again.
armvixl4a102ba2014-07-14 09:02:40 +0100369 Logical(temp, rn, imm_operand, op);
armvixlad96eda2013-06-14 11:42:37 +0100370 Mov(sp, temp);
371 } else {
armvixl4a102ba2014-07-14 09:02:40 +0100372 Logical(rd, rn, imm_operand, op);
armvixlad96eda2013-06-14 11:42:37 +0100373 }
374 }
375 } else if (operand.IsExtendedRegister()) {
armvixlb0c8ae22014-03-21 14:03:59 +0000376 VIXL_ASSERT(operand.reg().size() <= rd.size());
armvixlad96eda2013-06-14 11:42:37 +0100377 // Add/sub extended supports shift <= 4. We want to support exactly the
378 // same modes here.
armvixlb0c8ae22014-03-21 14:03:59 +0000379 VIXL_ASSERT(operand.shift_amount() <= 4);
380 VIXL_ASSERT(operand.reg().Is64Bits() ||
armvixlad96eda2013-06-14 11:42:37 +0100381 ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
armvixlb0c8ae22014-03-21 14:03:59 +0000382
383 temps.Exclude(operand.reg());
384 Register temp = temps.AcquireSameSizeAs(rn);
armvixlad96eda2013-06-14 11:42:37 +0100385 EmitExtendShift(temp, operand.reg(), operand.extend(),
386 operand.shift_amount());
387 Logical(rd, rn, Operand(temp), op);
388 } else {
389 // The operand can be encoded in the instruction.
armvixlb0c8ae22014-03-21 14:03:59 +0000390 VIXL_ASSERT(operand.IsShiftedRegister());
armvixlad96eda2013-06-14 11:42:37 +0100391 Logical(rd, rn, operand, op);
392 }
393}
394
395
armvixlf37fdc02014-02-05 13:22:16 +0000396void MacroAssembler::Mov(const Register& rd,
397 const Operand& operand,
398 DiscardMoveMode discard_mode) {
armvixlb0c8ae22014-03-21 14:03:59 +0000399 VIXL_ASSERT(allow_macro_instructions_);
armvixlc68cb642014-09-25 18:49:30 +0100400 // The worst case for size is mov immediate with up to 4 instructions.
401 MacroEmissionCheckScope guard(this);
402
armvixlad96eda2013-06-14 11:42:37 +0100403 if (operand.IsImmediate()) {
404 // Call the macro assembler for generic immediates.
405 Mov(rd, operand.immediate());
406 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
407 // Emit a shift instruction if moving a shifted register. This operation
408 // could also be achieved using an orr instruction (like orn used by Mvn),
409 // but using a shift instruction makes the disassembly clearer.
410 EmitShift(rd, operand.reg(), operand.shift(), operand.shift_amount());
411 } else if (operand.IsExtendedRegister()) {
412 // Emit an extend instruction if moving an extended register. This handles
413 // extend with post-shift operations, too.
414 EmitExtendShift(rd, operand.reg(), operand.extend(),
415 operand.shift_amount());
416 } else {
417 // Otherwise, emit a register move only if the registers are distinct, or
armvixlf37fdc02014-02-05 13:22:16 +0000418 // if they are not X registers.
419 //
420 // Note that mov(w0, w0) is not a no-op because it clears the top word of
421 // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
422 // registers is not required to clear the top word of the X register. In
423 // this case, the instruction is discarded.
424 //
armvixlad96eda2013-06-14 11:42:37 +0100425 // If the sp is an operand, add #0 is emitted; otherwise, orr #0.
armvixlf37fdc02014-02-05 13:22:16 +0000426 if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
427 (discard_mode == kDontDiscardForSameWReg))) {
armvixlad96eda2013-06-14 11:42:37 +0100428 mov(rd, operand.reg());
429 }
430 }
431}
432
433
434void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000435 VIXL_ASSERT(allow_macro_instructions_);
armvixlc68cb642014-09-25 18:49:30 +0100436 // The worst case for size is mvn immediate with up to 4 instructions.
437 MacroEmissionCheckScope guard(this);
438
armvixlad96eda2013-06-14 11:42:37 +0100439 if (operand.IsImmediate()) {
440 // Call the macro assembler for generic immediates.
441 Mvn(rd, operand.immediate());
442 } else if (operand.IsExtendedRegister()) {
armvixlb0c8ae22014-03-21 14:03:59 +0000443 UseScratchRegisterScope temps(this);
444 temps.Exclude(operand.reg());
445
armvixlad96eda2013-06-14 11:42:37 +0100446 // Emit two instructions for the extend case. This differs from Mov, as
447 // the extend and invert can't be achieved in one instruction.
armvixlb0c8ae22014-03-21 14:03:59 +0000448 Register temp = temps.AcquireSameSizeAs(rd);
armvixlad96eda2013-06-14 11:42:37 +0100449 EmitExtendShift(temp, operand.reg(), operand.extend(),
450 operand.shift_amount());
451 mvn(rd, Operand(temp));
452 } else {
453 // Otherwise, register and shifted register cases can be handled by the
454 // assembler directly, using orn.
455 mvn(rd, operand);
456 }
457}
458
459
460void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
armvixlb0c8ae22014-03-21 14:03:59 +0000461 VIXL_ASSERT(allow_macro_instructions_);
462 VIXL_ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
armvixlc68cb642014-09-25 18:49:30 +0100463 // The worst case for size is mov 64-bit immediate to sp:
464 // * up to 4 instructions to materialise the constant
465 // * 1 instruction to move to sp
466 MacroEmissionCheckScope guard(this);
armvixlad96eda2013-06-14 11:42:37 +0100467
468 // Immediates on Aarch64 can be produced using an initial value, and zero to
469 // three move keep operations.
470 //
471 // Initial values can be generated with:
472 // 1. 64-bit move zero (movz).
armvixlf37fdc02014-02-05 13:22:16 +0000473 // 2. 32-bit move inverted (movn).
474 // 3. 64-bit move inverted.
armvixlad96eda2013-06-14 11:42:37 +0100475 // 4. 32-bit orr immediate.
476 // 5. 64-bit orr immediate.
armvixlf37fdc02014-02-05 13:22:16 +0000477 // Move-keep may then be used to modify each of the 16-bit half words.
armvixlad96eda2013-06-14 11:42:37 +0100478 //
479 // The code below supports all five initial value generators, and
armvixlf37fdc02014-02-05 13:22:16 +0000480 // applying move-keep operations to move-zero and move-inverted initial
481 // values.
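//
// For example (illustrative):
//   Mov(x0, 0x0000123400005678) generates:
//     movz x0, #0x5678
//     movk x0, #0x1234, lsl #32
//   Mov(x0, 0xffffffffffff1234) generates:
//     movn x0, #0xedcb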
armvixlad96eda2013-06-14 11:42:37 +0100482
armvixl4a102ba2014-07-14 09:02:40 +0100483 // Try to move the immediate in one instruction, and if that fails, switch to
484 // using multiple instructions.
485 if (!TryOneInstrMoveImmediate(rd, imm)) {
486 unsigned reg_size = rd.size();
487
armvixlad96eda2013-06-14 11:42:37 +0100488 // Generic immediate case. Imm will be represented by
489 // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
armvixlf37fdc02014-02-05 13:22:16 +0000490 // A move-zero or move-inverted is generated for the first non-zero or
491 // non-0xffff immX, and a move-keep for subsequent non-zero immX.
armvixlad96eda2013-06-14 11:42:37 +0100492
armvixlf37fdc02014-02-05 13:22:16 +0000493 uint64_t ignored_halfword = 0;
494 bool invert_move = false;
495 // If the number of 0xffff halfwords is greater than the number of 0x0000
496 // halfwords, it's more efficient to use move-inverted.
497 if (CountClearHalfWords(~imm, reg_size) >
498 CountClearHalfWords(imm, reg_size)) {
armvixlb0c8ae22014-03-21 14:03:59 +0000499 ignored_halfword = 0xffff;
armvixlf37fdc02014-02-05 13:22:16 +0000500 invert_move = true;
501 }
502
503 // Mov instructions can't move values into the stack pointer, so set up a
504 // temporary register, if needed.
armvixlb0c8ae22014-03-21 14:03:59 +0000505 UseScratchRegisterScope temps(this);
506 Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
armvixlad96eda2013-06-14 11:42:37 +0100507
armvixlf37fdc02014-02-05 13:22:16 +0000508 // Iterate through the halfwords. Use movn/movz for the first non-ignored
509 // halfword, and movk for subsequent halfwords.
armvixlb0c8ae22014-03-21 14:03:59 +0000510 VIXL_ASSERT((reg_size % 16) == 0);
armvixlad96eda2013-06-14 11:42:37 +0100511 bool first_mov_done = false;
512 for (unsigned i = 0; i < (temp.size() / 16); i++) {
armvixlb0c8ae22014-03-21 14:03:59 +0000513 uint64_t imm16 = (imm >> (16 * i)) & 0xffff;
armvixlf37fdc02014-02-05 13:22:16 +0000514 if (imm16 != ignored_halfword) {
armvixlad96eda2013-06-14 11:42:37 +0100515 if (!first_mov_done) {
armvixlf37fdc02014-02-05 13:22:16 +0000516 if (invert_move) {
armvixlb0c8ae22014-03-21 14:03:59 +0000517 movn(temp, ~imm16 & 0xffff, 16 * i);
armvixlf37fdc02014-02-05 13:22:16 +0000518 } else {
519 movz(temp, imm16, 16 * i);
520 }
armvixlad96eda2013-06-14 11:42:37 +0100521 first_mov_done = true;
522 } else {
523 // Construct a wider constant.
524 movk(temp, imm16, 16 * i);
525 }
526 }
527 }
528
armvixlb0c8ae22014-03-21 14:03:59 +0000529 VIXL_ASSERT(first_mov_done);
armvixlf37fdc02014-02-05 13:22:16 +0000530
531 // Move the temporary if the original destination register was the stack
532 // pointer.
armvixlad96eda2013-06-14 11:42:37 +0100533 if (rd.IsSP()) {
534 mov(rd, temp);
535 }
armvixlad96eda2013-06-14 11:42:37 +0100536 }
537}
538
539
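// Returns the number of clear (zero) 16-bit halfwords in 'imm'. For example
// (illustrative), CountClearHalfWords(0x0000123400005678, 64) is 2.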
armvixlf37fdc02014-02-05 13:22:16 +0000540unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
armvixlb0c8ae22014-03-21 14:03:59 +0000541 VIXL_ASSERT((reg_size % 8) == 0);
armvixlf37fdc02014-02-05 13:22:16 +0000542 int count = 0;
543 for (unsigned i = 0; i < (reg_size / 16); i++) {
544 if ((imm & 0xffff) == 0) {
545 count++;
546 }
547 imm >>= 16;
548 }
549 return count;
550}
551
552
armvixl4a102ba2014-07-14 09:02:40 +0100553// The movz instruction can generate immediates containing an arbitrary 16-bit
554// value, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
armvixlad96eda2013-06-14 11:42:37 +0100555bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
armvixlb0c8ae22014-03-21 14:03:59 +0000556 VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
armvixlf37fdc02014-02-05 13:22:16 +0000557 return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
armvixlad96eda2013-06-14 11:42:37 +0100558}
559
560
561// The movn instruction can generate immediates containing an arbitrary 16-bit
562// value, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
563bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
564 return IsImmMovz(~imm, reg_size);
565}
566
567
568void MacroAssembler::Ccmp(const Register& rn,
569 const Operand& operand,
570 StatusFlags nzcv,
571 Condition cond) {
armvixlb0c8ae22014-03-21 14:03:59 +0000572 VIXL_ASSERT(allow_macro_instructions_);
armvixlf37fdc02014-02-05 13:22:16 +0000573 if (operand.IsImmediate() && (operand.immediate() < 0)) {
574 ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
575 } else {
576 ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
577 }
armvixlad96eda2013-06-14 11:42:37 +0100578}
579
580
581void MacroAssembler::Ccmn(const Register& rn,
582 const Operand& operand,
583 StatusFlags nzcv,
584 Condition cond) {
armvixlb0c8ae22014-03-21 14:03:59 +0000585 VIXL_ASSERT(allow_macro_instructions_);
armvixlf37fdc02014-02-05 13:22:16 +0000586 if (operand.IsImmediate() && (operand.immediate() < 0)) {
587 ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
588 } else {
589 ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
590 }
armvixlad96eda2013-06-14 11:42:37 +0100591}
592
593
594void MacroAssembler::ConditionalCompareMacro(const Register& rn,
595 const Operand& operand,
596 StatusFlags nzcv,
597 Condition cond,
598 ConditionalCompareOp op) {
armvixlb0c8ae22014-03-21 14:03:59 +0000599 VIXL_ASSERT((cond != al) && (cond != nv));
armvixlc68cb642014-09-25 18:49:30 +0100600 // The worst case for size is ccmp immediate:
601 // * up to 4 instructions to materialise the constant
602 // * 1 instruction for ccmp
603 MacroEmissionCheckScope guard(this);
604
armvixlad96eda2013-06-14 11:42:37 +0100605 if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
606 (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
607 // The immediate can be encoded in the instruction, or the operand is an
608 // unshifted register: call the assembler.
609 ConditionalCompare(rn, operand, nzcv, cond, op);
610 } else {
armvixlb0c8ae22014-03-21 14:03:59 +0000611 UseScratchRegisterScope temps(this);
armvixlad96eda2013-06-14 11:42:37 +0100612 // The operand isn't directly supported by the instruction: perform the
613 // operation on a temporary register.
armvixlb0c8ae22014-03-21 14:03:59 +0000614 Register temp = temps.AcquireSameSizeAs(rn);
armvixlf37fdc02014-02-05 13:22:16 +0000615 Mov(temp, operand);
616 ConditionalCompare(rn, temp, nzcv, cond, op);
617 }
618}
619
620
621void MacroAssembler::Csel(const Register& rd,
622 const Register& rn,
623 const Operand& operand,
624 Condition cond) {
armvixlb0c8ae22014-03-21 14:03:59 +0000625 VIXL_ASSERT(allow_macro_instructions_);
626 VIXL_ASSERT(!rd.IsZero());
627 VIXL_ASSERT(!rn.IsZero());
628 VIXL_ASSERT((cond != al) && (cond != nv));
armvixlc68cb642014-09-25 18:49:30 +0100629 // The worst case for size is csel immediate:
630 // * up to 4 instructions to materialise the constant
631 // * 1 instruction for csel
632 MacroEmissionCheckScope guard(this);
633
armvixlf37fdc02014-02-05 13:22:16 +0000634 if (operand.IsImmediate()) {
635 // Immediate argument. Handle special cases of 0, 1 and -1 using zero
636 // register.
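// For instance (illustrative):
//   Csel(x0, x1, 1, eq) emits "csinc x0, x1, xzr, eq".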
637 int64_t imm = operand.immediate();
638 Register zr = AppropriateZeroRegFor(rn);
639 if (imm == 0) {
640 csel(rd, rn, zr, cond);
641 } else if (imm == 1) {
642 csinc(rd, rn, zr, cond);
643 } else if (imm == -1) {
644 csinv(rd, rn, zr, cond);
armvixlad96eda2013-06-14 11:42:37 +0100645 } else {
armvixlb0c8ae22014-03-21 14:03:59 +0000646 UseScratchRegisterScope temps(this);
647 Register temp = temps.AcquireSameSizeAs(rn);
armvixlf37fdc02014-02-05 13:22:16 +0000648 Mov(temp, operand.immediate());
649 csel(rd, rn, temp, cond);
armvixlad96eda2013-06-14 11:42:37 +0100650 }
armvixlf37fdc02014-02-05 13:22:16 +0000651 } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
652 // Unshifted register argument.
653 csel(rd, rn, operand.reg(), cond);
654 } else {
655 // All other arguments.
armvixlb0c8ae22014-03-21 14:03:59 +0000656 UseScratchRegisterScope temps(this);
657 Register temp = temps.AcquireSameSizeAs(rn);
armvixlf37fdc02014-02-05 13:22:16 +0000658 Mov(temp, operand);
659 csel(rd, rn, temp, cond);
armvixlad96eda2013-06-14 11:42:37 +0100660 }
661}
662
663
664void MacroAssembler::Add(const Register& rd,
665 const Register& rn,
armvixlf37fdc02014-02-05 13:22:16 +0000666 const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000667 VIXL_ASSERT(allow_macro_instructions_);
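// Negative immediates that fit an add/sub encoding are folded into the
// opposite operation; e.g. (illustrative) Add(x0, x1, -8) emits
// "sub x0, x1, #8".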
armvixl4a102ba2014-07-14 09:02:40 +0100668 if (operand.IsImmediate() && (operand.immediate() < 0) &&
669 IsImmAddSub(-operand.immediate())) {
armvixlf37fdc02014-02-05 13:22:16 +0000670 AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, SUB);
armvixlad96eda2013-06-14 11:42:37 +0100671 } else {
armvixlf37fdc02014-02-05 13:22:16 +0000672 AddSubMacro(rd, rn, operand, LeaveFlags, ADD);
673 }
674}
675
676
677void MacroAssembler::Adds(const Register& rd,
678 const Register& rn,
679 const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000680 VIXL_ASSERT(allow_macro_instructions_);
armvixl4a102ba2014-07-14 09:02:40 +0100681 if (operand.IsImmediate() && (operand.immediate() < 0) &&
682 IsImmAddSub(-operand.immediate())) {
armvixlf37fdc02014-02-05 13:22:16 +0000683 AddSubMacro(rd, rn, -operand.immediate(), SetFlags, SUB);
684 } else {
685 AddSubMacro(rd, rn, operand, SetFlags, ADD);
armvixlad96eda2013-06-14 11:42:37 +0100686 }
687}
688
689
690void MacroAssembler::Sub(const Register& rd,
691 const Register& rn,
armvixlf37fdc02014-02-05 13:22:16 +0000692 const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000693 VIXL_ASSERT(allow_macro_instructions_);
armvixl4a102ba2014-07-14 09:02:40 +0100694 if (operand.IsImmediate() && (operand.immediate() < 0) &&
695 IsImmAddSub(-operand.immediate())) {
armvixlf37fdc02014-02-05 13:22:16 +0000696 AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, ADD);
armvixlad96eda2013-06-14 11:42:37 +0100697 } else {
armvixlf37fdc02014-02-05 13:22:16 +0000698 AddSubMacro(rd, rn, operand, LeaveFlags, SUB);
699 }
700}
701
702
703void MacroAssembler::Subs(const Register& rd,
704 const Register& rn,
705 const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000706 VIXL_ASSERT(allow_macro_instructions_);
armvixl4a102ba2014-07-14 09:02:40 +0100707 if (operand.IsImmediate() && (operand.immediate() < 0) &&
708 IsImmAddSub(-operand.immediate())) {
armvixlf37fdc02014-02-05 13:22:16 +0000709 AddSubMacro(rd, rn, -operand.immediate(), SetFlags, ADD);
710 } else {
711 AddSubMacro(rd, rn, operand, SetFlags, SUB);
armvixlad96eda2013-06-14 11:42:37 +0100712 }
713}
714
715
716void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000717 VIXL_ASSERT(allow_macro_instructions_);
armvixlf37fdc02014-02-05 13:22:16 +0000718 Adds(AppropriateZeroRegFor(rn), rn, operand);
armvixlad96eda2013-06-14 11:42:37 +0100719}
720
721
722void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000723 VIXL_ASSERT(allow_macro_instructions_);
armvixlf37fdc02014-02-05 13:22:16 +0000724 Subs(AppropriateZeroRegFor(rn), rn, operand);
armvixlad96eda2013-06-14 11:42:37 +0100725}
726
727
armvixlb0c8ae22014-03-21 14:03:59 +0000728void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
729 VIXL_ASSERT(allow_macro_instructions_);
armvixlc68cb642014-09-25 18:49:30 +0100730 // The worst case for size is:
731 // * 1 to materialise the constant, using literal pool if necessary
732 // * 1 instruction for fcmp
733 MacroEmissionCheckScope guard(this);
armvixlb0c8ae22014-03-21 14:03:59 +0000734 if (value != 0.0) {
735 UseScratchRegisterScope temps(this);
736 FPRegister tmp = temps.AcquireSameSizeAs(fn);
737 Fmov(tmp, value);
738 fcmp(fn, tmp);
739 } else {
740 fcmp(fn, value);
741 }
742}
743
744
745void MacroAssembler::Fmov(FPRegister fd, double imm) {
746 VIXL_ASSERT(allow_macro_instructions_);
armvixlc68cb642014-09-25 18:49:30 +0100747 // Floating point immediates are loaded through the literal pool.
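// For instance (illustrative): Fmov(d0, 1.0) can be encoded directly as
// "fmov d0, #1.0", whereas Fmov(d0, 1.1) is loaded via the literal pool.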
748 MacroEmissionCheckScope guard(this);
749
armvixlb0c8ae22014-03-21 14:03:59 +0000750 if (fd.Is32Bits()) {
751 Fmov(fd, static_cast<float>(imm));
752 return;
753 }
754
755 VIXL_ASSERT(fd.Is64Bits());
756 if (IsImmFP64(imm)) {
757 fmov(fd, imm);
758 } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
759 fmov(fd, xzr);
760 } else {
armvixlc68cb642014-09-25 18:49:30 +0100761 RawLiteral* literal = literal_pool_.Add(imm);
762 ldr(fd, literal);
armvixlb0c8ae22014-03-21 14:03:59 +0000763 }
764}
765
766
767void MacroAssembler::Fmov(FPRegister fd, float imm) {
768 VIXL_ASSERT(allow_macro_instructions_);
armvixlc68cb642014-09-25 18:49:30 +0100769 // Floating point immediates are loaded through the literal pool.
770 MacroEmissionCheckScope guard(this);
771
armvixlb0c8ae22014-03-21 14:03:59 +0000772 if (fd.Is64Bits()) {
773 Fmov(fd, static_cast<double>(imm));
774 return;
775 }
776
777 VIXL_ASSERT(fd.Is32Bits());
778 if (IsImmFP32(imm)) {
779 fmov(fd, imm);
780 } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
781 fmov(fd, wzr);
782 } else {
armvixlc68cb642014-09-25 18:49:30 +0100783 RawLiteral* literal = literal_pool_.Add(imm);
784 ldr(fd, literal);
armvixlb0c8ae22014-03-21 14:03:59 +0000785 }
786}
787
788
789
armvixlad96eda2013-06-14 11:42:37 +0100790void MacroAssembler::Neg(const Register& rd,
armvixlf37fdc02014-02-05 13:22:16 +0000791 const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000792 VIXL_ASSERT(allow_macro_instructions_);
armvixlad96eda2013-06-14 11:42:37 +0100793 if (operand.IsImmediate()) {
794 Mov(rd, -operand.immediate());
795 } else {
armvixlf37fdc02014-02-05 13:22:16 +0000796 Sub(rd, AppropriateZeroRegFor(rd), operand);
armvixlad96eda2013-06-14 11:42:37 +0100797 }
798}
799
800
armvixlf37fdc02014-02-05 13:22:16 +0000801void MacroAssembler::Negs(const Register& rd,
802 const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000803 VIXL_ASSERT(allow_macro_instructions_);
armvixlf37fdc02014-02-05 13:22:16 +0000804 Subs(rd, AppropriateZeroRegFor(rd), operand);
805}
806
807
armvixl4a102ba2014-07-14 09:02:40 +0100808bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
809 int64_t imm) {
810 unsigned n, imm_s, imm_r;
811 int reg_size = dst.size();
812
813 if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
814 // Immediate can be represented in a move zero instruction. Movz can't write
815 // to the stack pointer.
816 movz(dst, imm);
817 return true;
818 } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
819 // Immediate can be represented in a move negative instruction. Movn can't
820 // write to the stack pointer.
821 movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
822 return true;
823 } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
824 // Immediate can be represented in a logical orr instruction.
825 VIXL_ASSERT(!dst.IsZero());
826 LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
827 return true;
828 }
829 return false;
830}
831
832
833Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
834 int64_t imm) {
835 int reg_size = dst.size();
836
837 // Encode the immediate in a single move instruction, if possible.
838 if (TryOneInstrMoveImmediate(dst, imm)) {
839 // The move was successful; nothing to do here.
840 } else {
841 // Pre-shift the immediate to the least-significant bits of the register.
842 int shift_low = CountTrailingZeros(imm, reg_size);
843 int64_t imm_low = imm >> shift_low;
844
845 // Pre-shift the immediate to the most-significant bits of the register,
846 // inserting set bits in the least-significant bits.
847 int shift_high = CountLeadingZeros(imm, reg_size);
848 int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);
849
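// Illustrative example: 0xabcd0000000 cannot be encoded in one instruction,
// but shift_low is 28 and imm_low is 0xabcd, so the value is materialised
// as "movz dst, #0xabcd" and returned as the operand "dst, LSL #28".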
850 if (TryOneInstrMoveImmediate(dst, imm_low)) {
851 // The new immediate has been moved into the destination's low bits:
852 // return a new leftward-shifting operand.
853 return Operand(dst, LSL, shift_low);
854 } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
855 // The new immediate has been moved into the destination's high bits:
856 // return a new rightward-shifting operand.
857 return Operand(dst, LSR, shift_high);
858 } else {
859 Mov(dst, imm);
860 }
861 }
862 return Operand(dst);
863}
864
865
armvixlad96eda2013-06-14 11:42:37 +0100866void MacroAssembler::AddSubMacro(const Register& rd,
867 const Register& rn,
868 const Operand& operand,
869 FlagsUpdate S,
870 AddSubOp op) {
armvixlc68cb642014-09-25 18:49:30 +0100871 // Worst case is add/sub immediate:
872 // * up to 4 instructions to materialise the constant
873 // * 1 instruction for add/sub
874 MacroEmissionCheckScope guard(this);
875
armvixlf37fdc02014-02-05 13:22:16 +0000876 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
877 (S == LeaveFlags)) {
878 // The instruction would be a nop. Avoid generating useless code.
879 return;
880 }
881
armvixlad96eda2013-06-14 11:42:37 +0100882 if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
883 (rn.IsZero() && !operand.IsShiftedRegister()) ||
884 (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
armvixlb0c8ae22014-03-21 14:03:59 +0000885 UseScratchRegisterScope temps(this);
886 Register temp = temps.AcquireSameSizeAs(rn);
armvixl4a102ba2014-07-14 09:02:40 +0100887 if (operand.IsImmediate()) {
888 Operand imm_operand =
889 MoveImmediateForShiftedOp(temp, operand.immediate());
890 AddSub(rd, rn, imm_operand, S, op);
891 } else {
892 Mov(temp, operand);
893 AddSub(rd, rn, temp, S, op);
894 }
armvixlad96eda2013-06-14 11:42:37 +0100895 } else {
896 AddSub(rd, rn, operand, S, op);
897 }
898}
899
900
901void MacroAssembler::Adc(const Register& rd,
902 const Register& rn,
armvixlf37fdc02014-02-05 13:22:16 +0000903 const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000904 VIXL_ASSERT(allow_macro_instructions_);
armvixlf37fdc02014-02-05 13:22:16 +0000905 AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
906}
907
908
909void MacroAssembler::Adcs(const Register& rd,
910 const Register& rn,
911 const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000912 VIXL_ASSERT(allow_macro_instructions_);
armvixlf37fdc02014-02-05 13:22:16 +0000913 AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
armvixlad96eda2013-06-14 11:42:37 +0100914}
915
916
917void MacroAssembler::Sbc(const Register& rd,
918 const Register& rn,
armvixlf37fdc02014-02-05 13:22:16 +0000919 const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000920 VIXL_ASSERT(allow_macro_instructions_);
armvixlf37fdc02014-02-05 13:22:16 +0000921 AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
922}
923
924
925void MacroAssembler::Sbcs(const Register& rd,
926 const Register& rn,
927 const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000928 VIXL_ASSERT(allow_macro_instructions_);
armvixlf37fdc02014-02-05 13:22:16 +0000929 AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
armvixlad96eda2013-06-14 11:42:37 +0100930}
931
932
933void MacroAssembler::Ngc(const Register& rd,
armvixlf37fdc02014-02-05 13:22:16 +0000934 const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000935 VIXL_ASSERT(allow_macro_instructions_);
armvixlad96eda2013-06-14 11:42:37 +0100936 Register zr = AppropriateZeroRegFor(rd);
armvixlf37fdc02014-02-05 13:22:16 +0000937 Sbc(rd, zr, operand);
938}
939
940
941void MacroAssembler::Ngcs(const Register& rd,
942 const Operand& operand) {
armvixlb0c8ae22014-03-21 14:03:59 +0000943 VIXL_ASSERT(allow_macro_instructions_);
armvixlf37fdc02014-02-05 13:22:16 +0000944 Register zr = AppropriateZeroRegFor(rd);
945 Sbcs(rd, zr, operand);
armvixlad96eda2013-06-14 11:42:37 +0100946}
947
948
949void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
950 const Register& rn,
951 const Operand& operand,
952 FlagsUpdate S,
953 AddSubWithCarryOp op) {
armvixlb0c8ae22014-03-21 14:03:59 +0000954 VIXL_ASSERT(rd.size() == rn.size());
armvixlc68cb642014-09-25 18:49:30 +0100955 // Worst case is addc/subc immediate:
956 // * up to 4 instructions to materialise the constant
957 // * 1 instruction for add/sub
958 MacroEmissionCheckScope guard(this);
armvixlb0c8ae22014-03-21 14:03:59 +0000959 UseScratchRegisterScope temps(this);
armvixlad96eda2013-06-14 11:42:37 +0100960
961 if (operand.IsImmediate() ||
962 (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
963 // Add/sub with carry (immediate or ROR shifted register).
armvixlb0c8ae22014-03-21 14:03:59 +0000964 Register temp = temps.AcquireSameSizeAs(rn);
armvixlad96eda2013-06-14 11:42:37 +0100965 Mov(temp, operand);
966 AddSubWithCarry(rd, rn, Operand(temp), S, op);
967 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
968 // Add/sub with carry (shifted register).
armvixlb0c8ae22014-03-21 14:03:59 +0000969 VIXL_ASSERT(operand.reg().size() == rd.size());
970 VIXL_ASSERT(operand.shift() != ROR);
971 VIXL_ASSERT(is_uintn(rd.size() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2,
armvixlad96eda2013-06-14 11:42:37 +0100972 operand.shift_amount()));
armvixlb0c8ae22014-03-21 14:03:59 +0000973 temps.Exclude(operand.reg());
974 Register temp = temps.AcquireSameSizeAs(rn);
armvixlad96eda2013-06-14 11:42:37 +0100975 EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
976 AddSubWithCarry(rd, rn, Operand(temp), S, op);
977 } else if (operand.IsExtendedRegister()) {
978 // Add/sub with carry (extended register).
armvixlb0c8ae22014-03-21 14:03:59 +0000979 VIXL_ASSERT(operand.reg().size() <= rd.size());
armvixlad96eda2013-06-14 11:42:37 +0100980 // Add/sub extended supports a shift <= 4. We want to support exactly the
981 // same modes.
armvixlb0c8ae22014-03-21 14:03:59 +0000982 VIXL_ASSERT(operand.shift_amount() <= 4);
983 VIXL_ASSERT(operand.reg().Is64Bits() ||
armvixlad96eda2013-06-14 11:42:37 +0100984 ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
armvixlb0c8ae22014-03-21 14:03:59 +0000985 temps.Exclude(operand.reg());
986 Register temp = temps.AcquireSameSizeAs(rn);
armvixlad96eda2013-06-14 11:42:37 +0100987 EmitExtendShift(temp, operand.reg(), operand.extend(),
988 operand.shift_amount());
989 AddSubWithCarry(rd, rn, Operand(temp), S, op);
990 } else {
991 // The addressing mode is directly supported by the instruction.
992 AddSubWithCarry(rd, rn, operand, S, op);
993 }
994}
995
996
997#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \
998void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \
armvixlc68cb642014-09-25 18:49:30 +0100999 VIXL_ASSERT(allow_macro_instructions_); \
armvixlad96eda2013-06-14 11:42:37 +01001000 LoadStoreMacro(REG, addr, OP); \
1001}
1002LS_MACRO_LIST(DEFINE_FUNCTION)
1003#undef DEFINE_FUNCTION
1004
1005void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
1006 const MemOperand& addr,
1007 LoadStoreOp op) {
armvixlc68cb642014-09-25 18:49:30 +01001008 // Worst case is ldr/str pre/post index:
1009 // * 1 instruction for ldr/str
1010 // * up to 4 instructions to materialise the constant
1011 // * 1 instruction to update the base
1012 MacroEmissionCheckScope guard(this);
1013
armvixlad96eda2013-06-14 11:42:37 +01001014 int64_t offset = addr.offset();
1015 LSDataSize size = CalcLSDataSize(op);
1016
1017 // Check if an immediate offset fits in the immediate field of the
1018 // appropriate instruction. If not, emit two instructions to perform
1019 // the operation.
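// For instance (illustrative): Ldr(x0, MemOperand(x1, 0x123456)) moves the
// offset into a scratch register and emits "ldr x0, [x1, <temp>]".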
1020 if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
1021 !IsImmLSUnscaled(offset)) {
1022 // Immediate offset that can't be encoded using unsigned or unscaled
1023 // addressing modes.
armvixlb0c8ae22014-03-21 14:03:59 +00001024 UseScratchRegisterScope temps(this);
1025 Register temp = temps.AcquireSameSizeAs(addr.base());
armvixlad96eda2013-06-14 11:42:37 +01001026 Mov(temp, addr.offset());
1027 LoadStore(rt, MemOperand(addr.base(), temp), op);
1028 } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
1029 // Post-index beyond unscaled addressing range.
1030 LoadStore(rt, MemOperand(addr.base()), op);
1031 Add(addr.base(), addr.base(), Operand(offset));
1032 } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
1033 // Pre-index beyond unscaled addressing range.
1034 Add(addr.base(), addr.base(), Operand(offset));
1035 LoadStore(rt, MemOperand(addr.base()), op);
1036 } else {
1037 // Encodable in one load/store instruction.
1038 LoadStore(rt, addr, op);
1039 }
1040}
1041
1042
armvixlc68cb642014-09-25 18:49:30 +01001043#define DEFINE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
1044void MacroAssembler::FN(const REGTYPE REG, \
1045 const REGTYPE REG2, \
1046 const MemOperand& addr) { \
1047 VIXL_ASSERT(allow_macro_instructions_); \
1048 LoadStorePairMacro(REG, REG2, addr, OP); \
1049}
1050LSPAIR_MACRO_LIST(DEFINE_FUNCTION)
1051#undef DEFINE_FUNCTION
1052
1053void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
1054 const CPURegister& rt2,
1055 const MemOperand& addr,
1056 LoadStorePairOp op) {
1057 // TODO(all): Should we support register offset for load-store-pair?
1058 VIXL_ASSERT(!addr.IsRegisterOffset());
1059 // Worst case is ldp/stp immediate:
1060 // * 1 instruction for ldp/stp
1061 // * up to 4 instructions to materialise the constant
1062 // * 1 instruction to update the base
1063 MacroEmissionCheckScope guard(this);
1064
1065 int64_t offset = addr.offset();
1066 LSDataSize size = CalcLSPairDataSize(op);
1067
1068 // Check if the offset fits in the immediate field of the appropriate
1069 // instruction. If not, emit two instructions to perform the operation.
1070 if (IsImmLSPair(offset, size)) {
1071 // Encodable in one load/store pair instruction.
1072 LoadStorePair(rt, rt2, addr, op);
1073 } else {
1074 Register base = addr.base();
1075 if (addr.IsImmediateOffset()) {
1076 UseScratchRegisterScope temps(this);
1077 Register temp = temps.AcquireSameSizeAs(base);
1078 Add(temp, base, offset);
1079 LoadStorePair(rt, rt2, MemOperand(temp), op);
1080 } else if (addr.IsPostIndex()) {
1081 LoadStorePair(rt, rt2, MemOperand(base), op);
1082 Add(base, base, offset);
1083 } else {
1084 VIXL_ASSERT(addr.IsPreIndex());
1085 Add(base, base, offset);
1086 LoadStorePair(rt, rt2, MemOperand(base), op);
1087 }
1088 }
1089}
1090
armvixlad96eda2013-06-14 11:42:37 +01001091void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
1092 const CPURegister& src2, const CPURegister& src3) {
armvixlb0c8ae22014-03-21 14:03:59 +00001093 VIXL_ASSERT(allow_macro_instructions_);
1094 VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
1095 VIXL_ASSERT(src0.IsValid());
armvixlad96eda2013-06-14 11:42:37 +01001096
1097 int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
1098 int size = src0.SizeInBytes();
1099
1100 PrepareForPush(count, size);
1101 PushHelper(count, size, src0, src1, src2, src3);
1102}
1103
1104
1105void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
1106 const CPURegister& dst2, const CPURegister& dst3) {
1107 // It is not valid to pop into the same register more than once in one
1108 // instruction, not even into the zero register.
armvixlb0c8ae22014-03-21 14:03:59 +00001109 VIXL_ASSERT(allow_macro_instructions_);
1110 VIXL_ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
1111 VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
1112 VIXL_ASSERT(dst0.IsValid());
armvixlad96eda2013-06-14 11:42:37 +01001113
1114 int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
1115 int size = dst0.SizeInBytes();
1116
1117 PrepareForPop(count, size);
1118 PopHelper(count, size, dst0, dst1, dst2, dst3);
1119}
1120
1121
1122void MacroAssembler::PushCPURegList(CPURegList registers) {
1123 int size = registers.RegisterSizeInBytes();
1124
1125 PrepareForPush(registers.Count(), size);
1126 // Push up to four registers at a time because if the current stack pointer is
1127 // sp and reg_size is 32, registers must be pushed in blocks of four in order
1128 // to maintain the 16-byte alignment for sp.
armvixlb0c8ae22014-03-21 14:03:59 +00001129 VIXL_ASSERT(allow_macro_instructions_);
armvixlad96eda2013-06-14 11:42:37 +01001130 while (!registers.IsEmpty()) {
1131 int count_before = registers.Count();
1132 const CPURegister& src0 = registers.PopHighestIndex();
1133 const CPURegister& src1 = registers.PopHighestIndex();
1134 const CPURegister& src2 = registers.PopHighestIndex();
1135 const CPURegister& src3 = registers.PopHighestIndex();
1136 int count = count_before - registers.Count();
1137 PushHelper(count, size, src0, src1, src2, src3);
1138 }
1139}
1140
1141
1142void MacroAssembler::PopCPURegList(CPURegList registers) {
1143 int size = registers.RegisterSizeInBytes();
1144
1145 PrepareForPop(registers.Count(), size);
1146 // Pop up to four registers at a time because if the current stack pointer is
1147 // sp and reg_size is 32, registers must be popped in blocks of four in order
1148 // to maintain the 16-byte alignment for sp.
armvixlb0c8ae22014-03-21 14:03:59 +00001149 VIXL_ASSERT(allow_macro_instructions_);
armvixlad96eda2013-06-14 11:42:37 +01001150 while (!registers.IsEmpty()) {
1151 int count_before = registers.Count();
1152 const CPURegister& dst0 = registers.PopLowestIndex();
1153 const CPURegister& dst1 = registers.PopLowestIndex();
1154 const CPURegister& dst2 = registers.PopLowestIndex();
1155 const CPURegister& dst3 = registers.PopLowestIndex();
1156 int count = count_before - registers.Count();
1157 PopHelper(count, size, dst0, dst1, dst2, dst3);
1158 }
1159}
1160
1161
1162void MacroAssembler::PushMultipleTimes(int count, Register src) {
armvixlb0c8ae22014-03-21 14:03:59 +00001163 VIXL_ASSERT(allow_macro_instructions_);
armvixlad96eda2013-06-14 11:42:37 +01001164 int size = src.SizeInBytes();
1165
1166 PrepareForPush(count, size);
1167 // Push up to four registers at a time if possible because if the current
1168 // stack pointer is sp and the register size is 32, registers must be pushed
1169 // in blocks of four in order to maintain the 16-byte alignment for sp.
1170 while (count >= 4) {
1171 PushHelper(4, size, src, src, src, src);
1172 count -= 4;
1173 }
1174 if (count >= 2) {
1175 PushHelper(2, size, src, src, NoReg, NoReg);
1176 count -= 2;
1177 }
1178 if (count == 1) {
1179 PushHelper(1, size, src, NoReg, NoReg, NoReg);
1180 count -= 1;
1181 }
armvixlb0c8ae22014-03-21 14:03:59 +00001182 VIXL_ASSERT(count == 0);
armvixlad96eda2013-06-14 11:42:37 +01001183}
1184
1185
1186void MacroAssembler::PushHelper(int count, int size,
1187 const CPURegister& src0,
1188 const CPURegister& src1,
1189 const CPURegister& src2,
1190 const CPURegister& src3) {
1191 // Ensure that we don't unintentionally modify scratch or debug registers.
armvixlc68cb642014-09-25 18:49:30 +01001192 // Worst case for size is 2 stp.
1193 InstructionAccurateScope scope(this, 2,
1194 InstructionAccurateScope::kMaximumSize);
armvixlad96eda2013-06-14 11:42:37 +01001195
armvixlb0c8ae22014-03-21 14:03:59 +00001196 VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
1197 VIXL_ASSERT(size == src0.SizeInBytes());
armvixlad96eda2013-06-14 11:42:37 +01001198
1199 // When pushing multiple registers, the store order is chosen such that
1200 // Push(a, b) is equivalent to Push(a) followed by Push(b).
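// For instance (illustrative), with sp as the stack pointer and X registers,
// Push(x0, x1) emits "stp x1, x0, [sp, #-16]!".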
1201 switch (count) {
1202 case 1:
armvixlb0c8ae22014-03-21 14:03:59 +00001203 VIXL_ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
armvixlad96eda2013-06-14 11:42:37 +01001204 str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
1205 break;
1206 case 2:
armvixlb0c8ae22014-03-21 14:03:59 +00001207 VIXL_ASSERT(src2.IsNone() && src3.IsNone());
armvixlad96eda2013-06-14 11:42:37 +01001208 stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
1209 break;
1210 case 3:
armvixlb0c8ae22014-03-21 14:03:59 +00001211 VIXL_ASSERT(src3.IsNone());
armvixlad96eda2013-06-14 11:42:37 +01001212 stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
1213 str(src0, MemOperand(StackPointer(), 2 * size));
1214 break;
1215 case 4:
1216 // Skip over 4 * size, then fill in the gap. This allows four W registers
1217 // to be pushed using sp, whilst maintaining 16-byte alignment for sp at
1218 // all times.
1219 stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
1220 stp(src1, src0, MemOperand(StackPointer(), 2 * size));
1221 break;
1222 default:
armvixlb0c8ae22014-03-21 14:03:59 +00001223 VIXL_UNREACHABLE();
armvixlad96eda2013-06-14 11:42:37 +01001224 }
1225}
1226
1227
1228void MacroAssembler::PopHelper(int count, int size,
1229 const CPURegister& dst0,
1230 const CPURegister& dst1,
1231 const CPURegister& dst2,
1232 const CPURegister& dst3) {
1233 // Ensure that we don't unintentionally modify scratch or debug registers.
armvixlc68cb642014-09-25 18:49:30 +01001234 // Worst case for size is 2 ldp.
1235 InstructionAccurateScope scope(this, 2,
1236 InstructionAccurateScope::kMaximumSize);
armvixlad96eda2013-06-14 11:42:37 +01001237
armvixlb0c8ae22014-03-21 14:03:59 +00001238 VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
1239 VIXL_ASSERT(size == dst0.SizeInBytes());
armvixlad96eda2013-06-14 11:42:37 +01001240
1241 // When popping multiple registers, the load order is chosen such that
1242 // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
1243 switch (count) {
1244 case 1:
armvixlb0c8ae22014-03-21 14:03:59 +00001245 VIXL_ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
armvixlad96eda2013-06-14 11:42:37 +01001246 ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
1247 break;
1248 case 2:
armvixlb0c8ae22014-03-21 14:03:59 +00001249 VIXL_ASSERT(dst2.IsNone() && dst3.IsNone());
armvixlad96eda2013-06-14 11:42:37 +01001250 ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
1251 break;
1252 case 3:
armvixlb0c8ae22014-03-21 14:03:59 +00001253 VIXL_ASSERT(dst3.IsNone());
armvixlad96eda2013-06-14 11:42:37 +01001254 ldr(dst2, MemOperand(StackPointer(), 2 * size));
1255 ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
1256 break;
1257 case 4:
1258 // Load the higher addresses first, then load the lower addresses and skip
1259 // the whole block in the second instruction. This allows four W registers
1260 // to be popped using sp, whilst maintaining 16-byte alignment for sp at
1261 // all times.
1262 ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
1263 ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
1264 break;
1265 default:
armvixlb0c8ae22014-03-21 14:03:59 +00001266 VIXL_UNREACHABLE();
armvixlad96eda2013-06-14 11:42:37 +01001267 }
1268}
1269
1270
1271void MacroAssembler::PrepareForPush(int count, int size) {
1272 if (sp.Is(StackPointer())) {
1273 // If the current stack pointer is sp, then it must be aligned to 16 bytes
1274 // on entry and the total size of the specified registers must also be a
1275 // multiple of 16 bytes.
armvixlb0c8ae22014-03-21 14:03:59 +00001276 VIXL_ASSERT((count * size) % 16 == 0);
armvixlad96eda2013-06-14 11:42:37 +01001277 } else {
1278 // Even if the current stack pointer is not the system stack pointer (sp),
1279 // the system stack pointer will still be modified in order to comply with
1280 // ABI rules about accessing memory below the system stack pointer.
1281 BumpSystemStackPointer(count * size);
1282 }
1283}
1284
1285
1286void MacroAssembler::PrepareForPop(int count, int size) {
1287 USE(count);
1288 USE(size);
1289 if (sp.Is(StackPointer())) {
1290 // If the current stack pointer is sp, then it must be aligned to 16 bytes
1291 // on entry and the total size of the specified registers must also be a
1292 // multiple of 16 bytes.
armvixlb0c8ae22014-03-21 14:03:59 +00001293 VIXL_ASSERT((count * size) % 16 == 0);
armvixlad96eda2013-06-14 11:42:37 +01001294 }
1295}
1296
1297void MacroAssembler::Poke(const Register& src, const Operand& offset) {
armvixlb0c8ae22014-03-21 14:03:59 +00001298 VIXL_ASSERT(allow_macro_instructions_);
armvixlad96eda2013-06-14 11:42:37 +01001299 if (offset.IsImmediate()) {
armvixlb0c8ae22014-03-21 14:03:59 +00001300 VIXL_ASSERT(offset.immediate() >= 0);
armvixlad96eda2013-06-14 11:42:37 +01001301 }
1302
1303 Str(src, MemOperand(StackPointer(), offset));
1304}
1305
1306
1307void MacroAssembler::Peek(const Register& dst, const Operand& offset) {
armvixlb0c8ae22014-03-21 14:03:59 +00001308 VIXL_ASSERT(allow_macro_instructions_);
armvixlad96eda2013-06-14 11:42:37 +01001309 if (offset.IsImmediate()) {
armvixlb0c8ae22014-03-21 14:03:59 +00001310 VIXL_ASSERT(offset.immediate() >= 0);
armvixlad96eda2013-06-14 11:42:37 +01001311 }
1312
1313 Ldr(dst, MemOperand(StackPointer(), offset));
1314}
1315
1316
armvixlc68cb642014-09-25 18:49:30 +01001317void MacroAssembler::PeekCPURegList(CPURegList registers, int offset) {
1318 VIXL_ASSERT(!registers.IncludesAliasOf(StackPointer()));
1319 VIXL_ASSERT(offset >= 0);
1320 int size = registers.RegisterSizeInBytes();
1321
1322 while (registers.Count() >= 2) {
1323 const CPURegister& dst0 = registers.PopLowestIndex();
1324 const CPURegister& dst1 = registers.PopLowestIndex();
1325 Ldp(dst0, dst1, MemOperand(StackPointer(), offset));
1326 offset += 2 * size;
1327 }
1328 if (!registers.IsEmpty()) {
1329 Ldr(registers.PopLowestIndex(),
1330 MemOperand(StackPointer(), offset));
1331 }
1332}
1333
1334
1335void MacroAssembler::PokeCPURegList(CPURegList registers, int offset) {
1336 VIXL_ASSERT(!registers.IncludesAliasOf(StackPointer()));
1337 VIXL_ASSERT(offset >= 0);
1338 int size = registers.RegisterSizeInBytes();
1339
1340 while (registers.Count() >= 2) {
1341 const CPURegister& dst0 = registers.PopLowestIndex();
1342 const CPURegister& dst1 = registers.PopLowestIndex();
1343 Stp(dst0, dst1, MemOperand(StackPointer(), offset));
1344 offset += 2 * size;
1345 }
1346 if (!registers.IsEmpty()) {
1347 Str(registers.PopLowestIndex(),
1348 MemOperand(StackPointer(), offset));
1349 }
1350}
1351
1352
armvixlad96eda2013-06-14 11:42:37 +01001353void MacroAssembler::Claim(const Operand& size) {
armvixlb0c8ae22014-03-21 14:03:59 +00001354 VIXL_ASSERT(allow_macro_instructions_);
armvixlf37fdc02014-02-05 13:22:16 +00001355
1356 if (size.IsZero()) {
1357 return;
1358 }
1359
armvixlad96eda2013-06-14 11:42:37 +01001360 if (size.IsImmediate()) {
armvixlb0c8ae22014-03-21 14:03:59 +00001361 VIXL_ASSERT(size.immediate() > 0);
armvixlad96eda2013-06-14 11:42:37 +01001362 if (sp.Is(StackPointer())) {
armvixlb0c8ae22014-03-21 14:03:59 +00001363 VIXL_ASSERT((size.immediate() % 16) == 0);
armvixlad96eda2013-06-14 11:42:37 +01001364 }
1365 }
1366
1367 if (!sp.Is(StackPointer())) {
1368 BumpSystemStackPointer(size);
1369 }
1370
1371 Sub(StackPointer(), StackPointer(), size);
1372}
1373
1374
1375void MacroAssembler::Drop(const Operand& size) {
armvixlb0c8ae22014-03-21 14:03:59 +00001376 VIXL_ASSERT(allow_macro_instructions_);
armvixlf37fdc02014-02-05 13:22:16 +00001377
1378 if (size.IsZero()) {
1379 return;
1380 }
1381
armvixlad96eda2013-06-14 11:42:37 +01001382 if (size.IsImmediate()) {
armvixlb0c8ae22014-03-21 14:03:59 +00001383 VIXL_ASSERT(size.immediate() > 0);
armvixlad96eda2013-06-14 11:42:37 +01001384 if (sp.Is(StackPointer())) {
armvixlb0c8ae22014-03-21 14:03:59 +00001385 VIXL_ASSERT((size.immediate() % 16) == 0);
armvixlad96eda2013-06-14 11:42:37 +01001386 }
1387 }
1388
1389 Add(StackPointer(), StackPointer(), size);
1390}
1391
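// Illustrative usage sketch (an assumption, not part of the original source):
// Claim and Drop allocate and free stack space. When sp is the current stack
// pointer, an immediate size must be a positive multiple of 16.
//
//   masm.Claim(48);          // Reserve 48 bytes below the stack pointer.
//   masm.Poke(x0, 0);        // Use the reserved space...
//   masm.Poke(x1, 8);
//   masm.Peek(x2, 0);
//   masm.Drop(48);           // ...then release it.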
1392
1393void MacroAssembler::PushCalleeSavedRegisters() {
1394 // Ensure that the macro-assembler doesn't use any scratch registers.
armvixlc68cb642014-09-25 18:49:30 +01001395 // 10 stp instructions will be emitted.
 1396 // TODO(all): Should we use GetCalleeSaved and SavedFP?
1397 InstructionAccurateScope scope(this, 10);
armvixlad96eda2013-06-14 11:42:37 +01001398
1399 // This method must not be called unless the current stack pointer is sp.
armvixlb0c8ae22014-03-21 14:03:59 +00001400 VIXL_ASSERT(sp.Is(StackPointer()));
armvixlad96eda2013-06-14 11:42:37 +01001401
1402 MemOperand tos(sp, -2 * kXRegSizeInBytes, PreIndex);
1403
armvixlad96eda2013-06-14 11:42:37 +01001404 stp(x29, x30, tos);
1405 stp(x27, x28, tos);
1406 stp(x25, x26, tos);
1407 stp(x23, x24, tos);
1408 stp(x21, x22, tos);
1409 stp(x19, x20, tos);
armvixl5799d6c2014-05-01 11:05:00 +01001410
1411 stp(d14, d15, tos);
1412 stp(d12, d13, tos);
1413 stp(d10, d11, tos);
1414 stp(d8, d9, tos);
armvixlad96eda2013-06-14 11:42:37 +01001415}
1416
1417
1418void MacroAssembler::PopCalleeSavedRegisters() {
1419 // Ensure that the macro-assembler doesn't use any scratch registers.
armvixlc68cb642014-09-25 18:49:30 +01001420 // 10 ldp instructions will be emitted.
 1421 // TODO(all): Should we use GetCalleeSaved and SavedFP?
1422 InstructionAccurateScope scope(this, 10);
armvixlad96eda2013-06-14 11:42:37 +01001423
1424 // This method must not be called unless the current stack pointer is sp.
armvixlb0c8ae22014-03-21 14:03:59 +00001425 VIXL_ASSERT(sp.Is(StackPointer()));
armvixlad96eda2013-06-14 11:42:37 +01001426
1427 MemOperand tos(sp, 2 * kXRegSizeInBytes, PostIndex);
1428
armvixl5799d6c2014-05-01 11:05:00 +01001429 ldp(d8, d9, tos);
1430 ldp(d10, d11, tos);
1431 ldp(d12, d13, tos);
1432 ldp(d14, d15, tos);
1433
armvixlad96eda2013-06-14 11:42:37 +01001434 ldp(x19, x20, tos);
1435 ldp(x21, x22, tos);
1436 ldp(x23, x24, tos);
1437 ldp(x25, x26, tos);
1438 ldp(x27, x28, tos);
1439 ldp(x29, x30, tos);
armvixlad96eda2013-06-14 11:42:37 +01001440}
1441
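// Illustrative usage sketch (an assumption, not part of the original source):
// a typical prologue and epilogue built from the two helpers above. The
// function body shown is hypothetical.
//
//   masm.PushCalleeSavedRegisters();   // 10 stp: x19-x30, then d8-d15.
//   ...                                // Body; caller-saved registers and
//                                      // NZCV may be clobbered freely.
//   masm.PopCalleeSavedRegisters();    // 10 ldp, restoring the same set.
//   masm.Ret();
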
1442void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
armvixlb0c8ae22014-03-21 14:03:59 +00001443 VIXL_ASSERT(!sp.Is(StackPointer()));
armvixlad96eda2013-06-14 11:42:37 +01001444 // TODO: Several callers rely on this not using scratch registers, so we use
1445 // the assembler directly here. However, this means that large immediate
1446 // values of 'space' cannot be handled.
armvixlc68cb642014-09-25 18:49:30 +01001447 InstructionAccurateScope scope(this, 1);
armvixlad96eda2013-06-14 11:42:37 +01001448 sub(sp, StackPointer(), space);
1449}
1450
1451
1452// This is the main Printf implementation. All callee-saved registers are
1453// preserved, but NZCV and the caller-saved registers may be clobbered.
1454void MacroAssembler::PrintfNoPreserve(const char * format,
1455 const CPURegister& arg0,
1456 const CPURegister& arg1,
1457 const CPURegister& arg2,
1458 const CPURegister& arg3) {
1459 // We cannot handle a caller-saved stack pointer. It doesn't make much sense
1460 // in most cases anyway, so this restriction shouldn't be too serious.
armvixlb0c8ae22014-03-21 14:03:59 +00001461 VIXL_ASSERT(!kCallerSaved.IncludesAliasOf(StackPointer()));
armvixlad96eda2013-06-14 11:42:37 +01001462
armvixl5799d6c2014-05-01 11:05:00 +01001463 // The provided arguments, and their proper PCS registers.
1464 CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
1465 CPURegister pcs[kPrintfMaxArgCount];
1466
1467 int arg_count = kPrintfMaxArgCount;
1468
1469 // The PCS varargs registers for printf. Note that x0 is used for the printf
1470 // format string.
1471 static const CPURegList kPCSVarargs =
1472 CPURegList(CPURegister::kRegister, kXRegSize, 1, arg_count);
1473 static const CPURegList kPCSVarargsFP =
1474 CPURegList(CPURegister::kFPRegister, kDRegSize, 0, arg_count - 1);
1475
1476 // We can use caller-saved registers as scratch values, except for the
1477 // arguments and the PCS registers where they might need to go.
armvixlb0c8ae22014-03-21 14:03:59 +00001478 UseScratchRegisterScope temps(this);
armvixl5799d6c2014-05-01 11:05:00 +01001479 temps.Include(kCallerSaved);
1480 temps.Include(kCallerSavedFP);
1481 temps.Exclude(kPCSVarargs);
1482 temps.Exclude(kPCSVarargsFP);
armvixlb0c8ae22014-03-21 14:03:59 +00001483 temps.Exclude(arg0, arg1, arg2, arg3);
1484
armvixl5799d6c2014-05-01 11:05:00 +01001485 // Copies of the arg lists that we can iterate through.
1486 CPURegList pcs_varargs = kPCSVarargs;
1487 CPURegList pcs_varargs_fp = kPCSVarargsFP;
armvixlad96eda2013-06-14 11:42:37 +01001488
armvixl5799d6c2014-05-01 11:05:00 +01001489 // Place the arguments. There are lots of clever tricks and optimizations we
1490 // could use here, but Printf is a debug tool so instead we just try to keep
1491 // it simple: Move each input that isn't already in the right place to a
 1492 // scratch register, then move everything into place in a second pass.
1493 for (unsigned i = 0; i < kPrintfMaxArgCount; i++) {
1494 // Work out the proper PCS register for this argument.
armvixlad96eda2013-06-14 11:42:37 +01001495 if (args[i].IsRegister()) {
armvixl5799d6c2014-05-01 11:05:00 +01001496 pcs[i] = pcs_varargs.PopLowestIndex().X();
1497 // We might only need a W register here. We need to know the size of the
1498 // argument so we can properly encode it for the simulator call.
1499 if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
armvixlad96eda2013-06-14 11:42:37 +01001500 } else if (args[i].IsFPRegister()) {
armvixl5799d6c2014-05-01 11:05:00 +01001501 // In C, floats are always promoted to doubles for varargs calls.
1502 pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
armvixlad96eda2013-06-14 11:42:37 +01001503 } else {
armvixl5799d6c2014-05-01 11:05:00 +01001504 VIXL_ASSERT(args[i].IsNone());
armvixlad96eda2013-06-14 11:42:37 +01001505 arg_count = i;
1506 break;
1507 }
armvixlad96eda2013-06-14 11:42:37 +01001508
armvixl5799d6c2014-05-01 11:05:00 +01001509 // If the argument is already in the right place, leave it where it is.
1510 if (args[i].Aliases(pcs[i])) continue;
armvixlad96eda2013-06-14 11:42:37 +01001511
armvixl5799d6c2014-05-01 11:05:00 +01001512 // Otherwise, if the argument is in a PCS argument register, allocate an
1513 // appropriate scratch register and then move it out of the way.
1514 if (kPCSVarargs.IncludesAliasOf(args[i]) ||
1515 kPCSVarargsFP.IncludesAliasOf(args[i])) {
1516 if (args[i].IsRegister()) {
1517 Register old_arg = Register(args[i]);
1518 Register new_arg = temps.AcquireSameSizeAs(old_arg);
1519 Mov(new_arg, old_arg);
1520 args[i] = new_arg;
1521 } else {
1522 FPRegister old_arg = FPRegister(args[i]);
1523 FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
1524 Fmov(new_arg, old_arg);
1525 args[i] = new_arg;
1526 }
armvixlad96eda2013-06-14 11:42:37 +01001527 }
1528 }
1529
armvixl5799d6c2014-05-01 11:05:00 +01001530 // Do a second pass to move values into their final positions and perform any
1531 // conversions that may be required.
1532 for (int i = 0; i < arg_count; i++) {
1533 VIXL_ASSERT(pcs[i].type() == args[i].type());
1534 if (pcs[i].IsRegister()) {
1535 Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
1536 } else {
1537 VIXL_ASSERT(pcs[i].IsFPRegister());
1538 if (pcs[i].size() == args[i].size()) {
1539 Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
1540 } else {
1541 Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
1542 }
1543 }
armvixlad96eda2013-06-14 11:42:37 +01001544 }
1545
1546 // Load the format string into x0, as per the procedure-call standard.
1547 //
1548 // To make the code as portable as possible, the format string is encoded
1549 // directly in the instruction stream. It might be cleaner to encode it in a
1550 // literal pool, but since Printf is usually used for debugging, it is
1551 // beneficial for it to be minimally dependent on other features.
armvixl5799d6c2014-05-01 11:05:00 +01001552 temps.Exclude(x0);
armvixlad96eda2013-06-14 11:42:37 +01001553 Label format_address;
1554 Adr(x0, &format_address);
1555
1556 // Emit the format string directly in the instruction stream.
armvixlc68cb642014-09-25 18:49:30 +01001557 {
1558 BlockLiteralPoolScope scope(this);
1559 // Data emitted:
 1560 //   a branch over the string data
 1561 //   the format string: strlen(format) + 1 bytes (including the null terminator)
 1562 //   padding up to the next instruction boundary
 1563 //   an Unreachable marker
1564 EmissionCheckScope guard(
1565 this,
1566 AlignUp(strlen(format) + 1, kInstructionSize) + 2 * kInstructionSize);
armvixlad96eda2013-06-14 11:42:37 +01001567 Label after_data;
1568 B(&after_data);
1569 Bind(&format_address);
armvixlc68cb642014-09-25 18:49:30 +01001570 EmitString(format);
armvixlad96eda2013-06-14 11:42:37 +01001571 Unreachable();
1572 Bind(&after_data);
1573 }
1574
1575 // We don't pass any arguments on the stack, but we still need to align the C
1576 // stack pointer to a 16-byte boundary for PCS compliance.
1577 if (!sp.Is(StackPointer())) {
1578 Bic(sp, StackPointer(), 0xf);
1579 }
1580
1581 // Actually call printf. This part needs special handling for the simulator,
1582 // since the system printf function will use a different instruction set and
1583 // the procedure-call standard will not be compatible.
1584#ifdef USE_SIMULATOR
armvixlc68cb642014-09-25 18:49:30 +01001585 {
1586 InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
armvixlad96eda2013-06-14 11:42:37 +01001587 hlt(kPrintfOpcode);
armvixl5799d6c2014-05-01 11:05:00 +01001588 dc32(arg_count); // kPrintfArgCountOffset
1589
1590 // Determine the argument pattern.
1591 uint32_t arg_pattern_list = 0;
1592 for (int i = 0; i < arg_count; i++) {
1593 uint32_t arg_pattern;
1594 if (pcs[i].IsRegister()) {
1595 arg_pattern = pcs[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
1596 } else {
1597 VIXL_ASSERT(pcs[i].Is64Bits());
1598 arg_pattern = kPrintfArgD;
1599 }
1600 VIXL_ASSERT(arg_pattern < (1 << kPrintfArgPatternBits));
1601 arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
1602 }
1603 dc32(arg_pattern_list); // kPrintfArgPatternListOffset
armvixlad96eda2013-06-14 11:42:37 +01001604 }
1605#else
armvixlb0c8ae22014-03-21 14:03:59 +00001606 Register tmp = temps.AcquireX();
1607 Mov(tmp, reinterpret_cast<uintptr_t>(printf));
1608 Blr(tmp);
armvixlad96eda2013-06-14 11:42:37 +01001609#endif
1610}
1611
1612
1613void MacroAssembler::Printf(const char * format,
armvixl5799d6c2014-05-01 11:05:00 +01001614 CPURegister arg0,
1615 CPURegister arg1,
1616 CPURegister arg2,
1617 CPURegister arg3) {
1618 // We can only print sp if it is the current stack pointer.
1619 if (!sp.Is(StackPointer())) {
1620 VIXL_ASSERT(!sp.Aliases(arg0));
1621 VIXL_ASSERT(!sp.Aliases(arg1));
1622 VIXL_ASSERT(!sp.Aliases(arg2));
1623 VIXL_ASSERT(!sp.Aliases(arg3));
1624 }
1625
armvixlb0c8ae22014-03-21 14:03:59 +00001626 // Make sure that the macro assembler doesn't try to use any of our arguments
1627 // as scratch registers.
1628 UseScratchRegisterScope exclude_all(this);
1629 exclude_all.ExcludeAll();
1630
armvixlad96eda2013-06-14 11:42:37 +01001631 // Preserve all caller-saved registers as well as NZCV.
1632 // If sp is the stack pointer, PushCPURegList asserts that the size of each
1633 // list is a multiple of 16 bytes.
1634 PushCPURegList(kCallerSaved);
1635 PushCPURegList(kCallerSavedFP);
armvixlad96eda2013-06-14 11:42:37 +01001636
armvixlb0c8ae22014-03-21 14:03:59 +00001637 { UseScratchRegisterScope temps(this);
1638 // We can use caller-saved registers as scratch values (except for argN).
armvixl5799d6c2014-05-01 11:05:00 +01001639 temps.Include(kCallerSaved);
1640 temps.Include(kCallerSavedFP);
armvixlb0c8ae22014-03-21 14:03:59 +00001641 temps.Exclude(arg0, arg1, arg2, arg3);
armvixlad96eda2013-06-14 11:42:37 +01001642
armvixl5799d6c2014-05-01 11:05:00 +01001643 // If any of the arguments are the current stack pointer, allocate a new
1644 // register for them, and adjust the value to compensate for pushing the
1645 // caller-saved registers.
1646 bool arg0_sp = StackPointer().Aliases(arg0);
1647 bool arg1_sp = StackPointer().Aliases(arg1);
1648 bool arg2_sp = StackPointer().Aliases(arg2);
1649 bool arg3_sp = StackPointer().Aliases(arg3);
1650 if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
1651 // Allocate a register to hold the original stack pointer value, to pass
1652 // to PrintfNoPreserve as an argument.
1653 Register arg_sp = temps.AcquireX();
1654 Add(arg_sp, StackPointer(),
1655 kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes());
1656 if (arg0_sp) arg0 = Register(arg_sp.code(), arg0.size());
1657 if (arg1_sp) arg1 = Register(arg_sp.code(), arg1.size());
1658 if (arg2_sp) arg2 = Register(arg_sp.code(), arg2.size());
1659 if (arg3_sp) arg3 = Register(arg_sp.code(), arg3.size());
1660 }
1661
armvixlb0c8ae22014-03-21 14:03:59 +00001662 // Preserve NZCV.
1663 Register tmp = temps.AcquireX();
1664 Mrs(tmp, NZCV);
1665 Push(tmp, xzr);
armvixl5799d6c2014-05-01 11:05:00 +01001666 temps.Release(tmp);
armvixlb0c8ae22014-03-21 14:03:59 +00001667
1668 PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
1669
armvixl5799d6c2014-05-01 11:05:00 +01001670 // Restore NZCV.
1671 tmp = temps.AcquireX();
armvixlb0c8ae22014-03-21 14:03:59 +00001672 Pop(xzr, tmp);
1673 Msr(NZCV, tmp);
armvixl5799d6c2014-05-01 11:05:00 +01001674 temps.Release(tmp);
armvixlb0c8ae22014-03-21 14:03:59 +00001675 }
1676
armvixlad96eda2013-06-14 11:42:37 +01001677 PopCPURegList(kCallerSavedFP);
1678 PopCPURegList(kCallerSaved);
1679}
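
// Illustrative usage sketch (an assumption, not part of the original source):
// Printf accepts up to four register arguments, which must match the format
// conversions; W registers print as 32-bit values, X registers as 64-bit
// values, and D registers as doubles. <inttypes.h> is assumed for PRId64.
//
//   masm.Mov(x0, 42);
//   masm.Fmov(d0, 3.14);
//   masm.Printf("x0 = %" PRId64 ", d0 = %f\n", x0, d0);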
1680
1681void MacroAssembler::Trace(TraceParameters parameters, TraceCommand command) {
armvixlb0c8ae22014-03-21 14:03:59 +00001682 VIXL_ASSERT(allow_macro_instructions_);
armvixlad96eda2013-06-14 11:42:37 +01001683
1684#ifdef USE_SIMULATOR
1685 // The arguments to the trace pseudo instruction need to be contiguous in
1686 // memory, so make sure we don't try to emit a literal pool.
1687 InstructionAccurateScope scope(this, kTraceLength / kInstructionSize);
1688
1689 Label start;
1690 bind(&start);
1691
1692 // Refer to instructions-a64.h for a description of the marker and its
1693 // arguments.
1694 hlt(kTraceOpcode);
1695
armvixlb0c8ae22014-03-21 14:03:59 +00001696 VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceParamsOffset);
armvixlad96eda2013-06-14 11:42:37 +01001697 dc32(parameters);
1698
armvixlb0c8ae22014-03-21 14:03:59 +00001699 VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceCommandOffset);
armvixlad96eda2013-06-14 11:42:37 +01001700 dc32(command);
1701#else
1702 // Emit nothing on real hardware.
1703 USE(parameters);
1704 USE(command);
1705#endif
1706}
1707
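// Illustrative usage sketch (an assumption, not part of the original source):
// under the simulator, tracing can be toggled around a region of interest.
// The LOG_* and TRACE_* values are those described in instructions-a64.h and
// are assumed here for the example; on real hardware these calls emit nothing.
//
//   masm.Trace(LOG_ALL, TRACE_ENABLE);    // Start tracing state and disasm.
//   ...                                   // Code under investigation.
//   masm.Trace(LOG_ALL, TRACE_DISABLE);   // Stop tracing.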
1708
1709void MacroAssembler::Log(TraceParameters parameters) {
armvixlb0c8ae22014-03-21 14:03:59 +00001710 VIXL_ASSERT(allow_macro_instructions_);
armvixlad96eda2013-06-14 11:42:37 +01001711
1712#ifdef USE_SIMULATOR
1713 // The arguments to the log pseudo instruction need to be contiguous in
1714 // memory, so make sure we don't try to emit a literal pool.
1715 InstructionAccurateScope scope(this, kLogLength / kInstructionSize);
1716
1717 Label start;
1718 bind(&start);
1719
1720 // Refer to instructions-a64.h for a description of the marker and its
1721 // arguments.
1722 hlt(kLogOpcode);
1723
armvixlb0c8ae22014-03-21 14:03:59 +00001724 VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kLogParamsOffset);
armvixlad96eda2013-06-14 11:42:37 +01001725 dc32(parameters);
1726#else
1727 // Emit nothing on real hardware.
1728 USE(parameters);
1729#endif
1730}
1731
armvixl578645f2013-08-15 17:21:42 +01001732
1733void MacroAssembler::EnableInstrumentation() {
armvixlb0c8ae22014-03-21 14:03:59 +00001734 VIXL_ASSERT(!isprint(InstrumentStateEnable));
armvixl578645f2013-08-15 17:21:42 +01001735 InstructionAccurateScope scope(this, 1);
1736 movn(xzr, InstrumentStateEnable);
1737}
1738
1739
1740void MacroAssembler::DisableInstrumentation() {
armvixlb0c8ae22014-03-21 14:03:59 +00001741 VIXL_ASSERT(!isprint(InstrumentStateDisable));
armvixl578645f2013-08-15 17:21:42 +01001742 InstructionAccurateScope scope(this, 1);
1743 movn(xzr, InstrumentStateDisable);
1744}
1745
1746
1747void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
armvixlb0c8ae22014-03-21 14:03:59 +00001748 VIXL_ASSERT(strlen(marker_name) == 2);
armvixl578645f2013-08-15 17:21:42 +01001749
1750 // We allow only printable characters in the marker names. Unprintable
1751 // characters are reserved for controlling features of the instrumentation.
armvixlb0c8ae22014-03-21 14:03:59 +00001752 VIXL_ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));
armvixl578645f2013-08-15 17:21:42 +01001753
1754 InstructionAccurateScope scope(this, 1);
1755 movn(xzr, (marker_name[1] << 8) | marker_name[0]);
1756}
1757
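// Illustrative sketch (an assumption, not part of the original source): the
// two-character marker is encoded little-endian in the movn immediate, so a
// marker name maps to a single 16-bit value.
//
//   masm.AnnotateInstrumentation("BL");
//   // emits: movn xzr, #0x4c42   (('L' << 8) | 'B')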
armvixlb0c8ae22014-03-21 14:03:59 +00001758
1759UseScratchRegisterScope::~UseScratchRegisterScope() {
1760 available_->set_list(old_available_);
1761 availablefp_->set_list(old_availablefp_);
1762}
1763
1764
armvixl5799d6c2014-05-01 11:05:00 +01001765bool UseScratchRegisterScope::IsAvailable(const CPURegister& reg) const {
1766 return available_->IncludesAliasOf(reg) || availablefp_->IncludesAliasOf(reg);
1767}
1768
1769
armvixlb0c8ae22014-03-21 14:03:59 +00001770Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
1771 int code = AcquireNextAvailable(available_).code();
armvixl4a102ba2014-07-14 09:02:40 +01001772 return Register(code, reg.size());
armvixlb0c8ae22014-03-21 14:03:59 +00001773}
1774
1775
1776FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
1777 int code = AcquireNextAvailable(availablefp_).code();
armvixl4a102ba2014-07-14 09:02:40 +01001778 return FPRegister(code, reg.size());
armvixlb0c8ae22014-03-21 14:03:59 +00001779}
1780
1781
1782void UseScratchRegisterScope::Release(const CPURegister& reg) {
1783 if (reg.IsRegister()) {
1784 ReleaseByCode(available_, reg.code());
1785 } else if (reg.IsFPRegister()) {
1786 ReleaseByCode(availablefp_, reg.code());
1787 } else {
1788 VIXL_ASSERT(reg.IsNone());
1789 }
1790}
1791
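// Illustrative usage sketch (an assumption, not part of the original source):
// registers acquired from a UseScratchRegisterScope are returned to the
// scratch pool when the scope is destroyed, or earlier via Release().
//
//   {
//     UseScratchRegisterScope temps(&masm);
//     Register tmp = temps.AcquireX();       // Typically ip0 or ip1.
//     masm.Mov(tmp, 0xdeadbeef);
//     masm.Str(tmp, MemOperand(x0));
//   }                                        // tmp becomes available again.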
1792
armvixl5799d6c2014-05-01 11:05:00 +01001793void UseScratchRegisterScope::Include(const CPURegList& list) {
1794 if (list.type() == CPURegister::kRegister) {
 1795 // Make sure that neither sp nor xzr is included in the list.
1796 IncludeByRegList(available_, list.list() & ~(xzr.Bit() | sp.Bit()));
1797 } else {
1798 VIXL_ASSERT(list.type() == CPURegister::kFPRegister);
1799 IncludeByRegList(availablefp_, list.list());
1800 }
1801}
1802
1803
armvixlb0c8ae22014-03-21 14:03:59 +00001804void UseScratchRegisterScope::Include(const Register& reg1,
1805 const Register& reg2,
1806 const Register& reg3,
1807 const Register& reg4) {
1808 RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
 1809 // Make sure that neither sp nor xzr is included in the list.
1810 include &= ~(xzr.Bit() | sp.Bit());
1811
1812 IncludeByRegList(available_, include);
1813}
1814
1815
1816void UseScratchRegisterScope::Include(const FPRegister& reg1,
1817 const FPRegister& reg2,
1818 const FPRegister& reg3,
1819 const FPRegister& reg4) {
1820 RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
1821 IncludeByRegList(availablefp_, include);
1822}
1823
1824
armvixl5799d6c2014-05-01 11:05:00 +01001825void UseScratchRegisterScope::Exclude(const CPURegList& list) {
1826 if (list.type() == CPURegister::kRegister) {
1827 ExcludeByRegList(available_, list.list());
1828 } else {
1829 VIXL_ASSERT(list.type() == CPURegister::kFPRegister);
1830 ExcludeByRegList(availablefp_, list.list());
1831 }
1832}
1833
1834
armvixlb0c8ae22014-03-21 14:03:59 +00001835void UseScratchRegisterScope::Exclude(const Register& reg1,
1836 const Register& reg2,
1837 const Register& reg3,
1838 const Register& reg4) {
1839 RegList exclude = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
1840 ExcludeByRegList(available_, exclude);
1841}
1842
1843
1844void UseScratchRegisterScope::Exclude(const FPRegister& reg1,
1845 const FPRegister& reg2,
1846 const FPRegister& reg3,
1847 const FPRegister& reg4) {
1848 RegList excludefp = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
1849 ExcludeByRegList(availablefp_, excludefp);
1850}
1851
1852
1853void UseScratchRegisterScope::Exclude(const CPURegister& reg1,
1854 const CPURegister& reg2,
1855 const CPURegister& reg3,
1856 const CPURegister& reg4) {
1857 RegList exclude = 0;
1858 RegList excludefp = 0;
1859
1860 const CPURegister regs[] = {reg1, reg2, reg3, reg4};
1861
1862 for (unsigned i = 0; i < (sizeof(regs) / sizeof(regs[0])); i++) {
1863 if (regs[i].IsRegister()) {
1864 exclude |= regs[i].Bit();
1865 } else if (regs[i].IsFPRegister()) {
1866 excludefp |= regs[i].Bit();
1867 } else {
1868 VIXL_ASSERT(regs[i].IsNone());
1869 }
1870 }
1871
1872 ExcludeByRegList(available_, exclude);
1873 ExcludeByRegList(availablefp_, excludefp);
1874}
1875
1876
1877void UseScratchRegisterScope::ExcludeAll() {
1878 ExcludeByRegList(available_, available_->list());
1879 ExcludeByRegList(availablefp_, availablefp_->list());
1880}
1881
1882
1883CPURegister UseScratchRegisterScope::AcquireNextAvailable(
1884 CPURegList* available) {
1885 VIXL_CHECK(!available->IsEmpty());
1886 CPURegister result = available->PopLowestIndex();
1887 VIXL_ASSERT(!AreAliased(result, xzr, sp));
1888 return result;
1889}
1890
1891
1892void UseScratchRegisterScope::ReleaseByCode(CPURegList* available, int code) {
1893 ReleaseByRegList(available, static_cast<RegList>(1) << code);
1894}
1895
1896
1897void UseScratchRegisterScope::ReleaseByRegList(CPURegList* available,
1898 RegList regs) {
1899 available->set_list(available->list() | regs);
1900}
1901
1902
1903void UseScratchRegisterScope::IncludeByRegList(CPURegList* available,
1904 RegList regs) {
1905 available->set_list(available->list() | regs);
1906}
1907
1908
1909void UseScratchRegisterScope::ExcludeByRegList(CPURegList* available,
1910 RegList exclude) {
1911 available->set_list(available->list() & ~exclude);
1912}
1913
armvixlad96eda2013-06-14 11:42:37 +01001914} // namespace vixl