// Copyright 2013, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "a64/macro-assembler-a64.h"

namespace vixl {


LiteralPool::LiteralPool(Assembler* assm)
    : assm_(assm), first_use_(-1), monitor_(0) {
}


LiteralPool::~LiteralPool() {
  VIXL_ASSERT(IsEmpty());
  VIXL_ASSERT(!IsBlocked());
}


void LiteralPool::Reset() {
  std::vector<RawLiteral*>::iterator it, end;
  for (it = entries_.begin(), end = entries_.end(); it != end; ++it) {
    delete *it;
  }
  entries_.clear();
  first_use_ = -1;
  monitor_ = 0;
}


size_t LiteralPool::Size() const {
  size_t size = 0;
  std::vector<RawLiteral*>::const_iterator it, end;
  for (it = entries_.begin(), end = entries_.end(); it != end; ++it) {
    size += (*it)->size();
  }

  // Account for the pool header.
  return size + kInstructionSize;
}


void LiteralPool::Release() {
  if (--monitor_ == 0) {
    // Has the literal pool been blocked for too long?
    VIXL_ASSERT(assm_->CursorOffset() < MaxCursorOffset());
  }
}


void LiteralPool::CheckEmitFor(size_t amount, EmitOption option) {
  if (IsEmpty() || IsBlocked()) return;

  ptrdiff_t distance = assm_->CursorOffset() + amount - first_use_;
  if (distance >= kRecommendedLiteralPoolRange) {
    Emit(option);
  }
}


void LiteralPool::Emit(EmitOption option) {
  // There is an issue if we are asked to emit a blocked or empty pool.
  VIXL_ASSERT(!IsBlocked());
  VIXL_ASSERT(!IsEmpty());

  size_t pool_size = Size();
  size_t emit_size = pool_size;
  if (option == kBranchRequired) emit_size += kInstructionSize;
  Label end_of_pool;

  CodeBufferCheckScope guard(assm_,
                             emit_size,
                             CodeBufferCheckScope::kCheck,
                             CodeBufferCheckScope::kExactSize);
  if (option == kBranchRequired) assm_->b(&end_of_pool);

  // Marker indicating the size of the literal pool in 32-bit words.
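  // (Clarifying note: the marker below is an ldr into xzr whose literal offset
  // field carries that word count, so anything walking the generated code can
  // tell how much pool data follows. This comment only describes the existing
  // behaviour; it does not change the emitted encoding.)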
  VIXL_ASSERT((pool_size % kWRegSizeInBytes) == 0);
  assm_->ldr(xzr, pool_size / kWRegSizeInBytes);

  // Now populate the literal pool.
  std::vector<RawLiteral*>::iterator it, end;
  for (it = entries_.begin(), end = entries_.end(); it != end; ++it) {
    VIXL_ASSERT((*it)->IsUsed());
    assm_->place(*it);
    delete *it;
  }

  if (option == kBranchRequired) assm_->bind(&end_of_pool);

  entries_.clear();
  first_use_ = -1;
}


ptrdiff_t LiteralPool::NextCheckOffset() {
  if (IsEmpty()) {
    return assm_->CursorOffset() + kRecommendedLiteralPoolRange;
  }

  VIXL_ASSERT(
      ((assm_->CursorOffset() - first_use_) < kRecommendedLiteralPoolRange) ||
      IsBlocked());

  return first_use_ + kRecommendedLiteralPoolRange;
}


EmissionCheckScope::EmissionCheckScope(MacroAssembler* masm, size_t size) {
  if (masm) {
    masm->EnsureEmitFor(size);
#ifdef VIXL_DEBUG
    masm_ = masm;
    masm->Bind(&start_);
    size_ = size;
    masm->AcquireBuffer();
#endif
  }
}


EmissionCheckScope::~EmissionCheckScope() {
#ifdef VIXL_DEBUG
  if (masm_) {
    masm_->ReleaseBuffer();
    VIXL_ASSERT(masm_->SizeOfCodeGeneratedSince(&start_) <= size_);
  }
#endif
}


MacroAssembler::MacroAssembler(size_t capacity,
                               PositionIndependentCodeOption pic)
    : Assembler(capacity, pic),
#ifdef VIXL_DEBUG
      allow_macro_instructions_(true),
#endif
      sp_(sp),
      tmp_list_(ip0, ip1),
      fptmp_list_(d31),
      literal_pool_(this) {
  checkpoint_ = NextCheckOffset();
}


MacroAssembler::MacroAssembler(byte* buffer,
                               size_t capacity,
                               PositionIndependentCodeOption pic)
    : Assembler(buffer, capacity, pic),
#ifdef VIXL_DEBUG
      allow_macro_instructions_(true),
#endif
      sp_(sp),
      tmp_list_(ip0, ip1),
      fptmp_list_(d31),
      literal_pool_(this) {
  checkpoint_ = NextCheckOffset();
}


MacroAssembler::~MacroAssembler() {
}


void MacroAssembler::Reset() {
  Assembler::Reset();

  VIXL_ASSERT(!literal_pool_.IsBlocked());
  literal_pool_.Reset();

  checkpoint_ = NextCheckOffset();
}


void MacroAssembler::FinalizeCode() {
  if (!literal_pool_.IsEmpty()) literal_pool_.Emit();

  Assembler::FinalizeCode();
}


int MacroAssembler::MoveImmediateHelper(MacroAssembler* masm,
                                        const Register& rd,
                                        uint64_t imm) {
  bool emit_code = (masm != NULL);
  VIXL_ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
  // The worst case for size is mov 64-bit immediate to sp:
  //  * up to 4 instructions to materialise the constant
  //  * 1 instruction to move to sp
  MacroEmissionCheckScope guard(masm);

  // Immediates on AArch64 can be produced using an initial value, and zero to
  // three move-keep operations.
  //
  // Initial values can be generated with:
  //  1. 64-bit move zero (movz).
  //  2. 32-bit move inverted (movn).
  //  3. 64-bit move inverted.
  //  4. 32-bit orr immediate.
  //  5. 64-bit orr immediate.
  // Move-keep may then be used to modify each of the 16-bit half words.
  //
  // The code below supports all five initial value generators, and
  // applying move-keep operations to move-zero and move-inverted initial
  // values.

  // Try to move the immediate in one instruction, and if that fails, switch to
  // using multiple instructions.
  if (OneInstrMoveImmediateHelper(masm, rd, imm)) {
    return 1;
  } else {
    int instruction_count = 0;
    unsigned reg_size = rd.size();

    // Generic immediate case. Imm will be represented by
    //   [imm3, imm2, imm1, imm0], where each imm is 16 bits.
    // A move-zero or move-inverted is generated for the first non-zero or
    // non-0xffff immX, and a move-keep for subsequent non-zero immX.
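    // For example (illustrative only): imm = 0x0000cafe00001234 is emitted as
    //   movz xd, #0x1234
    //   movk xd, #0xcafe, lsl #32
    // since imm1 and imm3 are zero and can be skipped, while a value such as
    // 0xfffffffffffffffe is mostly 0xffff halfwords and collapses to a single
    //   movn xd, #0x1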

    uint64_t ignored_halfword = 0;
    bool invert_move = false;
    // If the number of 0xffff halfwords is greater than the number of 0x0000
    // halfwords, it's more efficient to use move-inverted.
    if (CountClearHalfWords(~imm, reg_size) >
        CountClearHalfWords(imm, reg_size)) {
      ignored_halfword = 0xffff;
      invert_move = true;
    }

    // Mov instructions can't move values into the stack pointer, so set up a
    // temporary register, if needed.
    UseScratchRegisterScope temps;
    Register temp;
    if (emit_code) {
      temps.Open(masm);
      temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
    }

    // Iterate through the halfwords. Use movn/movz for the first non-ignored
    // halfword, and movk for subsequent halfwords.
    VIXL_ASSERT((reg_size % 16) == 0);
    bool first_mov_done = false;
    for (unsigned i = 0; i < (reg_size / 16); i++) {
      uint64_t imm16 = (imm >> (16 * i)) & 0xffff;
      if (imm16 != ignored_halfword) {
        if (!first_mov_done) {
          if (invert_move) {
            if (emit_code) masm->movn(temp, ~imm16 & 0xffff, 16 * i);
            instruction_count++;
          } else {
            if (emit_code) masm->movz(temp, imm16, 16 * i);
            instruction_count++;
          }
          first_mov_done = true;
        } else {
          // Construct a wider constant.
          if (emit_code) masm->movk(temp, imm16, 16 * i);
          instruction_count++;
        }
      }
    }

    VIXL_ASSERT(first_mov_done);

    // Move the temporary if the original destination register was the stack
    // pointer.
    if (rd.IsSP()) {
      if (emit_code) masm->mov(rd, temp);
      instruction_count++;
    }
    return instruction_count;
  }
}


bool MacroAssembler::OneInstrMoveImmediateHelper(MacroAssembler* masm,
                                                 const Register& dst,
                                                 int64_t imm) {
  bool emit_code = masm != NULL;
  unsigned n, imm_s, imm_r;
  int reg_size = dst.size();

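  // Illustrative examples (not exhaustive): 0xffff0000 fits a single movz
  // (0xffff shifted left by 16), 0xffffffffffffaaaa fits a single movn, and
  // 0x0000ffff0000ffff is encodable as an orr bitmask immediate.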
  if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
    // Immediate can be represented in a move zero instruction. Movz can't write
    // to the stack pointer.
    if (emit_code) {
      masm->movz(dst, imm);
    }
    return true;
  } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
    // Immediate can be represented in a move inverted instruction. Movn can't
    // write to the stack pointer.
    if (emit_code) {
      masm->movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
    }
    return true;
  } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
    // Immediate can be represented in a logical orr instruction.
    VIXL_ASSERT(!dst.IsZero());
    if (emit_code) {
      masm->LogicalImmediate(
          dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
    }
    return true;
  }
  return false;
}


void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
  VIXL_ASSERT((reg.Is(NoReg) || (type >= kBranchTypeFirstUsingReg)) &&
              ((bit == -1) || (type >= kBranchTypeFirstUsingBit)));
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    B(static_cast<Condition>(type), label);
  } else {
    switch (type) {
      case always: B(label); break;
      case never: break;
      case reg_zero: Cbz(reg, label); break;
      case reg_not_zero: Cbnz(reg, label); break;
      case reg_bit_clear: Tbz(reg, bit, label); break;
      case reg_bit_set: Tbnz(reg, bit, label); break;
      default:
        VIXL_UNREACHABLE();
    }
  }
}

void MacroAssembler::And(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, AND);
}


void MacroAssembler::Ands(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, ANDS);
}


void MacroAssembler::Tst(const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Ands(AppropriateZeroRegFor(rn), rn, operand);
}


void MacroAssembler::Bic(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, BIC);
}


void MacroAssembler::Bics(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, BICS);
}


void MacroAssembler::Orr(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, ORR);
}


void MacroAssembler::Orn(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, ORN);
}


void MacroAssembler::Eor(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, EOR);
}


void MacroAssembler::Eon(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, EON);
}


void MacroAssembler::LogicalMacro(const Register& rd,
                                  const Register& rn,
                                  const Operand& operand,
                                  LogicalOp op) {
  // The worst case for size is logical immediate to sp:
  //  * up to 4 instructions to materialise the constant
  //  * 1 instruction to do the operation
  //  * 1 instruction to move to sp
  MacroEmissionCheckScope guard(this);
  UseScratchRegisterScope temps(this);

  if (operand.IsImmediate()) {
    int64_t immediate = operand.immediate();
    unsigned reg_size = rd.size();

    // If the operation is NOT, invert the operation and immediate.
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = ~immediate;
    }

    // Ignore the top 32 bits of an immediate if we're moving to a W register.
    if (rd.Is32Bits()) {
      // Check that the top 32 bits are consistent.
      VIXL_ASSERT(((immediate >> kWRegSize) == 0) ||
                  ((immediate >> kWRegSize) == -1));
      immediate &= kWRegMask;
    }

    VIXL_ASSERT(rd.Is64Bits() || is_uint32(immediate));

    // Special cases for all set or all clear immediates.
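    // For example, And(x0, x1, 0) reduces to Mov(x0, 0), and
    // Eor(w0, w1, 0xffffffff) reduces to Mvn(w0, w1).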
    if (immediate == 0) {
      switch (op) {
        case AND:
          Mov(rd, 0);
          return;
        case ORR:  // Fall through.
        case EOR:
          Mov(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          break;
        default:
          VIXL_UNREACHABLE();
      }
    } else if ((rd.Is64Bits() && (immediate == -1)) ||
               (rd.Is32Bits() && (immediate == 0xffffffff))) {
      switch (op) {
        case AND:
          Mov(rd, rn);
          return;
        case ORR:
          Mov(rd, immediate);
          return;
        case EOR:
          Mvn(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          break;
        default:
          VIXL_UNREACHABLE();
      }
    }

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // Immediate can't be encoded: synthesize using move immediate.
      Register temp = temps.AcquireSameSizeAs(rn);
      Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);

      if (rd.Is(sp)) {
        // If rd is the stack pointer we cannot use it as the destination
        // register so we use the temp register as an intermediate again.
        Logical(temp, rn, imm_operand, op);
        Mov(sp, temp);
      } else {
        Logical(rd, rn, imm_operand, op);
      }
    }
  } else if (operand.IsExtendedRegister()) {
    VIXL_ASSERT(operand.reg().size() <= rd.size());
    // Add/sub extended supports shift <= 4. We want to support exactly the
    // same modes here.
    VIXL_ASSERT(operand.shift_amount() <= 4);
    VIXL_ASSERT(operand.reg().Is64Bits() ||
                ((operand.extend() != UXTX) && (operand.extend() != SXTX)));

    temps.Exclude(operand.reg());
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    Logical(rd, rn, Operand(temp), op);
  } else {
    // The operand can be encoded in the instruction.
    VIXL_ASSERT(operand.IsShiftedRegister());
    Logical(rd, rn, operand, op);
  }
}


void MacroAssembler::Mov(const Register& rd,
                         const Operand& operand,
                         DiscardMoveMode discard_mode) {
  VIXL_ASSERT(allow_macro_instructions_);
  // The worst case for size is mov immediate with up to 4 instructions.
  MacroEmissionCheckScope guard(this);

  if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mov(rd, operand.immediate());
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Emit a shift instruction if moving a shifted register. This operation
    // could also be achieved using an orr instruction (like orn used by Mvn),
    // but using a shift instruction makes the disassembly clearer.
    EmitShift(rd, operand.reg(), operand.shift(), operand.shift_amount());
  } else if (operand.IsExtendedRegister()) {
    // Emit an extend instruction if moving an extended register. This handles
    // extend with post-shift operations, too.
    EmitExtendShift(rd, operand.reg(), operand.extend(),
                    operand.shift_amount());
  } else {
    // Otherwise, emit a register move only if the registers are distinct, or
    // if they are not X registers.
    //
    // Note that mov(w0, w0) is not a no-op because it clears the top word of
    // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
    // registers is not required to clear the top word of the X register. In
    // this case, the instruction is discarded.
    //
    // If the sp is an operand, add #0 is emitted, otherwise, orr #0.
    if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
                                  (discard_mode == kDontDiscardForSameWReg))) {
      mov(rd, operand.reg());
    }
  }
}


void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  // The worst case for size is mvn immediate with up to 4 instructions.
  MacroEmissionCheckScope guard(this);

  if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mvn(rd, operand.immediate());
  } else if (operand.IsExtendedRegister()) {
    UseScratchRegisterScope temps(this);
    temps.Exclude(operand.reg());

    // Emit two instructions for the extend case. This differs from Mov, as
    // the extend and invert can't be achieved in one instruction.
    Register temp = temps.AcquireSameSizeAs(rd);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    mvn(rd, Operand(temp));
  } else {
    // Otherwise, register and shifted register cases can be handled by the
    // assembler directly, using orn.
    mvn(rd, operand);
  }
}


void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
  VIXL_ASSERT(allow_macro_instructions_);
  MoveImmediateHelper(this, rd, imm);
}


void MacroAssembler::Ccmp(const Register& rn,
                          const Operand& operand,
                          StatusFlags nzcv,
                          Condition cond) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
  } else {
    ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
  }
}


void MacroAssembler::Ccmn(const Register& rn,
                          const Operand& operand,
                          StatusFlags nzcv,
                          Condition cond) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
  } else {
    ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
  }
}


void MacroAssembler::ConditionalCompareMacro(const Register& rn,
                                             const Operand& operand,
                                             StatusFlags nzcv,
                                             Condition cond,
                                             ConditionalCompareOp op) {
  VIXL_ASSERT((cond != al) && (cond != nv));
  // The worst case for size is ccmp immediate:
  //  * up to 4 instructions to materialise the constant
  //  * 1 instruction for ccmp
  MacroEmissionCheckScope guard(this);

  if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
      (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
    // The immediate can be encoded in the instruction, or the operand is an
    // unshifted register: call the assembler.
    ConditionalCompare(rn, operand, nzcv, cond, op);
  } else {
    UseScratchRegisterScope temps(this);
    // The operand isn't directly supported by the instruction: perform the
    // operation on a temporary register.
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    ConditionalCompare(rn, temp, nzcv, cond, op);
  }
}


void MacroAssembler::Csel(const Register& rd,
                          const Register& rn,
                          const Operand& operand,
                          Condition cond) {
  VIXL_ASSERT(allow_macro_instructions_);
  VIXL_ASSERT(!rd.IsZero());
  VIXL_ASSERT(!rn.IsZero());
  VIXL_ASSERT((cond != al) && (cond != nv));
  // The worst case for size is csel immediate:
  //  * up to 4 instructions to materialise the constant
  //  * 1 instruction for csel
  MacroEmissionCheckScope guard(this);

  if (operand.IsImmediate()) {
    // Immediate argument. Handle special cases of 0, 1 and -1 using zero
    // register.
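    // For example, Csel(x0, x1, 1, eq) becomes csinc(x0, x1, xzr, eq), and
    // Csel(x0, x1, -1, eq) becomes csinv(x0, x1, xzr, eq).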
    int64_t imm = operand.immediate();
    Register zr = AppropriateZeroRegFor(rn);
    if (imm == 0) {
      csel(rd, rn, zr, cond);
    } else if (imm == 1) {
      csinc(rd, rn, zr, cond);
    } else if (imm == -1) {
      csinv(rd, rn, zr, cond);
    } else {
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireSameSizeAs(rn);
      Mov(temp, operand.immediate());
      csel(rd, rn, temp, cond);
    }
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
    // Unshifted register argument.
    csel(rd, rn, operand.reg(), cond);
  } else {
    // All other arguments.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    csel(rd, rn, temp, cond);
  }
}


void MacroAssembler::Add(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0) &&
      IsImmAddSub(-operand.immediate())) {
    AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, SUB);
  } else {
    AddSubMacro(rd, rn, operand, LeaveFlags, ADD);
  }
}


void MacroAssembler::Adds(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0) &&
      IsImmAddSub(-operand.immediate())) {
    AddSubMacro(rd, rn, -operand.immediate(), SetFlags, SUB);
  } else {
    AddSubMacro(rd, rn, operand, SetFlags, ADD);
  }
}


void MacroAssembler::Sub(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0) &&
      IsImmAddSub(-operand.immediate())) {
    AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, ADD);
  } else {
    AddSubMacro(rd, rn, operand, LeaveFlags, SUB);
  }
}


void MacroAssembler::Subs(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0) &&
      IsImmAddSub(-operand.immediate())) {
    AddSubMacro(rd, rn, -operand.immediate(), SetFlags, ADD);
  } else {
    AddSubMacro(rd, rn, operand, SetFlags, SUB);
  }
}


void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Adds(AppropriateZeroRegFor(rn), rn, operand);
}


void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Subs(AppropriateZeroRegFor(rn), rn, operand);
}


void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
  VIXL_ASSERT(allow_macro_instructions_);
  // The worst case for size is:
  //  * 1 instruction to materialise the constant, using the literal pool if
  //    necessary
  //  * 1 instruction for fcmp
  MacroEmissionCheckScope guard(this);
  if (value != 0.0) {
    UseScratchRegisterScope temps(this);
    FPRegister tmp = temps.AcquireSameSizeAs(fn);
    Fmov(tmp, value);
    fcmp(fn, tmp);
  } else {
    fcmp(fn, value);
  }
}


void MacroAssembler::Fmov(FPRegister fd, double imm) {
  VIXL_ASSERT(allow_macro_instructions_);
  // Floating point immediates are loaded through the literal pool.
  MacroEmissionCheckScope guard(this);

  if (fd.Is32Bits()) {
    Fmov(fd, static_cast<float>(imm));
    return;
  }

  VIXL_ASSERT(fd.Is64Bits());
  if (IsImmFP64(imm)) {
    fmov(fd, imm);
  } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
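    // Only a positive zero can be taken from the zero register; the copysign
    // check above keeps -0.0 (sign bit set) on the literal pool path below.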
    fmov(fd, xzr);
  } else {
    RawLiteral* literal = literal_pool_.Add(imm);
    ldr(fd, literal);
  }
}


void MacroAssembler::Fmov(FPRegister fd, float imm) {
  VIXL_ASSERT(allow_macro_instructions_);
  // Floating point immediates are loaded through the literal pool.
  MacroEmissionCheckScope guard(this);

  if (fd.Is64Bits()) {
    Fmov(fd, static_cast<double>(imm));
    return;
  }

  VIXL_ASSERT(fd.Is32Bits());
  if (IsImmFP32(imm)) {
    fmov(fd, imm);
  } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
    fmov(fd, wzr);
  } else {
    RawLiteral* literal = literal_pool_.Add(imm);
    ldr(fd, literal);
  }
}


void MacroAssembler::Neg(const Register& rd,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate()) {
    Mov(rd, -operand.immediate());
  } else {
    Sub(rd, AppropriateZeroRegFor(rd), operand);
  }
}


void MacroAssembler::Negs(const Register& rd,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Subs(rd, AppropriateZeroRegFor(rd), operand);
}


bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
                                              int64_t imm) {
  return OneInstrMoveImmediateHelper(this, dst, imm);
}


Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
                                                  int64_t imm) {
  int reg_size = dst.size();

  // Encode the immediate in a single move instruction, if possible.
  if (TryOneInstrMoveImmediate(dst, imm)) {
    // The move was successful; nothing to do here.
  } else {
    // Pre-shift the immediate to the least-significant bits of the register.
    int shift_low = CountTrailingZeros(imm, reg_size);
    int64_t imm_low = imm >> shift_low;

    // Pre-shift the immediate to the most-significant bits of the register,
    // inserting set bits in the least-significant bits.
    int shift_high = CountLeadingZeros(imm, reg_size);
    int64_t imm_high = (imm << shift_high) | ((1 << shift_high) - 1);
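    // For example (illustrative only): 0x12340 is neither a move nor a bitmask
    // immediate, but 0x12340 >> 6 = 0x48d is, so a single movz plus the
    // returned Operand(dst, LSL, 6) covers it.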

    if (TryOneInstrMoveImmediate(dst, imm_low)) {
      // The new immediate has been moved into the destination's low bits:
      // return a new leftward-shifting operand.
      return Operand(dst, LSL, shift_low);
    } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
      // The new immediate has been moved into the destination's high bits:
      // return a new rightward-shifting operand.
      return Operand(dst, LSR, shift_high);
    } else {
      Mov(dst, imm);
    }
  }
  return Operand(dst);
}


void MacroAssembler::AddSubMacro(const Register& rd,
                                 const Register& rn,
                                 const Operand& operand,
                                 FlagsUpdate S,
                                 AddSubOp op) {
  // Worst case is add/sub immediate:
  //  * up to 4 instructions to materialise the constant
  //  * 1 instruction for add/sub
  MacroEmissionCheckScope guard(this);

  if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
      (S == LeaveFlags)) {
    // The instruction would be a nop. Avoid generating useless code.
    return;
  }

  if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
      (rn.IsZero() && !operand.IsShiftedRegister()) ||
      (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    if (operand.IsImmediate()) {
      Operand imm_operand =
          MoveImmediateForShiftedOp(temp, operand.immediate());
      AddSub(rd, rn, imm_operand, S, op);
    } else {
      Mov(temp, operand);
      AddSub(rd, rn, temp, S, op);
    }
  } else {
    AddSub(rd, rn, operand, S, op);
  }
}


void MacroAssembler::Adc(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
}


void MacroAssembler::Adcs(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
}


void MacroAssembler::Sbc(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
}


void MacroAssembler::Sbcs(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
}


void MacroAssembler::Ngc(const Register& rd,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Register zr = AppropriateZeroRegFor(rd);
  Sbc(rd, zr, operand);
}


void MacroAssembler::Ngcs(const Register& rd,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Register zr = AppropriateZeroRegFor(rd);
  Sbcs(rd, zr, operand);
}


void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
                                          const Register& rn,
                                          const Operand& operand,
                                          FlagsUpdate S,
                                          AddSubWithCarryOp op) {
  VIXL_ASSERT(rd.size() == rn.size());
  // Worst case is addc/subc immediate:
  //  * up to 4 instructions to materialise the constant
  //  * 1 instruction for add/sub
  MacroEmissionCheckScope guard(this);
  UseScratchRegisterScope temps(this);

  if (operand.IsImmediate() ||
      (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    // Add/sub with carry (immediate or ROR shifted register.)
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    AddSubWithCarry(rd, rn, Operand(temp), S, op);
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Add/sub with carry (shifted register).
    VIXL_ASSERT(operand.reg().size() == rd.size());
    VIXL_ASSERT(operand.shift() != ROR);
    VIXL_ASSERT(is_uintn(rd.size() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2,
                         operand.shift_amount()));
    temps.Exclude(operand.reg());
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
    AddSubWithCarry(rd, rn, Operand(temp), S, op);
  } else if (operand.IsExtendedRegister()) {
    // Add/sub with carry (extended register).
    VIXL_ASSERT(operand.reg().size() <= rd.size());
    // Add/sub extended supports a shift <= 4. We want to support exactly the
    // same modes.
    VIXL_ASSERT(operand.shift_amount() <= 4);
    VIXL_ASSERT(operand.reg().Is64Bits() ||
                ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    temps.Exclude(operand.reg());
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    AddSubWithCarry(rd, rn, Operand(temp), S, op);
  } else {
    // The addressing mode is directly supported by the instruction.
    AddSubWithCarry(rd, rn, operand, S, op);
  }
}


#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP)                         \
void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \
  VIXL_ASSERT(allow_macro_instructions_);                             \
  LoadStoreMacro(REG, addr, OP);                                      \
}
LS_MACRO_LIST(DEFINE_FUNCTION)
#undef DEFINE_FUNCTION


void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
                                    const MemOperand& addr,
                                    LoadStoreOp op) {
  // Worst case is ldr/str pre/post index:
  //  * 1 instruction for ldr/str
  //  * up to 4 instructions to materialise the constant
  //  * 1 instruction to update the base
  MacroEmissionCheckScope guard(this);

  int64_t offset = addr.offset();
  LSDataSize size = CalcLSDataSize(op);

  // Check if an immediate offset fits in the immediate field of the
  // appropriate instruction. If not, emit two instructions to perform
  // the operation.
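  // For example (illustrative): Ldr(x0, MemOperand(x1, 1 << 20)) cannot encode
  // its offset directly, so the offset is moved into a scratch register and a
  // register-offset load is emitted instead.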
  if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
      !IsImmLSUnscaled(offset)) {
    // Immediate offset that can't be encoded using unsigned or unscaled
    // addressing modes.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(addr.base());
    Mov(temp, addr.offset());
    LoadStore(rt, MemOperand(addr.base(), temp), op);
  } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
    // Post-index beyond unscaled addressing range.
    LoadStore(rt, MemOperand(addr.base()), op);
    Add(addr.base(), addr.base(), Operand(offset));
  } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
    // Pre-index beyond unscaled addressing range.
    Add(addr.base(), addr.base(), Operand(offset));
    LoadStore(rt, MemOperand(addr.base()), op);
  } else {
    // Encodable in one load/store instruction.
    LoadStore(rt, addr, op);
  }
}


#define DEFINE_FUNCTION(FN, REGTYPE, REG, REG2, OP)  \
void MacroAssembler::FN(const REGTYPE REG,           \
                        const REGTYPE REG2,          \
                        const MemOperand& addr) {    \
  VIXL_ASSERT(allow_macro_instructions_);            \
  LoadStorePairMacro(REG, REG2, addr, OP);           \
}
LSPAIR_MACRO_LIST(DEFINE_FUNCTION)
#undef DEFINE_FUNCTION

void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
                                        const CPURegister& rt2,
                                        const MemOperand& addr,
                                        LoadStorePairOp op) {
  // TODO(all): Should we support register offset for load-store-pair?
  VIXL_ASSERT(!addr.IsRegisterOffset());
  // Worst case is ldp/stp immediate:
  //  * 1 instruction for ldp/stp
  //  * up to 4 instructions to materialise the constant
  //  * 1 instruction to update the base
  MacroEmissionCheckScope guard(this);

  int64_t offset = addr.offset();
  LSDataSize size = CalcLSPairDataSize(op);

  // Check if the offset fits in the immediate field of the appropriate
  // instruction. If not, emit two instructions to perform the operation.
  if (IsImmLSPair(offset, size)) {
    // Encodable in one load/store pair instruction.
    LoadStorePair(rt, rt2, addr, op);
  } else {
    Register base = addr.base();
    if (addr.IsImmediateOffset()) {
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireSameSizeAs(base);
      Add(temp, base, offset);
      LoadStorePair(rt, rt2, MemOperand(temp), op);
    } else if (addr.IsPostIndex()) {
      LoadStorePair(rt, rt2, MemOperand(base), op);
      Add(base, base, offset);
    } else {
      VIXL_ASSERT(addr.IsPreIndex());
      Add(base, base, offset);
      LoadStorePair(rt, rt2, MemOperand(base), op);
    }
  }
}


void MacroAssembler::Prfm(PrefetchOperation op, const MemOperand& addr) {
  MacroEmissionCheckScope guard(this);

  // There are no pre- or post-index modes for prfm.
  VIXL_ASSERT(addr.IsImmediateOffset() || addr.IsRegisterOffset());

  // The access size is implicitly 8 bytes for all prefetch operations.
  LSDataSize size = LSDoubleWord;

  // Check if an immediate offset fits in the immediate field of the
  // appropriate instruction. If not, emit two instructions to perform
  // the operation.
  if (addr.IsImmediateOffset() && !IsImmLSScaled(addr.offset(), size) &&
      !IsImmLSUnscaled(addr.offset())) {
    // Immediate offset that can't be encoded using unsigned or unscaled
    // addressing modes.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(addr.base());
    Mov(temp, addr.offset());
    Prefetch(op, MemOperand(addr.base(), temp));
  } else {
    // Simple register-offsets are encodable in one instruction.
    Prefetch(op, addr);
  }
}


void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
                          const CPURegister& src2, const CPURegister& src3) {
  VIXL_ASSERT(allow_macro_instructions_);
  VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
  VIXL_ASSERT(src0.IsValid());

  int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
  int size = src0.SizeInBytes();

  PrepareForPush(count, size);
  PushHelper(count, size, src0, src1, src2, src3);
}


void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
                         const CPURegister& dst2, const CPURegister& dst3) {
  // It is not valid to pop into the same register more than once in one
  // instruction, not even into the zero register.
  VIXL_ASSERT(allow_macro_instructions_);
  VIXL_ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
  VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  VIXL_ASSERT(dst0.IsValid());

  int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
  int size = dst0.SizeInBytes();

  PrepareForPop(count, size);
  PopHelper(count, size, dst0, dst1, dst2, dst3);
}


void MacroAssembler::PushCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  PrepareForPush(registers.Count(), size);
  // Push up to four registers at a time because if the current stack pointer is
  // sp and reg_size is 32, registers must be pushed in blocks of four in order
  // to maintain the 16-byte alignment for sp.
  VIXL_ASSERT(allow_macro_instructions_);
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& src0 = registers.PopHighestIndex();
    const CPURegister& src1 = registers.PopHighestIndex();
    const CPURegister& src2 = registers.PopHighestIndex();
    const CPURegister& src3 = registers.PopHighestIndex();
    int count = count_before - registers.Count();
    PushHelper(count, size, src0, src1, src2, src3);
  }
}


void MacroAssembler::PopCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  PrepareForPop(registers.Count(), size);
  // Pop up to four registers at a time because if the current stack pointer is
  // sp and reg_size is 32, registers must be popped in blocks of four in order
  // to maintain the 16-byte alignment for sp.
  VIXL_ASSERT(allow_macro_instructions_);
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    const CPURegister& dst2 = registers.PopLowestIndex();
    const CPURegister& dst3 = registers.PopLowestIndex();
    int count = count_before - registers.Count();
    PopHelper(count, size, dst0, dst1, dst2, dst3);
  }
}


void MacroAssembler::PushMultipleTimes(int count, Register src) {
  VIXL_ASSERT(allow_macro_instructions_);
  int size = src.SizeInBytes();

  PrepareForPush(count, size);
  // Push up to four registers at a time if possible because if the current
  // stack pointer is sp and the register size is 32, registers must be pushed
  // in blocks of four in order to maintain the 16-byte alignment for sp.
  while (count >= 4) {
    PushHelper(4, size, src, src, src, src);
    count -= 4;
  }
  if (count >= 2) {
    PushHelper(2, size, src, src, NoReg, NoReg);
    count -= 2;
  }
  if (count == 1) {
    PushHelper(1, size, src, NoReg, NoReg, NoReg);
    count -= 1;
  }
  VIXL_ASSERT(count == 0);
}


void MacroAssembler::PushHelper(int count, int size,
                                const CPURegister& src0,
                                const CPURegister& src1,
                                const CPURegister& src2,
                                const CPURegister& src3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  // Worst case for size is 2 stp.
  InstructionAccurateScope scope(this, 2,
                                 InstructionAccurateScope::kMaximumSize);

  VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
  VIXL_ASSERT(size == src0.SizeInBytes());

  // When pushing multiple registers, the store order is chosen such that
  // Push(a, b) is equivalent to Push(a) followed by Push(b).
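  // For example (illustrative), pushing two X registers with Push(x0, x1)
  // emits "stp x1, x0, [sp, #-16]!" when sp is the stack pointer, leaving x1
  // at the lower address exactly as two separate pushes would.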
1241 switch (count) {
1242 case 1:
armvixlb0c8ae22014-03-21 14:03:59 +00001243 VIXL_ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
armvixlad96eda2013-06-14 11:42:37 +01001244 str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
1245 break;
1246 case 2:
armvixlb0c8ae22014-03-21 14:03:59 +00001247 VIXL_ASSERT(src2.IsNone() && src3.IsNone());
armvixlad96eda2013-06-14 11:42:37 +01001248 stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
1249 break;
1250 case 3:
armvixlb0c8ae22014-03-21 14:03:59 +00001251 VIXL_ASSERT(src3.IsNone());
armvixlad96eda2013-06-14 11:42:37 +01001252 stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
1253 str(src0, MemOperand(StackPointer(), 2 * size));
1254 break;
1255 case 4:
1256 // Skip over 4 * size, then fill in the gap. This allows four W registers
1257 // to be pushed using sp, whilst maintaining 16-byte alignment for sp at
1258 // all times.
1259 stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
1260 stp(src1, src0, MemOperand(StackPointer(), 2 * size));
1261 break;
1262 default:
armvixlb0c8ae22014-03-21 14:03:59 +00001263 VIXL_UNREACHABLE();
armvixlad96eda2013-06-14 11:42:37 +01001264 }
1265}
1266
1267
1268void MacroAssembler::PopHelper(int count, int size,
1269 const CPURegister& dst0,
1270 const CPURegister& dst1,
1271 const CPURegister& dst2,
1272 const CPURegister& dst3) {
1273 // Ensure that we don't unintentionally modify scratch or debug registers.
armvixlc68cb642014-09-25 18:49:30 +01001274 // Worst case for size is 2 ldp.
1275 InstructionAccurateScope scope(this, 2,
1276 InstructionAccurateScope::kMaximumSize);
armvixlad96eda2013-06-14 11:42:37 +01001277
armvixlb0c8ae22014-03-21 14:03:59 +00001278 VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
1279 VIXL_ASSERT(size == dst0.SizeInBytes());
armvixlad96eda2013-06-14 11:42:37 +01001280
1281 // When popping multiple registers, the load order is chosen such that
1282 // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
1283 switch (count) {
1284 case 1:
armvixlb0c8ae22014-03-21 14:03:59 +00001285 VIXL_ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
armvixlad96eda2013-06-14 11:42:37 +01001286 ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
1287 break;
1288 case 2:
armvixlb0c8ae22014-03-21 14:03:59 +00001289 VIXL_ASSERT(dst2.IsNone() && dst3.IsNone());
armvixlad96eda2013-06-14 11:42:37 +01001290 ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
1291 break;
1292 case 3:
armvixlb0c8ae22014-03-21 14:03:59 +00001293 VIXL_ASSERT(dst3.IsNone());
armvixlad96eda2013-06-14 11:42:37 +01001294 ldr(dst2, MemOperand(StackPointer(), 2 * size));
1295 ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
1296 break;
1297 case 4:
1298 // Load the higher addresses first, then load the lower addresses and skip
1299 // the whole block in the second instruction. This allows four W registers
1300 // to be popped using sp, whilst maintaining 16-byte alignment for sp at
1301 // all times.
1302 ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
1303 ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
1304 break;
1305 default:
armvixlb0c8ae22014-03-21 14:03:59 +00001306 VIXL_UNREACHABLE();
armvixlad96eda2013-06-14 11:42:37 +01001307 }
1308}
1309
1310
1311void MacroAssembler::PrepareForPush(int count, int size) {
1312 if (sp.Is(StackPointer())) {
1313 // If the current stack pointer is sp, then it must be aligned to 16 bytes
1314 // on entry and the total size of the specified registers must also be a
1315 // multiple of 16 bytes.
armvixlb0c8ae22014-03-21 14:03:59 +00001316 VIXL_ASSERT((count * size) % 16 == 0);
armvixlad96eda2013-06-14 11:42:37 +01001317 } else {
1318 // Even if the current stack pointer is not the system stack pointer (sp),
1319 // the system stack pointer will still be modified in order to comply with
1320 // ABI rules about accessing memory below the system stack pointer.
1321 BumpSystemStackPointer(count * size);
1322 }
1323}
1324
1325
1326void MacroAssembler::PrepareForPop(int count, int size) {
1327 USE(count);
1328 USE(size);
1329 if (sp.Is(StackPointer())) {
1330 // If the current stack pointer is sp, then it must be aligned to 16 bytes
1331 // on entry and the total size of the specified registers must also be a
1332 // multiple of 16 bytes.
armvixlb0c8ae22014-03-21 14:03:59 +00001333 VIXL_ASSERT((count * size) % 16 == 0);
armvixlad96eda2013-06-14 11:42:37 +01001334 }
1335}
1336
1337void MacroAssembler::Poke(const Register& src, const Operand& offset) {
armvixlb0c8ae22014-03-21 14:03:59 +00001338 VIXL_ASSERT(allow_macro_instructions_);
armvixlad96eda2013-06-14 11:42:37 +01001339 if (offset.IsImmediate()) {
armvixlb0c8ae22014-03-21 14:03:59 +00001340 VIXL_ASSERT(offset.immediate() >= 0);
armvixlad96eda2013-06-14 11:42:37 +01001341 }
1342
1343 Str(src, MemOperand(StackPointer(), offset));
1344}
1345
1346
1347void MacroAssembler::Peek(const Register& dst, const Operand& offset) {
armvixlb0c8ae22014-03-21 14:03:59 +00001348 VIXL_ASSERT(allow_macro_instructions_);
armvixlad96eda2013-06-14 11:42:37 +01001349 if (offset.IsImmediate()) {
armvixlb0c8ae22014-03-21 14:03:59 +00001350 VIXL_ASSERT(offset.immediate() >= 0);
armvixlad96eda2013-06-14 11:42:37 +01001351 }
1352
1353 Ldr(dst, MemOperand(StackPointer(), offset));
1354}
1355
1356
armvixlc68cb642014-09-25 18:49:30 +01001357void MacroAssembler::PeekCPURegList(CPURegList registers, int offset) {
1358 VIXL_ASSERT(!registers.IncludesAliasOf(StackPointer()));
1359 VIXL_ASSERT(offset >= 0);
1360 int size = registers.RegisterSizeInBytes();
1361
1362 while (registers.Count() >= 2) {
1363 const CPURegister& dst0 = registers.PopLowestIndex();
1364 const CPURegister& dst1 = registers.PopLowestIndex();
1365 Ldp(dst0, dst1, MemOperand(StackPointer(), offset));
1366 offset += 2 * size;
1367 }
1368 if (!registers.IsEmpty()) {
1369 Ldr(registers.PopLowestIndex(),
1370 MemOperand(StackPointer(), offset));
1371 }
1372}
1373
1374
1375void MacroAssembler::PokeCPURegList(CPURegList registers, int offset) {
1376 VIXL_ASSERT(!registers.IncludesAliasOf(StackPointer()));
1377 VIXL_ASSERT(offset >= 0);
1378 int size = registers.RegisterSizeInBytes();
1379
1380 while (registers.Count() >= 2) {
1381 const CPURegister& dst0 = registers.PopLowestIndex();
1382 const CPURegister& dst1 = registers.PopLowestIndex();
1383 Stp(dst0, dst1, MemOperand(StackPointer(), offset));
1384 offset += 2 * size;
1385 }
1386 if (!registers.IsEmpty()) {
1387 Str(registers.PopLowestIndex(),
1388 MemOperand(StackPointer(), offset));
1389 }
1390}
1391
1392
armvixlad96eda2013-06-14 11:42:37 +01001393void MacroAssembler::Claim(const Operand& size) {
armvixlb0c8ae22014-03-21 14:03:59 +00001394 VIXL_ASSERT(allow_macro_instructions_);
armvixlf37fdc02014-02-05 13:22:16 +00001395
1396 if (size.IsZero()) {
1397 return;
1398 }
1399
armvixlad96eda2013-06-14 11:42:37 +01001400 if (size.IsImmediate()) {
armvixlb0c8ae22014-03-21 14:03:59 +00001401 VIXL_ASSERT(size.immediate() > 0);
armvixlad96eda2013-06-14 11:42:37 +01001402 if (sp.Is(StackPointer())) {
armvixlb0c8ae22014-03-21 14:03:59 +00001403 VIXL_ASSERT((size.immediate() % 16) == 0);
armvixlad96eda2013-06-14 11:42:37 +01001404 }
1405 }
1406
1407 if (!sp.Is(StackPointer())) {
1408 BumpSystemStackPointer(size);
1409 }
1410
1411 Sub(StackPointer(), StackPointer(), size);
1412}
1413
1414
1415void MacroAssembler::Drop(const Operand& size) {
armvixlb0c8ae22014-03-21 14:03:59 +00001416 VIXL_ASSERT(allow_macro_instructions_);
armvixlf37fdc02014-02-05 13:22:16 +00001417
1418 if (size.IsZero()) {
1419 return;
1420 }
1421
armvixlad96eda2013-06-14 11:42:37 +01001422 if (size.IsImmediate()) {
armvixlb0c8ae22014-03-21 14:03:59 +00001423 VIXL_ASSERT(size.immediate() > 0);
armvixlad96eda2013-06-14 11:42:37 +01001424 if (sp.Is(StackPointer())) {
armvixlb0c8ae22014-03-21 14:03:59 +00001425 VIXL_ASSERT((size.immediate() % 16) == 0);
armvixlad96eda2013-06-14 11:42:37 +01001426 }
1427 }
1428
1429 Add(StackPointer(), StackPointer(), size);
1430}
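
// A minimal usage sketch (illustrative, assuming a MacroAssembler 'masm' with
// sp as the current stack pointer): Claim and Drop are intended to be used as
// a matched pair, with immediate sizes kept as multiples of 16 bytes so that
// sp stays architecturally aligned.
//
//   masm.Claim(32);   // StackPointer() -= 32
//   // ... use the reserved stack space ...
//   masm.Drop(32);    // StackPointer() += 32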
1431
1432
1433void MacroAssembler::PushCalleeSavedRegisters() {
1434 // Ensure that the macro-assembler doesn't use any scratch registers.
armvixlc68cb642014-09-25 18:49:30 +01001435 // Ten stp instructions will be emitted.
1436 // TODO(all): Should we use GetCalleeSaved and SavedFP?
1437 InstructionAccurateScope scope(this, 10);
armvixlad96eda2013-06-14 11:42:37 +01001438
1439 // This method must not be called unless the current stack pointer is sp.
armvixlb0c8ae22014-03-21 14:03:59 +00001440 VIXL_ASSERT(sp.Is(StackPointer()));
armvixlad96eda2013-06-14 11:42:37 +01001441
1442 MemOperand tos(sp, -2 * kXRegSizeInBytes, PreIndex);
1443
armvixlad96eda2013-06-14 11:42:37 +01001444 stp(x29, x30, tos);
1445 stp(x27, x28, tos);
1446 stp(x25, x26, tos);
1447 stp(x23, x24, tos);
1448 stp(x21, x22, tos);
1449 stp(x19, x20, tos);
armvixl5799d6c2014-05-01 11:05:00 +01001450
1451 stp(d14, d15, tos);
1452 stp(d12, d13, tos);
1453 stp(d10, d11, tos);
1454 stp(d8, d9, tos);
armvixlad96eda2013-06-14 11:42:37 +01001455}
1456
1457
1458void MacroAssembler::PopCalleeSavedRegisters() {
1459 // Ensure that the macro-assembler doesn't use any scratch registers.
armvixlc68cb642014-09-25 18:49:30 +01001460 // Ten ldp instructions will be emitted.
1461 // TODO(all): Should we use GetCalleeSaved and SavedFP?
1462 InstructionAccurateScope scope(this, 10);
armvixlad96eda2013-06-14 11:42:37 +01001463
1464 // This method must not be called unless the current stack pointer is sp.
armvixlb0c8ae22014-03-21 14:03:59 +00001465 VIXL_ASSERT(sp.Is(StackPointer()));
armvixlad96eda2013-06-14 11:42:37 +01001466
1467 MemOperand tos(sp, 2 * kXRegSizeInBytes, PostIndex);
1468
armvixl5799d6c2014-05-01 11:05:00 +01001469 ldp(d8, d9, tos);
1470 ldp(d10, d11, tos);
1471 ldp(d12, d13, tos);
1472 ldp(d14, d15, tos);
1473
armvixlad96eda2013-06-14 11:42:37 +01001474 ldp(x19, x20, tos);
1475 ldp(x21, x22, tos);
1476 ldp(x23, x24, tos);
1477 ldp(x25, x26, tos);
1478 ldp(x27, x28, tos);
1479 ldp(x29, x30, tos);
armvixlad96eda2013-06-14 11:42:37 +01001480}
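
// A minimal usage sketch (illustrative, assuming a MacroAssembler 'masm' whose
// current stack pointer is sp, as both helpers require): a generated function
// can bracket its body with these calls to preserve the callee-saved registers
// x19-x30 and d8-d15.
//
//   masm.PushCalleeSavedRegisters();
//   // ... body that may clobber callee-saved registers ...
//   masm.PopCalleeSavedRegisters();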
1481
1482void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
armvixlb0c8ae22014-03-21 14:03:59 +00001483 VIXL_ASSERT(!sp.Is(StackPointer()));
armvixlad96eda2013-06-14 11:42:37 +01001484 // TODO: Several callers rely on this not using scratch registers, so we use
1485 // the assembler directly here. However, this means that large immediate
1486 // values of 'space' cannot be handled.
armvixlc68cb642014-09-25 18:49:30 +01001487 InstructionAccurateScope scope(this, 1);
armvixlad96eda2013-06-14 11:42:37 +01001488 sub(sp, StackPointer(), space);
1489}
1490
1491
1492// This is the main Printf implementation. All callee-saved registers are
1493// preserved, but NZCV and the caller-saved registers may be clobbered.
1494void MacroAssembler::PrintfNoPreserve(const char * format,
1495 const CPURegister& arg0,
1496 const CPURegister& arg1,
1497 const CPURegister& arg2,
1498 const CPURegister& arg3) {
1499 // We cannot handle a caller-saved stack pointer. It doesn't make much sense
1500 // in most cases anyway, so this restriction shouldn't be too serious.
armvixlb0c8ae22014-03-21 14:03:59 +00001501 VIXL_ASSERT(!kCallerSaved.IncludesAliasOf(StackPointer()));
armvixlad96eda2013-06-14 11:42:37 +01001502
armvixl5799d6c2014-05-01 11:05:00 +01001503 // The provided arguments, and their proper PCS registers.
1504 CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
1505 CPURegister pcs[kPrintfMaxArgCount];
1506
1507 int arg_count = kPrintfMaxArgCount;
1508
1509 // The PCS varargs registers for printf. Note that x0 is used for the printf
1510 // format string.
1511 static const CPURegList kPCSVarargs =
1512 CPURegList(CPURegister::kRegister, kXRegSize, 1, arg_count);
1513 static const CPURegList kPCSVarargsFP =
1514 CPURegList(CPURegister::kFPRegister, kDRegSize, 0, arg_count - 1);
1515
1516 // We can use caller-saved registers as scratch values, except for the
1517 // arguments and the PCS registers where they might need to go.
armvixlb0c8ae22014-03-21 14:03:59 +00001518 UseScratchRegisterScope temps(this);
armvixl5799d6c2014-05-01 11:05:00 +01001519 temps.Include(kCallerSaved);
1520 temps.Include(kCallerSavedFP);
1521 temps.Exclude(kPCSVarargs);
1522 temps.Exclude(kPCSVarargsFP);
armvixlb0c8ae22014-03-21 14:03:59 +00001523 temps.Exclude(arg0, arg1, arg2, arg3);
1524
armvixl5799d6c2014-05-01 11:05:00 +01001525 // Copies of the arg lists that we can iterate through.
1526 CPURegList pcs_varargs = kPCSVarargs;
1527 CPURegList pcs_varargs_fp = kPCSVarargsFP;
armvixlad96eda2013-06-14 11:42:37 +01001528
armvixl5799d6c2014-05-01 11:05:00 +01001529 // Place the arguments. There are lots of clever tricks and optimizations we
1530 // could use here, but Printf is a debug tool so instead we just try to keep
1531 // it simple: Move each input that isn't already in the right place to a
1532 // scratch register, then move everything back.
1533 for (unsigned i = 0; i < kPrintfMaxArgCount; i++) {
1534 // Work out the proper PCS register for this argument.
armvixlad96eda2013-06-14 11:42:37 +01001535 if (args[i].IsRegister()) {
armvixl5799d6c2014-05-01 11:05:00 +01001536 pcs[i] = pcs_varargs.PopLowestIndex().X();
1537 // We might only need a W register here. We need to know the size of the
1538 // argument so we can properly encode it for the simulator call.
1539 if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
armvixlad96eda2013-06-14 11:42:37 +01001540 } else if (args[i].IsFPRegister()) {
armvixl5799d6c2014-05-01 11:05:00 +01001541 // In C, floats are always cast to doubles for varargs calls.
1542 pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
armvixlad96eda2013-06-14 11:42:37 +01001543 } else {
armvixl5799d6c2014-05-01 11:05:00 +01001544 VIXL_ASSERT(args[i].IsNone());
armvixlad96eda2013-06-14 11:42:37 +01001545 arg_count = i;
1546 break;
1547 }
armvixlad96eda2013-06-14 11:42:37 +01001548
armvixl5799d6c2014-05-01 11:05:00 +01001549 // If the argument is already in the right place, leave it where it is.
1550 if (args[i].Aliases(pcs[i])) continue;
armvixlad96eda2013-06-14 11:42:37 +01001551
armvixl5799d6c2014-05-01 11:05:00 +01001552 // Otherwise, if the argument is in a PCS argument register, allocate an
1553 // appropriate scratch register and then move it out of the way.
1554 if (kPCSVarargs.IncludesAliasOf(args[i]) ||
1555 kPCSVarargsFP.IncludesAliasOf(args[i])) {
1556 if (args[i].IsRegister()) {
1557 Register old_arg = Register(args[i]);
1558 Register new_arg = temps.AcquireSameSizeAs(old_arg);
1559 Mov(new_arg, old_arg);
1560 args[i] = new_arg;
1561 } else {
1562 FPRegister old_arg = FPRegister(args[i]);
1563 FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
1564 Fmov(new_arg, old_arg);
1565 args[i] = new_arg;
1566 }
armvixlad96eda2013-06-14 11:42:37 +01001567 }
1568 }
1569
armvixl5799d6c2014-05-01 11:05:00 +01001570 // Do a second pass to move values into their final positions and perform any
1571 // conversions that may be required.
1572 for (int i = 0; i < arg_count; i++) {
1573 VIXL_ASSERT(pcs[i].type() == args[i].type());
1574 if (pcs[i].IsRegister()) {
1575 Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
1576 } else {
1577 VIXL_ASSERT(pcs[i].IsFPRegister());
1578 if (pcs[i].size() == args[i].size()) {
1579 Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
1580 } else {
1581 Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
1582 }
1583 }
armvixlad96eda2013-06-14 11:42:37 +01001584 }
1585
1586 // Load the format string into x0, as per the procedure-call standard.
1587 //
1588 // To make the code as portable as possible, the format string is encoded
1589 // directly in the instruction stream. It might be cleaner to encode it in a
1590 // literal pool, but since Printf is usually used for debugging, it is
1591 // beneficial for it to be minimally dependent on other features.
armvixl5799d6c2014-05-01 11:05:00 +01001592 temps.Exclude(x0);
armvixlad96eda2013-06-14 11:42:37 +01001593 Label format_address;
1594 Adr(x0, &format_address);
1595
1596 // Emit the format string directly in the instruction stream.
armvixlc68cb642014-09-25 18:49:30 +01001597 {
1598 BlockLiteralPoolScope scope(this);
1599 // Data emitted:
1600 // branch
1601 // strlen(format) + 1 (includes null termination)
1602 // padding to next instruction
1603 // unreachable
1604 EmissionCheckScope guard(
1605 this,
1606 AlignUp(strlen(format) + 1, kInstructionSize) + 2 * kInstructionSize);
armvixlad96eda2013-06-14 11:42:37 +01001607 Label after_data;
1608 B(&after_data);
1609 Bind(&format_address);
armvixlc68cb642014-09-25 18:49:30 +01001610 EmitString(format);
armvixlad96eda2013-06-14 11:42:37 +01001611 Unreachable();
1612 Bind(&after_data);
1613 }
1614
1615 // We don't pass any arguments on the stack, but we still need to align the C
1616 // stack pointer to a 16-byte boundary for PCS compliance.
1617 if (!sp.Is(StackPointer())) {
1618 Bic(sp, StackPointer(), 0xf);
1619 }
1620
1621 // Actually call printf. This part needs special handling for the simulator,
1622 // since the system printf function will use a different instruction set and
1623 // the procedure-call standard will not be compatible.
1624#ifdef USE_SIMULATOR
armvixlc68cb642014-09-25 18:49:30 +01001625 {
1626 InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
armvixlad96eda2013-06-14 11:42:37 +01001627 hlt(kPrintfOpcode);
armvixl5799d6c2014-05-01 11:05:00 +01001628 dc32(arg_count); // kPrintfArgCountOffset
1629
1630 // Determine the argument pattern.
1631 uint32_t arg_pattern_list = 0;
1632 for (int i = 0; i < arg_count; i++) {
1633 uint32_t arg_pattern;
1634 if (pcs[i].IsRegister()) {
1635 arg_pattern = pcs[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
1636 } else {
1637 VIXL_ASSERT(pcs[i].Is64Bits());
1638 arg_pattern = kPrintfArgD;
1639 }
1640 VIXL_ASSERT(arg_pattern < (1 << kPrintfArgPatternBits));
1641 arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
1642 }
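    // As a concrete example of this encoding, a two-argument call where
    // pcs[0] is a W register and pcs[1] is a D register produces
    // arg_pattern_list = kPrintfArgW | (kPrintfArgD << kPrintfArgPatternBits).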
1643 dc32(arg_pattern_list); // kPrintfArgPatternListOffset
armvixlad96eda2013-06-14 11:42:37 +01001644 }
1645#else
armvixlb0c8ae22014-03-21 14:03:59 +00001646 Register tmp = temps.AcquireX();
1647 Mov(tmp, reinterpret_cast<uintptr_t>(printf));
1648 Blr(tmp);
armvixlad96eda2013-06-14 11:42:37 +01001649#endif
1650}
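
// A minimal usage sketch (illustrative, assuming a MacroAssembler 'masm'):
// unlike Printf below, PrintfNoPreserve leaves the caller-saved registers and
// NZCV clobbered, so it is only appropriate where those are already dead.
//
//   masm.PrintfNoPreserve("x19 = %" PRId64 "\n", x19);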
1651
1652
1653void MacroAssembler::Printf(const char * format,
armvixl5799d6c2014-05-01 11:05:00 +01001654 CPURegister arg0,
1655 CPURegister arg1,
1656 CPURegister arg2,
1657 CPURegister arg3) {
1658 // We can only print sp if it is the current stack pointer.
1659 if (!sp.Is(StackPointer())) {
1660 VIXL_ASSERT(!sp.Aliases(arg0));
1661 VIXL_ASSERT(!sp.Aliases(arg1));
1662 VIXL_ASSERT(!sp.Aliases(arg2));
1663 VIXL_ASSERT(!sp.Aliases(arg3));
1664 }
1665
armvixlb0c8ae22014-03-21 14:03:59 +00001666 // Make sure that the macro assembler doesn't try to use any of our arguments
1667 // as scratch registers.
1668 UseScratchRegisterScope exclude_all(this);
1669 exclude_all.ExcludeAll();
1670
armvixlad96eda2013-06-14 11:42:37 +01001671 // Preserve all caller-saved registers as well as NZCV.
1672 // If sp is the stack pointer, PushCPURegList asserts that the size of each
1673 // list is a multiple of 16 bytes.
1674 PushCPURegList(kCallerSaved);
1675 PushCPURegList(kCallerSavedFP);
armvixlad96eda2013-06-14 11:42:37 +01001676
armvixlb0c8ae22014-03-21 14:03:59 +00001677 { UseScratchRegisterScope temps(this);
1678 // We can use caller-saved registers as scratch values (except for argN).
armvixl5799d6c2014-05-01 11:05:00 +01001679 temps.Include(kCallerSaved);
1680 temps.Include(kCallerSavedFP);
armvixlb0c8ae22014-03-21 14:03:59 +00001681 temps.Exclude(arg0, arg1, arg2, arg3);
armvixlad96eda2013-06-14 11:42:37 +01001682
armvixl5799d6c2014-05-01 11:05:00 +01001683 // If any of the arguments are the current stack pointer, allocate a new
1684 // register for them, and adjust the value to compensate for pushing the
1685 // caller-saved registers.
1686 bool arg0_sp = StackPointer().Aliases(arg0);
1687 bool arg1_sp = StackPointer().Aliases(arg1);
1688 bool arg2_sp = StackPointer().Aliases(arg2);
1689 bool arg3_sp = StackPointer().Aliases(arg3);
1690 if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
1691 // Allocate a register to hold the original stack pointer value, to pass
1692 // to PrintfNoPreserve as an argument.
1693 Register arg_sp = temps.AcquireX();
1694 Add(arg_sp, StackPointer(),
1695 kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes());
1696 if (arg0_sp) arg0 = Register(arg_sp.code(), arg0.size());
1697 if (arg1_sp) arg1 = Register(arg_sp.code(), arg1.size());
1698 if (arg2_sp) arg2 = Register(arg_sp.code(), arg2.size());
1699 if (arg3_sp) arg3 = Register(arg_sp.code(), arg3.size());
1700 }
1701
armvixlb0c8ae22014-03-21 14:03:59 +00001702 // Preserve NZCV.
1703 Register tmp = temps.AcquireX();
1704 Mrs(tmp, NZCV);
1705 Push(tmp, xzr);
armvixl5799d6c2014-05-01 11:05:00 +01001706 temps.Release(tmp);
armvixlb0c8ae22014-03-21 14:03:59 +00001707
1708 PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
1709
armvixl5799d6c2014-05-01 11:05:00 +01001710 // Restore NZCV.
1711 tmp = temps.AcquireX();
armvixlb0c8ae22014-03-21 14:03:59 +00001712 Pop(xzr, tmp);
1713 Msr(NZCV, tmp);
armvixl5799d6c2014-05-01 11:05:00 +01001714 temps.Release(tmp);
armvixlb0c8ae22014-03-21 14:03:59 +00001715 }
1716
armvixlad96eda2013-06-14 11:42:37 +01001717 PopCPURegList(kCallerSavedFP);
1718 PopCPURegList(kCallerSaved);
1719}
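
// A minimal usage sketch (illustrative, assuming a MacroAssembler 'masm'):
// Printf preserves all registers and NZCV, and the arguments follow normal
// printf conventions, with W and X registers matching integer specifiers and
// D registers matching floating-point specifiers.
//
//   masm.Printf("x0 = 0x%" PRIx64 ", d0 = %f\n", x0, d0);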
1720
1721void MacroAssembler::Trace(TraceParameters parameters, TraceCommand command) {
armvixlb0c8ae22014-03-21 14:03:59 +00001722 VIXL_ASSERT(allow_macro_instructions_);
armvixlad96eda2013-06-14 11:42:37 +01001723
1724#ifdef USE_SIMULATOR
1725 // The arguments to the trace pseudo instruction need to be contiguous in
1726 // memory, so make sure we don't try to emit a literal pool.
1727 InstructionAccurateScope scope(this, kTraceLength / kInstructionSize);
1728
1729 Label start;
1730 bind(&start);
1731
armvixl330dc712014-11-25 10:38:32 +00001732 // Refer to simulator-a64.h for a description of the marker and its
armvixlad96eda2013-06-14 11:42:37 +01001733 // arguments.
1734 hlt(kTraceOpcode);
1735
armvixlb0c8ae22014-03-21 14:03:59 +00001736 VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceParamsOffset);
armvixlad96eda2013-06-14 11:42:37 +01001737 dc32(parameters);
1738
armvixlb0c8ae22014-03-21 14:03:59 +00001739 VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceCommandOffset);
armvixlad96eda2013-06-14 11:42:37 +01001740 dc32(command);
1741#else
1742 // Emit nothing on real hardware.
1743 USE(parameters);
1744 USE(command);
1745#endif
1746}
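
// A minimal usage sketch (illustrative; the TraceParameters and TraceCommand
// enumerator names used here are assumed, and the pseudo instruction only
// takes effect under the simulator): tracing can be enabled around a region of
// interest and disabled afterwards.
//
//   masm.Trace(LOG_ALL, TRACE_ENABLE);
//   // ... code whose simulated execution should be traced ...
//   masm.Trace(LOG_ALL, TRACE_DISABLE);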
1747
1748
1749void MacroAssembler::Log(TraceParameters parameters) {
armvixlb0c8ae22014-03-21 14:03:59 +00001750 VIXL_ASSERT(allow_macro_instructions_);
armvixlad96eda2013-06-14 11:42:37 +01001751
1752#ifdef USE_SIMULATOR
1753 // The arguments to the log pseudo instruction need to be contiguous in
1754 // memory, so make sure we don't try to emit a literal pool.
1755 InstructionAccurateScope scope(this, kLogLength / kInstructionSize);
1756
1757 Label start;
1758 bind(&start);
1759
armvixl330dc712014-11-25 10:38:32 +00001760 // Refer to simulator-a64.h for a description of the marker and its
armvixlad96eda2013-06-14 11:42:37 +01001761 // arguments.
1762 hlt(kLogOpcode);
1763
armvixlb0c8ae22014-03-21 14:03:59 +00001764 VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kLogParamsOffset);
armvixlad96eda2013-06-14 11:42:37 +01001765 dc32(parameters);
1766#else
1767 // Emit nothing on real hardware.
1768 USE(parameters);
1769#endif
1770}
1771
armvixl578645f2013-08-15 17:21:42 +01001772
1773void MacroAssembler::EnableInstrumentation() {
armvixlb0c8ae22014-03-21 14:03:59 +00001774 VIXL_ASSERT(!isprint(InstrumentStateEnable));
armvixl578645f2013-08-15 17:21:42 +01001775 InstructionAccurateScope scope(this, 1);
1776 movn(xzr, InstrumentStateEnable);
1777}
1778
1779
1780void MacroAssembler::DisableInstrumentation() {
armvixlb0c8ae22014-03-21 14:03:59 +00001781 VIXL_ASSERT(!isprint(InstrumentStateDisable));
armvixl578645f2013-08-15 17:21:42 +01001782 InstructionAccurateScope scope(this, 1);
1783 movn(xzr, InstrumentStateDisable);
1784}
1785
1786
1787void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
armvixlb0c8ae22014-03-21 14:03:59 +00001788 VIXL_ASSERT(strlen(marker_name) == 2);
armvixl578645f2013-08-15 17:21:42 +01001789
1790 // We allow only printable characters in the marker names. Unprintable
1791 // characters are reserved for controlling features of the instrumentation.
armvixlb0c8ae22014-03-21 14:03:59 +00001792 VIXL_ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));
armvixl578645f2013-08-15 17:21:42 +01001793
1794 InstructionAccurateScope scope(this, 1);
1795 movn(xzr, (marker_name[1] << 8) | marker_name[0]);
1796}
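
// A minimal usage sketch (illustrative, assuming a MacroAssembler 'masm'
// running with the instrumentation feature; "BM" is a hypothetical
// two-character marker name): instrumentation can be annotated and toggled
// around a region of interest.
//
//   masm.EnableInstrumentation();
//   masm.AnnotateInstrumentation("BM");
//   // ... instrumented code ...
//   masm.DisableInstrumentation();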
1797
armvixlb0c8ae22014-03-21 14:03:59 +00001798
armvixl330dc712014-11-25 10:38:32 +00001799void UseScratchRegisterScope::Open(MacroAssembler* masm) {
1800 VIXL_ASSERT(!initialised_);
1801 available_ = masm->TmpList();
1802 availablefp_ = masm->FPTmpList();
1803 old_available_ = available_->list();
1804 old_availablefp_ = availablefp_->list();
1805 VIXL_ASSERT(available_->type() == CPURegister::kRegister);
1806 VIXL_ASSERT(availablefp_->type() == CPURegister::kFPRegister);
1807#ifdef VIXL_DEBUG
1808 initialised_ = true;
1809#endif
1810}
1811
1812
1813void UseScratchRegisterScope::Close() {
1814 if (available_) {
1815 available_->set_list(old_available_);
1816 available_ = NULL;
1817 }
1818 if (availablefp_) {
1819 availablefp_->set_list(old_availablefp_);
1820 availablefp_ = NULL;
1821 }
1822#ifdef VIXL_DEBUG
1823 initialised_ = false;
1824#endif
1825}
1826
1827
1828UseScratchRegisterScope::UseScratchRegisterScope(MacroAssembler* masm) {
1829#ifdef VIXL_DEBUG
1830 initialised_ = false;
1831#endif
1832 Open(masm);
1833}
1834
1835// This allows deferred (and optional) initialisation of the scope.
1836UseScratchRegisterScope::UseScratchRegisterScope()
1837 : available_(NULL), availablefp_(NULL),
1838 old_available_(0), old_availablefp_(0) {
1839#ifdef VIXL_DEBUG
1840 initialised_ = false;
1841#endif
1842}
1843
armvixlb0c8ae22014-03-21 14:03:59 +00001844UseScratchRegisterScope::~UseScratchRegisterScope() {
armvixl330dc712014-11-25 10:38:32 +00001845 Close();
armvixlb0c8ae22014-03-21 14:03:59 +00001846}
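
// A minimal usage sketch (illustrative, assuming a MacroAssembler 'masm';
// AcquireX and AcquireD are assumed to be the header's convenience acquirers):
// a scope temporarily borrows registers from the MacroAssembler's scratch
// lists and restores the original lists when it goes out of scope.
//
//   {
//     UseScratchRegisterScope temps(&masm);
//     Register scratch = temps.AcquireX();       // Borrow an X register.
//     FPRegister fp_scratch = temps.AcquireD();   // Borrow a D register.
//     // ... use scratch and fp_scratch ...
//   }  // The original scratch lists are restored here.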
1847
1848
armvixl5799d6c2014-05-01 11:05:00 +01001849bool UseScratchRegisterScope::IsAvailable(const CPURegister& reg) const {
1850 return available_->IncludesAliasOf(reg) || availablefp_->IncludesAliasOf(reg);
1851}
1852
1853
armvixlb0c8ae22014-03-21 14:03:59 +00001854Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
1855 int code = AcquireNextAvailable(available_).code();
armvixl4a102ba2014-07-14 09:02:40 +01001856 return Register(code, reg.size());
armvixlb0c8ae22014-03-21 14:03:59 +00001857}
1858
1859
1860FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
1861 int code = AcquireNextAvailable(availablefp_).code();
armvixl4a102ba2014-07-14 09:02:40 +01001862 return FPRegister(code, reg.size());
armvixlb0c8ae22014-03-21 14:03:59 +00001863}
1864
1865
1866void UseScratchRegisterScope::Release(const CPURegister& reg) {
armvixl330dc712014-11-25 10:38:32 +00001867 VIXL_ASSERT(initialised_);
armvixlb0c8ae22014-03-21 14:03:59 +00001868 if (reg.IsRegister()) {
1869 ReleaseByCode(available_, reg.code());
1870 } else if (reg.IsFPRegister()) {
1871 ReleaseByCode(availablefp_, reg.code());
1872 } else {
1873 VIXL_ASSERT(reg.IsNone());
1874 }
1875}
1876
1877
armvixl5799d6c2014-05-01 11:05:00 +01001878void UseScratchRegisterScope::Include(const CPURegList& list) {
armvixl330dc712014-11-25 10:38:32 +00001879 VIXL_ASSERT(initialised_);
armvixl5799d6c2014-05-01 11:05:00 +01001880 if (list.type() == CPURegister::kRegister) {
1881 // Make sure that neither sp nor xzr is included in the list.
1882 IncludeByRegList(available_, list.list() & ~(xzr.Bit() | sp.Bit()));
1883 } else {
1884 VIXL_ASSERT(list.type() == CPURegister::kFPRegister);
1885 IncludeByRegList(availablefp_, list.list());
1886 }
1887}
1888
1889
armvixlb0c8ae22014-03-21 14:03:59 +00001890void UseScratchRegisterScope::Include(const Register& reg1,
1891 const Register& reg2,
1892 const Register& reg3,
1893 const Register& reg4) {
armvixl330dc712014-11-25 10:38:32 +00001894 VIXL_ASSERT(initialised_);
armvixlb0c8ae22014-03-21 14:03:59 +00001895 RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
1896 // Make sure that neither sp nor xzr is included in the list.
1897 include &= ~(xzr.Bit() | sp.Bit());
1898
1899 IncludeByRegList(available_, include);
1900}
1901
1902
1903void UseScratchRegisterScope::Include(const FPRegister& reg1,
1904 const FPRegister& reg2,
1905 const FPRegister& reg3,
1906 const FPRegister& reg4) {
1907 RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
1908 IncludeByRegList(availablefp_, include);
1909}
1910
1911
armvixl5799d6c2014-05-01 11:05:00 +01001912void UseScratchRegisterScope::Exclude(const CPURegList& list) {
1913 if (list.type() == CPURegister::kRegister) {
1914 ExcludeByRegList(available_, list.list());
1915 } else {
1916 VIXL_ASSERT(list.type() == CPURegister::kFPRegister);
1917 ExcludeByRegList(availablefp_, list.list());
1918 }
1919}
1920
1921
armvixlb0c8ae22014-03-21 14:03:59 +00001922void UseScratchRegisterScope::Exclude(const Register& reg1,
1923 const Register& reg2,
1924 const Register& reg3,
1925 const Register& reg4) {
1926 RegList exclude = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
1927 ExcludeByRegList(available_, exclude);
1928}
1929
1930
1931void UseScratchRegisterScope::Exclude(const FPRegister& reg1,
1932 const FPRegister& reg2,
1933 const FPRegister& reg3,
1934 const FPRegister& reg4) {
1935 RegList excludefp = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
1936 ExcludeByRegList(availablefp_, excludefp);
1937}
1938
1939
1940void UseScratchRegisterScope::Exclude(const CPURegister& reg1,
1941 const CPURegister& reg2,
1942 const CPURegister& reg3,
1943 const CPURegister& reg4) {
1944 RegList exclude = 0;
1945 RegList excludefp = 0;
1946
1947 const CPURegister regs[] = {reg1, reg2, reg3, reg4};
1948
1949 for (unsigned i = 0; i < (sizeof(regs) / sizeof(regs[0])); i++) {
1950 if (regs[i].IsRegister()) {
1951 exclude |= regs[i].Bit();
1952 } else if (regs[i].IsFPRegister()) {
1953 excludefp |= regs[i].Bit();
1954 } else {
1955 VIXL_ASSERT(regs[i].IsNone());
1956 }
1957 }
1958
1959 ExcludeByRegList(available_, exclude);
1960 ExcludeByRegList(availablefp_, excludefp);
1961}
1962
1963
1964void UseScratchRegisterScope::ExcludeAll() {
1965 ExcludeByRegList(available_, available_->list());
1966 ExcludeByRegList(availablefp_, availablefp_->list());
1967}
1968
1969
1970CPURegister UseScratchRegisterScope::AcquireNextAvailable(
1971 CPURegList* available) {
1972 VIXL_CHECK(!available->IsEmpty());
1973 CPURegister result = available->PopLowestIndex();
1974 VIXL_ASSERT(!AreAliased(result, xzr, sp));
1975 return result;
1976}
1977
1978
1979void UseScratchRegisterScope::ReleaseByCode(CPURegList* available, int code) {
1980 ReleaseByRegList(available, static_cast<RegList>(1) << code);
1981}
1982
1983
1984void UseScratchRegisterScope::ReleaseByRegList(CPURegList* available,
1985 RegList regs) {
1986 available->set_list(available->list() | regs);
1987}
1988
1989
1990void UseScratchRegisterScope::IncludeByRegList(CPURegList* available,
1991 RegList regs) {
1992 available->set_list(available->list() | regs);
1993}
1994
1995
1996void UseScratchRegisterScope::ExcludeByRegList(CPURegList* available,
1997 RegList exclude) {
1998 available->set_list(available->list() & ~exclude);
1999}
2000
armvixlad96eda2013-06-14 11:42:37 +01002001} // namespace vixl