// Copyright 2013, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "a64/macro-assembler-a64.h"

namespace vixl {

void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
  VIXL_ASSERT((reg.Is(NoReg) || (type >= kBranchTypeFirstUsingReg)) &&
              ((bit == -1) || (type >= kBranchTypeFirstUsingBit)));
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    B(static_cast<Condition>(type), label);
  } else {
    switch (type) {
      case always:        B(label);              break;
      case never:         break;
      case reg_zero:      Cbz(reg, label);       break;
      case reg_not_zero:  Cbnz(reg, label);      break;
      case reg_bit_clear: Tbz(reg, bit, label);  break;
      case reg_bit_set:   Tbnz(reg, bit, label); break;
      default:
        VIXL_UNREACHABLE();
    }
  }
}
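
// Illustrative expansions of the BranchType dispatch (a sketch):
//   B(&done, reg_not_zero, x2)   emits  cbnz x2, done
//   B(&done, reg_bit_set, x2, 3) emits  tbnz x2, #3, done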

void MacroAssembler::And(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, AND);
}


void MacroAssembler::Ands(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, ANDS);
}


void MacroAssembler::Tst(const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Ands(AppropriateZeroRegFor(rn), rn, operand);
}


void MacroAssembler::Bic(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, BIC);
}


void MacroAssembler::Bics(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, BICS);
}


void MacroAssembler::Orr(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, ORR);
}


void MacroAssembler::Orn(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, ORN);
}


void MacroAssembler::Eor(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, EOR);
}


void MacroAssembler::Eon(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, EON);
}

void MacroAssembler::LogicalMacro(const Register& rd,
                                  const Register& rn,
                                  const Operand& operand,
                                  LogicalOp op) {
  UseScratchRegisterScope temps(this);

  if (operand.IsImmediate()) {
    int64_t immediate = operand.immediate();
    unsigned reg_size = rd.size();
    VIXL_ASSERT(rd.Is64Bits() || is_uint32(immediate));

    // If the operation is NOT, invert the operation and immediate.
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = ~immediate;
      if (rd.Is32Bits()) {
        immediate &= kWRegMask;
      }
    }

    // Special cases for all set or all clear immediates.
    if (immediate == 0) {
      switch (op) {
        case AND:
          Mov(rd, 0);
          return;
        case ORR:  // Fall through.
        case EOR:
          Mov(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          break;
        default:
          VIXL_UNREACHABLE();
      }
    } else if ((rd.Is64Bits() && (immediate == -1)) ||
               (rd.Is32Bits() && (immediate == 0xffffffff))) {
      switch (op) {
        case AND:
          Mov(rd, rn);
          return;
        case ORR:
          Mov(rd, immediate);
          return;
        case EOR:
          Mvn(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          break;
        default:
          VIXL_UNREACHABLE();
      }
    }

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // Immediate can't be encoded: synthesize using move immediate.
      Register temp = temps.AcquireSameSizeAs(rn);
      Mov(temp, immediate);
      if (rd.Is(sp)) {
        // If rd is the stack pointer we cannot use it as the destination
        // register so we use the temp register as an intermediate again.
        Logical(temp, rn, Operand(temp), op);
        Mov(sp, temp);
      } else {
        Logical(rd, rn, Operand(temp), op);
      }
    }
  } else if (operand.IsExtendedRegister()) {
    VIXL_ASSERT(operand.reg().size() <= rd.size());
    // Add/sub extended supports shift <= 4. We want to support exactly the
    // same modes here.
    VIXL_ASSERT(operand.shift_amount() <= 4);
    VIXL_ASSERT(operand.reg().Is64Bits() ||
                ((operand.extend() != UXTX) && (operand.extend() != SXTX)));

    temps.Exclude(operand.reg());
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    Logical(rd, rn, Operand(temp), op);
  } else {
    // The operand can be encoded in the instruction.
    VIXL_ASSERT(operand.IsShiftedRegister());
    Logical(rd, rn, operand, op);
  }
}
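
// Worked example (a sketch; x16/ip0 as the scratch register is an assumption
// about the default scratch list):
//   And(x0, x1, 0xff00ff00ff00ff00)  // valid bitmask immediate, encodes
//                                    // directly as a logical immediate.
//   And(x0, x1, 0x1234)              // not encodable; expands to roughly:
//     mov x16, #0x1234
//     and x0, x1, x16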


void MacroAssembler::Mov(const Register& rd,
                         const Operand& operand,
                         DiscardMoveMode discard_mode) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mov(rd, operand.immediate());
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Emit a shift instruction if moving a shifted register. This operation
    // could also be achieved using an orr instruction (like orn used by Mvn),
    // but using a shift instruction makes the disassembly clearer.
    EmitShift(rd, operand.reg(), operand.shift(), operand.shift_amount());
  } else if (operand.IsExtendedRegister()) {
    // Emit an extend instruction if moving an extended register. This handles
    // extend with post-shift operations, too.
    EmitExtendShift(rd, operand.reg(), operand.extend(),
                    operand.shift_amount());
  } else {
    // Otherwise, emit a register move only if the registers are distinct, or
    // if they are not X registers.
    //
    // Note that mov(w0, w0) is not a no-op because it clears the top word of
    // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
    // registers is not required to clear the top word of the X register. In
    // this case, the instruction is discarded.
    //
    // If the sp is an operand, add #0 is emitted, otherwise, orr #0.
    if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
                                  (discard_mode == kDontDiscardForSameWReg))) {
      mov(rd, operand.reg());
    }
  }
}
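
// The kDiscardForSameWReg distinction in practice (a sketch):
//   Mov(w0, w0)                       emits  mov w0, w0  (zeroes x0[63:32])
//   Mov(w0, w0, kDiscardForSameWReg)  emits nothing.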


void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mvn(rd, operand.immediate());
  } else if (operand.IsExtendedRegister()) {
    UseScratchRegisterScope temps(this);
    temps.Exclude(operand.reg());

    // Emit two instructions for the extend case. This differs from Mov, as
    // the extend and invert can't be achieved in one instruction.
    Register temp = temps.AcquireSameSizeAs(rd);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    mvn(rd, Operand(temp));
  } else {
    // Otherwise, register and shifted register cases can be handled by the
    // assembler directly, using orn.
    mvn(rd, operand);
  }
}

void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
  VIXL_ASSERT(allow_macro_instructions_);
  VIXL_ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());

  // Immediates on AArch64 can be produced using an initial value, and zero to
  // three move keep operations.
  //
  // Initial values can be generated with:
  //  1. 64-bit move zero (movz).
  //  2. 32-bit move inverted (movn).
  //  3. 64-bit move inverted.
  //  4. 32-bit orr immediate.
  //  5. 64-bit orr immediate.
  // Move-keep may then be used to modify each of the 16-bit half words.
  //
  // The code below supports all five initial value generators, and
  // applying move-keep operations to move-zero and move-inverted initial
  // values.

  unsigned reg_size = rd.size();
  unsigned n, imm_s, imm_r;
  if (IsImmMovz(imm, reg_size) && !rd.IsSP()) {
    // Immediate can be represented in a move zero instruction. Movz can't
    // write to the stack pointer.
    movz(rd, imm);
  } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) {
    // Immediate can be represented in a move negative instruction. Movn can't
    // write to the stack pointer.
    movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask));
  } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
    // Immediate can be represented in a logical orr instruction.
    VIXL_ASSERT(!rd.IsZero());
    LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR);
  } else {
    // Generic immediate case. Imm will be represented by
    //   [imm3, imm2, imm1, imm0], where each imm is 16 bits.
    // A move-zero or move-inverted is generated for the first non-zero or
    // non-0xffff immX, and a move-keep for subsequent non-zero immX.

    uint64_t ignored_halfword = 0;
    bool invert_move = false;
    // If the number of 0xffff halfwords is greater than the number of 0x0000
    // halfwords, it's more efficient to use move-inverted.
    if (CountClearHalfWords(~imm, reg_size) >
        CountClearHalfWords(imm, reg_size)) {
      ignored_halfword = 0xffff;
      invert_move = true;
    }

    // Mov instructions can't move values into the stack pointer, so set up a
    // temporary register, if needed.
    UseScratchRegisterScope temps(this);
    Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;

    // Iterate through the halfwords. Use movn/movz for the first non-ignored
    // halfword, and movk for subsequent halfwords.
    VIXL_ASSERT((reg_size % 16) == 0);
    bool first_mov_done = false;
    for (unsigned i = 0; i < (temp.size() / 16); i++) {
      uint64_t imm16 = (imm >> (16 * i)) & 0xffff;
      if (imm16 != ignored_halfword) {
        if (!first_mov_done) {
          if (invert_move) {
            movn(temp, ~imm16 & 0xffff, 16 * i);
          } else {
            movz(temp, imm16, 16 * i);
          }
          first_mov_done = true;
        } else {
          // Construct a wider constant.
          movk(temp, imm16, 16 * i);
        }
      }
    }

    VIXL_ASSERT(first_mov_done);

    // Move the temporary if the original destination register was the stack
    // pointer.
    if (rd.IsSP()) {
      mov(rd, temp);
    }
  }
}
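
// Expansion examples for the main paths (a sketch):
//   Mov(x0, 0x0000cafe00000000)  ->  movz x0, #0xcafe, lsl #32
//   Mov(x0, 0xffffcafeffffffff)  ->  movn x0, #0x3501, lsl #32  (0x3501 == ~0xcafe)
//   Mov(x0, 0x12345678)          ->  movz x0, #0x5678
//                                    movk x0, #0x1234, lsl #16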


unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
  VIXL_ASSERT((reg_size % 8) == 0);
  int count = 0;
  for (unsigned i = 0; i < (reg_size / 16); i++) {
    if ((imm & 0xffff) == 0) {
      count++;
    }
    imm >>= 16;
  }
  return count;
}
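
// For example: CountClearHalfWords(0x0000cafe00000000, kXRegSize) == 3,
// since three of the four 16-bit halfwords are 0x0000.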


// The movz instruction can generate immediates containing an arbitrary 16-bit
// value, with remaining bits clear, eg. 0x00001234, 0x0000123400000000.
bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
  VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
  return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}


// The movn instruction can generate immediates containing an arbitrary 16-bit
// value, with remaining bits set, eg. 0xffff1234, 0xffff1234ffffffff.
bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
  return IsImmMovz(~imm, reg_size);
}
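
// Worked examples: IsImmMovz(0x0000123400000000, kXRegSize) is true (all but
// one halfword clear); IsImmMovn(0xffff1234ffffffff, kXRegSize) is true
// because ~imm == 0x0000edcb00000000 is movz-compatible.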


void MacroAssembler::Ccmp(const Register& rn,
                          const Operand& operand,
                          StatusFlags nzcv,
                          Condition cond) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
  } else {
    ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
  }
}


void MacroAssembler::Ccmn(const Register& rn,
                          const Operand& operand,
                          StatusFlags nzcv,
                          Condition cond) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
  } else {
    ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
  }
}
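
// For instance (a sketch): Ccmp(x0, -42, NoFlag, eq) is emitted as
//   ccmn x0, #42, #nzcv, eq
// since comparing against -42 sets the same flags as conditionally adding 42.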


void MacroAssembler::ConditionalCompareMacro(const Register& rn,
                                             const Operand& operand,
                                             StatusFlags nzcv,
                                             Condition cond,
                                             ConditionalCompareOp op) {
  VIXL_ASSERT((cond != al) && (cond != nv));
  if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
      (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
    // The immediate can be encoded in the instruction, or the operand is an
    // unshifted register: call the assembler.
    ConditionalCompare(rn, operand, nzcv, cond, op);
  } else {
    UseScratchRegisterScope temps(this);
    // The operand isn't directly supported by the instruction: perform the
    // operation on a temporary register.
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    ConditionalCompare(rn, temp, nzcv, cond, op);
  }
}


void MacroAssembler::Csel(const Register& rd,
                          const Register& rn,
                          const Operand& operand,
                          Condition cond) {
  VIXL_ASSERT(allow_macro_instructions_);
  VIXL_ASSERT(!rd.IsZero());
  VIXL_ASSERT(!rn.IsZero());
  VIXL_ASSERT((cond != al) && (cond != nv));
  if (operand.IsImmediate()) {
    // Immediate argument. Handle special cases of 0, 1 and -1 using zero
    // register.
    int64_t imm = operand.immediate();
    Register zr = AppropriateZeroRegFor(rn);
    if (imm == 0) {
      csel(rd, rn, zr, cond);
    } else if (imm == 1) {
      csinc(rd, rn, zr, cond);
    } else if (imm == -1) {
      csinv(rd, rn, zr, cond);
    } else {
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireSameSizeAs(rn);
      Mov(temp, operand.immediate());
      csel(rd, rn, temp, cond);
    }
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
    // Unshifted register argument.
    csel(rd, rn, operand.reg(), cond);
  } else {
    // All other arguments.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    csel(rd, rn, temp, cond);
  }
}
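
// The special cases map onto the conditional select family (a sketch):
//   Csel(x0, x1, 0, eq)   ->  csel x0, x1, xzr, eq
//   Csel(x0, x1, 1, eq)   ->  csinc x0, x1, xzr, eq  (xzr + 1 == 1)
//   Csel(x0, x1, -1, eq)  ->  csinv x0, x1, xzr, eq  (~xzr == -1)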


void MacroAssembler::Add(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, SUB);
  } else {
    AddSubMacro(rd, rn, operand, LeaveFlags, ADD);
  }
}


void MacroAssembler::Adds(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    AddSubMacro(rd, rn, -operand.immediate(), SetFlags, SUB);
  } else {
    AddSubMacro(rd, rn, operand, SetFlags, ADD);
  }
}


void MacroAssembler::Sub(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, ADD);
  } else {
    AddSubMacro(rd, rn, operand, LeaveFlags, SUB);
  }
}


void MacroAssembler::Subs(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    AddSubMacro(rd, rn, -operand.immediate(), SetFlags, ADD);
  } else {
    AddSubMacro(rd, rn, operand, SetFlags, SUB);
  }
}
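
// For example: Add(x0, x1, -16) is emitted as "sub x0, x1, #16", and
// Subs(x0, x1, -16) as "adds x0, x1, #16", because add/sub immediates are
// encoded as unsigned 12-bit values (optionally shifted left by 12).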


void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Adds(AppropriateZeroRegFor(rn), rn, operand);
}


void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Subs(AppropriateZeroRegFor(rn), rn, operand);
}


void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (value != 0.0) {
    UseScratchRegisterScope temps(this);
    FPRegister tmp = temps.AcquireSameSizeAs(fn);
    Fmov(tmp, value);
    fcmp(fn, tmp);
  } else {
    fcmp(fn, value);
  }
}


void MacroAssembler::Fmov(FPRegister fd, double imm) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (fd.Is32Bits()) {
    Fmov(fd, static_cast<float>(imm));
    return;
  }

  VIXL_ASSERT(fd.Is64Bits());
  if (IsImmFP64(imm)) {
    fmov(fd, imm);
  } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
    fmov(fd, xzr);
  } else {
    ldr(fd, imm);
  }
}


void MacroAssembler::Fmov(FPRegister fd, float imm) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (fd.Is64Bits()) {
    Fmov(fd, static_cast<double>(imm));
    return;
  }

  VIXL_ASSERT(fd.Is32Bits());
  if (IsImmFP32(imm)) {
    fmov(fd, imm);
  } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
    fmov(fd, wzr);
  } else {
    ldr(fd, imm);
  }
}
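
// Illustration (a sketch): Fmov(d0, 1.0) fits the encodable FP immediate
// form; Fmov(d0, +0.0) maps onto "fmov d0, xzr"; other values (e.g. 0.1)
// fall back to a literal load. Note that -0.0 fails the copysign() check,
// so its sign bit is preserved via the literal path rather than xzr.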


void MacroAssembler::Neg(const Register& rd,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate()) {
    Mov(rd, -operand.immediate());
  } else {
    Sub(rd, AppropriateZeroRegFor(rd), operand);
  }
}


void MacroAssembler::Negs(const Register& rd,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Subs(rd, AppropriateZeroRegFor(rd), operand);
}


void MacroAssembler::AddSubMacro(const Register& rd,
                                 const Register& rn,
                                 const Operand& operand,
                                 FlagsUpdate S,
                                 AddSubOp op) {
  if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
      (S == LeaveFlags)) {
    // The instruction would be a nop. Avoid generating useless code.
    return;
  }

  if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
      (rn.IsZero() && !operand.IsShiftedRegister()) ||
      (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    AddSub(rd, rn, temp, S, op);
  } else {
    AddSub(rd, rn, operand, S, op);
  }
}
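
// Example expansion (a sketch; x16 as scratch is an assumption):
//   Add(x0, x1, 0x123456)  // not a 12-bit (optionally shifted) immediate
// becomes approximately:
//   movz x16, #0x3456
//   movk x16, #0x12, lsl #16
//   add  x0, x1, x16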


void MacroAssembler::Adc(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
}


void MacroAssembler::Adcs(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
}


void MacroAssembler::Sbc(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
}


void MacroAssembler::Sbcs(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
}


void MacroAssembler::Ngc(const Register& rd,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Register zr = AppropriateZeroRegFor(rd);
  Sbc(rd, zr, operand);
}


void MacroAssembler::Ngcs(const Register& rd,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Register zr = AppropriateZeroRegFor(rd);
  Sbcs(rd, zr, operand);
}


void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
                                          const Register& rn,
                                          const Operand& operand,
                                          FlagsUpdate S,
                                          AddSubWithCarryOp op) {
  VIXL_ASSERT(rd.size() == rn.size());
  UseScratchRegisterScope temps(this);

  if (operand.IsImmediate() ||
      (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    // Add/sub with carry (immediate or ROR shifted register.)
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    AddSubWithCarry(rd, rn, Operand(temp), S, op);
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Add/sub with carry (shifted register).
    VIXL_ASSERT(operand.reg().size() == rd.size());
    VIXL_ASSERT(operand.shift() != ROR);
    VIXL_ASSERT(is_uintn(rd.size() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2,
                         operand.shift_amount()));
    temps.Exclude(operand.reg());
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
    AddSubWithCarry(rd, rn, Operand(temp), S, op);
  } else if (operand.IsExtendedRegister()) {
    // Add/sub with carry (extended register).
    VIXL_ASSERT(operand.reg().size() <= rd.size());
    // Add/sub extended supports a shift <= 4. We want to support exactly the
    // same modes.
    VIXL_ASSERT(operand.shift_amount() <= 4);
    VIXL_ASSERT(operand.reg().Is64Bits() ||
                ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    temps.Exclude(operand.reg());
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    AddSubWithCarry(rd, rn, Operand(temp), S, op);
  } else {
    // The addressing mode is directly supported by the instruction.
    AddSubWithCarry(rd, rn, operand, S, op);
  }
}


#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP)                         \
void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) {  \
  LoadStoreMacro(REG, addr, OP);                                      \
}
LS_MACRO_LIST(DEFINE_FUNCTION)
#undef DEFINE_FUNCTION

void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
                                    const MemOperand& addr,
                                    LoadStoreOp op) {
  int64_t offset = addr.offset();
  LSDataSize size = CalcLSDataSize(op);

  // Check if an immediate offset fits in the immediate field of the
  // appropriate instruction. If not, emit two instructions to perform
  // the operation.
  if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
      !IsImmLSUnscaled(offset)) {
    // Immediate offset that can't be encoded using unsigned or unscaled
    // addressing modes.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(addr.base());
    Mov(temp, addr.offset());
    LoadStore(rt, MemOperand(addr.base(), temp), op);
  } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
    // Post-index beyond unscaled addressing range.
    LoadStore(rt, MemOperand(addr.base()), op);
    Add(addr.base(), addr.base(), Operand(offset));
  } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
    // Pre-index beyond unscaled addressing range.
    Add(addr.base(), addr.base(), Operand(offset));
    LoadStore(rt, MemOperand(addr.base()), op);
  } else {
    // Encodable in one load/store instruction.
    LoadStore(rt, addr, op);
  }
}
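
// For instance (a sketch; x16 as scratch is an assumption):
//   Ldr(x0, MemOperand(x1, 8))        // encodable, emitted directly.
//   Ldr(x0, MemOperand(x1, 1 << 20))  // offset out of range; becomes:
//     mov x16, #0x100000
//     ldr x0, [x1, x16]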


void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
                          const CPURegister& src2, const CPURegister& src3) {
  VIXL_ASSERT(allow_macro_instructions_);
  VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
  VIXL_ASSERT(src0.IsValid());

  int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
  int size = src0.SizeInBytes();

  PrepareForPush(count, size);
  PushHelper(count, size, src0, src1, src2, src3);
}


void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
                         const CPURegister& dst2, const CPURegister& dst3) {
  // It is not valid to pop into the same register more than once in one
  // instruction, not even into the zero register.
  VIXL_ASSERT(allow_macro_instructions_);
  VIXL_ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
  VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  VIXL_ASSERT(dst0.IsValid());

  int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
  int size = dst0.SizeInBytes();

  PrepareForPop(count, size);
  PopHelper(count, size, dst0, dst1, dst2, dst3);
}


void MacroAssembler::PushCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  PrepareForPush(registers.Count(), size);
  // Push up to four registers at a time because if the current stack pointer
  // is sp and reg_size is 32, registers must be pushed in blocks of four in
  // order to maintain the 16-byte alignment for sp.
  VIXL_ASSERT(allow_macro_instructions_);
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& src0 = registers.PopHighestIndex();
    const CPURegister& src1 = registers.PopHighestIndex();
    const CPURegister& src2 = registers.PopHighestIndex();
    const CPURegister& src3 = registers.PopHighestIndex();
    int count = count_before - registers.Count();
    PushHelper(count, size, src0, src1, src2, src3);
  }
}


void MacroAssembler::PopCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  PrepareForPop(registers.Count(), size);
  // Pop up to four registers at a time because if the current stack pointer
  // is sp and reg_size is 32, registers must be popped in blocks of four in
  // order to maintain the 16-byte alignment for sp.
  VIXL_ASSERT(allow_macro_instructions_);
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    const CPURegister& dst2 = registers.PopLowestIndex();
    const CPURegister& dst3 = registers.PopLowestIndex();
    int count = count_before - registers.Count();
    PopHelper(count, size, dst0, dst1, dst2, dst3);
  }
}


void MacroAssembler::PushMultipleTimes(int count, Register src) {
  VIXL_ASSERT(allow_macro_instructions_);
  int size = src.SizeInBytes();

  PrepareForPush(count, size);
  // Push up to four registers at a time if possible because if the current
  // stack pointer is sp and the register size is 32, registers must be pushed
  // in blocks of four in order to maintain the 16-byte alignment for sp.
  while (count >= 4) {
    PushHelper(4, size, src, src, src, src);
    count -= 4;
  }
  if (count >= 2) {
    PushHelper(2, size, src, src, NoReg, NoReg);
    count -= 2;
  }
  if (count == 1) {
    PushHelper(1, size, src, NoReg, NoReg, NoReg);
    count -= 1;
  }
  VIXL_ASSERT(count == 0);
}


void MacroAssembler::PushHelper(int count, int size,
                                const CPURegister& src0,
                                const CPURegister& src1,
                                const CPURegister& src2,
                                const CPURegister& src3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
  VIXL_ASSERT(size == src0.SizeInBytes());

  // When pushing multiple registers, the store order is chosen such that
  // Push(a, b) is equivalent to Push(a) followed by Push(b).
  switch (count) {
    case 1:
      VIXL_ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
      str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
      break;
    case 2:
      VIXL_ASSERT(src2.IsNone() && src3.IsNone());
      stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
      break;
    case 3:
      VIXL_ASSERT(src3.IsNone());
      stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
      str(src0, MemOperand(StackPointer(), 2 * size));
      break;
    case 4:
      // Skip over 4 * size, then fill in the gap. This allows four W registers
      // to be pushed using sp, whilst maintaining 16-byte alignment for sp at
      // all times.
      stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
      stp(src1, src0, MemOperand(StackPointer(), 2 * size));
      break;
    default:
      VIXL_UNREACHABLE();
  }
}
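
// Resulting layout for Push(a, b, c, d) with X registers (a sketch), where
// old_sp is the stack pointer on entry:
//   [old_sp -  8] : a
//   [old_sp - 16] : b
//   [old_sp - 24] : c
//   [old_sp - 32] : d   <- new stack pointer
// This matches four individual pushes of a, b, c, then d.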


void MacroAssembler::PopHelper(int count, int size,
                               const CPURegister& dst0,
                               const CPURegister& dst1,
                               const CPURegister& dst2,
                               const CPURegister& dst3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  VIXL_ASSERT(size == dst0.SizeInBytes());

  // When popping multiple registers, the load order is chosen such that
  // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
  switch (count) {
    case 1:
      VIXL_ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
      ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
      break;
    case 2:
      VIXL_ASSERT(dst2.IsNone() && dst3.IsNone());
      ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
      break;
    case 3:
      VIXL_ASSERT(dst3.IsNone());
      ldr(dst2, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
      break;
    case 4:
      // Load the higher addresses first, then load the lower addresses and
      // skip the whole block in the second instruction. This allows four W
      // registers to be popped using sp, whilst maintaining 16-byte alignment
      // for sp at all times.
      ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
      break;
    default:
      VIXL_UNREACHABLE();
  }
}


void MacroAssembler::PrepareForPush(int count, int size) {
  if (sp.Is(StackPointer())) {
    // If the current stack pointer is sp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    VIXL_ASSERT((count * size) % 16 == 0);
  } else {
    // Even if the current stack pointer is not the system stack pointer (sp),
    // the system stack pointer will still be modified in order to comply with
    // ABI rules about accessing memory below the system stack pointer.
    BumpSystemStackPointer(count * size);
  }
}


void MacroAssembler::PrepareForPop(int count, int size) {
  USE(count);
  USE(size);
  if (sp.Is(StackPointer())) {
    // If the current stack pointer is sp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    VIXL_ASSERT((count * size) % 16 == 0);
  }
}

void MacroAssembler::Poke(const Register& src, const Operand& offset) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (offset.IsImmediate()) {
    VIXL_ASSERT(offset.immediate() >= 0);
  }

  Str(src, MemOperand(StackPointer(), offset));
}


void MacroAssembler::Peek(const Register& dst, const Operand& offset) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (offset.IsImmediate()) {
    VIXL_ASSERT(offset.immediate() >= 0);
  }

  Ldr(dst, MemOperand(StackPointer(), offset));
}


void MacroAssembler::Claim(const Operand& size) {
  VIXL_ASSERT(allow_macro_instructions_);

  if (size.IsZero()) {
    return;
  }

  if (size.IsImmediate()) {
    VIXL_ASSERT(size.immediate() > 0);
    if (sp.Is(StackPointer())) {
      VIXL_ASSERT((size.immediate() % 16) == 0);
    }
  }

  if (!sp.Is(StackPointer())) {
    BumpSystemStackPointer(size);
  }

  Sub(StackPointer(), StackPointer(), size);
}


void MacroAssembler::Drop(const Operand& size) {
  VIXL_ASSERT(allow_macro_instructions_);

  if (size.IsZero()) {
    return;
  }

  if (size.IsImmediate()) {
    VIXL_ASSERT(size.immediate() > 0);
    if (sp.Is(StackPointer())) {
      VIXL_ASSERT((size.immediate() % 16) == 0);
    }
  }

  Add(StackPointer(), StackPointer(), size);
}


void MacroAssembler::PushCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is sp.
  VIXL_ASSERT(sp.Is(StackPointer()));

  MemOperand tos(sp, -2 * kXRegSizeInBytes, PreIndex);

  stp(x29, x30, tos);
  stp(x27, x28, tos);
  stp(x25, x26, tos);
  stp(x23, x24, tos);
  stp(x21, x22, tos);
  stp(x19, x20, tos);

  stp(d14, d15, tos);
  stp(d12, d13, tos);
  stp(d10, d11, tos);
  stp(d8, d9, tos);
}


void MacroAssembler::PopCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is sp.
  VIXL_ASSERT(sp.Is(StackPointer()));

  MemOperand tos(sp, 2 * kXRegSizeInBytes, PostIndex);

  ldp(d8, d9, tos);
  ldp(d10, d11, tos);
  ldp(d12, d13, tos);
  ldp(d14, d15, tos);

  ldp(x19, x20, tos);
  ldp(x21, x22, tos);
  ldp(x23, x24, tos);
  ldp(x25, x26, tos);
  ldp(x27, x28, tos);
  ldp(x29, x30, tos);
}

void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
  VIXL_ASSERT(!sp.Is(StackPointer()));
  // TODO: Several callers rely on this not using scratch registers, so we use
  // the assembler directly here. However, this means that large immediate
  // values of 'space' cannot be handled.
  InstructionAccurateScope scope(this);
  sub(sp, StackPointer(), space);
}


// This is the main Printf implementation. All callee-saved registers are
// preserved, but NZCV and the caller-saved registers may be clobbered.
void MacroAssembler::PrintfNoPreserve(const char * format,
                                      const CPURegister& arg0,
                                      const CPURegister& arg1,
                                      const CPURegister& arg2,
                                      const CPURegister& arg3) {
  // We cannot handle a caller-saved stack pointer. It doesn't make much sense
  // in most cases anyway, so this restriction shouldn't be too serious.
  VIXL_ASSERT(!kCallerSaved.IncludesAliasOf(StackPointer()));

  // The provided arguments, and their proper PCS registers.
  CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
  CPURegister pcs[kPrintfMaxArgCount];

  int arg_count = kPrintfMaxArgCount;

  // The PCS varargs registers for printf. Note that x0 is used for the printf
  // format string.
  static const CPURegList kPCSVarargs =
      CPURegList(CPURegister::kRegister, kXRegSize, 1, arg_count);
  static const CPURegList kPCSVarargsFP =
      CPURegList(CPURegister::kFPRegister, kDRegSize, 0, arg_count - 1);

  // We can use caller-saved registers as scratch values, except for the
  // arguments and the PCS registers where they might need to go.
  UseScratchRegisterScope temps(this);
  temps.Include(kCallerSaved);
  temps.Include(kCallerSavedFP);
  temps.Exclude(kPCSVarargs);
  temps.Exclude(kPCSVarargsFP);
  temps.Exclude(arg0, arg1, arg2, arg3);

  // Copies of the arg lists that we can iterate through.
  CPURegList pcs_varargs = kPCSVarargs;
  CPURegList pcs_varargs_fp = kPCSVarargsFP;

  // Place the arguments. There are lots of clever tricks and optimizations we
  // could use here, but Printf is a debug tool so instead we just try to keep
  // it simple: Move each input that isn't already in the right place to a
  // scratch register, then move everything back.
  for (unsigned i = 0; i < kPrintfMaxArgCount; i++) {
    // Work out the proper PCS register for this argument.
    if (args[i].IsRegister()) {
      pcs[i] = pcs_varargs.PopLowestIndex().X();
      // We might only need a W register here. We need to know the size of the
      // argument so we can properly encode it for the simulator call.
      if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
    } else if (args[i].IsFPRegister()) {
      // In C, floats are always cast to doubles for varargs calls.
      pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
    } else {
      VIXL_ASSERT(args[i].IsNone());
      arg_count = i;
      break;
    }

    // If the argument is already in the right place, leave it where it is.
    if (args[i].Aliases(pcs[i])) continue;

    // Otherwise, if the argument is in a PCS argument register, allocate an
    // appropriate scratch register and then move it out of the way.
    if (kPCSVarargs.IncludesAliasOf(args[i]) ||
        kPCSVarargsFP.IncludesAliasOf(args[i])) {
      if (args[i].IsRegister()) {
        Register old_arg = Register(args[i]);
        Register new_arg = temps.AcquireSameSizeAs(old_arg);
        Mov(new_arg, old_arg);
        args[i] = new_arg;
      } else {
        FPRegister old_arg = FPRegister(args[i]);
        FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
        Fmov(new_arg, old_arg);
        args[i] = new_arg;
      }
    }
  }

  // Do a second pass to move values into their final positions and perform any
  // conversions that may be required.
  for (int i = 0; i < arg_count; i++) {
    VIXL_ASSERT(pcs[i].type() == args[i].type());
    if (pcs[i].IsRegister()) {
      Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
    } else {
      VIXL_ASSERT(pcs[i].IsFPRegister());
      if (pcs[i].size() == args[i].size()) {
        Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
      } else {
        Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
      }
    }
  }

  // Load the format string into x0, as per the procedure-call standard.
  //
  // To make the code as portable as possible, the format string is encoded
  // directly in the instruction stream. It might be cleaner to encode it in a
  // literal pool, but since Printf is usually used for debugging, it is
  // beneficial for it to be minimally dependent on other features.
  temps.Exclude(x0);
  Label format_address;
  Adr(x0, &format_address);

  // Emit the format string directly in the instruction stream.
  { BlockLiteralPoolScope scope(this);
    Label after_data;
    B(&after_data);
    Bind(&format_address);
    EmitStringData(format);
    Unreachable();
    Bind(&after_data);
  }

  // We don't pass any arguments on the stack, but we still need to align the C
  // stack pointer to a 16-byte boundary for PCS compliance.
  if (!sp.Is(StackPointer())) {
    Bic(sp, StackPointer(), 0xf);
  }

  // Actually call printf. This part needs special handling for the simulator,
  // since the system printf function will use a different instruction set and
  // the procedure-call standard will not be compatible.
#ifdef USE_SIMULATOR
  { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
    hlt(kPrintfOpcode);
    dc32(arg_count);  // kPrintfArgCountOffset

    // Determine the argument pattern.
    uint32_t arg_pattern_list = 0;
    for (int i = 0; i < arg_count; i++) {
      uint32_t arg_pattern;
      if (pcs[i].IsRegister()) {
        arg_pattern = pcs[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
      } else {
        VIXL_ASSERT(pcs[i].Is64Bits());
        arg_pattern = kPrintfArgD;
      }
      VIXL_ASSERT(arg_pattern < (1 << kPrintfArgPatternBits));
      arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
    }
    dc32(arg_pattern_list);  // kPrintfArgPatternListOffset
  }
#else
  Register tmp = temps.AcquireX();
  Mov(tmp, reinterpret_cast<uintptr_t>(printf));
  Blr(tmp);
#endif
}
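
// Illustration (a sketch): PrintfNoPreserve("%d %f\n", w5, d5) marshals the
// arguments with "mov w1, w5" (x0 is reserved for the format string) and
// "fmov d0, d5" before making the call.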


void MacroAssembler::Printf(const char * format,
                            CPURegister arg0,
                            CPURegister arg1,
                            CPURegister arg2,
                            CPURegister arg3) {
  // We can only print sp if it is the current stack pointer.
  if (!sp.Is(StackPointer())) {
    VIXL_ASSERT(!sp.Aliases(arg0));
    VIXL_ASSERT(!sp.Aliases(arg1));
    VIXL_ASSERT(!sp.Aliases(arg2));
    VIXL_ASSERT(!sp.Aliases(arg3));
  }

  // Make sure that the macro assembler doesn't try to use any of our arguments
  // as scratch registers.
  UseScratchRegisterScope exclude_all(this);
  exclude_all.ExcludeAll();

  // Preserve all caller-saved registers as well as NZCV.
  // If sp is the stack pointer, PushCPURegList asserts that the size of each
  // list is a multiple of 16 bytes.
  PushCPURegList(kCallerSaved);
  PushCPURegList(kCallerSavedFP);

  { UseScratchRegisterScope temps(this);
    // We can use caller-saved registers as scratch values (except for argN).
    temps.Include(kCallerSaved);
    temps.Include(kCallerSavedFP);
    temps.Exclude(arg0, arg1, arg2, arg3);

    // If any of the arguments are the current stack pointer, allocate a new
    // register for them, and adjust the value to compensate for pushing the
    // caller-saved registers.
    bool arg0_sp = StackPointer().Aliases(arg0);
    bool arg1_sp = StackPointer().Aliases(arg1);
    bool arg2_sp = StackPointer().Aliases(arg2);
    bool arg3_sp = StackPointer().Aliases(arg3);
    if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
      // Allocate a register to hold the original stack pointer value, to pass
      // to PrintfNoPreserve as an argument.
      Register arg_sp = temps.AcquireX();
      Add(arg_sp, StackPointer(),
          kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes());
      if (arg0_sp) arg0 = Register(arg_sp.code(), arg0.size());
      if (arg1_sp) arg1 = Register(arg_sp.code(), arg1.size());
      if (arg2_sp) arg2 = Register(arg_sp.code(), arg2.size());
      if (arg3_sp) arg3 = Register(arg_sp.code(), arg3.size());
    }

    // Preserve NZCV.
    Register tmp = temps.AcquireX();
    Mrs(tmp, NZCV);
    Push(tmp, xzr);
    temps.Release(tmp);

    PrintfNoPreserve(format, arg0, arg1, arg2, arg3);

    // Restore NZCV.
    tmp = temps.AcquireX();
    Pop(xzr, tmp);
    Msr(NZCV, tmp);
    temps.Release(tmp);
  }

  PopCPURegList(kCallerSavedFP);
  PopCPURegList(kCallerSaved);
}

void MacroAssembler::Trace(TraceParameters parameters, TraceCommand command) {
  VIXL_ASSERT(allow_macro_instructions_);

#ifdef USE_SIMULATOR
  // The arguments to the trace pseudo instruction need to be contiguous in
  // memory, so make sure we don't try to emit a literal pool.
  InstructionAccurateScope scope(this, kTraceLength / kInstructionSize);

  Label start;
  bind(&start);

  // Refer to instructions-a64.h for a description of the marker and its
  // arguments.
  hlt(kTraceOpcode);

  VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceParamsOffset);
  dc32(parameters);

  VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceCommandOffset);
  dc32(command);
#else
  // Emit nothing on real hardware.
  USE(parameters);
  USE(command);
#endif
}
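
// Example (illustrative sketch, not in the original source): under
// USE_SIMULATOR, tracing might be toggled around a region of interest. The
// parameter and command values used here are assumed to be the ones declared
// in instructions-a64.h.
//
//   masm.Trace(LOG_ALL, TRACE_ENABLE);   // Start logging simulator state.
//   // ... code whose execution should be traced ...
//   masm.Trace(LOG_ALL, TRACE_DISABLE);  // Stop logging.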


void MacroAssembler::Log(TraceParameters parameters) {
  VIXL_ASSERT(allow_macro_instructions_);

#ifdef USE_SIMULATOR
  // The arguments to the log pseudo instruction need to be contiguous in
  // memory, so make sure we don't try to emit a literal pool.
  InstructionAccurateScope scope(this, kLogLength / kInstructionSize);

  Label start;
  bind(&start);

  // Refer to instructions-a64.h for a description of the marker and its
  // arguments.
  hlt(kLogOpcode);

  VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kLogParamsOffset);
  dc32(parameters);
#else
  // Emit nothing on real hardware.
  USE(parameters);
#endif
}
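
// Example (illustrative sketch): unlike Trace, Log requests a one-off dump of
// the selected state when the simulator reaches this point in the code. The
// LOG_REGS value is assumed to be one of the TraceParameters declared in
// instructions-a64.h.
//
//   masm.Log(LOG_REGS);  // Print the general-purpose registers once.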
1341
armvixl578645f2013-08-15 17:21:42 +01001342
1343void MacroAssembler::EnableInstrumentation() {
armvixlb0c8ae22014-03-21 14:03:59 +00001344 VIXL_ASSERT(!isprint(InstrumentStateEnable));
armvixl578645f2013-08-15 17:21:42 +01001345 InstructionAccurateScope scope(this, 1);
1346 movn(xzr, InstrumentStateEnable);
1347}
1348
1349
1350void MacroAssembler::DisableInstrumentation() {
armvixlb0c8ae22014-03-21 14:03:59 +00001351 VIXL_ASSERT(!isprint(InstrumentStateDisable));
armvixl578645f2013-08-15 17:21:42 +01001352 InstructionAccurateScope scope(this, 1);
1353 movn(xzr, InstrumentStateDisable);
1354}
1355
1356
1357void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
armvixlb0c8ae22014-03-21 14:03:59 +00001358 VIXL_ASSERT(strlen(marker_name) == 2);
armvixl578645f2013-08-15 17:21:42 +01001359
1360 // We allow only printable characters in the marker names. Unprintable
1361 // characters are reserved for controlling features of the instrumentation.
armvixlb0c8ae22014-03-21 14:03:59 +00001362 VIXL_ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));
armvixl578645f2013-08-15 17:21:42 +01001363
1364 InstructionAccurateScope scope(this, 1);
1365 movn(xzr, (marker_name[1] << 8) | marker_name[0]);
1366}
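
// Example (illustrative sketch): the marker is encoded into an otherwise
// harmless movn-to-xzr instruction, so an annotation is a single call with a
// two-printable-character name.
//
//   masm.AnnotateInstrumentation("fn");  // Tag the following code as "fn".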
1367
armvixlb0c8ae22014-03-21 14:03:59 +00001368
1369UseScratchRegisterScope::~UseScratchRegisterScope() {
1370 available_->set_list(old_available_);
1371 availablefp_->set_list(old_availablefp_);
1372}
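
// Example (illustrative sketch, not in the original source): the scope is an
// RAII helper. The destructor above restores the available lists, so any
// register acquired inside the scope is handed back automatically.
//
//   {
//     UseScratchRegisterScope temps(&masm);
//     Register scratch = temps.AcquireX();  // Claim a 64-bit scratch register.
//     // ... use scratch ...
//   }  // scratch is returned to the pool of available registers here.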


bool UseScratchRegisterScope::IsAvailable(const CPURegister& reg) const {
  return available_->IncludesAliasOf(reg) || availablefp_->IncludesAliasOf(reg);
}


Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
  int code = AcquireNextAvailable(available_).code();
  return Register(code, reg.SizeInBits());
}


FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
  int code = AcquireNextAvailable(availablefp_).code();
  return FPRegister(code, reg.SizeInBits());
}


void UseScratchRegisterScope::Release(const CPURegister& reg) {
  if (reg.IsRegister()) {
    ReleaseByCode(available_, reg.code());
  } else if (reg.IsFPRegister()) {
    ReleaseByCode(availablefp_, reg.code());
  } else {
    VIXL_ASSERT(reg.IsNone());
  }
}


void UseScratchRegisterScope::Include(const CPURegList& list) {
  if (list.type() == CPURegister::kRegister) {
    // Make sure that neither sp nor xzr is included in the list.
    IncludeByRegList(available_, list.list() & ~(xzr.Bit() | sp.Bit()));
  } else {
    VIXL_ASSERT(list.type() == CPURegister::kFPRegister);
    IncludeByRegList(availablefp_, list.list());
  }
}
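
// Note (added commentary): because Include filters out sp and xzr, here and
// in the four-register overload below, callers can pass predefined lists such
// as kCallerSaved directly, as Printf does above, without first masking out
// the stack pointer or zero register themselves.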


void UseScratchRegisterScope::Include(const Register& reg1,
                                      const Register& reg2,
                                      const Register& reg3,
                                      const Register& reg4) {
  RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
  // Make sure that neither sp nor xzr is included in the list.
  include &= ~(xzr.Bit() | sp.Bit());

  IncludeByRegList(available_, include);
}


void UseScratchRegisterScope::Include(const FPRegister& reg1,
                                      const FPRegister& reg2,
                                      const FPRegister& reg3,
                                      const FPRegister& reg4) {
  RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
  IncludeByRegList(availablefp_, include);
}


void UseScratchRegisterScope::Exclude(const CPURegList& list) {
  if (list.type() == CPURegister::kRegister) {
    ExcludeByRegList(available_, list.list());
  } else {
    VIXL_ASSERT(list.type() == CPURegister::kFPRegister);
    ExcludeByRegList(availablefp_, list.list());
  }
}


void UseScratchRegisterScope::Exclude(const Register& reg1,
                                      const Register& reg2,
                                      const Register& reg3,
                                      const Register& reg4) {
  RegList exclude = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
  ExcludeByRegList(available_, exclude);
}


void UseScratchRegisterScope::Exclude(const FPRegister& reg1,
                                      const FPRegister& reg2,
                                      const FPRegister& reg3,
                                      const FPRegister& reg4) {
  RegList excludefp = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
  ExcludeByRegList(availablefp_, excludefp);
}


void UseScratchRegisterScope::Exclude(const CPURegister& reg1,
                                      const CPURegister& reg2,
                                      const CPURegister& reg3,
                                      const CPURegister& reg4) {
  RegList exclude = 0;
  RegList excludefp = 0;

  const CPURegister regs[] = {reg1, reg2, reg3, reg4};

  for (unsigned i = 0; i < (sizeof(regs) / sizeof(regs[0])); i++) {
    if (regs[i].IsRegister()) {
      exclude |= regs[i].Bit();
    } else if (regs[i].IsFPRegister()) {
      excludefp |= regs[i].Bit();
    } else {
      VIXL_ASSERT(regs[i].IsNone());
    }
  }

  ExcludeByRegList(available_, exclude);
  ExcludeByRegList(availablefp_, excludefp);
}


void UseScratchRegisterScope::ExcludeAll() {
  ExcludeByRegList(available_, available_->list());
  ExcludeByRegList(availablefp_, availablefp_->list());
}


CPURegister UseScratchRegisterScope::AcquireNextAvailable(
    CPURegList* available) {
  VIXL_CHECK(!available->IsEmpty());
  CPURegister result = available->PopLowestIndex();
  VIXL_ASSERT(!AreAliased(result, xzr, sp));
  return result;
}


void UseScratchRegisterScope::ReleaseByCode(CPURegList* available, int code) {
  ReleaseByRegList(available, static_cast<RegList>(1) << code);
}


void UseScratchRegisterScope::ReleaseByRegList(CPURegList* available,
                                               RegList regs) {
  available->set_list(available->list() | regs);
}


void UseScratchRegisterScope::IncludeByRegList(CPURegList* available,
                                               RegList regs) {
  available->set_list(available->list() | regs);
}


void UseScratchRegisterScope::ExcludeByRegList(CPURegList* available,
                                               RegList exclude) {
  available->set_list(available->list() & ~exclude);
}
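
// Note (added commentary): a RegList is a bitmask with one bit per register
// code, so the include/exclude/release helpers above reduce to plain bitwise
// operations. For example, releasing x2 (register code 2) ORs bit 2 back into
// the available list:
//
//   ReleaseByCode(available, 2);
//   // available->list() |= (static_cast<RegList>(1) << 2)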

}  // namespace vixl