// Copyright 2013, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

27#include "a64/macro-assembler-a64.h"
28namespace vixl {
29
void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
  VIXL_ASSERT((reg.Is(NoReg) || (type >= kBranchTypeFirstUsingReg)) &&
              ((bit == -1) || (type >= kBranchTypeFirstUsingBit)));
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    B(static_cast<Condition>(type), label);
  } else {
    switch (type) {
      case always: B(label); break;
      case never: break;
      case reg_zero: Cbz(reg, label); break;
      case reg_not_zero: Cbnz(reg, label); break;
      case reg_bit_clear: Tbz(reg, bit, label); break;
      case reg_bit_set: Tbnz(reg, bit, label); break;
      default:
        VIXL_UNREACHABLE();
    }
  }
}
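
// For illustration, the BranchType dispatch above maps generic branch
// requests onto concrete instructions, e.g. (assuming a bound Label done and
// a live x2):
//   B(&done, reg_not_zero, x2);    // emits: cbnz x2, <done>
//   B(&done, reg_bit_set, x2, 3);  // emits: tbnz x2, #3, <done>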

void MacroAssembler::And(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, AND);
}


void MacroAssembler::Ands(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, ANDS);
}


void MacroAssembler::Tst(const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Ands(AppropriateZeroRegFor(rn), rn, operand);
}


void MacroAssembler::Bic(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, BIC);
}


void MacroAssembler::Bics(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, BICS);
}


void MacroAssembler::Orr(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, ORR);
}


void MacroAssembler::Orn(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, ORN);
}


void MacroAssembler::Eor(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, EOR);
}


void MacroAssembler::Eon(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  LogicalMacro(rd, rn, operand, EON);
}


void MacroAssembler::LogicalMacro(const Register& rd,
                                  const Register& rn,
                                  const Operand& operand,
                                  LogicalOp op) {
  UseScratchRegisterScope temps(this);

  if (operand.IsImmediate()) {
    int64_t immediate = operand.immediate();
    unsigned reg_size = rd.size();

    // If the operation is NOT, invert the operation and immediate.
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = ~immediate;
    }

    // Ignore the top 32 bits of an immediate if we're moving to a W register.
    if (rd.Is32Bits()) {
      // Check that the top 32 bits are consistent.
      VIXL_ASSERT(((immediate >> kWRegSize) == 0) ||
                  ((immediate >> kWRegSize) == -1));
      immediate &= kWRegMask;
    }

    VIXL_ASSERT(rd.Is64Bits() || is_uint32(immediate));

    // Special cases for all set or all clear immediates.
    if (immediate == 0) {
      switch (op) {
        case AND:
          Mov(rd, 0);
          return;
        case ORR:  // Fall through.
        case EOR:
          Mov(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          break;
        default:
          VIXL_UNREACHABLE();
      }
    } else if ((rd.Is64Bits() && (immediate == -1)) ||
               (rd.Is32Bits() && (immediate == 0xffffffff))) {
      switch (op) {
        case AND:
          Mov(rd, rn);
          return;
        case ORR:
          Mov(rd, immediate);
          return;
        case EOR:
          Mvn(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          break;
        default:
          VIXL_UNREACHABLE();
      }
    }

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // Immediate can't be encoded: synthesize using move immediate.
      Register temp = temps.AcquireSameSizeAs(rn);
      Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);

      if (rd.Is(sp)) {
        // If rd is the stack pointer we cannot use it as the destination
        // register so we use the temp register as an intermediate again.
        Logical(temp, rn, imm_operand, op);
        Mov(sp, temp);
      } else {
        Logical(rd, rn, imm_operand, op);
      }
    }
  } else if (operand.IsExtendedRegister()) {
    VIXL_ASSERT(operand.reg().size() <= rd.size());
    // Add/sub extended supports shift <= 4. We want to support exactly the
    // same modes here.
    VIXL_ASSERT(operand.shift_amount() <= 4);
    VIXL_ASSERT(operand.reg().Is64Bits() ||
                ((operand.extend() != UXTX) && (operand.extend() != SXTX)));

    temps.Exclude(operand.reg());
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    Logical(rd, rn, Operand(temp), op);
  } else {
    // The operand can be encoded in the instruction.
    VIXL_ASSERT(operand.IsShiftedRegister());
    Logical(rd, rn, operand, op);
  }
}
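
// Example (illustrative): 0x3f is a valid bitmask immediate, so
//   And(x0, x1, 0x3f);        // emits: and x0, x1, #0x3f
// whereas 0x12345678 is not, so the macro materializes it in a scratch
// register first:
//   And(x0, x1, 0x12345678);  // emits: movz temp, #0x5678
//                             //        movk temp, #0x1234, lsl #16
//                             //        and x0, x1, temp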


void MacroAssembler::Mov(const Register& rd,
                         const Operand& operand,
                         DiscardMoveMode discard_mode) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mov(rd, operand.immediate());
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Emit a shift instruction if moving a shifted register. This operation
    // could also be achieved using an orr instruction (like orn used by Mvn),
    // but using a shift instruction makes the disassembly clearer.
    EmitShift(rd, operand.reg(), operand.shift(), operand.shift_amount());
  } else if (operand.IsExtendedRegister()) {
    // Emit an extend instruction if moving an extended register. This handles
    // extend with post-shift operations, too.
    EmitExtendShift(rd, operand.reg(), operand.extend(),
                    operand.shift_amount());
  } else {
    // Otherwise, emit a register move only if the registers are distinct, or
    // if they are not X registers.
    //
    // Note that mov(w0, w0) is not a no-op because it clears the top word of
    // x0. A flag is provided (kDiscardForSameWReg) if a move between the same
    // W registers is not required to clear the top word of the X register. In
    // this case, the instruction is discarded.
    //
    // If sp is an operand, add #0 is emitted, otherwise, orr #0.
    if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
                                  (discard_mode == kDontDiscardForSameWReg))) {
      mov(rd, operand.reg());
    }
  }
}


void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mvn(rd, operand.immediate());
  } else if (operand.IsExtendedRegister()) {
    UseScratchRegisterScope temps(this);
    temps.Exclude(operand.reg());

    // Emit two instructions for the extend case. This differs from Mov, as
    // the extend and invert can't be achieved in one instruction.
    Register temp = temps.AcquireSameSizeAs(rd);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    mvn(rd, Operand(temp));
  } else {
    // Otherwise, register and shifted register cases can be handled by the
    // assembler directly, using orn.
    mvn(rd, operand);
  }
}


void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
  VIXL_ASSERT(allow_macro_instructions_);
  VIXL_ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());

  // Immediates on AArch64 can be produced using an initial value, and zero to
  // three move-keep operations.
  //
  // Initial values can be generated with:
  //  1. 64-bit move zero (movz).
  //  2. 32-bit move inverted (movn).
  //  3. 64-bit move inverted.
  //  4. 32-bit orr immediate.
  //  5. 64-bit orr immediate.
  // Move-keep may then be used to modify each of the 16-bit halfwords.
  //
  // The code below supports all five initial value generators, and
  // applying move-keep operations to move-zero and move-inverted initial
  // values.

  // Try to move the immediate in one instruction, and if that fails, switch to
  // using multiple instructions.
  if (!TryOneInstrMoveImmediate(rd, imm)) {
    unsigned reg_size = rd.size();

    // Generic immediate case. Imm will be represented by
    //   [imm3, imm2, imm1, imm0], where each imm is 16 bits.
    // A move-zero or move-inverted is generated for the first non-zero or
    // non-0xffff immX, and a move-keep for subsequent non-zero immX.

    uint64_t ignored_halfword = 0;
    bool invert_move = false;
    // If the number of 0xffff halfwords is greater than the number of 0x0000
    // halfwords, it's more efficient to use move-inverted.
    if (CountClearHalfWords(~imm, reg_size) >
        CountClearHalfWords(imm, reg_size)) {
      ignored_halfword = 0xffff;
      invert_move = true;
    }

    // Mov instructions can't move values into the stack pointer, so set up a
    // temporary register, if needed.
    UseScratchRegisterScope temps(this);
    Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;

    // Iterate through the halfwords. Use movn/movz for the first non-ignored
    // halfword, and movk for subsequent halfwords.
    VIXL_ASSERT((reg_size % 16) == 0);
    bool first_mov_done = false;
    for (unsigned i = 0; i < (temp.size() / 16); i++) {
      uint64_t imm16 = (imm >> (16 * i)) & 0xffff;
      if (imm16 != ignored_halfword) {
        if (!first_mov_done) {
          if (invert_move) {
            movn(temp, ~imm16 & 0xffff, 16 * i);
          } else {
            movz(temp, imm16, 16 * i);
          }
          first_mov_done = true;
        } else {
          // Construct a wider constant.
          movk(temp, imm16, 16 * i);
        }
      }
    }

    VIXL_ASSERT(first_mov_done);

    // Move the temporary if the original destination register was the stack
    // pointer.
    if (rd.IsSP()) {
      mov(rd, temp);
    }
  }
}
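
// Worked example (illustrative): Mov(x0, 0xffff0000ffff1234).
// No single movz/movn/orr form exists for this value, and ~imm has more
// clear halfwords than imm, so the move-inverted path is chosen
// (ignored_halfword = 0xffff):
//   movn x0, #0xedcb           // x0 = ~0x000000000000edcb
//                              //    = 0xffffffffffff1234
//   movk x0, #0x0000, lsl #32  // x0 = 0xffff0000ffff1234
// Two instructions instead of the four a movz/movk sequence would need.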


unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
  VIXL_ASSERT((reg_size % 8) == 0);
  int count = 0;
  for (unsigned i = 0; i < (reg_size / 16); i++) {
    if ((imm & 0xffff) == 0) {
      count++;
    }
    imm >>= 16;
  }
  return count;
}
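
// For example, CountClearHalfWords(0x0000123400000000, 64) is 3: only the
// halfword at bits [47:32] (0x1234) is non-zero.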


// The movz instruction can generate immediates containing an arbitrary 16-bit
// value, with remaining bits clear, eg. 0x00001234, 0x0000123400000000.
bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
  VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
  return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}


// The movn instruction can generate immediates containing an arbitrary 16-bit
// value, with remaining bits set, eg. 0xffff1234, 0xffff1234ffffffff.
bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
  return IsImmMovz(~imm, reg_size);
}


void MacroAssembler::Ccmp(const Register& rn,
                          const Operand& operand,
                          StatusFlags nzcv,
                          Condition cond) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
  } else {
    ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
  }
}


void MacroAssembler::Ccmn(const Register& rn,
                          const Operand& operand,
                          StatusFlags nzcv,
                          Condition cond) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
  } else {
    ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
  }
}


void MacroAssembler::ConditionalCompareMacro(const Register& rn,
                                             const Operand& operand,
                                             StatusFlags nzcv,
                                             Condition cond,
                                             ConditionalCompareOp op) {
  VIXL_ASSERT((cond != al) && (cond != nv));
  if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
      (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
    // The immediate can be encoded in the instruction, or the operand is an
    // unshifted register: call the assembler.
    ConditionalCompare(rn, operand, nzcv, cond, op);
  } else {
    UseScratchRegisterScope temps(this);
    // The operand isn't directly supported by the instruction: perform the
    // operation on a temporary register.
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    ConditionalCompare(rn, temp, nzcv, cond, op);
  }
}


void MacroAssembler::Csel(const Register& rd,
                          const Register& rn,
                          const Operand& operand,
                          Condition cond) {
  VIXL_ASSERT(allow_macro_instructions_);
  VIXL_ASSERT(!rd.IsZero());
  VIXL_ASSERT(!rn.IsZero());
  VIXL_ASSERT((cond != al) && (cond != nv));
  if (operand.IsImmediate()) {
    // Immediate argument. Handle special cases of 0, 1 and -1 using zero
    // register.
    int64_t imm = operand.immediate();
    Register zr = AppropriateZeroRegFor(rn);
    if (imm == 0) {
      csel(rd, rn, zr, cond);
    } else if (imm == 1) {
      csinc(rd, rn, zr, cond);
    } else if (imm == -1) {
      csinv(rd, rn, zr, cond);
    } else {
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireSameSizeAs(rn);
      Mov(temp, operand.immediate());
      csel(rd, rn, temp, cond);
    }
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
    // Unshifted register argument.
    csel(rd, rn, operand.reg(), cond);
  } else {
    // All other arguments.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    csel(rd, rn, temp, cond);
  }
}
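
// Examples (illustrative): the 0, 1 and -1 special cases lean on the zero
// register and the csinc/csinv variants, so no scratch register is needed:
//   Csel(x0, x1, 0, eq);   // emits: csel x0, x1, xzr, eq
//   Csel(x0, x1, 1, eq);   // emits: csinc x0, x1, xzr, eq
//   Csel(w0, w1, -1, lt);  // emits: csinv w0, w1, wzr, lt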


void MacroAssembler::Add(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0) &&
      IsImmAddSub(-operand.immediate())) {
    AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, SUB);
  } else {
    AddSubMacro(rd, rn, operand, LeaveFlags, ADD);
  }
}


void MacroAssembler::Adds(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0) &&
      IsImmAddSub(-operand.immediate())) {
    AddSubMacro(rd, rn, -operand.immediate(), SetFlags, SUB);
  } else {
    AddSubMacro(rd, rn, operand, SetFlags, ADD);
  }
}


void MacroAssembler::Sub(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0) &&
      IsImmAddSub(-operand.immediate())) {
    AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, ADD);
  } else {
    AddSubMacro(rd, rn, operand, LeaveFlags, SUB);
  }
}


void MacroAssembler::Subs(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0) &&
      IsImmAddSub(-operand.immediate())) {
    AddSubMacro(rd, rn, -operand.immediate(), SetFlags, ADD);
  } else {
    AddSubMacro(rd, rn, operand, SetFlags, SUB);
  }
}


void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Adds(AppropriateZeroRegFor(rn), rn, operand);
}


void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Subs(AppropriateZeroRegFor(rn), rn, operand);
}


void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (value != 0.0) {
    UseScratchRegisterScope temps(this);
    FPRegister tmp = temps.AcquireSameSizeAs(fn);
    Fmov(tmp, value);
    fcmp(fn, tmp);
  } else {
    fcmp(fn, value);
  }
}


void MacroAssembler::Fmov(FPRegister fd, double imm) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (fd.Is32Bits()) {
    Fmov(fd, static_cast<float>(imm));
    return;
  }

  VIXL_ASSERT(fd.Is64Bits());
  if (IsImmFP64(imm)) {
    fmov(fd, imm);
  } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
    fmov(fd, xzr);
  } else {
    ldr(fd, imm);
  }
}


void MacroAssembler::Fmov(FPRegister fd, float imm) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (fd.Is64Bits()) {
    Fmov(fd, static_cast<double>(imm));
    return;
  }

  VIXL_ASSERT(fd.Is32Bits());
  if (IsImmFP32(imm)) {
    fmov(fd, imm);
  } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
    fmov(fd, wzr);
  } else {
    ldr(fd, imm);
  }
}


void MacroAssembler::Neg(const Register& rd,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate()) {
    Mov(rd, -operand.immediate());
  } else {
    Sub(rd, AppropriateZeroRegFor(rd), operand);
  }
}


void MacroAssembler::Negs(const Register& rd,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Subs(rd, AppropriateZeroRegFor(rd), operand);
}


bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
                                              int64_t imm) {
  unsigned n, imm_s, imm_r;
  int reg_size = dst.size();

  if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
    // Immediate can be represented in a move zero instruction. Movz can't
    // write to the stack pointer.
    movz(dst, imm);
    return true;
  } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
    // Immediate can be represented in a move negative instruction. Movn can't
    // write to the stack pointer.
    movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
    return true;
  } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
    // Immediate can be represented in a logical orr instruction.
    VIXL_ASSERT(!dst.IsZero());
    LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
    return true;
  }
  return false;
}
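
// Examples (illustrative) of values each one-instruction form covers:
//   Mov(x0, 0x0000123400000000);  // movz x0, #0x1234, lsl #32
//   Mov(x0, 0xffffffffffff1234);  // movn x0, #0xedcb
//   Mov(x0, 0x5555555555555555);  // orr x0, xzr, #0x5555555555555555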


Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
                                                  int64_t imm) {
  int reg_size = dst.size();

  // Encode the immediate in a single move instruction, if possible.
  if (TryOneInstrMoveImmediate(dst, imm)) {
    // The move was successful; nothing to do here.
  } else {
    // Pre-shift the immediate to the least-significant bits of the register.
    int shift_low = CountTrailingZeros(imm, reg_size);
    int64_t imm_low = imm >> shift_low;

    // Pre-shift the immediate to the most-significant bits of the register,
    // inserting set bits in the least-significant bits.
    int shift_high = CountLeadingZeros(imm, reg_size);
    int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);

    if (TryOneInstrMoveImmediate(dst, imm_low)) {
      // The new immediate has been moved into the destination's low bits:
      // return a new leftward-shifting operand.
      return Operand(dst, LSL, shift_low);
    } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
      // The new immediate has been moved into the destination's high bits:
      // return a new rightward-shifting operand.
      return Operand(dst, LSR, shift_high);
    } else {
      Mov(dst, imm);
    }
  }
  return Operand(dst);
}
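
// Worked example (illustrative): imm = 0x123400 is neither a move-wide nor
// a bitmask immediate, but it is 0x48d (0x123400 >> 10) shifted left by 10,
// and the pre-shifted value fits in a single movz:
//   movz dst, #0x48d
// The caller receives Operand(dst, LSL, 10), folding the residual shift into
// the subsequent data-processing instruction.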


void MacroAssembler::AddSubMacro(const Register& rd,
                                 const Register& rn,
                                 const Operand& operand,
                                 FlagsUpdate S,
                                 AddSubOp op) {
  if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
      (S == LeaveFlags)) {
    // The instruction would be a nop. Avoid generating useless code.
    return;
  }

  if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
      (rn.IsZero() && !operand.IsShiftedRegister()) ||
      (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    if (operand.IsImmediate()) {
      Operand imm_operand =
          MoveImmediateForShiftedOp(temp, operand.immediate());
      AddSub(rd, rn, imm_operand, S, op);
    } else {
      Mov(temp, operand);
      AddSub(rd, rn, temp, S, op);
    }
  } else {
    AddSub(rd, rn, operand, S, op);
  }
}
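
// Example (illustrative), continuing from MoveImmediateForShiftedOp:
//   Add(x0, x1, 0x123400);  // emits: movz temp, #0x48d
//                           //        add x0, x1, temp, lsl #10
// where temp is a scratch register; only two instructions despite the
// unencodable immediate.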


void MacroAssembler::Adc(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
}


void MacroAssembler::Adcs(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
}


void MacroAssembler::Sbc(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
}


void MacroAssembler::Sbcs(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
}


void MacroAssembler::Ngc(const Register& rd,
                         const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Register zr = AppropriateZeroRegFor(rd);
  Sbc(rd, zr, operand);
}


void MacroAssembler::Ngcs(const Register& rd,
                          const Operand& operand) {
  VIXL_ASSERT(allow_macro_instructions_);
  Register zr = AppropriateZeroRegFor(rd);
  Sbcs(rd, zr, operand);
}


void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
                                          const Register& rn,
                                          const Operand& operand,
                                          FlagsUpdate S,
                                          AddSubWithCarryOp op) {
  VIXL_ASSERT(rd.size() == rn.size());
  UseScratchRegisterScope temps(this);

  if (operand.IsImmediate() ||
      (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    // Add/sub with carry (immediate or ROR shifted register).
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    AddSubWithCarry(rd, rn, Operand(temp), S, op);
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Add/sub with carry (shifted register).
    VIXL_ASSERT(operand.reg().size() == rd.size());
    VIXL_ASSERT(operand.shift() != ROR);
    VIXL_ASSERT(is_uintn(rd.size() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2,
                         operand.shift_amount()));
    temps.Exclude(operand.reg());
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
    AddSubWithCarry(rd, rn, Operand(temp), S, op);
  } else if (operand.IsExtendedRegister()) {
    // Add/sub with carry (extended register).
    VIXL_ASSERT(operand.reg().size() <= rd.size());
    // Add/sub extended supports a shift <= 4. We want to support exactly the
    // same modes.
    VIXL_ASSERT(operand.shift_amount() <= 4);
    VIXL_ASSERT(operand.reg().Is64Bits() ||
                ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    temps.Exclude(operand.reg());
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    AddSubWithCarry(rd, rn, Operand(temp), S, op);
  } else {
    // The addressing mode is directly supported by the instruction.
    AddSubWithCarry(rd, rn, operand, S, op);
  }
}


#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP)                         \
void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) {  \
  LoadStoreMacro(REG, addr, OP);                                      \
}
LS_MACRO_LIST(DEFINE_FUNCTION)
#undef DEFINE_FUNCTION

void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
                                    const MemOperand& addr,
                                    LoadStoreOp op) {
  int64_t offset = addr.offset();
  LSDataSize size = CalcLSDataSize(op);

  // Check if an immediate offset fits in the immediate field of the
  // appropriate instruction. If not, emit two instructions to perform
  // the operation.
  if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
      !IsImmLSUnscaled(offset)) {
    // Immediate offset that can't be encoded using unsigned or unscaled
    // addressing modes.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(addr.base());
    Mov(temp, addr.offset());
    LoadStore(rt, MemOperand(addr.base(), temp), op);
  } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
    // Post-index beyond unscaled addressing range.
    LoadStore(rt, MemOperand(addr.base()), op);
    Add(addr.base(), addr.base(), Operand(offset));
  } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
    // Pre-index beyond unscaled addressing range.
    Add(addr.base(), addr.base(), Operand(offset));
    LoadStore(rt, MemOperand(addr.base()), op);
  } else {
    // Encodable in one load/store instruction.
    LoadStore(rt, addr, op);
  }
}
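
// Example (illustrative): for an X-register load the scaled unsigned offset
// tops out at 0x7ff8 and the unscaled signed offset at +/-255, so
//   Ldr(x0, MemOperand(x1, 0x100000));  // emits: movz temp, #0x10, lsl #16
//                                       //        ldr x0, [x1, temp]
// while Ldr(x0, MemOperand(x1, 8)) is a single encodable instruction.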


void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
                          const CPURegister& src2, const CPURegister& src3) {
  VIXL_ASSERT(allow_macro_instructions_);
  VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
  VIXL_ASSERT(src0.IsValid());

  int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
  int size = src0.SizeInBytes();

  PrepareForPush(count, size);
  PushHelper(count, size, src0, src1, src2, src3);
}


void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
                         const CPURegister& dst2, const CPURegister& dst3) {
  // It is not valid to pop into the same register more than once in one
  // instruction, not even into the zero register.
  VIXL_ASSERT(allow_macro_instructions_);
  VIXL_ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
  VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  VIXL_ASSERT(dst0.IsValid());

  int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
  int size = dst0.SizeInBytes();

  PrepareForPop(count, size);
  PopHelper(count, size, dst0, dst1, dst2, dst3);
}


void MacroAssembler::PushCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  PrepareForPush(registers.Count(), size);
  // Push up to four registers at a time because if the current stack pointer
  // is sp and reg_size is 32, registers must be pushed in blocks of four in
  // order to maintain the 16-byte alignment for sp.
  VIXL_ASSERT(allow_macro_instructions_);
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& src0 = registers.PopHighestIndex();
    const CPURegister& src1 = registers.PopHighestIndex();
    const CPURegister& src2 = registers.PopHighestIndex();
    const CPURegister& src3 = registers.PopHighestIndex();
    int count = count_before - registers.Count();
    PushHelper(count, size, src0, src1, src2, src3);
  }
}


void MacroAssembler::PopCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  PrepareForPop(registers.Count(), size);
  // Pop up to four registers at a time because if the current stack pointer
  // is sp and reg_size is 32, registers must be popped in blocks of four in
  // order to maintain the 16-byte alignment for sp.
  VIXL_ASSERT(allow_macro_instructions_);
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    const CPURegister& dst2 = registers.PopLowestIndex();
    const CPURegister& dst3 = registers.PopLowestIndex();
    int count = count_before - registers.Count();
    PopHelper(count, size, dst0, dst1, dst2, dst3);
  }
}


void MacroAssembler::PushMultipleTimes(int count, Register src) {
  VIXL_ASSERT(allow_macro_instructions_);
  int size = src.SizeInBytes();

  PrepareForPush(count, size);
  // Push up to four registers at a time if possible because if the current
  // stack pointer is sp and the register size is 32, registers must be pushed
  // in blocks of four in order to maintain the 16-byte alignment for sp.
  while (count >= 4) {
    PushHelper(4, size, src, src, src, src);
    count -= 4;
  }
  if (count >= 2) {
    PushHelper(2, size, src, src, NoReg, NoReg);
    count -= 2;
  }
  if (count == 1) {
    PushHelper(1, size, src, NoReg, NoReg, NoReg);
    count -= 1;
  }
  VIXL_ASSERT(count == 0);
}


void MacroAssembler::PushHelper(int count, int size,
                                const CPURegister& src0,
                                const CPURegister& src1,
                                const CPURegister& src2,
                                const CPURegister& src3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
  VIXL_ASSERT(size == src0.SizeInBytes());

  // When pushing multiple registers, the store order is chosen such that
  // Push(a, b) is equivalent to Push(a) followed by Push(b).
  switch (count) {
    case 1:
      VIXL_ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
      str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
      break;
    case 2:
      VIXL_ASSERT(src2.IsNone() && src3.IsNone());
      stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
      break;
    case 3:
      VIXL_ASSERT(src3.IsNone());
      stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
      str(src0, MemOperand(StackPointer(), 2 * size));
      break;
    case 4:
      // Skip over 4 * size, then fill in the gap. This allows four W registers
      // to be pushed using sp, whilst maintaining 16-byte alignment for sp at
      // all times.
      stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
      stp(src1, src0, MemOperand(StackPointer(), 2 * size));
      break;
    default:
      VIXL_UNREACHABLE();
  }
}
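
// Example (illustrative): Push(x0, x1) emits a single
//   stp x1, x0, [sp, #-16]!
// leaving x1 at the new sp and x0 at sp + 8, exactly the layout produced by
// Push(x0) followed by Push(x1).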


void MacroAssembler::PopHelper(int count, int size,
                               const CPURegister& dst0,
                               const CPURegister& dst1,
                               const CPURegister& dst2,
                               const CPURegister& dst3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  VIXL_ASSERT(size == dst0.SizeInBytes());

  // When popping multiple registers, the load order is chosen such that
  // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
  switch (count) {
    case 1:
      VIXL_ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
      ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
      break;
    case 2:
      VIXL_ASSERT(dst2.IsNone() && dst3.IsNone());
      ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
      break;
    case 3:
      VIXL_ASSERT(dst3.IsNone());
      ldr(dst2, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
      break;
    case 4:
      // Load the higher addresses first, then load the lower addresses and
      // skip the whole block in the second instruction. This allows four W
      // registers to be popped using sp, whilst maintaining 16-byte alignment
      // for sp at all times.
      ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
      break;
    default:
      VIXL_UNREACHABLE();
  }
}


void MacroAssembler::PrepareForPush(int count, int size) {
  if (sp.Is(StackPointer())) {
    // If the current stack pointer is sp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    VIXL_ASSERT((count * size) % 16 == 0);
  } else {
    // Even if the current stack pointer is not the system stack pointer (sp),
    // the system stack pointer will still be modified in order to comply with
    // ABI rules about accessing memory below the system stack pointer.
    BumpSystemStackPointer(count * size);
  }
}


void MacroAssembler::PrepareForPop(int count, int size) {
  USE(count);
  USE(size);
  if (sp.Is(StackPointer())) {
    // If the current stack pointer is sp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    VIXL_ASSERT((count * size) % 16 == 0);
  }
}

void MacroAssembler::Poke(const Register& src, const Operand& offset) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (offset.IsImmediate()) {
    VIXL_ASSERT(offset.immediate() >= 0);
  }

  Str(src, MemOperand(StackPointer(), offset));
}


void MacroAssembler::Peek(const Register& dst, const Operand& offset) {
  VIXL_ASSERT(allow_macro_instructions_);
  if (offset.IsImmediate()) {
    VIXL_ASSERT(offset.immediate() >= 0);
  }

  Ldr(dst, MemOperand(StackPointer(), offset));
}


void MacroAssembler::Claim(const Operand& size) {
  VIXL_ASSERT(allow_macro_instructions_);

  if (size.IsZero()) {
    return;
  }

  if (size.IsImmediate()) {
    VIXL_ASSERT(size.immediate() > 0);
    if (sp.Is(StackPointer())) {
      VIXL_ASSERT((size.immediate() % 16) == 0);
    }
  }

  if (!sp.Is(StackPointer())) {
    BumpSystemStackPointer(size);
  }

  Sub(StackPointer(), StackPointer(), size);
}


void MacroAssembler::Drop(const Operand& size) {
  VIXL_ASSERT(allow_macro_instructions_);

  if (size.IsZero()) {
    return;
  }

  if (size.IsImmediate()) {
    VIXL_ASSERT(size.immediate() > 0);
    if (sp.Is(StackPointer())) {
      VIXL_ASSERT((size.immediate() % 16) == 0);
    }
  }

  Add(StackPointer(), StackPointer(), size);
}


void MacroAssembler::PushCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is sp.
  VIXL_ASSERT(sp.Is(StackPointer()));

  MemOperand tos(sp, -2 * kXRegSizeInBytes, PreIndex);

  stp(x29, x30, tos);
  stp(x27, x28, tos);
  stp(x25, x26, tos);
  stp(x23, x24, tos);
  stp(x21, x22, tos);
  stp(x19, x20, tos);

  stp(d14, d15, tos);
  stp(d12, d13, tos);
  stp(d10, d11, tos);
  stp(d8, d9, tos);
}


void MacroAssembler::PopCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is sp.
  VIXL_ASSERT(sp.Is(StackPointer()));

  MemOperand tos(sp, 2 * kXRegSizeInBytes, PostIndex);

  ldp(d8, d9, tos);
  ldp(d10, d11, tos);
  ldp(d12, d13, tos);
  ldp(d14, d15, tos);

  ldp(x19, x20, tos);
  ldp(x21, x22, tos);
  ldp(x23, x24, tos);
  ldp(x25, x26, tos);
  ldp(x27, x28, tos);
  ldp(x29, x30, tos);
}

void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
  VIXL_ASSERT(!sp.Is(StackPointer()));
  // TODO: Several callers rely on this not using scratch registers, so we use
  // the assembler directly here. However, this means that large immediate
  // values of 'space' cannot be handled.
  InstructionAccurateScope scope(this);
  sub(sp, StackPointer(), space);
}


// This is the main Printf implementation. All callee-saved registers are
// preserved, but NZCV and the caller-saved registers may be clobbered.
void MacroAssembler::PrintfNoPreserve(const char * format,
                                      const CPURegister& arg0,
                                      const CPURegister& arg1,
                                      const CPURegister& arg2,
                                      const CPURegister& arg3) {
  // We cannot handle a caller-saved stack pointer. It doesn't make much sense
  // in most cases anyway, so this restriction shouldn't be too serious.
  VIXL_ASSERT(!kCallerSaved.IncludesAliasOf(StackPointer()));

  // The provided arguments, and their proper PCS registers.
  CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
  CPURegister pcs[kPrintfMaxArgCount];

  int arg_count = kPrintfMaxArgCount;

  // The PCS varargs registers for printf. Note that x0 is used for the printf
  // format string.
  static const CPURegList kPCSVarargs =
      CPURegList(CPURegister::kRegister, kXRegSize, 1, arg_count);
  static const CPURegList kPCSVarargsFP =
      CPURegList(CPURegister::kFPRegister, kDRegSize, 0, arg_count - 1);

  // We can use caller-saved registers as scratch values, except for the
  // arguments and the PCS registers where they might need to go.
  UseScratchRegisterScope temps(this);
  temps.Include(kCallerSaved);
  temps.Include(kCallerSavedFP);
  temps.Exclude(kPCSVarargs);
  temps.Exclude(kPCSVarargsFP);
  temps.Exclude(arg0, arg1, arg2, arg3);

  // Copies of the arg lists that we can iterate through.
  CPURegList pcs_varargs = kPCSVarargs;
  CPURegList pcs_varargs_fp = kPCSVarargsFP;

  // Place the arguments. There are lots of clever tricks and optimizations we
  // could use here, but Printf is a debug tool so instead we just try to keep
  // it simple: Move each input that isn't already in the right place to a
  // scratch register, then move everything back.
  for (unsigned i = 0; i < kPrintfMaxArgCount; i++) {
    // Work out the proper PCS register for this argument.
    if (args[i].IsRegister()) {
      pcs[i] = pcs_varargs.PopLowestIndex().X();
      // We might only need a W register here. We need to know the size of the
      // argument so we can properly encode it for the simulator call.
      if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
    } else if (args[i].IsFPRegister()) {
      // In C, floats are always cast to doubles for varargs calls.
      pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
    } else {
      VIXL_ASSERT(args[i].IsNone());
      arg_count = i;
      break;
    }

    // If the argument is already in the right place, leave it where it is.
    if (args[i].Aliases(pcs[i])) continue;

    // Otherwise, if the argument is in a PCS argument register, allocate an
    // appropriate scratch register and then move it out of the way.
    if (kPCSVarargs.IncludesAliasOf(args[i]) ||
        kPCSVarargsFP.IncludesAliasOf(args[i])) {
      if (args[i].IsRegister()) {
        Register old_arg = Register(args[i]);
        Register new_arg = temps.AcquireSameSizeAs(old_arg);
        Mov(new_arg, old_arg);
        args[i] = new_arg;
      } else {
        FPRegister old_arg = FPRegister(args[i]);
        FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
        Fmov(new_arg, old_arg);
        args[i] = new_arg;
      }
    }
  }

  // Do a second pass to move values into their final positions and perform any
  // conversions that may be required.
  for (int i = 0; i < arg_count; i++) {
    VIXL_ASSERT(pcs[i].type() == args[i].type());
    if (pcs[i].IsRegister()) {
      Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
    } else {
      VIXL_ASSERT(pcs[i].IsFPRegister());
      if (pcs[i].size() == args[i].size()) {
        Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
      } else {
        Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
      }
    }
  }

  // Load the format string into x0, as per the procedure-call standard.
  //
  // To make the code as portable as possible, the format string is encoded
  // directly in the instruction stream. It might be cleaner to encode it in a
  // literal pool, but since Printf is usually used for debugging, it is
  // beneficial for it to be minimally dependent on other features.
  temps.Exclude(x0);
  Label format_address;
  Adr(x0, &format_address);

  // Emit the format string directly in the instruction stream.
  { BlockLiteralPoolScope scope(this);
    Label after_data;
    B(&after_data);
    Bind(&format_address);
    EmitStringData(format);
    Unreachable();
    Bind(&after_data);
  }

  // We don't pass any arguments on the stack, but we still need to align the C
  // stack pointer to a 16-byte boundary for PCS compliance.
  if (!sp.Is(StackPointer())) {
    Bic(sp, StackPointer(), 0xf);
  }

  // Actually call printf. This part needs special handling for the simulator,
  // since the system printf function will use a different instruction set and
  // the procedure-call standard will not be compatible.
#ifdef USE_SIMULATOR
  { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
    hlt(kPrintfOpcode);
    dc32(arg_count);  // kPrintfArgCountOffset

    // Determine the argument pattern.
    uint32_t arg_pattern_list = 0;
    for (int i = 0; i < arg_count; i++) {
      uint32_t arg_pattern;
      if (pcs[i].IsRegister()) {
        arg_pattern = pcs[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
      } else {
        VIXL_ASSERT(pcs[i].Is64Bits());
        arg_pattern = kPrintfArgD;
      }
      VIXL_ASSERT(arg_pattern < (1 << kPrintfArgPatternBits));
      arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
    }
    dc32(arg_pattern_list);  // kPrintfArgPatternListOffset
  }
#else
  Register tmp = temps.AcquireX();
  Mov(tmp, reinterpret_cast<uintptr_t>(printf));
  Blr(tmp);
#endif
}


void MacroAssembler::Printf(const char* format,
                            CPURegister arg0,
                            CPURegister arg1,
                            CPURegister arg2,
                            CPURegister arg3) {
  // We can only print sp if it is the current stack pointer.
  if (!sp.Is(StackPointer())) {
    VIXL_ASSERT(!sp.Aliases(arg0));
    VIXL_ASSERT(!sp.Aliases(arg1));
    VIXL_ASSERT(!sp.Aliases(arg2));
    VIXL_ASSERT(!sp.Aliases(arg3));
  }

  // Make sure that the macro assembler doesn't try to use any of our arguments
  // as scratch registers.
  UseScratchRegisterScope exclude_all(this);
  exclude_all.ExcludeAll();

  // Preserve all caller-saved registers as well as NZCV.
  // If sp is the stack pointer, PushCPURegList asserts that the size of each
  // list is a multiple of 16 bytes.
  PushCPURegList(kCallerSaved);
  PushCPURegList(kCallerSavedFP);

  { UseScratchRegisterScope temps(this);
    // We can use caller-saved registers as scratch values (except for argN).
    temps.Include(kCallerSaved);
    temps.Include(kCallerSavedFP);
    temps.Exclude(arg0, arg1, arg2, arg3);

    // If any of the arguments are the current stack pointer, allocate a new
    // register for them, and adjust the value to compensate for pushing the
    // caller-saved registers.
    bool arg0_sp = StackPointer().Aliases(arg0);
    bool arg1_sp = StackPointer().Aliases(arg1);
    bool arg2_sp = StackPointer().Aliases(arg2);
    bool arg3_sp = StackPointer().Aliases(arg3);
    if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
      // Allocate a register to hold the original stack pointer value, to pass
      // to PrintfNoPreserve as an argument.
      Register arg_sp = temps.AcquireX();
      Add(arg_sp, StackPointer(),
          kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes());
      if (arg0_sp) arg0 = Register(arg_sp.code(), arg0.size());
      if (arg1_sp) arg1 = Register(arg_sp.code(), arg1.size());
      if (arg2_sp) arg2 = Register(arg_sp.code(), arg2.size());
      if (arg3_sp) arg3 = Register(arg_sp.code(), arg3.size());
    }

    // Preserve NZCV.
    Register tmp = temps.AcquireX();
    Mrs(tmp, NZCV);
    Push(tmp, xzr);
    temps.Release(tmp);

    PrintfNoPreserve(format, arg0, arg1, arg2, arg3);

    // Restore NZCV.
    tmp = temps.AcquireX();
    Pop(xzr, tmp);
    Msr(NZCV, tmp);
    temps.Release(tmp);
  }

  PopCPURegList(kCallerSavedFP);
  PopCPURegList(kCallerSaved);
}
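
// Illustrative only (assuming a MacroAssembler `masm` and the <inttypes.h>
// format macros): generated code can trace a live register value, e.g.
//
//   masm.Printf("x0 = 0x%016" PRIx64 "\n", x0);
//
// Because caller-saved registers and NZCV are preserved around the call, this
// can be dropped into generated code without disturbing its state.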


void MacroAssembler::Trace(TraceParameters parameters, TraceCommand command) {
  VIXL_ASSERT(allow_macro_instructions_);

#ifdef USE_SIMULATOR
  // The arguments to the trace pseudo instruction need to be contiguous in
  // memory, so make sure we don't try to emit a literal pool.
  InstructionAccurateScope scope(this, kTraceLength / kInstructionSize);

  Label start;
  bind(&start);

  // Refer to instructions-a64.h for a description of the marker and its
  // arguments.
  hlt(kTraceOpcode);

  VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceParamsOffset);
  dc32(parameters);

  VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceCommandOffset);
  dc32(command);
#else
  // Emit nothing on real hardware.
  USE(parameters);
  USE(command);
#endif
}
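
// A hypothetical sketch of how this might be driven, assuming the simulator's
// TraceParameters and TraceCommand enumerators (e.g. LOG_ALL, TRACE_ENABLE,
// TRACE_DISABLE) from instructions-a64.h:
//
//   masm.Trace(LOG_ALL, TRACE_ENABLE);
//   // ... generated code of interest ...
//   masm.Trace(LOG_ALL, TRACE_DISABLE);
//
// On real hardware both calls emit nothing, so they are safe to leave in.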


void MacroAssembler::Log(TraceParameters parameters) {
  VIXL_ASSERT(allow_macro_instructions_);

#ifdef USE_SIMULATOR
  // The arguments to the log pseudo instruction need to be contiguous in
  // memory, so make sure we don't try to emit a literal pool.
  InstructionAccurateScope scope(this, kLogLength / kInstructionSize);

  Label start;
  bind(&start);

  // Refer to instructions-a64.h for a description of the marker and its
  // arguments.
  hlt(kLogOpcode);

  VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kLogParamsOffset);
  dc32(parameters);
#else
  // Emit nothing on real hardware.
  USE(parameters);
#endif
}
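
// Similarly, a sketch (assuming e.g. LOG_REGS from TraceParameters): under
// that assumption, the simulator dumps the requested state once when the
// pseudo instruction is executed:
//
//   masm.Log(LOG_REGS);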


void MacroAssembler::EnableInstrumentation() {
  VIXL_ASSERT(!isprint(InstrumentStateEnable));
  InstructionAccurateScope scope(this, 1);
  movn(xzr, InstrumentStateEnable);
}


void MacroAssembler::DisableInstrumentation() {
  VIXL_ASSERT(!isprint(InstrumentStateDisable));
  InstructionAccurateScope scope(this, 1);
  movn(xzr, InstrumentStateDisable);
}


void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
  VIXL_ASSERT(strlen(marker_name) == 2);

  // We allow only printable characters in the marker names. Unprintable
  // characters are reserved for controlling features of the instrumentation.
  VIXL_ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));

  InstructionAccurateScope scope(this, 1);
  movn(xzr, (marker_name[1] << 8) | marker_name[0]);
}
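
// For example (hypothetical marker name), a two-character printable marker
// can tag a region in the instrumentation results:
//
//   masm.AnnotateInstrumentation("fn");
//
// The marker is packed into the 16-bit immediate of a movn writing to xzr, so
// it has no architectural effect when executed on real hardware.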


UseScratchRegisterScope::~UseScratchRegisterScope() {
  available_->set_list(old_available_);
  availablefp_->set_list(old_availablefp_);
}
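
// Typical usage, as a sketch: the scope hands out scratch registers from the
// MacroAssembler's available lists, and the destructor above restores those
// lists on exit:
//
//   {
//     UseScratchRegisterScope temps(&masm);
//     Register scratch = temps.AcquireX();
//     // ... use scratch ...
//   }  // The original scratch lists are restored here.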


bool UseScratchRegisterScope::IsAvailable(const CPURegister& reg) const {
  return available_->IncludesAliasOf(reg) || availablefp_->IncludesAliasOf(reg);
}


Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
  int code = AcquireNextAvailable(available_).code();
  return Register(code, reg.size());
}


FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
  int code = AcquireNextAvailable(availablefp_).code();
  return FPRegister(code, reg.size());
}


void UseScratchRegisterScope::Release(const CPURegister& reg) {
  if (reg.IsRegister()) {
    ReleaseByCode(available_, reg.code());
  } else if (reg.IsFPRegister()) {
    ReleaseByCode(availablefp_, reg.code());
  } else {
    VIXL_ASSERT(reg.IsNone());
  }
}


void UseScratchRegisterScope::Include(const CPURegList& list) {
  if (list.type() == CPURegister::kRegister) {
    // Make sure that neither sp nor xzr is included in the list.
    IncludeByRegList(available_, list.list() & ~(xzr.Bit() | sp.Bit()));
  } else {
    VIXL_ASSERT(list.type() == CPURegister::kFPRegister);
    IncludeByRegList(availablefp_, list.list());
  }
}


void UseScratchRegisterScope::Include(const Register& reg1,
                                      const Register& reg2,
                                      const Register& reg3,
                                      const Register& reg4) {
  RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
  // Make sure that neither sp nor xzr is included in the list.
  include &= ~(xzr.Bit() | sp.Bit());

  IncludeByRegList(available_, include);
}


void UseScratchRegisterScope::Include(const FPRegister& reg1,
                                      const FPRegister& reg2,
                                      const FPRegister& reg3,
                                      const FPRegister& reg4) {
  RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
  IncludeByRegList(availablefp_, include);
}


void UseScratchRegisterScope::Exclude(const CPURegList& list) {
  if (list.type() == CPURegister::kRegister) {
    ExcludeByRegList(available_, list.list());
  } else {
    VIXL_ASSERT(list.type() == CPURegister::kFPRegister);
    ExcludeByRegList(availablefp_, list.list());
  }
}


void UseScratchRegisterScope::Exclude(const Register& reg1,
                                      const Register& reg2,
                                      const Register& reg3,
                                      const Register& reg4) {
  RegList exclude = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
  ExcludeByRegList(available_, exclude);
}


void UseScratchRegisterScope::Exclude(const FPRegister& reg1,
                                      const FPRegister& reg2,
                                      const FPRegister& reg3,
                                      const FPRegister& reg4) {
  RegList excludefp = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
  ExcludeByRegList(availablefp_, excludefp);
}


void UseScratchRegisterScope::Exclude(const CPURegister& reg1,
                                      const CPURegister& reg2,
                                      const CPURegister& reg3,
                                      const CPURegister& reg4) {
  RegList exclude = 0;
  RegList excludefp = 0;

  const CPURegister regs[] = {reg1, reg2, reg3, reg4};

  for (unsigned i = 0; i < (sizeof(regs) / sizeof(regs[0])); i++) {
    if (regs[i].IsRegister()) {
      exclude |= regs[i].Bit();
    } else if (regs[i].IsFPRegister()) {
      excludefp |= regs[i].Bit();
    } else {
      VIXL_ASSERT(regs[i].IsNone());
    }
  }

  ExcludeByRegList(available_, exclude);
  ExcludeByRegList(availablefp_, excludefp);
}


void UseScratchRegisterScope::ExcludeAll() {
  ExcludeByRegList(available_, available_->list());
  ExcludeByRegList(availablefp_, availablefp_->list());
}


CPURegister UseScratchRegisterScope::AcquireNextAvailable(
    CPURegList* available) {
  VIXL_CHECK(!available->IsEmpty());
  CPURegister result = available->PopLowestIndex();
  VIXL_ASSERT(!AreAliased(result, xzr, sp));
  return result;
}


void UseScratchRegisterScope::ReleaseByCode(CPURegList* available, int code) {
  ReleaseByRegList(available, static_cast<RegList>(1) << code);
}


void UseScratchRegisterScope::ReleaseByRegList(CPURegList* available,
                                               RegList regs) {
  available->set_list(available->list() | regs);
}


void UseScratchRegisterScope::IncludeByRegList(CPURegList* available,
                                               RegList regs) {
  available->set_list(available->list() | regs);
}


void UseScratchRegisterScope::ExcludeByRegList(CPURegList* available,
                                               RegList exclude) {
  available->set_list(available->list() & ~exclude);
}

}  // namespace vixl