VIXL Release 1.3
Refer to the README.md and LICENCE files for details.
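The bulk of this patch is mechanical: the ASSERT/UNREACHABLE macros gain a VIXL_ prefix, and the old AppropriateTempFor()/Tmp0()/Tmp1() scratch-register helpers are replaced by an explicit, scoped mechanism, UseScratchRegisterScope (defined at the end of the diff). The patch also adds a BranchType-based B() overload and Fcmp()/Fmov() overloads that accept floating-point immediates.

A minimal sketch of how client code might use the new scratch-register API. This is not part of the patch: the constructor signature UseScratchRegisterScope(MacroAssembler*) is inferred from the temps(this) call sites below, the single-register Exclude() call relies on the NoReg defaults implied by the one-argument uses in the diff, and EmitWithScratch is a hypothetical helper.

    #include "a64/macro-assembler-a64.h"

    void EmitWithScratch(vixl::MacroAssembler* masm) {
      vixl::UseScratchRegisterScope temps(masm);
      // Keep x1 out of the scratch pool so neither this code nor any macro
      // instruction can clobber it while the scope is live.
      temps.Exclude(vixl::x1);
      // Acquire a temporary of the same width as x0. It is returned to the
      // pool when `temps` is destroyed (see the destructor in this diff).
      vixl::Register scratch = temps.AcquireSameSizeAs(vixl::x0);
      masm->Mov(scratch, 0x12345678);
      masm->Add(vixl::x0, vixl::x1, vixl::Operand(scratch));
    }

Because the destructor simply restores the saved available lists, scopes nest safely: an inner scope can Acquire, Exclude, or even TmpList()->Combine() extra registers (as Printf does below) without leaking state into the outer scope.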
diff --git a/src/a64/macro-assembler-a64.cc b/src/a64/macro-assembler-a64.cc
index a7e2c2e..fc7b043 100644
--- a/src/a64/macro-assembler-a64.cc
+++ b/src/a64/macro-assembler-a64.cc
@@ -27,10 +27,29 @@
#include "a64/macro-assembler-a64.h"
namespace vixl {
+void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
+ VIXL_ASSERT((reg.Is(NoReg) || (type >= kBranchTypeFirstUsingReg)) &&
+ ((bit == -1) || (type >= kBranchTypeFirstUsingBit)));
+ if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
+ B(static_cast<Condition>(type), label);
+ } else {
+ switch (type) {
+ case always: B(label); break;
+ case never: break;
+ case reg_zero: Cbz(reg, label); break;
+ case reg_not_zero: Cbnz(reg, label); break;
+ case reg_bit_clear: Tbz(reg, bit, label); break;
+ case reg_bit_set: Tbnz(reg, bit, label); break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ }
+}
+
void MacroAssembler::And(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
LogicalMacro(rd, rn, operand, AND);
}
@@ -38,14 +57,14 @@
void MacroAssembler::Ands(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
LogicalMacro(rd, rn, operand, ANDS);
}
void MacroAssembler::Tst(const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
Ands(AppropriateZeroRegFor(rn), rn, operand);
}
@@ -53,7 +72,7 @@
void MacroAssembler::Bic(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
LogicalMacro(rd, rn, operand, BIC);
}
@@ -61,7 +80,7 @@
void MacroAssembler::Bics(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
LogicalMacro(rd, rn, operand, BICS);
}
@@ -69,7 +88,7 @@
void MacroAssembler::Orr(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
LogicalMacro(rd, rn, operand, ORR);
}
@@ -77,7 +96,7 @@
void MacroAssembler::Orn(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
LogicalMacro(rd, rn, operand, ORN);
}
@@ -85,7 +104,7 @@
void MacroAssembler::Eor(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
LogicalMacro(rd, rn, operand, EOR);
}
@@ -93,7 +112,7 @@
void MacroAssembler::Eon(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
LogicalMacro(rd, rn, operand, EON);
}
@@ -102,10 +121,12 @@
const Register& rn,
const Operand& operand,
LogicalOp op) {
+ UseScratchRegisterScope temps(this);
+
if (operand.IsImmediate()) {
int64_t immediate = operand.immediate();
unsigned reg_size = rd.size();
- ASSERT(rd.Is64Bits() || is_uint32(immediate));
+ VIXL_ASSERT(rd.Is64Bits() || is_uint32(immediate));
// If the operation is NOT, invert the operation and immediate.
if ((op & NOT) == NOT) {
@@ -130,10 +151,10 @@
case BICS:
break;
default:
- UNREACHABLE();
+ VIXL_UNREACHABLE();
}
- } else if ((rd.Is64Bits() && (immediate == -1L)) ||
- (rd.Is32Bits() && (immediate == 0xffffffffL))) {
+ } else if ((rd.Is64Bits() && (immediate == -1)) ||
+ (rd.Is32Bits() && (immediate == 0xffffffff))) {
switch (op) {
case AND:
Mov(rd, rn);
@@ -148,7 +169,7 @@
case BICS:
break;
default:
- UNREACHABLE();
+ VIXL_UNREACHABLE();
}
}
@@ -158,7 +179,7 @@
LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
} else {
// Immediate can't be encoded: synthesize using move immediate.
- Register temp = AppropriateTempFor(rn);
+ Register temp = temps.AcquireSameSizeAs(rn);
Mov(temp, immediate);
if (rd.Is(sp)) {
// If rd is the stack pointer we cannot use it as the destination
@@ -170,19 +191,21 @@
}
}
} else if (operand.IsExtendedRegister()) {
- ASSERT(operand.reg().size() <= rd.size());
+ VIXL_ASSERT(operand.reg().size() <= rd.size());
// Add/sub extended supports shift <= 4. We want to support exactly the
// same modes here.
- ASSERT(operand.shift_amount() <= 4);
- ASSERT(operand.reg().Is64Bits() ||
+ VIXL_ASSERT(operand.shift_amount() <= 4);
+ VIXL_ASSERT(operand.reg().Is64Bits() ||
((operand.extend() != UXTX) && (operand.extend() != SXTX)));
- Register temp = AppropriateTempFor(rn, operand.reg());
+
+ temps.Exclude(operand.reg());
+ Register temp = temps.AcquireSameSizeAs(rn);
EmitExtendShift(temp, operand.reg(), operand.extend(),
operand.shift_amount());
Logical(rd, rn, Operand(temp), op);
} else {
// The operand can be encoded in the instruction.
- ASSERT(operand.IsShiftedRegister());
+ VIXL_ASSERT(operand.IsShiftedRegister());
Logical(rd, rn, operand, op);
}
}
@@ -191,7 +214,7 @@
void MacroAssembler::Mov(const Register& rd,
const Operand& operand,
DiscardMoveMode discard_mode) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
if (operand.IsImmediate()) {
// Call the macro assembler for generic immediates.
Mov(rd, operand.immediate());
@@ -224,14 +247,17 @@
void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
if (operand.IsImmediate()) {
// Call the macro assembler for generic immediates.
Mvn(rd, operand.immediate());
} else if (operand.IsExtendedRegister()) {
+ UseScratchRegisterScope temps(this);
+ temps.Exclude(operand.reg());
+
// Emit two instructions for the extend case. This differs from Mov, as
// the extend and invert can't be achieved in one instruction.
- Register temp = AppropriateTempFor(rd, operand.reg());
+ Register temp = temps.AcquireSameSizeAs(rd);
EmitExtendShift(temp, operand.reg(), operand.extend(),
operand.shift_amount());
mvn(rd, Operand(temp));
@@ -244,8 +270,8 @@
void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
+ VIXL_ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
// Immediates on AArch64 can be produced using an initial value, and zero to
// three move-keep (movk) operations.
@@ -274,7 +300,7 @@
movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask));
} else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
// Immediate can be represented in a logical orr instruction.
- ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rd.IsZero());
LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR);
} else {
// Generic immediate case. Imm will be represented by
@@ -288,24 +314,25 @@
// halfwords, it's more efficient to use move-inverted.
if (CountClearHalfWords(~imm, reg_size) >
CountClearHalfWords(imm, reg_size)) {
- ignored_halfword = 0xffffL;
+ ignored_halfword = 0xffff;
invert_move = true;
}
// Mov instructions can't move values into the stack pointer, so set up a
// temporary register, if needed.
- Register temp = rd.IsSP() ? AppropriateTempFor(rd) : rd;
+ UseScratchRegisterScope temps(this);
+ Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
// Iterate through the halfwords. Use movn/movz for the first non-ignored
// halfword, and movk for subsequent halfwords.
- ASSERT((reg_size % 16) == 0);
+ VIXL_ASSERT((reg_size % 16) == 0);
bool first_mov_done = false;
for (unsigned i = 0; i < (temp.size() / 16); i++) {
- uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
+ uint64_t imm16 = (imm >> (16 * i)) & 0xffff;
if (imm16 != ignored_halfword) {
if (!first_mov_done) {
if (invert_move) {
- movn(temp, (~imm16) & 0xffffL, 16 * i);
+ movn(temp, ~imm16 & 0xffff, 16 * i);
} else {
movz(temp, imm16, 16 * i);
}
@@ -317,7 +344,7 @@
}
}
- ASSERT(first_mov_done);
+ VIXL_ASSERT(first_mov_done);
// Move the temporary if the original destination register was the stack
// pointer.
@@ -329,7 +356,7 @@
unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
- ASSERT((reg_size % 8) == 0);
+ VIXL_ASSERT((reg_size % 8) == 0);
int count = 0;
for (unsigned i = 0; i < (reg_size / 16); i++) {
if ((imm & 0xffff) == 0) {
@@ -344,7 +371,7 @@
// The movz instruction can generate immediates containing an arbitrary 16-bit
// value, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
- ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
+ VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}
@@ -360,7 +387,7 @@
const Operand& operand,
StatusFlags nzcv,
Condition cond) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
if (operand.IsImmediate() && (operand.immediate() < 0)) {
ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
} else {
@@ -373,7 +400,7 @@
const Operand& operand,
StatusFlags nzcv,
Condition cond) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
if (operand.IsImmediate() && (operand.immediate() < 0)) {
ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
} else {
@@ -387,16 +414,17 @@
StatusFlags nzcv,
Condition cond,
ConditionalCompareOp op) {
- ASSERT((cond != al) && (cond != nv));
+ VIXL_ASSERT((cond != al) && (cond != nv));
if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
(operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
// The immediate can be encoded in the instruction, or the operand is an
// unshifted register: call the assembler.
ConditionalCompare(rn, operand, nzcv, cond, op);
} else {
+ UseScratchRegisterScope temps(this);
// The operand isn't directly supported by the instruction: perform the
// operation on a temporary register.
- Register temp = AppropriateTempFor(rn);
+ Register temp = temps.AcquireSameSizeAs(rn);
Mov(temp, operand);
ConditionalCompare(rn, temp, nzcv, cond, op);
}
@@ -407,10 +435,10 @@
const Register& rn,
const Operand& operand,
Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ASSERT(!rn.IsZero());
- ASSERT((cond != al) && (cond != nv));
+ VIXL_ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT((cond != al) && (cond != nv));
if (operand.IsImmediate()) {
// Immediate argument. Handle special cases of 0, 1 and -1 using zero
// register.
@@ -423,7 +451,8 @@
} else if (imm == -1) {
csinv(rd, rn, zr, cond);
} else {
- Register temp = AppropriateTempFor(rn);
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
Mov(temp, operand.immediate());
csel(rd, rn, temp, cond);
}
@@ -432,7 +461,8 @@
csel(rd, rn, operand.reg(), cond);
} else {
// All other arguments.
- Register temp = AppropriateTempFor(rn);
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
Mov(temp, operand);
csel(rd, rn, temp, cond);
}
@@ -442,7 +472,7 @@
void MacroAssembler::Add(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
if (operand.IsImmediate() && (operand.immediate() < 0)) {
AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, SUB);
} else {
@@ -454,7 +484,7 @@
void MacroAssembler::Adds(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
if (operand.IsImmediate() && (operand.immediate() < 0)) {
AddSubMacro(rd, rn, -operand.immediate(), SetFlags, SUB);
} else {
@@ -466,7 +496,7 @@
void MacroAssembler::Sub(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
if (operand.IsImmediate() && (operand.immediate() < 0)) {
AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, ADD);
} else {
@@ -478,7 +508,7 @@
void MacroAssembler::Subs(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
if (operand.IsImmediate() && (operand.immediate() < 0)) {
AddSubMacro(rd, rn, -operand.immediate(), SetFlags, ADD);
} else {
@@ -488,20 +518,70 @@
void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
Adds(AppropriateZeroRegFor(rn), rn, operand);
}
void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
Subs(AppropriateZeroRegFor(rn), rn, operand);
}
+void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
+ VIXL_ASSERT(allow_macro_instructions_);
+ if (value != 0.0) {
+ UseScratchRegisterScope temps(this);
+ FPRegister tmp = temps.AcquireSameSizeAs(fn);
+ Fmov(tmp, value);
+ fcmp(fn, tmp);
+ } else {
+ fcmp(fn, value);
+ }
+}
+
+
+void MacroAssembler::Fmov(FPRegister fd, double imm) {
+ VIXL_ASSERT(allow_macro_instructions_);
+ if (fd.Is32Bits()) {
+ Fmov(fd, static_cast<float>(imm));
+ return;
+ }
+
+ VIXL_ASSERT(fd.Is64Bits());
+ if (IsImmFP64(imm)) {
+ fmov(fd, imm);
+ } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
+ fmov(fd, xzr);
+ } else {
+ ldr(fd, imm);
+ }
+}
+
+
+void MacroAssembler::Fmov(FPRegister fd, float imm) {
+ VIXL_ASSERT(allow_macro_instructions_);
+ if (fd.Is64Bits()) {
+ Fmov(fd, static_cast<double>(imm));
+ return;
+ }
+
+ VIXL_ASSERT(fd.Is32Bits());
+ if (IsImmFP32(imm)) {
+ fmov(fd, imm);
+ } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
+ fmov(fd, wzr);
+ } else {
+ ldr(fd, imm);
+ }
+}
+
+
void MacroAssembler::Neg(const Register& rd,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
if (operand.IsImmediate()) {
Mov(rd, -operand.immediate());
} else {
@@ -512,7 +592,7 @@
void MacroAssembler::Negs(const Register& rd,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
Subs(rd, AppropriateZeroRegFor(rd), operand);
}
@@ -531,7 +611,8 @@
if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
(rn.IsZero() && !operand.IsShiftedRegister()) ||
(operand.IsShiftedRegister() && (operand.shift() == ROR))) {
- Register temp = AppropriateTempFor(rn);
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
Mov(temp, operand);
AddSub(rd, rn, temp, S, op);
} else {
@@ -543,7 +624,7 @@
void MacroAssembler::Adc(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
}
@@ -551,7 +632,7 @@
void MacroAssembler::Adcs(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
}
@@ -559,7 +640,7 @@
void MacroAssembler::Sbc(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
}
@@ -567,14 +648,14 @@
void MacroAssembler::Sbcs(const Register& rd,
const Register& rn,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
}
void MacroAssembler::Ngc(const Register& rd,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
Register zr = AppropriateZeroRegFor(rd);
Sbc(rd, zr, operand);
}
@@ -582,7 +663,7 @@
void MacroAssembler::Ngcs(const Register& rd,
const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
Register zr = AppropriateZeroRegFor(rd);
Sbcs(rd, zr, operand);
}
@@ -593,32 +674,35 @@
const Operand& operand,
FlagsUpdate S,
AddSubWithCarryOp op) {
- ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rn.size());
+ UseScratchRegisterScope temps(this);
if (operand.IsImmediate() ||
(operand.IsShiftedRegister() && (operand.shift() == ROR))) {
// Add/sub with carry (immediate or ROR shifted register.)
- Register temp = AppropriateTempFor(rn);
+ Register temp = temps.AcquireSameSizeAs(rn);
Mov(temp, operand);
AddSubWithCarry(rd, rn, Operand(temp), S, op);
} else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
// Add/sub with carry (shifted register).
- ASSERT(operand.reg().size() == rd.size());
- ASSERT(operand.shift() != ROR);
- ASSERT(is_uintn(rd.size() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2,
+ VIXL_ASSERT(operand.reg().size() == rd.size());
+ VIXL_ASSERT(operand.shift() != ROR);
+ VIXL_ASSERT(is_uintn(rd.size() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2,
operand.shift_amount()));
- Register temp = AppropriateTempFor(rn, operand.reg());
+ temps.Exclude(operand.reg());
+ Register temp = temps.AcquireSameSizeAs(rn);
EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
AddSubWithCarry(rd, rn, Operand(temp), S, op);
} else if (operand.IsExtendedRegister()) {
// Add/sub with carry (extended register).
- ASSERT(operand.reg().size() <= rd.size());
+ VIXL_ASSERT(operand.reg().size() <= rd.size());
// Add/sub extended supports a shift <= 4. We want to support exactly the
// same modes.
- ASSERT(operand.shift_amount() <= 4);
- ASSERT(operand.reg().Is64Bits() ||
+ VIXL_ASSERT(operand.shift_amount() <= 4);
+ VIXL_ASSERT(operand.reg().Is64Bits() ||
((operand.extend() != UXTX) && (operand.extend() != SXTX)));
- Register temp = AppropriateTempFor(rn, operand.reg());
+ temps.Exclude(operand.reg());
+ Register temp = temps.AcquireSameSizeAs(rn);
EmitExtendShift(temp, operand.reg(), operand.extend(),
operand.shift_amount());
AddSubWithCarry(rd, rn, Operand(temp), S, op);
@@ -649,7 +733,8 @@
!IsImmLSUnscaled(offset)) {
// Immediate offset that can't be encoded using unsigned or unscaled
// addressing modes.
- Register temp = AppropriateTempFor(addr.base());
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(addr.base());
Mov(temp, addr.offset());
LoadStore(rt, MemOperand(addr.base(), temp), op);
} else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
@@ -669,9 +754,9 @@
void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
const CPURegister& src2, const CPURegister& src3) {
- ASSERT(allow_macro_instructions_);
- ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
- ASSERT(src0.IsValid());
+ VIXL_ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+ VIXL_ASSERT(src0.IsValid());
int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
int size = src0.SizeInBytes();
@@ -685,10 +770,10 @@
const CPURegister& dst2, const CPURegister& dst3) {
// It is not valid to pop into the same register more than once in one
// instruction, not even into the zero register.
- ASSERT(allow_macro_instructions_);
- ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
- ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
- ASSERT(dst0.IsValid());
+ VIXL_ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
+ VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ VIXL_ASSERT(dst0.IsValid());
int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
int size = dst0.SizeInBytes();
@@ -705,7 +790,7 @@
// Push up to four registers at a time because if the current stack pointer is
// sp and reg_size is 32, registers must be pushed in blocks of four in order
// to maintain the 16-byte alignment for sp.
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
while (!registers.IsEmpty()) {
int count_before = registers.Count();
const CPURegister& src0 = registers.PopHighestIndex();
@@ -725,7 +810,7 @@
// Pop up to four registers at a time because if the current stack pointer is
// sp and reg_size is 32, registers must be popped in blocks of four in order
// to maintain the 16-byte alignment for sp.
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
while (!registers.IsEmpty()) {
int count_before = registers.Count();
const CPURegister& dst0 = registers.PopLowestIndex();
@@ -739,7 +824,7 @@
void MacroAssembler::PushMultipleTimes(int count, Register src) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
int size = src.SizeInBytes();
PrepareForPush(count, size);
@@ -758,7 +843,7 @@
PushHelper(1, size, src, NoReg, NoReg, NoReg);
count -= 1;
}
- ASSERT(count == 0);
+ VIXL_ASSERT(count == 0);
}
@@ -770,22 +855,22 @@
// Ensure that we don't unintentionally modify scratch or debug registers.
InstructionAccurateScope scope(this);
- ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
- ASSERT(size == src0.SizeInBytes());
+ VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+ VIXL_ASSERT(size == src0.SizeInBytes());
// When pushing multiple registers, the store order is chosen such that
// Push(a, b) is equivalent to Push(a) followed by Push(b).
switch (count) {
case 1:
- ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
+ VIXL_ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
break;
case 2:
- ASSERT(src2.IsNone() && src3.IsNone());
+ VIXL_ASSERT(src2.IsNone() && src3.IsNone());
stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
break;
case 3:
- ASSERT(src3.IsNone());
+ VIXL_ASSERT(src3.IsNone());
stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
str(src0, MemOperand(StackPointer(), 2 * size));
break;
@@ -797,7 +882,7 @@
stp(src1, src0, MemOperand(StackPointer(), 2 * size));
break;
default:
- UNREACHABLE();
+ VIXL_UNREACHABLE();
}
}
@@ -810,22 +895,22 @@
// Ensure that we don't unintentionally modify scratch or debug registers.
InstructionAccurateScope scope(this);
- ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
- ASSERT(size == dst0.SizeInBytes());
+ VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ VIXL_ASSERT(size == dst0.SizeInBytes());
// When popping multiple registers, the load order is chosen such that
// Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
switch (count) {
case 1:
- ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
+ VIXL_ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
break;
case 2:
- ASSERT(dst2.IsNone() && dst3.IsNone());
+ VIXL_ASSERT(dst2.IsNone() && dst3.IsNone());
ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
break;
case 3:
- ASSERT(dst3.IsNone());
+ VIXL_ASSERT(dst3.IsNone());
ldr(dst2, MemOperand(StackPointer(), 2 * size));
ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
break;
@@ -838,7 +923,7 @@
ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
break;
default:
- UNREACHABLE();
+ VIXL_UNREACHABLE();
}
}
@@ -848,7 +933,7 @@
// If the current stack pointer is sp, then it must be aligned to 16 bytes
// on entry and the total size of the specified registers must also be a
// multiple of 16 bytes.
- ASSERT((count * size) % 16 == 0);
+ VIXL_ASSERT((count * size) % 16 == 0);
} else {
// Even if the current stack pointer is not the system stack pointer (sp),
// the system stack pointer will still be modified in order to comply with
@@ -865,14 +950,14 @@
// If the current stack pointer is sp, then it must be aligned to 16 bytes
// on entry and the total size of the specified registers must also be a
// multiple of 16 bytes.
- ASSERT((count * size) % 16 == 0);
+ VIXL_ASSERT((count * size) % 16 == 0);
}
}
void MacroAssembler::Poke(const Register& src, const Operand& offset) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
if (offset.IsImmediate()) {
- ASSERT(offset.immediate() >= 0);
+ VIXL_ASSERT(offset.immediate() >= 0);
}
Str(src, MemOperand(StackPointer(), offset));
@@ -880,9 +965,9 @@
void MacroAssembler::Peek(const Register& dst, const Operand& offset) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
if (offset.IsImmediate()) {
- ASSERT(offset.immediate() >= 0);
+ VIXL_ASSERT(offset.immediate() >= 0);
}
Ldr(dst, MemOperand(StackPointer(), offset));
@@ -890,16 +975,16 @@
void MacroAssembler::Claim(const Operand& size) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
if (size.IsZero()) {
return;
}
if (size.IsImmediate()) {
- ASSERT(size.immediate() > 0);
+ VIXL_ASSERT(size.immediate() > 0);
if (sp.Is(StackPointer())) {
- ASSERT((size.immediate() % 16) == 0);
+ VIXL_ASSERT((size.immediate() % 16) == 0);
}
}
@@ -912,16 +997,16 @@
void MacroAssembler::Drop(const Operand& size) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
if (size.IsZero()) {
return;
}
if (size.IsImmediate()) {
- ASSERT(size.immediate() > 0);
+ VIXL_ASSERT(size.immediate() > 0);
if (sp.Is(StackPointer())) {
- ASSERT((size.immediate() % 16) == 0);
+ VIXL_ASSERT((size.immediate() % 16) == 0);
}
}
@@ -934,7 +1019,7 @@
InstructionAccurateScope scope(this);
// This method must not be called unless the current stack pointer is sp.
- ASSERT(sp.Is(StackPointer()));
+ VIXL_ASSERT(sp.Is(StackPointer()));
MemOperand tos(sp, -2 * kXRegSizeInBytes, PreIndex);
@@ -957,7 +1042,7 @@
InstructionAccurateScope scope(this);
// This method must not be called unless the current stack pointer is sp.
- ASSERT(sp.Is(StackPointer()));
+ VIXL_ASSERT(sp.Is(StackPointer()));
MemOperand tos(sp, 2 * kXRegSizeInBytes, PostIndex);
@@ -975,7 +1060,7 @@
}
void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
- ASSERT(!sp.Is(StackPointer()));
+ VIXL_ASSERT(!sp.Is(StackPointer()));
// TODO: Several callers rely on this not using scratch registers, so we use
// the assembler directly here. However, this means that large immediate
// values of 'space' cannot be handled.
@@ -993,16 +1078,20 @@
const CPURegister& arg3) {
// We cannot handle a caller-saved stack pointer. It doesn't make much sense
// in most cases anyway, so this restriction shouldn't be too serious.
- ASSERT(!kCallerSaved.IncludesAliasOf(StackPointer()));
+ VIXL_ASSERT(!kCallerSaved.IncludesAliasOf(StackPointer()));
- // We cannot print Tmp0() or Tmp1() as they're used internally by the macro
- // assembler. We cannot print the stack pointer because it is typically used
- // to preserve caller-saved registers (using other Printf variants which
- // depend on this helper).
- ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg0));
- ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg1));
- ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg2));
- ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg3));
+ // Make sure that the macro assembler doesn't try to use any of our arguments
+ // as scratch registers.
+ UseScratchRegisterScope temps(this);
+ temps.Exclude(arg0, arg1, arg2, arg3);
+
+ // We cannot print the stack pointer because it is typically used to preserve
+ // caller-saved registers (using other Printf variants which depend on this
+ // helper).
+ VIXL_ASSERT(!AreAliased(arg0, StackPointer()));
+ VIXL_ASSERT(!AreAliased(arg1, StackPointer()));
+ VIXL_ASSERT(!AreAliased(arg2, StackPointer()));
+ VIXL_ASSERT(!AreAliased(arg3, StackPointer()));
static const int kMaxArgCount = 4;
// Assume that we have the maximum number of arguments until we know
@@ -1049,12 +1138,12 @@
break;
}
}
- ASSERT((arg_count >= 0) && (arg_count <= kMaxArgCount));
+ VIXL_ASSERT((arg_count >= 0) && (arg_count <= kMaxArgCount));
// Check that every remaining argument is NoCPUReg.
for (int i = arg_count; i < kMaxArgCount; i++) {
- ASSERT(args[i].IsNone());
+ VIXL_ASSERT(args[i].IsNone());
}
- ASSERT((arg_count == 0) || AreSameSizeAndType(args[0], args[1],
+ VIXL_ASSERT((arg_count == 0) || AreSameSizeAndType(args[0], args[1],
args[2], args[3],
pcs[0], pcs[1],
pcs[2], pcs[3]));
@@ -1129,8 +1218,9 @@
dc32(pcs[0].type());
}
#else
- Mov(Tmp0(), reinterpret_cast<uintptr_t>(printf));
- Blr(Tmp0());
+ Register tmp = temps.AcquireX();
+ Mov(tmp, reinterpret_cast<uintptr_t>(printf));
+ Blr(tmp);
#endif
}
@@ -1140,26 +1230,40 @@
const CPURegister& arg1,
const CPURegister& arg2,
const CPURegister& arg3) {
+ // Make sure that the macro assembler doesn't try to use any of our arguments
+ // as scratch registers.
+ UseScratchRegisterScope exclude_all(this);
+ exclude_all.ExcludeAll();
+
// Preserve all caller-saved registers as well as NZCV.
// If sp is the stack pointer, PushCPURegList asserts that the size of each
// list is a multiple of 16 bytes.
PushCPURegList(kCallerSaved);
PushCPURegList(kCallerSavedFP);
- // Use Tmp0() as a scratch register. It is not accepted by Printf so it will
- // never overlap an argument register.
- Mrs(Tmp0(), NZCV);
- Push(Tmp0(), xzr);
- PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
+ { UseScratchRegisterScope temps(this);
+ // We can use caller-saved registers as scratch values (except for argN).
+ TmpList()->Combine(kCallerSaved);
+ FPTmpList()->Combine(kCallerSavedFP);
+ temps.Exclude(arg0, arg1, arg2, arg3);
- Pop(xzr, Tmp0());
- Msr(NZCV, Tmp0());
+ // Preserve NZCV.
+ Register tmp = temps.AcquireX();
+ Mrs(tmp, NZCV);
+ Push(tmp, xzr);
+
+ PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
+
+ Pop(xzr, tmp);
+ Msr(NZCV, tmp);
+ }
+
PopCPURegList(kCallerSavedFP);
PopCPURegList(kCallerSaved);
}
void MacroAssembler::Trace(TraceParameters parameters, TraceCommand command) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
#ifdef USE_SIMULATOR
// The arguments to the trace pseudo instruction need to be contiguous in
@@ -1173,10 +1277,10 @@
// arguments.
hlt(kTraceOpcode);
- ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceParamsOffset);
+ VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceParamsOffset);
dc32(parameters);
- ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceCommandOffset);
+ VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceCommandOffset);
dc32(command);
#else
// Emit nothing on real hardware.
@@ -1187,7 +1291,7 @@
void MacroAssembler::Log(TraceParameters parameters) {
- ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(allow_macro_instructions_);
#ifdef USE_SIMULATOR
// The arguments to the log pseudo instruction need to be contiguous in
@@ -1201,7 +1305,7 @@
// arguments.
hlt(kLogOpcode);
- ASSERT(SizeOfCodeGeneratedSince(&start) == kLogParamsOffset);
+ VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kLogParamsOffset);
dc32(parameters);
#else
// Emit nothing on real hardware.
@@ -1211,28 +1315,158 @@
void MacroAssembler::EnableInstrumentation() {
- ASSERT(!isprint(InstrumentStateEnable));
+ VIXL_ASSERT(!isprint(InstrumentStateEnable));
InstructionAccurateScope scope(this, 1);
movn(xzr, InstrumentStateEnable);
}
void MacroAssembler::DisableInstrumentation() {
- ASSERT(!isprint(InstrumentStateDisable));
+ VIXL_ASSERT(!isprint(InstrumentStateDisable));
InstructionAccurateScope scope(this, 1);
movn(xzr, InstrumentStateDisable);
}
void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
- ASSERT(strlen(marker_name) == 2);
+ VIXL_ASSERT(strlen(marker_name) == 2);
// We allow only printable characters in the marker names. Unprintable
// characters are reserved for controlling features of the instrumentation.
- ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));
+ VIXL_ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));
InstructionAccurateScope scope(this, 1);
movn(xzr, (marker_name[1] << 8) | marker_name[0]);
}
+
+UseScratchRegisterScope::~UseScratchRegisterScope() {
+ available_->set_list(old_available_);
+ availablefp_->set_list(old_availablefp_);
+}
+
+
+Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
+ int code = AcquireNextAvailable(available_).code();
+ return Register(code, reg.SizeInBits());
+}
+
+
+FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
+ int code = AcquireNextAvailable(availablefp_).code();
+ return FPRegister(code, reg.SizeInBits());
+}
+
+
+void UseScratchRegisterScope::Release(const CPURegister& reg) {
+ if (reg.IsRegister()) {
+ ReleaseByCode(available_, reg.code());
+ } else if (reg.IsFPRegister()) {
+ ReleaseByCode(availablefp_, reg.code());
+ } else {
+ VIXL_ASSERT(reg.IsNone());
+ }
+}
+
+
+void UseScratchRegisterScope::Include(const Register& reg1,
+ const Register& reg2,
+ const Register& reg3,
+ const Register& reg4) {
+ RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
+ // Make sure that neither sp nor xzr is included in the list.
+ include &= ~(xzr.Bit() | sp.Bit());
+
+ IncludeByRegList(available_, include);
+}
+
+
+void UseScratchRegisterScope::Include(const FPRegister& reg1,
+ const FPRegister& reg2,
+ const FPRegister& reg3,
+ const FPRegister& reg4) {
+ RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
+ IncludeByRegList(availablefp_, include);
+}
+
+
+void UseScratchRegisterScope::Exclude(const Register& reg1,
+ const Register& reg2,
+ const Register& reg3,
+ const Register& reg4) {
+ RegList exclude = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
+ ExcludeByRegList(available_, exclude);
+}
+
+
+void UseScratchRegisterScope::Exclude(const FPRegister& reg1,
+ const FPRegister& reg2,
+ const FPRegister& reg3,
+ const FPRegister& reg4) {
+ RegList excludefp = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
+ ExcludeByRegList(availablefp_, excludefp);
+}
+
+
+void UseScratchRegisterScope::Exclude(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3,
+ const CPURegister& reg4) {
+ RegList exclude = 0;
+ RegList excludefp = 0;
+
+ const CPURegister regs[] = {reg1, reg2, reg3, reg4};
+
+ for (unsigned i = 0; i < (sizeof(regs) / sizeof(regs[0])); i++) {
+ if (regs[i].IsRegister()) {
+ exclude |= regs[i].Bit();
+ } else if (regs[i].IsFPRegister()) {
+ excludefp |= regs[i].Bit();
+ } else {
+ VIXL_ASSERT(regs[i].IsNone());
+ }
+ }
+
+ ExcludeByRegList(available_, exclude);
+ ExcludeByRegList(availablefp_, excludefp);
+}
+
+
+void UseScratchRegisterScope::ExcludeAll() {
+ ExcludeByRegList(available_, available_->list());
+ ExcludeByRegList(availablefp_, availablefp_->list());
+}
+
+
+CPURegister UseScratchRegisterScope::AcquireNextAvailable(
+ CPURegList* available) {
+ VIXL_CHECK(!available->IsEmpty());
+ CPURegister result = available->PopLowestIndex();
+ VIXL_ASSERT(!AreAliased(result, xzr, sp));
+ return result;
+}
+
+
+void UseScratchRegisterScope::ReleaseByCode(CPURegList* available, int code) {
+ ReleaseByRegList(available, static_cast<RegList>(1) << code);
+}
+
+
+void UseScratchRegisterScope::ReleaseByRegList(CPURegList* available,
+ RegList regs) {
+ available->set_list(available->list() | regs);
+}
+
+
+void UseScratchRegisterScope::IncludeByRegList(CPURegList* available,
+ RegList regs) {
+ available->set_list(available->list() | regs);
+}
+
+
+void UseScratchRegisterScope::ExcludeByRegList(CPURegList* available,
+ RegList exclude) {
+ available->set_list(available->list() & ~exclude);
+}
+
} // namespace vixl
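For reference, the new BranchType-based B() overload at the top of the diff folds the cbz/cbnz/tbz/tbnz, conditional, and unconditional branch shapes behind one entry point. A hedged usage sketch, not part of the patch: it assumes the header declares the trailing parameters with defaults (reg = NoReg, bit = -1), which is what the entry assertion is written to accept, and it reuses the hypothetical masm pointer from the sketch above.

    vixl::Label done;
    masm->B(&done, vixl::reg_zero, vixl::x2);        // emits cbz x2, done
    masm->B(&done, vixl::reg_bit_set, vixl::x2, 3);  // emits tbnz x2, #3, done
    masm->B(&done, vixl::always);                    // plain unconditional b
    masm->B(&done, vixl::never);                     // emits nothing at all
    masm->bind(&done);

A BranchType value in the kBranchTypeFirstCondition..kBranchTypeLastCondition range instead dispatches to B(static_cast<Condition>(type), label). Similarly, the new Fmov() overloads pick the cheapest encoding available: fmov with an encodable FP immediate, fmov from xzr/wzr for positive zero, and a literal-pool ldr() otherwise.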