about summary refs log tree commit diff
diff options
context:
space:
mode:
authorMartyn Capewell <martyn.capewell@arm.com>2017-06-02 11:44:56 +0100
committerMartyn Capewell <martyn.capewell@arm.com>2017-06-02 16:53:16 +0100
commit210db2113e52bd22a2835e366ed4abac6e7df2a6 (patch)
treec5cc8bd702ba249dd5ed9102de7cd87e88e99489
parent4e6baa22dc09224243cb52fb98adb5811496ac68 (diff)
Fix pre-shifted immediate generation involving sp.
The function that generated a pre-shifted immediate didn't account for the instruction with post-shift being unencodable. Fix this by passing information about the target instruction, and use it to limit the application of pre-shift.

Change-Id: I22ba36cce8a9e1d6da0b8a04c3602036af448d4f
-rw-r--r-- src/aarch64/macro-assembler-aarch64.cc  35
-rw-r--r-- src/aarch64/macro-assembler-aarch64.h   19
-rw-r--r-- test/aarch64/test-assembler-aarch64.cc  71
3 files changed, 119 insertions(+), 6 deletions(-)
diff --git a/src/aarch64/macro-assembler-aarch64.cc b/src/aarch64/macro-assembler-aarch64.cc
index 9eb40336..a3dfe723 100644
--- a/src/aarch64/macro-assembler-aarch64.cc
+++ b/src/aarch64/macro-assembler-aarch64.cc
@@ -874,7 +874,11 @@ void MacroAssembler::LogicalMacro(const Register& rd,
} else {
// Immediate can't be encoded: synthesize using move immediate.
Register temp = temps.AcquireSameSizeAs(rn);
- Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
+
+ // If the left-hand input is the stack pointer, we can't pre-shift the
+ // immediate, as the encoding won't allow the subsequent post shift.
+ PreShiftImmMode mode = rn.IsSP() ? kNoShift : kAnyShift;
+ Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate, mode);
if (rd.Is(sp)) {
// If rd is the stack pointer we cannot use it as the destination
@@ -1562,7 +1566,8 @@ bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
- int64_t imm) {
+ int64_t imm,
+ PreShiftImmMode mode) {
int reg_size = dst.GetSizeInBits();
// Encode the immediate in a single move instruction, if possible.
@@ -1571,6 +1576,13 @@ Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
} else {
// Pre-shift the immediate to the least-significant bits of the register.
int shift_low = CountTrailingZeros(imm, reg_size);
+ if (mode == kLimitShiftForSP) {
+ // When applied to the stack pointer, the subsequent arithmetic operation
+ // can use the extend form to shift left by a maximum of four bits. Right
+ // shifts are not allowed, so we filter them out later before the new
+ // immediate is tested.
+ shift_low = std::min(shift_low, 4);
+ }
int64_t imm_low = imm >> shift_low;
// Pre-shift the immediate to the most-significant bits of the register,
@@ -1578,11 +1590,11 @@ Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
int shift_high = CountLeadingZeros(imm, reg_size);
int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);
- if (TryOneInstrMoveImmediate(dst, imm_low)) {
+ if ((mode != kNoShift) && TryOneInstrMoveImmediate(dst, imm_low)) {
// The new immediate has been moved into the destination's low bits:
// return a new leftward-shifting operand.
return Operand(dst, LSL, shift_low);
- } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
+ } else if ((mode == kAnyShift) && TryOneInstrMoveImmediate(dst, imm_high)) {
// The new immediate has been moved into the destination's high bits:
// return a new rightward-shifting operand.
return Operand(dst, LSR, shift_high);
@@ -1685,8 +1697,21 @@ void MacroAssembler::AddSubMacro(const Register& rd,
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireSameSizeAs(rn);
if (operand.IsImmediate()) {
+ PreShiftImmMode mode = kAnyShift;
+
+ // If the destination or source register is the stack pointer, we can
+ // only pre-shift the immediate right by values supported in the add/sub
+ // extend encoding.
+ if (rd.IsSP()) {
+ // If the destination is SP and flags will be set, we can't pre-shift
+ // the immediate at all.
+ mode = (S == SetFlags) ? kNoShift : kLimitShiftForSP;
+ } else if (rn.IsSP()) {
+ mode = kLimitShiftForSP;
+ }
+
Operand imm_operand =
- MoveImmediateForShiftedOp(temp, operand.GetImmediate());
+ MoveImmediateForShiftedOp(temp, operand.GetImmediate(), mode);
AddSub(rd, rn, imm_operand, S, op);
} else {
Mov(temp, operand);
diff --git a/src/aarch64/macro-assembler-aarch64.h b/src/aarch64/macro-assembler-aarch64.h
index ad901d85..f4909b15 100644
--- a/src/aarch64/macro-assembler-aarch64.h
+++ b/src/aarch64/macro-assembler-aarch64.h
@@ -568,6 +568,21 @@ enum BranchType {
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
+// The macro assembler supports moving automatically pre-shifted immediates for
+// arithmetic and logical instructions, and then applying a post shift in the
+// instruction to undo the modification, in order to reduce the code emitted for
+// an operation. For example:
+//
+// Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1.
+//
+// This optimisation can be only partially applied when the stack pointer is an
+// operand or destination, so this enumeration is used to control the shift.
+enum PreShiftImmMode {
+ kNoShift, // Don't pre-shift.
+ kLimitShiftForSP, // Limit pre-shift for add/sub extend use.
+ kAnyShift // Allow any pre-shift.
+};
+
class MacroAssembler : public Assembler, public MacroAssemblerInterface {
public:
@@ -698,7 +713,9 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
// into dst is not necessarily equal to imm; it may have had a shifting
// operation applied to it that will be subsequently undone by the shift
// applied in the Operand.
- Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm);
+ Operand MoveImmediateForShiftedOp(const Register& dst,
+ int64_t imm,
+ PreShiftImmMode mode);
void Move(const GenericOperand& dst, const GenericOperand& src);
diff --git a/test/aarch64/test-assembler-aarch64.cc b/test/aarch64/test-assembler-aarch64.cc
index 4b6d0df5..4544b8c2 100644
--- a/test/aarch64/test-assembler-aarch64.cc
+++ b/test/aarch64/test-assembler-aarch64.cc
@@ -259,6 +259,77 @@ namespace aarch64 {
VIXL_CHECK((expected + kInstructionSize) == (masm.GetLiteralPoolSize()))
+TEST(preshift_immediates) {
+ SETUP();
+
+ START();
+ // Test operations involving immediates that could be generated using a
+ // pre-shifted encodable immediate followed by a post-shift applied to
+ // the arithmetic or logical operation.
+
+ // Save sp.
+ __ Mov(x29, sp);
+
+ // Set the registers to known values.
+ __ Mov(x0, 0x1000);
+ __ Mov(sp, 0x1004);
+
+ // Arithmetic ops.
+ __ Add(x1, x0, 0x1f7de);
+ __ Add(w2, w0, 0xffffff1);
+ __ Adds(x3, x0, 0x18001);
+ __ Adds(w4, w0, 0xffffff1);
+ __ Sub(x5, x0, 0x1f7de);
+ __ Sub(w6, w0, 0xffffff1);
+ __ Subs(x7, x0, 0x18001);
+ __ Subs(w8, w0, 0xffffff1);
+
+ // Logical ops.
+ __ And(x9, x0, 0x1f7de);
+ __ Orr(w10, w0, 0xffffff1);
+ __ Eor(x11, x0, 0x18001);
+
+ // Ops using the stack pointer.
+ __ Add(sp, sp, 0x18001);
+ __ Mov(x12, sp);
+ __ Mov(sp, 0x1004);
+
+ __ Add(sp, sp, 0x1f7de);
+ __ Mov(x13, sp);
+ __ Mov(sp, 0x1004);
+
+ __ Adds(x14, sp, 0x1f7de);
+
+ __ Orr(sp, x0, 0x1f7de);
+ __ Mov(x15, sp);
+
+ // Restore sp.
+ __ Mov(sp, x29);
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x1000, x0);
+ ASSERT_EQUAL_64(0x207de, x1);
+ ASSERT_EQUAL_64(0x10000ff1, x2);
+ ASSERT_EQUAL_64(0x19001, x3);
+ ASSERT_EQUAL_64(0x10000ff1, x4);
+ ASSERT_EQUAL_64(0xfffffffffffe1822, x5);
+ ASSERT_EQUAL_64(0xf000100f, x6);
+ ASSERT_EQUAL_64(0xfffffffffffe8fff, x7);
+ ASSERT_EQUAL_64(0xf000100f, x8);
+ ASSERT_EQUAL_64(0x1000, x9);
+ ASSERT_EQUAL_64(0xffffff1, x10);
+ ASSERT_EQUAL_64(0x19001, x11);
+ ASSERT_EQUAL_64(0x19005, x12);
+ ASSERT_EQUAL_64(0x207e2, x13);
+ ASSERT_EQUAL_64(0x207e2, x14);
+ ASSERT_EQUAL_64(0x1f7de, x15);
+
+ TEARDOWN();
+}
+
+
TEST(stack_ops) {
SETUP();