author     Georgia Kouveli <georgia.kouveli@arm.com>  2017-03-02 15:18:58 +0000
committer  Georgia Kouveli <georgia.kouveli@arm.com>  2017-06-01 17:47:10 +0100
commit     8b57c86886020cf0a5331823be4789ee558764e2 (patch)
tree       e64603395a42164f2472d90ffd1cc17b0fb81e96
parent     c9a1da70cea8c4f28bac34bad9f195c27f095bfa (diff)
[pool-manager] Integration with aarch32.
Key points of this patch:

- renames LabelBase to LocationBase
- makes the Location class derive from LocationBase
- moves the code for Location, Label and RawLiteral/Literal to a shared file
- moves ReferenceInfo out of Assembler
- removes all the old veneer pool and literal pool code
- updates the macro assembler to use the new pool manager
- updates existing tests that expect a certain behaviour from the pool manager
- adds new tests for corner cases that came up during integration
- adds tests for issues that the new pool manager addresses
  (literal_and_veneer_interaction_*)

Change-Id: Ied81401d40f88cb988ff95e85fe832851f171f77
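For context, a minimal usage sketch of the reworked Label/Literal API as driven
through the MacroAssembler, which after this patch delegates pool bookkeeping to
the shared pool manager. This sketch is not part of the patch; the function name
Generate and the literal value are illustrative only, and it assumes the usual
VIXL aarch32 entry points (Bind, literal-pool Ldr, B, FinalizeCode):

    #include "aarch32/macro-assembler-aarch32.h"

    using namespace vixl::aarch32;

    void Generate(MacroAssembler* masm) {
      Label loop;                       // Tracked by the pool manager once referenced.
      Literal<uint32_t> forty_two(42);  // Placed by the pool manager when emitted.

      masm->Bind(&loop);
      masm->Ldr(r0, &forty_two);        // Forward reference to the literal.
      masm->Subs(r0, r0, 1);
      masm->B(ne, &loop);               // Backward branch to the bound label.
      masm->FinalizeCode();             // Flushes any pending literal/veneer pools.
    }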
-rw-r--r--  examples/aarch32/custom-aarch32-disasm.cc      4
-rw-r--r--  src/aarch32/assembler-aarch32.cc             384
-rw-r--r--  src/aarch32/assembler-aarch32.h               49
-rw-r--r--  src/aarch32/disasm-aarch32.h                   4
-rw-r--r--  src/aarch32/instructions-aarch32.cc            2
-rw-r--r--  src/aarch32/instructions-aarch32.h           102
-rw-r--r--  src/aarch32/label-aarch32.cc                  51
-rw-r--r--  src/aarch32/label-aarch32.h                  336
-rw-r--r--  src/aarch32/location-aarch32.cc              152
-rw-r--r--  src/aarch32/location-aarch32.h               409
-rw-r--r--  src/aarch32/macro-assembler-aarch32.cc       321
-rw-r--r--  src/aarch32/macro-assembler-aarch32.h        785
-rw-r--r--  src/aarch32/operands-aarch32.cc                3
-rw-r--r--  src/code-buffer-vixl.cc                       13
-rw-r--r--  src/code-buffer-vixl.h                         5
-rw-r--r--  src/pool-manager-impl.h                       18
-rw-r--r--  src/pool-manager.h                            95
-rw-r--r--  test/aarch32/test-assembler-aarch32.cc       924
-rw-r--r--  test/aarch32/test-utils-aarch32.h             18
-rw-r--r--  test/test-code-generation-scopes.cc           14
-rw-r--r--  test/test-pool-manager.cc                     18
21 files changed, 1861 insertions, 1846 deletions
diff --git a/examples/aarch32/custom-aarch32-disasm.cc b/examples/aarch32/custom-aarch32-disasm.cc
index 72ce564c..89e48b66 100644
--- a/examples/aarch32/custom-aarch32-disasm.cc
+++ b/examples/aarch32/custom-aarch32-disasm.cc
@@ -1,4 +1,4 @@
-// Copyright 2016, VIXL authors
+// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -101,7 +101,7 @@ class NamedLabel : public Label {
public:
NamedLabel(CustomStream* stream, const char* name)
: stream_(stream), name_(name) {}
- ~NamedLabel() {
+ ~NamedLabel() VIXL_THROW_IN_NEGATIVE_TESTING_MODE(std::runtime_error) {
if (IsBound()) {
stream_->GetSymbols().insert(
std::pair<Location::Offset, const char*>(GetLocation(), name_));
diff --git a/src/aarch32/assembler-aarch32.cc b/src/aarch32/assembler-aarch32.cc
index 0d23884d..98a2b93a 100644
--- a/src/aarch32/assembler-aarch32.cc
+++ b/src/aarch32/assembler-aarch32.cc
@@ -1,4 +1,4 @@
-// Copyright 2015, VIXL authors
+// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -80,67 +80,24 @@ void Assembler::PerformCheckIT(Condition condition) {
void Assembler::BindHelper(Label* label) {
- BindLocationHelper(label);
-
- if (label->IsInVeneerPool()) {
- label->GetVeneerPoolManager()->RemoveLabel(label);
- }
-}
-
-
-void Assembler::BindLocationHelper(Location* location) {
- VIXL_ASSERT(!location->IsBound());
- location->Bind(GetCursorOffset());
-
- for (Location::ForwardRefList::iterator ref = location->GetFirstForwardRef();
- ref != location->GetEndForwardRef();
- ref++) {
- EncodeLocationFor(*ref, location);
- }
+ VIXL_ASSERT(!label->IsBound());
+ label->SetLocation(this, GetCursorOffset());
+ label->MarkBound();
}
-
uint32_t Assembler::Link(uint32_t instr,
Location* location,
const Location::EmitOperator& op,
- const struct ReferenceInfo* info) {
+ const ReferenceInfo* info) {
location->SetReferenced();
if (location->IsBound()) {
return op.Encode(instr, GetCursorOffset(), location);
}
- location->AddForwardRef(GetCursorOffset(), op, info->max_offset);
+ location->AddForwardRef(GetCursorOffset(), op, info);
return instr;
}
-void Assembler::EncodeLocationFor(const Location::ForwardReference& forward,
- Location* location) {
- const uint32_t from = forward.GetLocation();
- const Location::EmitOperator& encoder = forward.GetEmitOperator();
- if (encoder.IsUsingT32()) {
- uint16_t* instr_ptr = buffer_.GetOffsetAddress<uint16_t*>(from);
- if (Is16BitEncoding(instr_ptr[0])) {
- // The Encode methods always deals with uint32_t types so we need
- // to explicitely cast it.
- uint32_t instr = static_cast<uint32_t>(instr_ptr[0]);
- instr = encoder.Encode(instr, from, location);
- // The Encode method should not ever set the top 16 bits.
- VIXL_ASSERT((instr & ~0xffff) == 0);
- instr_ptr[0] = static_cast<uint16_t>(instr);
- } else {
- uint32_t instr =
- instr_ptr[1] | (static_cast<uint32_t>(instr_ptr[0]) << 16);
- instr = encoder.Encode(instr, from, location);
- instr_ptr[0] = static_cast<uint16_t>(instr >> 16);
- instr_ptr[1] = static_cast<uint16_t>(instr);
- }
- } else {
- uint32_t* instr_ptr = buffer_.GetOffsetAddress<uint32_t*>(from);
- instr_ptr[0] = encoder.Encode(instr_ptr[0], from, location);
- }
-}
-
-
// Start of generated code.
class Dt_L_imm6_1 : public EncodingValue {
uint32_t type_;
@@ -1798,216 +1755,189 @@ Align_align_5::Align_align_5(Alignment align,
}
}
-static const struct Assembler::ReferenceInfo kAdrT1Info =
- {k16BitT32InstructionSizeInBytes,
- 0, // Min offset.
- 1020, // Max offset.
- 4, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kAdrT3Info =
- {k32BitT32InstructionSizeInBytes,
- -4095, // Min offset.
- 4095, // Max offset.
- 1, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kAdrA1Info =
- {kA32InstructionSizeInBytes,
- -256, // Min offset.
- 256, // Max offset.
- 1, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kBT1Info =
- {k16BitT32InstructionSizeInBytes,
- -256, // Min offset.
- 254, // Max offset.
- 2, // Alignment.
- Assembler::ReferenceInfo::kDontAlignPc};
-static const struct Assembler::ReferenceInfo kBT2Info =
- {k16BitT32InstructionSizeInBytes,
- -2048, // Min offset.
- 2046, // Max offset.
- 2, // Alignment.
- Assembler::ReferenceInfo::kDontAlignPc};
-static const struct Assembler::ReferenceInfo kBT3Info =
- {k32BitT32InstructionSizeInBytes,
- -1048576, // Min offset.
- 1048574, // Max offset.
- 2, // Alignment.
- Assembler::ReferenceInfo::kDontAlignPc};
-static const struct Assembler::ReferenceInfo kBT4Info =
- {k32BitT32InstructionSizeInBytes,
- -16777216, // Min offset.
- 16777214, // Max offset.
- 2, // Alignment.
- Assembler::ReferenceInfo::kDontAlignPc};
-static const struct Assembler::ReferenceInfo kBA1Info =
- {kA32InstructionSizeInBytes,
- -33554432, // Min offset.
- 33554428, // Max offset.
- 4, // Alignment.
- Assembler::ReferenceInfo::kDontAlignPc};
-static const struct Assembler::ReferenceInfo kBlT1Info =
- {k32BitT32InstructionSizeInBytes,
- -16777216, // Min offset.
- 16777214, // Max offset.
- 2, // Alignment.
- Assembler::ReferenceInfo::kDontAlignPc};
-static const struct Assembler::ReferenceInfo kBlA1Info =
- {kA32InstructionSizeInBytes,
- -33554432, // Min offset.
- 33554428, // Max offset.
- 4, // Alignment.
- Assembler::ReferenceInfo::kDontAlignPc};
-static const struct Assembler::ReferenceInfo kBlxT2Info =
- {k32BitT32InstructionSizeInBytes,
- -16777216, // Min offset.
- 16777212, // Max offset.
- 4, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kBlxA2Info =
- {kA32InstructionSizeInBytes,
- -33554432, // Min offset.
- 33554430, // Max offset.
- 2, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kCbnzT1Info =
- {k16BitT32InstructionSizeInBytes,
- 0, // Min offset.
- 126, // Max offset.
- 2, // Alignment.
- Assembler::ReferenceInfo::kDontAlignPc};
-static const struct Assembler::ReferenceInfo kCbzT1Info =
+static const struct ReferenceInfo kAdrT1Info = {k16BitT32InstructionSizeInBytes,
+ 0, // Min offset.
+ 1020, // Max offset.
+ 4, // Alignment.
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kAdrT3Info = {k32BitT32InstructionSizeInBytes,
+ -4095, // Min offset.
+ 4095, // Max offset.
+ 1, // Alignment.
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kAdrA1Info = {kA32InstructionSizeInBytes,
+ -256, // Min offset.
+ 256, // Max offset.
+ 1, // Alignment.
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kBT1Info = {k16BitT32InstructionSizeInBytes,
+ -256, // Min offset.
+ 254, // Max offset.
+ 2, // Alignment.
+ ReferenceInfo::kDontAlignPc};
+static const struct ReferenceInfo kBT2Info = {k16BitT32InstructionSizeInBytes,
+ -2048, // Min offset.
+ 2046, // Max offset.
+ 2, // Alignment.
+ ReferenceInfo::kDontAlignPc};
+static const struct ReferenceInfo kBT3Info = {k32BitT32InstructionSizeInBytes,
+ -1048576, // Min offset.
+ 1048574, // Max offset.
+ 2, // Alignment.
+ ReferenceInfo::kDontAlignPc};
+static const struct ReferenceInfo kBT4Info = {k32BitT32InstructionSizeInBytes,
+ -16777216, // Min offset.
+ 16777214, // Max offset.
+ 2, // Alignment.
+ ReferenceInfo::kDontAlignPc};
+static const struct ReferenceInfo kBA1Info = {kA32InstructionSizeInBytes,
+ -33554432, // Min offset.
+ 33554428, // Max offset.
+ 4, // Alignment.
+ ReferenceInfo::kDontAlignPc};
+static const struct ReferenceInfo kBlT1Info = {k32BitT32InstructionSizeInBytes,
+ -16777216, // Min offset.
+ 16777214, // Max offset.
+ 2, // Alignment.
+ ReferenceInfo::kDontAlignPc};
+static const struct ReferenceInfo kBlA1Info = {kA32InstructionSizeInBytes,
+ -33554432, // Min offset.
+ 33554428, // Max offset.
+ 4, // Alignment.
+ ReferenceInfo::kDontAlignPc};
+static const struct ReferenceInfo kBlxT2Info = {k32BitT32InstructionSizeInBytes,
+ -16777216, // Min offset.
+ 16777212, // Max offset.
+ 4, // Alignment.
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kBlxA2Info = {kA32InstructionSizeInBytes,
+ -33554432, // Min offset.
+ 33554430, // Max offset.
+ 2, // Alignment.
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kCbnzT1Info =
{k16BitT32InstructionSizeInBytes,
0, // Min offset.
126, // Max offset.
2, // Alignment.
- Assembler::ReferenceInfo::kDontAlignPc};
-static const struct Assembler::ReferenceInfo kLdrT1Info =
- {k16BitT32InstructionSizeInBytes,
- 0, // Min offset.
- 1020, // Max offset.
- 4, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kLdrT2Info =
- {k32BitT32InstructionSizeInBytes,
- -4095, // Min offset.
- 4095, // Max offset.
- 1, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kLdrA1Info =
- {kA32InstructionSizeInBytes,
- -4095, // Min offset.
- 4095, // Max offset.
- 1, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kLdrbT1Info =
+ ReferenceInfo::kDontAlignPc};
+static const struct ReferenceInfo kCbzT1Info = {k16BitT32InstructionSizeInBytes,
+ 0, // Min offset.
+ 126, // Max offset.
+ 2, // Alignment.
+ ReferenceInfo::kDontAlignPc};
+static const struct ReferenceInfo kLdrT1Info = {k16BitT32InstructionSizeInBytes,
+ 0, // Min offset.
+ 1020, // Max offset.
+ 4, // Alignment.
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kLdrT2Info = {k32BitT32InstructionSizeInBytes,
+ -4095, // Min offset.
+ 4095, // Max offset.
+ 1, // Alignment.
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kLdrA1Info = {kA32InstructionSizeInBytes,
+ -4095, // Min offset.
+ 4095, // Max offset.
+ 1, // Alignment.
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kLdrbT1Info =
{k32BitT32InstructionSizeInBytes,
-4095, // Min offset.
4095, // Max offset.
1, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kLdrbA1Info =
- {kA32InstructionSizeInBytes,
- -4095, // Min offset.
- 4095, // Max offset.
- 1, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kLdrdT1Info =
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kLdrbA1Info = {kA32InstructionSizeInBytes,
+ -4095, // Min offset.
+ 4095, // Max offset.
+ 1, // Alignment.
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kLdrdT1Info =
{k32BitT32InstructionSizeInBytes,
-1020, // Min offset.
1020, // Max offset.
4, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kLdrdA1Info =
- {kA32InstructionSizeInBytes,
- -255, // Min offset.
- 255, // Max offset.
- 1, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kLdrhT1Info =
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kLdrdA1Info = {kA32InstructionSizeInBytes,
+ -255, // Min offset.
+ 255, // Max offset.
+ 1, // Alignment.
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kLdrhT1Info =
{k32BitT32InstructionSizeInBytes,
-4095, // Min offset.
4095, // Max offset.
1, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kLdrhA1Info =
- {kA32InstructionSizeInBytes,
- -255, // Min offset.
- 255, // Max offset.
- 1, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kLdrsbT1Info =
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kLdrhA1Info = {kA32InstructionSizeInBytes,
+ -255, // Min offset.
+ 255, // Max offset.
+ 1, // Alignment.
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kLdrsbT1Info =
{k32BitT32InstructionSizeInBytes,
-4095, // Min offset.
4095, // Max offset.
1, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kLdrsbA1Info =
- {kA32InstructionSizeInBytes,
- -255, // Min offset.
- 255, // Max offset.
- 1, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kLdrshT1Info =
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kLdrsbA1Info = {kA32InstructionSizeInBytes,
+ -255, // Min offset.
+ 255, // Max offset.
+ 1, // Alignment.
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kLdrshT1Info =
{k32BitT32InstructionSizeInBytes,
-4095, // Min offset.
4095, // Max offset.
1, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kLdrshA1Info =
- {kA32InstructionSizeInBytes,
- -255, // Min offset.
- 255, // Max offset.
- 1, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kPldT1Info =
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kLdrshA1Info = {kA32InstructionSizeInBytes,
+ -255, // Min offset.
+ 255, // Max offset.
+ 1, // Alignment.
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kPldT1Info = {k32BitT32InstructionSizeInBytes,
+ -4095, // Min offset.
+ 4095, // Max offset.
+ 1, // Alignment.
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kPldA1Info = {kA32InstructionSizeInBytes,
+ -4095, // Min offset.
+ 4095, // Max offset.
+ 1, // Alignment.
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kPliT3Info = {k32BitT32InstructionSizeInBytes,
+ -4095, // Min offset.
+ 4095, // Max offset.
+ 1, // Alignment.
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kPliA1Info = {kA32InstructionSizeInBytes,
+ -4095, // Min offset.
+ 4095, // Max offset.
+ 1, // Alignment.
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kVldrT1Info =
{k32BitT32InstructionSizeInBytes,
- -4095, // Min offset.
- 4095, // Max offset.
- 1, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kPldA1Info =
- {kA32InstructionSizeInBytes,
- -4095, // Min offset.
- 4095, // Max offset.
- 1, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kPliT3Info =
- {k32BitT32InstructionSizeInBytes,
- -4095, // Min offset.
- 4095, // Max offset.
- 1, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kPliA1Info =
- {kA32InstructionSizeInBytes,
- -4095, // Min offset.
- 4095, // Max offset.
- 1, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kVldrT1Info =
- {k32BitT32InstructionSizeInBytes,
- -1020, // Min offset.
- 1020, // Max offset.
- 4, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kVldrA1Info =
- {kA32InstructionSizeInBytes,
-1020, // Min offset.
1020, // Max offset.
4, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kVldrT2Info =
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kVldrA1Info = {kA32InstructionSizeInBytes,
+ -1020, // Min offset.
+ 1020, // Max offset.
+ 4, // Alignment.
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kVldrT2Info =
{k32BitT32InstructionSizeInBytes,
-1020, // Min offset.
1020, // Max offset.
4, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
-static const struct Assembler::ReferenceInfo kVldrA2Info =
- {kA32InstructionSizeInBytes,
- -1020, // Min offset.
- 1020, // Max offset.
- 4, // Alignment.
- Assembler::ReferenceInfo::kAlignPc};
+ ReferenceInfo::kAlignPc};
+static const struct ReferenceInfo kVldrA2Info = {kA32InstructionSizeInBytes,
+ -1020, // Min offset.
+ 1020, // Max offset.
+ 4, // Alignment.
+ ReferenceInfo::kAlignPc};
void Assembler::adc(Condition cond,
diff --git a/src/aarch32/assembler-aarch32.h b/src/aarch32/assembler-aarch32.h
index 5488bb84..9644a882 100644
--- a/src/aarch32/assembler-aarch32.h
+++ b/src/aarch32/assembler-aarch32.h
@@ -1,4 +1,4 @@
-// Copyright 2015, VIXL authors
+// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -30,7 +30,7 @@
#include "assembler-base-vixl.h"
#include "aarch32/instructions-aarch32.h"
-#include "aarch32/label-aarch32.h"
+#include "aarch32/location-aarch32.h"
namespace vixl {
namespace aarch32 {
@@ -43,9 +43,6 @@ class Assembler : public internal::AssemblerBase {
bool allow_unpredictable_;
bool allow_strongly_discouraged_;
- public:
- struct ReferenceInfo;
-
protected:
void EmitT32_16(uint16_t instr);
void EmitT32_32(uint32_t instr);
@@ -63,12 +60,10 @@ class Assembler : public internal::AssemblerBase {
void PerformCheckIT(Condition condition);
#endif
void AdvanceIT() { it_mask_ = (it_mask_ << 1) & 0xf; }
- void BindHelper(Label* label);
- void BindLocationHelper(Location* location);
- void PlaceHelper(RawLiteral* literal) {
- BindLocationHelper(literal);
- GetBuffer()->EmitData(literal->GetDataAddress(), literal->GetSize());
- }
+ // Virtual, in order to be overridden by the MacroAssembler, which needs to
+ // notify the pool manager.
+ virtual void BindHelper(Label* label);
+
uint32_t Link(uint32_t instr,
Location* location,
const Location::EmitOperator& op,
@@ -177,10 +172,6 @@ class Assembler : public internal::AssemblerBase {
first_condition_ = first_condition;
it_mask_ = it_mask;
}
- bool Is16BitEncoding(uint16_t instr) const {
- VIXL_ASSERT(IsUsingT32());
- return instr < 0xe800;
- }
bool InITBlock() { return it_mask_ != 0; }
bool OutsideITBlock() { return it_mask_ == 0; }
bool OutsideITBlockOrLast() { return (it_mask_ == 0) || (it_mask_ == 0x8); }
@@ -198,14 +189,29 @@ class Assembler : public internal::AssemblerBase {
}
uint32_t GetArchitectureStatePCOffset() const { return IsUsingT32() ? 4 : 8; }
+
+ // Bind a raw Location that will never be tracked by the pool manager.
+ void bind(Location* location) {
+ VIXL_ASSERT(AllowAssembler());
+ VIXL_ASSERT(!location->IsBound());
+ location->SetLocation(this, GetCursorOffset());
+ location->MarkBound();
+ }
+
+ // Bind a Label, which may be tracked by the pool manager in the presence of a
+ // MacroAssembler.
void bind(Label* label) {
VIXL_ASSERT(AllowAssembler());
BindHelper(label);
}
+
void place(RawLiteral* literal) {
VIXL_ASSERT(AllowAssembler());
VIXL_ASSERT(literal->IsManuallyPlaced());
- PlaceHelper(literal);
+ literal->SetLocation(this, GetCursorOffset());
+ literal->MarkBound();
+ GetBuffer()->EnsureSpaceFor(literal->GetSize());
+ GetBuffer()->EmitData(literal->GetDataAddress(), literal->GetSize());
}
size_t GetSizeOfCodeGeneratedSince(Label* label) const {
@@ -213,9 +219,6 @@ class Assembler : public internal::AssemblerBase {
return buffer_.GetOffsetFrom(label->GetLocation());
}
- void EncodeLocationFor(const Location::ForwardReference& forward,
- Location* location);
-
// Helpers for it instruction.
void it(Condition cond) { it(cond, 0x8); }
void itt(Condition cond) { it(cond, 0x4); }
@@ -1832,14 +1835,6 @@ class Assembler : public internal::AssemblerBase {
VIXL_ASSERT((type == kVtbl) || (type == kVtbx));
UnimplementedDelegate(type);
}
- // Structure containing information on forward references.
- struct ReferenceInfo {
- int size;
- int min_offset;
- int max_offset;
- int alignment; // As a power of two.
- enum { kAlignPc, kDontAlignPc } pc_needs_aligning;
- };
void adc(Condition cond,
EncodingSize size,
diff --git a/src/aarch32/disasm-aarch32.h b/src/aarch32/disasm-aarch32.h
index ed230458..657136a8 100644
--- a/src/aarch32/disasm-aarch32.h
+++ b/src/aarch32/disasm-aarch32.h
@@ -1,4 +1,4 @@
-// Copyright 2015, VIXL authors
+// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -31,6 +31,8 @@ extern "C" {
#include <stdint.h>
}
+#include <iomanip>
+
#include "aarch32/constants-aarch32.h"
#include "aarch32/operands-aarch32.h"
diff --git a/src/aarch32/instructions-aarch32.cc b/src/aarch32/instructions-aarch32.cc
index 91932009..cbf414c1 100644
--- a/src/aarch32/instructions-aarch32.cc
+++ b/src/aarch32/instructions-aarch32.cc
@@ -1,4 +1,4 @@
-// Copyright 2015, VIXL authors
+// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
diff --git a/src/aarch32/instructions-aarch32.h b/src/aarch32/instructions-aarch32.h
index ad54cd6a..ea7b8562 100644
--- a/src/aarch32/instructions-aarch32.h
+++ b/src/aarch32/instructions-aarch32.h
@@ -1,4 +1,4 @@
-// Copyright 2015, VIXL authors
+// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -37,7 +37,6 @@ extern "C" {
#include "utils-vixl.h"
#include "code-buffer-vixl.h"
#include "aarch32/constants-aarch32.h"
-#include "aarch32/label-aarch32.h"
#ifdef __arm__
#define HARDFLOAT __attribute__((noinline, pcs("aapcs-vfp")))
@@ -1341,98 +1340,13 @@ inline std::ostream& operator<<(std::ostream& os, Alignment align) {
return os << " :" << (0x10 << static_cast<uint32_t>(align.GetType()));
}
-class RawLiteral : public Location {
- public:
- enum PlacementPolicy { kPlacedWhenUsed, kManuallyPlaced };
-
- enum DeletionPolicy {
- kDeletedOnPlacementByPool,
- kDeletedOnPoolDestruction,
- kManuallyDeleted
- };
-
- public:
- RawLiteral(const void* addr,
- size_t size,
- PlacementPolicy placement_policy = kPlacedWhenUsed,
- DeletionPolicy deletion_policy = kManuallyDeleted)
- : addr_(addr),
- size_(size),
- position_(kMaxOffset),
- manually_placed_(placement_policy == kManuallyPlaced),
- deletion_policy_(deletion_policy) {
- // We can't have manually placed literals that are not manually deleted.
- VIXL_ASSERT(!IsManuallyPlaced() ||
- (GetDeletionPolicy() == kManuallyDeleted));
- }
- RawLiteral(const void* addr, size_t size, DeletionPolicy deletion_policy)
- : addr_(addr),
- size_(size),
- position_(kMaxOffset),
- manually_placed_(false),
- deletion_policy_(deletion_policy) {}
- ~RawLiteral() {}
- const void* GetDataAddress() const { return addr_; }
- size_t GetSize() const { return size_; }
- size_t GetAlignedSize() const { return (size_ + 3) & ~0x3; }
-
- Offset GetPositionInPool() const { return position_; }
- void SetPositionInPool(Offset position_in_pool) {
- // Assumed that the literal has not already been added to
- // the pool.
- VIXL_ASSERT(position_ == Label::kMaxOffset);
- position_ = position_in_pool;
- }
-
- bool IsManuallyPlaced() const { return manually_placed_; }
- DeletionPolicy GetDeletionPolicy() const { return deletion_policy_; }
-
- private:
- // Data address before it's moved into the code buffer.
- const void* const addr_;
- // Data size before it's moved into the code buffer.
- const size_t size_;
- // Position in the pool, if not in a pool: Label::kMaxOffset.
- Offset position_;
- // When this flag is true, the label will be placed manually.
- bool manually_placed_;
- // When is the literal to be removed from the memory
- // Can be delete'd when:
- // moved into the code buffer: kDeletedOnPlacementByPool
- // the pool is delete'd: kDeletedOnPoolDestruction
- // or left to the application: kManuallyDeleted.
- DeletionPolicy deletion_policy_;
-};
-
-template <typename T>
-class Literal : public RawLiteral {
- public:
- explicit Literal(const T& value,
- PlacementPolicy placement_policy = kPlacedWhenUsed,
- DeletionPolicy deletion_policy = kManuallyDeleted)
- : RawLiteral(&value_, sizeof(T), placement_policy, deletion_policy),
- value_(value) {}
- explicit Literal(const T& value, DeletionPolicy deletion_policy)
- : RawLiteral(&value_, sizeof(T), deletion_policy), value_(value) {}
- void UpdateValue(const T& value, CodeBuffer* buffer) {
- value_ = value;
- if (IsBound()) {
- buffer->UpdateData(GetLocation(), GetDataAddress(), GetSize());
- }
- }
-
- private:
- T value_;
-};
-
-class StringLiteral : public RawLiteral {
- public:
- explicit StringLiteral(const char* str,
- PlacementPolicy placement_policy = kPlacedWhenUsed,
- DeletionPolicy deletion_policy = kManuallyDeleted)
- : RawLiteral(str, strlen(str) + 1, placement_policy, deletion_policy) {}
- explicit StringLiteral(const char* str, DeletionPolicy deletion_policy)
- : RawLiteral(str, strlen(str) + 1, deletion_policy) {}
+// Structure containing information on forward references.
+struct ReferenceInfo {
+ int size;
+ int min_offset;
+ int max_offset;
+ int alignment; // As a power of two.
+ enum { kAlignPc, kDontAlignPc } pc_needs_aligning;
};
} // namespace aarch32
diff --git a/src/aarch32/label-aarch32.cc b/src/aarch32/label-aarch32.cc
deleted file mode 100644
index ac0c343f..00000000
--- a/src/aarch32/label-aarch32.cc
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2016, VIXL authors
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-// * Neither the name of ARM Limited nor the names of its contributors may be
-// used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
-// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "label-aarch32.h"
-#include "macro-assembler-aarch32.h"
-
-namespace vixl {
-namespace aarch32 {
-
-void VeneerPoolManager::Release() {
- VIXL_ASSERT(IsBlocked());
- if (--monitor_ == 0) {
- // Ensure the pool has not been blocked for too long.
- // This may generate some veneers if some labels has been added by the code
- // which used Block/Release.
-
- // TODO: This check is _temporarily_ disabled to work around some usage in
- // ART, which assumes that pools will not be generated immediately after
- // macros or ExactAssemblyScopes. The next instruction that is generated
- // will perform this check anyway, but in a place less convenient for
- // debugging.
- // masm_->EnsureEmitPoolsFor(0);
- }
-}
-
-} // namespace aarch32
-} // namespace vixl
diff --git a/src/aarch32/label-aarch32.h b/src/aarch32/label-aarch32.h
deleted file mode 100644
index 74dda423..00000000
--- a/src/aarch32/label-aarch32.h
+++ /dev/null
@@ -1,336 +0,0 @@
-// Copyright 2015, VIXL authors
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-// * Neither the name of ARM Limited nor the names of its contributors may be
-// used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
-// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef VIXL_AARCH32_LABEL_AARCH32_H_
-#define VIXL_AARCH32_LABEL_AARCH32_H_
-
-extern "C" {
-#include <stdint.h>
-}
-
-#include <algorithm>
-#include <cstddef>
-#include <iomanip>
-#include <list>
-
-#include "utils-vixl.h"
-
-#include "constants-aarch32.h"
-
-namespace vixl {
-namespace aarch32 {
-
-class VeneerPoolManager;
-class MacroAssembler;
-
-class Location {
- public:
- typedef int32_t Offset;
- static const Offset kMaxOffset = 0x7fffffff;
-
- class EmitOperator {
- InstructionSet isa_;
-
- public:
- explicit EmitOperator(InstructionSet isa) : isa_(isa) {
-#if defined(VIXL_INCLUDE_TARGET_A32_ONLY)
- USE(isa_);
- VIXL_ASSERT(isa == A32);
-#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY)
- USE(isa_);
- VIXL_ASSERT(isa == T32);
-#endif
- }
- virtual ~EmitOperator() {}
- virtual uint32_t Encode(uint32_t /*instr*/,
- Location::Offset /*pc*/,
- const Location* /*label*/) const {
- return 0;
- }
-#if defined(VIXL_INCLUDE_TARGET_A32_ONLY)
- bool IsUsingT32() const { return false; }
-#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY)
- bool IsUsingT32() const { return true; }
-#else
- bool IsUsingT32() const { return isa_ == T32; }
-#endif
- };
-
- class ForwardReference {
- public:
- ForwardReference(int32_t location,
- const EmitOperator& op,
- Offset max_offset)
- : location_(location),
- op_(op),
- is_branch_(false),
- max_forward_(max_offset) {}
- Offset GetMaxForwardDistance() const { return max_forward_; }
- int32_t GetLocation() const { return location_; }
- uint32_t GetStatePCOffset() const {
- return op_.IsUsingT32() ? kT32PcDelta : kA32PcDelta;
- }
-
- bool IsUsingT32() const { return op_.IsUsingT32(); }
- bool IsBranch() const { return is_branch_; }
- void SetIsBranch() { is_branch_ = true; }
- const EmitOperator& GetEmitOperator() const { return op_; }
- Offset GetCheckpoint() const {
- // The load instructions align down PC before adding the offset.
- // The alignment is only needed for T32 as A32 instructions are always
- // 4 byte aligned.
- int32_t pc = GetLocation() + GetStatePCOffset();
- return GetMaxForwardDistance() +
- ((op_.IsUsingT32() && !IsBranch()) ? AlignDown(pc, 4) : pc);
- }
-
- private:
- int32_t location_;
- const EmitOperator& op_;
- bool is_branch_;
- Location::Offset max_forward_;
- };
-
- typedef std::list<ForwardReference> ForwardRefList;
-
- enum UpdateCheckpointOption { kNoUpdateNecessary, kRecomputeCheckpoint };
-
- static bool CompareCheckpoints(const ForwardReference& a,
- const ForwardReference& b) {
- return a.GetCheckpoint() < b.GetCheckpoint();
- }
-
- Offset GetNextCheckpoint() {
- if (HasForwardReference()) {
- ForwardRefList::iterator min_checkpoint =
- std::min_element(forward_.begin(),
- forward_.end(),
- CompareCheckpoints);
- return (*min_checkpoint).GetCheckpoint();
- }
- return kMaxOffset;
- }
-
- public:
- Location()
- : location_(kMaxOffset),
- is_bound_(false),
- referenced_(false),
- checkpoint_(kMaxOffset) {}
- explicit Location(Offset location)
- : location_(location),
- is_bound_(true),
- referenced_(false),
- checkpoint_(kMaxOffset) {}
- ~Location() VIXL_THROW_IN_NEGATIVE_TESTING_MODE(std::runtime_error) {
-#ifdef VIXL_DEBUG
- if (referenced_ && !is_bound_) {
- VIXL_ABORT_WITH_MSG("Location, label or literal used but not bound.\n");
- }
-#endif
- }
-
-#undef DEFAULT_IS_T32
-
- bool IsBound() const { return is_bound_; }
- bool HasForwardReference() const { return !forward_.empty(); }
- void Bind(Offset offset) {
- VIXL_ASSERT(!IsBound());
- location_ = offset;
- is_bound_ = true;
- }
- Offset GetLocation() const {
- VIXL_ASSERT(IsBound());
- return location_;
- }
- void SetReferenced() { referenced_ = true; }
- bool IsReferenced() const { return referenced_; }
- void SetCheckpoint(Offset checkpoint) { checkpoint_ = checkpoint; }
- Offset GetCheckpoint() const { return checkpoint_; }
- Offset GetAlignedCheckpoint(int byte_align) const {
- return AlignDown(GetCheckpoint(), byte_align);
- }
- void AddForwardRef(int32_t instr_location,
- const EmitOperator& op,
- Offset max_offset) {
- VIXL_ASSERT(referenced_);
- forward_.push_back(ForwardReference(instr_location, op, max_offset));
- }
-
- ForwardRefList::iterator GetFirstForwardRef() { return forward_.begin(); }
- ForwardRefList::iterator GetEndForwardRef() { return forward_.end(); }
- const ForwardReference* GetForwardRefBack() const {
- if (forward_.empty()) return NULL;
- return &forward_.back();
- }
- // Erase an item in the list. We don't have to recompute the checkpoint as
- // the caller does it.
- ForwardRefList::iterator Erase(ForwardRefList::iterator ref) {
- return forward_.erase(ref);
- }
- ForwardReference& GetBackForwardRef() { return forward_.back(); }
-
- void ClearForwardRef() { forward_.clear(); }
-
- // Only used by the literal pool.
- // Removes the last forward reference, in particular because of a rewind.
- // TODO(all): This is hard to test as the checkpoint could be affected only
- // if the literal has multiple forward references. So, the literal has to be
- // shared between multiple instructions and part of the literal pool which
- // is not yet supperted.
- void InvalidateLastForwardReference(
- UpdateCheckpointOption update_checkpoint = kRecomputeCheckpoint) {
- if (!IsBound()) {
- VIXL_ASSERT(HasForwardReference());
- forward_.pop_back();
- }
- VIXL_ASSERT((update_checkpoint == kNoUpdateNecessary) &&
- ((checkpoint_ == GetNextCheckpoint()) ||
- ((checkpoint_ == Location::kMaxOffset) && forward_.empty())));
- if (update_checkpoint == kRecomputeCheckpoint) {
- checkpoint_ = GetNextCheckpoint();
- }
- }
-
- // Only used by the literal pool.
- // Update the checkpoint as the shorter distance from the last
- // literal in the pool's reference location to the point
- // where the forward reference will fail.
- // The last forward reference is assumed to be the one freshly
- // added regarding this literal.
- void UpdateCheckpoint() {
- if (HasForwardReference()) {
- const ForwardReference& ref = forward_.back();
- checkpoint_ = std::min(checkpoint_, ref.GetCheckpoint());
- }
- VIXL_ASSERT(GetNextCheckpoint() == checkpoint_);
- }
-
- private:
- // Once bound, location of this label in the code buffer.
- Offset location_;
- // Is the label bound.
- bool is_bound_;
- // True if the label has been used at least once.
- bool referenced_;
- // Contains the references to the unbound label
- ForwardRefList forward_;
- // Max offset in the code buffer. Must be emitted before this checkpoint.
- Offset checkpoint_;
-};
-
-class Label : public Location {
- public:
- Label() : Location(), veneer_pool_manager_(NULL), is_near_(false) {}
- explicit Label(Offset location)
- : Location(location), veneer_pool_manager_(NULL), is_near_(false) {}
- static bool CompareLabels(Label* a, Label* b) {
- return a->GetCheckpoint() < b->GetCheckpoint();
- }
- bool IsInVeneerPool() const { return veneer_pool_manager_ != NULL; }
- VeneerPoolManager* GetVeneerPoolManager() const {
- return veneer_pool_manager_;
- }
- void SetVeneerPoolManager(VeneerPoolManager* veneer_pool_manager,
- bool is_near) {
- veneer_pool_manager_ = veneer_pool_manager;
- is_near_ = is_near;
- }
- void ClearVeneerPoolManager() { veneer_pool_manager_ = NULL; }
- bool IsNear() const { return is_near_; }
-
- private:
- // Not null if the label is currently in the veneer pool.
- VeneerPoolManager* veneer_pool_manager_;
- // True if the label is in the near_labels_ list.
- bool is_near_;
-};
-
-class VeneerPoolManager {
- public:
- explicit VeneerPoolManager(MacroAssembler* masm)
- : masm_(masm),
- near_checkpoint_(Location::kMaxOffset),
- far_checkpoint_(Location::kMaxOffset),
- max_near_checkpoint_(0),
- near_checkpoint_margin_(0),
- last_label_reference_offset_(0),
- monitor_(0) {}
- bool IsEmpty() const {
- return (near_labels_.size() + far_labels_.size()) == 0;
- }
- Location::Offset GetCheckpoint() const {
- // For the far labels, we subtract the veneer size. This way avoids problems
- // when two label have the same checkpoint. In the usual case, we lose some
- // range but, as the minimum range for far labels is 1 mega byte, it's not
- // very important.
- size_t veneer_max_size = GetMaxSize();
- VIXL_ASSERT(IsInt32(veneer_max_size));
- Location::Offset tmp =
- far_checkpoint_ - static_cast<Location::Offset>(veneer_max_size);
- // Make room for a branch over the pools.
- return std::min(near_checkpoint_, tmp) - kMaxInstructionSizeInBytes -
- near_checkpoint_margin_;
- }
- size_t GetMaxSize() const {
- return (near_labels_.size() + far_labels_.size()) *
- kMaxInstructionSizeInBytes;
- }
- void AddLabel(Label* label);
- void RemoveLabel(Label* label);
- void EmitLabel(Label* label, Location::Offset emitted_target);
- void Emit(Location::Offset target);
-
- void Block() { monitor_++; }
- void Release();
- bool IsBlocked() const { return monitor_ != 0; }
-
- private:
- MacroAssembler* masm_;
- // Lists of all unbound labels which are used by a branch instruction.
- std::list<Label*> near_labels_;
- std::list<Label*> far_labels_;
- // Offset in the code buffer after which the veneer needs to be emitted.
- // It's the lowest checkpoint value in the associated list.
- // A default value of Location::kMaxOffset means that the checkpoint is
- // invalid (no entry in the list).
- Location::Offset near_checkpoint_;
- Location::Offset far_checkpoint_;
- // Highest checkpoint value for the near list.
- Location::Offset max_near_checkpoint_;
- // Margin we have to take to ensure that 16 bit branch instructions will be
- // able to generate 32 bit veneers.
- uint32_t near_checkpoint_margin_;
- // Offset where the last reference to a label has been added to the pool.
- Location::Offset last_label_reference_offset_;
- // Indicates whether the emission of this pool is blocked.
- int monitor_;
-};
-
-} // namespace aarch32
-} // namespace vixl
-
-#endif // VIXL_AARCH32_LABEL_AARCH32_H_
diff --git a/src/aarch32/location-aarch32.cc b/src/aarch32/location-aarch32.cc
new file mode 100644
index 00000000..d61aafa9
--- /dev/null
+++ b/src/aarch32/location-aarch32.cc
@@ -0,0 +1,152 @@
+// Copyright 2017, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "location-aarch32.h"
+
+#include "assembler-aarch32.h"
+#include "macro-assembler-aarch32.h"
+
+namespace vixl {
+
+namespace aarch32 {
+
+bool Location::Needs16BitPadding(int32_t location) const {
+ if (!HasForwardReferences()) return false;
+ const ForwardRef& last_ref = GetLastForwardReference();
+ int32_t min_location_last_ref = last_ref.GetMinLocation();
+ VIXL_ASSERT(min_location_last_ref - location <= 2);
+ return (min_location_last_ref > location);
+}
+
+void Location::ResolveReferences(internal::AssemblerBase* assembler) {
+ // Iterate over references and call EncodeLocationFor on each of them.
+ for (ForwardRefListIterator it(this); !it.Done(); it.Advance()) {
+ const ForwardRef& reference = *it.Current();
+ VIXL_ASSERT(reference.LocationIsEncodable(location_));
+ int32_t from = reference.GetLocation();
+ EncodeLocationFor(assembler, from, reference.op());
+ }
+ forward_.clear();
+}
+
+static bool Is16BitEncoding(uint16_t instr) {
+ return instr < (kLowestT32_32Opcode >> 16);
+}
+
+void Location::EncodeLocationFor(internal::AssemblerBase* assembler,
+ int32_t from,
+ const Location::EmitOperator* encoder) {
+ if (encoder->IsUsingT32()) {
+ uint16_t* instr_ptr =
+ assembler->GetBuffer()->GetOffsetAddress<uint16_t*>(from);
+ if (Is16BitEncoding(instr_ptr[0])) {
+ // The Encode methods always deals with uint32_t types so we need
+ // to explicitly cast it.
+ uint32_t instr = static_cast<uint32_t>(instr_ptr[0]);
+ instr = encoder->Encode(instr, from, this);
+ // The Encode method should not ever set the top 16 bits.
+ VIXL_ASSERT((instr & ~0xffff) == 0);
+ instr_ptr[0] = static_cast<uint16_t>(instr);
+ } else {
+ uint32_t instr =
+ instr_ptr[1] | (static_cast<uint32_t>(instr_ptr[0]) << 16);
+ instr = encoder->Encode(instr, from, this);
+ instr_ptr[0] = static_cast<uint16_t>(instr >> 16);
+ instr_ptr[1] = static_cast<uint16_t>(instr);
+ }
+ } else {
+ uint32_t* instr_ptr =
+ assembler->GetBuffer()->GetOffsetAddress<uint32_t*>(from);
+ instr_ptr[0] = encoder->Encode(instr_ptr[0], from, this);
+ }
+}
+
+void Location::AddForwardRef(int32_t instr_location,
+ const EmitOperator& op,
+ const ReferenceInfo* info) {
+ VIXL_ASSERT(referenced_);
+ int32_t from = instr_location + (op.IsUsingT32() ? kT32PcDelta : kA32PcDelta);
+ if (info->pc_needs_aligning == ReferenceInfo::kAlignPc)
+ from = AlignDown(from, 4);
+ int32_t min_object_location = from + info->min_offset;
+ int32_t max_object_location = from + info->max_offset;
+ forward_.insert(ForwardRef(&op,
+ instr_location,
+ info->size,
+ min_object_location,
+ max_object_location,
+ info->alignment));
+}
+
+int Location::GetMaxAlignment() const {
+ int max_alignment = GetPoolObjectAlignment();
+ for (ForwardRefListIterator it(const_cast<Location*>(this)); !it.Done();
+ it.Advance()) {
+ const ForwardRef& reference = *it.Current();
+ if (reference.GetAlignment() > max_alignment)
+ max_alignment = reference.GetAlignment();
+ }
+ return max_alignment;
+}
+
+int Location::GetMinLocation() const {
+ int32_t min_location = 0;
+ for (ForwardRefListIterator it(const_cast<Location*>(this)); !it.Done();
+ it.Advance()) {
+ const ForwardRef& reference = *it.Current();
+ if (reference.GetMinLocation() > min_location)
+ min_location = reference.GetMinLocation();
+ }
+ return min_location;
+}
+
+void Label::UpdatePoolObject(PoolObject<int32_t>* object) {
+ VIXL_ASSERT(forward_.size() == 1);
+ const ForwardRef& reference = forward_.Front();
+ object->Update(reference.GetMinLocation(),
+ reference.GetMaxLocation(),
+ reference.GetAlignment());
+}
+
+void Label::EmitPoolObject(MacroAssemblerInterface* masm) {
+ MacroAssembler* macro_assembler = static_cast<MacroAssembler*>(masm);
+
+ // Add a new branch to this label.
+ macro_assembler->GetBuffer()->EnsureSpaceFor(kMaxInstructionSizeInBytes);
+ ExactAssemblyScopeWithoutPoolsCheck guard(macro_assembler,
+ kMaxInstructionSizeInBytes,
+ ExactAssemblyScope::kMaximumSize);
+ macro_assembler->b(this);
+}
+
+void RawLiteral::EmitPoolObject(MacroAssemblerInterface* masm) {
+ Assembler* assembler = static_cast<Assembler*>(masm->AsAssemblerBase());
+
+ assembler->GetBuffer()->EnsureSpaceFor(GetSize());
+ assembler->GetBuffer()->EmitData(GetDataAddress(), GetSize());
+}
+}
+}
diff --git a/src/aarch32/location-aarch32.h b/src/aarch32/location-aarch32.h
new file mode 100644
index 00000000..1aea268e
--- /dev/null
+++ b/src/aarch32/location-aarch32.h
@@ -0,0 +1,409 @@
+// Copyright 2017, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_AARCH32_LABEL_AARCH32_H_
+#define VIXL_AARCH32_LABEL_AARCH32_H_
+
+extern "C" {
+#include <stdint.h>
+}
+
+#include <algorithm>
+#include <cstddef>
+#include <iomanip>
+#include <list>
+
+#include "pool-manager.h"
+#include "invalset-vixl.h"
+#include "utils-vixl.h"
+
+#include "constants-aarch32.h"
+#include "instructions-aarch32.h"
+
+namespace vixl {
+
+namespace aarch32 {
+
+class MacroAssembler;
+
+class Location : public LocationBase<int32_t> {
+ friend class Assembler;
+ friend class MacroAssembler;
+
+ public:
+ // Unbound location that can be used with the assembler bind() method and
+ // with the assembler methods for generating instructions, but will never
+ // be handled by the pool manager.
+ Location()
+ : LocationBase<int32_t>(kRawLocation, 1 /* dummy size*/),
+ referenced_(false) {}
+
+ typedef int32_t Offset;
+
+ ~Location() VIXL_THROW_IN_NEGATIVE_TESTING_MODE(std::runtime_error) {
+#ifdef VIXL_DEBUG
+ if (IsReferenced() && !IsBound()) {
+ VIXL_ABORT_WITH_MSG("Location, label or literal used but not bound.\n");
+ }
+#endif
+ }
+
+ private:
+ class EmitOperator {
+ public:
+ explicit EmitOperator(InstructionSet isa) : isa_(isa) {
+#if defined(VIXL_INCLUDE_TARGET_A32_ONLY)
+ USE(isa_);
+ VIXL_ASSERT(isa == A32);
+#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY)
+ USE(isa_);
+ VIXL_ASSERT(isa == T32);
+#endif
+ }
+ virtual ~EmitOperator() {}
+ virtual uint32_t Encode(uint32_t /*instr*/,
+ Location::Offset /*pc*/,
+ const Location* /*label*/) const {
+ return 0;
+ }
+#if defined(VIXL_INCLUDE_TARGET_A32_ONLY)
+ bool IsUsingT32() const { return false; }
+#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY)
+ bool IsUsingT32() const { return true; }
+#else
+ bool IsUsingT32() const { return isa_ == T32; }
+#endif
+
+ private:
+ InstructionSet isa_;
+ };
+
+ protected:
+ class ForwardRef : public ForwardReference<int32_t> {
+ public:
+ // Default constructor for InvalSet.
+ ForwardRef() : ForwardReference<int32_t>(0, 0, 0, 0, 1), op_(NULL) {}
+
+ ForwardRef(const Location::EmitOperator* op,
+ int32_t location,
+ int size,
+ int32_t min_object_location,
+ int32_t max_object_location,
+ int object_alignment = 1)
+ : ForwardReference<int32_t>(location,
+ size,
+ min_object_location,
+ max_object_location,
+ object_alignment),
+ op_(op) {}
+
+ const Location::EmitOperator* op() const { return op_; }
+
+ // We must provide comparison operators to work with InvalSet.
+ bool operator==(const ForwardRef& other) const {
+ return GetLocation() == other.GetLocation();
+ }
+ bool operator<(const ForwardRef& other) const {
+ return GetLocation() < other.GetLocation();
+ }
+ bool operator<=(const ForwardRef& other) const {
+ return GetLocation() <= other.GetLocation();
+ }
+ bool operator>(const ForwardRef& other) const {
+ return GetLocation() > other.GetLocation();
+ }
+
+ private:
+ const Location::EmitOperator* op_;
+ };
+
+ static const int kNPreallocatedElements = 4;
+ // The following parameters will not affect ForwardRefList in practice, as we
+ // resolve all references at once and clear the list, so we do not need to
+ // remove individual elements by invalidating them.
+ static const int32_t kInvalidLinkKey = INT32_MAX;
+ static const size_t kReclaimFrom = 512;
+ static const size_t kReclaimFactor = 2;
+
+ typedef InvalSet<ForwardRef,
+ kNPreallocatedElements,
+ int32_t,
+ kInvalidLinkKey,
+ kReclaimFrom,
+ kReclaimFactor> ForwardRefListBase;
+ typedef InvalSetIterator<ForwardRefListBase> ForwardRefListIteratorBase;
+
+ class ForwardRefList : public ForwardRefListBase {
+ public:
+ ForwardRefList() : ForwardRefListBase() {}
+
+ using ForwardRefListBase::Back;
+ using ForwardRefListBase::Front;
+ };
+
+ class ForwardRefListIterator : public ForwardRefListIteratorBase {
+ public:
+ explicit ForwardRefListIterator(Location* location)
+ : ForwardRefListIteratorBase(&location->forward_) {}
+
+ // TODO: Remove these and use the STL-like interface instead. We'll need a
+ // const_iterator implemented for this.
+ using ForwardRefListIteratorBase::Advance;
+ using ForwardRefListIteratorBase::Current;
+ };
+
+ // For InvalSet::GetKey() and InvalSet::SetKey().
+ friend class InvalSet<ForwardRef,
+ kNPreallocatedElements,
+ int32_t,
+ kInvalidLinkKey,
+ kReclaimFrom,
+ kReclaimFactor>;
+
+ private:
+ virtual void ResolveReferences(internal::AssemblerBase* assembler)
+ VIXL_OVERRIDE;
+
+ void SetReferenced() { referenced_ = true; }
+ bool IsReferenced() const { return referenced_; }
+
+ bool HasForwardReferences() const { return !forward_.empty(); }
+
+ ForwardRef GetLastForwardReference() const {
+ VIXL_ASSERT(HasForwardReferences());
+ return forward_.Back();
+ }
+
+ // Add forward reference to this object. Called from the assembler.
+ void AddForwardRef(int32_t instr_location,
+ const EmitOperator& op,
+ const ReferenceInfo* info);
+
+ // Check if we need to add padding when binding this object, in order to
+ // meet the minimum location requirement.
+ bool Needs16BitPadding(int location) const;
+
+ void EncodeLocationFor(internal::AssemblerBase* assembler,
+ int32_t from,
+ const Location::EmitOperator* encoder);
+
+ // True if the label has been used at least once.
+ bool referenced_;
+
+ protected:
+ // Types passed to LocationBase. Must be distinct for unbound Locations (not
+ // relevant for bound locations, as they don't have a correspoding
+ // PoolObject).
+ static const int kRawLocation = 0; // Will not be used by the pool manager.
+ static const int kVeneerType = 1;
+ static const int kLiteralType = 2;
+
+ // Contains the references to the unbound label
+ ForwardRefList forward_;
+
+ // To be used only by derived classes.
+ Location(uint32_t type, int size, int alignment)
+ : LocationBase<int32_t>(type, size, alignment), referenced_(false) {}
+
+ // To be used only by derived classes.
+ explicit Location(Offset location)
+ : LocationBase<int32_t>(location), referenced_(false) {}
+
+ virtual int GetMaxAlignment() const VIXL_OVERRIDE;
+ virtual int GetMinLocation() const VIXL_OVERRIDE;
+
+ private:
+ // Included to make the class concrete, however should never be called.
+ virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE {
+ USE(masm);
+ VIXL_UNREACHABLE();
+ }
+};
+
+class Label : public Location {
+ static const int kVeneerSize = 4;
+ // Use an alignment of 1 for all architectures. Even though we can bind an
+ // unused label, because of the way the MacroAssembler works we can always be
+ // sure to have the correct buffer alignment for the instruction set we are
+ // using, so we do not need to enforce additional alignment requirements
+ // here.
+ // TODO: Consider modifying the interface of the pool manager to pass an
+ // optional additional alignment to Bind() in order to handle cases where the
+ // buffer could be unaligned.
+ static const int kVeneerAlignment = 1;
+
+ public:
+ Label() : Location(kVeneerType, kVeneerSize, kVeneerAlignment) {}
+ explicit Label(Offset location) : Location(location) {}
+
+ private:
+ virtual bool ShouldBeDeletedOnPlacementByPoolManager() const VIXL_OVERRIDE {
+ return false;
+ }
+ virtual bool ShouldDeletePoolObjectOnPlacement() const VIXL_OVERRIDE {
+ return false;
+ }
+
+ virtual void UpdatePoolObject(PoolObject<int32_t>* object) VIXL_OVERRIDE;
+ virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE;
+
+ virtual bool UsePoolObjectEmissionMargin() const VIXL_OVERRIDE {
+ return true;
+ }
+ virtual int32_t GetPoolObjectEmissionMargin() const VIXL_OVERRIDE {
+ VIXL_ASSERT(UsePoolObjectEmissionMargin() == true);
+ return 1 * KBytes;
+ }
+};
+
+class RawLiteral : public Location {
+ // Some load instructions require alignment to 4 bytes. Since we do
+ // not know what instructions will reference a literal after we place
+ // it, we enforce a 4 byte alignment for literals that are 4 bytes or
+ // larger.
+ static const int kLiteralAlignment = 4;
+
+ public:
+ enum PlacementPolicy { kPlacedWhenUsed, kManuallyPlaced };
+
+ enum DeletionPolicy {
+ kDeletedOnPlacementByPool,
+ kDeletedOnPoolDestruction,
+ kManuallyDeleted
+ };
+
+ RawLiteral(const void* addr,
+ int size,
+ PlacementPolicy placement_policy = kPlacedWhenUsed,
+ DeletionPolicy deletion_policy = kManuallyDeleted)
+ : Location(kLiteralType,
+ size,
+ (size < kLiteralAlignment) ? size : kLiteralAlignment),
+ addr_(addr),
+ manually_placed_(placement_policy == kManuallyPlaced),
+ deletion_policy_(deletion_policy) {
+ // We can't have manually placed literals that are not manually deleted.
+ VIXL_ASSERT(!IsManuallyPlaced() ||
+ (GetDeletionPolicy() == kManuallyDeleted));
+ }
+ RawLiteral(const void* addr, int size, DeletionPolicy deletion_policy)
+ : Location(kLiteralType,
+ size,
+ (size < kLiteralAlignment) ? size : kLiteralAlignment),
+ addr_(addr),
+ manually_placed_(false),
+ deletion_policy_(deletion_policy) {}
+ const void* GetDataAddress() const { return addr_; }
+ int GetSize() const { return GetPoolObjectSizeInBytes(); }
+
+ bool IsManuallyPlaced() const { return manually_placed_; }
+
+ private:
+ DeletionPolicy GetDeletionPolicy() const { return deletion_policy_; }
+
+ virtual bool ShouldBeDeletedOnPlacementByPoolManager() const VIXL_OVERRIDE {
+ return GetDeletionPolicy() == kDeletedOnPlacementByPool;
+ }
+ virtual bool ShouldBeDeletedOnPoolManagerDestruction() const VIXL_OVERRIDE {
+ return GetDeletionPolicy() == kDeletedOnPoolDestruction;
+ }
+ virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE;
+
+ // Address of the data before it is copied into the code buffer.
+ const void* const addr_;
+ // When this flag is true, the literal will be placed manually.
+ bool manually_placed_;
+ // Specifies when the literal is to be removed from memory.
+ // It can be delete'd when:
+ //   it is placed in the code buffer: kDeletedOnPlacementByPool,
+ //   the pool manager is destroyed: kDeletedOnPoolDestruction,
+ //   or it is left to the application: kManuallyDeleted.
+ DeletionPolicy deletion_policy_;
+
+ friend class MacroAssembler;
+};
+
+template <typename T>
+class Literal : public RawLiteral {
+ public:
+ explicit Literal(const T& value,
+ PlacementPolicy placement_policy = kPlacedWhenUsed,
+ DeletionPolicy deletion_policy = kManuallyDeleted)
+ : RawLiteral(&value_, sizeof(T), placement_policy, deletion_policy),
+ value_(value) {}
+ explicit Literal(const T& value, DeletionPolicy deletion_policy)
+ : RawLiteral(&value_, sizeof(T), deletion_policy), value_(value) {}
+ void UpdateValue(const T& value, CodeBuffer* buffer) {
+ value_ = value;
+ if (IsBound()) {
+ buffer->UpdateData(GetLocation(), GetDataAddress(), GetSize());
+ }
+ }
+
+ private:
+ T value_;
+};
+
+class StringLiteral : public RawLiteral {
+ public:
+ explicit StringLiteral(const char* str,
+ PlacementPolicy placement_policy = kPlacedWhenUsed,
+ DeletionPolicy deletion_policy = kManuallyDeleted)
+ : RawLiteral(str,
+ static_cast<int>(strlen(str) + 1),
+ placement_policy,
+ deletion_policy) {
+ VIXL_ASSERT((strlen(str) + 1) <= kMaxObjectSize);
+ }
+ explicit StringLiteral(const char* str, DeletionPolicy deletion_policy)
+ : RawLiteral(str, static_cast<int>(strlen(str) + 1), deletion_policy) {
+ VIXL_ASSERT((strlen(str) + 1) <= kMaxObjectSize);
+ }
+};
+
+} // namespace aarch32
+
+
+// Required InvalSet template specialisations.
+#define INVAL_SET_TEMPLATE_PARAMETERS \
+ aarch32::Location::ForwardRef, aarch32::Location::kNPreallocatedElements, \
+ int32_t, aarch32::Location::kInvalidLinkKey, \
+ aarch32::Location::kReclaimFrom, aarch32::Location::kReclaimFactor
+template <>
+inline int32_t InvalSet<INVAL_SET_TEMPLATE_PARAMETERS>::GetKey(
+ const aarch32::Location::ForwardRef& element) {
+ return element.GetLocation();
+}
+template <>
+inline void InvalSet<INVAL_SET_TEMPLATE_PARAMETERS>::SetKey(
+ aarch32::Location::ForwardRef* element, int32_t key) {
+ element->SetLocationToInvalidateOnly(key);
+}
+#undef INVAL_SET_TEMPLATE_PARAMETERS
+
+} // namespace vixl
+
+#endif // VIXL_AARCH32_LABEL_AARCH32_H_
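For reference, here is a minimal usage sketch of the Label and Literal classes declared above. It assumes a `MacroAssembler masm` together with the `Ldr`, `B`, `Bind` and `Place` macro instructions updated later in this patch; the values and register choices are illustrative only.

    // Pool-managed literal: the pool manager places it when needed and it is
    // delete'd on placement (kDeletedOnPlacementByPool).
    Literal<uint32_t>* imm =
        new Literal<uint32_t>(0x12345678, RawLiteral::kDeletedOnPlacementByPool);
    masm.Ldr(r0, imm);  // Records a forward reference to the literal.

    // Manually placed (and therefore manually deleted) literal.
    Literal<uint32_t> manual(42,
                             RawLiteral::kManuallyPlaced,
                             RawLiteral::kManuallyDeleted);
    Label skip;
    masm.B(&skip);        // Branch over the data that follows.
    masm.Place(&manual);  // Emit the literal here, word-aligned.
    masm.Bind(&skip);
    masm.Ldr(r1, &manual);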
diff --git a/src/aarch32/macro-assembler-aarch32.cc b/src/aarch32/macro-assembler-aarch32.cc
index e86b139d..56c0ffbd 100644
--- a/src/aarch32/macro-assembler-aarch32.cc
+++ b/src/aarch32/macro-assembler-aarch32.cc
@@ -1,4 +1,4 @@
-// Copyright 2015, VIXL authors
+// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -36,23 +36,12 @@
namespace vixl {
namespace aarch32 {
-// We use a subclass to access the protected `ExactAssemblyScope` constructor
-// giving us control over the pools, and make the constructor private to limit
-// usage to code paths emitting pools.
-class ExactAssemblyScopeWithoutPoolsCheck : public ExactAssemblyScope {
- private:
- ExactAssemblyScopeWithoutPoolsCheck(MacroAssembler* masm,
- size_t size,
- SizePolicy size_policy = kExactSize)
- : ExactAssemblyScope(masm,
- size,
- size_policy,
- ExactAssemblyScope::kIgnorePools) {}
-
- friend class MacroAssembler;
- friend class VeneerPoolManager;
-};
-
+ExactAssemblyScopeWithoutPoolsCheck::ExactAssemblyScopeWithoutPoolsCheck(
+ MacroAssembler* masm, size_t size, SizePolicy size_policy)
+ : ExactAssemblyScope(masm,
+ size,
+ size_policy,
+ ExactAssemblyScope::kIgnorePools) {}
void UseScratchRegisterScope::Open(MacroAssembler* masm) {
VIXL_ASSERT(masm_ == NULL);
@@ -216,280 +205,17 @@ void UseScratchRegisterScope::ExcludeAll() {
}
-void VeneerPoolManager::AddLabel(Label* label) {
- if (last_label_reference_offset_ != 0) {
- // If the pool grows faster than the instruction stream, we must adjust
- // the checkpoint to compensate. The veneer pool entries take 32 bits, so
- // this can only occur when two consecutive 16-bit instructions add veneer
- // pool entries.
- // This is typically the case for cbz and cbnz (other forward branches
- // have a 32 bit variant which is always used).
- if (last_label_reference_offset_ + 2 * k16BitT32InstructionSizeInBytes ==
- static_cast<uint32_t>(masm_->GetCursorOffset())) {
- // We found two 16 bit forward branches generated one after the other.
- // That means that the pool will grow by one 32-bit branch when
- // the cursor offset will move forward by only one 16-bit branch.
- // Update the near checkpoint margin to manage the difference.
- near_checkpoint_margin_ +=
- k32BitT32InstructionSizeInBytes - k16BitT32InstructionSizeInBytes;
- }
- }
- Label::ForwardReference& back = label->GetBackForwardRef();
- VIXL_ASSERT(back.GetMaxForwardDistance() >= kCbzCbnzRange);
- if (!label->IsInVeneerPool()) {
- if (back.GetMaxForwardDistance() <= kNearLabelRange) {
- near_labels_.push_back(label);
- label->SetVeneerPoolManager(this, true);
- } else {
- far_labels_.push_back(label);
- label->SetVeneerPoolManager(this, false);
- }
- } else if (back.GetMaxForwardDistance() <= kNearLabelRange) {
- if (!label->IsNear()) {
- far_labels_.remove(label);
- near_labels_.push_back(label);
- label->SetVeneerPoolManager(this, true);
- }
- }
-
- back.SetIsBranch();
- last_label_reference_offset_ = back.GetLocation();
- label->UpdateCheckpoint();
- Label::Offset tmp = label->GetCheckpoint();
- if (label->IsNear()) {
- if (near_checkpoint_ > tmp) near_checkpoint_ = tmp;
- if (max_near_checkpoint_ >= tmp) {
- // This checkpoint is before some already in the near list. That means
- // that the veneer (if needed) will be emitted before some of the veneers
- // already in the list. We adjust the margin with the size of a veneer
- // branch.
- near_checkpoint_margin_ += k32BitT32InstructionSizeInBytes;
- } else {
- max_near_checkpoint_ = tmp;
- }
- } else {
- if (far_checkpoint_ > tmp) far_checkpoint_ = tmp;
- }
- // Always compute the global checkpoint as, adding veneers shorten the
- // literals' checkpoint.
- masm_->ComputeCheckpoint();
-}
-
-
-void VeneerPoolManager::RemoveLabel(Label* label) {
- label->ClearVeneerPoolManager();
- std::list<Label*>& list = label->IsNear() ? near_labels_ : far_labels_;
- Label::Offset* checkpoint_reference =
- label->IsNear() ? &near_checkpoint_ : &far_checkpoint_;
- if (label->GetCheckpoint() == *checkpoint_reference) {
- // We have to compute checkpoint again.
- *checkpoint_reference = Label::kMaxOffset;
- for (std::list<Label*>::iterator it = list.begin(); it != list.end();) {
- if (*it == label) {
- it = list.erase(it);
- } else {
- *checkpoint_reference =
- std::min(*checkpoint_reference, (*it)->GetCheckpoint());
- ++it;
- }
- }
- masm_->ComputeCheckpoint();
- } else {
- // We only have to remove the label from the list.
- list.remove(label);
- }
-}
-
-
-void VeneerPoolManager::EmitLabel(Label* label, Label::Offset emitted_target) {
- VIXL_ASSERT(!IsBlocked());
- // Define the veneer.
- Label veneer;
- masm_->Bind(&veneer);
- Label::Offset label_checkpoint = Label::kMaxOffset;
- // Check all uses of this label.
- for (Label::ForwardRefList::iterator ref = label->GetFirstForwardRef();
- ref != label->GetEndForwardRef();) {
- if (ref->IsBranch()) {
- if (ref->GetCheckpoint() <= emitted_target) {
- // Use the veneer.
- masm_->EncodeLocationFor(*ref, &veneer);
- ref = label->Erase(ref);
- } else {
- // Don't use the veneer => update checkpoint.
- label_checkpoint = std::min(label_checkpoint, ref->GetCheckpoint());
- ++ref;
- }
- } else {
- ++ref;
- }
- }
- label->SetCheckpoint(label_checkpoint);
- if (label->IsNear()) {
- near_checkpoint_ = std::min(near_checkpoint_, label_checkpoint);
- } else {
- far_checkpoint_ = std::min(far_checkpoint_, label_checkpoint);
- }
- // Generate the veneer.
- ExactAssemblyScopeWithoutPoolsCheck guard(masm_,
- kMaxInstructionSizeInBytes,
- ExactAssemblyScope::kMaximumSize);
- masm_->b(label);
- masm_->AddBranchLabel(label);
-}
-
-
-void VeneerPoolManager::Emit(Label::Offset target) {
- VIXL_ASSERT(!IsBlocked());
- // Sort labels (regarding their checkpoint) to avoid that a veneer
- // becomes out of range.
- near_labels_.sort(Label::CompareLabels);
- far_labels_.sort(Label::CompareLabels);
- // To avoid too many veneers, generate veneers which will be necessary soon.
- target += static_cast<int>(GetMaxSize()) + near_checkpoint_margin_;
- static const size_t kVeneerEmissionMargin = 1 * KBytes;
- // To avoid too many veneers, use generated veneers for other not too far
- // uses.
- static const size_t kVeneerEmittedMargin = 2 * KBytes;
- Label::Offset emitted_target = target + kVeneerEmittedMargin;
- target += kVeneerEmissionMargin;
- // Reset the checkpoints. They will be computed again in the loop.
- near_checkpoint_ = Label::kMaxOffset;
- far_checkpoint_ = Label::kMaxOffset;
- max_near_checkpoint_ = 0;
- near_checkpoint_margin_ = 0;
- for (std::list<Label*>::iterator it = near_labels_.begin();
- it != near_labels_.end();) {
- Label* label = *it;
- // Move the label from the near list to the far list as it will be needed in
- // the far list (as the veneer will generate a far branch).
- // The label is pushed at the end of the list. The list remains sorted as
- // we use an unconditional jump which has the biggest range. However, it
- // wouldn't be a problem if the items at the end of the list were not
- // sorted as they won't be used by this generation (their range will be
- // greater than kVeneerEmittedMargin).
- it = near_labels_.erase(it);
- far_labels_.push_back(label);
- label->SetVeneerPoolManager(this, false);
- EmitLabel(label, emitted_target);
- }
- for (std::list<Label*>::iterator it = far_labels_.begin();
- it != far_labels_.end();) {
- // The labels are sorted. As soon as a veneer is not needed, we can stop.
- if ((*it)->GetCheckpoint() > target) {
- far_checkpoint_ = std::min(far_checkpoint_, (*it)->GetCheckpoint());
- break;
- }
- // Even if we no longer have use of this label, we can keep it in the list
- // as the next "B" would add it back.
- EmitLabel(*it, emitted_target);
- ++it;
- }
-#ifdef VIXL_DEBUG
- for (std::list<Label*>::iterator it = near_labels_.begin();
- it != near_labels_.end();
- ++it) {
- VIXL_ASSERT((*it)->GetCheckpoint() >= near_checkpoint_);
- }
- for (std::list<Label*>::iterator it = far_labels_.begin();
- it != far_labels_.end();
- ++it) {
- VIXL_ASSERT((*it)->GetCheckpoint() >= far_checkpoint_);
- }
-#endif
- masm_->ComputeCheckpoint();
-}
-
-
void MacroAssembler::EnsureEmitPoolsFor(size_t size_arg) {
+ // We skip the check when the pools are blocked.
+ if (ArePoolsBlocked()) return;
+
VIXL_ASSERT(IsUint32(size_arg));
uint32_t size = static_cast<uint32_t>(size_arg);
- Label::Offset target = GetCursorOffset() + size;
- if (target <= checkpoint_) return;
-
- EmitOption option = kBranchRequired;
- Label after_pools;
- Label::Offset literal_target = GetTargetForLiteralEmission();
- VIXL_ASSERT(literal_target >= 0);
- bool generate_veneers = target > veneer_pool_manager_.GetCheckpoint();
- if (target > literal_target) {
- // We will generate the literal pool. Generate all the veneers which
- // would become out of range.
- size_t literal_pool_size =
- literal_pool_manager_.GetLiteralPoolSize() + kMaxInstructionSizeInBytes;
- VIXL_ASSERT(IsInt32(literal_pool_size));
- Label::Offset veneers_target =
- AlignUp(target + static_cast<Label::Offset>(literal_pool_size), 4);
- VIXL_ASSERT(veneers_target >= 0);
- if (veneers_target > veneer_pool_manager_.GetCheckpoint()) {
- generate_veneers = true;
- }
- }
- if (!IsVeneerPoolBlocked() && generate_veneers) {
- {
- ExactAssemblyScopeWithoutPoolsCheck
- guard(this,
- kMaxInstructionSizeInBytes,
- ExactAssemblyScope::kMaximumSize);
- b(&after_pools);
- }
- veneer_pool_manager_.Emit(target);
- option = kNoBranchRequired;
- }
- // Check if the macro-assembler's internal literal pool should be emitted
- // to avoid any overflow. If we already generated the veneers, we can
- // emit the pool (the branch is already done).
- if (!IsLiteralPoolBlocked() &&
- ((target > literal_target) || (option == kNoBranchRequired))) {
- EmitLiteralPool(option);
- }
- BindHelper(&after_pools);
-}
-
-
-void MacroAssembler::ComputeCheckpoint() {
- checkpoint_ = AlignDown(std::min(veneer_pool_manager_.GetCheckpoint(),
- GetTargetForLiteralEmission()),
- 4);
-}
-
-
-void MacroAssembler::EmitLiteralPool(LiteralPool* const literal_pool,
- EmitOption option) {
- VIXL_ASSERT(!IsLiteralPoolBlocked());
- if (literal_pool->GetSize() > 0) {
-#ifdef VIXL_DEBUG
- for (LiteralPool::RawLiteralListIterator literal_it =
- literal_pool->GetFirst();
- literal_it != literal_pool->GetEnd();
- literal_it++) {
- RawLiteral* literal = *literal_it;
- VIXL_ASSERT(GetCursorOffset() < literal->GetCheckpoint());
- }
-#endif
- Label after_literal;
- if (option == kBranchRequired) {
- GetBuffer()->EnsureSpaceFor(kMaxInstructionSizeInBytes);
- VIXL_ASSERT(!AllowAssembler());
- {
- ExactAssemblyScopeWithoutPoolsCheck
- guard(this,
- kMaxInstructionSizeInBytes,
- ExactAssemblyScope::kMaximumSize);
- b(&after_literal);
- }
- }
- GetBuffer()->Align();
- GetBuffer()->EnsureSpaceFor(literal_pool->GetSize());
- for (LiteralPool::RawLiteralListIterator it = literal_pool->GetFirst();
- it != literal_pool->GetEnd();
- it++) {
- PlaceHelper(*it);
- GetBuffer()->Align();
- }
- if (option == kBranchRequired) BindHelper(&after_literal);
- literal_pool->Clear();
+ if (pool_manager_.MustEmit(GetCursorOffset(), size)) {
+ int32_t new_pc = pool_manager_.Emit(this, GetCursorOffset(), size);
+ VIXL_ASSERT(new_pc == GetCursorOffset());
+ USE(new_pc);
}
}
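Every emission site in the new scheme reduces to the same handshake with the shared pool manager: ask `MustEmit()` whether emitting `num_bytes` at the current cursor offset would push a tracked pool object out of range, and if so let `Emit()` place the pools first. The same pattern appears in `EnsureEmitPoolsFor` above and in `BindHelper` and `Place` in the header below; a condensed sketch (the helper name is hypothetical):

    void EmitPoolsIfNeeded(MacroAssembler* masm,
                           PoolManager<int32_t>* pool_manager,
                           int num_bytes) {
      int32_t pc = masm->GetCursorOffset();
      if (pool_manager->MustEmit(pc, num_bytes)) {
        // Emit() returns the cursor offset after the pools; it must agree
        // with the buffer's own cursor.
        int32_t new_pc = pool_manager->Emit(masm, pc, num_bytes);
        VIXL_ASSERT(new_pc == masm->GetCursorOffset());
        USE(new_pc);
      }
    }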
@@ -521,25 +247,6 @@ void MacroAssembler::HandleOutOfBoundsImmediate(Condition cond,
}
-void MacroAssembler::PadToMinimumBranchRange(Label* label) {
- const Label::ForwardReference* last_reference = label->GetForwardRefBack();
- if ((last_reference != NULL) && last_reference->IsUsingT32()) {
- uint32_t location = last_reference->GetLocation();
- if (location + k16BitT32InstructionSizeInBytes ==
- static_cast<uint32_t>(GetCursorOffset())) {
- uint16_t* instr_ptr = buffer_.GetOffsetAddress<uint16_t*>(location);
- if ((instr_ptr[0] & kCbzCbnzMask) == kCbzCbnzValue) {
- VIXL_ASSERT(!InITBlock());
- // A Cbz or a Cbnz can't jump immediately after the instruction. If the
- // target is immediately after the Cbz or Cbnz, we insert a nop to
- // avoid that.
- EmitT32_16(k16BitT32NopOpcode);
- }
- }
- }
-}
-
-
MemOperand MacroAssembler::MemOperandComputationHelper(
Condition cond,
Register scratch,
diff --git a/src/aarch32/macro-assembler-aarch32.h b/src/aarch32/macro-assembler-aarch32.h
index 91320ae8..4d52ccbd 100644
--- a/src/aarch32/macro-assembler-aarch32.h
+++ b/src/aarch32/macro-assembler-aarch32.h
@@ -1,4 +1,4 @@
-// Copyright 2015, VIXL authors
+// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -30,6 +30,8 @@
#include "code-generation-scopes-vixl.h"
#include "macro-assembler-interface.h"
+#include "pool-manager.h"
+#include "pool-manager-impl.h"
#include "utils-vixl.h"
#include "aarch32/instructions-aarch32.h"
@@ -37,87 +39,28 @@
#include "aarch32/operands-aarch32.h"
namespace vixl {
+
namespace aarch32 {
class UseScratchRegisterScope;
enum FlagsUpdate { LeaveFlags = 0, SetFlags = 1, DontCare = 2 };
-// LiteralPool class, defined as a container for literals
-class LiteralPool {
- public:
- typedef std::list<RawLiteral*>::iterator RawLiteralListIterator;
-
- public:
- LiteralPool() : size_(0) {}
- ~LiteralPool() {
- VIXL_ASSERT(literals_.empty() && (size_ == 0));
- for (RawLiteralListIterator literal_it = keep_until_delete_.begin();
- literal_it != keep_until_delete_.end();
- literal_it++) {
- delete *literal_it;
- }
- keep_until_delete_.clear();
- }
-
- unsigned GetSize() const { return size_; }
-
- // Add a literal to the literal container.
- void AddLiteral(RawLiteral* literal) {
- // Manually placed literals can't be added to a literal pool.
- VIXL_ASSERT(!literal->IsManuallyPlaced());
- VIXL_ASSERT(!literal->IsBound());
- if (literal->GetPositionInPool() == Label::kMaxOffset) {
- uint32_t position = GetSize();
- literal->SetPositionInPool(position);
- literals_.push_back(literal);
- size_ += literal->GetAlignedSize();
- }
- }
-
- // First literal to be emitted.
- RawLiteralListIterator GetFirst() { return literals_.begin(); }
-
- // Mark the end of the literal container.
- RawLiteralListIterator GetEnd() { return literals_.end(); }
-
- // Remove all the literals from the container.
- // If the literal's memory management has been delegated to the container
- // it will be delete'd.
- void Clear() {
- for (RawLiteralListIterator literal_it = GetFirst(); literal_it != GetEnd();
- literal_it++) {
- RawLiteral* literal = *literal_it;
- switch (literal->GetDeletionPolicy()) {
- case RawLiteral::kDeletedOnPlacementByPool:
- delete literal;
- break;
- case RawLiteral::kDeletedOnPoolDestruction:
- keep_until_delete_.push_back(literal);
- break;
- case RawLiteral::kManuallyDeleted:
- break;
- }
- }
- literals_.clear();
- size_ = 0;
- }
-
+// We use a subclass to access the protected `ExactAssemblyScope` constructor
+// giving us control over the pools, and make the constructor private to limit
+// usage to code paths emitting pools.
+class ExactAssemblyScopeWithoutPoolsCheck : public ExactAssemblyScope {
private:
- // Size (in bytes and including alignments) of the literal pool.
- unsigned size_;
+ ExactAssemblyScopeWithoutPoolsCheck(MacroAssembler* masm,
+ size_t size,
+ SizePolicy size_policy = kExactSize);
- // Literal container.
- std::list<RawLiteral*> literals_;
- // Already bound Literal container the app requested this pool to keep.
- std::list<RawLiteral*> keep_until_delete_;
+ friend class MacroAssembler;
+ friend class Label;
};
-
-
// Macro assembler for aarch32 instruction set.
class MacroAssembler : public Assembler, public MacroAssemblerInterface {
public:
- enum EmitOption { kBranchRequired, kNoBranchRequired };
enum FinalizeOption {
kFallThrough, // There may be more code to execute after calling Finalize.
kUnreachable // Anything generated after calling Finalize is unreachable.
@@ -128,20 +71,59 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
}
virtual bool ArePoolsBlocked() const VIXL_OVERRIDE {
- return IsLiteralPoolBlocked() && IsVeneerPoolBlocked();
+ return pool_manager_.IsBlocked();
+ }
+
+ virtual void EmitPoolHeader() VIXL_OVERRIDE {
+ // Check that we have the correct alignment.
+ if (IsUsingT32()) {
+ VIXL_ASSERT(GetBuffer()->Is16bitAligned());
+ } else {
+ VIXL_ASSERT(GetBuffer()->Is32bitAligned());
+ }
+ VIXL_ASSERT(pool_end_ == NULL);
+ pool_end_ = new Label();
+ ExactAssemblyScopeWithoutPoolsCheck guard(this,
+ kMaxInstructionSizeInBytes,
+ ExactAssemblyScope::kMaximumSize);
+ b(pool_end_);
+ }
+ virtual void EmitPoolFooter() VIXL_OVERRIDE {
+ // Align buffer to 4 bytes.
+ GetBuffer()->Align();
+ if (pool_end_ != NULL) {
+ Bind(pool_end_);
+ delete pool_end_;
+ pool_end_ = NULL;
+ }
+ }
+ virtual void EmitPaddingBytes(int n) VIXL_OVERRIDE {
+ GetBuffer()->EmitZeroedBytes(n);
+ }
+ virtual void EmitNopBytes(int n) VIXL_OVERRIDE {
+ int nops = 0;
+ int nop_size = IsUsingT32() ? k16BitT32InstructionSizeInBytes
+ : kA32InstructionSizeInBytes;
+ VIXL_ASSERT(n % nop_size == 0);
+ nops = n / nop_size;
+ ExactAssemblyScopeWithoutPoolsCheck guard(this,
+ n,
+ ExactAssemblyScope::kExactSize);
+ for (int i = 0; i < nops; ++i) {
+ nop();
+ }
}
- // TODO(pools): implement these functions.
- virtual void EmitPoolHeader() VIXL_OVERRIDE {}
- virtual void EmitPoolFooter() VIXL_OVERRIDE {}
- virtual void EmitPaddingBytes(int n) VIXL_OVERRIDE { USE(n); }
- virtual void EmitNopBytes(int n) VIXL_OVERRIDE { USE(n); }
private:
class MacroEmissionCheckScope : public EmissionCheckScope {
public:
- explicit MacroEmissionCheckScope(MacroAssemblerInterface* masm)
- : EmissionCheckScope(masm, kTypicalMacroInstructionMaxSize) {}
+ explicit MacroEmissionCheckScope(MacroAssemblerInterface* masm,
+ PoolPolicy pool_policy = kBlockPools)
+ : EmissionCheckScope(masm,
+ kTypicalMacroInstructionMaxSize,
+ kMaximumSize,
+ pool_policy) {}
private:
static const size_t kTypicalMacroInstructionMaxSize =
@@ -256,172 +238,10 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
uint32_t initial_cursor_offset_;
};
- template <Assembler::InstructionCondDtDL asmfn>
- class EmitLiteralCondDtDL {
- public:
- EmitLiteralCondDtDL(DataType dt, DRegister rt) : dt_(dt), rt_(rt) {}
- void emit(MacroAssembler* const masm,
- Condition cond,
- RawLiteral* const literal) {
- (masm->*asmfn)(cond, dt_, rt_, literal);
- }
-
- private:
- DataType dt_;
- DRegister rt_;
- };
-
- template <Assembler::InstructionCondDtSL asmfn>
- class EmitLiteralCondDtSL {
- public:
- EmitLiteralCondDtSL(DataType dt, SRegister rt) : dt_(dt), rt_(rt) {}
- void emit(MacroAssembler* const masm,
- Condition cond,
- RawLiteral* const literal) {
- (masm->*asmfn)(cond, dt_, rt_, literal);
- }
-
- private:
- DataType dt_;
- SRegister rt_;
- };
-
- template <Assembler::InstructionCondRL asmfn>
- class EmitLiteralCondRL {
- public:
- explicit EmitLiteralCondRL(Register rt) : rt_(rt) {}
- void emit(MacroAssembler* const masm,
- Condition cond,
- RawLiteral* const literal) {
- (masm->*asmfn)(cond, rt_, literal);
- }
-
- private:
- Register rt_;
- };
-
- template <Assembler::InstructionCondRRL asmfn>
- class EmitLiteralCondRRL {
- public:
- EmitLiteralCondRRL(Register rt, Register rt2) : rt_(rt), rt2_(rt2) {}
- void emit(MacroAssembler* const masm,
- Condition cond,
- RawLiteral* const literal) {
- (masm->*asmfn)(cond, rt_, rt2_, literal);
- }
-
- private:
- Register rt_, rt2_;
- };
-
- class LiteralPoolManager {
- public:
- explicit LiteralPoolManager(MacroAssembler* const masm)
- : masm_(masm), monitor_(0) {
- ResetCheckpoint();
- }
-
- void ResetCheckpoint() { checkpoint_ = Label::kMaxOffset; }
-
- LiteralPool* GetLiteralPool() { return &literal_pool_; }
- Label::Offset GetCheckpoint() const {
- // Make room for a branch over the pools.
- return checkpoint_ - kMaxInstructionSizeInBytes;
- }
- size_t GetLiteralPoolSize() const { return literal_pool_.GetSize(); }
-
- // Checks if the insertion of the literal will put the forward reference
- // too far in the literal pool.
- // This function is called after generating an instruction with a literal.
- // We want to know if the literal can be reached by the instruction.
- // If not, we will unwind the instruction, generate the pool (without the
- // last literal) and generate the instruction again.
- // "literal" is the literal we want to insert into the pool.
- // "from" is the location where the instruction which uses the literal has
- // been generated.
- bool WasInsertedTooFar(RawLiteral* literal) const {
- // Last accessible location for the instruction we just generated, which
- // uses the literal.
- Label::ForwardReference& reference = literal->GetBackForwardRef();
- Label::Offset new_checkpoint = AlignDown(reference.GetCheckpoint(), 4);
-
- // TODO: We should not need to get the min of new_checkpoint and the
- // existing checkpoint. The existing checkpoint should already have
- // been checked when reserving space for this load literal instruction.
- // The assertion below asserts that we don't need the min operation here.
- Label::Offset checkpoint =
- std::min(new_checkpoint, literal->GetAlignedCheckpoint(4));
- bool literal_in_pool =
- (literal->GetPositionInPool() != Label::kMaxOffset);
- Label::Offset position_in_pool = literal_in_pool
- ? literal->GetPositionInPool()
- : literal_pool_.GetSize();
- // Compare the checkpoint to the location where the literal should be
- // added.
- // We add space for two instructions: one branch and one potential veneer
- // which may be added after the check. In this particular use case, no
- // veneer can be added but, this way, we are consistent with all the
- // literal pool checks.
- int32_t from =
- reference.GetLocation() + masm_->GetArchitectureStatePCOffset();
- bool too_far =
- checkpoint < from + position_in_pool +
- 2 * static_cast<int32_t>(kMaxInstructionSizeInBytes);
- // Assert if the literal is already in the pool and the existing
- // checkpoint triggers a rewind here, as this means the pool should
- // already have been emitted (perhaps we have not reserved enough space
- // for the instruction we are about to rewind).
- VIXL_ASSERT(!(too_far && (literal->GetCheckpoint() < new_checkpoint)));
- return too_far;
- }
-
- // Set the different checkpoints where the literal pool has to be emited.
- void UpdateCheckpoint(RawLiteral* literal) {
- // The literal should have been placed somewhere in the literal pool
- VIXL_ASSERT(literal->GetPositionInPool() != Label::kMaxOffset);
- // TODO(all): Consider AddForwardRef as a virtual so the checkpoint is
- // updated when inserted. Or move checkpoint_ into Label,
- literal->UpdateCheckpoint();
- Label::Offset tmp =
- literal->GetAlignedCheckpoint(4) - literal->GetPositionInPool();
- if (checkpoint_ > tmp) {
- checkpoint_ = tmp;
- masm_->ComputeCheckpoint();
- }
- }
-
- bool IsEmpty() const { return GetLiteralPoolSize() == 0; }
-
- void Block() { monitor_++; }
- void Release() {
- VIXL_ASSERT(IsBlocked());
- if (--monitor_ == 0) {
- // Ensure the pool has not been blocked for too long.
- VIXL_ASSERT(masm_->GetCursorOffset() <= checkpoint_);
- }
- }
- bool IsBlocked() const { return monitor_ != 0; }
-
- private:
- MacroAssembler* const masm_;
- LiteralPool literal_pool_;
-
- // Max offset in the code buffer where the literal needs to be
- // emitted. A default value of Label::kMaxOffset means that the checkpoint
- // is invalid.
- Label::Offset checkpoint_;
- // Indicates whether the emission of this pool is blocked.
- int monitor_;
- };
-
protected:
- virtual void BlockPools() VIXL_OVERRIDE {
- BlockLiteralPool();
- BlockVeneerPool();
- }
+ virtual void BlockPools() VIXL_OVERRIDE { pool_manager_.Block(); }
virtual void ReleasePools() VIXL_OVERRIDE {
- ReleaseLiteralPool();
- ReleaseVeneerPool();
+ pool_manager_.Release(GetCursorOffset());
}
virtual void EnsureEmitPoolsFor(size_t size) VIXL_OVERRIDE;
@@ -432,98 +252,52 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
allow_macro_instructions_ = value;
}
- void BlockLiteralPool() { literal_pool_manager_.Block(); }
- void ReleaseLiteralPool() { literal_pool_manager_.Release(); }
- bool IsLiteralPoolBlocked() const {
- return literal_pool_manager_.IsBlocked();
- }
- void BlockVeneerPool() { veneer_pool_manager_.Block(); }
- void ReleaseVeneerPool() { veneer_pool_manager_.Release(); }
- bool IsVeneerPoolBlocked() const { return veneer_pool_manager_.IsBlocked(); }
-
void HandleOutOfBoundsImmediate(Condition cond, Register tmp, uint32_t imm);
- void PadToMinimumBranchRange(Label* label);
-
- // Generate the instruction and if it's not possible revert the whole thing.
- // emit the literal pool and regenerate the instruction.
- // Note: The instruction is generated via
- // void T::emit(MacroAssembler* const, RawLiteral* const)
- template <typename T>
- void GenerateInstruction(Condition cond,
- T instr_callback,
- RawLiteral* const literal) {
- int32_t cursor = GetCursorOffset();
- // Emit the instruction, via the assembler
- {
- MacroEmissionCheckScope guard(this);
- // The ITScope can change the condition and we want to be able to revert
- // this.
- Condition c(cond);
- ITScope it_scope(this, &c, guard);
- instr_callback.emit(this, c, literal);
- }
- if (!literal->IsManuallyPlaced() && !literal->IsBound() &&
- !IsLiteralPoolBlocked()) {
- if (WasInsertedTooFar(literal)) {
- // The instruction's data is too far: revert the emission
- GetBuffer()->Rewind(cursor);
- literal->InvalidateLastForwardReference(RawLiteral::kNoUpdateNecessary);
- EmitLiteralPool(kBranchRequired);
- MacroEmissionCheckScope guard(this);
- ITScope it_scope(this, &cond, guard);
- instr_callback.emit(this, cond, literal);
- }
- // The literal pool above might have included the literal - in which
- // case it will now be bound.
- if (!literal->IsBound()) {
- literal_pool_manager_.GetLiteralPool()->AddLiteral(literal);
- literal_pool_manager_.UpdateCheckpoint(literal);
- }
- }
- }
public:
+ // TODO: If we change the MacroAssembler to disallow setting a different ISA,
+ // we can change the alignment of the pool in the pool manager constructor to
+ // be 2 bytes for T32.
explicit MacroAssembler(InstructionSet isa = kDefaultISA)
: Assembler(isa),
available_(r12),
current_scratch_scope_(NULL),
- checkpoint_(Label::kMaxOffset),
- literal_pool_manager_(this),
- veneer_pool_manager_(this),
- generate_simulator_code_(VIXL_AARCH32_GENERATE_SIMULATOR_CODE) {
+ pool_manager_(4 /*header_size*/,
+ 4 /*alignment*/,
+ 4 /*buffer_alignment*/),
+ generate_simulator_code_(VIXL_AARCH32_GENERATE_SIMULATOR_CODE),
+ pool_end_(NULL) {
#ifdef VIXL_DEBUG
SetAllowMacroInstructions(true);
#else
- USE(literal_pool_manager_);
USE(allow_macro_instructions_);
#endif
- ComputeCheckpoint();
}
explicit MacroAssembler(size_t size, InstructionSet isa = kDefaultISA)
: Assembler(size, isa),
available_(r12),
current_scratch_scope_(NULL),
- checkpoint_(Label::kMaxOffset),
- literal_pool_manager_(this),
- veneer_pool_manager_(this),
- generate_simulator_code_(VIXL_AARCH32_GENERATE_SIMULATOR_CODE) {
+ pool_manager_(4 /*header_size*/,
+ 4 /*alignment*/,
+ 4 /*buffer_alignment*/),
+ generate_simulator_code_(VIXL_AARCH32_GENERATE_SIMULATOR_CODE),
+ pool_end_(NULL) {
#ifdef VIXL_DEBUG
SetAllowMacroInstructions(true);
#endif
- ComputeCheckpoint();
}
MacroAssembler(byte* buffer, size_t size, InstructionSet isa = kDefaultISA)
: Assembler(buffer, size, isa),
available_(r12),
current_scratch_scope_(NULL),
- checkpoint_(Label::kMaxOffset),
- literal_pool_manager_(this),
- veneer_pool_manager_(this),
- generate_simulator_code_(VIXL_AARCH32_GENERATE_SIMULATOR_CODE) {
+ pool_manager_(4 /*header_size*/,
+ 4 /*alignment*/,
+ 4 /*buffer_alignment*/),
+ generate_simulator_code_(VIXL_AARCH32_GENERATE_SIMULATOR_CODE),
+ pool_end_(NULL) {
#ifdef VIXL_DEBUG
SetAllowMacroInstructions(true);
#endif
- ComputeCheckpoint();
}
bool GenerateSimulatorCode() const { return generate_simulator_code_; }
@@ -533,8 +307,9 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
}
void FinalizeCode(FinalizeOption option = kUnreachable) {
- EmitLiteralPool(option == kUnreachable ? kNoBranchRequired
- : kBranchRequired);
+ EmitLiteralPool(option == kUnreachable
+ ? PoolManager<int32_t>::kNoBranchRequired
+ : PoolManager<int32_t>::kBranchRequired);
Assembler::FinalizeCode();
}
@@ -613,64 +388,102 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
void Bind(Label* label) {
VIXL_ASSERT(allow_macro_instructions_);
- PadToMinimumBranchRange(label);
BindHelper(label);
}
- void AddBranchLabel(Label* label) {
- if (label->IsBound()) return;
- veneer_pool_manager_.AddLabel(label);
+ virtual void BindHelper(Label* label) VIXL_OVERRIDE {
+ // Assert that we have the correct buffer alignment.
+ if (IsUsingT32()) {
+ VIXL_ASSERT(GetBuffer()->Is16bitAligned());
+ } else {
+ VIXL_ASSERT(GetBuffer()->Is32bitAligned());
+ }
+ // If we need to add padding, check if we have to emit the pool.
+ const int32_t pc = GetCursorOffset();
+ if (label->Needs16BitPadding(pc)) {
+ const int kPaddingBytes = 2;
+ if (pool_manager_.MustEmit(pc, kPaddingBytes)) {
+ int32_t new_pc = pool_manager_.Emit(this, pc, kPaddingBytes);
+ USE(new_pc);
+ VIXL_ASSERT(new_pc == GetCursorOffset());
+ }
+ }
+ pool_manager_.Bind(this, label, GetCursorOffset());
}
- void Place(RawLiteral* literal) {
- VIXL_ASSERT(allow_macro_instructions_);
- VIXL_ASSERT(literal->IsManuallyPlaced());
- // We have two calls to `GetBuffer()->Align()` below, that aligns on word
- // (4 bytes) boundaries. Only one is taken into account in
- // `GetAlignedSize()`.
- static const size_t kMaxAlignSize = 3;
- size_t size = literal->GetAlignedSize() + kMaxAlignSize;
- VIXL_ASSERT(IsUint32(size));
- // Check if we need to emit the pools or grow the code buffer.
- EmissionCheckScope(this, size);
- // Literals must be emitted aligned on word (4 bytes) boundaries.
- GetBuffer()->Align();
- PlaceHelper(literal);
- GetBuffer()->Align();
+ void RegisterLiteralReference(RawLiteral* literal) {
+ if (literal->IsManuallyPlaced()) return;
+ RegisterForwardReference(literal);
}
- void ComputeCheckpoint();
-
- int32_t GetMarginBeforeVeneerEmission() const {
- return veneer_pool_manager_.GetCheckpoint() - GetCursorOffset();
+ void RegisterForwardReference(Location* location) {
+ if (location->IsBound()) return;
+ VIXL_ASSERT(location->HasForwardReferences());
+ const Location::ForwardRef& reference = location->GetLastForwardReference();
+ pool_manager_.AddObjectReference(&reference, location);
}
- Label::Offset GetTargetForLiteralEmission() const {
- if (literal_pool_manager_.IsEmpty()) return Label::kMaxOffset;
- // We add an instruction to the size as the instruction which calls this
- // function may add a veneer and, without this extra instruction, could put
- // the literals out of range. For example, it's the case for a "B"
- // instruction. At the beginning of the instruction we call
- // EnsureEmitPoolsFor which calls this function. However, the target of the
- // branch hasn't been inserted yet in the veneer pool.
- size_t veneer_max_size =
- veneer_pool_manager_.GetMaxSize() + kMaxInstructionSizeInBytes;
- VIXL_ASSERT(IsInt32(veneer_max_size));
- // We must be able to generate the veneer pool first.
- Label::Offset tmp = literal_pool_manager_.GetCheckpoint() -
- static_cast<Label::Offset>(veneer_max_size);
- VIXL_ASSERT(tmp >= 0);
- return tmp;
+ void CheckEmitPoolForInstruction(const ReferenceInfo* info,
+ Location* location,
+ Condition* cond = NULL) {
+ int size = info->size;
+ int32_t pc = GetCursorOffset();
+ // If we need to emit a branch over the instruction, take this into account.
+ if ((cond != NULL) && NeedBranch(cond)) {
+ size += kBranchSize;
+ pc += kBranchSize;
+ }
+ int32_t from = pc;
+ from += IsUsingT32() ? kT32PcDelta : kA32PcDelta;
+ if (info->pc_needs_aligning) from = AlignDown(from, 4);
+ int32_t min = from + info->min_offset;
+ int32_t max = from + info->max_offset;
+ ForwardReference<int32_t> temp_ref(pc,
+ info->size,
+ min,
+ max,
+ info->alignment);
+ if (pool_manager_.MustEmit(GetCursorOffset(), size, &temp_ref, location)) {
+ int32_t new_pc = pool_manager_.Emit(this,
+ GetCursorOffset(),
+ info->size,
+ &temp_ref,
+ location);
+ USE(new_pc);
+ VIXL_ASSERT(new_pc == GetCursorOffset());
+ }
}
- int32_t GetMarginBeforeLiteralEmission() const {
- Label::Offset tmp = GetTargetForLiteralEmission();
- VIXL_ASSERT(tmp >= GetCursorOffset());
- return tmp - GetCursorOffset();
+ void Place(RawLiteral* literal) {
+ VIXL_ASSERT(allow_macro_instructions_);
+ VIXL_ASSERT(literal->IsManuallyPlaced());
+ // Check if we need to emit the pools. Take the alignment of the literal
+ // into account, as well as potential 16-bit padding needed to reach the
+ // minimum accessible location.
+ int alignment = literal->GetMaxAlignment();
+ int32_t pc = GetCursorOffset();
+ int total_size = AlignUp(pc, alignment) - pc + literal->GetSize();
+ if (literal->Needs16BitPadding(pc)) total_size += 2;
+ if (pool_manager_.MustEmit(pc, total_size)) {
+ int32_t new_pc = pool_manager_.Emit(this, pc, total_size);
+ USE(new_pc);
+ VIXL_ASSERT(new_pc == GetCursorOffset());
+ }
+ pool_manager_.Bind(this, literal, GetCursorOffset());
+ literal->EmitPoolObject(this);
+ // Align the buffer, to be ready to generate instructions right after
+ // this.
+ GetBuffer()->Align();
}
- bool VeneerPoolIsEmpty() const { return veneer_pool_manager_.IsEmpty(); }
- bool LiteralPoolIsEmpty() const { return literal_pool_manager_.IsEmpty(); }
+ void EmitLiteralPool(PoolManager<int32_t>::EmitOption option =
+ PoolManager<int32_t>::kBranchRequired) {
+ VIXL_ASSERT(!ArePoolsBlocked());
+ int32_t new_pc =
+ pool_manager_.Emit(this, GetCursorOffset(), 0, NULL, NULL, option);
+ VIXL_ASSERT(new_pc == GetCursorOffset());
+ USE(new_pc);
+ }
void EnsureEmitFor(uint32_t size) {
EnsureEmitPoolsFor(size);
@@ -678,10 +491,6 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
GetBuffer()->EnsureSpaceFor(size);
}
- bool WasInsertedTooFar(RawLiteral* literal) {
- return literal_pool_manager_.WasInsertedTooFar(literal);
- }
-
bool AliasesAvailableScratchRegister(Register reg) {
return GetScratchRegisterList()->Includes(reg);
}
@@ -738,30 +547,26 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
AliasesAvailableScratchRegister(operand.GetOffsetRegister()));
}
- // Emit the literal pool in the code buffer.
- // Every literal is placed on a 32bit boundary
- // All the literals in the pool will be removed from the pool and potentially
- // delete'd.
- void EmitLiteralPool(LiteralPool* const literal_pool, EmitOption option);
- void EmitLiteralPool(EmitOption option = kBranchRequired) {
- VIXL_ASSERT(!IsLiteralPoolBlocked());
- EmitLiteralPool(literal_pool_manager_.GetLiteralPool(), option);
- literal_pool_manager_.ResetCheckpoint();
- ComputeCheckpoint();
- }
-
- size_t GetLiteralPoolSize() const {
- return literal_pool_manager_.GetLiteralPoolSize();
- }
-
// Adr with a literal already constructed. Add the literal to the pool if it
// is not already done.
void Adr(Condition cond, Register rd, RawLiteral* literal) {
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
VIXL_ASSERT(allow_macro_instructions_);
VIXL_ASSERT(OutsideITBlock());
- EmitLiteralCondRL<&Assembler::adr> emit_helper(rd);
- GenerateInstruction(cond, emit_helper, literal);
+ MacroEmissionCheckScope::PoolPolicy pool_policy =
+ MacroEmissionCheckScope::kBlockPools;
+ if (!literal->IsBound()) {
+ const ReferenceInfo* info;
+ bool can_encode = adr_info(cond, Best, rd, literal, &info);
+ VIXL_CHECK(can_encode);
+ CheckEmitPoolForInstruction(info, literal, &cond);
+ // We have already checked for pool emission.
+ pool_policy = MacroEmissionCheckScope::kIgnorePools;
+ }
+ MacroEmissionCheckScope guard(this, pool_policy);
+ ITScope it_scope(this, &cond, guard);
+ adr(cond, Best, rd, literal);
+ RegisterLiteralReference(literal);
}
void Adr(Register rd, RawLiteral* literal) { Adr(al, rd, literal); }
@@ -771,8 +576,20 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
VIXL_ASSERT(!AliasesAvailableScratchRegister(rt));
VIXL_ASSERT(allow_macro_instructions_);
VIXL_ASSERT(OutsideITBlock());
- EmitLiteralCondRL<&Assembler::ldr> emit_helper(rt);
- GenerateInstruction(cond, emit_helper, literal);
+ MacroEmissionCheckScope::PoolPolicy pool_policy =
+ MacroEmissionCheckScope::kBlockPools;
+ if (!literal->IsBound()) {
+ const ReferenceInfo* info;
+ bool can_encode = ldr_info(cond, Best, rt, literal, &info);
+ VIXL_CHECK(can_encode);
+ CheckEmitPoolForInstruction(info, literal, &cond);
+ // We have already checked for pool emission.
+ pool_policy = MacroEmissionCheckScope::kIgnorePools;
+ }
+ MacroEmissionCheckScope guard(this, pool_policy);
+ ITScope it_scope(this, &cond, guard);
+ ldr(cond, rt, literal);
+ RegisterLiteralReference(literal);
}
void Ldr(Register rt, RawLiteral* literal) { Ldr(al, rt, literal); }
@@ -780,8 +597,20 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
VIXL_ASSERT(!AliasesAvailableScratchRegister(rt));
VIXL_ASSERT(allow_macro_instructions_);
VIXL_ASSERT(OutsideITBlock());
- EmitLiteralCondRL<&Assembler::ldrb> emit_helper(rt);
- GenerateInstruction(cond, emit_helper, literal);
+ MacroEmissionCheckScope::PoolPolicy pool_policy =
+ MacroEmissionCheckScope::kBlockPools;
+ if (!literal->IsBound()) {
+ const ReferenceInfo* info;
+ bool can_encode = ldrb_info(cond, rt, literal, &info);
+ VIXL_CHECK(can_encode);
+ CheckEmitPoolForInstruction(info, literal, &cond);
+ // We have already checked for pool emission.
+ pool_policy = MacroEmissionCheckScope::kIgnorePools;
+ }
+ MacroEmissionCheckScope guard(this, pool_policy);
+ ITScope it_scope(this, &cond, guard);
+ ldrb(cond, rt, literal);
+ RegisterLiteralReference(literal);
}
void Ldrb(Register rt, RawLiteral* literal) { Ldrb(al, rt, literal); }
@@ -790,8 +619,20 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
VIXL_ASSERT(!AliasesAvailableScratchRegister(rt2));
VIXL_ASSERT(allow_macro_instructions_);
VIXL_ASSERT(OutsideITBlock());
- EmitLiteralCondRRL<&Assembler::ldrd> emit_helper(rt, rt2);
- GenerateInstruction(cond, emit_helper, literal);
+ MacroEmissionCheckScope::PoolPolicy pool_policy =
+ MacroEmissionCheckScope::kBlockPools;
+ if (!literal->IsBound()) {
+ const ReferenceInfo* info;
+ bool can_encode = ldrd_info(cond, rt, rt2, literal, &info);
+ VIXL_CHECK(can_encode);
+ CheckEmitPoolForInstruction(info, literal, &cond);
+ // We have already checked for pool emission.
+ pool_policy = MacroEmissionCheckScope::kIgnorePools;
+ }
+ MacroEmissionCheckScope guard(this, pool_policy);
+ ITScope it_scope(this, &cond, guard);
+ ldrd(cond, rt, rt2, literal);
+ RegisterLiteralReference(literal);
}
void Ldrd(Register rt, Register rt2, RawLiteral* literal) {
Ldrd(al, rt, rt2, literal);
@@ -801,8 +642,20 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
VIXL_ASSERT(!AliasesAvailableScratchRegister(rt));
VIXL_ASSERT(allow_macro_instructions_);
VIXL_ASSERT(OutsideITBlock());
- EmitLiteralCondRL<&Assembler::ldrh> emit_helper(rt);
- GenerateInstruction(cond, emit_helper, literal);
+ MacroEmissionCheckScope::PoolPolicy pool_policy =
+ MacroEmissionCheckScope::kBlockPools;
+ if (!literal->IsBound()) {
+ const ReferenceInfo* info;
+ bool can_encode = ldrh_info(cond, rt, literal, &info);
+ VIXL_CHECK(can_encode);
+ CheckEmitPoolForInstruction(info, literal, &cond);
+ // We have already checked for pool emission.
+ pool_policy = MacroEmissionCheckScope::kIgnorePools;
+ }
+ MacroEmissionCheckScope guard(this, pool_policy);
+ ITScope it_scope(this, &cond, guard);
+ ldrh(cond, rt, literal);
+ RegisterLiteralReference(literal);
}
void Ldrh(Register rt, RawLiteral* literal) { Ldrh(al, rt, literal); }
@@ -810,8 +663,20 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
VIXL_ASSERT(!AliasesAvailableScratchRegister(rt));
VIXL_ASSERT(allow_macro_instructions_);
VIXL_ASSERT(OutsideITBlock());
- EmitLiteralCondRL<&Assembler::ldrsb> emit_helper(rt);
- GenerateInstruction(cond, emit_helper, literal);
+ MacroEmissionCheckScope::PoolPolicy pool_policy =
+ MacroEmissionCheckScope::kBlockPools;
+ if (!literal->IsBound()) {
+ const ReferenceInfo* info;
+ bool can_encode = ldrsb_info(cond, rt, literal, &info);
+ VIXL_CHECK(can_encode);
+ CheckEmitPoolForInstruction(info, literal, &cond);
+ // We have already checked for pool emission.
+ pool_policy = MacroEmissionCheckScope::kIgnorePools;
+ }
+ MacroEmissionCheckScope guard(this, pool_policy);
+ ITScope it_scope(this, &cond, guard);
+ ldrsb(cond, rt, literal);
+ RegisterLiteralReference(literal);
}
void Ldrsb(Register rt, RawLiteral* literal) { Ldrsb(al, rt, literal); }
@@ -819,8 +684,20 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
VIXL_ASSERT(!AliasesAvailableScratchRegister(rt));
VIXL_ASSERT(allow_macro_instructions_);
VIXL_ASSERT(OutsideITBlock());
- EmitLiteralCondRL<&Assembler::ldrsh> emit_helper(rt);
- GenerateInstruction(cond, emit_helper, literal);
+ MacroEmissionCheckScope::PoolPolicy pool_policy =
+ MacroEmissionCheckScope::kBlockPools;
+ if (!literal->IsBound()) {
+ const ReferenceInfo* info;
+ bool can_encode = ldrsh_info(cond, rt, literal, &info);
+ VIXL_CHECK(can_encode);
+ CheckEmitPoolForInstruction(info, literal, &cond);
+ // We have already checked for pool emission.
+ pool_policy = MacroEmissionCheckScope::kIgnorePools;
+ }
+ MacroEmissionCheckScope guard(this, pool_policy);
+ ITScope it_scope(this, &cond, guard);
+ ldrsh(cond, rt, literal);
+ RegisterLiteralReference(literal);
}
void Ldrsh(Register rt, RawLiteral* literal) { Ldrsh(al, rt, literal); }
@@ -828,8 +705,20 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
VIXL_ASSERT(allow_macro_instructions_);
VIXL_ASSERT(OutsideITBlock());
- EmitLiteralCondDtDL<&Assembler::vldr> emit_helper(dt, rd);
- GenerateInstruction(cond, emit_helper, literal);
+ MacroEmissionCheckScope::PoolPolicy pool_policy =
+ MacroEmissionCheckScope::kBlockPools;
+ if (!literal->IsBound()) {
+ const ReferenceInfo* info;
+ bool can_encode = vldr_info(cond, dt, rd, literal, &info);
+ VIXL_CHECK(can_encode);
+ CheckEmitPoolForInstruction(info, literal, &cond);
+ // We have already checked for pool emission.
+ pool_policy = MacroEmissionCheckScope::kIgnorePools;
+ }
+ MacroEmissionCheckScope guard(this, pool_policy);
+ ITScope it_scope(this, &cond, guard);
+ vldr(cond, dt, rd, literal);
+ RegisterLiteralReference(literal);
}
void Vldr(DataType dt, DRegister rd, RawLiteral* literal) {
Vldr(al, dt, rd, literal);
@@ -845,8 +734,20 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
VIXL_ASSERT(!AliasesAvailableScratchRegister(rd));
VIXL_ASSERT(allow_macro_instructions_);
VIXL_ASSERT(OutsideITBlock());
- EmitLiteralCondDtSL<&Assembler::vldr> emit_helper(dt, rd);
- GenerateInstruction(cond, emit_helper, literal);
+ MacroEmissionCheckScope::PoolPolicy pool_policy =
+ MacroEmissionCheckScope::kBlockPools;
+ if (!literal->IsBound()) {
+ const ReferenceInfo* info;
+ bool can_encode = vldr_info(cond, dt, rd, literal, &info);
+ VIXL_CHECK(can_encode);
+ CheckEmitPoolForInstruction(info, literal, &cond);
+ // We have already checked for pool emission.
+ pool_policy = MacroEmissionCheckScope::kIgnorePools;
+ }
+ MacroEmissionCheckScope guard(this, pool_policy);
+ ITScope it_scope(this, &cond, guard);
+ vldr(cond, dt, rd, literal);
+ RegisterLiteralReference(literal);
}
void Vldr(DataType dt, SRegister rd, RawLiteral* literal) {
Vldr(al, dt, rd, literal);
@@ -865,8 +766,7 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
VIXL_ASSERT(OutsideITBlock());
RawLiteral* literal =
new Literal<uint32_t>(v, RawLiteral::kDeletedOnPlacementByPool);
- EmitLiteralCondRL<&Assembler::ldr> emit_helper(rt);
- GenerateInstruction(cond, emit_helper, literal);
+ Ldr(cond, rt, literal);
}
template <typename T>
void Ldr(Register rt, T v) {
@@ -881,8 +781,7 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
VIXL_ASSERT(OutsideITBlock());
RawLiteral* literal =
new Literal<uint64_t>(v, RawLiteral::kDeletedOnPlacementByPool);
- EmitLiteralCondRRL<&Assembler::ldrd> emit_helper(rt, rt2);
- GenerateInstruction(cond, emit_helper, literal);
+ Ldrd(cond, rt, rt2, literal);
}
template <typename T>
void Ldrd(Register rt, Register rt2, T v) {
@@ -895,8 +794,7 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
VIXL_ASSERT(OutsideITBlock());
RawLiteral* literal =
new Literal<float>(v, RawLiteral::kDeletedOnPlacementByPool);
- EmitLiteralCondDtSL<&Assembler::vldr> emit_helper(Untyped32, rd);
- GenerateInstruction(cond, emit_helper, literal);
+ Vldr(cond, rd, literal);
}
void Vldr(SRegister rd, float v) { Vldr(al, rd, v); }
@@ -906,8 +804,7 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
VIXL_ASSERT(OutsideITBlock());
RawLiteral* literal =
new Literal<double>(v, RawLiteral::kDeletedOnPlacementByPool);
- EmitLiteralCondDtDL<&Assembler::vldr> emit_helper(Untyped64, rd);
- GenerateInstruction(cond, emit_helper, literal);
+ Vldr(cond, rd, literal);
}
void Vldr(DRegister rd, double v) { Vldr(al, rd, v); }
@@ -1399,17 +1296,21 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
void B(Condition cond, Label* label, BranchHint hint = kBranchWithoutHint) {
VIXL_ASSERT(allow_macro_instructions_);
VIXL_ASSERT(OutsideITBlock());
- MacroEmissionCheckScope guard(this);
- if (hint == kNear) {
- if (label->IsBound()) {
- b(cond, label);
- } else {
- b(cond, Narrow, label);
- }
- } else {
- b(cond, label);
+ EncodingSize size = Best;
+ MacroEmissionCheckScope::PoolPolicy pool_policy =
+ MacroEmissionCheckScope::kBlockPools;
+ if (!label->IsBound()) {
+ if (hint == kNear) size = Narrow;
+ const ReferenceInfo* info;
+ bool can_encode = b_info(cond, size, label, &info);
+ VIXL_CHECK(can_encode);
+ CheckEmitPoolForInstruction(info, label, &cond);
+ // We have already checked for pool emission.
+ pool_policy = MacroEmissionCheckScope::kIgnorePools;
}
- AddBranchLabel(label);
+ MacroEmissionCheckScope guard(this, pool_policy);
+ b(cond, size, label);
+ RegisterForwardReference(label);
}
void B(Label* label, BranchHint hint = kBranchWithoutHint) {
B(al, label, hint);
@@ -1527,20 +1428,40 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
void Bl(Condition cond, Label* label) {
VIXL_ASSERT(allow_macro_instructions_);
VIXL_ASSERT(OutsideITBlock());
- MacroEmissionCheckScope guard(this);
+ MacroEmissionCheckScope::PoolPolicy pool_policy =
+ MacroEmissionCheckScope::kBlockPools;
+ if (!label->IsBound()) {
+ const ReferenceInfo* info;
+ bool can_encode = bl_info(cond, label, &info);
+ VIXL_CHECK(can_encode);
+ CheckEmitPoolForInstruction(info, label, &cond);
+ // We have already checked for pool emission.
+ pool_policy = MacroEmissionCheckScope::kIgnorePools;
+ }
+ MacroEmissionCheckScope guard(this, pool_policy);
ITScope it_scope(this, &cond, guard);
bl(cond, label);
- AddBranchLabel(label);
+ RegisterForwardReference(label);
}
void Bl(Label* label) { Bl(al, label); }
void Blx(Condition cond, Label* label) {
VIXL_ASSERT(allow_macro_instructions_);
VIXL_ASSERT(OutsideITBlock());
- MacroEmissionCheckScope guard(this);
+ MacroEmissionCheckScope::PoolPolicy pool_policy =
+ MacroEmissionCheckScope::kBlockPools;
+ if (!label->IsBound()) {
+ const ReferenceInfo* info;
+ bool can_encode = blx_info(cond, label, &info);
+ VIXL_CHECK(can_encode);
+ CheckEmitPoolForInstruction(info, label, &cond);
+ // We have already checked for pool emission.
+ pool_policy = MacroEmissionCheckScope::kIgnorePools;
+ }
+ MacroEmissionCheckScope guard(this, pool_policy);
ITScope it_scope(this, &cond, guard);
blx(cond, label);
- AddBranchLabel(label);
+ RegisterForwardReference(label);
}
void Blx(Label* label) { Blx(al, label); }
@@ -1584,18 +1505,38 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
VIXL_ASSERT(allow_macro_instructions_);
VIXL_ASSERT(OutsideITBlock());
- MacroEmissionCheckScope guard(this);
+ MacroEmissionCheckScope::PoolPolicy pool_policy =
+ MacroEmissionCheckScope::kBlockPools;
+ if (!label->IsBound()) {
+ const ReferenceInfo* info;
+ bool can_encode = cbnz_info(rn, label, &info);
+ VIXL_CHECK(can_encode);
+ CheckEmitPoolForInstruction(info, label);
+ // We have already checked for pool emission.
+ pool_policy = MacroEmissionCheckScope::kIgnorePools;
+ }
+ MacroEmissionCheckScope guard(this, pool_policy);
cbnz(rn, label);
- AddBranchLabel(label);
+ RegisterForwardReference(label);
}
void Cbz(Register rn, Label* label) {
VIXL_ASSERT(!AliasesAvailableScratchRegister(rn));
VIXL_ASSERT(allow_macro_instructions_);
VIXL_ASSERT(OutsideITBlock());
- MacroEmissionCheckScope guard(this);
+ MacroEmissionCheckScope::PoolPolicy pool_policy =
+ MacroEmissionCheckScope::kBlockPools;
+ if (!label->IsBound()) {
+ const ReferenceInfo* info;
+ bool can_encode = cbz_info(rn, label, &info);
+ VIXL_CHECK(can_encode);
+ CheckEmitPoolForInstruction(info, label);
+ // We have already checked for pool emission.
+ pool_policy = MacroEmissionCheckScope::kIgnorePools;
+ }
+ MacroEmissionCheckScope guard(this, pool_policy);
cbz(rn, label);
- AddBranchLabel(label);
+ RegisterForwardReference(label);
}
void Clrex(Condition cond) {
@@ -10832,15 +10773,19 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
}
private:
+ bool NeedBranch(Condition* cond) { return !cond->Is(al) && IsUsingT32(); }
+ static const int kBranchSize = kMaxInstructionSizeInBytes;
+
RegisterList available_;
VRegisterList available_vfp_;
UseScratchRegisterScope* current_scratch_scope_;
MacroAssemblerContext context_;
- Label::Offset checkpoint_;
- LiteralPoolManager literal_pool_manager_;
- VeneerPoolManager veneer_pool_manager_;
+ PoolManager<int32_t> pool_manager_;
bool generate_simulator_code_;
bool allow_macro_instructions_;
+ Label* pool_end_;
+
+ friend class TestMacroAssembler;
};
// This scope utility allows scratch registers to be managed safely. The
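The range computation in the new `CheckEmitPoolForInstruction` helper can be followed with a small worked example for a T32 `ldr` literal load; the concrete numbers below are illustrative, not taken from the patch:

    // pc                      = 0x100   (cursor offset of the new load)
    // cond                    = al      => no branch over the load is needed
    // from = pc + kT32PcDelta = 0x104   (T32 reads the PC as "here + 4")
    // pc_needs_aligning       => from = AlignDown(0x104, 4) = 0x104
    // min  = from + info->min_offset    e.g. 0x104 - 4095
    // max  = from + info->max_offset    e.g. 0x104 + 4095
    // A temporary ForwardReference built from (pc, size, min, max, alignment)
    // is handed to MustEmit(); if the literal could no longer be placed inside
    // [min, max] without emitting the pools, Emit() runs first and the load is
    // then generated under a scope that uses kIgnorePools.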
diff --git a/src/aarch32/operands-aarch32.cc b/src/aarch32/operands-aarch32.cc
index 5ab7d012..80e22264 100644
--- a/src/aarch32/operands-aarch32.cc
+++ b/src/aarch32/operands-aarch32.cc
@@ -1,4 +1,4 @@
-// Copyright 2015, VIXL authors
+// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -35,6 +35,7 @@ extern "C" {
#include <cstdio>
#include <cstdlib>
#include <cstring>
+#include <iomanip>
#include <iostream>
#include "utils-vixl.h"
diff --git a/src/code-buffer-vixl.cc b/src/code-buffer-vixl.cc
index e88419a0..0fdd373f 100644
--- a/src/code-buffer-vixl.cc
+++ b/src/code-buffer-vixl.cc
@@ -1,4 +1,4 @@
-// Copyright 2014, VIXL authors
+// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -133,12 +133,15 @@ void CodeBuffer::Align() {
byte* end = AlignUp(cursor_, 4);
const size_t padding_size = end - cursor_;
VIXL_ASSERT(padding_size <= 4);
- EnsureSpaceFor(padding_size);
- dirty_ = true;
- memset(cursor_, 0, padding_size);
- cursor_ = end;
+ EmitZeroedBytes(static_cast<int>(padding_size));
}
+void CodeBuffer::EmitZeroedBytes(int n) {
+ EnsureSpaceFor(n);
+ dirty_ = true;
+ memset(cursor_, 0, n);
+ cursor_ += n;
+}
void CodeBuffer::Reset() {
#ifdef VIXL_DEBUG
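A quick worked example of the refactored alignment path, with illustrative offsets:

    // cursor_ = 6:  AlignUp(6, 4) = 8  ->  EmitZeroedBytes(2): two zero bytes
    //                                      are written and cursor_ becomes 8.
    // cursor_ = 8:  AlignUp(8, 4) = 8  ->  EmitZeroedBytes(0): nothing written.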
diff --git a/src/code-buffer-vixl.h b/src/code-buffer-vixl.h
index 17a2f618..b7bc5736 100644
--- a/src/code-buffer-vixl.h
+++ b/src/code-buffer-vixl.h
@@ -1,4 +1,4 @@
-// Copyright 2016, VIXL authors
+// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -134,6 +134,9 @@ class CodeBuffer {
// Align to 32bit.
void Align();
+ // Ensure there is enough space for 'n' zero bytes, then emit them.
+ void EmitZeroedBytes(int n);
+
bool Is16bitAligned() const { return IsAligned<2>(cursor_); }
bool Is32bitAligned() const { return IsAligned<4>(cursor_); }
diff --git a/src/pool-manager-impl.h b/src/pool-manager-impl.h
index 5c815fa8..78947d93 100644
--- a/src/pool-manager-impl.h
+++ b/src/pool-manager-impl.h
@@ -40,7 +40,7 @@ T PoolManager<T>::Emit(MacroAssemblerInterface* masm,
T pc,
int num_bytes,
ForwardReference<T>* new_reference,
- LabelBase<T>* new_object,
+ LocationBase<T>* new_object,
EmitOption option) {
// Make sure that the buffer still has the alignment we think it does.
VIXL_ASSERT(IsAligned(masm->AsAssemblerBase()
@@ -90,7 +90,7 @@ T PoolManager<T>::Emit(MacroAssemblerInterface* masm,
++iter;
continue;
}
- LabelBase<T>* label_base = current.label_base_;
+ LocationBase<T>* label_base = current.label_base_;
T aligned_pc = AlignUp(pc, current.alignment_);
masm->EmitPaddingBytes(aligned_pc - pc);
pc = aligned_pc;
@@ -129,7 +129,7 @@ bool PoolManager<T>::ShouldSkipObject(PoolObject<T>* pool_object,
T pc,
int num_bytes,
ForwardReference<T>* new_reference,
- LabelBase<T>* new_object,
+ LocationBase<T>* new_object,
PoolObject<T>* existing_object) const {
// We assume that all objects before this have been skipped and all objects
// after this will be emitted, therefore we will emit the whole pool. Add
@@ -190,7 +190,7 @@ template <typename T>
bool PoolManager<T>::MustEmit(T pc,
int num_bytes,
ForwardReference<T>* reference,
- LabelBase<T>* label_base) const {
+ LocationBase<T>* label_base) const {
// Check if we are at or past the checkpoint.
if (CheckCurrentPC(pc, checkpoint_)) return true;
@@ -361,7 +361,7 @@ bool PoolManager<T>::PoolObjectLessThan(const PoolObject<T>& a,
template <typename T>
void PoolManager<T>::AddObjectReference(const ForwardReference<T>* reference,
- LabelBase<T>* label_base) {
+ LocationBase<T>* label_base) {
VIXL_ASSERT(reference->object_alignment_ <= buffer_alignment_);
VIXL_ASSERT(label_base->GetPoolObjectAlignment() <= buffer_alignment_);
@@ -423,9 +423,9 @@ template <typename T>
typename PoolManager<T>::objects_iter PoolManager<T>::RemoveAndDelete(
objects_iter iter) {
PoolObject<T>& object = *iter;
- LabelBase<T>* label_base = object.label_base_;
+ LocationBase<T>* label_base = object.label_base_;
- // Check if we also need to delete the LabelBase object.
+ // Check if we also need to delete the LocationBase object.
if (label_base->ShouldBeDeletedOnPoolManagerDestruction()) {
delete_on_destruction_.push_back(label_base);
}
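The ShouldBeDeletedOnPoolManagerDestruction() check above exists for objects whose ownership is transferred to the pool manager. A minimal sketch of the usage it supports, following the pattern the updated tests in this patch use (the helper name and register choice are illustrative):

  // The literal is heap-allocated and tagged so that the pool manager owns
  // it; the caller never deletes it explicitly.
  void LoadOwnedLiteral(vixl::aarch32::MacroAssembler* masm) {
    using namespace vixl::aarch32;
    Literal<uint64_t>* literal =
        new Literal<uint64_t>(UINT64_C(0x1234567890abcdef),
                              RawLiteral::kPlacedWhenUsed,
                              RawLiteral::kDeletedOnPoolDestruction);
    masm->Ldrd(r0, r1, literal);
    // Once the corresponding PoolObject is removed, RemoveAndDelete() queues
    // the literal on delete_on_destruction_ and ~PoolManager() frees it.
  }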
@@ -439,7 +439,7 @@ typename PoolManager<T>::objects_iter PoolManager<T>::RemoveAndDelete(
template <typename T>
T PoolManager<T>::Bind(MacroAssemblerInterface* masm,
- LabelBase<T>* object,
+ LocationBase<T>* object,
T location) {
PoolObject<T>* existing_object = GetObjectIfTracked(object);
int alignment;
@@ -497,7 +497,7 @@ PoolManager<T>::~PoolManager<T>() {
}
#endif
// Delete objects the pool manager owns.
- for (typename std::vector<LabelBase<T>*>::iterator
+ for (typename std::vector<LocationBase<T>*>::iterator
iter = delete_on_destruction_.begin(),
end = delete_on_destruction_.end();
iter != end;
diff --git a/src/pool-manager.h b/src/pool-manager.h
index ea980310..c3596de0 100644
--- a/src/pool-manager.h
+++ b/src/pool-manager.h
@@ -43,7 +43,7 @@ namespace vixl {
class TestPoolManager;
// There are four classes declared in this header file:
-// PoolManager, PoolObject, ForwardReference and LabelBase.
+// PoolManager, PoolObject, ForwardReference and LocationBase.
// The PoolManager manages both literal and veneer pools, and is designed to be
// shared between AArch32 and AArch64. A pool is represented as an abstract
@@ -51,14 +51,14 @@ class TestPoolManager;
// architecture-specific details about literals and veneers; the actual
// emission of the pool objects is delegated.
//
-// Literal and Label will derive from LabelBase. The MacroAssembler will create
-// these objects as instructions that reference pool objects are encountered,
-// and ask the PoolManager to track them. The PoolManager will create an
-// internal PoolObject object for each object derived from LabelBase. Some of
-// these PoolObject objects will be deleted when placed (e.g. the ones
-// corresponding to Literals), whereas others will be updated with a new range
-// when placed (e.g. Veneers) and deleted when Bind() is called on the
-// PoolManager with their corresponding object as a parameter.
+// Literal and Label will derive from LocationBase. The MacroAssembler will
+// create these objects as instructions that reference pool objects are
+// encountered, and ask the PoolManager to track them. The PoolManager will
+// create an internal PoolObject object for each object derived from
+// LocationBase. Some of these PoolObject objects will be deleted when placed
+// (e.g. the ones corresponding to Literals), whereas others will be updated
+// with a new range when placed (e.g. Veneers) and deleted when Bind() is
+// called on the PoolManager with their corresponding object as a parameter.
//
// A ForwardReference represents a reference to a PoolObject that will be
// placed later in the instruction stream. Each ForwardReference may only refer
@@ -66,15 +66,15 @@ class TestPoolManager;
// object.
//
// A PoolObject represents an object that has not yet been placed. The final
-// location of a PoolObject (and hence the LabelBase object to which it
+// location of a PoolObject (and hence the LocationBase object to which it
// corresponds) is constrained mostly by the instructions that refer to it, but
// PoolObjects can also have inherent constraints, such as alignment.
//
-// LabelBase objects, unlike PoolObject objects, can be used outside of the
+// LocationBase objects, unlike PoolObject objects, can be used outside of the
// pool manager (e.g. as manually placed literals, which may still have
// forward references that need to be resolved).
//
-// At the moment, each LabelBase will have at most one PoolObject that keeps
+// At the moment, each LocationBase will have at most one PoolObject that keeps
// the relevant information for placing this object in the pool. When that
// object is placed, all forward references of the object are resolved. For
// that reason, we do not need to keep track of the ForwardReference objects in
@@ -91,16 +91,16 @@ template <typename T>
class PoolManager;
// Represents an object that has a size and alignment, and either has a known
-// location or has not been placed yet. An object of a subclass of LabelBase
+// location or has not been placed yet. An object of a subclass of LocationBase
// will typically keep track of a number of ForwardReferences when it has not
-// yet been placed, but LabelBase does not assume or implement that
-// functionality. LabelBase provides virtual methods for emitting the object,
-// updating all the forward references, and giving the PoolManager information
-// on the lifetime of this object and the corresponding PoolObject.
+// yet been placed, but LocationBase does not assume or implement that
+// functionality. LocationBase provides virtual methods for emitting the
+// object, updating all the forward references, and giving the PoolManager
+// information on the lifetime of this object and the corresponding PoolObject.
template <typename T>
-class LabelBase {
+class LocationBase {
public:
- // The size of a LabelBase object is restricted to 4KB, in order to avoid
+ // The size of a LocationBase object is restricted to 4KB, in order to avoid
// situations where the size of the pool becomes larger than the range of
// an unconditional branch. This cannot happen without having large objects,
// as typically the range of an unconditional branch is the larger range
@@ -109,8 +109,8 @@ class LabelBase {
// another template parameter.
static const int kMaxObjectSize = 4 * KBytes;
- // By default, LabelBase objects are aligned naturally to their size.
- LabelBase(uint32_t type, int size)
+ // By default, LocationBase objects are aligned naturally to their size.
+ LocationBase(uint32_t type, int size)
: pool_object_size_(size),
pool_object_alignment_(size),
pool_object_type_(type),
@@ -122,7 +122,7 @@ class LabelBase {
}
// Allow alignment to be specified, as long as it is smaller than the size.
- LabelBase(uint32_t type, int size, int alignment)
+ LocationBase(uint32_t type, int size, int alignment)
: pool_object_size_(size),
pool_object_alignment_(alignment),
pool_object_type_(type),
@@ -135,15 +135,15 @@ class LabelBase {
}
// Constructor for locations that are already bound.
- explicit LabelBase(T location)
+ explicit LocationBase(T location)
: pool_object_size_(-1),
pool_object_alignment_(-1),
pool_object_type_(0),
is_bound_(true),
location_(location) {}
- virtual ~LabelBase() VIXL_THROW_IN_NEGATIVE_TESTING_MODE(std::runtime_error) {
- }
+ virtual ~LocationBase()
+ VIXL_THROW_IN_NEGATIVE_TESTING_MODE(std::runtime_error) {}
// The PoolManager should assume ownership of some objects, and delete them
// after they have been placed. This can happen for example for literals that
@@ -163,13 +163,13 @@ class LabelBase {
// Resolve the references to this object. Will encode the necessary offset
// in the instruction corresponding to each reference and then delete it.
// TODO: An alternative here would be to provide a ResolveReference()
- // method that only asks the LabelBase to resolve a specific reference (thus
- // allowing the pool manager to resolve some of the references only). This
- // would mean we need to have some kind of API to get all the references to
- // a LabelObject.
+ // method that only asks the LocationBase to resolve a specific reference
+ // (thus allowing the pool manager to resolve some of the references only).
+ // This would mean we need to have some kind of API to get all the references
+  // to a LocationBase object.
virtual void ResolveReferences(internal::AssemblerBase* assembler) = 0;
- // Returns true when the PoolObject corresponding to this LabelBase object
+ // Returns true when the PoolObject corresponding to this LocationBase object
// needs to be removed from the pool once placed, and false if it needs to
// be updated instead (in which case UpdatePoolObject will be called).
virtual bool ShouldDeletePoolObjectOnPlacement() const { return true; }
@@ -246,7 +246,7 @@ template <typename T>
class PoolObject {
public:
// By default, PoolObjects have no inherent position constraints.
- explicit PoolObject(LabelBase<T>* parent)
+ explicit PoolObject(LocationBase<T>* parent)
: label_base_(parent),
min_location_(0),
max_location_(std::numeric_limits<T>::max()),
@@ -258,7 +258,7 @@ class PoolObject {
}
// Reset the minimum and maximum location and the alignment of the object.
- // This function is public in order to allow the LabelBase corresponding to
+ // This function is public in order to allow the LocationBase corresponding to
// this PoolObject to update the PoolObject when placed, e.g. in the case of
// veneers. The size and type of the object cannot be modified.
void Update(T min, T max, int alignment) {
@@ -292,8 +292,8 @@ class PoolObject {
}
}
- // The LabelBase that this pool object represents.
- LabelBase<T>* label_base_;
+ // The LocationBase that this pool object represents.
+ LocationBase<T>* label_base_;
// Hard, precise location constraints for the start location of the object.
// They are both inclusive, that is the start location of the object can be
@@ -322,8 +322,8 @@ class PoolObject {
};
// Class that represents a forward reference. It is the responsibility of
-// LabelBase objects to keep track of forward references and patch them when an
-// object is placed - this class is only used by the PoolManager in order to
+// LocationBase objects to keep track of forward references and patch them when
+// an object is placed - this class is only used by the PoolManager in order to
// restrict the requirements on PoolObjects it is tracking.
template <typename T>
class ForwardReference {
@@ -353,6 +353,9 @@ class ForwardReference {
T GetMaxLocation() const { return max_object_location_; }
int GetAlignment() const { return object_alignment_; }
+ // Needed for InvalSet.
+ void SetLocationToInvalidateOnly(T location) { location_ = location; }
+
private:
// The location of the thing that contains the reference. For example, this
// can be the location of the branch or load instruction.
@@ -404,7 +407,7 @@ class PoolManager {
bool MustEmit(T pc,
int num_bytes = 0,
ForwardReference<T>* reference = NULL,
- LabelBase<T>* object = NULL) const;
+ LocationBase<T>* object = NULL) const;
enum EmitOption { kBranchRequired, kNoBranchRequired };
@@ -421,21 +424,21 @@ class PoolManager {
T pc,
int num_bytes = 0,
ForwardReference<T>* new_reference = NULL,
- LabelBase<T>* new_object = NULL,
+ LocationBase<T>* new_object = NULL,
EmitOption option = kBranchRequired);
// Add 'reference' to 'object'. Should not be preceded by a call to MustEmit()
// that returned true, unless Emit() has been called successfully afterwards.
void AddObjectReference(const ForwardReference<T>* reference,
- LabelBase<T>* object);
+ LocationBase<T>* object);
- // This is to notify the pool that a LabelBase has been bound to a location
+ // This is to notify the pool that a LocationBase has been bound to a location
// and does not need to be tracked anymore.
// This will happen, for example, for Labels, which are manually bound by the
// user.
// This can potentially add some padding bytes in order to meet the object
// requirements, and will return the new location.
- T Bind(MacroAssemblerInterface* masm, LabelBase<T>* object, T location);
+ T Bind(MacroAssemblerInterface* masm, LocationBase<T>* object, T location);
// Functions for blocking and releasing the pools.
void Block() { monitor_++; }
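Taken together, the methods above define the contract a macro assembler follows when emitting an instruction that refers to a pooled object. The sketch below is schematic only: it uses the declarations from this header, but the surrounding details (how the ForwardReference and LocationBase are created, and where the instruction bytes are produced) are assumptions rather than something this patch prescribes.

  // Schematic flow for emitting one pool-referencing instruction.
  template <typename T>
  void EmitWithPoolCheck(vixl::PoolManager<T>* pool_manager,
                         vixl::MacroAssemblerInterface* masm,
                         T pc,
                         int instruction_size,
                         vixl::ForwardReference<T>* ref,
                         vixl::LocationBase<T>* object) {
    // If placing this instruction would push 'object' out of range, emit the
    // pool first (by default with a branch over it); Emit() returns the new pc.
    if (pool_manager->MustEmit(pc, instruction_size, ref, object)) {
      pc = pool_manager->Emit(masm, pc, instruction_size, ref, object);
    }
    // ... the instruction itself is generated here ...
    // Register the reference only after any required pool emission, matching
    // the requirement documented on AddObjectReference().
    pool_manager->AddObjectReference(ref, object);
  }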
@@ -447,12 +450,12 @@ class PoolManager {
typedef
typename std::vector<PoolObject<T> >::const_iterator const_objects_iter;
- PoolObject<T>* GetObjectIfTracked(LabelBase<T>* label) {
+ PoolObject<T>* GetObjectIfTracked(LocationBase<T>* label) {
return const_cast<PoolObject<T>*>(
static_cast<const PoolManager<T>*>(this)->GetObjectIfTracked(label));
}
- const PoolObject<T>* GetObjectIfTracked(LabelBase<T>* label) const {
+ const PoolObject<T>* GetObjectIfTracked(LocationBase<T>* label) const {
for (const_objects_iter iter = objects_.begin(); iter != objects_.end();
++iter) {
const PoolObject<T>& current = *iter;
@@ -481,7 +484,7 @@ class PoolManager {
void Insert(const PoolObject<T>& new_object);
// Helper functions to remove an object from objects_ and delete the
- // corresponding LabelBase object, if necessary. This will be called
+ // corresponding LocationBase object, if necessary. This will be called
// either after placing the object, or when Bind() is called.
void RemoveAndDelete(PoolObject<T>* object);
objects_iter RemoveAndDelete(objects_iter iter);
@@ -491,7 +494,7 @@ class PoolManager {
T pc,
int num_bytes,
ForwardReference<T>* new_reference,
- LabelBase<T>* new_object,
+ LocationBase<T>* new_object,
PoolObject<T>* existing_object) const;
// Used only for debugging.
@@ -510,7 +513,7 @@ class PoolManager {
std::vector<PoolObject<T> > objects_;
// Objects to be deleted on pool destruction.
- std::vector<LabelBase<T>*> delete_on_destruction_;
+ std::vector<LocationBase<T>*> delete_on_destruction_;
// The header_size_ and alignment_ values are hardcoded for each instance of
// PoolManager. The PoolManager does not know how to emit the header, and
diff --git a/test/aarch32/test-assembler-aarch32.cc b/test/aarch32/test-assembler-aarch32.cc
index 5a469ef7..4840a7ff 100644
--- a/test/aarch32/test-assembler-aarch32.cc
+++ b/test/aarch32/test-assembler-aarch32.cc
@@ -1,4 +1,4 @@
-// Copyright 2015, VIXL authors
+// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -91,17 +91,20 @@ namespace aarch32 {
void Test##Name()
#define __ masm.
+#define __TESTOBJ test.
#define BUF_SIZE (4096)
-#define ASSERT_LITERAL_POOL_SIZE(size) \
+#define CHECK_POOL_SIZE(size) \
do { \
- VIXL_CHECK(__ GetLiteralPoolSize() == size); \
+ VIXL_CHECK(__TESTOBJ GetPoolSize() == size); \
} while (false)
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH32
// No simulator yet.
-#define SETUP() MacroAssembler masm(BUF_SIZE, isa);
+#define SETUP() \
+ MacroAssembler masm(BUF_SIZE, isa); \
+ TestMacroAssembler test(&masm);
#define START() masm.GetBuffer()->Reset();
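A condensed sketch of how a test body now interacts with the harness (the test name is made up; the checks mirror the expectations encoded by the tests below, where a single Ldr of a 32-bit immediate adds a 4-byte literal to the pool):

  TEST(pool_size_sketch) {
    SETUP();  // Also constructs 'TestMacroAssembler test(&masm)'.
    START();

    VIXL_CHECK(test.PoolIsEmpty());
    __ Ldr(r0, 0x12345678);  // One 32-bit literal is added to the pool.
    CHECK_POOL_SIZE(4);      // Expands to a check on test.GetPoolSize().

    END();
    RUN();
    ASSERT_EQUAL_32(0x12345678, r0);
  }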
@@ -116,6 +119,7 @@ namespace aarch32 {
#define SETUP() \
RegisterDump core; \
MacroAssembler masm(BUF_SIZE, isa); \
+ TestMacroAssembler test(&masm); \
UseScratchRegisterScope harness_scratch;
#define START() \
@@ -219,6 +223,7 @@ namespace aarch32 {
} \
}
+
// TODO: Add SBC to the ADC tests.
@@ -726,29 +731,30 @@ TEST(adr_in_range) {
TEST(adr_unaligned) {
SETUP();
- Label label_0, label_1, label_2, label_3, label_end;
+ Label label_end;
START();
{
+ Location label_0, label_1, label_2, label_3;
// 5 instructions.
ExactAssemblyScope scope(&masm,
- 5 * kA32InstructionSizeInBytes,
+ 5 * kA32InstructionSizeInBytes + 4,
ExactAssemblyScope::kExactSize);
__ adr(Wide, r0, &label_0);
__ adr(Wide, r1, &label_1);
__ adr(Wide, r2, &label_2);
__ adr(Wide, r3, &label_3);
__ b(Wide, &label_end);
- }
- {
- __ Bind(&label_0);
+ __ bind(&label_0);
__ GetBuffer()->EmitData("a", 1);
- __ Bind(&label_1);
+ __ bind(&label_1);
__ GetBuffer()->EmitData("b", 1);
- __ Bind(&label_2);
+ __ bind(&label_2);
__ GetBuffer()->EmitData("c", 1);
- __ Bind(&label_3);
+ __ bind(&label_3);
__ GetBuffer()->EmitData("d", 1);
+ }
+ {
__ Bind(&label_end);
__ Ldrb(r0, MemOperand(r0));
__ Ldrb(r1, MemOperand(r1));
@@ -1096,7 +1102,6 @@ TEST(bics) {
ASSERT_EQUAL_32(0x80000000, r0);
}
-
// Make sure calling a macro-assembler instruction will generate veneer pools
// if needed.
TEST_T32(veneer_pool_generated_by_macro_instruction) {
@@ -1106,21 +1111,19 @@ TEST_T32(veneer_pool_generated_by_macro_instruction) {
Label start, end;
- VIXL_CHECK(masm.VeneerPoolIsEmpty());
- VIXL_CHECK(masm.LiteralPoolIsEmpty());
+ VIXL_CHECK(test.PoolIsEmpty());
__ Mov(r0, 1);
__ Bind(&start);
__ Cbz(r0, &end);
- VIXL_CHECK(!masm.VeneerPoolIsEmpty());
- VIXL_CHECK(masm.LiteralPoolIsEmpty());
+ VIXL_CHECK(!test.PoolIsEmpty());
// Generate enough code so that, after the loop, no instruction can be
// generated before we need to generate the veneer pool.
// Use `ExactAssemblyScope` and the assembler to generate the code.
- int32_t space = masm.GetMarginBeforeVeneerEmission();
+ int32_t space = test.GetPoolCheckpoint() - masm.GetCursorOffset();
{
ExactAssemblyScope scope(&masm, space, ExactAssemblyScope::kExactSize);
while (space > 0) {
@@ -1129,10 +1132,9 @@ TEST_T32(veneer_pool_generated_by_macro_instruction) {
}
}
- // We should not have emitted the veneer pool at this point.
- VIXL_CHECK(!masm.VeneerPoolIsEmpty());
- VIXL_CHECK(masm.LiteralPoolIsEmpty());
- VIXL_CHECK(masm.GetMarginBeforeVeneerEmission() == 0);
+ // We should not have emitted the pool at this point.
+ VIXL_CHECK(!test.PoolIsEmpty());
+ VIXL_CHECK(test.GetPoolCheckpoint() == masm.GetCursorOffset());
// Now the pool will need to be generated before we can emit anything.
Label check;
@@ -1150,8 +1152,7 @@ TEST_T32(veneer_pool_generated_by_macro_instruction) {
__ B(&start);
__ Bind(&end);
- VIXL_CHECK(masm.VeneerPoolIsEmpty());
- VIXL_CHECK(masm.LiteralPoolIsEmpty());
+ VIXL_CHECK(test.PoolIsEmpty());
END();
@@ -1160,18 +1161,17 @@ TEST_T32(veneer_pool_generated_by_macro_instruction) {
ASSERT_EQUAL_32(0, r0);
}
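Many of the updated tests repeat the same idiom for exhausting the emission margin under the new interface: compute the distance to the pool checkpoint, fill it exactly with nops inside an ExactAssemblyScope, and let the next macro instruction force the pool out. A condensed sketch of that idiom (the wrapper function is illustrative only; the tests inline it):

  // Fill the buffer with nops right up to the pool checkpoint without
  // triggering pool emission; afterwards the remaining margin is zero.
  // As in the tests, the margin is assumed to be a multiple of the nop size.
  void FillUpToPoolCheckpoint(vixl::aarch32::MacroAssembler* masm,
                              vixl::aarch32::TestMacroAssembler* test) {
    int32_t margin = test->GetPoolCheckpoint() - masm->GetCursorOffset();
    int32_t end = test->GetPoolCheckpoint();
    vixl::ExactAssemblyScope scope(masm, margin,
                                   vixl::ExactAssemblyScope::kExactSize);
    while (masm->GetCursorOffset() < end) {
      masm->nop();
    }
    VIXL_CHECK(masm->GetCursorOffset() == end);
  }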
-
-TEST(emit_reused_load_literal_rewind) {
- // This test generates an Ldrd that needs to be rewinded and loads a literal
- // that already is in the pool (hence it will be part of the pool that gets
- // emitted as part of the rewind).
+// NOTE: This test has needed modifications for the new pool manager, as it
+// was testing a corner case of the previous pool managers. We keep it as
+// another testcase.
+TEST(emit_reused_load_literal) {
SETUP();
START();
// Make sure the pool is empty.
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
- ASSERT_LITERAL_POOL_SIZE(0);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
+ VIXL_CHECK(test.PoolIsEmpty());
const int ldrd_range = masm.IsUsingA32() ? 255 : 1020;
const int string_size = AlignUp(ldrd_range + kMaxInstructionSizeInBytes, 4);
@@ -1184,11 +1184,10 @@ TEST(emit_reused_load_literal_rewind) {
Literal<uint64_t> l1(0xcafebeefdeadbaba);
__ Ldr(r0, &l1);
- // This Ldrd will be emitted and then rewinded, forcing the pool to be
- // emitted before we regenerate the instruction, so l1 will be bound and the
- // literal pool empty afterwards.
+ // With the old pool manager, this Ldrd used to force pool emission before
+ // being generated. Now, 'l1' and 'big_literal' can be reordered in the pool,
+ // and pool emission is not triggered anymore.
__ Ldrd(r2, r3, &l1);
- ASSERT_LITERAL_POOL_SIZE(0);
__ Ldr(r4, MemOperand(r4)); // Load the first 4 characters in r4.
END();
@@ -1202,7 +1201,9 @@ TEST(emit_reused_load_literal_rewind) {
ASSERT_EQUAL_32(0x78787878, r4);
}
-
+// NOTE: This test has needed modifications for the new pool manager, as it
+// was testing a corner case of the previous pool managers. We keep it as
+// another testcase.
TEST(emit_reused_load_literal_should_not_rewind) {
// This test checks that we are not conservative when rewinding a load of a
// literal that is already in the literal pool.
@@ -1211,8 +1212,8 @@ TEST(emit_reused_load_literal_should_not_rewind) {
START();
// Make sure the pool is empty.
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
- ASSERT_LITERAL_POOL_SIZE(0);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
+ VIXL_CHECK(test.PoolIsEmpty());
// This load has a wider range than the Ldrd used below for the same
// literal.
@@ -1228,11 +1229,12 @@ TEST(emit_reused_load_literal_should_not_rewind) {
__ Adr(r4, &big_literal);
__ Ldrd(r2, r3, &l1);
- ASSERT_LITERAL_POOL_SIZE(AlignUp(string_size + 1, 4) + l1.GetSize());
+ // Here we used to check the pool size, which can now be zero as we emit the
+ // literals in a different order.
// Make sure the pool is emitted.
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
- ASSERT_LITERAL_POOL_SIZE(0);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
+ VIXL_CHECK(test.PoolIsEmpty());
__ Ldr(r4, MemOperand(r4)); // Load the first 4 characters in r4.
END();
@@ -1249,11 +1251,9 @@ TEST(emit_reused_load_literal_should_not_rewind) {
void EmitReusedLoadLiteralStressTest(InstructionSet isa, bool conditional) {
// This test stresses loading a literal that is already in the literal pool,
- // for
- // various positionings on the existing load from that literal. We try to
- // exercise
- // cases where the two loads result in similar checkpoints for the literal
- // pool.
+ // for various positionings on the existing load from that literal. We try to
+ // exercise cases where the two loads result in similar checkpoints for the
+ // literal pool.
SETUP();
const int ldrd_range = masm.IsUsingA32() ? 255 : 1020;
@@ -1265,16 +1265,16 @@ void EmitReusedLoadLiteralStressTest(InstructionSet isa, bool conditional) {
START();
// Make sure the pool is empty.
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
- ASSERT_LITERAL_POOL_SIZE(0);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
+ VIXL_CHECK(test.PoolIsEmpty());
if (conditional) {
__ Mov(r1, 0);
__ Cmp(r1, 0);
}
- // Add a large string to the pool, which will force the Ldrd below to rewind
- // (if the pool is not already emitted due to the Ldr).
+ // Add a large string to the pool, which will stress corner cases with the
+ // Ldrd below (if the pool is not already emitted due to the Ldr).
const int string_size = AlignUp(ldrd_range + kMaxInstructionSizeInBytes, 4);
std::string test_string(string_size, 'x');
StringLiteral big_literal(test_string.c_str());
@@ -1301,9 +1301,11 @@ void EmitReusedLoadLiteralStressTest(InstructionSet isa, bool conditional) {
} else {
__ Ldrd(r2, r3, &l1);
}
- // At this point, the pool will be emitted either because Ldrd needed to
- // rewind, or because Ldr reached its range.
- ASSERT_LITERAL_POOL_SIZE(0);
+
+ // Here we used to check that the pool is empty. Since the new pool manager
+ // allows reordering of literals in the pool, this will not always be the
+ // case. 'l1' can now be emitted before 'big_literal', allowing the pool to
+ // be emitted after the ldrd when the number of nops is small enough.
END();
@@ -1337,8 +1339,8 @@ TEST(test_many_loads_from_same_literal) {
START();
// Make sure the pool is empty.
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
- ASSERT_LITERAL_POOL_SIZE(0);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
+ VIXL_CHECK(test.PoolIsEmpty());
Literal<uint64_t> l0(0xcafebeefdeadbaba);
__ Ldrd(r0, r1, &l0);
@@ -1370,18 +1372,16 @@ TEST_T32(literal_pool_generated_by_macro_instruction) {
START();
- VIXL_CHECK(masm.VeneerPoolIsEmpty());
- VIXL_CHECK(masm.LiteralPoolIsEmpty());
+ VIXL_CHECK(test.PoolIsEmpty());
__ Ldrd(r0, r1, 0x1234567890abcdef);
- VIXL_CHECK(masm.VeneerPoolIsEmpty());
- VIXL_CHECK(!masm.LiteralPoolIsEmpty());
+ VIXL_CHECK(!test.PoolIsEmpty());
// Generate enough code so that, after the loop, no instruction can be
// generated before we need to generate the literal pool.
// Use `ExactAssemblyScope` and the assembler to generate the code.
- int32_t space = masm.GetMarginBeforeLiteralEmission();
+ int32_t space = test.GetPoolCheckpoint() - masm.GetCursorOffset();
{
ExactAssemblyScope scope(&masm, space, ExactAssemblyScope::kExactSize);
while (space > 0) {
@@ -1391,9 +1391,8 @@ TEST_T32(literal_pool_generated_by_macro_instruction) {
}
// We should not have emitted the literal pool at this point.
- VIXL_CHECK(masm.VeneerPoolIsEmpty());
- VIXL_CHECK(!masm.LiteralPoolIsEmpty());
- VIXL_CHECK(masm.GetMarginBeforeLiteralEmission() == 0);
+ VIXL_CHECK(!test.PoolIsEmpty());
+ VIXL_CHECK(test.GetPoolCheckpoint() == masm.GetCursorOffset());
// Now the pool will need to be generated before we emit anything.
Label check;
@@ -1408,8 +1407,7 @@ TEST_T32(literal_pool_generated_by_macro_instruction) {
VIXL_CHECK(masm.GetSizeOfCodeGeneratedSince(&check) ==
(3 * k32BitT32InstructionSizeInBytes + 8));
- VIXL_CHECK(masm.VeneerPoolIsEmpty());
- VIXL_CHECK(masm.LiteralPoolIsEmpty());
+ VIXL_CHECK(test.PoolIsEmpty());
END();
@@ -1420,23 +1418,22 @@ TEST_T32(literal_pool_generated_by_macro_instruction) {
ASSERT_EQUAL_32(0x12345678, r2);
}
-
TEST(emit_single_literal) {
SETUP();
START();
// Make sure the pool is empty.
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
- ASSERT_LITERAL_POOL_SIZE(0);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
+ VIXL_CHECK(test.PoolIsEmpty());
// Create one literal pool entry.
__ Ldrd(r0, r1, 0x1234567890abcdef);
- ASSERT_LITERAL_POOL_SIZE(8);
+ CHECK_POOL_SIZE(8);
__ Vldr(s0, 1.0);
__ Vldr(d1, 2.0);
__ Vmov(d2, 4.1);
__ Vmov(s8, 8.2);
- ASSERT_LITERAL_POOL_SIZE(20);
+ CHECK_POOL_SIZE(20);
END();
RUN();
@@ -1452,10 +1449,12 @@ TEST(emit_single_literal) {
#undef __
+#undef __TESTOBJ
#define __ masm->
+#define __TESTOBJ test->
-void EmitLdrdLiteralTest(MacroAssembler* masm) {
+void EmitLdrdLiteralTest(MacroAssembler* masm, TestMacroAssembler* test) {
const int ldrd_range = masm->IsUsingA32() ? 255 : 1020;
// We want to emit code up to the maximum literal load range and ensure the
// pool has not been emitted. Compute the limit (end).
@@ -1469,21 +1468,22 @@ void EmitLdrdLiteralTest(MacroAssembler* masm) {
4) +
// Maximum range allowed to access the constant.
ldrd_range -
- // The literal pool has a two instruction margin.
- 2 * kMaxInstructionSizeInBytes,
+ // Take into account the branch over the pool.
+ kMaxInstructionSizeInBytes,
// AlignDown to 4 byte as the literals will be 4 byte aligned.
4);
// Create one literal pool entry.
__ Ldrd(r0, r1, 0x1234567890abcdef);
- ASSERT_LITERAL_POOL_SIZE(8);
+ CHECK_POOL_SIZE(8);
- int32_t margin = masm->GetMarginBeforeLiteralEmission();
+ int32_t margin = test->GetPoolCheckpoint() - masm->GetCursorOffset();
+ VIXL_ASSERT(end == test->GetPoolCheckpoint());
{
ExactAssemblyScope scope(masm, margin, ExactAssemblyScope::kExactSize);
// Opening the scope should not have triggered the emission of the literal
// pool.
- VIXL_CHECK(!masm->LiteralPoolIsEmpty());
+ VIXL_CHECK(!test->PoolIsEmpty());
while (masm->GetCursorOffset() < end) {
__ nop();
}
@@ -1491,42 +1491,46 @@ void EmitLdrdLiteralTest(MacroAssembler* masm) {
}
// Check that the pool has not been emitted along the way.
- ASSERT_LITERAL_POOL_SIZE(8);
+ CHECK_POOL_SIZE(8);
// This extra instruction should trigger an emit of the pool.
__ Nop();
// The pool should have been emitted.
- ASSERT_LITERAL_POOL_SIZE(0);
+ VIXL_CHECK(test->PoolIsEmpty());
}
-
#undef __
+#undef __TESTOBJ
#define __ masm.
+#define __TESTOBJ test.
-
+// NOTE: This test has needed modifications for the new pool manager, as it
+// was testing a corner case of the previous pool managers. We keep it as
+// another testcase.
TEST(emit_literal_rewind) {
SETUP();
START();
// Make sure the pool is empty.
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
- ASSERT_LITERAL_POOL_SIZE(0);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
+ VIXL_CHECK(test.PoolIsEmpty());
- EmitLdrdLiteralTest(&masm);
+ EmitLdrdLiteralTest(&masm, &test);
const int ldrd_range = masm.IsUsingA32() ? 255 : 1020;
const int string_size = AlignUp(ldrd_range + kMaxInstructionSizeInBytes, 4);
std::string test_string(string_size, 'x');
StringLiteral big_literal(test_string.c_str());
__ Adr(r4, &big_literal);
- // This adr will overflow the literal pool and force a rewind.
- // That means that the string will be generated then, then Ldrd and the
- // Ldrd's value will be alone in the pool.
__ Ldrd(r2, r3, 0xcafebeefdeadbaba);
- ASSERT_LITERAL_POOL_SIZE(8);
-
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
- ASSERT_LITERAL_POOL_SIZE(0);
+ // With the old pool manager, the adr above would overflow the literal pool
+ // and force a rewind and pool emission.
+ // Here we used to check the pool size to confirm that 'big_literal' had
+ // already been emitted. This does not have to be the case now, as we can
+ // emit the literals in a different order.
+
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
+ VIXL_CHECK(test.PoolIsEmpty());
__ Ldr(r4, MemOperand(r4)); // Load the first 4 characters in r4.
END();
@@ -1540,6 +1544,10 @@ TEST(emit_literal_rewind) {
ASSERT_EQUAL_32(0x78787878, r4);
}
+
+// NOTE: This test has needed modifications for the new pool manager, as it
+// was testing a corner case of the previous pool managers. We keep it as
+// another testcase.
TEST(emit_literal_conditional_rewind) {
SETUP();
@@ -1547,32 +1555,31 @@ TEST(emit_literal_conditional_rewind) {
// This test is almost identical to the test above, but the Ldrd instruction
// is conditional and there is a second conditional Ldrd instruction that will
- // not be executed. This is to check that reverting the emission of a load
- // literal instruction, rewinding, emitting the literal pool and then emitting
- // the instruction again works correctly when the load is conditional.
+ // not be executed.
// Make sure the pool is empty.
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
- ASSERT_LITERAL_POOL_SIZE(0);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
+ VIXL_CHECK(test.PoolIsEmpty());
const int ldrd_range = masm.IsUsingA32() ? 255 : 1020;
const int string_size = AlignUp(ldrd_range + kMaxInstructionSizeInBytes, 4);
std::string test_string(string_size, 'x');
StringLiteral big_literal(test_string.c_str());
__ Adr(r2, &big_literal);
- // This adr will overflow the literal pool and force a rewind.
- // That means that the string will be generated then, then Ldrd and the
- // Ldrd's value will be alone in the pool.
__ Mov(r0, 0);
__ Mov(r1, 0);
__ Mov(r3, 1);
__ Cmp(r3, 1);
__ Ldrd(eq, r0, r1, 0xcafebeefdeadbaba);
__ Ldrd(ne, r0, r1, 0xdeadcafebeefbaba);
- ASSERT_LITERAL_POOL_SIZE(16);
-
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
- ASSERT_LITERAL_POOL_SIZE(0);
+ // With the old pool manager, the adr above would overflow the literal pool
+ // and force a rewind and pool emission.
+ // Here we used to check the pool size to confirm that 'big_literal' had
+ // already been emitted. This does not have to be the case now, as we can
+ // emit the literals in a different order.
+
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
+ VIXL_CHECK(test.PoolIsEmpty());
__ Ldr(r2, MemOperand(r2)); // Load the first 4 characters in r2.
END();
@@ -1596,8 +1603,8 @@ enum LiteralStressTestMode {
// This test is similar to the tests above, with the difference that we allow
// an extra offset to the string size in order to make sure that various pool
// sizes close to the maximum supported offset will produce code that executes
-// correctly. As the Ldrd might or might not be rewinded, we do not assert on
-// the size of the literal pool in this test.
+// correctly. As the Ldrd might or might not be emitted before the pool, we do
+// not assert on the size of the literal pool in this test.
void EmitLdrdLiteralStressTest(InstructionSet isa,
bool unaligned,
LiteralStressTestMode test_mode) {
@@ -1612,8 +1619,8 @@ void EmitLdrdLiteralStressTest(InstructionSet isa,
}
// Make sure the pool is empty.
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
- ASSERT_LITERAL_POOL_SIZE(0);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
+ VIXL_CHECK(test.PoolIsEmpty());
const int ldrd_range = masm.IsUsingA32() ? 255 : 1020;
const int string_size = ldrd_range + offset;
@@ -1648,8 +1655,8 @@ void EmitLdrdLiteralStressTest(InstructionSet isa,
break;
}
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
- ASSERT_LITERAL_POOL_SIZE(0);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
+ VIXL_CHECK(test.PoolIsEmpty());
__ Ldr(r2, MemOperand(r2)); // Load the first 4 characters in r2.
END();
@@ -1663,43 +1670,42 @@ void EmitLdrdLiteralStressTest(InstructionSet isa,
}
-TEST(emit_literal_rewind_stress) {
+TEST(emit_literal_stress) {
EmitLdrdLiteralStressTest(isa, false /*unaligned*/, kUnconditional);
}
-TEST_T32(emit_literal_rewind_stress_unaligned) {
+TEST_T32(emit_literal_stress_unaligned) {
EmitLdrdLiteralStressTest(isa, true /*unaligned*/, kUnconditional);
}
-TEST(emit_literal_conditional_rewind_stress) {
+TEST(emit_literal_conditional_stress) {
EmitLdrdLiteralStressTest(isa, false /*unaligned*/, kConditionalTrue);
EmitLdrdLiteralStressTest(isa, false /*unaligned*/, kConditionalFalse);
EmitLdrdLiteralStressTest(isa, false /*unaligned*/, kConditionalBoth);
}
-TEST_T32(emit_literal_conditional_rewind_stress_unaligned) {
+TEST_T32(emit_literal_conditional_stress_unaligned) {
EmitLdrdLiteralStressTest(isa, true /*unaligned*/, kConditionalTrue);
EmitLdrdLiteralStressTest(isa, true /*unaligned*/, kConditionalFalse);
EmitLdrdLiteralStressTest(isa, true /*unaligned*/, kConditionalBoth);
}
-
TEST_T32(emit_literal_unaligned) {
SETUP();
START();
// Make sure the pool is empty.
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
- ASSERT_LITERAL_POOL_SIZE(0);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
+ VIXL_CHECK(test.PoolIsEmpty());
// Generate a nop to break the 4-byte alignment.
__ Nop();
- EmitLdrdLiteralTest(&masm);
+ EmitLdrdLiteralTest(&masm, &test);
END();
@@ -1710,14 +1716,13 @@ TEST_T32(emit_literal_unaligned) {
ASSERT_EQUAL_32(0x12345678, r1);
}
-
TEST(literal_multiple_uses) {
SETUP();
START();
Literal<int32_t> lit(42);
__ Ldr(r0, &lit);
- ASSERT_LITERAL_POOL_SIZE(4);
+ CHECK_POOL_SIZE(4);
// Multiple uses of the same literal object should not make the
// pool grow.
@@ -1725,7 +1730,7 @@ TEST(literal_multiple_uses) {
__ Ldrsb(r2, &lit);
__ Ldrh(r3, &lit);
__ Ldrsh(r4, &lit);
- ASSERT_LITERAL_POOL_SIZE(4);
+ CHECK_POOL_SIZE(4);
END();
@@ -1753,7 +1758,7 @@ TEST_A32(ldr_literal_range_same_time) {
ldr_range - ldrd_padding - 2 * kA32InstructionSizeInBytes;
__ Ldr(r1, 0x12121212);
- ASSERT_LITERAL_POOL_SIZE(4);
+ CHECK_POOL_SIZE(4);
{
int space = AlignDown(ldr_padding, kA32InstructionSizeInBytes);
@@ -1765,7 +1770,7 @@ TEST_A32(ldr_literal_range_same_time) {
}
__ Ldrd(r2, r3, 0x1234567890abcdef);
- ASSERT_LITERAL_POOL_SIZE(12);
+ CHECK_POOL_SIZE(12);
{
int space = AlignDown(ldrd_padding, kA32InstructionSizeInBytes);
@@ -1775,12 +1780,12 @@ TEST_A32(ldr_literal_range_same_time) {
__ nop();
}
}
- ASSERT_LITERAL_POOL_SIZE(12);
+ CHECK_POOL_SIZE(12);
// This mov will put the two loads literal out of range and will force
// the literal pool emission.
__ Mov(r0, 0);
- ASSERT_LITERAL_POOL_SIZE(0);
+ VIXL_CHECK(test.PoolIsEmpty());
END();
RUN();
@@ -1808,7 +1813,8 @@ TEST(ldr_literal_mix_types) {
__ Ldrsh(r4, &l3);
__ Ldrb(r5, &l4);
__ Ldrsb(r6, &l5);
- ASSERT_LITERAL_POOL_SIZE(28);
+ // The pool size does not include padding.
+ CHECK_POOL_SIZE(18);
END();
@@ -1908,7 +1914,7 @@ void GenerateLdrLiteralTriggerPoolEmission(InstructionSet isa,
SETUP();
for (size_t i = 0; i < ARRAY_SIZE(kLdrLiteralRangeTestData); ++i) {
- const LdrLiteralRangeTest& test = kLdrLiteralRangeTestData[i];
+ const LdrLiteralRangeTest& test_case = kLdrLiteralRangeTestData[i];
START();
@@ -1919,31 +1925,32 @@ void GenerateLdrLiteralTriggerPoolEmission(InstructionSet isa,
}
__ Ldr(r6, 0x12345678);
- ASSERT_LITERAL_POOL_SIZE(4);
+ CHECK_POOL_SIZE(4);
// TODO: The MacroAssembler currently checks for more space than required
// when emitting macro instructions, triggering emission of the pool before
// absolutely required. For now we keep a buffer. Fix this test when the
// MacroAssembler becomes precise again.
int masm_check_margin = 10 * kMaxInstructionSizeInBytes;
- size_t expected_pool_size = 4;
- while ((masm.GetMarginBeforeLiteralEmission() - masm_check_margin) >=
+ int expected_pool_size = 4;
+ while ((test.GetPoolCheckpoint() - masm.GetCursorOffset() -
+ masm_check_margin) >=
static_cast<int32_t>(kMaxInstructionSizeInBytes)) {
__ Ldr(r7, 0x90abcdef);
// Each ldr instruction will force a new literal value to be added
// to the pool. Check that the literal pool grows accordingly.
expected_pool_size += 4;
- ASSERT_LITERAL_POOL_SIZE(expected_pool_size);
+ CHECK_POOL_SIZE(expected_pool_size);
}
- int space = masm.GetMarginBeforeLiteralEmission();
+ int space = test.GetPoolCheckpoint() - masm.GetCursorOffset();
int end = masm.GetCursorOffset() + space;
{
// Generate nops precisely to fill the buffer.
ExactAssemblyScope accurate_scope(&masm, space); // This should not
// trigger emission of
// the pool.
- VIXL_CHECK(!masm.LiteralPoolIsEmpty());
+ VIXL_CHECK(!test.PoolIsEmpty());
while (masm.GetCursorOffset() < end) {
__ nop();
}
@@ -1951,10 +1958,10 @@ void GenerateLdrLiteralTriggerPoolEmission(InstructionSet isa,
// This ldr will force the literal pool to be emitted before emitting
// the load and will create a new pool for the new literal used by this ldr.
- VIXL_CHECK(!masm.LiteralPoolIsEmpty());
- Literal<uint32_t> literal(test.literal_value);
- (masm.*test.instruction)(test.result_reg, &literal);
- ASSERT_LITERAL_POOL_SIZE(4);
+ VIXL_CHECK(!test.PoolIsEmpty());
+ Literal<uint32_t> literal(test_case.literal_value);
+ (masm.*test_case.instruction)(test_case.result_reg, &literal);
+ CHECK_POOL_SIZE(4);
END();
@@ -1962,7 +1969,7 @@ void GenerateLdrLiteralTriggerPoolEmission(InstructionSet isa,
ASSERT_EQUAL_32(0x12345678, r6);
ASSERT_EQUAL_32(0x90abcdef, r7);
- ASSERT_EQUAL_32(test.test_value, test.result_reg);
+ ASSERT_EQUAL_32(test_case.test_value, test_case.result_reg);
}
}
@@ -1976,18 +1983,17 @@ TEST_T32(ldr_literal_trigger_pool_emission_unaligned) {
GenerateLdrLiteralTriggerPoolEmission(isa, true);
}
-
void GenerateLdrLiteralRangeTest(InstructionSet isa, bool unaligned_ldr) {
SETUP();
for (size_t i = 0; i < ARRAY_SIZE(kLdrLiteralRangeTestData); ++i) {
- const LdrLiteralRangeTest& test = kLdrLiteralRangeTestData[i];
+ const LdrLiteralRangeTest& test_case = kLdrLiteralRangeTestData[i];
START();
// Make sure the pool is empty.
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
- ASSERT_LITERAL_POOL_SIZE(0);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
+ VIXL_CHECK(test.PoolIsEmpty());
if (unaligned_ldr) {
// Generate a nop to break the 4-byte alignment.
@@ -1995,27 +2001,28 @@ void GenerateLdrLiteralRangeTest(InstructionSet isa, bool unaligned_ldr) {
VIXL_ASSERT((masm.GetBuffer()->GetCursorOffset() % 4) == 2);
}
- Literal<uint32_t> literal(test.literal_value);
- (masm.*test.instruction)(test.result_reg, &literal);
- ASSERT_LITERAL_POOL_SIZE(4);
+ Literal<uint32_t> literal(test_case.literal_value);
+ (masm.*test_case.instruction)(test_case.result_reg, &literal);
+ CHECK_POOL_SIZE(4);
// Generate enough instructions so that we go out of range for the load
// literal we just emitted.
- ptrdiff_t end = masm.GetBuffer()->GetCursorOffset() +
- ((masm.IsUsingA32()) ? test.a32_range : test.t32_range);
+ ptrdiff_t end =
+ masm.GetBuffer()->GetCursorOffset() +
+ ((masm.IsUsingA32()) ? test_case.a32_range : test_case.t32_range);
while (masm.GetBuffer()->GetCursorOffset() < end) {
__ Mov(r0, 0);
}
// The literal pool should have been emitted now.
VIXL_CHECK(literal.IsBound());
- ASSERT_LITERAL_POOL_SIZE(0);
+ VIXL_CHECK(test.PoolIsEmpty());
END();
RUN();
- ASSERT_EQUAL_32(test.test_value, test.result_reg);
+ ASSERT_EQUAL_32(test_case.test_value, test_case.result_reg);
}
}
@@ -2033,8 +2040,8 @@ TEST(string_literal) {
START();
// Make sure the pool is empty.
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
- ASSERT_LITERAL_POOL_SIZE(0);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
+ VIXL_CHECK(test.PoolIsEmpty());
StringLiteral hello_string("hello");
@@ -2056,20 +2063,20 @@ TEST(custom_literal_in_pool) {
START();
// Make sure the pool is empty.
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
- ASSERT_LITERAL_POOL_SIZE(0);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
+ VIXL_CHECK(test.PoolIsEmpty());
Literal<uint32_t> l0(static_cast<uint32_t>(0x12345678));
__ Ldr(r0, &l0);
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
__ Ldr(r1, &l0);
- ASSERT_LITERAL_POOL_SIZE(0);
+ VIXL_CHECK(test.PoolIsEmpty());
Literal<uint64_t> cafebeefdeadbaba(0xcafebeefdeadbaba);
__ Ldrd(r8, r9, &cafebeefdeadbaba);
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
__ Ldrd(r2, r3, &cafebeefdeadbaba);
- ASSERT_LITERAL_POOL_SIZE(0);
+ VIXL_CHECK(test.PoolIsEmpty());
Literal<uint32_t> l1(0x09abcdef);
__ Adr(r4, &l1);
@@ -2077,7 +2084,7 @@ TEST(custom_literal_in_pool) {
masm.EmitLiteralPool();
__ Adr(r5, &l1);
__ Ldr(r5, MemOperand(r5));
- ASSERT_LITERAL_POOL_SIZE(0);
+ VIXL_CHECK(test.PoolIsEmpty());
END();
@@ -2100,8 +2107,8 @@ TEST(custom_literal_place) {
START();
// Make sure the pool is empty.
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
- ASSERT_LITERAL_POOL_SIZE(0);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
+ VIXL_CHECK(test.PoolIsEmpty());
Literal<uint64_t> l0(0xcafebeefdeadbaba, RawLiteral::kManuallyPlaced);
Literal<int32_t> l1(0x12345678, RawLiteral::kManuallyPlaced);
@@ -2117,7 +2124,7 @@ TEST(custom_literal_place) {
__ Ldrb(r5, &l4);
__ Ldrsb(r6, &l5);
- ASSERT_LITERAL_POOL_SIZE(0);
+ VIXL_CHECK(test.PoolIsEmpty());
// Manually generate a literal pool.
Label after_pool;
@@ -2144,7 +2151,7 @@ TEST(custom_literal_place) {
__ Ldrsb(lr, &l5);
}
- ASSERT_LITERAL_POOL_SIZE(0);
+ VIXL_CHECK(test.PoolIsEmpty());
END();
@@ -2173,16 +2180,18 @@ TEST(custom_literal_place_shared) {
SETUP();
for (size_t i = 0; i < ARRAY_SIZE(kLdrLiteralRangeTestData); ++i) {
- const LdrLiteralRangeTest& test = kLdrLiteralRangeTestData[i];
+ const LdrLiteralRangeTest& test_case = kLdrLiteralRangeTestData[i];
START();
// Make sure the pool is empty.
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
- ASSERT_LITERAL_POOL_SIZE(0);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
+ VIXL_CHECK(test.PoolIsEmpty());
- Literal<uint32_t> before(test.literal_value, RawLiteral::kManuallyPlaced);
- Literal<uint32_t> after(test.literal_value, RawLiteral::kManuallyPlaced);
+ Literal<uint32_t> before(test_case.literal_value,
+ RawLiteral::kManuallyPlaced);
+ Literal<uint32_t> after(test_case.literal_value,
+ RawLiteral::kManuallyPlaced);
VIXL_CHECK(!before.IsBound());
VIXL_CHECK(!after.IsBound());
@@ -2193,17 +2202,17 @@ TEST(custom_literal_place_shared) {
__ Place(&before);
__ Bind(&end_of_pool_before);
- ASSERT_LITERAL_POOL_SIZE(0);
+ VIXL_CHECK(test.PoolIsEmpty());
VIXL_CHECK(before.IsBound());
VIXL_CHECK(!after.IsBound());
// Load the entries several times to test that literals can be shared.
for (int i = 0; i < 20; i++) {
- (masm.*test.instruction)(r0, &before);
- (masm.*test.instruction)(r1, &after);
+ (masm.*test_case.instruction)(r0, &before);
+ (masm.*test_case.instruction)(r1, &after);
}
- ASSERT_LITERAL_POOL_SIZE(0);
+ VIXL_CHECK(test.PoolIsEmpty());
VIXL_CHECK(before.IsBound());
VIXL_CHECK(!after.IsBound());
@@ -2213,7 +2222,7 @@ TEST(custom_literal_place_shared) {
__ Place(&after);
__ Bind(&end_of_pool_after);
- ASSERT_LITERAL_POOL_SIZE(0);
+ VIXL_CHECK(test.PoolIsEmpty());
VIXL_CHECK(before.IsBound());
VIXL_CHECK(after.IsBound());
@@ -2221,8 +2230,8 @@ TEST(custom_literal_place_shared) {
RUN();
- ASSERT_EQUAL_32(test.test_value, r0);
- ASSERT_EQUAL_32(test.test_value, r1);
+ ASSERT_EQUAL_32(test_case.test_value, r0);
+ ASSERT_EQUAL_32(test_case.test_value, r1);
}
}
@@ -2231,10 +2240,11 @@ TEST(custom_literal_place_range) {
SETUP();
for (size_t i = 0; i < ARRAY_SIZE(kLdrLiteralRangeTestData); ++i) {
- const LdrLiteralRangeTest& test = kLdrLiteralRangeTestData[i];
+ const LdrLiteralRangeTest& test_case = kLdrLiteralRangeTestData[i];
const int nop_size = masm.IsUsingA32() ? kA32InstructionSizeInBytes
: k16BitT32InstructionSizeInBytes;
- const int range = masm.IsUsingA32() ? test.a32_range : test.t32_range;
+ const int range =
+ masm.IsUsingA32() ? test_case.a32_range : test_case.t32_range;
// On T32 the PC will be 4-byte aligned to compute the range. The
// MacroAssembler might also need to align the code buffer before emitting
// the literal when placing it. We keep a margin to account for this.
@@ -2256,8 +2266,10 @@ TEST(custom_literal_place_range) {
(2 * kMaxInstructionSizeInBytes) - margin;
START();
- Literal<uint32_t> before(test.literal_value, RawLiteral::kManuallyPlaced);
- Literal<uint32_t> after(test.literal_value, RawLiteral::kManuallyPlaced);
+ Literal<uint32_t> before(test_case.literal_value,
+ RawLiteral::kManuallyPlaced);
+ Literal<uint32_t> after(test_case.literal_value,
+ RawLiteral::kManuallyPlaced);
Label test_start;
__ B(&test_start);
@@ -2273,8 +2285,8 @@ TEST(custom_literal_place_range) {
}
__ Bind(&test_start);
- (masm.*test.instruction)(r0, &before);
- (masm.*test.instruction)(r1, &after);
+ (masm.*test_case.instruction)(r0, &before);
+ (masm.*test_case.instruction)(r1, &after);
{
int space = AlignDown(padding_after, nop_size);
@@ -2294,8 +2306,8 @@ TEST(custom_literal_place_range) {
RUN();
- ASSERT_EQUAL_32(test.test_value, r0);
- ASSERT_EQUAL_32(test.test_value, r1);
+ ASSERT_EQUAL_32(test_case.test_value, r0);
+ ASSERT_EQUAL_32(test_case.test_value, r1);
}
}
@@ -2305,7 +2317,7 @@ TEST(emit_big_pool) {
START();
// Make sure the pool is empty.
- ASSERT_LITERAL_POOL_SIZE(0);
+ VIXL_CHECK(test.PoolIsEmpty());
Label start;
__ Bind(&start);
@@ -2315,7 +2327,7 @@ TEST(emit_big_pool) {
VIXL_ASSERT(masm.GetSizeOfCodeGeneratedSince(&start) == 4000);
- ASSERT_LITERAL_POOL_SIZE(4000);
+ CHECK_POOL_SIZE(4000);
END();
RUN();
@@ -2430,15 +2442,16 @@ TEST_T32(veneers) {
__ Mov(r0, 0);
// Create one literal pool entry.
__ Ldr(r1, 0x12345678);
- ASSERT_LITERAL_POOL_SIZE(4);
+ CHECK_POOL_SIZE(4);
__ Cbz(r0, &zero);
__ Mov(r0, 1);
__ B(&exit);
for (int i = 32; i > 0; i--) {
__ Mov(r1, 0);
}
- // Assert that the literal pool has been generated with the veneers.
- ASSERT_LITERAL_POOL_SIZE(0);
+ // Assert that the pool contains only the two veneers.
+ const int kVeneerSize = 4;
+ CHECK_POOL_SIZE(2 * kVeneerSize);
__ Bind(&zero);
__ Mov(r0, 2);
__ Bind(&exit);
@@ -2504,7 +2517,7 @@ TEST_T32(veneer_bind) {
}
VIXL_CHECK(target.IsBound());
- VIXL_CHECK(masm.VeneerPoolIsEmpty());
+ VIXL_CHECK(test.PoolIsEmpty());
END();
}
@@ -2545,8 +2558,8 @@ TEST_T32(b_narrow_and_cbz_sort) {
// Force pool emission. If the labels are not sorted, the cbz will be out
// of range.
- int32_t margin = masm.GetMarginBeforeVeneerEmission();
- int32_t end = masm.GetCursorOffset() + margin;
+ int32_t end = test.GetPoolCheckpoint();
+ int32_t margin = end - masm.GetCursorOffset();
{
ExactAssemblyScope scope(&masm, margin, ExactAssemblyScope::kExactSize);
@@ -2601,8 +2614,7 @@ TEST_T32(b_narrow_and_cbz_sort_2) {
// Force pool emission. If the labels are not sorted, the cbz will be out
// of range.
- int32_t margin = masm.GetMarginBeforeVeneerEmission();
- int32_t end = masm.GetCursorOffset() + margin;
+ int32_t end = test.GetPoolCheckpoint();
while (masm.GetCursorOffset() < end) __ Nop();
@@ -2666,10 +2678,10 @@ TEST_T32(unaligned_branch_after_literal) {
Literal<int32_t> l0(0x01234567, RawLiteral::kManuallyPlaced);
__ Ldr(r0, &l0);
- ASSERT_LITERAL_POOL_SIZE(0);
+ VIXL_CHECK(test.PoolIsEmpty());
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
- ASSERT_LITERAL_POOL_SIZE(0);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
+ VIXL_CHECK(test.PoolIsEmpty());
// Manually generate a literal pool.
{
@@ -2683,7 +2695,7 @@ TEST_T32(unaligned_branch_after_literal) {
__ bind(&after_pool);
}
- ASSERT_LITERAL_POOL_SIZE(0);
+ VIXL_CHECK(test.PoolIsEmpty());
END();
@@ -3419,30 +3431,28 @@ TEST(nop) {
masm.FinalizeCode();
}
-
-// Check that `GetMarginBeforeLiteralEmission()` is precise.
+// Check that `GetPoolCheckpoint()` is precise.
TEST(literal_pool_margin) {
SETUP();
START();
- VIXL_CHECK(masm.VeneerPoolIsEmpty());
- VIXL_CHECK(masm.LiteralPoolIsEmpty());
+ VIXL_CHECK(test.PoolIsEmpty());
// Create a single literal.
__ Ldrd(r0, r1, 0x1234567890abcdef);
- VIXL_CHECK(!masm.LiteralPoolIsEmpty());
+ VIXL_CHECK(!test.PoolIsEmpty());
// Generate code to fill all the margin we have before generating the literal
// pool.
- int32_t margin = masm.GetMarginBeforeLiteralEmission();
- int32_t end = masm.GetCursorOffset() + margin;
+ int32_t margin = test.GetPoolCheckpoint() - masm.GetCursorOffset();
+ int32_t end = test.GetPoolCheckpoint();
{
ExactAssemblyScope scope(&masm, margin, ExactAssemblyScope::kExactSize);
// Opening the scope should not have triggered the emission of the literal
// pool.
- VIXL_CHECK(!masm.LiteralPoolIsEmpty());
+ VIXL_CHECK(!test.PoolIsEmpty());
while (masm.GetCursorOffset() < end) {
__ nop();
}
@@ -3450,12 +3460,12 @@ TEST(literal_pool_margin) {
}
// There should be no margin left to emit the literal pool.
- VIXL_CHECK(!masm.LiteralPoolIsEmpty());
- VIXL_CHECK(masm.GetMarginBeforeLiteralEmission() == 0);
+ VIXL_CHECK(!test.PoolIsEmpty());
+ VIXL_CHECK(test.GetPoolCheckpoint() == masm.GetCursorOffset());
// So emitting a single instruction should force emission of the pool.
__ Nop();
- VIXL_CHECK(masm.LiteralPoolIsEmpty());
+ VIXL_CHECK(test.PoolIsEmpty());
END();
RUN();
@@ -3466,37 +3476,36 @@ TEST(literal_pool_margin) {
}
-// Check that `GetMarginBeforeVeneerEmission()` is precise.
+// Check that `GetPoolCheckpoint()` is precise.
TEST(veneer_pool_margin) {
SETUP();
START();
- VIXL_CHECK(masm.VeneerPoolIsEmpty());
- VIXL_CHECK(masm.LiteralPoolIsEmpty());
+ VIXL_CHECK(test.PoolIsEmpty());
// Create a single veneer.
Label target;
__ B(eq, &target);
- VIXL_CHECK(!masm.VeneerPoolIsEmpty());
+ VIXL_CHECK(!test.PoolIsEmpty());
// Generate code to fill all the margin we have before generating the veneer
// pool.
- int32_t margin = masm.GetMarginBeforeVeneerEmission();
- int32_t end = masm.GetCursorOffset() + margin;
+ int32_t margin = test.GetPoolCheckpoint() - masm.GetCursorOffset();
+ int32_t end = test.GetPoolCheckpoint();
{
ExactAssemblyScope scope(&masm, margin, ExactAssemblyScope::kExactSize);
// Opening the scope should not have triggered the emission of the veneer
// pool.
- VIXL_CHECK(!masm.VeneerPoolIsEmpty());
+ VIXL_CHECK(!test.PoolIsEmpty());
while (masm.GetCursorOffset() < end) {
__ nop();
}
VIXL_CHECK(masm.GetCursorOffset() == end);
}
// There should be no margin left to emit the veneer pool.
- VIXL_CHECK(masm.GetMarginBeforeVeneerEmission() == 0);
+ VIXL_CHECK(test.GetPoolCheckpoint() == masm.GetCursorOffset());
// So emitting a single instruction should force emission of the pool.
// We cannot simply check that the veneer pool is empty, because the veneer
@@ -3510,14 +3519,13 @@ TEST(veneer_pool_margin) {
}
VIXL_CHECK(masm.GetSizeOfCodeGeneratedSince(&check) > 0);
__ Bind(&target);
- VIXL_CHECK(masm.VeneerPoolIsEmpty());
+ VIXL_CHECK(test.PoolIsEmpty());
END();
RUN();
}
-
TEST_T32(near_branch_fuzz) {
SETUP();
START();
@@ -3915,17 +3923,17 @@ TEST_NOASM(code_buffer_precise_growth) {
TEST_NOASM(out_of_space_immediately_before_EnsureEmitFor) {
static const int kBaseBufferSize = 64;
MacroAssembler masm(kBaseBufferSize, T32);
+ TestMacroAssembler test(&masm);
VIXL_CHECK(masm.GetBuffer()->GetCapacity() == kBaseBufferSize);
- VIXL_CHECK(masm.VeneerPoolIsEmpty());
- VIXL_CHECK(masm.LiteralPoolIsEmpty());
+ VIXL_CHECK(test.PoolIsEmpty());
// Create a veneer.
Label target;
__ Cbz(r0, &target);
- VIXL_CHECK(!masm.VeneerPoolIsEmpty());
+ VIXL_CHECK(!test.PoolIsEmpty());
VIXL_CHECK(IsUint32(masm.GetBuffer()->GetRemainingBytes()));
uint32_t space = static_cast<uint32_t>(masm.GetBuffer()->GetRemainingBytes());
@@ -3937,7 +3945,7 @@ TEST_NOASM(out_of_space_immediately_before_EnsureEmitFor) {
}
}
- VIXL_CHECK(!masm.VeneerPoolIsEmpty());
+ VIXL_CHECK(!test.PoolIsEmpty());
// The buffer should not have grown yet, and there should be no space left.
VIXL_CHECK(masm.GetBuffer()->GetCapacity() == kBaseBufferSize);
@@ -3945,12 +3953,13 @@ TEST_NOASM(out_of_space_immediately_before_EnsureEmitFor) {
// Force emission of the veneer, at a point where there is no space available
// in the buffer.
- int32_t past_cbz_range = masm.GetMarginBeforeVeneerEmission() + 1;
+ int32_t past_cbz_range =
+ test.GetPoolCheckpoint() - masm.GetCursorOffset() + 1;
masm.EnsureEmitFor(past_cbz_range);
__ Bind(&target);
- VIXL_CHECK(masm.VeneerPoolIsEmpty());
+ VIXL_CHECK(test.PoolIsEmpty());
masm.FinalizeCode();
}
@@ -3963,9 +3972,6 @@ TEST_NOASM(EnsureEmitFor) {
VIXL_CHECK(masm.GetBuffer()->GetCapacity() == kBaseBufferSize);
- VIXL_CHECK(masm.VeneerPoolIsEmpty());
- VIXL_CHECK(masm.LiteralPoolIsEmpty());
-
VIXL_CHECK(IsUint32(masm.GetBuffer()->GetRemainingBytes()));
int32_t space = static_cast<int32_t>(masm.GetBuffer()->GetRemainingBytes());
int32_t end = __ GetCursorOffset() + space;
@@ -4901,16 +4907,15 @@ TEST_T32(veneer_simultaneous_one_label) {
END();
}
-
-// The literal pool will be emitted early because we keep a margin to always be
-// able to generate the veneers before the literal.
+// NOTE: This test has needed modifications for the new pool manager, as it
+// was testing a corner case of the previous pool managers. We keep it as
+// another testcase.
TEST_T32(veneer_and_literal) {
SETUP();
START();
- VIXL_CHECK(masm.VeneerPoolIsEmpty());
- VIXL_CHECK(masm.LiteralPoolIsEmpty());
+ VIXL_CHECK(test.PoolIsEmpty());
const uint32_t ldrd_range = 1020;
const uint32_t cbz_range = 126;
@@ -4937,31 +4942,20 @@ TEST_T32(veneer_and_literal) {
i += 2 * k16BitT32InstructionSizeInBytes;
}
- // However as we have pending veneer, the range is shrinken and the literal
- // pool is generated.
- VIXL_ASSERT(masm.LiteralPoolIsEmpty());
- // However, we didn't generate the veneer pool.
- VIXL_ASSERT(masm.GetMarginBeforeVeneerEmission() <
- static_cast<int32_t>(cbz_range));
-
// We generate a few more instructions.
for (; i < ldrd_range - 4 * kA32InstructionSizeInBytes;
i += k16BitT32InstructionSizeInBytes) {
__ Nop();
}
- // And a veneer pool has been generated.
- VIXL_ASSERT(masm.GetMarginBeforeVeneerEmission() >
- static_cast<int32_t>(cbz_range));
-
// Bind all the used labels.
for (uint32_t j = 0; j < kLabelsCount; j++) {
__ Bind(&labels[j]);
__ Nop();
}
- // Now that all the labels have been bound, we have no more veneer.
- VIXL_CHECK(masm.VeneerPoolIsEmpty());
+ // Now that all the labels have been bound, we have no more veneers.
+ VIXL_CHECK(test.PoolIsEmpty());
END();
@@ -4972,16 +4966,15 @@ TEST_T32(veneer_and_literal) {
ASSERT_EQUAL_32(0x12345678, r1);
}
-
-// The literal pool will be emitted early and, as the emission of the literal
-// pool would have put veneer out of range, the veneers are emitted first.
+// NOTE: This test has needed modifications for the new pool manager, as it
+// was testing a corner case of the previous pool managers. We keep it as
+// another testcase.
TEST_T32(veneer_and_literal2) {
SETUP();
START();
- VIXL_CHECK(masm.VeneerPoolIsEmpty());
- VIXL_CHECK(masm.LiteralPoolIsEmpty());
+ VIXL_CHECK(test.PoolIsEmpty());
const uint32_t ldrd_range = 1020;
const uint32_t cbz_range = 126;
@@ -5005,33 +4998,19 @@ TEST_T32(veneer_and_literal2) {
}
// Generate nops up to the literal pool limit.
- while (masm.GetMarginBeforeLiteralEmission() >=
+ while (test.GetPoolCheckpoint() - masm.GetCursorOffset() >=
kTypicalMacroInstructionMaxSize) {
__ Nop();
}
// At this point, no literals and no veneers have been generated.
- VIXL_ASSERT(!masm.LiteralPoolIsEmpty());
- VIXL_ASSERT(masm.GetMarginBeforeVeneerEmission() <
- static_cast<int32_t>(cbz_range));
+ VIXL_ASSERT(!test.PoolIsEmpty());
// The literal pool needs to be generated.
- VIXL_ASSERT(masm.GetMarginBeforeLiteralEmission() <
+ VIXL_ASSERT(test.GetPoolCheckpoint() - masm.GetCursorOffset() <
kTypicalMacroInstructionMaxSize);
- // But not the veneer pool.
- VIXL_ASSERT(masm.GetMarginBeforeVeneerEmission() >=
- kTypicalMacroInstructionMaxSize);
- // However, as the literal emission would put veneers out of range.
- VIXL_ASSERT(masm.GetMarginBeforeVeneerEmission() <
- kTypicalMacroInstructionMaxSize +
- static_cast<int32_t>(masm.GetLiteralPoolSize()));
- // This extra Nop will generate the literal pool and before that the veneer
- // pool.
+ // This extra Nop will generate the pools.
__ Nop();
- // Now the literal pool has been generated.
- VIXL_ASSERT(masm.LiteralPoolIsEmpty());
- // And also the veneer pool.
- VIXL_ASSERT(masm.GetMarginBeforeVeneerEmission() > 1000);
// Bind all the used labels.
for (uint32_t j = 0; j < kLabelsCount; j++) {
@@ -5039,8 +5018,8 @@ TEST_T32(veneer_and_literal2) {
__ Nop();
}
- // Now that all the labels have been bound, we have no more veneer.
- VIXL_CHECK(masm.VeneerPoolIsEmpty());
+ // Now that all the labels have been bound, we have no more veneers.
+ VIXL_CHECK(test.PoolIsEmpty());
END();
@@ -5192,13 +5171,12 @@ TEST_T32(veneer_and_literal5) {
__ Bind(&labels[test]);
    // Emit the literal pool if it has not been emitted (it's the case for
// the lower values of test).
- __ EmitLiteralPool(MacroAssembler::kBranchRequired);
+ __ EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
}
END();
}
-
// Check that veneer and literals are well generated when they are out of
// range at the same time.
TEST_T32(veneer_and_literal6) {
@@ -5238,8 +5216,10 @@ TEST_T32(veneer_and_literal6) {
// margin (minus the size of the veneers).
// At this point, the literal and the veneer pools are not emitted.
- VIXL_CHECK(masm.GetLiteralPoolSize() > 0);
- VIXL_CHECK(masm.GetMarginBeforeVeneerEmission() < kCbzCbnzRange);
+ const int kLdrdLiteralSize = 8;
+ const int kVeneerSize = 4;
+ CHECK_POOL_SIZE(7 * kLdrdLiteralSize + 5 * kVeneerSize);
+ VIXL_CHECK(test.GetPoolCheckpoint() - masm.GetCursorOffset() < kCbzCbnzRange);
// This scope will generate both veneers (they are both out of range).
{
@@ -5250,9 +5230,9 @@ TEST_T32(veneer_and_literal6) {
}
}
- // Check that both veneers have been emitted.
- VIXL_CHECK(masm.GetLiteralPoolSize() == 0);
- VIXL_CHECK(masm.GetMarginBeforeVeneerEmission() > kCbzCbnzRange);
+ // Check that both literals and veneers have been emitted.
+ CHECK_POOL_SIZE(5 * kVeneerSize);
+ VIXL_CHECK(test.GetPoolCheckpoint() - masm.GetCursorOffset() > kCbzCbnzRange);
__ Bind(&t1);
__ Bind(&t2);
@@ -5260,6 +5240,8 @@ TEST_T32(veneer_and_literal6) {
__ Bind(&t4);
__ Bind(&t5);
+ CHECK_POOL_SIZE(0);
+
END();
RUN();
@@ -5279,34 +5261,33 @@ TEST_T32(veneer_and_literal6) {
ASSERT_EQUAL_32(0x12345678, r11);
}
-
// Check that a label which is just bound during the MacroEmissionCheckScope
// can be used.
TEST(ldr_label_bound_during_scope) {
SETUP();
START();
- const int32_t kTypicalMacroInstructionMaxSize =
- 8 * kMaxInstructionSizeInBytes;
-
Literal<uint64_t>* literal =
new Literal<uint64_t>(UINT64_C(0x1234567890abcdef),
RawLiteral::kPlacedWhenUsed,
RawLiteral::kDeletedOnPoolDestruction);
__ Ldrd(r0, r1, literal);
- while (masm.GetMarginBeforeLiteralEmission() >=
- kTypicalMacroInstructionMaxSize) {
- __ Nop();
+ const int nop_size = masm.IsUsingA32() ? 4 : 2;
+ while (test.GetPoolCheckpoint() >=
+ (masm.GetCursorOffset() +
+ static_cast<int32_t>(kMaxInstructionSizeInBytes))) {
+ ExactAssemblyScope scope(&masm, nop_size, ExactAssemblyScope::kExactSize);
+ __ nop();
}
- VIXL_ASSERT(!masm.LiteralPoolIsEmpty());
+ VIXL_ASSERT(!test.PoolIsEmpty());
  // This Ldrd will first generate the pool and then use the literal which
  // has just been bound.
__ Ldrd(r2, r3, literal);
- VIXL_ASSERT(masm.LiteralPoolIsEmpty());
+ VIXL_ASSERT(test.PoolIsEmpty());
END();
@@ -5328,8 +5309,8 @@ TEST_T32(test_it_scope_and_literal_pool) {
START();
// Make sure the pool is empty.
- masm.EmitLiteralPool(MacroAssembler::kBranchRequired);
- ASSERT_LITERAL_POOL_SIZE(0);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
+ VIXL_CHECK(test.PoolIsEmpty());
Literal<uint64_t> l0(0xcafebeefdeadbaba);
__ Ldrd(r0, r1, &l0);
@@ -5338,8 +5319,8 @@ TEST_T32(test_it_scope_and_literal_pool) {
// for).
const int32_t kTypicalMacroInstructionMaxSize =
8 * kMaxInstructionSizeInBytes;
- int32_t margin =
- masm.GetMarginBeforeLiteralEmission() - kTypicalMacroInstructionMaxSize;
+ int32_t margin = test.GetPoolCheckpoint() - masm.GetCursorOffset() -
+ kTypicalMacroInstructionMaxSize;
int32_t end = masm.GetCursorOffset() + margin;
{
@@ -5348,7 +5329,7 @@ TEST_T32(test_it_scope_and_literal_pool) {
__ nop();
}
}
- VIXL_CHECK(masm.GetMarginBeforeLiteralEmission() ==
+ VIXL_CHECK((test.GetPoolCheckpoint() - masm.GetCursorOffset()) ==
kTypicalMacroInstructionMaxSize);
// We cannot use an IT block for this instruction, hence ITScope will
@@ -5621,7 +5602,6 @@ TEST(blx) {
ASSERT_EQUAL_32(0x22222222, r1);
}
-
// Check that B with a near hint uses a narrow branch when it can.
TEST_T32(b_near_hint) {
SETUP();
@@ -5664,7 +5644,8 @@ TEST_T32(b_near_hint) {
__ B(&end, kNear);
}
- VIXL_CHECK(masm.GetMarginBeforeVeneerEmission() < kBNarrowRange);
+ int32_t margin = test.GetPoolCheckpoint() - masm.GetCursorOffset();
+ VIXL_CHECK(margin < kBNarrowRange);
{
ExactAssemblyScope scope(&masm,
@@ -5677,7 +5658,8 @@ TEST_T32(b_near_hint) {
}
// A veneer should have been generated.
- VIXL_CHECK(masm.GetMarginBeforeVeneerEmission() > kBNarrowRange);
+ margin = test.GetPoolCheckpoint() - masm.GetCursorOffset();
+ VIXL_CHECK(margin > kBNarrowRange);
__ Bind(&end);
@@ -5686,7 +5668,6 @@ TEST_T32(b_near_hint) {
DISASSEMBLE();
}
-
// Check that B with a far hint uses a narrow branch only for a near backward
// branch.
TEST_T32(b_far_hint) {
@@ -5737,7 +5718,6 @@ TEST_T32(b_far_hint) {
DISASSEMBLE();
}
-
// Check that conditional B with a near hint uses a narrow branch when it can.
TEST_T32(b_conditional_near_hint) {
SETUP();
@@ -5779,7 +5759,8 @@ TEST_T32(b_conditional_near_hint) {
__ B(eq, &end, kNear);
}
- VIXL_CHECK(masm.GetMarginBeforeVeneerEmission() < kBConditionalNarrowRange);
+ int32_t margin = test.GetPoolCheckpoint() - masm.GetCursorOffset();
+ VIXL_CHECK(margin < kBConditionalNarrowRange);
{
ExactAssemblyScope scope(&masm,
@@ -5792,7 +5773,8 @@ TEST_T32(b_conditional_near_hint) {
}
// A veneer should have been generated.
- VIXL_CHECK(masm.GetMarginBeforeVeneerEmission() > kBConditionalNarrowRange);
+ margin = test.GetPoolCheckpoint() - masm.GetCursorOffset();
+ VIXL_CHECK(margin > kBConditionalNarrowRange);
__ Bind(&end);
@@ -5801,7 +5783,6 @@ TEST_T32(b_conditional_near_hint) {
DISASSEMBLE();
}
-
// Check that conditional B with a far hint uses a narrow branch only for a
// near backward branch.
TEST_T32(b_conditional_far_hint) {
@@ -6129,6 +6110,345 @@ TEST_T32(macro_assembler_commute) {
// Orrs(eq, r7, r6, r7));
}
+TEST(emit_pool_when_manually_placing_literal) {
+ SETUP();
+ START();
+
+ // Literal that will be manually placed.
+ Literal<uint64_t> l0(0xcafebeefdeadbaba, RawLiteral::kManuallyPlaced);
+
+ // Create one literal pool entry.
+ __ Ldrd(r0, r1, 0x1234567890abcdef);
+
+ // Branch using the assembler, to avoid introducing a veneer.
+ Label over_literal;
+ const int kBranchSize = 4;
+ {
+ ExactAssemblyScope scope(&masm,
+ kBranchSize,
+ ExactAssemblyScope::kExactSize);
+ __ b(&over_literal);
+ }
+
+ // Almost reach the pool checkpoint.
+ int32_t margin =
+ test.GetPoolCheckpoint() - masm.GetCursorOffset() - l0.GetSize() / 2;
+ int32_t end = masm.GetCursorOffset() + margin;
+ {
+ ExactAssemblyScope scope(&masm, margin, ExactAssemblyScope::kExactSize);
+ while (masm.GetCursorOffset() < end) {
+ __ nop();
+ }
+ }
+
+ VIXL_CHECK(!test.PoolIsEmpty());
+ __ Place(&l0);
+ // The pool must now have been emitted.
+ VIXL_CHECK(test.PoolIsEmpty());
+
+ __ Bind(&over_literal);
+
+ __ Ldrd(r2, r3, &l0);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(0x90abcdef, r0);
+ ASSERT_EQUAL_32(0x12345678, r1);
+ ASSERT_EQUAL_32(0xdeadbaba, r2);
+ ASSERT_EQUAL_32(0xcafebeef, r3);
+}
+
+
+// The addition of padding only happens for T32.
+TEST_T32(emit_pool_when_adding_padding_due_to_bind) {
+ SETUP();
+ START();
+
+ // Make sure we start with a 4-byte aligned address, in order for the
+ // location where we will call Bind() to be 4-byte aligned.
+ {
+ ExactAssemblyScope scope(&masm,
+ k16BitT32InstructionSizeInBytes,
+ ExactAssemblyScope::kMaximumSize);
+ while (masm.GetCursorOffset() % 4 != 0) {
+ __ nop();
+ }
+ }
+
+ // Create one literal pool entry.
+ __ Ldrd(r0, r1, 0x1234567890abcdef);
+
+ // Almost reach the pool checkpoint.
+ const int kPaddingBytes = 2;
+ int32_t margin =
+ test.GetPoolCheckpoint() - masm.GetCursorOffset() - kPaddingBytes;
+ int32_t end = masm.GetCursorOffset() + margin;
+ {
+ ExactAssemblyScope scope(&masm, margin, ExactAssemblyScope::kExactSize);
+ while (masm.GetCursorOffset() < end) {
+ __ nop();
+ }
+ }
+
+ Label label;
+ __ Cbz(r0, &label);
+
+ VIXL_CHECK(!test.PoolIsEmpty());
+ // In order to hit the case where binding the label needs to add padding,
+ // we need this to be a 4-byte aligned address.
+ VIXL_ASSERT((masm.GetBuffer()->GetCursorOffset() % 4) == 0);
+
+ __ Bind(&label);
+ // The pool must now have been emitted.
+ VIXL_CHECK(test.PoolIsEmpty());
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_32(0x90abcdef, r0);
+ ASSERT_EQUAL_32(0x12345678, r1);
+}
+
+static void AddBranchesAndGetCloseToCheckpoint(MacroAssembler* masm,
+ TestMacroAssembler* test,
+ const int kLabelsCount,
+ Label b_labels[],
+ int32_t margin) {
+ // Add many veneers to the pool.
+ for (int i = 0; i < kLabelsCount; i++) {
+ masm->B(&b_labels[i]);
+ }
+
+ // Get close to the veneer emission margin (considering the heuristic).
+ // Use add instead of nop to make viewing the disassembled code easier.
+ const int kAddSize = masm->IsUsingT32() ? k16BitT32InstructionSizeInBytes
+ : kA32InstructionSizeInBytes;
+ int32_t end = test->GetPoolCheckpoint();
+ int32_t space = end - masm->GetCursorOffset() - margin;
+ {
+ ExactAssemblyScope scope(masm, space, ExactAssemblyScope::kExactSize);
+ while (space > 0) {
+ masm->add(r0, r0, r0);
+ space -= kAddSize;
+ }
+ }
+
+ // Make sure the veneers have not yet been emitted.
+ const int kVeneerSize = 4;
+ VIXL_CHECK(test->GetPoolSize() == kLabelsCount * kVeneerSize);
+}
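The helper above relies on a pattern used throughout the updated tests: with the unified pool manager there is a single checkpoint covering both literals and veneers, so the remaining margin is always derived from it, where the old code queried GetMarginBeforeVeneerEmission() and GetMarginBeforeLiteralEmission() separately. A minimal sketch of that idiom, not part of the patch (RemainingPoolMargin is a hypothetical name):

  // Illustration only: the distance, in bytes, between the current cursor
  // and the point at which the pool manager must emit its pending objects.
  static int32_t RemainingPoolMargin(MacroAssembler* masm,
                                     TestMacroAssembler* test) {
    return test->GetPoolCheckpoint() - masm->GetCursorOffset();
  }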
+
+static void EmitIndividualNops(MacroAssembler* masm, const int kNops) {
+ for (int i = 0; i < kNops; ++i) {
+ masm->Nop();
+ }
+}
+
+static void EmitNopsInExactAssemblyScope(MacroAssembler* masm,
+ const int kNops) {
+ const int kNopSize = masm->IsUsingT32() ? k16BitT32InstructionSizeInBytes
+ : kA32InstructionSizeInBytes;
+ {
+ ExactAssemblyScope scope(masm,
+ kNops * kNopSize,
+ ExactAssemblyScope::kExactSize);
+ for (int i = 0; i < kNops; i++) {
+ masm->nop();
+ }
+ }
+}
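The two nop helpers differ in when the pool manager gets a chance to run: Nop() is a macro instruction, so EmitIndividualNops performs an emission check on every iteration, while EmitNopsInExactAssemblyScope blocks pool emission until its scope closes. A hedged illustration of the difference, assuming a masm/test pair set up as in the tests below:

  // Sketch only. While the scope is open, no pool can be emitted.
  const int kNopSize = masm.IsUsingT32() ? k16BitT32InstructionSizeInBytes
                                         : kA32InstructionSizeInBytes;
  {
    ExactAssemblyScope scope(&masm, 2 * kNopSize,
                             ExactAssemblyScope::kExactSize);
    masm.nop();
    masm.nop();
  }
  // The next macro instruction performs an emission check, so a pending pool
  // whose checkpoint has been reached may be emitted before it.
  masm.Nop();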
+
+TEST_A32(literal_and_veneer_interaction_1) {
+ SETUP();
+ START();
+
+ static const int kLabelsCount = 100;
+
+ Label b_labels[kLabelsCount];
+
+ AddBranchesAndGetCloseToCheckpoint(&masm,
+ &test,
+ kLabelsCount,
+ b_labels,
+ 1 * KBytes);
+
+  // Emit a load of a large string. In the past, we attempted to emit the
+  // literal load without emitting the veneers, which left us with an
+  // impossible scheduling problem for the pool objects (due to the short
+  // range of the ldrd).
+ std::string test_string(2 * KBytes, 'x');
+ StringLiteral big_literal(test_string.c_str());
+ __ Ldrd(r0, r1, &big_literal);
+
+ EmitIndividualNops(&masm, 1000);
+
+ // We can now safely bind the labels.
+ for (int i = 0; i < kLabelsCount; i++) {
+ __ Bind(&b_labels[i]);
+ }
+
+ END();
+
+ RUN();
+}
+
+
+TEST_A32(literal_and_veneer_interaction_2) {
+ SETUP();
+ START();
+
+ static const int kLabelsCount = 100;
+
+ Label b_labels[kLabelsCount];
+
+ AddBranchesAndGetCloseToCheckpoint(&masm,
+ &test,
+ kLabelsCount,
+ b_labels,
+ 1 * KBytes);
+
+ // This is similar to the test above. The Ldr instruction can be emitted with
+ // no problems. The Ldrd used to force emission of the literal pool, pushing
+ // the veneers out of range - we make sure this does not happen anymore.
+ std::string test_string(2 * KBytes, 'z');
+ StringLiteral big_literal(test_string.c_str());
+ __ Ldr(r2, &big_literal);
+
+ const int kVeneerSize = 4;
+ CHECK_POOL_SIZE(kLabelsCount * kVeneerSize + big_literal.GetSize());
+
+ std::string test_string2(2 * KBytes, 'x');
+  StringLiteral big_literal2(test_string2.c_str());
+ __ Ldrd(r0, r1, &big_literal2);
+
+ EmitIndividualNops(&masm, 1000);
+
+ for (int i = 0; i < kLabelsCount; i++) {
+ __ Bind(&b_labels[i]);
+ }
+
+ END();
+
+ RUN();
+}
+
+
+TEST_A32(literal_and_veneer_interaction_3) {
+ SETUP();
+ START();
+
+ static const int kLabelsCount = 100;
+ Label b_labels[kLabelsCount];
+
+ AddBranchesAndGetCloseToCheckpoint(&masm,
+ &test,
+ kLabelsCount,
+ b_labels,
+ 1 * KBytes);
+
+  // Here, we used to emit the Ldrd instruction and then emit the veneers
+  // before the literal, which pushed the Ldrd out of range.
+ // Make sure this does not happen anymore.
+ __ Ldrd(r2, r3, 0x12345678);
+
+ // The issue would only appear when emitting the nops in a single scope.
+ EmitNopsInExactAssemblyScope(&masm, 4096);
+
+ for (int i = 0; i < kLabelsCount; i++) {
+ __ Bind(&b_labels[i]);
+ }
+
+ END();
+
+ RUN();
+}
+
+
+// Equivalent to literal_and_veneer_interaction_1, but for T32.
+TEST_T32(literal_and_veneer_interaction_4) {
+ SETUP();
+ START();
+
+ static const int kLabelsCount = 550;
+
+ Label b_labels[kLabelsCount];
+
+ AddBranchesAndGetCloseToCheckpoint(&masm,
+ &test,
+ kLabelsCount,
+ b_labels,
+ KBytes / 2);
+
+ std::string test_string(3 * KBytes, 'x');
+ StringLiteral big_literal(test_string.c_str());
+ __ Ldrd(r0, r1, &big_literal);
+
+ EmitIndividualNops(&masm, 2000);
+
+ for (int i = 0; i < kLabelsCount; i++) {
+ __ Bind(&b_labels[i]);
+ }
+
+ END();
+
+ RUN();
+}
+
+// Equivalent to literal_and_veneer_interaction_3, but for T32.
+TEST_T32(literal_and_veneer_interaction_5) {
+ SETUP();
+ START();
+
+ static const int kLabelsCount = 550;
+ Label b_labels[kLabelsCount];
+
+ AddBranchesAndGetCloseToCheckpoint(&masm,
+ &test,
+ kLabelsCount,
+ b_labels,
+ 1 * KBytes);
+
+ __ Ldrd(r2, r3, 0x12345678);
+
+ EmitNopsInExactAssemblyScope(&masm, 4096);
+
+ for (int i = 0; i < kLabelsCount; i++) {
+ __ Bind(&b_labels[i]);
+ }
+
+ END();
+
+ RUN();
+}
+
+TEST_T32(assembler_bind_label) {
+ SETUP();
+ START();
+
+ Label label;
+ __ B(eq, &label, kNear);
+
+ // At this point we keep track of the veneer in the pool.
+ VIXL_CHECK(!test.PoolIsEmpty());
+
+ {
+ // Bind the label with the assembler.
+ ExactAssemblyScope scope(&masm, 2, ExactAssemblyScope::kMaximumSize);
+ __ bind(&label);
+ }
+
+ // Make sure the pool is now empty.
+ VIXL_CHECK(test.PoolIsEmpty());
+
+ EmitNopsInExactAssemblyScope(&masm, 4096);
+
+ END();
+
+ RUN();
+}
#define TEST_FORWARD_REFERENCE_INFO(INST, INFO, ASM) \
can_encode = masm.INFO; \
@@ -6138,7 +6458,7 @@ TEST_T32(macro_assembler_commute) {
info->size, \
ExactAssemblyScope::kExactSize); \
int32_t pc = masm.GetCursorOffset() + __ GetArchitectureStatePCOffset(); \
- if (info->pc_needs_aligning == Assembler::ReferenceInfo::kAlignPc) { \
+ if (info->pc_needs_aligning == ReferenceInfo::kAlignPc) { \
pc = AlignDown(pc, 4); \
} \
Label label(pc + info->min_offset); \
@@ -6149,7 +6469,7 @@ TEST_T32(macro_assembler_commute) {
info->size, \
ExactAssemblyScope::kExactSize); \
int32_t pc = masm.GetCursorOffset() + __ GetArchitectureStatePCOffset(); \
- if (info->pc_needs_aligning == Assembler::ReferenceInfo::kAlignPc) { \
+ if (info->pc_needs_aligning == ReferenceInfo::kAlignPc) { \
pc = AlignDown(pc, 4); \
} \
Label label(pc + info->max_offset); \
@@ -6164,7 +6484,7 @@ TEST_T32(macro_assembler_commute) {
info->size, \
ExactAssemblyScope::kMaximumSize); \
int32_t pc = masm.GetCursorOffset() + __ GetArchitectureStatePCOffset(); \
- if (info->pc_needs_aligning == Assembler::ReferenceInfo::kAlignPc) { \
+ if (info->pc_needs_aligning == ReferenceInfo::kAlignPc) { \
pc = AlignDown(pc, 4); \
} \
Label label(pc + info->max_offset + info->alignment); \
@@ -6178,7 +6498,7 @@ TEST_T32(macro_assembler_commute) {
info->size, \
ExactAssemblyScope::kMaximumSize); \
int32_t pc = masm.GetCursorOffset() + __ GetArchitectureStatePCOffset(); \
- if (info->pc_needs_aligning == Assembler::ReferenceInfo::kAlignPc) { \
+ if (info->pc_needs_aligning == ReferenceInfo::kAlignPc) { \
pc = AlignDown(pc, 4); \
} \
Label label(pc + info->min_offset - info->alignment); \
@@ -6195,7 +6515,7 @@ TEST_T32(forward_reference_info_T32) {
MacroAssembler masm(BUF_SIZE, T32);
Label unbound;
- const Assembler::ReferenceInfo* info;
+ const ReferenceInfo* info;
bool can_encode;
// clang-format off
@@ -6333,7 +6653,7 @@ TEST_T32(forward_reference_info_T32) {
TEST_A32(forward_reference_info_A32) {
MacroAssembler masm(BUF_SIZE, A32);
Label unbound;
- const Assembler::ReferenceInfo* info;
+ const ReferenceInfo* info;
bool can_encode;
// clang-format off
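For reference, the pc computation in the TEST_FORWARD_REFERENCE_INFO macro above follows the usual AArch32 rule: the PC value read by an instruction is the instruction's address plus an architecture-state offset (4 bytes in T32, 8 in A32), and some encodings additionally require that value aligned down to a word boundary, which is what ReferenceInfo::kAlignPc signals. A small worked example, with illustrative numbers only:

  // T32 instruction at buffer offset 0x0e; GetArchitectureStatePCOffset() is 4.
  int32_t pc = 0x0e + 4;   // 0x12
  pc = AlignDown(pc, 4);   // 0x10, the base used for the literal offset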
diff --git a/test/aarch32/test-utils-aarch32.h b/test/aarch32/test-utils-aarch32.h
index dd8ecb53..b255e792 100644
--- a/test/aarch32/test-utils-aarch32.h
+++ b/test/aarch32/test-utils-aarch32.h
@@ -1,4 +1,4 @@
-// Copyright 2015, VIXL authors
+// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -27,14 +27,28 @@
#ifndef VIXL_AARCH32_TEST_UTILS_AARCH32_H_
#define VIXL_AARCH32_TEST_UTILS_AARCH32_H_
-#include "test-runner.h"
+#include "../test-pool-manager.h"
+#include "../test-runner.h"
#include "aarch32/constants-aarch32.h"
#include "aarch32/instructions-aarch32.h"
#include "aarch32/macro-assembler-aarch32.h"
namespace vixl {
+
namespace aarch32 {
+class TestMacroAssembler {
+ public:
+ explicit TestMacroAssembler(MacroAssembler* masm)
+ : test(&masm->pool_manager_) {}
+ int32_t GetPoolCheckpoint() const { return test.GetPoolCheckpoint(); }
+ int GetPoolSize() const { return test.GetPoolSize(); }
+ bool PoolIsEmpty() const { return test.PoolIsEmpty(); }
+
+ private:
+ TestPoolManager test;
+};
+
// Only check the simulator tests when we can actually run them.
// TODO: Improve this.
#if defined(__arm__)
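The TestMacroAssembler wrapper added above is what the updated tests use in place of the removed MacroAssembler pool accessors. A minimal usage sketch, assuming a T32 MacroAssembler and the registers/constants already available in these tests (in the assembler tests themselves, the variable test is presumably declared by the SETUP() macro):

  MacroAssembler masm(BUF_SIZE, T32);
  TestMacroAssembler test(&masm);

  VIXL_CHECK(test.PoolIsEmpty());
  masm.Ldrd(r0, r1, 0x1234567890abcdef);  // Adds one literal to the pool.
  VIXL_CHECK(!test.PoolIsEmpty());
  // Distance to the point where the pool manager must emit its objects.
  int32_t margin = test.GetPoolCheckpoint() - masm.GetCursorOffset();
  VIXL_CHECK(margin > 0);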
diff --git a/test/test-code-generation-scopes.cc b/test/test-code-generation-scopes.cc
index 1fd98d8e..734f2223 100644
--- a/test/test-code-generation-scopes.cc
+++ b/test/test-code-generation-scopes.cc
@@ -1,4 +1,4 @@
-// Copyright 2016, VIXL authors
+// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -27,6 +27,7 @@
#include "test-runner.h"
#ifdef VIXL_INCLUDE_TARGET_AARCH32
+#include "aarch32/test-utils-aarch32.h"
#include "aarch32/macro-assembler-aarch32.h"
#endif
@@ -337,14 +338,17 @@ TEST(EmissionCheckScope_Open_Close_64) {
#ifdef VIXL_INCLUDE_TARGET_AARCH32
-#define ASSERT_LITERAL_POOL_SIZE_32(expected) \
- VIXL_CHECK((expected) == masm.GetLiteralPoolSize())
+#define ASSERT_LITERAL_POOL_SIZE_32(expected) \
+ { \
+ aarch32::TestMacroAssembler test(&masm); \
+ VIXL_CHECK((expected) == test.GetPoolSize()); \
+ }
TEST_A32(EmissionCheckScope_emit_pool_32) {
aarch32::MacroAssembler masm;
// Make sure the pool is empty;
- masm.EmitLiteralPool(aarch32::MacroAssembler::kBranchRequired);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
ASSERT_LITERAL_POOL_SIZE_32(0);
__ Ldrd(aarch32::r0, aarch32::r1, 0x1234567890abcdef);
@@ -420,7 +424,7 @@ TEST_A32(EmissionCheckScope_emit_pool_on_Open_32) {
aarch32::MacroAssembler masm;
// Make sure the pool is empty;
- masm.EmitLiteralPool(aarch32::MacroAssembler::kBranchRequired);
+ masm.EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
ASSERT_LITERAL_POOL_SIZE_32(0);
__ Ldrd(aarch32::r0, aarch32::r1, 0x1234567890abcdef);
diff --git a/test/test-pool-manager.cc b/test/test-pool-manager.cc
index b7bd379f..83d8a7b6 100644
--- a/test/test-pool-manager.cc
+++ b/test/test-pool-manager.cc
@@ -86,10 +86,10 @@ static int RandomPCIncrement() {
return 2 * (Random() % 4 + 1);
}
-class TestObject : public LabelBase<int32_t> {
+class TestObject : public LocationBase<int32_t> {
public:
TestObject(int size, int alignment, int id = 0)
- : LabelBase(0 /*type*/, size, alignment), id_(id) {}
+ : LocationBase(0 /*type*/, size, alignment), id_(id) {}
~TestObject() VIXL_THROW_IN_NEGATIVE_TESTING_MODE(std::runtime_error) {}
@@ -141,10 +141,10 @@ class TestObject : public LabelBase<int32_t> {
int id_;
};
-class TestBranchObject : public LabelBase<int32_t> {
+class TestBranchObject : public LocationBase<int32_t> {
public:
TestBranchObject(int size, int alignment, int id = 0)
- : LabelBase(1 /* type */, size, alignment), id_(id) {}
+ : LocationBase(1 /* type */, size, alignment), id_(id) {}
~TestBranchObject() VIXL_THROW_IN_NEGATIVE_TESTING_MODE(std::runtime_error) {}
@@ -861,7 +861,7 @@ TEST(MustEmitNewReferenceDueToSizeOfObject) {
}
template <typename ObjectType>
-void ManagedLabelBaseTestHelper() {
+void ManagedLocationBaseTestHelper() {
TestMacroAssembler masm;
PoolManager<int32_t> pool_manager(4 /*header_size*/,
@@ -892,8 +892,8 @@ class TestObjectDeletedOnPlacement : public TestObject {
}
};
-TEST(DeleteLabelBaseOnPlacement) {
- ManagedLabelBaseTestHelper<TestObjectDeletedOnPlacement>();
+TEST(DeleteLocationBaseOnPlacement) {
+ ManagedLocationBaseTestHelper<TestObjectDeletedOnPlacement>();
}
class TestObjectDeletedOnPoolManagerDestruction : public TestObject {
@@ -909,6 +909,6 @@ class TestObjectDeletedOnPoolManagerDestruction : public TestObject {
};
-TEST(DeleteLabelBaseOnPoolManagerDestruction) {
- ManagedLabelBaseTestHelper<TestObjectDeletedOnPoolManagerDestruction>();
+TEST(DeleteLocationBaseOnPoolManagerDestruction) {
+ ManagedLocationBaseTestHelper<TestObjectDeletedOnPoolManagerDestruction>();
}