Configure a basic .clang-tidy, and fix some issues that it found.

We will enable more rules in the future. In particular, some google-*
rules should already pass, but currently appear not to.

Once we have enabled enough of these rules, we intend to drop the
dependency on cpplint.

Change-Id: I107231cf57b5500a3f1b623510419665589531dc
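The .clang-tidy file itself is omitted from the diff below. As a minimal
sketch only (the committed check list is not shown here), a basic
configuration of the kind described might look like:

    Checks: '-*,google-*,readability-*'
    HeaderFilterRegex: 'src/.*'

clang-tidy reads this YAML from the nearest enclosing directory, so a copy
at the repository root covers the whole tree.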
diff --git a/src/aarch64/macro-assembler-aarch64.cc b/src/aarch64/macro-assembler-aarch64.cc
index 3b99df0..85954fc 100644
--- a/src/aarch64/macro-assembler-aarch64.cc
+++ b/src/aarch64/macro-assembler-aarch64.cc
@@ -506,7 +506,7 @@
bool MacroAssembler::OneInstrMoveImmediateHelper(MacroAssembler* masm,
const Register& dst,
- int64_t imm) {
+ uint64_t imm) {
bool emit_code = masm != NULL;
unsigned n, imm_s, imm_r;
int reg_size = dst.GetSizeInBits();
@@ -1617,13 +1617,13 @@
bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
- int64_t imm) {
+ uint64_t imm) {
return OneInstrMoveImmediateHelper(this, dst, imm);
}
Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
- int64_t imm,
+ uint64_t imm,
PreShiftImmMode mode) {
int reg_size = dst.GetSizeInBits();
@@ -1640,12 +1640,17 @@
// immediate is tested.
shift_low = std::min(shift_low, 4);
}
- int64_t imm_low = imm >> shift_low;
+ // TryOneInstrMoveImmediate handles `imm` with a value of zero, so shift_low
+ // must lie in the range [0, 63], and the shifts below are well-defined.
+ VIXL_ASSERT((shift_low >= 0) && (shift_low < 64));
+ // imm_low = imm >> shift_low (with sign extension)
+ uint64_t imm_low = ExtractSignedBitfield64(63, shift_low, imm);
// Pre-shift the immediate to the most-significant bits of the register,
// inserting set bits in the least-significant bits.
int shift_high = CountLeadingZeros(imm, reg_size);
- int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);
+ VIXL_ASSERT((shift_high >= 0) && (shift_high < 64));
+ uint64_t imm_high = (imm << shift_high) | GetUintMask(shift_high);
if ((mode != kNoShift) && TryOneInstrMoveImmediate(dst, imm_low)) {
// The new immediate has been moved into the destination's low bits:
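Note: the switch to uint64_t avoids shifting signed values: before C++20,
`imm << shift_high` is undefined behaviour for negative `imm`, and
`imm >> shift_low` is implementation-defined. ExtractSignedBitfield64
reproduces the old arithmetic-shift semantics explicitly. A sketch of the
equivalence, with an illustrative value:

    uint64_t imm = UINT64_C(0xFFFFFFFF00000000);
    int shift_low = 16;
    // Previously: static_cast<int64_t>(imm) >> shift_low
    // (implementation-defined). Both forms yield 0xFFFFFFFFFFFF0000 on
    // platforms with arithmetic right shift:
    uint64_t imm_low = ExtractSignedBitfield64(63, shift_low, imm);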
diff --git a/src/aarch64/macro-assembler-aarch64.h b/src/aarch64/macro-assembler-aarch64.h
index 7aa8db2..9c09f1c 100644
--- a/src/aarch64/macro-assembler-aarch64.h
+++ b/src/aarch64/macro-assembler-aarch64.h
@@ -644,7 +644,7 @@
uint64_t imm);
static bool OneInstrMoveImmediateHelper(MacroAssembler* masm,
const Register& dst,
- int64_t imm);
+ uint64_t imm);
// Logical macros.
@@ -714,7 +714,7 @@
// Try to move an immediate into the destination register in a single
// instruction. Returns true for success, and updates the contents of dst.
// Returns false, otherwise.
- bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);
+ bool TryOneInstrMoveImmediate(const Register& dst, uint64_t imm);
// Move an immediate into register dst, and return an Operand object for
// use with a subsequent instruction that accepts a shift. The value moved
@@ -722,7 +722,7 @@
// operation applied to it that will be subsequently undone by the shift
// applied in the Operand.
Operand MoveImmediateForShiftedOp(const Register& dst,
- int64_t imm,
+ uint64_t imm,
PreShiftImmMode mode);
void Move(const GenericOperand& dst, const GenericOperand& src);
diff --git a/src/cpu-features.h b/src/cpu-features.h
index 012c5e4..50ddc26 100644
--- a/src/cpu-features.h
+++ b/src/cpu-features.h
@@ -333,8 +333,10 @@
CPUFeatures::Feature feature_;
bool IsValid() const {
- return ((cpu_features_ == NULL) && (feature_ == CPUFeatures::kNone)) ||
- cpu_features_->Has(feature_);
+ if (cpu_features_ == NULL) {
+ return feature_ == CPUFeatures::kNone;
+ }
+ return cpu_features_->Has(feature_);
}
};
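Note: in the old expression, a null cpu_features_ with feature_ != kNone
made the first operand false, so evaluation fell through to
cpu_features_->Has(feature_) and dereferenced a null pointer. The early
return handles the null case explicitly.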
diff --git a/src/globals-vixl.h b/src/globals-vixl.h
index 2fe0cb0..640b4b9 100644
--- a/src/globals-vixl.h
+++ b/src/globals-vixl.h
@@ -267,16 +267,24 @@
// Target Architecture/ISA
#ifdef VIXL_INCLUDE_TARGET_A64
+#ifndef VIXL_INCLUDE_TARGET_AARCH64
#define VIXL_INCLUDE_TARGET_AARCH64
#endif
+#endif
#if defined(VIXL_INCLUDE_TARGET_A32) && defined(VIXL_INCLUDE_TARGET_T32)
+#ifndef VIXL_INCLUDE_TARGET_AARCH32
#define VIXL_INCLUDE_TARGET_AARCH32
+#endif
#elif defined(VIXL_INCLUDE_TARGET_A32)
+#ifndef VIXL_INCLUDE_TARGET_A32_ONLY
#define VIXL_INCLUDE_TARGET_A32_ONLY
+#endif
#else
+#ifndef VIXL_INCLUDE_TARGET_T32_ONLY
#define VIXL_INCLUDE_TARGET_T32_ONLY
#endif
+#endif
#endif // VIXL_GLOBALS_H
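Note: the added #ifndef guards presumably avoid macro-redefinition warnings
when a build also defines these VIXL_INCLUDE_TARGET_* macros on the command
line (e.g. -DVIXL_INCLUDE_TARGET_AARCH64=1, whose replacement text differs
from the empty definition here).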
diff --git a/src/utils-vixl.h b/src/utils-vixl.h
index 1c76fcb..c9287e4 100644
--- a/src/utils-vixl.h
+++ b/src/utils-vixl.h
@@ -71,6 +71,12 @@
return n;
}
+inline uint64_t GetUintMask(unsigned bits) {
+ VIXL_ASSERT(bits <= 64);
+ uint64_t base = (bits >= 64) ? 0 : (UINT64_C(1) << bits);
+ return base - 1;
+}
+
// Check number width.
// TODO: Refactor these using templates.
inline bool IsIntN(unsigned n, uint32_t x) {
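Note: the `bits >= 64` branch in GetUintMask sidesteps `UINT64_C(1) << 64`,
which is undefined; `base` is then 0, and the unsigned wrap in `base - 1`
yields all ones, which is well-defined. Expected values, as a quick sketch:

    GetUintMask(0);   // 0x0000000000000000
    GetUintMask(3);   // 0x0000000000000007
    GetUintMask(64);  // 0xFFFFFFFFFFFFFFFF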
@@ -190,7 +196,7 @@
}
-inline int64_t ExtractSignedBitfield64(int msb, int lsb, int64_t x) {
+inline int64_t ExtractSignedBitfield64(int msb, int lsb, uint64_t x) {
VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
(msb >= lsb));
uint64_t temp = ExtractUnsignedBitfield64(msb, lsb, x);
@@ -204,7 +210,7 @@
}
-inline int32_t ExtractSignedBitfield32(int msb, int lsb, int32_t x) {
+inline int32_t ExtractSignedBitfield32(int msb, int lsb, uint32_t x) {
VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
(msb >= lsb));
uint32_t temp = TruncateToUint32(ExtractSignedBitfield64(msb, lsb, x));
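Note: taking `x` as an unsigned type lets callers pass raw bit patterns
without a cast and keeps the extraction free of shifts on signed values;
sign extension is applied explicitly from the field's top bit. An
illustrative call:

    // Bits [3:0] of 0xC are 0b1100; sign-extending from bit 3 gives -4.
    int64_t v = ExtractSignedBitfield64(3, 0, UINT64_C(0xC));  // v == -4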