Fix compilation with Microsoft Visual C++ (#46)
Also fix almost all warnings at the "/W3" level (the default for new
command-line projects), and some at "/W4" and "/Wall". The simulator
implementation is out of scope: it relies on too many POSIX interfaces
to be usable, or even buildable, on Windows.
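
Most of the /W3 fixes route unary negation of unsigned values through
a new UnsignedNegate() helper added to utils-vixl.h (MSVC reports
"-u" on an unsigned u as warning C4146). A minimal sketch of the
recurring pattern, reproduced from the hunks below; VIXL_STATIC_ASSERT
is the project's compile-time assertion macro from globals-vixl.h:

    #include <type_traits>

    // Two's-complement negation without applying unary minus to an
    // unsigned type, which MSVC warns about.
    template <typename T>
    T UnsignedNegate(T value) {
      VIXL_STATIC_ASSERT(std::is_unsigned<T>::value);
      return ~value + 1;
    }

    // Typical call site (instructions-aarch32.cc):
    //   before: uint32_t lsb = imm & -imm;
    //   after:  uint32_t lsb = imm & UnsignedNegate(imm);
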
diff --git a/src/aarch32/disasm-aarch32.cc b/src/aarch32/disasm-aarch32.cc
index 535f60c..54dafe1 100644
--- a/src/aarch32/disasm-aarch32.cc
+++ b/src/aarch32/disasm-aarch32.cc
@@ -348,7 +348,7 @@
*lane = (value >> 2) & 1;
return Untyped32;
}
- *lane = -1;
+ *lane = ~0U;
return kDataTypeValueInvalid;
}
@@ -365,7 +365,7 @@
*lane = (value >> 2) & 1;
return Untyped32;
}
- *lane = -1;
+ *lane = ~0U;
return kDataTypeValueInvalid;
}
@@ -382,7 +382,7 @@
*lane = (value >> 3) & 1;
return Untyped32;
}
- *lane = -1;
+ *lane = ~0U;
return kDataTypeValueInvalid;
}
@@ -60977,7 +60977,7 @@
Condition condition((instr >> 28) & 0xf);
unsigned rd = (instr >> 12) & 0xf;
uint32_t imm = ImmediateA32::Decode(instr & 0xfff);
- Location location(-imm, kA32PcDelta);
+ Location location(UnsignedNegate(imm), kA32PcDelta);
// ADR{<c>}{<q>} <Rd>, <label> ; A2
adr(condition, Best, Register(rd), &location);
break;
diff --git a/src/aarch32/disasm-aarch32.h b/src/aarch32/disasm-aarch32.h
index 679f47b..4696408 100644
--- a/src/aarch32/disasm-aarch32.h
+++ b/src/aarch32/disasm-aarch32.h
@@ -36,6 +36,12 @@
#include "aarch32/constants-aarch32.h"
#include "aarch32/operands-aarch32.h"
+// Microsoft Visual C++ defines a `mvn` macro that conflicts with our own
+// definition.
+#if defined(_MSC_VER) && defined(mvn)
+#undef mvn
+#endif
+
namespace vixl {
namespace aarch32 {
diff --git a/src/aarch32/instructions-aarch32.cc b/src/aarch32/instructions-aarch32.cc
index 92450d4..fe5458f 100644
--- a/src/aarch32/instructions-aarch32.cc
+++ b/src/aarch32/instructions-aarch32.cc
@@ -651,7 +651,7 @@
(((imm & 0xff00) == 0) || ((imm & 0xff) == 0)))
return true;
/* isolate least-significant set bit */
- uint32_t lsb = imm & -imm;
+ uint32_t lsb = imm & UnsignedNegate(imm);
/* if imm is less than lsb*256 then it fits, but instead we test imm/256 to
* avoid overflow (underflow is always a successful case) */
return ((imm >> 8) < lsb);
@@ -702,7 +702,7 @@
* that the least-significant set bit is always an even bit */
imm = imm | ((imm >> 1) & 0x55555555);
/* isolate least-significant set bit (always even) */
- uint32_t lsb = imm & -imm;
+ uint32_t lsb = imm & UnsignedNegate(imm);
/* if imm is less than lsb*256 then it fits, but instead we test imm/256 to
* avoid overflow (underflow is always a successful case) */
return ((imm >> 8) < lsb);
diff --git a/src/aarch32/instructions-aarch32.h b/src/aarch32/instructions-aarch32.h
index e2c95d1..393f1ea 100644
--- a/src/aarch32/instructions-aarch32.h
+++ b/src/aarch32/instructions-aarch32.h
@@ -40,6 +40,8 @@
#if defined(__arm__) && !defined(__SOFTFP__)
#define HARDFLOAT __attribute__((noinline, pcs("aapcs-vfp")))
+#elif defined(_MSC_VER)
+#define HARDFLOAT __declspec(noinline)
#else
#define HARDFLOAT __attribute__((noinline))
#endif
@@ -1040,7 +1042,9 @@
const char* GetName() const { return (IsPlus() ? "" : "-"); }
bool IsPlus() const { return sign_ == plus; }
bool IsMinus() const { return sign_ == minus; }
- int32_t ApplyTo(uint32_t value) { return IsPlus() ? value : -value; }
+ int32_t ApplyTo(uint32_t value) {
+ return IsPlus() ? value : UnsignedNegate(value);
+ }
private:
SignType sign_;
diff --git a/src/aarch32/macro-assembler-aarch32.cc b/src/aarch32/macro-assembler-aarch32.cc
index fa93fb3..89101e7 100644
--- a/src/aarch32/macro-assembler-aarch32.cc
+++ b/src/aarch32/macro-assembler-aarch32.cc
@@ -266,8 +266,8 @@
uint32_t load_store_offset = offset & extra_offset_mask;
uint32_t add_offset = offset & ~extra_offset_mask;
- if ((add_offset != 0) &&
- (IsModifiedImmediate(offset) || IsModifiedImmediate(-offset))) {
+ if ((add_offset != 0) && (IsModifiedImmediate(offset) ||
+ IsModifiedImmediate(UnsignedNegate(offset)))) {
load_store_offset = 0;
add_offset = offset;
}
@@ -288,7 +288,7 @@
// of ADR -- to get behaviour like loads and stores. This ADR can handle
// at least as much offset as the load_store_offset so it can replace it.
- uint32_t sub_pc_offset = (-offset) & 0xfff;
+ uint32_t sub_pc_offset = UnsignedNegate(offset) & 0xfff;
load_store_offset = (offset + sub_pc_offset) & extra_offset_mask;
add_offset = (offset + sub_pc_offset) & ~extra_offset_mask;
diff --git a/src/aarch64/assembler-aarch64.cc b/src/aarch64/assembler-aarch64.cc
index d093271..df17813 100644
--- a/src/aarch64/assembler-aarch64.cc
+++ b/src/aarch64/assembler-aarch64.cc
@@ -3242,7 +3242,7 @@
Emit(FMOV_h_imm | Rd(vd) | ImmFP16(imm));
} else {
VIXL_ASSERT(CPUHas(CPUFeatures::kNEON, CPUFeatures::kNEONHalf));
- VIXL_ASSERT(vd.Is4H() | vd.Is8H());
+ VIXL_ASSERT(vd.Is4H() || vd.Is8H());
Instr q = vd.Is8H() ? NEON_Q : 0;
uint32_t encoded_imm = FP16ToImm8(imm);
Emit(q | NEONModifiedImmediate_FMOV | ImmNEONabcdefgh(encoded_imm) |
@@ -6426,16 +6426,18 @@
bool Assembler::IsImmLSPair(int64_t offset, unsigned access_size_in_bytes_log2) {
+ const auto access_size_in_bytes = 1U << access_size_in_bytes_log2;
VIXL_ASSERT(access_size_in_bytes_log2 <= kQRegSizeInBytesLog2);
- return IsMultiple(offset, 1 << access_size_in_bytes_log2) &&
- IsInt7(offset / (1 << access_size_in_bytes_log2));
+ return IsMultiple(offset, access_size_in_bytes) &&
+ IsInt7(offset / access_size_in_bytes);
}
bool Assembler::IsImmLSScaled(int64_t offset, unsigned access_size_in_bytes_log2) {
+ const auto access_size_in_bytes = 1U << access_size_in_bytes_log2;
VIXL_ASSERT(access_size_in_bytes_log2 <= kQRegSizeInBytesLog2);
- return IsMultiple(offset, 1 << access_size_in_bytes_log2) &&
- IsUint12(offset / (1 << access_size_in_bytes_log2));
+ return IsMultiple(offset, access_size_in_bytes) &&
+ IsUint12(offset / access_size_in_bytes);
}
diff --git a/src/aarch64/assembler-aarch64.h b/src/aarch64/assembler-aarch64.h
index b3d408d..0d0387f 100644
--- a/src/aarch64/assembler-aarch64.h
+++ b/src/aarch64/assembler-aarch64.h
@@ -7368,8 +7368,9 @@
}
static Instr ImmLSPair(int64_t imm7, unsigned access_size_in_bytes_log2) {
- VIXL_ASSERT(IsMultiple(imm7, 1 << access_size_in_bytes_log2));
- int64_t scaled_imm7 = imm7 / (1 << access_size_in_bytes_log2);
+ const auto access_size_in_bytes = 1U << access_size_in_bytes_log2;
+ VIXL_ASSERT(IsMultiple(imm7, access_size_in_bytes));
+ int64_t scaled_imm7 = imm7 / access_size_in_bytes;
VIXL_ASSERT(IsInt7(scaled_imm7));
return TruncateToUint7(scaled_imm7) << ImmLSPair_offset;
}
diff --git a/src/aarch64/assembler-sve-aarch64.cc b/src/aarch64/assembler-sve-aarch64.cc
index 84d4d51..e99cfdc 100644
--- a/src/aarch64/assembler-sve-aarch64.cc
+++ b/src/aarch64/assembler-sve-aarch64.cc
@@ -6505,7 +6505,7 @@
void Assembler::fmov(const ZRegister& zd, double imm) {
if (IsPositiveZero(imm)) {
- dup(zd, imm);
+ dup(zd, 0);
} else {
fdup(zd, imm);
}
diff --git a/src/aarch64/cpu-aarch64.cc b/src/aarch64/cpu-aarch64.cc
index 1700ec5..3f01797 100644
--- a/src/aarch64/cpu-aarch64.cc
+++ b/src/aarch64/cpu-aarch64.cc
@@ -274,7 +274,7 @@
CPUFeatures::QueryIDRegistersOption option) {
CPUFeatures features;
-#if VIXL_USE_LINUX_HWCAP
+#ifdef VIXL_USE_LINUX_HWCAP
// Map each set bit onto a feature. Ideally, we'd use HWCAP_* macros rather
// than explicit bits, but explicit bits allow us to identify features that
// the toolchain doesn't know about.
diff --git a/src/aarch64/decoder-aarch64.cc b/src/aarch64/decoder-aarch64.cc
index c7825a0..c93b7e2 100644
--- a/src/aarch64/decoder-aarch64.cc
+++ b/src/aarch64/decoder-aarch64.cc
@@ -467,7 +467,8 @@
// Create a compiled node that contains a table with an entry for every bit
// pattern.
- CreateCompiledNode(bit_extract_fn, 1U << GetSampledBitsCount());
+ CreateCompiledNode(bit_extract_fn,
+ static_cast<size_t>(1) << GetSampledBitsCount());
VIXL_ASSERT(compiled_node_ != NULL);
// When we find a pattern matches the representation, set the node's decode
diff --git a/src/aarch64/disasm-aarch64.cc b/src/aarch64/disasm-aarch64.cc
index e5c75dc..ab59949 100644
--- a/src/aarch64/disasm-aarch64.cc
+++ b/src/aarch64/disasm-aarch64.cc
@@ -6122,7 +6122,7 @@
USE(instr);
if (offset < 0) {
// Cast to uint64_t so that INT64_MIN is handled in a well-defined way.
- uint64_t abs_offset = -static_cast<uint64_t>(offset);
+ uint64_t abs_offset = UnsignedNegate(static_cast<uint64_t>(offset));
AppendToOutput("#-0x%" PRIx64, abs_offset);
} else {
AppendToOutput("#+0x%" PRIx64, offset);
@@ -6366,7 +6366,7 @@
const char *reg_field = &format[1];
if (reg_prefix == 'R') {
- bool is_x = instr->GetSixtyFourBits();
+ bool is_x = instr->GetSixtyFourBits() == 1;
if (strspn(reg_field, "0123456789") == 2) { // r20d, r31n, etc.
// Core W or X registers where the type is determined by a specified bit
// position, eg. 'R20d, 'R05n. This is like the 'Rd syntax, where bit 31
@@ -6397,7 +6397,7 @@
field_len = 3;
char *eimm;
int imm = static_cast<int>(strtol(&reg_field[2], &eimm, 10));
- field_len += eimm - &reg_field[2];
+ field_len += static_cast<unsigned>(eimm - &reg_field[2]);
if (reg_num == 31) {
switch (reg_field[1]) {
case 'z':
@@ -6591,12 +6591,12 @@
}
case 'F': { // IFP, IFPNeon, IFPSve or IFPFBits.
int imm8 = 0;
- int len = strlen("IFP");
+ size_t len = strlen("IFP");
switch (format[3]) {
case 'F':
VIXL_ASSERT(strncmp(format, "IFPFBits", strlen("IFPFBits")) == 0);
AppendToOutput("#%" PRId32, 64 - instr->GetFPScale());
- return strlen("IFPFBits");
+ return static_cast<int>(strlen("IFPFBits"));
case 'N':
VIXL_ASSERT(strncmp(format, "IFPNeon", strlen("IFPNeon")) == 0);
imm8 = instr->GetImmNEONabcdefgh();
@@ -6615,7 +6615,7 @@
AppendToOutput("#0x%" PRIx32 " (%.4f)",
imm8,
Instruction::Imm8ToFP32(imm8));
- return len;
+ return static_cast<int>(len);
}
case 'H': { // IH - ImmHint
AppendToOutput("#%" PRId32, instr->GetImmHint());
@@ -6742,7 +6742,7 @@
return 9;
}
case 'B': { // IVByElemIndex.
- int ret = strlen("IVByElemIndex");
+ int ret = static_cast<int>(strlen("IVByElemIndex"));
uint32_t vm_index = instr->GetNEONH() << 2;
vm_index |= instr->GetNEONL() << 1;
vm_index |= instr->GetNEONM();
@@ -6781,12 +6781,12 @@
rn_index = imm4 >> tz;
if (strncmp(format, "IVInsIndex1", strlen("IVInsIndex1")) == 0) {
AppendToOutput("%d", rd_index);
- return strlen("IVInsIndex1");
+ return static_cast<int>(strlen("IVInsIndex1"));
} else if (strncmp(format,
"IVInsIndex2",
strlen("IVInsIndex2")) == 0) {
AppendToOutput("%d", rn_index);
- return strlen("IVInsIndex2");
+ return static_cast<int>(strlen("IVInsIndex2"));
}
}
return 0;
@@ -6796,7 +6796,7 @@
std::pair<int, int> index_and_lane_size =
instr->GetSVEPermuteIndexAndLaneSizeLog2();
AppendToOutput("%d", index_and_lane_size.first);
- return strlen("IVInsSVEIndex");
+ return static_cast<int>(strlen("IVInsSVEIndex"));
}
VIXL_FALLTHROUGH();
}
@@ -6808,31 +6808,31 @@
if (strncmp(format, "IVMIImm8", strlen("IVMIImm8")) == 0) {
uint64_t imm8 = instr->GetImmNEONabcdefgh();
AppendToOutput("#0x%" PRIx64, imm8);
- return strlen("IVMIImm8");
+ return static_cast<int>(strlen("IVMIImm8"));
} else if (strncmp(format, "IVMIImm", strlen("IVMIImm")) == 0) {
uint64_t imm8 = instr->GetImmNEONabcdefgh();
uint64_t imm = 0;
for (int i = 0; i < 8; ++i) {
- if (imm8 & (1 << i)) {
+ if (imm8 & (UINT64_C(1) << i)) {
imm |= (UINT64_C(0xff) << (8 * i));
}
}
AppendToOutput("#0x%" PRIx64, imm);
- return strlen("IVMIImm");
+ return static_cast<int>(strlen("IVMIImm"));
} else if (strncmp(format,
"IVMIShiftAmt1",
strlen("IVMIShiftAmt1")) == 0) {
int cmode = instr->GetNEONCmode();
int shift_amount = 8 * ((cmode >> 1) & 3);
AppendToOutput("#%d", shift_amount);
- return strlen("IVMIShiftAmt1");
+ return static_cast<int>(strlen("IVMIShiftAmt1"));
} else if (strncmp(format,
"IVMIShiftAmt2",
strlen("IVMIShiftAmt2")) == 0) {
int cmode = instr->GetNEONCmode();
int shift_amount = 8 << (cmode & 1);
AppendToOutput("#%d", shift_amount);
- return strlen("IVMIShiftAmt2");
+ return static_cast<int>(strlen("IVMIShiftAmt2"));
} else {
VIXL_UNIMPLEMENTED();
return 0;
@@ -7339,7 +7339,7 @@
uint64_t value = strtoul(c + 1, &new_c, 10);
c = new_c;
VIXL_ASSERT(IsInt32(value));
- bits += value;
+ bits = static_cast<int32_t>(bits + value);
} else if (*c == '*') {
// Similarly, a "*n" trailing the format specifier indicates the extracted
// value should be multiplied by n. This is for cases where the encoded
@@ -7348,7 +7348,7 @@
uint64_t value = strtoul(c + 1, &new_c, 10);
c = new_c;
VIXL_ASSERT(IsInt32(value));
- bits *= value;
+ bits = static_cast<int32_t>(bits * value);
}
AppendToOutput("%d", bits);
@@ -7500,7 +7500,7 @@
if (signed_addresses_) {
if (address < 0) {
sign = "-";
- abs_address = -static_cast<uint64_t>(address);
+ abs_address = UnsignedNegate(static_cast<uint64_t>(address));
} else {
// Leave a leading space, to maintain alignment.
sign = " ";
diff --git a/src/aarch64/macro-assembler-aarch64.cc b/src/aarch64/macro-assembler-aarch64.cc
index 2876028..a48931b 100644
--- a/src/aarch64/macro-assembler-aarch64.cc
+++ b/src/aarch64/macro-assembler-aarch64.cc
@@ -3123,7 +3123,6 @@
return masm_->GetScratchVRegisterList();
case CPURegister::kPRegisterBank:
return masm_->GetScratchPRegisterList();
- return NULL;
}
VIXL_UNREACHABLE();
return NULL;
diff --git a/src/aarch64/operands-aarch64.cc b/src/aarch64/operands-aarch64.cc
index 8db129c..e01d190 100644
--- a/src/aarch64/operands-aarch64.cc
+++ b/src/aarch64/operands-aarch64.cc
@@ -34,7 +34,7 @@
RegList list = list_ & mask;
if (list == 0) return NoCPUReg;
int index = CountTrailingZeros(list);
- VIXL_ASSERT(((1 << index) & list) != 0);
+ VIXL_ASSERT(((static_cast<RegList>(1) << index) & list) != 0);
Remove(index);
return CPURegister(index, size_, type_);
}
@@ -45,7 +45,7 @@
if (list == 0) return NoCPUReg;
int index = CountLeadingZeros(list);
index = kRegListSizeInBits - 1 - index;
- VIXL_ASSERT(((1 << index) & list) != 0);
+ VIXL_ASSERT(((static_cast<RegList>(1) << index) & list) != 0);
Remove(index);
return CPURegister(index, size_, type_);
}
diff --git a/src/aarch64/operands-aarch64.h b/src/aarch64/operands-aarch64.h
index 0c20367..ba3df18 100644
--- a/src/aarch64/operands-aarch64.h
+++ b/src/aarch64/operands-aarch64.h
@@ -909,7 +909,7 @@
bool IsPositiveOrZero() const { return !is_negative_; }
uint64_t GetMagnitude() const {
- return is_negative_ ? -raw_bits_ : raw_bits_;
+ return is_negative_ ? UnsignedNegate(raw_bits_) : raw_bits_;
}
private:
diff --git a/src/assembler-base-vixl.h b/src/assembler-base-vixl.h
index ee54dcb..7bd6af2 100644
--- a/src/assembler-base-vixl.h
+++ b/src/assembler-base-vixl.h
@@ -29,6 +29,12 @@
#include "code-buffer-vixl.h"
+// Microsoft Visual C++ defines a `mvn` macro that conflicts with our own
+// definition.
+#if defined(_MSC_VER) && defined(mvn)
+#undef mvn
+#endif
+
namespace vixl {
class CodeBufferCheckScope;
diff --git a/src/code-buffer-vixl.cc b/src/code-buffer-vixl.cc
index 5c906c5..2cfe8b7 100644
--- a/src/code-buffer-vixl.cc
+++ b/src/code-buffer-vixl.cc
@@ -24,9 +24,11 @@
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifdef VIXL_CODE_BUFFER_MMAP
extern "C" {
#include <sys/mman.h>
}
+#endif
#include "code-buffer-vixl.h"
#include "utils-vixl.h"
@@ -113,11 +115,12 @@
void CodeBuffer::EmitString(const char* string) {
- VIXL_ASSERT(HasSpaceFor(strlen(string) + 1));
+ const auto len = strlen(string) + 1;
+ VIXL_ASSERT(HasSpaceFor(len));
char* dst = reinterpret_cast<char*>(cursor_);
dirty_ = true;
- char* null_char = stpcpy(dst, string);
- cursor_ = reinterpret_cast<byte*>(null_char) + 1;
+ memcpy(dst, string, len);
+ cursor_ = reinterpret_cast<byte*>(dst + len);
}
diff --git a/src/globals-vixl.h b/src/globals-vixl.h
index c7da8a6..4548ba8 100644
--- a/src/globals-vixl.h
+++ b/src/globals-vixl.h
@@ -207,7 +207,7 @@
#if __has_warning("-Wimplicit-fallthrough") && __cplusplus >= 201103L
#define VIXL_FALLTHROUGH() [[clang::fallthrough]]
// Fallthrough annotation for GCC >= 7.
-#elif __GNUC__ >= 7
+#elif defined(__GNUC__) && __GNUC__ >= 7
#define VIXL_FALLTHROUGH() __attribute__((fallthrough))
#else
#define VIXL_FALLTHROUGH() \
diff --git a/src/utils-vixl.cc b/src/utils-vixl.cc
index 41b5586..639a4b1 100644
--- a/src/utils-vixl.cc
+++ b/src/utils-vixl.cc
@@ -24,10 +24,10 @@
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <cstdio>
-
#include "utils-vixl.h"
+#include <cstdio>
+
namespace vixl {
// The default NaN values (for FPCR.DN=1).
@@ -391,7 +391,7 @@
}
VIXL_UNREACHABLE();
- return value;
+ return static_cast<float>(value);
}
// TODO: We should consider implementing a full FPToDouble(Float16)
diff --git a/src/utils-vixl.h b/src/utils-vixl.h
index 3813b59..b6c8455 100644
--- a/src/utils-vixl.h
+++ b/src/utils-vixl.h
@@ -30,6 +30,7 @@
#include <cmath>
#include <cstring>
#include <limits>
+#include <type_traits>
#include <vector>
#include "compiler-intrinsics-vixl.h"
@@ -282,17 +283,26 @@
return RawbitsToDouble(bits);
}
+// Some compilers dislike negating unsigned integers,
+// so we provide an equivalent.
+template <typename T>
+T UnsignedNegate(T value) {
+ VIXL_STATIC_ASSERT(std::is_unsigned<T>::value);
+ return ~value + 1;
+}
+
// Convert unsigned to signed numbers in a well-defined way (using two's
// complement representations).
inline int64_t RawbitsToInt64(uint64_t bits) {
return (bits >= UINT64_C(0x8000000000000000))
- ? (-static_cast<int64_t>(-bits - 1) - 1)
+ ? (-static_cast<int64_t>(UnsignedNegate(bits) - 1) - 1)
: static_cast<int64_t>(bits);
}
inline int32_t RawbitsToInt32(uint32_t bits) {
- return (bits >= UINT64_C(0x80000000)) ? (-static_cast<int32_t>(-bits - 1) - 1)
- : static_cast<int32_t>(bits);
+ return (bits >= UINT64_C(0x80000000))
+ ? (-static_cast<int32_t>(UnsignedNegate(bits) - 1) - 1)
+ : static_cast<int32_t>(bits);
}
namespace internal {
@@ -475,7 +485,9 @@
}
-inline uint64_t LowestSetBit(uint64_t value) { return value & -value; }
+inline uint64_t LowestSetBit(uint64_t value) {
+ return value & UnsignedNegate(value);
+}
template <typename T>
@@ -829,7 +841,7 @@
}
int32_t GetSigned() const { return data_; }
Uint32 operator~() const { return Uint32(~data_); }
- Uint32 operator-() const { return Uint32(-data_); }
+ Uint32 operator-() const { return Uint32(UnsignedNegate(data_)); }
bool operator==(Uint32 value) const { return data_ == value.data_; }
bool operator!=(Uint32 value) const { return data_ != value.data_; }
bool operator>(Uint32 value) const { return data_ > value.data_; }
@@ -897,7 +909,7 @@
Uint32 GetHigh32() const { return Uint32(data_ >> 32); }
Uint32 GetLow32() const { return Uint32(data_ & 0xffffffff); }
Uint64 operator~() const { return Uint64(~data_); }
- Uint64 operator-() const { return Uint64(-data_); }
+ Uint64 operator-() const { return Uint64(UnsignedNegate(data_)); }
bool operator==(Uint64 value) const { return data_ == value.data_; }
bool operator!=(Uint64 value) const { return data_ != value.data_; }
Uint64 operator+(Uint64 value) const { return Uint64(data_ + value.data_); }
@@ -1203,7 +1215,7 @@
// For subnormal outputs, the shift must be adjusted by the exponent. The +1
// is necessary because the exponent of a subnormal value (encoded as 0) is
// the same as the exponent of the smallest normal value (encoded as 1).
- shift += -exponent + 1;
+ shift += static_cast<int>(-exponent + 1);
// Handle inputs that would produce a zero output.
//
diff --git a/test/aarch64/test-assembler-aarch64.cc b/test/aarch64/test-assembler-aarch64.cc
index 1ea4f1a..5d84916 100644
--- a/test/aarch64/test-assembler-aarch64.cc
+++ b/test/aarch64/test-assembler-aarch64.cc
@@ -2876,7 +2876,6 @@
// This method does nothing when the size is zero. i.e. stg and st2g.
// Reserve x9 and x10.
auto LoadDataAndSum = [&](Register reg, int off, unsigned size_in_bytes) {
- VIXL_ASSERT(size_in_bytes >= 0);
for (unsigned j = 0; j < size_in_bytes / kXRegSizeInBytes; j++) {
__ Ldr(x9, MemOperand(reg, off));
__ Add(x10, x9, x10);