armvixl | 5289c59 | 2015-03-02 13:52:04 +0000 | [diff] [blame] | 1 | // Copyright 2015, ARM Limited |
armvixl | ad96eda | 2013-06-14 11:42:37 +0100 | [diff] [blame] | 2 | // All rights reserved. |
| 3 | // |
| 4 | // Redistribution and use in source and binary forms, with or without |
| 5 | // modification, are permitted provided that the following conditions are met: |
| 6 | // |
| 7 | // * Redistributions of source code must retain the above copyright notice, |
| 8 | // this list of conditions and the following disclaimer. |
| 9 | // * Redistributions in binary form must reproduce the above copyright notice, |
| 10 | // this list of conditions and the following disclaimer in the documentation |
| 11 | // and/or other materials provided with the distribution. |
| 12 | // * Neither the name of ARM Limited nor the names of its contributors may be |
| 13 | // used to endorse or promote products derived from this software without |
| 14 | // specific prior written permission. |
| 15 | // |
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
| 17 | // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
| 18 | // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
| 19 | // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE |
| 20 | // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 21 | // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
| 22 | // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER |
| 23 | // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, |
| 24 | // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 25 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 26 | |
| 27 | #ifndef VIXL_UTILS_H |
| 28 | #define VIXL_UTILS_H |
| 29 | |
armvixl | ad96eda | 2013-06-14 11:42:37 +0100 | [diff] [blame] | 30 | #include <string.h> |
armvixl | 6e2c827 | 2015-03-31 11:04:14 +0100 | [diff] [blame^] | 31 | #include <cmath> |
| 32 | #include "vixl/globals.h" |
| 33 | #include "vixl/compiler-intrinsics.h" |
armvixl | ad96eda | 2013-06-14 11:42:37 +0100 | [diff] [blame] | 34 | |
| 35 | namespace vixl { |
| 36 | |
armvixl | 4a102ba | 2014-07-14 09:02:40 +0100 | [diff] [blame] | 37 | // Macros for compile-time format checking. |
| 38 | #if defined(__GNUC__) |
| 39 | #define PRINTF_CHECK(format_index, varargs_index) \ |
| 40 | __attribute__((format(printf, format_index, varargs_index))) |
| 41 | #else |
| 42 | #define PRINTF_CHECK(format_index, varargs_index) |
| 43 | #endif |
| 44 | |
armvixl | ad96eda | 2013-06-14 11:42:37 +0100 | [diff] [blame] | 45 | // Check number width. |
| 46 | inline bool is_intn(unsigned n, int64_t x) { |
armvixl | b0c8ae2 | 2014-03-21 14:03:59 +0000 | [diff] [blame] | 47 | VIXL_ASSERT((0 < n) && (n < 64)); |
| 48 | int64_t limit = INT64_C(1) << (n - 1); |
armvixl | ad96eda | 2013-06-14 11:42:37 +0100 | [diff] [blame] | 49 | return (-limit <= x) && (x < limit); |
| 50 | } |
| 51 | |
| 52 | inline bool is_uintn(unsigned n, int64_t x) { |
armvixl | b0c8ae2 | 2014-03-21 14:03:59 +0000 | [diff] [blame] | 53 | VIXL_ASSERT((0 < n) && (n < 64)); |
armvixl | ad96eda | 2013-06-14 11:42:37 +0100 | [diff] [blame] | 54 | return !(x >> n); |
| 55 | } |
| 56 | |
// Clear all but the n least-significant bits of x.
// NOTE(review): the return type is `unsigned` (typically 32 bits), so for
// n > 32 the high bits of the masked value are lost in the implicit
// conversion — confirm callers only rely on fields that fit in 32 bits.
inline unsigned truncate_to_intn(unsigned n, int64_t x) {
  VIXL_ASSERT((0 < n) && (n < 64));
  return (x & ((INT64_C(1) << n) - 1));
}
| 61 | |
// Expand V(n) once for every n in [1, 63]. Used below to stamp out the
// fixed-width helper functions is_int<N>, is_uint<N> and truncate_to_int<N>.
#define INT_1_TO_63_LIST(V) \
V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) \
V(9) V(10) V(11) V(12) V(13) V(14) V(15) V(16) \
V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \
V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32) \
V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \
V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \
V(57) V(58) V(59) V(60) V(61) V(62) V(63)

// Generate is_int1 .. is_int63: does x fit in an N-bit signed field?
#define DECLARE_IS_INT_N(N) \
inline bool is_int##N(int64_t x) { return is_intn(N, x); }
// Generate is_uint1 .. is_uint63: does x fit in an N-bit unsigned field?
#define DECLARE_IS_UINT_N(N) \
inline bool is_uint##N(int64_t x) { return is_uintn(N, x); }
// Generate truncate_to_int1 .. truncate_to_int63.
// NOTE(review): these take and return `int`, unlike truncate_to_intn's
// int64_t/unsigned — wider arguments are narrowed at the call site; confirm
// callers only pass values that fit in an int.
#define DECLARE_TRUNCATE_TO_INT_N(N) \
inline int truncate_to_int##N(int x) { return truncate_to_intn(N, x); }
INT_1_TO_63_LIST(DECLARE_IS_INT_N)
INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
#undef DECLARE_IS_INT_N
#undef DECLARE_IS_UINT_N
#undef DECLARE_TRUNCATE_TO_INT_N
| 84 | |
| 85 | // Bit field extraction. |
// Extract the (zero-extended) bit field x[msb:lsb], both ends inclusive.
inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
  // Build the mask as ((2 << width-1) - 1) rather than ((1 << width) - 1):
  // when the field spans all 32 bits (msb - lsb == 31) the latter shifts by
  // 32, which is undefined behaviour (and 1 << 31 overflows signed int).
  // UINT32_C(2) << 31 wraps to 0 in unsigned arithmetic, and 0 - 1 then
  // yields the required all-ones mask.
  return (x >> lsb) & ((UINT32_C(2) << (msb - lsb)) - 1);
}
| 89 | |
// Extract the (zero-extended) bit field x[msb:lsb], both ends inclusive.
inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
  // Build the mask as ((2 << width-1) - 1) rather than ((1 << width) - 1):
  // when the field spans all 64 bits (msb - lsb == 63) the latter shifts by
  // 64, which is undefined behaviour. UINT64_C(2) << 63 wraps to 0 in
  // unsigned arithmetic, and 0 - 1 then yields the all-ones mask.
  return (x >> lsb) & ((UINT64_C(2) << (msb - lsb)) - 1);
}
| 93 | |
// Extract the sign-extended bit field x[msb:lsb], both ends inclusive.
inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) {
  // Left-shifting a negative signed value is undefined behaviour, so perform
  // the left shift in the unsigned domain. The final arithmetic right shift
  // of a possibly-negative value does the sign extension; that shift is
  // implementation-defined, but arithmetic on all toolchains this code
  // already relies on.
  uint32_t shifted = static_cast<uint32_t>(x) << (31 - msb);
  return static_cast<int32_t>(shifted) >> (lsb + 31 - msb);
}
| 97 | |
// Extract the sign-extended bit field x[msb:lsb], both ends inclusive.
inline int64_t signed_bitextract_64(int msb, int lsb, int64_t x) {
  // Left-shifting a negative signed value is undefined behaviour, so perform
  // the left shift in the unsigned domain. The final arithmetic right shift
  // of a possibly-negative value does the sign extension; that shift is
  // implementation-defined, but arithmetic on all toolchains this code
  // already relies on.
  uint64_t shifted = static_cast<uint64_t>(x) << (63 - msb);
  return static_cast<int64_t>(shifted) >> (lsb + 63 - msb);
}
| 101 | |
// Floating point representation.
// Bit-level conversions between floating-point values and their raw
// integer encodings.
uint32_t float_to_rawbits(float value);
uint64_t double_to_rawbits(double value);
float rawbits_to_float(uint32_t bits);
double rawbits_to_double(uint64_t bits);

// Accessors for the sign, exponent and mantissa fields of a
// single- or double-precision value.
uint32_t float_sign(float val);
uint32_t float_exp(float val);
uint32_t float_mantissa(float val);
uint32_t double_sign(double val);
uint32_t double_exp(double val);
uint64_t double_mantissa(double val);

// Assemble a value from its sign, exponent and mantissa fields.
float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa);
double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa);

// An fpclassify() function for 16-bit half-precision floats.
int float16classify(float16 value);
armvixl | f37fdc0 | 2014-02-05 13:22:16 +0000 | [diff] [blame] | 120 | |
| 121 | // NaN tests. |
| 122 | inline bool IsSignallingNaN(double num) { |
armvixl | 5799d6c | 2014-05-01 11:05:00 +0100 | [diff] [blame] | 123 | const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000); |
armvixl | f37fdc0 | 2014-02-05 13:22:16 +0000 | [diff] [blame] | 124 | uint64_t raw = double_to_rawbits(num); |
armvixl | 6e2c827 | 2015-03-31 11:04:14 +0100 | [diff] [blame^] | 125 | if (std::isnan(num) && ((raw & kFP64QuietNaNMask) == 0)) { |
armvixl | f37fdc0 | 2014-02-05 13:22:16 +0000 | [diff] [blame] | 126 | return true; |
| 127 | } |
| 128 | return false; |
| 129 | } |
| 130 | |
| 131 | |
| 132 | inline bool IsSignallingNaN(float num) { |
armvixl | b0c8ae2 | 2014-03-21 14:03:59 +0000 | [diff] [blame] | 133 | const uint32_t kFP32QuietNaNMask = 0x00400000; |
armvixl | f37fdc0 | 2014-02-05 13:22:16 +0000 | [diff] [blame] | 134 | uint32_t raw = float_to_rawbits(num); |
armvixl | 6e2c827 | 2015-03-31 11:04:14 +0100 | [diff] [blame^] | 135 | if (std::isnan(num) && ((raw & kFP32QuietNaNMask) == 0)) { |
armvixl | f37fdc0 | 2014-02-05 13:22:16 +0000 | [diff] [blame] | 136 | return true; |
| 137 | } |
| 138 | return false; |
| 139 | } |
| 140 | |
| 141 | |
armvixl | 5289c59 | 2015-03-02 13:52:04 +0000 | [diff] [blame] | 142 | inline bool IsSignallingNaN(float16 num) { |
| 143 | const uint16_t kFP16QuietNaNMask = 0x0200; |
| 144 | return (float16classify(num) == FP_NAN) && |
| 145 | ((num & kFP16QuietNaNMask) == 0); |
| 146 | } |
| 147 | |
| 148 | |
// A quiet NaN is any NaN that is not signalling.
template <typename T>
inline bool IsQuietNaN(T num) {
  if (!std::isnan(num)) return false;
  return !IsSignallingNaN(num);
}
| 153 | |
| 154 | |
armvixl | b0c8ae2 | 2014-03-21 14:03:59 +0000 | [diff] [blame] | 155 | // Convert the NaN in 'num' to a quiet NaN. |
| 156 | inline double ToQuietNaN(double num) { |
armvixl | 5799d6c | 2014-05-01 11:05:00 +0100 | [diff] [blame] | 157 | const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000); |
armvixl | 6e2c827 | 2015-03-31 11:04:14 +0100 | [diff] [blame^] | 158 | VIXL_ASSERT(std::isnan(num)); |
armvixl | b0c8ae2 | 2014-03-21 14:03:59 +0000 | [diff] [blame] | 159 | return rawbits_to_double(double_to_rawbits(num) | kFP64QuietNaNMask); |
| 160 | } |
| 161 | |
| 162 | |
| 163 | inline float ToQuietNaN(float num) { |
| 164 | const uint32_t kFP32QuietNaNMask = 0x00400000; |
armvixl | 6e2c827 | 2015-03-31 11:04:14 +0100 | [diff] [blame^] | 165 | VIXL_ASSERT(std::isnan(num)); |
armvixl | b0c8ae2 | 2014-03-21 14:03:59 +0000 | [diff] [blame] | 166 | return rawbits_to_float(float_to_rawbits(num) | kFP32QuietNaNMask); |
| 167 | } |
| 168 | |
| 169 | |
| 170 | // Fused multiply-add. |
// Fused multiply-add.
// Compute (op1 * op2) + a with a single rounding step.
inline double FusedMultiplyAdd(double op1, double op2, double a) {
  return std::fma(op1, op2, a);
}
| 174 | |
| 175 | |
// Compute (op1 * op2) + a with a single rounding step.
inline float FusedMultiplyAdd(float op1, float op2, float a) {
  return std::fmaf(op1, op2, a);
}
| 179 | |
| 180 | |
// Isolate the lowest set bit of value (0 when value is 0).
inline uint64_t LowestSetBit(uint64_t value) {
  // Two's-complement identity: -value == ~value + 1, so value & -value
  // keeps only the least-significant set bit.
  uint64_t negated = ~value + 1;
  return value & negated;
}
| 184 | |
| 185 | |
// Return the bit index of the highest set bit of value; value must be
// non-zero.
template<typename T>
inline int HighestSetBitPosition(T value) {
  VIXL_ASSERT(value != 0);
  // The index of the top set bit is (width - 1) minus the leading-zero count.
  const int bit_width = sizeof(value) * 8;
  return bit_width - 1 - CountLeadingZeros(value);
}
| 191 | |
| 192 | |
// Return log2 of a value that is known to be a power of two.
template<typename V>
inline int WhichPowerOf2(V value) {
  VIXL_ASSERT(IsPowerOf2(value));
  // A power of two has exactly one bit set, so its exponent equals the
  // number of trailing zeros.
  return CountTrailingZeros(value);
}
armvixl | ad96eda | 2013-06-14 11:42:37 +0100 | [diff] [blame] | 198 | |
armvixl | 330dc71 | 2014-11-25 10:38:32 +0000 | [diff] [blame] | 199 | unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size); |
| 200 | |
armvixl | ad96eda | 2013-06-14 11:42:37 +0100 | [diff] [blame] | 201 | // Pointer alignment |
| 202 | // TODO: rename/refactor to make it specific to instructions. |
| 203 | template<typename T> |
| 204 | bool IsWordAligned(T pointer) { |
armvixl | b0c8ae2 | 2014-03-21 14:03:59 +0000 | [diff] [blame] | 205 | VIXL_ASSERT(sizeof(pointer) == sizeof(intptr_t)); // NOLINT(runtime/sizeof) |
armvixl | c68cb64 | 2014-09-25 18:49:30 +0100 | [diff] [blame] | 206 | return ((intptr_t)(pointer) & 3) == 0; |
armvixl | ad96eda | 2013-06-14 11:42:37 +0100 | [diff] [blame] | 207 | } |
| 208 | |
armvixl | 330dc71 | 2014-11-25 10:38:32 +0000 | [diff] [blame] | 209 | // Increment a pointer (up to 64 bits) until it has the specified alignment. |
armvixl | ad96eda | 2013-06-14 11:42:37 +0100 | [diff] [blame] | 210 | template<class T> |
| 211 | T AlignUp(T pointer, size_t alignment) { |
armvixl | 4a102ba | 2014-07-14 09:02:40 +0100 | [diff] [blame] | 212 | // Use C-style casts to get static_cast behaviour for integral types (T), and |
| 213 | // reinterpret_cast behaviour for other types. |
| 214 | |
armvixl | 330dc71 | 2014-11-25 10:38:32 +0000 | [diff] [blame] | 215 | uint64_t pointer_raw = (uint64_t)pointer; |
| 216 | VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw)); |
armvixl | 4a102ba | 2014-07-14 09:02:40 +0100 | [diff] [blame] | 217 | |
armvixl | ad96eda | 2013-06-14 11:42:37 +0100 | [diff] [blame] | 218 | size_t align_step = (alignment - pointer_raw) % alignment; |
armvixl | b0c8ae2 | 2014-03-21 14:03:59 +0000 | [diff] [blame] | 219 | VIXL_ASSERT((pointer_raw + align_step) % alignment == 0); |
armvixl | 4a102ba | 2014-07-14 09:02:40 +0100 | [diff] [blame] | 220 | |
| 221 | return (T)(pointer_raw + align_step); |
armvixl | ad96eda | 2013-06-14 11:42:37 +0100 | [diff] [blame] | 222 | } |
| 223 | |
armvixl | 330dc71 | 2014-11-25 10:38:32 +0000 | [diff] [blame] | 224 | // Decrement a pointer (up to 64 bits) until it has the specified alignment. |
armvixl | b0c8ae2 | 2014-03-21 14:03:59 +0000 | [diff] [blame] | 225 | template<class T> |
| 226 | T AlignDown(T pointer, size_t alignment) { |
armvixl | 4a102ba | 2014-07-14 09:02:40 +0100 | [diff] [blame] | 227 | // Use C-style casts to get static_cast behaviour for integral types (T), and |
| 228 | // reinterpret_cast behaviour for other types. |
| 229 | |
armvixl | 330dc71 | 2014-11-25 10:38:32 +0000 | [diff] [blame] | 230 | uint64_t pointer_raw = (uint64_t)pointer; |
| 231 | VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw)); |
armvixl | 4a102ba | 2014-07-14 09:02:40 +0100 | [diff] [blame] | 232 | |
armvixl | b0c8ae2 | 2014-03-21 14:03:59 +0000 | [diff] [blame] | 233 | size_t align_step = pointer_raw % alignment; |
| 234 | VIXL_ASSERT((pointer_raw - align_step) % alignment == 0); |
armvixl | 4a102ba | 2014-07-14 09:02:40 +0100 | [diff] [blame] | 235 | |
| 236 | return (T)(pointer_raw - align_step); |
armvixl | b0c8ae2 | 2014-03-21 14:03:59 +0000 | [diff] [blame] | 237 | } |
| 238 | |
armvixl | ad96eda | 2013-06-14 11:42:37 +0100 | [diff] [blame] | 239 | } // namespace vixl |
| 240 | |
| 241 | #endif // VIXL_UTILS_H |