/*
 * Utility compute operations used by translated code.
 *
 * Copyright (c) 2007 Thiemo Seufer
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Portions of this work are licensed under the terms of the GNU GPL,
 * version 2 or later. See the COPYING file in the top-level directory.
 */

#ifndef HOST_UTILS_H
#define HOST_UTILS_H

#include "qemu/bswap.h"
#include "qemu/int128.h"

#ifdef CONFIG_INT128
static inline void mulu64(uint64_t *plow, uint64_t *phigh,
                          uint64_t a, uint64_t b)
{
    __uint128_t r = (__uint128_t)a * b;
    *plow = r;
    *phigh = r >> 64;
}

static inline void muls64(uint64_t *plow, uint64_t *phigh,
                          int64_t a, int64_t b)
{
    __int128_t r = (__int128_t)a * b;
    *plow = r;
    *phigh = r >> 64;
}
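
/*
 * Example (illustrative): a full 64x64->128 multiply, decomposed into
 * the low and high halves that mulu64() returns.
 *
 *     uint64_t lo, hi;
 *     mulu64(&lo, &hi, 0xffffffffffffffffull, 2);
 *     // hi == 1, lo == 0xfffffffffffffffe: (2^64 - 1) * 2 == 2^65 - 2
 */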

/* compute with 96 bit intermediate result: (a*b)/c */
static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
    return (__int128_t)a * b / c;
}
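
/*
 * Example (illustrative): muldiv64() scales a value by a ratio whose
 * intermediate product can exceed 64 bits, e.g. clock conversions.
 * 'ticks' and 'freq_hz' below are hypothetical variables.
 *
 *     uint64_t ns = muldiv64(ticks, 1000000000u, freq_hz);
 *     // nanoseconds = ticks * 10^9 / freq_hz, with a 96-bit
 *     // intermediate product, as long as the quotient fits
 */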

static inline uint64_t divu128(uint64_t *plow, uint64_t *phigh,
                               uint64_t divisor)
{
    __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow;
    __uint128_t result = dividend / divisor;

    *plow = result;
    *phigh = result >> 64;
    return dividend % divisor;
}

static inline int64_t divs128(uint64_t *plow, int64_t *phigh,
                              int64_t divisor)
{
    __int128_t dividend = ((__int128_t)*phigh << 64) | *plow;
    __int128_t result = dividend / divisor;

    *plow = result;
    *phigh = result >> 64;
    return dividend % divisor;
}
#else
void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor);
int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor);

static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
    union {
        uint64_t ll;
        struct {
#if HOST_BIG_ENDIAN
            uint32_t high, low;
#else
            uint32_t low, high;
#endif
        } l;
    } u, res;
    uint64_t rl, rh;

    u.ll = a;
    rl = (uint64_t)u.l.low * (uint64_t)b;
    rh = (uint64_t)u.l.high * (uint64_t)b;
    rh += (rl >> 32);
    res.l.high = rh / c;
    res.l.low = (((rh % c) << 32) + (rl & 0xffffffff)) / c;
    return res.ll;
}
#endif
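
/*
 * Example (illustrative): 128-bit division in place; the quotient replaces
 * the dividend and the remainder is returned.
 *
 *     uint64_t lo = 0, hi = 1;           // dividend = 2^64
 *     uint64_t rem = divu128(&lo, &hi, 10);
 *     // hi == 0, lo == 0x1999999999999999, rem == 6: 2^64 == 10 * lo + 6
 */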

/**
 * clz32 - count leading zeros in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is zero. Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int clz32(uint32_t val)
{
    return val ? __builtin_clz(val) : 32;
}

/**
 * clo32 - count leading ones in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is -1.
 */
static inline int clo32(uint32_t val)
{
    return clz32(~val);
}
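
/*
 * Example (illustrative):
 *
 *     clz32(0x00000001) == 31;   clz32(0x80000000) == 0;   clz32(0) == 32;
 *     clo32(0xf0000000) == 4;    clo32(0xffffffff) == 32;
 */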

/**
 * clz64 - count leading zeros in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is zero. Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int clz64(uint64_t val)
{
    return val ? __builtin_clzll(val) : 64;
}

/**
 * clo64 - count leading ones in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is -1.
 */
static inline int clo64(uint64_t val)
{
    return clz64(~val);
}

/**
 * ctz32 - count trailing zeros in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is zero. Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int ctz32(uint32_t val)
{
    return val ? __builtin_ctz(val) : 32;
}

/**
 * cto32 - count trailing ones in a 32-bit value.
 * @val: The value to search
 *
 * Returns 32 if the value is -1.
 */
static inline int cto32(uint32_t val)
{
    return ctz32(~val);
}

/**
 * ctz64 - count trailing zeros in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is zero. Note that the GCC builtin is
 * undefined if the value is zero.
 */
static inline int ctz64(uint64_t val)
{
    return val ? __builtin_ctzll(val) : 64;
}

/**
 * cto64 - count trailing ones in a 64-bit value.
 * @val: The value to search
 *
 * Returns 64 if the value is -1.
 */
static inline int cto64(uint64_t val)
{
    return ctz64(~val);
}
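
/*
 * Example (illustrative): ctz is a common way to find the lowest set bit,
 * e.g. when iterating over a bitmask.
 *
 *     uint64_t mask = 0x0000000000001400ull;
 *     while (mask) {
 *         int bit = ctz64(mask);    // visits bit 10, then bit 12
 *         mask &= mask - 1;         // clear the lowest set bit
 *     }
 */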

/**
 * clrsb32 - count leading redundant sign bits in a 32-bit value.
 * @val: The value to search
 *
 * Returns the number of bits following the sign bit that are equal to it.
 * No special cases; output range is [0-31].
 */
static inline int clrsb32(uint32_t val)
{
#if __has_builtin(__builtin_clrsb) || !defined(__clang__)
    return __builtin_clrsb(val);
#else
    return clz32(val ^ ((int32_t)val >> 1)) - 1;
#endif
}

/**
 * clrsb64 - count leading redundant sign bits in a 64-bit value.
 * @val: The value to search
 *
 * Returns the number of bits following the sign bit that are equal to it.
 * No special cases; output range is [0-63].
 */
static inline int clrsb64(uint64_t val)
{
#if __has_builtin(__builtin_clrsbll) || !defined(__clang__)
    return __builtin_clrsbll(val);
#else
    return clz64(val ^ ((int64_t)val >> 1)) - 1;
#endif
}
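
/*
 * Example (illustrative): clrsb tells how far a value can be shifted left
 * without changing its sign, useful for sign-extension checks.
 *
 *     clrsb32(0) == 31;            // 0...0: 31 copies of the sign bit follow
 *     clrsb32(-1) == 31;           // 1...1: likewise
 *     clrsb32(1) == 30;            // 0...01
 *     clrsb32(0xffff8000) == 16;   // a sign-extended 16-bit value
 */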

/**
 * ctpop8 - count the population of one bits in an 8-bit value.
 * @val: The value to search
 */
static inline int ctpop8(uint8_t val)
{
    return __builtin_popcount(val);
}

/**
 * ctpop16 - count the population of one bits in a 16-bit value.
 * @val: The value to search
 */
static inline int ctpop16(uint16_t val)
{
    return __builtin_popcount(val);
}

/**
 * ctpop32 - count the population of one bits in a 32-bit value.
 * @val: The value to search
 */
static inline int ctpop32(uint32_t val)
{
    return __builtin_popcount(val);
}

/**
 * ctpop64 - count the population of one bits in a 64-bit value.
 * @val: The value to search
 */
static inline int ctpop64(uint64_t val)
{
    return __builtin_popcountll(val);
}
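
/*
 * Example (illustrative):
 *
 *     ctpop8(0xff) == 8;   ctpop32(0xa5a5a5a5) == 16;   ctpop64(0) == 0;
 */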

/**
 * revbit8 - reverse the bits in an 8-bit value.
 * @x: The value to modify.
 */
static inline uint8_t revbit8(uint8_t x)
{
#if __has_builtin(__builtin_bitreverse8)
    return __builtin_bitreverse8(x);
#else
    /* Assign the correct nibble position. */
    x = ((x & 0xf0) >> 4)
      | ((x & 0x0f) << 4);
    /* Assign the correct bit position. */
    x = ((x & 0x88) >> 3)
      | ((x & 0x44) >> 1)
      | ((x & 0x22) << 1)
      | ((x & 0x11) << 3);
    return x;
#endif
}

/**
 * revbit16 - reverse the bits in a 16-bit value.
 * @x: The value to modify.
 */
static inline uint16_t revbit16(uint16_t x)
{
#if __has_builtin(__builtin_bitreverse16)
    return __builtin_bitreverse16(x);
#else
    /* Assign the correct byte position. */
    x = bswap16(x);
    /* Assign the correct nibble position. */
    x = ((x & 0xf0f0) >> 4)
      | ((x & 0x0f0f) << 4);
    /* Assign the correct bit position. */
    x = ((x & 0x8888) >> 3)
      | ((x & 0x4444) >> 1)
      | ((x & 0x2222) << 1)
      | ((x & 0x1111) << 3);
    return x;
#endif
}

/**
 * revbit32 - reverse the bits in a 32-bit value.
 * @x: The value to modify.
 */
static inline uint32_t revbit32(uint32_t x)
{
#if __has_builtin(__builtin_bitreverse32)
    return __builtin_bitreverse32(x);
#else
    /* Assign the correct byte position. */
    x = bswap32(x);
    /* Assign the correct nibble position. */
    x = ((x & 0xf0f0f0f0u) >> 4)
      | ((x & 0x0f0f0f0fu) << 4);
    /* Assign the correct bit position. */
    x = ((x & 0x88888888u) >> 3)
      | ((x & 0x44444444u) >> 1)
      | ((x & 0x22222222u) << 1)
      | ((x & 0x11111111u) << 3);
    return x;
#endif
}

/**
 * revbit64 - reverse the bits in a 64-bit value.
 * @x: The value to modify.
 */
static inline uint64_t revbit64(uint64_t x)
{
#if __has_builtin(__builtin_bitreverse64)
    return __builtin_bitreverse64(x);
#else
    /* Assign the correct byte position. */
    x = bswap64(x);
    /* Assign the correct nibble position. */
    x = ((x & 0xf0f0f0f0f0f0f0f0ull) >> 4)
      | ((x & 0x0f0f0f0f0f0f0f0full) << 4);
    /* Assign the correct bit position. */
    x = ((x & 0x8888888888888888ull) >> 3)
      | ((x & 0x4444444444444444ull) >> 1)
      | ((x & 0x2222222222222222ull) << 1)
      | ((x & 0x1111111111111111ull) << 3);
    return x;
#endif
}
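
/*
 * Example (illustrative): the fallback halves the swap distance each step,
 * i.e. byte swap, then nibble swap, then bit swap within nibbles.
 *
 *     revbit8(0x01) == 0x80;   revbit16(0x0001) == 0x8000;
 *     revbit32(0x00000002) == 0x40000000;
 */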

/**
 * Return the absolute value of a 64-bit integer as an unsigned 64-bit value
 */
static inline uint64_t uabs64(int64_t v)
{
    /* Negate as unsigned so that v == INT64_MIN does not overflow. */
    return v < 0 ? -(uint64_t)v : (uint64_t)v;
}
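
/*
 * Example (illustrative): unlike negating in signed arithmetic, this is
 * well defined for the most negative value.
 *
 *     uabs64(-5) == 5;
 *     uabs64(INT64_MIN) == 0x8000000000000000ull;
 */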

/**
 * sadd32_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool sadd32_overflow(int32_t x, int32_t y, int32_t *ret)
{
    return __builtin_add_overflow(x, y, ret);
}

/**
 * sadd64_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool sadd64_overflow(int64_t x, int64_t y, int64_t *ret)
{
    return __builtin_add_overflow(x, y, ret);
}

/**
 * uadd32_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool uadd32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
    return __builtin_add_overflow(x, y, ret);
}

/**
 * uadd64_overflow - addition with overflow indication
 * @x, @y: addends
 * @ret: Output for sum
 *
 * Computes *@ret = @x + @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool uadd64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
    return __builtin_add_overflow(x, y, ret);
}

/**
 * ssub32_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool ssub32_overflow(int32_t x, int32_t y, int32_t *ret)
{
    return __builtin_sub_overflow(x, y, ret);
}

/**
 * ssub64_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool ssub64_overflow(int64_t x, int64_t y, int64_t *ret)
{
    return __builtin_sub_overflow(x, y, ret);
}

/**
 * usub32_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool usub32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
    return __builtin_sub_overflow(x, y, ret);
}

/**
 * usub64_overflow - subtraction with overflow indication
 * @x: Minuend
 * @y: Subtrahend
 * @ret: Output for difference
 *
 * Computes *@ret = @x - @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool usub64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
    return __builtin_sub_overflow(x, y, ret);
}

/**
 * smul32_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool smul32_overflow(int32_t x, int32_t y, int32_t *ret)
{
    return __builtin_mul_overflow(x, y, ret);
}

/**
 * smul64_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool smul64_overflow(int64_t x, int64_t y, int64_t *ret)
{
    return __builtin_mul_overflow(x, y, ret);
}

/**
 * umul32_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool umul32_overflow(uint32_t x, uint32_t y, uint32_t *ret)
{
    return __builtin_mul_overflow(x, y, ret);
}

/**
 * umul64_overflow - multiplication with overflow indication
 * @x, @y: Input multipliers
 * @ret: Output for product
 *
 * Computes *@ret = @x * @y, and returns true if and only if that
 * value has been truncated.
 */
static inline bool umul64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
{
    return __builtin_mul_overflow(x, y, ret);
}
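
/*
 * Example (illustrative): the overflow helpers allow checked arithmetic
 * without pre-dividing or widening; 'nmemb' and 'size' below are
 * hypothetical variables.
 *
 *     uint64_t total;
 *     if (umul64_overflow(nmemb, size, &total)) {
 *         return NULL;    // nmemb * size does not fit in 64 bits
 *     }
 *     // otherwise 'total' holds the exact product
 */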

/*
 * Unsigned 128x64 multiplication.
 * Returns true if the product overflowed (i.e. was truncated to) 128 bits.
 * Otherwise, returns false and the multiplication result via plow and phigh.
 */
static inline bool mulu128(uint64_t *plow, uint64_t *phigh, uint64_t factor)
{
#if defined(CONFIG_INT128)
    bool res;
    __uint128_t r;
    __uint128_t f = ((__uint128_t)*phigh << 64) | *plow;
    res = __builtin_mul_overflow(f, factor, &r);

    *plow = r;
    *phigh = r >> 64;

    return res;
#else
    uint64_t dhi = *phigh;
    uint64_t dlo = *plow;
    uint64_t ahi;
    uint64_t blo, bhi;

    if (dhi == 0) {
        mulu64(plow, phigh, dlo, factor);
        return false;
    }

    mulu64(plow, &ahi, dlo, factor);
    mulu64(&blo, &bhi, dhi, factor);

    return uadd64_overflow(ahi, blo, phigh) || bhi != 0;
#endif
}
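
/*
 * Example (illustrative): multiply a 128-bit value held in lo/hi by a
 * 64-bit factor, in place.
 *
 *     uint64_t lo = 0, hi = 1;              // value = 2^64
 *     bool ovf = mulu128(&lo, &hi, 3);      // value = 3 * 2^64
 *     // ovf == false, hi == 3, lo == 0
 */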

/**
 * uadd64_carry - addition with carry-in and carry-out
 * @x, @y: addends
 * @pcarry: in-out carry value
 *
 * Computes @x + @y + *@pcarry, placing the carry-out back
 * into *@pcarry and returning the 64-bit sum.
 */
static inline uint64_t uadd64_carry(uint64_t x, uint64_t y, bool *pcarry)
{
#if __has_builtin(__builtin_addcll)
    unsigned long long c = *pcarry;
    x = __builtin_addcll(x, y, c, &c);
    *pcarry = c & 1;
    return x;
#else
    bool c = *pcarry;
    /* This is clang's internal expansion of __builtin_addc. */
    c = uadd64_overflow(x, c, &x);
    c |= uadd64_overflow(x, y, &x);
    *pcarry = c;
    return x;
#endif
}

/**
 * usub64_borrow - subtraction with borrow-in and borrow-out
 * @x: Minuend
 * @y: Subtrahend
 * @pborrow: in-out borrow value
 *
 * Computes @x - @y - *@pborrow, placing the borrow-out back
 * into *@pborrow and returning the 64-bit difference.
 */
static inline uint64_t usub64_borrow(uint64_t x, uint64_t y, bool *pborrow)
{
#if __has_builtin(__builtin_subcll)
    unsigned long long b = *pborrow;
    x = __builtin_subcll(x, y, b, &b);
    *pborrow = b & 1;
    return x;
#else
    bool b = *pborrow;
    b = usub64_overflow(x, b, &x);
    b |= usub64_overflow(x, y, &x);
    *pborrow = b;
    return x;
#endif
}
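
/*
 * Example (illustrative): multi-word arithmetic chains the carry through
 * successive limbs, as in this sketch of a 256-bit add over hypothetical
 * 4-limb arrays 'a', 'b', 'out' of uint64_t, least significant limb first.
 *
 *     bool carry = false;
 *     for (int i = 0; i < 4; i++) {
 *         out[i] = uadd64_carry(a[i], b[i], &carry);
 *     }
 *     // 'carry' now holds the carry-out of the 256-bit addition
 */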

/* Host type specific sizes of these routines. */

#if ULONG_MAX == UINT32_MAX
# define clzl clz32
# define ctzl ctz32
# define clol clo32
# define ctol cto32
# define ctpopl ctpop32
# define revbitl revbit32
#elif ULONG_MAX == UINT64_MAX
# define clzl clz64
# define ctzl ctz64
# define clol clo64
# define ctol cto64
# define ctpopl ctpop64
# define revbitl revbit64
#else
# error Unknown sizeof long
#endif

static inline bool is_power_of_2(uint64_t value)
{
    if (!value) {
        return false;
    }

    return !(value & (value - 1));
}

/**
 * Return @value rounded down to the nearest power of two or zero.
 */
static inline uint64_t pow2floor(uint64_t value)
{
    if (!value) {
        /* Avoid undefined shift by 64 */
        return 0;
    }
    return 0x8000000000000000ull >> clz64(value);
}

/*
 * Return @value rounded up to the nearest power of two modulo 2^64.
 * This is *zero* for @value > 2^63, so be careful.
 */
static inline uint64_t pow2ceil(uint64_t value)
{
    int n = clz64(value - 1);

    if (!n) {
        /*
         * @value - 1 has no leading zeroes, thus @value - 1 >= 2^63
         * Therefore, either @value == 0 or @value > 2^63.
         * If it's 0, return 1, else return 0.
         */
        return !value;
    }
    return 0x8000000000000000ull >> (n - 1);
}
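
/*
 * Example (illustrative):
 *
 *     is_power_of_2(64) == true;   is_power_of_2(0) == false;
 *     pow2floor(100) == 64;        pow2ceil(100) == 128;
 *     pow2floor(1) == 1;           pow2ceil(0) == 1;
 */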

/**
 * Return the smallest power of two strictly greater than @x, modulo 2^32:
 * this maps an exact power of two to the next one up, returns 1 for
 * @x == 0, and wraps to 0 for @x >= 0x80000000.
 */
static inline uint32_t pow2roundup32(uint32_t x)
{
    x |= (x >> 1);
    x |= (x >> 2);
    x |= (x >> 4);
    x |= (x >> 8);
    x |= (x >> 16);
    return x + 1;
}

/**
 * urshift - 128-bit Unsigned Right Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 *
 * Result is zero-extended and stored in plow/phigh, which are
 * input/output variables. Shift values outside the range are
 * taken modulo 128. In other words, the caller is responsible
 * for verifying/asserting both the shift range and the plow/phigh
 * pointers.
 */
void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift);

/**
 * ulshift - 128-bit Unsigned Left Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 * @overflow: out - true if any 1-bit is shifted out.
 *
 * Result is zero-extended and stored in plow/phigh, which are
 * input/output variables. Shift values outside the range are
 * taken modulo 128. In other words, the caller is responsible
 * for verifying/asserting both the shift range and the plow/phigh
 * pointers.
 */
void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow);

/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd
 * (https://gmplib.org/repo/gmp/file/tip/longlong.h)
 *
 * Licensed under the GPLv2/LGPLv3
 *
 * Divides the 128-bit value n1:n0 by d, returning the quotient and
 * storing the remainder via r.  The quotient must fit in 64 bits,
 * i.e. n1 < d is required.
 */
static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1,
                                  uint64_t n0, uint64_t d)
{
#if defined(__x86_64__)
    uint64_t q;
    asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d));
    return q;
#elif defined(__s390x__) && !defined(__clang__)
    /* Need to use a TImode type to get an even register pair for DLGR. */
    unsigned __int128 n = (unsigned __int128)n1 << 64 | n0;
    asm("dlgr %0, %1" : "+r"(n) : "r"(d));
    *r = n >> 64;
    return n;
#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7)
    /* From Power ISA 2.06, programming note for divdeu. */
    uint64_t q1, q2, Q, r1, r2, R;
    asm("divdeu %0,%2,%4; divdu %1,%3,%4"
        : "=&r"(q1), "=r"(q2)
        : "r"(n1), "r"(n0), "r"(d));
    r1 = -(q1 * d);       /* low part of (n1<<64) - (q1 * d) */
    r2 = n0 - (q2 * d);
    Q = q1 + q2;
    R = r1 + r2;
    if (R >= d || R < r2) { /* overflow implies R > d */
        Q += 1;
        R -= d;
    }
    *r = R;
    return Q;
#else
    uint64_t d0, d1, q0, q1, r1, r0, m;

    d0 = (uint32_t)d;
    d1 = d >> 32;

    r1 = n1 % d1;
    q1 = n1 / d1;
    m = q1 * d0;
    r1 = (r1 << 32) | (n0 >> 32);
    if (r1 < m) {
        q1 -= 1;
        r1 += d;
        if (r1 >= d) {
            if (r1 < m) {
                q1 -= 1;
                r1 += d;
            }
        }
    }
    r1 -= m;

    r0 = r1 % d1;
    q0 = r1 / d1;
    m = q0 * d0;
    r0 = (r0 << 32) | (uint32_t)n0;
    if (r0 < m) {
        q0 -= 1;
        r0 += d;
        if (r0 >= d) {
            if (r0 < m) {
                q0 -= 1;
                r0 += d;
            }
        }
    }
    r0 -= m;

    *r = r0;
    return (q1 << 32) | q0;
#endif
}
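
/*
 * Example (illustrative): divide the 128-bit value 2^64 (n1 = 1, n0 = 0)
 * by 2^63.  Note that, as in GMP, the portable C fallback additionally
 * expects a normalized divisor (most significant bit set), which this
 * divisor satisfies.
 *
 *     uint64_t rem;
 *     uint64_t q = udiv_qrnnd(&rem, 1, 0, 0x8000000000000000ull);
 *     // q == 2, rem == 0: 2^64 == 2 * 2^63
 */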

Int128 divu256(Int128 *plow, Int128 *phigh, Int128 divisor);
Int128 divs256(Int128 *plow, Int128 *phigh, Int128 divisor);
#endif