/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "tcg/tcg-op.h"
#include "tcg-internal.h"

#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)

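/*
 * For example, CASE_OP_32_64(add) expands via glue() to
 *     case INDEX_op_add_i32:
 *     case INDEX_op_add_i64
 * and CASE_OP_32_64_VEC(add) additionally covers INDEX_op_add_vec,
 * so a single switch arm handles every width of the same operation.
 */
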
typedef struct TempOptInfo {
    bool is_const;
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    uint64_t val;
    uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
} TempOptInfo;

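/*
 * The prev_copy/next_copy fields link every temp known to hold the
 * same value into a circular doubly-linked list; a temp whose
 * next_copy points back at itself is in a singleton list, i.e. it
 * has no known copies.  z_mask example: a constant temp with
 * val == 0x00ff also has z_mask == 0x00ff, meaning bits 8 and up
 * are known to be zero; z_mask == -1 means nothing is known.
 */
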
typedef struct OptContext {
    TCGContext *tcg;
    TCGOp *prev_mb;
    TCGTempSet temps_used;

    /* In flight values from optimization. */
    uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
    uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
    TCGType type;
} OptContext;

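/*
 * The in-flight a_mask/z_mask pair is consumed by fold_masks() below:
 * a_mask == 0 proves the result is identical to the first input, so
 * the op folds to a mov, while z_mask == 0 proves every result bit
 * is zero, so the op folds to a "mov 0".
 */
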
static inline TempOptInfo *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline TempOptInfo *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ts_info(ts)->is_const;
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}

/* Reset TEMP's state, possibly removing the temp from the list of copies.  */
static void reset_ts(TCGTemp *ts)
{
    TempOptInfo *ti = ts_info(ts);
    TempOptInfo *pi = ts_info(ti->prev_copy);
    TempOptInfo *ni = ts_info(ti->next_copy);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->z_mask = -1;
}

static void reset_temp(TCGArg arg)
{
    reset_ts(arg_temp(arg));
}

/* Initialize and activate a temporary.  */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    TempOptInfo *ti;

    if (test_bit(idx, ctx->temps_used.l)) {
        return;
    }
    set_bit(idx, ctx->temps_used.l);

    ti = ts->state_ptr;
    if (ti == NULL) {
        ti = tcg_malloc(sizeof(TempOptInfo));
        ts->state_ptr = ti;
    }

    ti->next_copy = ts;
    ti->prev_copy = ts;
    if (ts->kind == TEMP_CONST) {
        ti->is_const = true;
        ti->val = ts->val;
        ti->z_mask = ts->val;
        if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) {
            /* High bits of a 32-bit quantity are garbage.  */
            ti->z_mask |= ~0xffffffffull;
        }
    } else {
        ti->is_const = false;
        ti->z_mask = -1;
    }
}

static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
{
    TCGTemp *i, *g, *l;

    /* If this is already readonly, we can't do better. */
    if (temp_readonly(ts)) {
        return ts;
    }

    g = l = NULL;
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        if (temp_readonly(i)) {
            return i;
        } else if (i->kind > ts->kind) {
            if (i->kind == TEMP_GLOBAL) {
                g = i;
            } else if (i->kind == TEMP_LOCAL) {
                l = i;
            }
        }
    }

    /* If we didn't find a better representation, return the same temp. */
    return g ? g : l ? l : ts;
}

static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}

static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    TempOptInfo *di;
    TempOptInfo *si;
    uint64_t z_mask;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }

    reset_ts(dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);

    switch (ctx->type) {
    case TCG_TYPE_I32:
        new_op = INDEX_op_mov_i32;
        break;
    case TCG_TYPE_I64:
        new_op = INDEX_op_mov_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /* TCGOP_VECL and TCGOP_VECE remain unchanged.  */
        new_op = INDEX_op_mov_vec;
        break;
    default:
        g_assert_not_reached();
    }
    op->opc = new_op;
    op->args[0] = dst;
    op->args[1] = src;

    z_mask = si->z_mask;
    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
        /* High bits of the destination are now garbage.  */
        z_mask |= ~0xffffffffull;
    }
    di->z_mask = z_mask;

    if (src_ts->type == dst_ts->type) {
        TempOptInfo *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;
    }
    return true;
}

static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                             TCGArg dst, uint64_t val)
{
    /* Convert movi to mov with constant temp. */
    TCGTemp *tv = tcg_constant_internal(ctx->type, val);

    init_ts_info(ctx, tv);
    return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
}

static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

    switch (op) {
    CASE_OP_32_64(add):
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    CASE_OP_32_64(and):
        return x & y;

    CASE_OP_32_64(or):
        return x | y;

    CASE_OP_32_64(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    CASE_OP_32_64(andc):
        return x & ~y;

    CASE_OP_32_64(orc):
        return x | ~y;

    CASE_OP_32_64(eqv):
        return ~(x ^ y);

    CASE_OP_32_64(nand):
        return ~(x & y);

    CASE_OP_32_64(nor):
        return ~(x | y);

    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    CASE_OP_32_64(bswap16):
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

    CASE_OP_32_64(bswap32):
        x = bswap32(x);
        return y & TCG_BSWAP_OS ? (int32_t)x : x;

    case INDEX_op_bswap64_i64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        fprintf(stderr,
                "Unrecognized operation %d in do_constant_folding.\n", op);
        tcg_abort();
    }
}

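/*
 * Note that the shift and rotate cases above mask the count to the
 * operand width (y & 31 or y & 63), and the division cases substitute
 * 1 for a zero divisor ("? : 1"): both folds would otherwise be
 * undefined behavior in C, and any deterministic result is acceptable
 * since the TCG opcode leaves such inputs unspecified.  For example,
 * shl_i32 with x == 1, y == 33 folds to 1 << (33 & 31) == 2.
 */
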
static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
                                    uint64_t x, uint64_t y)
{
    uint64_t res = do_constant_folding_2(op, x, y);
    if (type == TCG_TYPE_I32) {
        res = (int32_t)res;
    }
    return res;
}

static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}

static bool do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    default:
        tcg_abort();
    }
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond(TCGType type, TCGArg x,
                                    TCGArg y, TCGCond c)
{
    uint64_t xv = arg_info(x)->val;
    uint64_t yv = arg_info(y)->val;

    if (arg_is_const(x) && arg_is_const(y)) {
        switch (type) {
        case TCG_TYPE_I32:
            return do_constant_folding_cond_32(xv, yv, c);
        case TCG_TYPE_I64:
            return do_constant_folding_cond_64(xv, yv, c);
        default:
            /* Only scalar comparisons are optimizable */
            return -1;
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const(y) && yv == 0) {
        switch (c) {
        case TCG_COND_LTU:
            return 0;
        case TCG_COND_GEU:
            return 1;
        default:
            return -1;
        }
    }
    return -1;
}

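/*
 * Example of the constant-y case: for "setcond t0, t1, $0, ltu" the
 * unsigned test x < 0 is false for every x, so the condition folds to
 * 0 even though t1 itself is not constant; likewise "geu $0" always
 * folds to 1.
 */
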
/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
{
    TCGArg al = p1[0], ah = p1[1];
    TCGArg bl = p2[0], bh = p2[1];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);
            return do_constant_folding_cond_64(a, b, c);
        }
        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
                return 0;
            case TCG_COND_GEU:
                return 1;
            default:
                break;
            }
        }
    }
    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        return do_constant_folding_cond_eq(c);
    }
    return -1;
}

/**
 * swap_commutative:
 * @dest: TCGArg of the destination argument, or NO_DEST.
 * @p1: first paired argument
 * @p2: second paired argument
 *
 * If *@p1 is a constant and *@p2 is not, swap.
 * If *@p2 matches @dest, swap.
 * Return true if a swap was performed.
 */

#define NO_DEST  temp_arg(NULL)

static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += arg_is_const(a1);
    sum -= arg_is_const(a2);

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}

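/*
 * Example: "add t0, $5, t1" is canonicalized to "add t0, t1, $5",
 * placing the constant second where the backends expect it; and with
 * dest matching *p2, "add t0, t1, t0" becomes "add t0, t0, t1", the
 * two-address form that non-RISC hosts handle best.
 */
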
static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += arg_is_const(p1[0]);
    sum += arg_is_const(p1[1]);
    sum -= arg_is_const(p2[0]);
    sum -= arg_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}

static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
{
    for (int i = 0; i < nb_args; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts) {
            init_ts_info(ctx, ts);
        }
    }
}

static void copy_propagate(OptContext *ctx, TCGOp *op,
                           int nb_oargs, int nb_iargs)
{
    TCGContext *s = ctx->tcg;

    for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts && ts_is_copy(ts)) {
            op->args[i] = temp_arg(find_better_copy(s, ts));
        }
    }
}

static void finish_folding(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    int i, nb_oargs;

    /*
     * For an opcode that ends a BB, reset all temp data.
     * We do no cross-BB optimization.
     */
    if (def->flags & TCG_OPF_BB_END) {
        memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
        ctx->prev_mb = NULL;
        return;
    }

    nb_oargs = def->nb_oargs;
    for (i = 0; i < nb_oargs; i++) {
        reset_temp(op->args[i]);
        /*
         * Save the corresponding known-zero bits mask for the
         * first output argument (only one supported so far).
         */
        if (i == 0) {
            arg_info(op->args[i])->z_mask = ctx->z_mask;
        }
    }
}

/*
 * The fold_* functions return true when processing is complete,
 * usually by folding the operation to a constant or to a copy,
 * and calling tcg_opt_gen_{mov,movi}.  They may do other things,
 * like collect information about the value produced, for use in
 * optimizing a subsequent operation.
 *
 * These first fold_* functions are all helpers, used by other
 * folders for more specific operations.
 */

static bool fold_const1(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = do_constant_folding(op->opc, ctx->type, t, 0);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_const2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}

static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return fold_const2(ctx, op);
}

static bool fold_masks(OptContext *ctx, TCGOp *op)
{
    uint64_t a_mask = ctx->a_mask;
    uint64_t z_mask = ctx->z_mask;

    /*
     * 32-bit ops generate 32-bit results.  For the result-is-zero test
     * below, we can ignore high bits, but for further optimizations we
     * need to record that the high bits contain garbage.
     */
    if (ctx->type == TCG_TYPE_I32) {
        ctx->z_mask |= MAKE_64BIT_MASK(32, 32);
        a_mask &= MAKE_64BIT_MASK(0, 32);
        z_mask &= MAKE_64BIT_MASK(0, 32);
    }

    if (z_mask == 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
    }
    if (a_mask == 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

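/*
 * Worked example: for "and t0, t1, $0xff", fold_and() below computes
 * z_mask = z1 & 0xff and a_mask = z1 & ~0xff, where z1 is t1's z_mask.
 * If t1 is already known to fit in 8 bits, a_mask is 0 and the op
 * folds to "mov t0, t1"; if instead z1 & 0xff == 0, z_mask is 0 and
 * it folds to "mov t0, $0".
 */
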
/*
 * Convert @op to NOT, if NOT is supported by the host.
 * Return true if the conversion is successful, which will still
 * indicate that the processing is complete.
 */
static bool fold_not(OptContext *ctx, TCGOp *op);
static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
{
    TCGOpcode not_op;
    bool have_not;

    switch (ctx->type) {
    case TCG_TYPE_I32:
        not_op = INDEX_op_not_i32;
        have_not = TCG_TARGET_HAS_not_i32;
        break;
    case TCG_TYPE_I64:
        not_op = INDEX_op_not_i64;
        have_not = TCG_TARGET_HAS_not_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        not_op = INDEX_op_not_vec;
        have_not = TCG_TARGET_HAS_not_vec;
        break;
    default:
        g_assert_not_reached();
    }
    if (have_not) {
        op->opc = not_op;
        op->args[1] = op->args[idx];
        return fold_not(ctx, op);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to @i. */
static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to NOT. */
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
        return fold_to_not(ctx, op, 2);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to @i. */
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to identity. */
static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to NOT. */
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
        return fold_to_not(ctx, op, 1);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to @i. */
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to identity. */
static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/*
 * These outermost fold_<op> functions are sorted alphabetically.
 *
 * The ordering of the transformations should be:
 *   1) those that produce a constant
 *   2) those that produce a copy
 *   3) those that produce information about the result value.
 */

static bool fold_add(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return false;
}

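/*
 * Thus "add t0, t1, t2" with both inputs constant becomes a mov of
 * the folded sum, and "add t0, t1, $0" becomes "mov t0, t1" through
 * fold_xi_to_x (a constant first input is swapped into second place
 * by fold_const2_commutative beforehand).
 */
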
static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
{
    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
        arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
        uint64_t al = arg_info(op->args[2])->val;
        uint64_t ah = arg_info(op->args[3])->val;
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;
        TCGArg rl, rh;
        TCGOp *op2;

        if (ctx->type == TCG_TYPE_I32) {
            uint64_t a = deposit64(al, 32, 32, ah);
            uint64_t b = deposit64(bl, 32, 32, bh);

            if (add) {
                a += b;
            } else {
                a -= b;
            }

            al = sextract64(a, 0, 32);
            ah = sextract64(a, 32, 32);
        } else {
            Int128 a = int128_make128(al, ah);
            Int128 b = int128_make128(bl, bh);

            if (add) {
                a = int128_add(a, b);
            } else {
                a = int128_sub(a, b);
            }

            al = int128_getlo(a);
            ah = int128_gethi(a);
        }

        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = tcg_op_insert_before(ctx->tcg, op, 0);

        tcg_opt_gen_movi(ctx, op, rl, al);
        tcg_opt_gen_movi(ctx, op2, rh, ah);
        return true;
    }
    return false;
}

static bool fold_add2(OptContext *ctx, TCGOp *op)
{
    /* Note that the high and low parts may be independently swapped. */
    swap_commutative(op->args[0], &op->args[2], &op->args[4]);
    swap_commutative(op->args[1], &op->args[3], &op->args[5]);

    return fold_addsub2(ctx, op, true);
}

static bool fold_and(OptContext *ctx, TCGOp *op)
{
    uint64_t z1, z2;

    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_xx_to_x(ctx, op)) {
        return true;
    }

    z1 = arg_info(op->args[1])->z_mask;
    z2 = arg_info(op->args[2])->z_mask;
    ctx->z_mask = z1 & z2;

    /*
     * Known-zeros does not imply known-ones.  Therefore unless
     * arg2 is constant, we can't infer affected bits from it.
     */
    if (arg_is_const(op->args[2])) {
        ctx->a_mask = z1 & ~z2;
    }

    return fold_masks(ctx, op);
}

static bool fold_andc(OptContext *ctx, TCGOp *op)
{
    uint64_t z1;

    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_ix_to_not(ctx, op, -1)) {
        return true;
    }

    z1 = arg_info(op->args[1])->z_mask;

    /*
     * Known-zeros does not imply known-ones.  Therefore unless
     * arg2 is constant, we can't infer anything from it.
     */
    if (arg_is_const(op->args[2])) {
        uint64_t z2 = ~arg_info(op->args[2])->z_mask;
        ctx->a_mask = z1 & ~z2;
        z1 &= z2;
    }
    ctx->z_mask = z1;

    return fold_masks(ctx, op);
}

static bool fold_brcond(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[2];
    int i;

    if (swap_commutative(NO_DEST, &op->args[0], &op->args[1])) {
        op->args[2] = cond = tcg_swap_cond(cond);
    }

    i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
    if (i == 0) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }
    if (i > 0) {
        op->opc = INDEX_op_br;
        op->args[0] = op->args[3];
    }
    return false;
}

static bool fold_brcond2(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[4];
    TCGArg label = op->args[5];
    int i, inv = 0;

    if (swap_commutative2(&op->args[0], &op->args[2])) {
        op->args[4] = cond = tcg_swap_cond(cond);
    }

    i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
    if (i >= 0) {
        goto do_brcond_const;
    }

    switch (cond) {
    case TCG_COND_LT:
    case TCG_COND_GE:
        /*
         * Simplify LT/GE comparisons vs zero to a single compare
         * vs the high word of the input.
         */
        if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 &&
            arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) {
            goto do_brcond_high;
        }
        break;

    case TCG_COND_NE:
        inv = 1;
        QEMU_FALLTHROUGH;
    case TCG_COND_EQ:
        /*
         * Simplify EQ/NE comparisons where one of the pairs
         * can be simplified.
         */
        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
                                     op->args[2], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            goto do_brcond_high;
        }

        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
                                     op->args[3], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            op->opc = INDEX_op_brcond_i32;
            op->args[1] = op->args[2];
            op->args[2] = cond;
            op->args[3] = label;
            break;
        }
        break;

    default:
        break;

    do_brcond_high:
        op->opc = INDEX_op_brcond_i32;
        op->args[0] = op->args[1];
        op->args[1] = op->args[3];
        op->args[2] = cond;
        op->args[3] = label;
        break;

    do_brcond_const:
        if (i == 0) {
            tcg_op_remove(ctx->tcg, op);
            return true;
        }
        op->opc = INDEX_op_br;
        op->args[0] = label;
        break;
    }
    return false;
}

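/*
 * Example: "brcond2 al, ah, $0, $0, lt" tests only the sign of the
 * 64-bit value, so it reduces to "brcond_i32 ah, $0, lt".  For EQ/NE,
 * when one 32-bit half pair is provably equal the test reduces to a
 * brcond_i32 on the other pair, and when one pair is provably unequal
 * the whole branch folds to never taken (EQ) or always taken (NE).
 */
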
static bool fold_bswap(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, sign;

    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    z_mask = arg_info(op->args[1])->z_mask;
    switch (op->opc) {
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        z_mask = bswap16(z_mask);
        sign = INT16_MIN;
        break;
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
        z_mask = bswap32(z_mask);
        sign = INT32_MIN;
        break;
    case INDEX_op_bswap64_i64:
        z_mask = bswap64(z_mask);
        sign = INT64_MIN;
        break;
    default:
        g_assert_not_reached();
    }

    switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
    case TCG_BSWAP_OZ:
        break;
    case TCG_BSWAP_OS:
        /* If the sign bit may be 1, force all the bits above to 1. */
        if (z_mask & sign) {
            z_mask |= sign;
        }
        break;
    default:
        /* The high bits are undefined: force all bits above the sign to 1. */
        z_mask |= sign << 1;
        break;
    }
    ctx->z_mask = z_mask;

    return fold_masks(ctx, op);
}

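/*
 * z_mask example: if the bswap16 input is known to fit in 8 bits
 * (z_mask == 0xff), the swapped result has z_mask == 0xff00.  With
 * TCG_BSWAP_OS, bit 15 of that result may be set, so sign extension
 * forces all higher bits to be treated as possibly set too.
 */
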
static bool fold_call(OptContext *ctx, TCGOp *op)
{
    TCGContext *s = ctx->tcg;
    int nb_oargs = TCGOP_CALLO(op);
    int nb_iargs = TCGOP_CALLI(op);
    int flags, i;

    init_arguments(ctx, op, nb_oargs + nb_iargs);
    copy_propagate(ctx, op, nb_oargs, nb_iargs);

    /* If the function reads or writes globals, reset temp data. */
    flags = tcg_call_flags(op);
    if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
        int nb_globals = s->nb_globals;

        for (i = 0; i < nb_globals; i++) {
            if (test_bit(i, ctx->temps_used.l)) {
                reset_ts(&ctx->tcg->temps[i]);
            }
        }
    }

    /* Reset temp data for outputs. */
    for (i = 0; i < nb_oargs; i++) {
        reset_temp(op->args[i]);
    }

    /* Stop optimizing MB across calls. */
    ctx->prev_mb = NULL;
    return true;
}

static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask;

    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        if (t != 0) {
            t = do_constant_folding(op->opc, ctx->type, t, 0);
            return tcg_opt_gen_movi(ctx, op, op->args[0], t);
        }
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        z_mask = 31;
        break;
    case TCG_TYPE_I64:
        z_mask = 63;
        break;
    default:
        g_assert_not_reached();
    }
    ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;

    return false;
}

static bool fold_ctpop(OptContext *ctx, TCGOp *op)
{
    if (fold_const1(ctx, op)) {
        return true;
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        ctx->z_mask = 32 | 31;
        break;
    case TCG_TYPE_I64:
        ctx->z_mask = 64 | 63;
        break;
    default:
        g_assert_not_reached();
    }
    return false;
}

static bool fold_deposit(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = deposit64(t1, op->args[3], op->args[4], t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }

    ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
                            op->args[3], op->args[4],
                            arg_info(op->args[2])->z_mask);
    return false;
}

static bool fold_divide(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_dup(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;
        t = dup_const(TCGOP_VECE(op), t);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_dup2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
                               arg_info(op->args[2])->val);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    if (args_are_copies(op->args[1], op->args[2])) {
        op->opc = INDEX_op_dup_vec;
        TCGOP_VECE(op) = MO_32;
    }
    return false;
}

static bool fold_eqv(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_xi_to_not(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_extract(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask_old, z_mask;

    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = extract64(t, op->args[2], op->args[3]);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    z_mask_old = arg_info(op->args[1])->z_mask;
    z_mask = extract64(z_mask_old, op->args[2], op->args[3]);
    if (op->args[2] == 0) {
        ctx->a_mask = z_mask_old ^ z_mask;
    }
    ctx->z_mask = z_mask;

    return fold_masks(ctx, op);
}

static bool fold_extract2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t v1 = arg_info(op->args[1])->val;
        uint64_t v2 = arg_info(op->args[2])->val;
        int shr = op->args[3];

        if (op->opc == INDEX_op_extract2_i64) {
            v1 >>= shr;
            v2 <<= 64 - shr;
        } else {
            v1 = (uint32_t)v1 >> shr;
            v2 = (int32_t)v2 << (32 - shr);
        }
        return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
    }
    return false;
}

static bool fold_exts(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask_old, z_mask, sign;
    bool type_change = false;

    if (fold_const1(ctx, op)) {
        return true;
    }

    z_mask_old = z_mask = arg_info(op->args[1])->z_mask;

    switch (op->opc) {
    CASE_OP_32_64(ext8s):
        sign = INT8_MIN;
        z_mask = (uint8_t)z_mask;
        break;
    CASE_OP_32_64(ext16s):
        sign = INT16_MIN;
        z_mask = (uint16_t)z_mask;
        break;
    case INDEX_op_ext_i32_i64:
        type_change = true;
        QEMU_FALLTHROUGH;
    case INDEX_op_ext32s_i64:
        sign = INT32_MIN;
        z_mask = (uint32_t)z_mask;
        break;
    default:
        g_assert_not_reached();
    }

    if (z_mask & sign) {
        z_mask |= sign;
    } else if (!type_change) {
        ctx->a_mask = z_mask_old ^ z_mask;
    }
    ctx->z_mask = z_mask;

    return fold_masks(ctx, op);
}

static bool fold_extu(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask_old, z_mask;
    bool type_change = false;

    if (fold_const1(ctx, op)) {
        return true;
    }

    z_mask_old = z_mask = arg_info(op->args[1])->z_mask;

    switch (op->opc) {
    CASE_OP_32_64(ext8u):
        z_mask = (uint8_t)z_mask;
        break;
    CASE_OP_32_64(ext16u):
        z_mask = (uint16_t)z_mask;
        break;
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extu_i32_i64:
        type_change = true;
        QEMU_FALLTHROUGH;
    case INDEX_op_ext32u_i64:
        z_mask = (uint32_t)z_mask;
        break;
    case INDEX_op_extrh_i64_i32:
        type_change = true;
        z_mask >>= 32;
        break;
    default:
        g_assert_not_reached();
    }

    ctx->z_mask = z_mask;
    if (!type_change) {
        ctx->a_mask = z_mask_old ^ z_mask;
    }
    return fold_masks(ctx, op);
}

static bool fold_mb(OptContext *ctx, TCGOp *op)
{
    /* Eliminate duplicate and redundant fence instructions. */
    if (ctx->prev_mb) {
        /*
         * Merge two barriers of the same type into one,
         * or a weaker barrier into a stronger one,
         * or two weaker barriers into a stronger one.
         *   mb X; mb Y => mb X|Y
         *   mb; strl => mb; st
         *   ldaq; mb => ld; mb
         *   ldaq; strl => ld; mb; st
         * Other combinations are also merged into a strong
         * barrier.  This is stricter than specified but for
         * the purposes of TCG is better than not optimizing.
         */
        ctx->prev_mb->args[0] |= op->args[0];
        tcg_op_remove(ctx->tcg, op);
    } else {
        ctx->prev_mb = op;
    }
    return true;
}

static bool fold_mov(OptContext *ctx, TCGOp *op)
{
    return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
}

static bool fold_movcond(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[5];
    int i;

    if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
        op->args[5] = cond = tcg_swap_cond(cond);
    }
    /*
     * Canonicalize the "false" input reg to match the destination reg so
     * that the tcg backend can implement a "move if true" operation.
     */
    if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
        op->args[5] = cond = tcg_invert_cond(cond);
    }

    i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
    if (i >= 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
    }

    ctx->z_mask = arg_info(op->args[3])->z_mask
                | arg_info(op->args[4])->z_mask;

    if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
        uint64_t tv = arg_info(op->args[3])->val;
        uint64_t fv = arg_info(op->args[4])->val;
        TCGOpcode opc;

        switch (ctx->type) {
        case TCG_TYPE_I32:
            opc = INDEX_op_setcond_i32;
            break;
        case TCG_TYPE_I64:
            opc = INDEX_op_setcond_i64;
            break;
        default:
            g_assert_not_reached();
        }

        if (tv == 1 && fv == 0) {
            op->opc = opc;
            op->args[3] = cond;
        } else if (fv == 1 && tv == 0) {
            op->opc = opc;
            op->args[3] = tcg_invert_cond(cond);
        }
    }
    return false;
}

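/*
 * Example of the constant-value case: "movcond t0, c1, c2, $1, $0,
 * cond" computes exactly the truth value of cond, so it is rewritten
 * as "setcond t0, c1, c2, cond"; with the two constants exchanged,
 * the inverted condition is used instead.
 */
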
static bool fold_mul(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xi_to_i(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_i(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_multiply2(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[2], &op->args[3]);

    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
        uint64_t a = arg_info(op->args[2])->val;
        uint64_t b = arg_info(op->args[3])->val;
        uint64_t h, l;
        TCGArg rl, rh;
        TCGOp *op2;

        switch (op->opc) {
        case INDEX_op_mulu2_i32:
            l = (uint64_t)(uint32_t)a * (uint32_t)b;
            h = (int32_t)(l >> 32);
            l = (int32_t)l;
            break;
        case INDEX_op_muls2_i32:
            l = (int64_t)(int32_t)a * (int32_t)b;
            h = l >> 32;
            l = (int32_t)l;
            break;
        case INDEX_op_mulu2_i64:
            mulu64(&l, &h, a, b);
            break;
        case INDEX_op_muls2_i64:
            muls64(&l, &h, a, b);
            break;
        default:
            g_assert_not_reached();
        }

        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = tcg_op_insert_before(ctx->tcg, op, 0);

        tcg_opt_gen_movi(ctx, op, rl, l);
        tcg_opt_gen_movi(ctx, op2, rh, h);
        return true;
    }
    return false;
}

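/*
 * Example: "mulu2_i32 rl, rh, $0x80000000, $2" has the constant
 * 64-bit product 0x100000000, so it is replaced by "mov rl, $0"
 * plus "mov rh, $1", the second mov going into a newly inserted op.
 */
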
static bool fold_nand(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_not(ctx, op, -1)) {
        return true;
    }
    return false;
}

static bool fold_neg(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask;

    if (fold_const1(ctx, op)) {
        return true;
    }

    /* Set to 1 all bits to the left of the rightmost. */
    z_mask = arg_info(op->args[1])->z_mask;
    ctx->z_mask = -(z_mask & -z_mask);

    /*
     * Because of fold_sub_to_neg, we want to always return true,
     * via finish_folding.
     */
    finish_folding(ctx, op);
    return true;
}

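/* nor(x, 0) == ~x, so it reduces to a not. */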
static bool fold_nor(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_not(ctx, op, 0)) {
        return true;
    }
    return false;
}

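/*
 * not: no z_mask can be recorded, since only known-zero bits are
 * tracked and the zeros of ~x come from the (untracked) ones of x.
 */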
static bool fold_not(OptContext *ctx, TCGOp *op)
{
    if (fold_const1(ctx, op)) {
        return true;
    }

    /* Because of fold_to_not, we want to always return true, via finish. */
    finish_folding(ctx, op);
    return true;
}

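/*
 * or: x | 0 == x and x | x == x; a result bit can be nonzero only
 * if it is possibly nonzero in either input, so the z_masks merge.
 */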
static bool fold_or(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_xx_to_x(ctx, op)) {
        return true;
    }

    ctx->z_mask = arg_info(op->args[1])->z_mask
                | arg_info(op->args[2])->z_mask;
    return fold_masks(ctx, op);
}

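/* orc(x, -1) == x and orc(0, x) == ~x. */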
static bool fold_orc(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_ix_to_not(ctx, op, 0)) {
        return true;
    }
    return false;
}

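/*
 * Guest-memory load: an unsigned load narrower than 64 bits is known
 * to leave the high bits zero, which is recorded in z_mask.
 */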
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
    MemOp mop = get_memop(oi);
    int width = 8 * memop_size(mop);

    if (!(mop & MO_SIGN) && width < 64) {
        ctx->z_mask = MAKE_64BIT_MASK(0, width);
    }

    /* Opcodes that touch guest memory stop the mb optimization. */
    ctx->prev_mb = NULL;
    return false;
}

static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
{
    /* Opcodes that touch guest memory stop the mb optimization. */
    ctx->prev_mb = NULL;
    return false;
}

static bool fold_remainder(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

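/*
 * setcond: a constant operand is canonicalized into the second slot
 * (swapping the condition to match), and since the result is always
 * 0 or 1, only bit 0 of z_mask can be set.
 */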
static bool fold_setcond(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[3];
    int i;

    if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
        op->args[3] = cond = tcg_swap_cond(cond);
    }

    i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
    if (i >= 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }

    ctx->z_mask = 1;
    return false;
}

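/*
 * Double-word setcond, with each operand split into a low/high pair.
 * Signed comparisons against zero depend only on the high word, and
 * for EQ/NE a statically-known half either decides the result or
 * reduces the op to a single-word setcond on the other half.
 */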
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[5];
    int i, inv = 0;

    if (swap_commutative2(&op->args[1], &op->args[3])) {
        op->args[5] = cond = tcg_swap_cond(cond);
    }

    i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
    if (i >= 0) {
        goto do_setcond_const;
    }

    switch (cond) {
    case TCG_COND_LT:
    case TCG_COND_GE:
        /*
         * Simplify LT/GE comparisons vs zero to a single compare
         * vs the high word of the input.
         */
        if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 &&
            arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) {
            goto do_setcond_high;
        }
        break;

    case TCG_COND_NE:
        inv = 1;
        QEMU_FALLTHROUGH;
    case TCG_COND_EQ:
        /*
         * Simplify EQ/NE comparisons where one of the pairs
         * can be simplified.
         */
        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
                                     op->args[3], cond);
        switch (i ^ inv) {
        case 0:
            goto do_setcond_const;
        case 1:
            goto do_setcond_high;
        }

        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
                                     op->args[4], cond);
        switch (i ^ inv) {
        case 0:
            goto do_setcond_const;
        case 1:
            op->args[2] = op->args[3];
            op->args[3] = cond;
            op->opc = INDEX_op_setcond_i32;
            break;
        }
        break;

    default:
        break;

    do_setcond_high:
        op->args[1] = op->args[2];
        op->args[2] = op->args[4];
        op->args[3] = cond;
        op->opc = INDEX_op_setcond_i32;
        break;
    }

    ctx->z_mask = 1;
    return false;

 do_setcond_const:
    return tcg_opt_gen_movi(ctx, op, op->args[0], i);
}

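/*
 * sextract: when the field starts at bit 0 and its sign bit is known
 * zero, the result can differ from the input only in the bits cleared
 * above the field; a_mask records those, so fold_masks can turn the
 * op into a plain copy when none remain.
 */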
static bool fold_sextract(OptContext *ctx, TCGOp *op)
{
    int64_t z_mask_old, z_mask;

    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = sextract64(t, op->args[2], op->args[3]);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    z_mask_old = arg_info(op->args[1])->z_mask;
    z_mask = sextract64(z_mask_old, op->args[2], op->args[3]);
    if (op->args[2] == 0 && z_mask >= 0) {
        ctx->a_mask = z_mask_old ^ z_mask;
    }
    ctx->z_mask = z_mask;

    return fold_masks(ctx, op);
}

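/*
 * Shifts and rotates: 0 shifted by anything is 0, shifting by 0 is
 * the identity, and a constant count lets the known-zero mask be
 * shifted exactly as the value would be.
 */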
static bool fold_shift(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_ix_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }

    if (arg_is_const(op->args[2])) {
        ctx->z_mask = do_constant_folding(op->opc, ctx->type,
                                          arg_info(op->args[1])->z_mask,
                                          arg_info(op->args[2])->val);
        return fold_masks(ctx, op);
    }
    return false;
}

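/* Reduce "sub 0, x" to "neg x" when the target provides a suitable neg. */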
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
{
    TCGOpcode neg_op;
    bool have_neg;

    if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
        return false;
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        neg_op = INDEX_op_neg_i32;
        have_neg = TCG_TARGET_HAS_neg_i32;
        break;
    case TCG_TYPE_I64:
        neg_op = INDEX_op_neg_i64;
        have_neg = TCG_TARGET_HAS_neg_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        neg_op = INDEX_op_neg_vec;
        have_neg = (TCG_TARGET_HAS_neg_vec &&
                    tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
        break;
    default:
        g_assert_not_reached();
    }
    if (have_neg) {
        op->opc = neg_op;
        op->args[1] = op->args[2];
        return fold_neg(ctx, op);
    }
    return false;
}

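/* sub: x - x == 0 and x - 0 == x; 0 - x is handled by fold_sub_to_neg. */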
static bool fold_sub(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_sub_to_neg(ctx, op)) {
        return true;
    }
    return false;
}

static bool fold_sub2(OptContext *ctx, TCGOp *op)
{
    return fold_addsub2(ctx, op, false);
}

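/*
 * Direct host-memory loads (typically from env), as opposed to the
 * qemu_ld ops above that access guest memory: the unsigned variants
 * zero the high bits, which is all we can record.
 */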
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
{
    /* We can't do any folding with a load, but we can record bits. */
    switch (op->opc) {
    CASE_OP_32_64(ld8u):
        ctx->z_mask = MAKE_64BIT_MASK(0, 8);
        break;
    CASE_OP_32_64(ld16u):
        ctx->z_mask = MAKE_64BIT_MASK(0, 16);
        break;
    case INDEX_op_ld32u_i64:
        ctx->z_mask = MAKE_64BIT_MASK(0, 32);
        break;
    default:
        g_assert_not_reached();
    }
    return false;
}

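/*
 * xor: x ^ x == 0, x ^ 0 == x, and x ^ -1 == ~x; as with or, a result
 * bit can be nonzero only if it is possibly nonzero in either input.
 */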
static bool fold_xor(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_xi_to_not(ctx, op, -1)) {
        return true;
    }

    ctx->z_mask = arg_info(op->args[1])->z_mask
                | arg_info(op->args[2])->z_mask;
    return fold_masks(ctx, op);
}

/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
    int nb_temps, i;
    TCGOp *op, *op_next;
    OptContext ctx = { .tcg = s };

    /*
     * Each temp carries a TempOptInfo in its state_ptr.  If the temp
     * holds a constant, its value is kept there; if it is a copy of
     * other temps, the copies are reachable through a doubly linked
     * circular list.
     */

    nb_temps = s->nb_temps;
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = NULL;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def;
        bool done = false;

        /* Calls are special. */
        if (opc == INDEX_op_call) {
            fold_call(&ctx, op);
            continue;
        }

        def = &tcg_op_defs[opc];
        init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
        copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);

        /* Pre-compute the type of the operation. */
        if (def->flags & TCG_OPF_VECTOR) {
            ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op);
        } else if (def->flags & TCG_OPF_64BIT) {
            ctx.type = TCG_TYPE_I64;
        } else {
            ctx.type = TCG_TYPE_I32;
        }

        /* Assume all bits affected, and no bits known zero. */
        ctx.a_mask = -1;
        ctx.z_mask = -1;

        /*
         * Process each opcode.
         * Sorted alphabetically by opcode as much as possible.
         */
        switch (opc) {
        CASE_OP_32_64_VEC(add):
            done = fold_add(&ctx, op);
            break;
        CASE_OP_32_64(add2):
            done = fold_add2(&ctx, op);
            break;
        CASE_OP_32_64_VEC(and):
            done = fold_and(&ctx, op);
            break;
        CASE_OP_32_64_VEC(andc):
            done = fold_andc(&ctx, op);
            break;
        CASE_OP_32_64(brcond):
            done = fold_brcond(&ctx, op);
            break;
        case INDEX_op_brcond2_i32:
            done = fold_brcond2(&ctx, op);
            break;
        CASE_OP_32_64(bswap16):
        CASE_OP_32_64(bswap32):
        case INDEX_op_bswap64_i64:
            done = fold_bswap(&ctx, op);
            break;
        CASE_OP_32_64(clz):
        CASE_OP_32_64(ctz):
            done = fold_count_zeros(&ctx, op);
            break;
        CASE_OP_32_64(ctpop):
            done = fold_ctpop(&ctx, op);
            break;
        CASE_OP_32_64(deposit):
            done = fold_deposit(&ctx, op);
            break;
        CASE_OP_32_64(div):
        CASE_OP_32_64(divu):
            done = fold_divide(&ctx, op);
            break;
        case INDEX_op_dup_vec:
            done = fold_dup(&ctx, op);
            break;
        case INDEX_op_dup2_vec:
            done = fold_dup2(&ctx, op);
            break;
        CASE_OP_32_64(eqv):
            done = fold_eqv(&ctx, op);
            break;
        CASE_OP_32_64(extract):
            done = fold_extract(&ctx, op);
            break;
        CASE_OP_32_64(extract2):
            done = fold_extract2(&ctx, op);
            break;
        CASE_OP_32_64(ext8s):
        CASE_OP_32_64(ext16s):
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            done = fold_exts(&ctx, op);
            break;
        CASE_OP_32_64(ext8u):
        CASE_OP_32_64(ext16u):
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
        case INDEX_op_extrl_i64_i32:
        case INDEX_op_extrh_i64_i32:
            done = fold_extu(&ctx, op);
            break;
        CASE_OP_32_64(ld8u):
        CASE_OP_32_64(ld16u):
        case INDEX_op_ld32u_i64:
            done = fold_tcg_ld(&ctx, op);
            break;
        case INDEX_op_mb:
            done = fold_mb(&ctx, op);
            break;
        CASE_OP_32_64_VEC(mov):
            done = fold_mov(&ctx, op);
            break;
        CASE_OP_32_64(movcond):
            done = fold_movcond(&ctx, op);
            break;
        CASE_OP_32_64(mul):
            done = fold_mul(&ctx, op);
            break;
        CASE_OP_32_64(mulsh):
        CASE_OP_32_64(muluh):
            done = fold_mul_highpart(&ctx, op);
            break;
        CASE_OP_32_64(muls2):
        CASE_OP_32_64(mulu2):
            done = fold_multiply2(&ctx, op);
            break;
        CASE_OP_32_64(nand):
            done = fold_nand(&ctx, op);
            break;
        CASE_OP_32_64(neg):
            done = fold_neg(&ctx, op);
            break;
        CASE_OP_32_64(nor):
            done = fold_nor(&ctx, op);
            break;
        CASE_OP_32_64_VEC(not):
            done = fold_not(&ctx, op);
            break;
        CASE_OP_32_64_VEC(or):
            done = fold_or(&ctx, op);
            break;
        CASE_OP_32_64_VEC(orc):
            done = fold_orc(&ctx, op);
            break;
        case INDEX_op_qemu_ld_i32:
        case INDEX_op_qemu_ld_i64:
            done = fold_qemu_ld(&ctx, op);
            break;
        case INDEX_op_qemu_st_i32:
        case INDEX_op_qemu_st8_i32:
        case INDEX_op_qemu_st_i64:
            done = fold_qemu_st(&ctx, op);
            break;
        CASE_OP_32_64(rem):
        CASE_OP_32_64(remu):
            done = fold_remainder(&ctx, op);
            break;
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
            done = fold_shift(&ctx, op);
            break;
        CASE_OP_32_64(setcond):
            done = fold_setcond(&ctx, op);
            break;
        case INDEX_op_setcond2_i32:
            done = fold_setcond2(&ctx, op);
            break;
        CASE_OP_32_64(sextract):
            done = fold_sextract(&ctx, op);
            break;
        CASE_OP_32_64_VEC(sub):
            done = fold_sub(&ctx, op);
            break;
        CASE_OP_32_64(sub2):
            done = fold_sub2(&ctx, op);
            break;
        CASE_OP_32_64_VEC(xor):
            done = fold_xor(&ctx, op);
            break;
        default:
            break;
        }

        if (!done) {
            finish_folding(&ctx, op);
        }
    }
}