/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "tcg/tcg-op.h"
#include "tcg-internal.h"

#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)

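/*
 * For reference, CASE_OP_32_64(add) expands to the pair of labels
 * "case INDEX_op_add_i32: case INDEX_op_add_i64:", so a single switch
 * arm below handles both widths of an opcode; CASE_OP_32_64_VEC adds
 * the _vec variant as a third label.
 */
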
typedef struct TempOptInfo {
    bool is_const;
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    uint64_t val;
    uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
} TempOptInfo;

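/*
 * Illustration of the z_mask invariant (hypothetical values): a zero
 * bit in z_mask means the corresponding value bit is known to be zero.
 * For the constant 0x25, z_mask is simply 0x25: every clear bit is
 * known zero.  For a value produced by "and x, 0xff", z_mask would be
 * 0xff, since bits 8..63 must be zero while bits 0..7 stay unknown.
 */
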
typedef struct OptContext {
    TCGContext *tcg;
    TCGOp *prev_mb;
    TCGTempSet temps_used;

    /* In flight values from optimization. */
    uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
    uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
    TCGType type;
} OptContext;

static inline TempOptInfo *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline TempOptInfo *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ts_info(ts)->is_const;
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}

/* Reset TEMP's state, possibly removing the temp from the list of copies. */
static void reset_ts(TCGTemp *ts)
{
    TempOptInfo *ti = ts_info(ts);
    TempOptInfo *pi = ts_info(ti->prev_copy);
    TempOptInfo *ni = ts_info(ti->next_copy);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->z_mask = -1;
}

static void reset_temp(TCGArg arg)
{
    reset_ts(arg_temp(arg));
}

/* Initialize and activate a temporary. */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    TempOptInfo *ti;

    if (test_bit(idx, ctx->temps_used.l)) {
        return;
    }
    set_bit(idx, ctx->temps_used.l);

    ti = ts->state_ptr;
    if (ti == NULL) {
        ti = tcg_malloc(sizeof(TempOptInfo));
        ts->state_ptr = ti;
    }

    ti->next_copy = ts;
    ti->prev_copy = ts;
    if (ts->kind == TEMP_CONST) {
        ti->is_const = true;
        ti->val = ts->val;
        ti->z_mask = ts->val;
    } else {
        ti->is_const = false;
        ti->z_mask = -1;
    }
}

static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
{
    TCGTemp *i, *g, *l;

    /* If this is already readonly, we can't do better. */
    if (temp_readonly(ts)) {
        return ts;
    }

    g = l = NULL;
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        if (temp_readonly(i)) {
            return i;
        } else if (i->kind > ts->kind) {
            if (i->kind == TEMP_GLOBAL) {
                g = i;
            } else if (i->kind == TEMP_LOCAL) {
                l = i;
            }
        }
    }

    /* If we didn't find a better representation, return the same temp. */
    return g ? g : l ? l : ts;
}

static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}

static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    TempOptInfo *di;
    TempOptInfo *si;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }

    reset_ts(dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);

    switch (ctx->type) {
    case TCG_TYPE_I32:
        new_op = INDEX_op_mov_i32;
        break;
    case TCG_TYPE_I64:
        new_op = INDEX_op_mov_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
        new_op = INDEX_op_mov_vec;
        break;
    default:
        g_assert_not_reached();
    }
    op->opc = new_op;
    op->args[0] = dst;
    op->args[1] = src;

    di->z_mask = si->z_mask;

    if (src_ts->type == dst_ts->type) {
        TempOptInfo *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;
    }
    return true;
}

static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                             TCGArg dst, uint64_t val)
{
    TCGTemp *tv;

    if (ctx->type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    /* Convert movi to mov with constant temp. */
    tv = tcg_constant_internal(ctx->type, val);
    init_ts_info(ctx, tv);
    return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
}

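/*
 * Illustration (with hypothetical temps): folding "add_i32 t2, t0, t1"
 * where t0 = 5 and t1 = 7 ends up here as a request to set t2 to the
 * constant 12.  Rather than emit a movi, the pass rewrites the op in
 * place into "mov_i32 t2, <const 12>", where the source is a TEMP_CONST
 * temp, and records t2 as a copy of that constant for later propagation.
 */
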
static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

    switch (op) {
    CASE_OP_32_64(add):
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    CASE_OP_32_64(and):
        return x & y;

    CASE_OP_32_64(or):
        return x | y;

    CASE_OP_32_64(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    CASE_OP_32_64(andc):
        return x & ~y;

    CASE_OP_32_64(orc):
        return x | ~y;

    CASE_OP_32_64(eqv):
        return ~(x ^ y);

    CASE_OP_32_64(nand):
        return ~(x & y);

    CASE_OP_32_64(nor):
        return ~(x | y);

    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    CASE_OP_32_64(bswap16):
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

    CASE_OP_32_64(bswap32):
        x = bswap32(x);
        return y & TCG_BSWAP_OS ? (int32_t)x : x;

    case INDEX_op_bswap64_i64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        fprintf(stderr,
                "Unrecognized operation %d in do_constant_folding.\n", op);
        tcg_abort();
    }
}

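/*
 * Note the explicit operand masking on the shift and rotate cases
 * above: shift counts are only meaningful for 0..31 (i32) or 0..63
 * (i64), so, for example, a constant "shl_i32 x, 1, 33" folds here as
 * 1 << (33 & 31) == 2 rather than invoking C undefined behavior on an
 * oversized shift count.
 */
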
static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
                                    uint64_t x, uint64_t y)
{
    uint64_t res = do_constant_folding_2(op, x, y);
    if (type == TCG_TYPE_I32) {
        res = (int32_t)res;
    }
    return res;
}

static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}

static bool do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    default:
        tcg_abort();
    }
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond(TCGType type, TCGArg x,
                                    TCGArg y, TCGCond c)
{
    uint64_t xv = arg_info(x)->val;
    uint64_t yv = arg_info(y)->val;

    if (arg_is_const(x) && arg_is_const(y)) {
        switch (type) {
        case TCG_TYPE_I32:
            return do_constant_folding_cond_32(xv, yv, c);
        case TCG_TYPE_I64:
            return do_constant_folding_cond_64(xv, yv, c);
        default:
            /* Only scalar comparisons are optimizable */
            return -1;
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const(y) && yv == 0) {
        switch (c) {
        case TCG_COND_LTU:
            return 0;
        case TCG_COND_GEU:
            return 1;
        default:
            return -1;
        }
    }
    return -1;
}

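/*
 * Example of the final case above: "ltu x, 0" is false and "geu x, 0"
 * is true for every x, because no unsigned value is below zero.  This
 * lets a setcond or brcond against a constant zero fold even when x
 * itself is unknown.
 */
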
/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
{
    TCGArg al = p1[0], ah = p1[1];
    TCGArg bl = p2[0], bh = p2[1];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);
            return do_constant_folding_cond_64(a, b, c);
        }
        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
                return 0;
            case TCG_COND_GEU:
                return 1;
            default:
                break;
            }
        }
    }
    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        return do_constant_folding_cond_eq(c);
    }
    return -1;
}

/**
 * swap_commutative:
 * @dest: TCGArg of the destination argument, or NO_DEST.
 * @p1: first paired argument
 * @p2: second paired argument
 *
 * If *@p1 is a constant and *@p2 is not, swap.
 * If *@p2 matches @dest, swap.
 * Return true if a swap was performed.
 */

#define NO_DEST  temp_arg(NULL)

static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += arg_is_const(a1);
    sum -= arg_is_const(a2);

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}

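/*
 * Illustration (hypothetical temps): swap_commutative turns
 * "add_i32 t0, <const 5>, t1" into "add_i32 t0, t1, <const 5>", so
 * later folders only need to check op->args[2] for a constant.  When
 * neither input is constant, "add_i32 t0, t1, t0" becomes
 * "add_i32 t0, t0, t1", the two-address form non-RISC hosts prefer.
 */
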
static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += arg_is_const(p1[0]);
    sum += arg_is_const(p1[1]);
    sum -= arg_is_const(p2[0]);
    sum -= arg_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}

static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
{
    for (int i = 0; i < nb_args; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts) {
            init_ts_info(ctx, ts);
        }
    }
}

static void copy_propagate(OptContext *ctx, TCGOp *op,
                           int nb_oargs, int nb_iargs)
{
    TCGContext *s = ctx->tcg;

    for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts && ts_is_copy(ts)) {
            op->args[i] = temp_arg(find_better_copy(s, ts));
        }
    }
}

static void finish_folding(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    int i, nb_oargs;

    /*
     * For an opcode that ends a BB, reset all temp data.
     * We do no cross-BB optimization.
     */
    if (def->flags & TCG_OPF_BB_END) {
        memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
        ctx->prev_mb = NULL;
        return;
    }

    nb_oargs = def->nb_oargs;
    for (i = 0; i < nb_oargs; i++) {
        reset_temp(op->args[i]);
        /*
         * Save the corresponding known-zero bits mask for the
         * first output argument (only one supported so far).
         */
        if (i == 0) {
            arg_info(op->args[i])->z_mask = ctx->z_mask;
        }
    }
}

/*
 * The fold_* functions return true when processing is complete,
 * usually by folding the operation to a constant or to a copy,
 * and calling tcg_opt_gen_{mov,movi}.  They may do other things,
 * like collect information about the value produced, for use in
 * optimizing a subsequent operation.
 *
 * These first fold_* functions are all helpers, used by other
 * folders for more specific operations.
 */

static bool fold_const1(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = do_constant_folding(op->opc, ctx->type, t, 0);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_const2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}

static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return fold_const2(ctx, op);
}

static bool fold_masks(OptContext *ctx, TCGOp *op)
{
    uint64_t a_mask = ctx->a_mask;
    uint64_t z_mask = ctx->z_mask;

    /*
     * 32-bit ops generate 32-bit results, which for the purpose of
     * simplifying tcg are sign-extended.  Certainly that's how we
     * represent our constants elsewhere.  Note that the bits will
     * be reset properly for a 64-bit value when encountering the
     * type changing opcodes.
     */
    if (ctx->type == TCG_TYPE_I32) {
        a_mask = (int32_t)a_mask;
        z_mask = (int32_t)z_mask;
        ctx->z_mask = z_mask;
    }

    if (z_mask == 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
    }
    if (a_mask == 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

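/*
 * Worked example for fold_masks (hypothetical masks): for
 * "and t0, t1, t2" where t1 has z_mask 0x0f and t2 is the constant
 * 0xf0, the result's z_mask is 0x0f & 0xf0 == 0, so the op folds to
 * "mov t0, 0".  If instead t2 were 0xff, a_mask would be
 * 0x0f & ~0xff == 0, meaning no bit of t1 can change, and the op
 * folds to a plain copy of t1.
 */
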
/*
 * Convert @op to NOT, if NOT is supported by the host.
 * Return true if the conversion is successful, which will still
 * indicate that the processing is complete.
 */
static bool fold_not(OptContext *ctx, TCGOp *op);
static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
{
    TCGOpcode not_op;
    bool have_not;

    switch (ctx->type) {
    case TCG_TYPE_I32:
        not_op = INDEX_op_not_i32;
        have_not = TCG_TARGET_HAS_not_i32;
        break;
    case TCG_TYPE_I64:
        not_op = INDEX_op_not_i64;
        have_not = TCG_TARGET_HAS_not_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        not_op = INDEX_op_not_vec;
        have_not = TCG_TARGET_HAS_not_vec;
        break;
    default:
        g_assert_not_reached();
    }
    if (have_not) {
        op->opc = not_op;
        op->args[1] = op->args[idx];
        return fold_not(ctx, op);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to @i. */
static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to NOT. */
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
        return fold_to_not(ctx, op, 2);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to @i. */
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to identity. */
static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to NOT. */
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
        return fold_to_not(ctx, op, 1);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to @i. */
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to identity. */
static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/*
 * These outermost fold_<op> functions are sorted alphabetically.
 *
 * The ordering of the transformations should be:
 *   1) those that produce a constant
 *   2) those that produce a copy
 *   3) those that produce information about the result value.
 */

static bool fold_add(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
{
    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
        arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
        uint64_t al = arg_info(op->args[2])->val;
        uint64_t ah = arg_info(op->args[3])->val;
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;
        TCGArg rl, rh;
        TCGOp *op2;

        if (ctx->type == TCG_TYPE_I32) {
            uint64_t a = deposit64(al, 32, 32, ah);
            uint64_t b = deposit64(bl, 32, 32, bh);

            if (add) {
                a += b;
            } else {
                a -= b;
            }

            al = sextract64(a, 0, 32);
            ah = sextract64(a, 32, 32);
        } else {
            Int128 a = int128_make128(al, ah);
            Int128 b = int128_make128(bl, bh);

            if (add) {
                a = int128_add(a, b);
            } else {
                a = int128_sub(a, b);
            }

            al = int128_getlo(a);
            ah = int128_gethi(a);
        }

        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = tcg_op_insert_before(ctx->tcg, op, 0);

        tcg_opt_gen_movi(ctx, op, rl, al);
        tcg_opt_gen_movi(ctx, op2, rh, ah);
        return true;
    }
    return false;
}

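/*
 * Illustration: for TCG_TYPE_I32, "add2 rl, rh, al, ah, bl, bh"
 * computes a 64-bit sum from 32-bit halves.  With all four inputs
 * constant, e.g. al=0xffffffff, ah=0, bl=1, bh=0, the pair is
 * reassembled as 0xffffffff + 1 == 0x1_00000000, then split back so
 * the op becomes two movi: rl = 0 and rh = 1.  The 64-bit variant
 * does the same arithmetic in 128 bits via Int128.
 */
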
static bool fold_add2(OptContext *ctx, TCGOp *op)
{
    /* Note that the high and low parts may be independently swapped. */
    swap_commutative(op->args[0], &op->args[2], &op->args[4]);
    swap_commutative(op->args[1], &op->args[3], &op->args[5]);

    return fold_addsub2(ctx, op, true);
}

static bool fold_and(OptContext *ctx, TCGOp *op)
{
    uint64_t z1, z2;

    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_xx_to_x(ctx, op)) {
        return true;
    }

    z1 = arg_info(op->args[1])->z_mask;
    z2 = arg_info(op->args[2])->z_mask;
    ctx->z_mask = z1 & z2;

    /*
     * Known-zeros does not imply known-ones.  Therefore unless
     * arg2 is constant, we can't infer affected bits from it.
     */
    if (arg_is_const(op->args[2])) {
        ctx->a_mask = z1 & ~z2;
    }

    return fold_masks(ctx, op);
}

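/*
 * The identity chain at the top of fold_and encodes, in order:
 * constant folding, "and x, 0 -> 0", "and x, -1 -> x", and
 * "and x, x -> x".  Each helper returns true only when it rewrote
 * the op, so the first match wins and the rest are skipped.
 */
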
static bool fold_andc(OptContext *ctx, TCGOp *op)
{
    uint64_t z1;

    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_ix_to_not(ctx, op, -1)) {
        return true;
    }

    z1 = arg_info(op->args[1])->z_mask;

    /*
     * Known-zeros does not imply known-ones.  Therefore unless
     * arg2 is constant, we can't infer anything from it.
     */
    if (arg_is_const(op->args[2])) {
        uint64_t z2 = ~arg_info(op->args[2])->z_mask;
        ctx->a_mask = z1 & ~z2;
        z1 &= z2;
    }
    ctx->z_mask = z1;

    return fold_masks(ctx, op);
}

static bool fold_brcond(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[2];
    int i;

    if (swap_commutative(NO_DEST, &op->args[0], &op->args[1])) {
        op->args[2] = cond = tcg_swap_cond(cond);
    }

    i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
    if (i == 0) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }
    if (i > 0) {
        op->opc = INDEX_op_br;
        op->args[0] = op->args[3];
    }
    return false;
}

static bool fold_brcond2(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[4];
    TCGArg label = op->args[5];
    int i, inv = 0;

    if (swap_commutative2(&op->args[0], &op->args[2])) {
        op->args[4] = cond = tcg_swap_cond(cond);
    }

    i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
    if (i >= 0) {
        goto do_brcond_const;
    }

    switch (cond) {
    case TCG_COND_LT:
    case TCG_COND_GE:
        /*
         * Simplify LT/GE comparisons vs zero to a single compare
         * vs the high word of the input.
         */
        if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 &&
            arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) {
            goto do_brcond_high;
        }
        break;

    case TCG_COND_NE:
        inv = 1;
        QEMU_FALLTHROUGH;
    case TCG_COND_EQ:
        /*
         * Simplify EQ/NE comparisons where one of the pairs
         * can be simplified.
         */
        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
                                     op->args[2], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            goto do_brcond_high;
        }

        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
                                     op->args[3], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            op->opc = INDEX_op_brcond_i32;
            op->args[1] = op->args[2];
            op->args[2] = cond;
            op->args[3] = label;
            break;
        }
        break;

    default:
        break;

    do_brcond_high:
        op->opc = INDEX_op_brcond_i32;
        op->args[0] = op->args[1];
        op->args[1] = op->args[3];
        op->args[2] = cond;
        op->args[3] = label;
        break;

    do_brcond_const:
        if (i == 0) {
            tcg_op_remove(ctx->tcg, op);
            return true;
        }
        op->opc = INDEX_op_br;
        op->args[0] = label;
        break;
    }
    return false;
}

static bool fold_bswap(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, sign;

    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    z_mask = arg_info(op->args[1])->z_mask;
    switch (op->opc) {
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        z_mask = bswap16(z_mask);
        sign = INT16_MIN;
        break;
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
        z_mask = bswap32(z_mask);
        sign = INT32_MIN;
        break;
    case INDEX_op_bswap64_i64:
        z_mask = bswap64(z_mask);
        sign = INT64_MIN;
        break;
    default:
        g_assert_not_reached();
    }

    switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
    case TCG_BSWAP_OZ:
        break;
    case TCG_BSWAP_OS:
        /* If the sign bit may be 1, force all the bits above to 1. */
        if (z_mask & sign) {
            z_mask |= sign;
        }
        break;
    default:
        /* The high bits are undefined: force all bits above the sign to 1. */
        z_mask |= sign << 1;
        break;
    }
    ctx->z_mask = z_mask;

    return fold_masks(ctx, op);
}

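/*
 * Example for the z_mask tracking above (hypothetical input): if the
 * operand of a bswap16 has z_mask 0x00ff, i.e. only the low byte may
 * be nonzero, byte-swapping gives z_mask 0xff00.  With TCG_BSWAP_OS
 * the result may be sign-extended, so bit 15 being unknown forces all
 * higher bits to unknown as well.
 */
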
static bool fold_call(OptContext *ctx, TCGOp *op)
{
    TCGContext *s = ctx->tcg;
    int nb_oargs = TCGOP_CALLO(op);
    int nb_iargs = TCGOP_CALLI(op);
    int flags, i;

    init_arguments(ctx, op, nb_oargs + nb_iargs);
    copy_propagate(ctx, op, nb_oargs, nb_iargs);

    /* If the function reads or writes globals, reset temp data. */
    flags = tcg_call_flags(op);
    if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
        int nb_globals = s->nb_globals;

        for (i = 0; i < nb_globals; i++) {
            if (test_bit(i, ctx->temps_used.l)) {
                reset_ts(&ctx->tcg->temps[i]);
            }
        }
    }

    /* Reset temp data for outputs. */
    for (i = 0; i < nb_oargs; i++) {
        reset_temp(op->args[i]);
    }

    /* Stop optimizing MB across calls. */
    ctx->prev_mb = NULL;
    return true;
}

static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask;

    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        if (t != 0) {
            t = do_constant_folding(op->opc, ctx->type, t, 0);
            return tcg_opt_gen_movi(ctx, op, op->args[0], t);
        }
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        z_mask = 31;
        break;
    case TCG_TYPE_I64:
        z_mask = 63;
        break;
    default:
        g_assert_not_reached();
    }
    ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;

    return false;
}

static bool fold_ctpop(OptContext *ctx, TCGOp *op)
{
    if (fold_const1(ctx, op)) {
        return true;
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        ctx->z_mask = 32 | 31;
        break;
    case TCG_TYPE_I64:
        ctx->z_mask = 64 | 63;
        break;
    default:
        g_assert_not_reached();
    }
    return false;
}

static bool fold_deposit(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = deposit64(t1, op->args[3], op->args[4], t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }

    ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
                            op->args[3], op->args[4],
                            arg_info(op->args[2])->z_mask);
    return false;
}

static bool fold_divide(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xi_to_x(ctx, op, 1)) {
        return true;
    }
    return false;
}

static bool fold_dup(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;
        t = dup_const(TCGOP_VECE(op), t);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_dup2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
                               arg_info(op->args[2])->val);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    if (args_are_copies(op->args[1], op->args[2])) {
        op->opc = INDEX_op_dup_vec;
        TCGOP_VECE(op) = MO_32;
    }
    return false;
}

static bool fold_eqv(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_xi_to_not(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_extract(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask_old, z_mask;

    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = extract64(t, op->args[2], op->args[3]);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    z_mask_old = arg_info(op->args[1])->z_mask;
    z_mask = extract64(z_mask_old, op->args[2], op->args[3]);
    if (op->args[2] == 0) {
        ctx->a_mask = z_mask_old ^ z_mask;
    }
    ctx->z_mask = z_mask;

    return fold_masks(ctx, op);
}

static bool fold_extract2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t v1 = arg_info(op->args[1])->val;
        uint64_t v2 = arg_info(op->args[2])->val;
        int shr = op->args[3];

        if (op->opc == INDEX_op_extract2_i64) {
            v1 >>= shr;
            v2 <<= 64 - shr;
        } else {
            v1 = (uint32_t)v1 >> shr;
            v2 = (int32_t)v2 << (32 - shr);
        }
        return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
    }
    return false;
}

static bool fold_exts(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask_old, z_mask, sign;
    bool type_change = false;

    if (fold_const1(ctx, op)) {
        return true;
    }

    z_mask_old = z_mask = arg_info(op->args[1])->z_mask;

    switch (op->opc) {
    CASE_OP_32_64(ext8s):
        sign = INT8_MIN;
        z_mask = (uint8_t)z_mask;
        break;
    CASE_OP_32_64(ext16s):
        sign = INT16_MIN;
        z_mask = (uint16_t)z_mask;
        break;
    case INDEX_op_ext_i32_i64:
        type_change = true;
        QEMU_FALLTHROUGH;
    case INDEX_op_ext32s_i64:
        sign = INT32_MIN;
        z_mask = (uint32_t)z_mask;
        break;
    default:
        g_assert_not_reached();
    }

    if (z_mask & sign) {
        z_mask |= sign;
    } else if (!type_change) {
        ctx->a_mask = z_mask_old ^ z_mask;
    }
    ctx->z_mask = z_mask;

    return fold_masks(ctx, op);
}

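/*
 * Example for fold_exts (hypothetical temps): "ext8s t0, t1" where t1
 * has z_mask 0x7f.  Bit 7 is known zero, so sign-extension cannot set
 * any high bits: z_mask stays 0x7f, a_mask becomes
 * z_mask_old ^ z_mask == 0, and fold_masks reduces the whole op to a
 * copy of t1 because the extension provably changes nothing.
 */
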
static bool fold_extu(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask_old, z_mask;
    bool type_change = false;

    if (fold_const1(ctx, op)) {
        return true;
    }

    z_mask_old = z_mask = arg_info(op->args[1])->z_mask;

    switch (op->opc) {
    CASE_OP_32_64(ext8u):
        z_mask = (uint8_t)z_mask;
        break;
    CASE_OP_32_64(ext16u):
        z_mask = (uint16_t)z_mask;
        break;
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extu_i32_i64:
        type_change = true;
        QEMU_FALLTHROUGH;
    case INDEX_op_ext32u_i64:
        z_mask = (uint32_t)z_mask;
        break;
    case INDEX_op_extrh_i64_i32:
        type_change = true;
        z_mask >>= 32;
        break;
    default:
        g_assert_not_reached();
    }

    ctx->z_mask = z_mask;
    if (!type_change) {
        ctx->a_mask = z_mask_old ^ z_mask;
    }
    return fold_masks(ctx, op);
}

static bool fold_mb(OptContext *ctx, TCGOp *op)
{
    /* Eliminate duplicate and redundant fence instructions. */
    if (ctx->prev_mb) {
        /*
         * Merge two barriers of the same type into one,
         * or a weaker barrier into a stronger one,
         * or two weaker barriers into a stronger one.
         *   mb X; mb Y => mb X|Y
         *   mb; strl => mb; st
         *   ldaq; mb => ld; mb
         *   ldaq; strl => ld; mb; st
         * Other combinations are also merged into a strong
         * barrier.  This is stricter than specified but for
         * the purposes of TCG is better than not optimizing.
         */
        ctx->prev_mb->args[0] |= op->args[0];
        tcg_op_remove(ctx->tcg, op);
    } else {
        ctx->prev_mb = op;
    }
    return true;
}

static bool fold_mov(OptContext *ctx, TCGOp *op)
{
    return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
}

static bool fold_movcond(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[5];
    int i;

    if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
        op->args[5] = cond = tcg_swap_cond(cond);
    }
    /*
     * Canonicalize the "false" input reg to match the destination reg so
     * that the tcg backend can implement a "move if true" operation.
     */
    if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
        op->args[5] = cond = tcg_invert_cond(cond);
    }

    i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
    if (i >= 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
    }

    ctx->z_mask = arg_info(op->args[3])->z_mask
                | arg_info(op->args[4])->z_mask;

    if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
        uint64_t tv = arg_info(op->args[3])->val;
        uint64_t fv = arg_info(op->args[4])->val;
        TCGOpcode opc;

        switch (ctx->type) {
        case TCG_TYPE_I32:
            opc = INDEX_op_setcond_i32;
            break;
        case TCG_TYPE_I64:
            opc = INDEX_op_setcond_i64;
            break;
        default:
            g_assert_not_reached();
        }

        if (tv == 1 && fv == 0) {
            op->opc = opc;
            op->args[3] = cond;
        } else if (fv == 1 && tv == 0) {
            op->opc = opc;
            op->args[3] = tcg_invert_cond(cond);
        }
    }
    return false;
}

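/*
 * Illustration (hypothetical temps): "movcond_i32 t0, t1, t2, <1>, <0>,
 * lt" selects 1 when t1 < t2 and 0 otherwise, which is exactly
 * "setcond_i32 t0, t1, t2, lt", so the op is rewritten in place; with
 * the two constants reversed, the condition is inverted instead.
 */
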
static bool fold_mul(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xi_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 1)) {
        return true;
    }
    return false;
}

static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_i(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_multiply2(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[2], &op->args[3]);

    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
        uint64_t a = arg_info(op->args[2])->val;
        uint64_t b = arg_info(op->args[3])->val;
        uint64_t h, l;
        TCGArg rl, rh;
        TCGOp *op2;

        switch (op->opc) {
        case INDEX_op_mulu2_i32:
            l = (uint64_t)(uint32_t)a * (uint32_t)b;
            h = (int32_t)(l >> 32);
            l = (int32_t)l;
            break;
        case INDEX_op_muls2_i32:
            l = (int64_t)(int32_t)a * (int32_t)b;
            h = l >> 32;
            l = (int32_t)l;
            break;
        case INDEX_op_mulu2_i64:
            mulu64(&l, &h, a, b);
            break;
        case INDEX_op_muls2_i64:
            muls64(&l, &h, a, b);
            break;
        default:
            g_assert_not_reached();
        }

        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = tcg_op_insert_before(ctx->tcg, op, 0);

        tcg_opt_gen_movi(ctx, op, rl, l);
        tcg_opt_gen_movi(ctx, op2, rh, h);
        return true;
    }
    return false;
}

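/*
 * Illustration: "mulu2_i32 rl, rh, <0x10000>, <0x10000>" computes the
 * full 64-bit product 0x1_00000000; the fold above splits it into the
 * two movi "rl = 0" and "rh = 1", inserting a second op before the
 * original to carry the high half.
 */
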
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001529static bool fold_nand(OptContext *ctx, TCGOp *op)
1530{
Richard Henderson7a2f7082021-08-26 07:06:39 -07001531 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001532 fold_xi_to_not(ctx, op, -1)) {
1533 return true;
1534 }
1535 return false;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001536}
1537
1538static bool fold_neg(OptContext *ctx, TCGOp *op)
1539{
Richard Hendersonfae450b2021-08-25 22:42:19 -07001540 uint64_t z_mask;
1541
Richard Henderson9caca882021-08-24 13:30:32 -07001542 if (fold_const1(ctx, op)) {
1543 return true;
1544 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001545
1546    /* Set to 1 all bits at and to the left of the rightmost set bit. */
1547 z_mask = arg_info(op->args[1])->z_mask;
1548 ctx->z_mask = -(z_mask & -z_mask);
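    /*
     * Example: z_mask 0b0110 isolates to 0b0010, giving ...11110:
     * -x cannot have bits below the lowest possibly-set bit of x.
     */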
1549
Richard Henderson9caca882021-08-24 13:30:32 -07001550 /*
1551 * Because of fold_sub_to_neg, we want to always return true,
1552 * via finish_folding.
1553 */
1554 finish_folding(ctx, op);
1555 return true;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001556}
1557
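/* nor: x NOR 0 folds to NOT x, via fold_xi_to_not. */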
1558static bool fold_nor(OptContext *ctx, TCGOp *op)
1559{
Richard Henderson7a2f7082021-08-26 07:06:39 -07001560 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001561 fold_xi_to_not(ctx, op, 0)) {
1562 return true;
1563 }
1564 return false;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001565}
1566
1567static bool fold_not(OptContext *ctx, TCGOp *op)
1568{
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001569 if (fold_const1(ctx, op)) {
1570 return true;
1571 }
1572
1573    /* Because of fold_to_not, we want to always return true,
        via finish_folding. */
1574 finish_folding(ctx, op);
1575 return true;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001576}
1577
1578static bool fold_or(OptContext *ctx, TCGOp *op)
1579{
Richard Henderson7a2f7082021-08-26 07:06:39 -07001580 if (fold_const2_commutative(ctx, op) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001581 fold_xi_to_x(ctx, op, 0) ||
Richard Hendersonca7bb042021-08-25 13:14:21 -07001582 fold_xx_to_x(ctx, op)) {
1583 return true;
1584 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001585
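    /* A result bit can be set only if it can be set in either input. */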
1586 ctx->z_mask = arg_info(op->args[1])->z_mask
1587 | arg_info(op->args[2])->z_mask;
1588 return fold_masks(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001589}
1590
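/*
 * orc: x | ~x folds to -1, x | ~(-1) folds to x, and 0 | ~x folds
 * to NOT x.
 */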
1591static bool fold_orc(OptContext *ctx, TCGOp *op)
1592{
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001593 if (fold_const2(ctx, op) ||
Richard Henderson4e858d92021-08-26 07:31:13 -07001594 fold_xx_to_i(ctx, op, -1) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001595 fold_xi_to_x(ctx, op, -1) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001596 fold_ix_to_not(ctx, op, 0)) {
1597 return true;
1598 }
1599 return false;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001600}
1601
Richard Henderson3eefdf22021-08-25 11:06:43 -07001602static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
1603{
Richard Hendersonfae450b2021-08-25 22:42:19 -07001604 const TCGOpDef *def = &tcg_op_defs[op->opc];
1605 MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
1606 MemOp mop = get_memop(oi);
1607 int width = 8 * memop_size(mop);
1608
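    /*
     * An unsigned load narrower than 64 bits can only set the bits
     * it actually loads; record that in the known-zero mask.
     */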
1609 if (!(mop & MO_SIGN) && width < 64) {
1610 ctx->z_mask = MAKE_64BIT_MASK(0, width);
1611 }
1612
Richard Henderson3eefdf22021-08-25 11:06:43 -07001613 /* Opcodes that touch guest memory stop the mb optimization. */
1614 ctx->prev_mb = NULL;
1615 return false;
1616}
1617
1618static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
1619{
1620 /* Opcodes that touch guest memory stop the mb optimization. */
1621 ctx->prev_mb = NULL;
1622 return false;
1623}
1624
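/* rem/remu: x % x folds to 0, besides plain constant folding. */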
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001625static bool fold_remainder(OptContext *ctx, TCGOp *op)
1626{
Richard Henderson267c17e2021-10-25 11:30:33 -07001627 if (fold_const2(ctx, op) ||
1628 fold_xx_to_i(ctx, op, 0)) {
1629 return true;
1630 }
1631 return false;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001632}
1633
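/*
 * setcond: canonicalize a constant operand into the second slot
 * (mirroring the condition), then try to decide the comparison at
 * translation time.
 */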
Richard Hendersonc63ff552021-08-24 09:35:30 -07001634static bool fold_setcond(OptContext *ctx, TCGOp *op)
1635{
1636 TCGCond cond = op->args[3];
Richard Henderson7a2f7082021-08-26 07:06:39 -07001637 int i;
Richard Hendersonc63ff552021-08-24 09:35:30 -07001638
Richard Henderson7a2f7082021-08-26 07:06:39 -07001639 if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
1640 op->args[3] = cond = tcg_swap_cond(cond);
1641 }
1642
1643 i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
Richard Hendersonc63ff552021-08-24 09:35:30 -07001644 if (i >= 0) {
1645 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
1646 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001647
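    /* A setcond result is 0 or 1: only bit 0 can be set. */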
1648 ctx->z_mask = 1;
Richard Hendersonc63ff552021-08-24 09:35:30 -07001649 return false;
1650}
1651
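/*
 * setcond2: a comparison of a double-word value given as two word
 * halves.  Fold it when the whole comparison is decidable, or reduce
 * it to a single-word setcond when one half decides the result.
 */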
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07001652static bool fold_setcond2(OptContext *ctx, TCGOp *op)
1653{
1654 TCGCond cond = op->args[5];
Richard Henderson7a2f7082021-08-26 07:06:39 -07001655 int i, inv = 0;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07001656
Richard Henderson7a2f7082021-08-26 07:06:39 -07001657 if (swap_commutative2(&op->args[1], &op->args[3])) {
1658 op->args[5] = cond = tcg_swap_cond(cond);
1659 }
1660
1661 i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07001662 if (i >= 0) {
1663 goto do_setcond_const;
1664 }
1665
1666 switch (cond) {
1667 case TCG_COND_LT:
1668 case TCG_COND_GE:
1669 /*
1670 * Simplify LT/GE comparisons vs zero to a single compare
1671 * vs the high word of the input.
1672 */
1673 if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 &&
1674 arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) {
1675 goto do_setcond_high;
1676 }
1677 break;
1678
1679 case TCG_COND_NE:
1680 inv = 1;
1681 QEMU_FALLTHROUGH;
1682 case TCG_COND_EQ:
1683 /*
1684 * Simplify EQ/NE comparisons where one of the pairs
1685 * can be simplified.
1686 */
Richard Henderson67f84c92021-08-25 08:00:20 -07001687 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07001688 op->args[3], cond);
1689 switch (i ^ inv) {
1690 case 0:
1691 goto do_setcond_const;
1692 case 1:
1693 goto do_setcond_high;
1694 }
1695
Richard Henderson67f84c92021-08-25 08:00:20 -07001696 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07001697 op->args[4], cond);
1698 switch (i ^ inv) {
1699 case 0:
1700 goto do_setcond_const;
1701 case 1:
1702 op->args[2] = op->args[3];
1703 op->args[3] = cond;
1704 op->opc = INDEX_op_setcond_i32;
1705 break;
1706 }
1707 break;
1708
1709 default:
1710 break;
1711
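    /* Reduce to a single comparison of the high halves. */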
1712 do_setcond_high:
1713 op->args[1] = op->args[2];
1714 op->args[2] = op->args[4];
1715 op->args[3] = cond;
1716 op->opc = INDEX_op_setcond_i32;
1717 break;
1718 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001719
1720 ctx->z_mask = 1;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07001721 return false;
1722
1723 do_setcond_const:
1724 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
1725}
1726
Richard Hendersonb6617c82021-08-24 10:44:53 -07001727static bool fold_sextract(OptContext *ctx, TCGOp *op)
1728{
Richard Hendersonfae450b2021-08-25 22:42:19 -07001729 int64_t z_mask_old, z_mask;
1730
Richard Hendersonb6617c82021-08-24 10:44:53 -07001731 if (arg_is_const(op->args[1])) {
1732 uint64_t t;
1733
1734 t = arg_info(op->args[1])->val;
1735 t = sextract64(t, op->args[2], op->args[3]);
1736 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1737 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001738
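    /*
     * If the field starts at bit 0 and its sign bit is already known
     * zero, the sign extension cannot change the value; a_mask keeps
     * only the bits that could still differ from the input.
     */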
1739 z_mask_old = arg_info(op->args[1])->z_mask;
1740 z_mask = sextract64(z_mask_old, op->args[2], op->args[3]);
1741 if (op->args[2] == 0 && z_mask >= 0) {
1742 ctx->a_mask = z_mask_old ^ z_mask;
1743 }
1744 ctx->z_mask = z_mask;
1745
1746 return fold_masks(ctx, op);
Richard Hendersonb6617c82021-08-24 10:44:53 -07001747}
1748
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001749static bool fold_shift(OptContext *ctx, TCGOp *op)
1750{
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001751 if (fold_const2(ctx, op) ||
Richard Hendersonda48e272021-08-25 20:42:04 -07001752 fold_ix_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001753 fold_xi_to_x(ctx, op, 0)) {
1754 return true;
1755 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001756
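    /*
     * For a constant shift count, the known-zero mask is transformed
     * exactly as the value would be.
     */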
1757 if (arg_is_const(op->args[2])) {
1758 ctx->z_mask = do_constant_folding(op->opc, ctx->type,
1759 arg_info(op->args[1])->z_mask,
1760 arg_info(op->args[2])->val);
1761 return fold_masks(ctx, op);
1762 }
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001763 return false;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001764}
1765
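/* Rewrite 0 - x as neg x, when the target provides a suitable neg. */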
Richard Henderson9caca882021-08-24 13:30:32 -07001766static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
1767{
1768 TCGOpcode neg_op;
1769 bool have_neg;
1770
1771 if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
1772 return false;
1773 }
1774
1775 switch (ctx->type) {
1776 case TCG_TYPE_I32:
1777 neg_op = INDEX_op_neg_i32;
1778 have_neg = TCG_TARGET_HAS_neg_i32;
1779 break;
1780 case TCG_TYPE_I64:
1781 neg_op = INDEX_op_neg_i64;
1782 have_neg = TCG_TARGET_HAS_neg_i64;
1783 break;
1784 case TCG_TYPE_V64:
1785 case TCG_TYPE_V128:
1786 case TCG_TYPE_V256:
1787 neg_op = INDEX_op_neg_vec;
1788 have_neg = (TCG_TARGET_HAS_neg_vec &&
1789 tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
1790 break;
1791 default:
1792 g_assert_not_reached();
1793 }
1794 if (have_neg) {
1795 op->opc = neg_op;
1796 op->args[1] = op->args[2];
1797 return fold_neg(ctx, op);
1798 }
1799 return false;
1800}
1801
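/* sub: x - x folds to 0, x - 0 folds to x, and 0 - x may become neg. */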
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001802static bool fold_sub(OptContext *ctx, TCGOp *op)
1803{
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07001804 if (fold_const2(ctx, op) ||
Richard Henderson9caca882021-08-24 13:30:32 -07001805 fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001806 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson9caca882021-08-24 13:30:32 -07001807 fold_sub_to_neg(ctx, op)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07001808 return true;
1809 }
1810 return false;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001811}
1812
Richard Henderson9531c072021-08-26 06:51:39 -07001813static bool fold_sub2(OptContext *ctx, TCGOp *op)
Richard Hendersone3f7dc22021-08-24 10:30:38 -07001814{
Richard Henderson9531c072021-08-26 06:51:39 -07001815 return fold_addsub2(ctx, op, false);
Richard Hendersone3f7dc22021-08-24 10:30:38 -07001816}
1817
Richard Hendersonfae450b2021-08-25 22:42:19 -07001818static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
1819{
1820 /* We can't do any folding with a load, but we can record bits. */
1821 switch (op->opc) {
1822 CASE_OP_32_64(ld8u):
1823 ctx->z_mask = MAKE_64BIT_MASK(0, 8);
1824 break;
1825 CASE_OP_32_64(ld16u):
1826 ctx->z_mask = MAKE_64BIT_MASK(0, 16);
1827 break;
1828 case INDEX_op_ld32u_i64:
1829 ctx->z_mask = MAKE_64BIT_MASK(0, 32);
1830 break;
1831 default:
1832 g_assert_not_reached();
1833 }
1834 return false;
1835}
1836
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001837static bool fold_xor(OptContext *ctx, TCGOp *op)
1838{
Richard Henderson7a2f7082021-08-26 07:06:39 -07001839 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001840 fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001841 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001842 fold_xi_to_not(ctx, op, -1)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07001843 return true;
1844 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001845
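    /* As for OR: a result bit can be set only if some input bit can be. */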
1846 ctx->z_mask = arg_info(op->args[1])->z_mask
1847 | arg_info(op->args[2])->z_mask;
1848 return fold_masks(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001849}
1850
Kirill Batuzov22613af2011-07-07 16:37:13 +04001851/* Propagate constants and copies, fold constant expressions. */
Aurelien Jarno36e60ef2015-06-04 21:53:27 +02001852void tcg_optimize(TCGContext *s)
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04001853{
Richard Henderson5cf32be2021-08-24 08:17:08 -07001854 int nb_temps, i;
Richard Hendersond0ed5152021-08-24 07:38:39 -07001855 TCGOp *op, *op_next;
Richard Hendersondc849882021-08-24 07:13:45 -07001856 OptContext ctx = { .tcg = s };
Richard Henderson5d8f5362012-09-21 10:13:38 -07001857
Kirill Batuzov22613af2011-07-07 16:37:13 +04001858    /* Each temp has a TempOptInfo, reached via its state_ptr.  It records
1859       whether the temp holds a constant (and, if so, its value), and it
Aurelien Jarnoe590d4e2012-09-11 12:31:21 +02001860       links all copies of a temp together in a doubly linked
1861       circular list. */
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04001862
1863 nb_temps = s->nb_temps;
Richard Henderson8f17a972020-03-30 19:52:02 -07001864 for (i = 0; i < nb_temps; ++i) {
1865 s->temps[i].state_ptr = NULL;
1866 }
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04001867
Richard Henderson15fa08f2017-11-02 15:19:14 +01001868 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07001869 TCGOpcode opc = op->opc;
Richard Henderson5cf32be2021-08-24 08:17:08 -07001870 const TCGOpDef *def;
Richard Henderson404a1482021-08-24 11:08:21 -07001871 bool done = false;
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07001872
Richard Henderson5cf32be2021-08-24 08:17:08 -07001873 /* Calls are special. */
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07001874 if (opc == INDEX_op_call) {
Richard Henderson5cf32be2021-08-24 08:17:08 -07001875 fold_call(&ctx, op);
1876 continue;
Richard Hendersoncf066672014-03-22 20:06:52 -07001877 }
Richard Henderson5cf32be2021-08-24 08:17:08 -07001878
1879 def = &tcg_op_defs[opc];
Richard Hendersonec5d4cb2021-08-24 08:20:27 -07001880 init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
1881 copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
Kirill Batuzov22613af2011-07-07 16:37:13 +04001882
Richard Henderson67f84c92021-08-25 08:00:20 -07001883 /* Pre-compute the type of the operation. */
1884 if (def->flags & TCG_OPF_VECTOR) {
1885 ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op);
1886 } else if (def->flags & TCG_OPF_64BIT) {
1887 ctx.type = TCG_TYPE_I64;
1888 } else {
1889 ctx.type = TCG_TYPE_I32;
1890 }
1891
Richard Hendersonfae450b2021-08-25 22:42:19 -07001892 /* Assume all bits affected, and no bits known zero. */
1893 ctx.a_mask = -1;
1894 ctx.z_mask = -1;
Paolo Bonzini633f6502013-01-11 15:42:53 -08001895
Richard Henderson2cfac7f2021-08-25 13:05:43 -07001896 /*
1897 * Process each opcode.
1898 * Sorted alphabetically by opcode as much as possible.
1899 */
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07001900 switch (opc) {
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001901 CASE_OP_32_64_VEC(add):
1902 done = fold_add(&ctx, op);
1903 break;
Richard Henderson9531c072021-08-26 06:51:39 -07001904 CASE_OP_32_64(add2):
1905 done = fold_add2(&ctx, op);
Richard Hendersone3f7dc22021-08-24 10:30:38 -07001906 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001907 CASE_OP_32_64_VEC(and):
1908 done = fold_and(&ctx, op);
1909 break;
1910 CASE_OP_32_64_VEC(andc):
1911 done = fold_andc(&ctx, op);
1912 break;
Richard Henderson079b0802021-08-24 09:30:59 -07001913 CASE_OP_32_64(brcond):
1914 done = fold_brcond(&ctx, op);
1915 break;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001916 case INDEX_op_brcond2_i32:
1917 done = fold_brcond2(&ctx, op);
1918 break;
Richard Henderson09bacdc2021-08-24 11:58:12 -07001919 CASE_OP_32_64(bswap16):
1920 CASE_OP_32_64(bswap32):
1921 case INDEX_op_bswap64_i64:
1922 done = fold_bswap(&ctx, op);
1923 break;
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001924 CASE_OP_32_64(clz):
1925 CASE_OP_32_64(ctz):
1926 done = fold_count_zeros(&ctx, op);
1927 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001928 CASE_OP_32_64(ctpop):
1929 done = fold_ctpop(&ctx, op);
1930 break;
Richard Henderson1b1907b2021-08-24 10:47:04 -07001931 CASE_OP_32_64(deposit):
1932 done = fold_deposit(&ctx, op);
1933 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001934 CASE_OP_32_64(div):
1935 CASE_OP_32_64(divu):
1936 done = fold_divide(&ctx, op);
1937 break;
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001938 case INDEX_op_dup_vec:
1939 done = fold_dup(&ctx, op);
1940 break;
1941 case INDEX_op_dup2_vec:
1942 done = fold_dup2(&ctx, op);
1943 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001944 CASE_OP_32_64(eqv):
1945 done = fold_eqv(&ctx, op);
1946 break;
Richard Hendersonb6617c82021-08-24 10:44:53 -07001947 CASE_OP_32_64(extract):
1948 done = fold_extract(&ctx, op);
1949 break;
Richard Hendersondcd08992021-08-24 10:41:39 -07001950 CASE_OP_32_64(extract2):
1951 done = fold_extract2(&ctx, op);
1952 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001953 CASE_OP_32_64(ext8s):
1954 CASE_OP_32_64(ext16s):
1955 case INDEX_op_ext32s_i64:
1956 case INDEX_op_ext_i32_i64:
1957 done = fold_exts(&ctx, op);
1958 break;
1959 CASE_OP_32_64(ext8u):
1960 CASE_OP_32_64(ext16u):
1961 case INDEX_op_ext32u_i64:
1962 case INDEX_op_extu_i32_i64:
1963 case INDEX_op_extrl_i64_i32:
1964 case INDEX_op_extrh_i64_i32:
1965 done = fold_extu(&ctx, op);
1966 break;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001967 CASE_OP_32_64(ld8u):
1968 CASE_OP_32_64(ld16u):
1969 case INDEX_op_ld32u_i64:
1970 done = fold_tcg_ld(&ctx, op);
1971 break;
Richard Henderson3eefdf22021-08-25 11:06:43 -07001972 case INDEX_op_mb:
1973 done = fold_mb(&ctx, op);
1974 break;
Richard Henderson2cfac7f2021-08-25 13:05:43 -07001975 CASE_OP_32_64_VEC(mov):
1976 done = fold_mov(&ctx, op);
1977 break;
Richard Henderson0c310a32021-08-24 10:37:24 -07001978 CASE_OP_32_64(movcond):
1979 done = fold_movcond(&ctx, op);
1980 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001981 CASE_OP_32_64(mul):
1982 done = fold_mul(&ctx, op);
1983 break;
1984 CASE_OP_32_64(mulsh):
1985 CASE_OP_32_64(muluh):
1986 done = fold_mul_highpart(&ctx, op);
1987 break;
Richard Henderson407112b2021-08-26 06:33:04 -07001988 CASE_OP_32_64(muls2):
1989 CASE_OP_32_64(mulu2):
1990 done = fold_multiply2(&ctx, op);
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07001991 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001992 CASE_OP_32_64(nand):
1993 done = fold_nand(&ctx, op);
1994 break;
1995 CASE_OP_32_64(neg):
1996 done = fold_neg(&ctx, op);
1997 break;
1998 CASE_OP_32_64(nor):
1999 done = fold_nor(&ctx, op);
2000 break;
2001 CASE_OP_32_64_VEC(not):
2002 done = fold_not(&ctx, op);
2003 break;
2004 CASE_OP_32_64_VEC(or):
2005 done = fold_or(&ctx, op);
2006 break;
2007 CASE_OP_32_64_VEC(orc):
2008 done = fold_orc(&ctx, op);
2009 break;
Richard Henderson3eefdf22021-08-25 11:06:43 -07002010 case INDEX_op_qemu_ld_i32:
2011 case INDEX_op_qemu_ld_i64:
2012 done = fold_qemu_ld(&ctx, op);
2013 break;
2014 case INDEX_op_qemu_st_i32:
2015 case INDEX_op_qemu_st8_i32:
2016 case INDEX_op_qemu_st_i64:
2017 done = fold_qemu_st(&ctx, op);
2018 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002019 CASE_OP_32_64(rem):
2020 CASE_OP_32_64(remu):
2021 done = fold_remainder(&ctx, op);
2022 break;
2023 CASE_OP_32_64(rotl):
2024 CASE_OP_32_64(rotr):
2025 CASE_OP_32_64(sar):
2026 CASE_OP_32_64(shl):
2027 CASE_OP_32_64(shr):
2028 done = fold_shift(&ctx, op);
2029 break;
Richard Hendersonc63ff552021-08-24 09:35:30 -07002030 CASE_OP_32_64(setcond):
2031 done = fold_setcond(&ctx, op);
2032 break;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002033 case INDEX_op_setcond2_i32:
2034 done = fold_setcond2(&ctx, op);
2035 break;
Richard Hendersonb6617c82021-08-24 10:44:53 -07002036 CASE_OP_32_64(sextract):
2037 done = fold_sextract(&ctx, op);
2038 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002039 CASE_OP_32_64_VEC(sub):
2040 done = fold_sub(&ctx, op);
2041 break;
Richard Henderson9531c072021-08-26 06:51:39 -07002042 CASE_OP_32_64(sub2):
2043 done = fold_sub2(&ctx, op);
Richard Hendersone3f7dc22021-08-24 10:30:38 -07002044 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002045 CASE_OP_32_64_VEC(xor):
2046 done = fold_xor(&ctx, op);
Richard Hendersonb10f3832021-08-23 22:30:17 -07002047 break;
Richard Henderson2cfac7f2021-08-25 13:05:43 -07002048 default:
2049 break;
Richard Hendersonb10f3832021-08-23 22:30:17 -07002050 }
2051
Richard Henderson404a1482021-08-24 11:08:21 -07002052 if (!done) {
2053 finish_folding(&ctx, op);
2054 }
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04002055 }
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04002056}