/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "qemu/interval-tree.h"
#include "tcg/tcg-op-common.h"
#include "tcg-internal.h"

#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)

typedef struct MemCopyInfo {
    IntervalTreeNode itree;
    QSIMPLEQ_ENTRY (MemCopyInfo) next;
    TCGTemp *ts;
    TCGType type;
} MemCopyInfo;

typedef struct TempOptInfo {
    bool is_const;
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
    uint64_t val;
    uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
    uint64_t s_mask;  /* a left-aligned mask of clrsb(value) bits. */
} TempOptInfo;

typedef struct OptContext {
    TCGContext *tcg;
    TCGOp *prev_mb;
    TCGTempSet temps_used;

    IntervalTreeRoot mem_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;

    /* In flight values from optimization. */
    uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
    uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
    uint64_t s_mask;  /* mask of clrsb(value) bits */
    TCGType type;
} OptContext;

/* Calculate the smask for a specific value. */
static uint64_t smask_from_value(uint64_t value)
{
    int rep = clrsb64(value);
    return ~(~0ull >> rep);
}
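
/*
 * For example, smask_from_value(0x00ff) == 0xfffffffffffffe00:
 * clrsb64 reports 55 redundant sign bits, so the 55 most
 * significant bits of the result are set.
 */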

/*
 * Calculate the smask for a given set of known-zeros.
 * If there are lots of zeros on the left, we can consider the remainder
 * an unsigned field, and thus the corresponding signed field is one bit
 * larger.
 */
static uint64_t smask_from_zmask(uint64_t zmask)
{
    /*
     * Only the 0 bits are significant for zmask, thus the msb itself
     * must be zero, else we have no sign information.
     */
    int rep = clz64(zmask);
    if (rep == 0) {
        return 0;
    }
    rep -= 1;
    return ~(~0ull >> rep);
}
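
/*
 * For example, smask_from_zmask(0x0000ffff) == 0xfffffffffffe0000:
 * the value is known to fit in 16 unsigned (17 signed) bits, so at
 * least the 47 most significant bits all equal the sign bit.
 */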

/*
 * Recreate a properly left-aligned smask after manipulation.
 * Some bit-shuffling, particularly shifts and rotates, may
 * retain sign bits on the left, but may scatter disconnected
 * sign bits on the right.  Retain only what remains to the left.
 */
static uint64_t smask_from_smask(int64_t smask)
{
    /* Only the 1 bits are significant for smask */
    return smask_from_zmask(~smask);
}
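
/*
 * For example, smask_from_smask(0xffff0000000000ff) ==
 * 0xfffe000000000000: only the 16 contiguous sign bits on the
 * left survive (less one, per smask_from_zmask); the scattered
 * low bits are dropped.
 */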

static inline TempOptInfo *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline TempOptInfo *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ts_info(ts)->is_const;
}

static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
{
    TempOptInfo *ti = ts_info(ts);
    return ti->is_const && ti->val == val;
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline bool arg_is_const_val(TCGArg arg, uint64_t val)
{
    return ts_is_const_val(arg_temp(arg), val);
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}

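/*
 * Pick the "better" of two copies: the one whose temp kind ranks
 * higher in the TCGTempKind enumeration, i.e. the longer-lived
 * operand (constants rank highest).
 */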
static TCGTemp *cmp_better_copy(TCGTemp *a, TCGTemp *b)
{
    return a->kind < b->kind ? b : a;
}

/* Initialize and activate a temporary. */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    TempOptInfo *ti;

    if (test_bit(idx, ctx->temps_used.l)) {
        return;
    }
    set_bit(idx, ctx->temps_used.l);

    ti = ts->state_ptr;
    if (ti == NULL) {
        ti = tcg_malloc(sizeof(TempOptInfo));
        ts->state_ptr = ti;
    }

    ti->next_copy = ts;
    ti->prev_copy = ts;
    QSIMPLEQ_INIT(&ti->mem_copy);
    if (ts->kind == TEMP_CONST) {
        ti->is_const = true;
        ti->val = ts->val;
        ti->z_mask = ts->val;
        ti->s_mask = smask_from_value(ts->val);
    } else {
        ti->is_const = false;
        ti->z_mask = -1;
        ti->s_mask = 0;
    }
}

static MemCopyInfo *mem_copy_first(OptContext *ctx, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_first(&ctx->mem_copy, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static MemCopyInfo *mem_copy_next(MemCopyInfo *mem, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_next(&mem->itree, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static void remove_mem_copy(OptContext *ctx, MemCopyInfo *mc)
{
    TCGTemp *ts = mc->ts;
    TempOptInfo *ti = ts_info(ts);

    interval_tree_remove(&mc->itree, &ctx->mem_copy);
    QSIMPLEQ_REMOVE(&ti->mem_copy, mc, MemCopyInfo, next);
    QSIMPLEQ_INSERT_TAIL(&ctx->mem_free, mc, next);
}

static void remove_mem_copy_in(OptContext *ctx, intptr_t s, intptr_t l)
{
    while (true) {
        MemCopyInfo *mc = mem_copy_first(ctx, s, l);
        if (!mc) {
            break;
        }
        remove_mem_copy(ctx, mc);
    }
}

static void remove_mem_copy_all(OptContext *ctx)
{
    remove_mem_copy_in(ctx, 0, -1);
    tcg_debug_assert(interval_tree_is_empty(&ctx->mem_copy));
}

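/* Find the best (per cmp_better_copy) temp among all copies of TS. */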
static TCGTemp *find_better_copy(TCGTemp *ts)
{
    TCGTemp *i, *ret;

    /* If this is already readonly, we can't do better. */
    if (temp_readonly(ts)) {
        return ts;
    }

    ret = ts;
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        ret = cmp_better_copy(ret, i);
    }
    return ret;
}

static void move_mem_copies(TCGTemp *dst_ts, TCGTemp *src_ts)
{
    TempOptInfo *si = ts_info(src_ts);
    TempOptInfo *di = ts_info(dst_ts);
    MemCopyInfo *mc;

    QSIMPLEQ_FOREACH(mc, &si->mem_copy, next) {
        tcg_debug_assert(mc->ts == src_ts);
        mc->ts = dst_ts;
    }
    QSIMPLEQ_CONCAT(&di->mem_copy, &si->mem_copy);
}

/* Reset TEMP's state, possibly removing the temp from the list of copies. */
static void reset_ts(OptContext *ctx, TCGTemp *ts)
{
    TempOptInfo *ti = ts_info(ts);
    TCGTemp *pts = ti->prev_copy;
    TCGTemp *nts = ti->next_copy;
    TempOptInfo *pi = ts_info(pts);
    TempOptInfo *ni = ts_info(nts);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->z_mask = -1;
    ti->s_mask = 0;

    if (!QSIMPLEQ_EMPTY(&ti->mem_copy)) {
        if (ts == nts) {
            /* Last temp copy being removed, the mem copies die. */
            MemCopyInfo *mc;
            QSIMPLEQ_FOREACH(mc, &ti->mem_copy, next) {
                interval_tree_remove(&mc->itree, &ctx->mem_copy);
            }
            QSIMPLEQ_CONCAT(&ctx->mem_free, &ti->mem_copy);
        } else {
            move_mem_copies(find_better_copy(nts), ts);
        }
    }
}

static void reset_temp(OptContext *ctx, TCGArg arg)
{
    reset_ts(ctx, arg_temp(arg));
}

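/*
 * Record that the memory range [start, last] holds a copy of TS,
 * recycling a MemCopyInfo from the free list when possible.
 */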
static void record_mem_copy(OptContext *ctx, TCGType type,
                            TCGTemp *ts, intptr_t start, intptr_t last)
{
    MemCopyInfo *mc;
    TempOptInfo *ti;

    mc = QSIMPLEQ_FIRST(&ctx->mem_free);
    if (mc) {
        QSIMPLEQ_REMOVE_HEAD(&ctx->mem_free, next);
    } else {
        mc = tcg_malloc(sizeof(*mc));
    }

    memset(mc, 0, sizeof(*mc));
    mc->itree.start = start;
    mc->itree.last = last;
    mc->type = type;
    interval_tree_insert(&mc->itree, &ctx->mem_copy);

    ts = find_better_copy(ts);
    ti = ts_info(ts);
    mc->ts = ts;
    QSIMPLEQ_INSERT_TAIL(&ti->mem_copy, mc, next);
}

static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}

static TCGTemp *find_mem_copy_for(OptContext *ctx, TCGType type, intptr_t s)
{
    MemCopyInfo *mc;

    for (mc = mem_copy_first(ctx, s, s); mc; mc = mem_copy_next(mc, s, s)) {
        if (mc->itree.start == s && mc->type == type) {
            return find_better_copy(mc->ts);
        }
    }
    return NULL;
}

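/*
 * Return a TCGArg for a constant of VAL in the current fold type,
 * sign-extending the low 32 bits for TCG_TYPE_I32.
 */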
static TCGArg arg_new_constant(OptContext *ctx, uint64_t val)
{
    TCGType type = ctx->type;
    TCGTemp *ts;

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    ts = tcg_constant_internal(type, val);
    init_ts_info(ctx, ts);

    return temp_arg(ts);
}

static TCGArg arg_new_temp(OptContext *ctx)
{
    TCGTemp *ts = tcg_temp_new_internal(ctx->type, TEMP_EBB);
    init_ts_info(ctx, ts);
    return temp_arg(ts);
}

static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    TempOptInfo *di;
    TempOptInfo *si;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }

    reset_ts(ctx, dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);

    switch (ctx->type) {
    case TCG_TYPE_I32:
        new_op = INDEX_op_mov_i32;
        break;
    case TCG_TYPE_I64:
        new_op = INDEX_op_mov_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
        new_op = INDEX_op_mov_vec;
        break;
    default:
        g_assert_not_reached();
    }
    op->opc = new_op;
    op->args[0] = dst;
    op->args[1] = src;

    di->z_mask = si->z_mask;
    di->s_mask = si->s_mask;

    if (src_ts->type == dst_ts->type) {
        TempOptInfo *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;

        if (!QSIMPLEQ_EMPTY(&si->mem_copy)
            && cmp_better_copy(src_ts, dst_ts) == dst_ts) {
            move_mem_copies(dst_ts, src_ts);
        }
    }
    return true;
}

static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                             TCGArg dst, uint64_t val)
{
    /* Convert movi to mov with constant temp. */
    return tcg_opt_gen_mov(ctx, op, dst, arg_new_constant(ctx, val));
}

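/*
 * Evaluate OP on constant arguments X and Y at full 64-bit width;
 * the caller is responsible for truncating 32-bit results
 * (see do_constant_folding below).
 */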
static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

    switch (op) {
    CASE_OP_32_64(add):
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    CASE_OP_32_64_VEC(and):
        return x & y;

    CASE_OP_32_64_VEC(or):
        return x | y;

    CASE_OP_32_64_VEC(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64_VEC(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    CASE_OP_32_64_VEC(andc):
        return x & ~y;

    CASE_OP_32_64_VEC(orc):
        return x | ~y;

    CASE_OP_32_64_VEC(eqv):
        return ~(x ^ y);

    CASE_OP_32_64_VEC(nand):
        return ~(x & y);

    CASE_OP_32_64_VEC(nor):
        return ~(x | y);

    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    CASE_OP_32_64(bswap16):
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

    CASE_OP_32_64(bswap32):
        x = bswap32(x);
        return y & TCG_BSWAP_OS ? (int32_t)x : x;

    case INDEX_op_bswap64_i64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        g_assert_not_reached();
    }
}

static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
                                    uint64_t x, uint64_t y)
{
    uint64_t res = do_constant_folding_2(op, x, y);
    if (type == TCG_TYPE_I32) {
        res = (int32_t)res;
    }
    return res;
}
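
/*
 * For example, do_constant_folding(INDEX_op_add_i32, TCG_TYPE_I32,
 * 0x7fffffff, 1) yields 0xffffffff80000000: 32-bit results are
 * kept sign-extended, matching how constants are represented
 * elsewhere in the optimizer.
 */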

static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

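/*
 * Return the fixed result of condition C when both operands are known
 * to be equal: 0 or 1 for the ordinary conditions, or -1 for the TST
 * conditions, which do not simplify on equality alone.
 */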
static int do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        return -1;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond(TCGType type, TCGArg x,
                                    TCGArg y, TCGCond c)
{
    if (arg_is_const(x) && arg_is_const(y)) {
        uint64_t xv = arg_info(x)->val;
        uint64_t yv = arg_info(y)->val;

        switch (type) {
        case TCG_TYPE_I32:
            return do_constant_folding_cond_32(xv, yv, c);
        case TCG_TYPE_I64:
            return do_constant_folding_cond_64(xv, yv, c);
        default:
            /* Only scalar comparisons are optimizable */
            return -1;
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const_val(y, 0)) {
        switch (c) {
        case TCG_COND_LTU:
        case TCG_COND_TSTNE:
            return 0;
        case TCG_COND_GEU:
        case TCG_COND_TSTEQ:
            return 1;
        default:
            return -1;
        }
    }
    return -1;
}

/**
 * swap_commutative:
 * @dest: TCGArg of the destination argument, or NO_DEST.
 * @p1: first paired argument
 * @p2: second paired argument
 *
 * If *@p1 is a constant and *@p2 is not, swap.
 * If *@p2 matches @dest, swap.
 * Return true if a swap was performed.
 */

#define NO_DEST  temp_arg(NULL)

static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += arg_is_const(a1);
    sum -= arg_is_const(a2);

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}
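
/*
 * For example, "add_i32 r, $5, x" has its inputs swapped to "x, $5"
 * so that the constant is second; "add_i32 r, x, r" is swapped to
 * "r, x", preferring the form "op a, a, b".
 */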

static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += arg_is_const(p1[0]);
    sum += arg_is_const(p1[1]);
    sum -= arg_is_const(p2[0]);
    sum -= arg_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest,
                                     TCGArg *p1, TCGArg *p2, TCGArg *pcond)
{
    TCGCond cond;
    bool swap;
    int r;

    swap = swap_commutative(dest, p1, p2);
    cond = *pcond;
    if (swap) {
        *pcond = cond = tcg_swap_cond(cond);
    }

    r = do_constant_folding_cond(ctx->type, *p1, *p2, cond);
    if (r >= 0) {
        return r;
    }
    if (!is_tst_cond(cond)) {
        return -1;
    }

    /*
     * TSTNE x,x -> NE x,0
     * TSTNE x,-1 -> NE x,0
     */
    if (args_are_copies(*p1, *p2) || arg_is_const_val(*p2, -1)) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
        return -1;
    }

    /* TSTNE x,sign -> LT x,0 */
    if (arg_is_const_val(*p2, (ctx->type == TCG_TYPE_I32
                               ? INT32_MIN : INT64_MIN))) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_ltge_cond(cond);
        return -1;
    }

    /* Expand to AND with a temporary if no backend support. */
    if (!TCG_TARGET_HAS_tst) {
        TCGOpcode and_opc = (ctx->type == TCG_TYPE_I32
                             ? INDEX_op_and_i32 : INDEX_op_and_i64);
        TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, and_opc, 3);
        TCGArg tmp = arg_new_temp(ctx);

        op2->args[0] = tmp;
        op2->args[1] = *p1;
        op2->args[2] = *p2;

        *p1 = tmp;
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
    }
    return -1;
}
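
/*
 * Example: a 32-bit "TSTNE x, 0x80000000" tests exactly the sign
 * bit, so it is rewritten above as "LT x, 0"; likewise "TSTNE x, x"
 * and "TSTNE x, -1" reduce to "NE x, 0", because x & x == x and
 * x & -1 == x.
 */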

static int do_constant_folding_cond2(OptContext *ctx, TCGOp *op, TCGArg *args)
{
    TCGArg al, ah, bl, bh;
    TCGCond c;
    bool swap;
    int r;

    swap = swap_commutative2(args, args + 2);
    c = args[4];
    if (swap) {
        args[4] = c = tcg_swap_cond(c);
    }

    al = args[0];
    ah = args[1];
    bl = args[2];
    bh = args[3];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);

            r = do_constant_folding_cond_64(a, b, c);
            if (r >= 0) {
                return r;
            }
        }

        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
            case TCG_COND_TSTNE:
                return 0;
            case TCG_COND_GEU:
            case TCG_COND_TSTEQ:
                return 1;
            default:
                break;
            }
        }

        /* TSTNE x,-1 -> NE x,0 */
        if (b == -1 && is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }

        /* TSTNE x,sign -> LT x,0 */
        if (b == INT64_MIN && is_tst_cond(c)) {
            /* bl must be 0, so copy that to bh */
            args[3] = bl;
            args[4] = tcg_tst_ltge_cond(c);
            return -1;
        }
    }

    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        r = do_constant_folding_cond_eq(c);
        if (r >= 0) {
            return r;
        }

        /* TSTNE x,x -> NE x,0 */
        if (is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }
    }

    /* Expand to AND with a temporary if no backend support. */
    if (!TCG_TARGET_HAS_tst && is_tst_cond(c)) {
        TCGOp *op1 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3);
        TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3);
        TCGArg t1 = arg_new_temp(ctx);
        TCGArg t2 = arg_new_temp(ctx);

        op1->args[0] = t1;
        op1->args[1] = al;
        op1->args[2] = bl;
        op2->args[0] = t2;
        op2->args[1] = ah;
        op2->args[2] = bh;

        args[0] = t1;
        args[1] = t2;
        args[3] = args[2] = arg_new_constant(ctx, 0);
        args[4] = tcg_tst_eqne_cond(c);
    }
    return -1;
}

static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
{
    for (int i = 0; i < nb_args; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        init_ts_info(ctx, ts);
    }
}

static void copy_propagate(OptContext *ctx, TCGOp *op,
                           int nb_oargs, int nb_iargs)
{
    for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts_is_copy(ts)) {
            op->args[i] = temp_arg(find_better_copy(ts));
        }
    }
}

static void finish_bb(OptContext *ctx)
{
    /* We only optimize memory barriers across basic blocks. */
    ctx->prev_mb = NULL;
}

static void finish_ebb(OptContext *ctx)
{
    finish_bb(ctx);
    /* We only optimize across extended basic blocks. */
    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
    remove_mem_copy_all(ctx);
}

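/*
 * Reset the known-value state of every output of OP, then record
 * the z/s masks computed by the fold_* handler for the first
 * output argument.
 */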
static void finish_folding(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    int i, nb_oargs;

    nb_oargs = def->nb_oargs;
    for (i = 0; i < nb_oargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        reset_ts(ctx, ts);
        /*
         * Save the corresponding known-zero/sign bits mask for the
         * first output argument (only one supported so far).
         */
        if (i == 0) {
            ts_info(ts)->z_mask = ctx->z_mask;
            ts_info(ts)->s_mask = ctx->s_mask;
        }
    }
}

/*
 * The fold_* functions return true when processing is complete,
 * usually by folding the operation to a constant or to a copy,
 * and calling tcg_opt_gen_{mov,movi}. They may do other things,
 * like collect information about the value produced, for use in
 * optimizing a subsequent operation.
 *
 * These first fold_* functions are all helpers, used by other
 * folders for more specific operations.
 */

static bool fold_const1(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = do_constant_folding(op->opc, ctx->type, t, 0);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_const2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}

static bool fold_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return false;
}

static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return fold_const2(ctx, op);
}

static bool fold_masks(OptContext *ctx, TCGOp *op)
{
    uint64_t a_mask = ctx->a_mask;
    uint64_t z_mask = ctx->z_mask;
    uint64_t s_mask = ctx->s_mask;

    /*
     * 32-bit ops generate 32-bit results, which for the purpose of
     * simplifying tcg are sign-extended. Certainly that's how we
     * represent our constants elsewhere. Note that the bits will
     * be reset properly for a 64-bit value when encountering the
     * type changing opcodes.
     */
    if (ctx->type == TCG_TYPE_I32) {
        a_mask = (int32_t)a_mask;
        z_mask = (int32_t)z_mask;
        s_mask |= MAKE_64BIT_MASK(32, 32);
        ctx->z_mask = z_mask;
        ctx->s_mask = s_mask;
    }

    if (z_mask == 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
    }
    if (a_mask == 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}
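
/*
 * For example, for "and r, x, 0xff" where x is already known to fit
 * in 8 bits (z_mask == 0xff), a_mask == z1 & ~z2 == 0, so fold_masks
 * converts the AND into a simple copy of x.
 */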

/*
 * Convert @op to NOT, if NOT is supported by the host.
 * Return true if the conversion is successful, which will still
 * indicate that the processing is complete.
 */
static bool fold_not(OptContext *ctx, TCGOp *op);
static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
{
    TCGOpcode not_op;
    bool have_not;

    switch (ctx->type) {
    case TCG_TYPE_I32:
        not_op = INDEX_op_not_i32;
        have_not = TCG_TARGET_HAS_not_i32;
        break;
    case TCG_TYPE_I64:
        not_op = INDEX_op_not_i64;
        have_not = TCG_TARGET_HAS_not_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        not_op = INDEX_op_not_vec;
        have_not = TCG_TARGET_HAS_not_vec;
        break;
    default:
        g_assert_not_reached();
    }
    if (have_not) {
        op->opc = not_op;
        op->args[1] = op->args[idx];
        return fold_not(ctx, op);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to @i. */
static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to NOT. */
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return fold_to_not(ctx, op, 2);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to @i. */
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to identity. */
static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to NOT. */
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return fold_to_not(ctx, op, 1);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to @i. */
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to identity. */
static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/*
 * These outermost fold_<op> functions are sorted alphabetically.
 *
 * The ordering of the transformations should be:
 * 1) those that produce a constant
 * 2) those that produce a copy
 * 3) those that produce information about the result value.
 */

static bool fold_add(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return false;
}

/* We cannot as yet do_constant_folding with vectors. */
static bool fold_add_vec(OptContext *ctx, TCGOp *op)
{
    if (fold_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
{
    bool a_const = arg_is_const(op->args[2]) && arg_is_const(op->args[3]);
    bool b_const = arg_is_const(op->args[4]) && arg_is_const(op->args[5]);

    if (a_const && b_const) {
        uint64_t al = arg_info(op->args[2])->val;
        uint64_t ah = arg_info(op->args[3])->val;
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;
        TCGArg rl, rh;
        TCGOp *op2;

        if (ctx->type == TCG_TYPE_I32) {
            uint64_t a = deposit64(al, 32, 32, ah);
            uint64_t b = deposit64(bl, 32, 32, bh);

            if (add) {
                a += b;
            } else {
                a -= b;
            }

            al = sextract64(a, 0, 32);
            ah = sextract64(a, 32, 32);
        } else {
            Int128 a = int128_make128(al, ah);
            Int128 b = int128_make128(bl, bh);

            if (add) {
                a = int128_add(a, b);
            } else {
                a = int128_sub(a, b);
            }

            al = int128_getlo(a);
            ah = int128_gethi(a);
        }

        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2);

        tcg_opt_gen_movi(ctx, op, rl, al);
        tcg_opt_gen_movi(ctx, op2, rh, ah);
        return true;
    }

    /* Fold sub2 r,x,i to add2 r,x,-i */
    if (!add && b_const) {
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;

        /* Negate the two parts without assembling and disassembling. */
        bl = -bl;
        bh = ~bh + !bl;

        op->opc = (ctx->type == TCG_TYPE_I32
                   ? INDEX_op_add2_i32 : INDEX_op_add2_i64);
        op->args[4] = arg_new_constant(ctx, bl);
        op->args[5] = arg_new_constant(ctx, bh);
    }
    return false;
}
Richard Henderson9531c072021-08-26 06:51:39 -07001273static bool fold_add2(OptContext *ctx, TCGOp *op)
Richard Hendersone3f7dc22021-08-24 10:30:38 -07001274{
Richard Henderson7a2f7082021-08-26 07:06:39 -07001275 /* Note that the high and low parts may be independently swapped. */
1276 swap_commutative(op->args[0], &op->args[2], &op->args[4]);
1277 swap_commutative(op->args[1], &op->args[3], &op->args[5]);
1278
Richard Henderson9531c072021-08-26 06:51:39 -07001279 return fold_addsub2(ctx, op, true);
Richard Hendersone3f7dc22021-08-24 10:30:38 -07001280}
1281
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001282static bool fold_and(OptContext *ctx, TCGOp *op)
1283{
Richard Hendersonfae450b2021-08-25 22:42:19 -07001284 uint64_t z1, z2;
1285
Richard Henderson7a2f7082021-08-26 07:06:39 -07001286 if (fold_const2_commutative(ctx, op) ||
Richard Hendersone8679952021-08-25 13:19:52 -07001287 fold_xi_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001288 fold_xi_to_x(ctx, op, -1) ||
Richard Hendersonca7bb042021-08-25 13:14:21 -07001289 fold_xx_to_x(ctx, op)) {
1290 return true;
1291 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001292
1293 z1 = arg_info(op->args[1])->z_mask;
1294 z2 = arg_info(op->args[2])->z_mask;
1295 ctx->z_mask = z1 & z2;
1296
1297 /*
Richard Henderson3f2b1f82021-08-26 13:08:54 -07001298 * Sign repetitions are perforce all identical, whether they are 1 or 0.
1299 * Bitwise operations preserve the relative quantity of the repetitions.
1300 */
1301 ctx->s_mask = arg_info(op->args[1])->s_mask
1302 & arg_info(op->args[2])->s_mask;
1303
1304 /*
Richard Hendersonfae450b2021-08-25 22:42:19 -07001305 * Known-zeros does not imply known-ones. Therefore unless
1306 * arg2 is constant, we can't infer affected bits from it.
1307 */
1308 if (arg_is_const(op->args[2])) {
1309 ctx->a_mask = z1 & ~z2;
1310 }
1311
1312 return fold_masks(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001313}
1314
1315static bool fold_andc(OptContext *ctx, TCGOp *op)
1316{
Richard Hendersonfae450b2021-08-25 22:42:19 -07001317 uint64_t z1;
1318
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07001319 if (fold_const2(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001320 fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001321 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001322 fold_ix_to_not(ctx, op, -1)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07001323 return true;
1324 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001325
1326 z1 = arg_info(op->args[1])->z_mask;
1327
1328 /*
1329 * Known-zeros does not imply known-ones. Therefore unless
1330 * arg2 is constant, we can't infer anything from it.
1331 */
1332 if (arg_is_const(op->args[2])) {
1333 uint64_t z2 = ~arg_info(op->args[2])->z_mask;
1334 ctx->a_mask = z1 & ~z2;
1335 z1 &= z2;
1336 }
1337 ctx->z_mask = z1;
1338
Richard Henderson3f2b1f82021-08-26 13:08:54 -07001339 ctx->s_mask = arg_info(op->args[1])->s_mask
1340 & arg_info(op->args[2])->s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001341 return fold_masks(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001342}
1343
Richard Henderson079b0802021-08-24 09:30:59 -07001344static bool fold_brcond(OptContext *ctx, TCGOp *op)
1345{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001346 int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
Richard Henderson246c4b72023-10-24 16:36:50 -07001347 &op->args[1], &op->args[2]);
Richard Henderson079b0802021-08-24 09:30:59 -07001348 if (i == 0) {
1349 tcg_op_remove(ctx->tcg, op);
1350 return true;
1351 }
1352 if (i > 0) {
1353 op->opc = INDEX_op_br;
1354 op->args[0] = op->args[3];
Richard Henderson15268552024-12-08 07:45:11 -06001355 finish_ebb(ctx);
1356 } else {
1357 finish_bb(ctx);
Richard Henderson079b0802021-08-24 09:30:59 -07001358 }
Richard Henderson15268552024-12-08 07:45:11 -06001359 return true;
Richard Henderson079b0802021-08-24 09:30:59 -07001360}
1361
static bool fold_brcond2(OptContext *ctx, TCGOp *op)
{
    TCGCond cond;
    TCGArg label;
    int i, inv = 0;

    i = do_constant_folding_cond2(ctx, op, &op->args[0]);
    cond = op->args[4];
    label = op->args[5];
    if (i >= 0) {
        goto do_brcond_const;
    }

    switch (cond) {
    case TCG_COND_LT:
    case TCG_COND_GE:
        /*
         * Simplify LT/GE comparisons vs zero to a single compare
         * vs the high word of the input.
         */
        if (arg_is_const_val(op->args[2], 0) &&
            arg_is_const_val(op->args[3], 0)) {
            goto do_brcond_high;
        }
        break;

    case TCG_COND_NE:
        inv = 1;
        QEMU_FALLTHROUGH;
    case TCG_COND_EQ:
        /*
         * Simplify EQ/NE comparisons where one of the pairs
         * can be simplified.
         */
        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
                                     op->args[2], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            goto do_brcond_high;
        }

        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
                                     op->args[3], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            goto do_brcond_low;
        }
        break;

    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        if (arg_is_const_val(op->args[2], 0)) {
            goto do_brcond_high;
        }
        if (arg_is_const_val(op->args[3], 0)) {
            goto do_brcond_low;
        }
        break;

    default:
        break;

    do_brcond_low:
        op->opc = INDEX_op_brcond_i32;
        op->args[1] = op->args[2];
        op->args[2] = cond;
        op->args[3] = label;
        return fold_brcond(ctx, op);

    do_brcond_high:
        op->opc = INDEX_op_brcond_i32;
        op->args[0] = op->args[1];
        op->args[1] = op->args[3];
        op->args[2] = cond;
        op->args[3] = label;
        return fold_brcond(ctx, op);

    do_brcond_const:
        if (i == 0) {
            tcg_op_remove(ctx->tcg, op);
            return true;
        }
        op->opc = INDEX_op_br;
        op->args[0] = label;
        finish_ebb(ctx);
        return true;
    }

    finish_bb(ctx);
    return true;
}

static bool fold_bswap(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, s_mask, sign;

    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    z_mask = arg_info(op->args[1])->z_mask;

    switch (op->opc) {
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        z_mask = bswap16(z_mask);
        sign = INT16_MIN;
        break;
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
        z_mask = bswap32(z_mask);
        sign = INT32_MIN;
        break;
    case INDEX_op_bswap64_i64:
        z_mask = bswap64(z_mask);
        sign = INT64_MIN;
        break;
    default:
        g_assert_not_reached();
    }
    s_mask = smask_from_zmask(z_mask);

    switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
    case TCG_BSWAP_OZ:
        break;
    case TCG_BSWAP_OS:
        /* If the sign bit may be 1, force all the bits above to 1. */
        if (z_mask & sign) {
            z_mask |= sign;
            s_mask = sign << 1;
        }
        break;
    default:
        /* The high bits are undefined: force all bits above the sign to 1. */
        z_mask |= sign << 1;
        s_mask = 0;
        break;
    }
    ctx->z_mask = z_mask;
    ctx->s_mask = s_mask;

    return fold_masks(ctx, op);
}

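/*
 * Mask arithmetic above, by example (illustrative values): for
 * bswap16_i32 with TCG_BSWAP_OS and input z_mask 0x00ff, the swapped
 * z_mask is 0xff00; bit 15 may be 1, so z_mask gains all bits from 15
 * up and s_mask records sign repetitions only from bit 16 up.  With
 * input z_mask 0xff00 the swap yields 0x00ff, the sign bit is provably
 * zero, and no widening is needed.
 */
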
static bool fold_call(OptContext *ctx, TCGOp *op)
{
    TCGContext *s = ctx->tcg;
    int nb_oargs = TCGOP_CALLO(op);
    int nb_iargs = TCGOP_CALLI(op);
    int flags, i;

    init_arguments(ctx, op, nb_oargs + nb_iargs);
    copy_propagate(ctx, op, nb_oargs, nb_iargs);

    /* If the function reads or writes globals, reset temp data. */
    flags = tcg_call_flags(op);
    if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
        int nb_globals = s->nb_globals;

        for (i = 0; i < nb_globals; i++) {
            if (test_bit(i, ctx->temps_used.l)) {
                reset_ts(ctx, &ctx->tcg->temps[i]);
            }
        }
    }

    /* If the function has side effects, reset mem data. */
    if (!(flags & TCG_CALL_NO_SIDE_EFFECTS)) {
        remove_mem_copy_all(ctx);
    }

    /* Reset temp data for outputs. */
    for (i = 0; i < nb_oargs; i++) {
        reset_temp(ctx, op->args[i]);
    }

    /* Stop optimizing MB across calls. */
    ctx->prev_mb = NULL;
    return true;
}

static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask;

    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        if (t != 0) {
            t = do_constant_folding(op->opc, ctx->type, t, 0);
            return tcg_opt_gen_movi(ctx, op, op->args[0], t);
        }
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        z_mask = 31;
        break;
    case TCG_TYPE_I64:
        z_mask = 63;
        break;
    default:
        g_assert_not_reached();
    }
    ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
    ctx->s_mask = smask_from_zmask(ctx->z_mask);
    return false;
}

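/*
 * Rationale for the z_mask above: with a nonzero input, clz/ctz of an
 * N-bit value yields at most N-1, so 31 or 63 bounds the result; with
 * a zero input, the result is the fallback value in args[2], hence its
 * z_mask is ORed in as well.
 */
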
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
{
    if (fold_const1(ctx, op)) {
        return true;
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        ctx->z_mask = 32 | 31;
        break;
    case TCG_TYPE_I64:
        ctx->z_mask = 64 | 63;
        break;
    default:
        g_assert_not_reached();
    }
    ctx->s_mask = smask_from_zmask(ctx->z_mask);
    return false;
}

static bool fold_deposit(OptContext *ctx, TCGOp *op)
{
    TCGOpcode and_opc;

    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = deposit64(t1, op->args[3], op->args[4], t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        and_opc = INDEX_op_and_i32;
        break;
    case TCG_TYPE_I64:
        and_opc = INDEX_op_and_i64;
        break;
    default:
        g_assert_not_reached();
    }

    /* Inserting a value into zero at offset 0. */
    if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
        uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);

        op->opc = and_opc;
        op->args[1] = op->args[2];
        op->args[2] = arg_new_constant(ctx, mask);
        ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
        return false;
    }

    /* Inserting zero into a value. */
    if (arg_is_const_val(op->args[2], 0)) {
        uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);

        op->opc = and_opc;
        op->args[2] = arg_new_constant(ctx, mask);
        ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
        return false;
    }

    ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
                            op->args[3], op->args[4],
                            arg_info(op->args[2])->z_mask);
    return false;
}

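/*
 * The two special cases above, by example (illustrative operands):
 *   deposit_i32 r, zero, x, 0, 8   ->  and_i32 r, x, 0xff
 *   deposit_i32 r, x, zero, 8, 4   ->  and_i32 r, x, ~0xf00
 * Both replace the deposit with a plain mask.
 */
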
static bool fold_divide(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xi_to_x(ctx, op, 1)) {
        return true;
    }
    return false;
}

static bool fold_dup(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;
        t = dup_const(TCGOP_VECE(op), t);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_dup2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
                               arg_info(op->args[2])->val);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    if (args_are_copies(op->args[1], op->args[2])) {
        op->opc = INDEX_op_dup_vec;
        TCGOP_VECE(op) = MO_32;
    }
    return false;
}

static bool fold_eqv(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_xi_to_not(ctx, op, 0)) {
        return true;
    }

    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;
    return false;
}

static bool fold_extract(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask_old, z_mask;
    int pos = op->args[2];
    int len = op->args[3];

    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = extract64(t, pos, len);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    z_mask_old = arg_info(op->args[1])->z_mask;
    z_mask = extract64(z_mask_old, pos, len);
    if (pos == 0) {
        ctx->a_mask = z_mask_old ^ z_mask;
    }
    ctx->z_mask = z_mask;
    ctx->s_mask = smask_from_zmask(z_mask);

    return fold_masks(ctx, op);
}

static bool fold_extract2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t v1 = arg_info(op->args[1])->val;
        uint64_t v2 = arg_info(op->args[2])->val;
        int shr = op->args[3];

        if (op->opc == INDEX_op_extract2_i64) {
            v1 >>= shr;
            v2 <<= 64 - shr;
        } else {
            v1 = (uint32_t)v1 >> shr;
            v2 = (uint64_t)((int32_t)v2 << (32 - shr));
        }
        return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
    }
    return false;
}

static bool fold_exts(OptContext *ctx, TCGOp *op)
{
    uint64_t s_mask_old, s_mask, z_mask, sign;
    bool type_change = false;

    if (fold_const1(ctx, op)) {
        return true;
    }

    z_mask = arg_info(op->args[1])->z_mask;
    s_mask = arg_info(op->args[1])->s_mask;
    s_mask_old = s_mask;

    switch (op->opc) {
    CASE_OP_32_64(ext8s):
        sign = INT8_MIN;
        z_mask = (uint8_t)z_mask;
        break;
    CASE_OP_32_64(ext16s):
        sign = INT16_MIN;
        z_mask = (uint16_t)z_mask;
        break;
    case INDEX_op_ext_i32_i64:
        type_change = true;
        QEMU_FALLTHROUGH;
    case INDEX_op_ext32s_i64:
        sign = INT32_MIN;
        z_mask = (uint32_t)z_mask;
        break;
    default:
        g_assert_not_reached();
    }

    if (z_mask & sign) {
        z_mask |= sign;
    }
    s_mask |= sign << 1;

    ctx->z_mask = z_mask;
    ctx->s_mask = s_mask;
    if (!type_change) {
        ctx->a_mask = s_mask & ~s_mask_old;
    }

    return fold_masks(ctx, op);
}

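/*
 * About a_mask above: it tracks which bits the operation may actually
 * change.  For ext8s, if the input's s_mask already marks bits 7 and up
 * as sign repetitions, then s_mask & ~s_mask_old is 0, the extension is
 * a no-op, and fold_masks() (defined earlier in this file) can reduce
 * the op to a plain mov.
 */
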
static bool fold_extu(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask_old, z_mask;
    bool type_change = false;

    if (fold_const1(ctx, op)) {
        return true;
    }

    z_mask_old = z_mask = arg_info(op->args[1])->z_mask;

    switch (op->opc) {
    CASE_OP_32_64(ext8u):
        z_mask = (uint8_t)z_mask;
        break;
    CASE_OP_32_64(ext16u):
        z_mask = (uint16_t)z_mask;
        break;
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extu_i32_i64:
        type_change = true;
        QEMU_FALLTHROUGH;
    case INDEX_op_ext32u_i64:
        z_mask = (uint32_t)z_mask;
        break;
    case INDEX_op_extrh_i64_i32:
        type_change = true;
        z_mask >>= 32;
        break;
    default:
        g_assert_not_reached();
    }

    ctx->z_mask = z_mask;
    ctx->s_mask = smask_from_zmask(z_mask);
    if (!type_change) {
        ctx->a_mask = z_mask_old ^ z_mask;
    }
    return fold_masks(ctx, op);
}

static bool fold_mb(OptContext *ctx, TCGOp *op)
{
    /* Eliminate duplicate and redundant fence instructions. */
    if (ctx->prev_mb) {
        /*
         * Merge two barriers of the same type into one,
         * or a weaker barrier into a stronger one,
         * or two weaker barriers into a stronger one.
         *   mb X; mb Y => mb X|Y
         *   mb; strl => mb; st
         *   ldaq; mb => ld; mb
         *   ldaq; strl => ld; mb; st
         * Other combinations are also merged into a strong
         * barrier.  This is stricter than specified but for
         * the purposes of TCG is better than not optimizing.
         */
        ctx->prev_mb->args[0] |= op->args[0];
        tcg_op_remove(ctx->tcg, op);
    } else {
        ctx->prev_mb = op;
    }
    return true;
}

static bool fold_mov(OptContext *ctx, TCGOp *op)
{
    return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
}

static bool fold_movcond(OptContext *ctx, TCGOp *op)
{
    int i;

    /* If true and false values are the same, eliminate the cmp. */
    if (args_are_copies(op->args[3], op->args[4])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
    }

    /*
     * Canonicalize the "false" input reg to match the destination reg so
     * that the tcg backend can implement a "move if true" operation.
     */
    if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
        op->args[5] = tcg_invert_cond(op->args[5]);
    }

    i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[1],
                                  &op->args[2], &op->args[5]);
    if (i >= 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
    }

    ctx->z_mask = arg_info(op->args[3])->z_mask
                | arg_info(op->args[4])->z_mask;
    ctx->s_mask = arg_info(op->args[3])->s_mask
                & arg_info(op->args[4])->s_mask;

    if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
        uint64_t tv = arg_info(op->args[3])->val;
        uint64_t fv = arg_info(op->args[4])->val;
        TCGOpcode opc, negopc = 0;
        TCGCond cond = op->args[5];

        switch (ctx->type) {
        case TCG_TYPE_I32:
            opc = INDEX_op_setcond_i32;
            if (TCG_TARGET_HAS_negsetcond_i32) {
                negopc = INDEX_op_negsetcond_i32;
            }
            tv = (int32_t)tv;
            fv = (int32_t)fv;
            break;
        case TCG_TYPE_I64:
            opc = INDEX_op_setcond_i64;
            if (TCG_TARGET_HAS_negsetcond_i64) {
                negopc = INDEX_op_negsetcond_i64;
            }
            break;
        default:
            g_assert_not_reached();
        }

        if (tv == 1 && fv == 0) {
            op->opc = opc;
            op->args[3] = cond;
        } else if (fv == 1 && tv == 0) {
            op->opc = opc;
            op->args[3] = tcg_invert_cond(cond);
        } else if (negopc) {
            if (tv == -1 && fv == 0) {
                op->opc = negopc;
                op->args[3] = cond;
            } else if (fv == -1 && tv == 0) {
                op->opc = negopc;
                op->args[3] = tcg_invert_cond(cond);
            }
        }
    }
    return false;
}

static bool fold_mul(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xi_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 1)) {
        return true;
    }
    return false;
}

static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_i(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_multiply2(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[2], &op->args[3]);

    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
        uint64_t a = arg_info(op->args[2])->val;
        uint64_t b = arg_info(op->args[3])->val;
        uint64_t h, l;
        TCGArg rl, rh;
        TCGOp *op2;

        switch (op->opc) {
        case INDEX_op_mulu2_i32:
            l = (uint64_t)(uint32_t)a * (uint32_t)b;
            h = (int32_t)(l >> 32);
            l = (int32_t)l;
            break;
        case INDEX_op_muls2_i32:
            l = (int64_t)(int32_t)a * (int32_t)b;
            h = l >> 32;
            l = (int32_t)l;
            break;
        case INDEX_op_mulu2_i64:
            mulu64(&l, &h, a, b);
            break;
        case INDEX_op_muls2_i64:
            muls64(&l, &h, a, b);
            break;
        default:
            g_assert_not_reached();
        }

        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2);

        tcg_opt_gen_movi(ctx, op, rl, l);
        tcg_opt_gen_movi(ctx, op2, rh, h);
        return true;
    }
    return false;
}

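/*
 * Constant folding above, by example (illustrative values): mulu2_i32
 * with inputs 0x80000000 and 2 produces the 64-bit product
 * 0x1_0000_0000, so the pair of outputs becomes movi rl, 0 and
 * movi rh, 1, and the multiply disappears entirely.
 */
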
static bool fold_nand(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_not(ctx, op, -1)) {
        return true;
    }

    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;
    return false;
}

static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
{
    /* Set to 1 all bits to the left of the rightmost. */
    uint64_t z_mask = arg_info(op->args[1])->z_mask;
    ctx->z_mask = -(z_mask & -z_mask);

    /*
     * Because of fold_sub_to_neg, we want to always return true,
     * via finish_folding.
     */
    finish_folding(ctx, op);
    return true;
}

static bool fold_neg(OptContext *ctx, TCGOp *op)
{
    return fold_const1(ctx, op) || fold_neg_no_const(ctx, op);
}

static bool fold_nor(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_not(ctx, op, 0)) {
        return true;
    }

    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;
    return false;
}

static bool fold_not(OptContext *ctx, TCGOp *op)
{
    if (fold_const1(ctx, op)) {
        return true;
    }

    ctx->s_mask = arg_info(op->args[1])->s_mask;

    /* Because of fold_to_not, we want to always return true, via finish. */
    finish_folding(ctx, op);
    return true;
}

static bool fold_or(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_xx_to_x(ctx, op)) {
        return true;
    }

    ctx->z_mask = arg_info(op->args[1])->z_mask
                | arg_info(op->args[2])->z_mask;
    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;
    return fold_masks(ctx, op);
}

static bool fold_orc(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, -1) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_ix_to_not(ctx, op, 0)) {
        return true;
    }

    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;
    return false;
}

static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
    MemOp mop = get_memop(oi);
    int width = 8 * memop_size(mop);

    if (width < 64) {
        ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
        if (!(mop & MO_SIGN)) {
            ctx->z_mask = MAKE_64BIT_MASK(0, width);
            ctx->s_mask <<= 1;
        }
    }

    /* Opcodes that touch guest memory stop the mb optimization. */
    ctx->prev_mb = NULL;
    return false;
}

static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
{
    /* Opcodes that touch guest memory stop the mb optimization. */
    ctx->prev_mb = NULL;
    return false;
}

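/*
 * Mask bookkeeping for loads, by example (illustrative): a qemu_ld
 * with MO_UB yields z_mask = 0xff, so a following ext8u can be folded
 * away; with MO_SB the value is known sign-extended from bit 7 and
 * s_mask covers bits 8..63 instead.
 */
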
static bool fold_remainder(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
{
    uint64_t a_zmask, b_val;
    TCGCond cond;

    if (!arg_is_const(op->args[2])) {
        return false;
    }

    a_zmask = arg_info(op->args[1])->z_mask;
    b_val = arg_info(op->args[2])->val;
    cond = op->args[3];

    if (ctx->type == TCG_TYPE_I32) {
        a_zmask = (uint32_t)a_zmask;
        b_val = (uint32_t)b_val;
    }

    /*
     * A with only low bits set vs B with high bits set means that A < B.
     */
    if (a_zmask < b_val) {
        bool inv = false;

        switch (cond) {
        case TCG_COND_NE:
        case TCG_COND_LEU:
        case TCG_COND_LTU:
            inv = true;
            /* fall through */
        case TCG_COND_GTU:
        case TCG_COND_GEU:
        case TCG_COND_EQ:
            return tcg_opt_gen_movi(ctx, op, op->args[0], neg ? -inv : inv);
        default:
            break;
        }
    }

    /*
     * A with only lsb set is already boolean.
     */
    if (a_zmask <= 1) {
        bool convert = false;
        bool inv = false;

        switch (cond) {
        case TCG_COND_EQ:
            inv = true;
            /* fall through */
        case TCG_COND_NE:
            convert = (b_val == 0);
            break;
        case TCG_COND_LTU:
        case TCG_COND_TSTEQ:
            inv = true;
            /* fall through */
        case TCG_COND_GEU:
        case TCG_COND_TSTNE:
            convert = (b_val == 1);
            break;
        default:
            break;
        }
        if (convert) {
            TCGOpcode add_opc, xor_opc, neg_opc;

            if (!inv && !neg) {
                return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
            }

            switch (ctx->type) {
            case TCG_TYPE_I32:
                add_opc = INDEX_op_add_i32;
                neg_opc = INDEX_op_neg_i32;
                xor_opc = INDEX_op_xor_i32;
                break;
            case TCG_TYPE_I64:
                add_opc = INDEX_op_add_i64;
                neg_opc = INDEX_op_neg_i64;
                xor_opc = INDEX_op_xor_i64;
                break;
            default:
                g_assert_not_reached();
            }

            if (!inv) {
                op->opc = neg_opc;
            } else if (neg) {
                op->opc = add_opc;
                op->args[2] = arg_new_constant(ctx, -1);
            } else {
                op->opc = xor_opc;
                op->args[2] = arg_new_constant(ctx, 1);
            }
            return false;
        }
    }

    return false;
}

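/*
 * Two examples of the reductions above (illustrative operands): with
 * z_mask(x) = 0xff, "setcond_i32 r, x, 0x100, ltu" is always true and
 * becomes movi r, 1; with z_mask(x) <= 1, "setcond_i32 r, x, 0, ne" is
 * just the value itself and becomes mov r, x.
 */
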
static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
{
    TCGOpcode and_opc, sub_opc, xor_opc, neg_opc, shr_opc;
    TCGOpcode uext_opc = 0, sext_opc = 0;
    TCGCond cond = op->args[3];
    TCGArg ret, src1, src2;
    TCGOp *op2;
    uint64_t val;
    int sh;
    bool inv;

    if (!is_tst_cond(cond) || !arg_is_const(op->args[2])) {
        return;
    }

    src2 = op->args[2];
    val = arg_info(src2)->val;
    if (!is_power_of_2(val)) {
        return;
    }
    sh = ctz64(val);

    switch (ctx->type) {
    case TCG_TYPE_I32:
        and_opc = INDEX_op_and_i32;
        sub_opc = INDEX_op_sub_i32;
        xor_opc = INDEX_op_xor_i32;
        shr_opc = INDEX_op_shr_i32;
        neg_opc = INDEX_op_neg_i32;
        if (TCG_TARGET_extract_i32_valid(sh, 1)) {
            uext_opc = TCG_TARGET_HAS_extract_i32 ? INDEX_op_extract_i32 : 0;
            sext_opc = TCG_TARGET_HAS_sextract_i32 ? INDEX_op_sextract_i32 : 0;
        }
        break;
    case TCG_TYPE_I64:
        and_opc = INDEX_op_and_i64;
        sub_opc = INDEX_op_sub_i64;
        xor_opc = INDEX_op_xor_i64;
        shr_opc = INDEX_op_shr_i64;
        neg_opc = INDEX_op_neg_i64;
        if (TCG_TARGET_extract_i64_valid(sh, 1)) {
            uext_opc = TCG_TARGET_HAS_extract_i64 ? INDEX_op_extract_i64 : 0;
            sext_opc = TCG_TARGET_HAS_sextract_i64 ? INDEX_op_sextract_i64 : 0;
        }
        break;
    default:
        g_assert_not_reached();
    }

    ret = op->args[0];
    src1 = op->args[1];
    inv = cond == TCG_COND_TSTEQ;

    if (sh && sext_opc && neg && !inv) {
        op->opc = sext_opc;
        op->args[1] = src1;
        op->args[2] = sh;
        op->args[3] = 1;
        return;
    } else if (sh && uext_opc) {
        op->opc = uext_opc;
        op->args[1] = src1;
        op->args[2] = sh;
        op->args[3] = 1;
    } else {
        if (sh) {
            op2 = tcg_op_insert_before(ctx->tcg, op, shr_opc, 3);
            op2->args[0] = ret;
            op2->args[1] = src1;
            op2->args[2] = arg_new_constant(ctx, sh);
            src1 = ret;
        }
        op->opc = and_opc;
        op->args[1] = src1;
        op->args[2] = arg_new_constant(ctx, 1);
    }

    if (neg && inv) {
        op2 = tcg_op_insert_after(ctx->tcg, op, sub_opc, 3);
        op2->args[0] = ret;
        op2->args[1] = ret;
        op2->args[2] = arg_new_constant(ctx, 1);
    } else if (inv) {
        op2 = tcg_op_insert_after(ctx->tcg, op, xor_opc, 3);
        op2->args[0] = ret;
        op2->args[1] = ret;
        op2->args[2] = arg_new_constant(ctx, 1);
    } else if (neg) {
        op2 = tcg_op_insert_after(ctx->tcg, op, neg_opc, 2);
        op2->args[0] = ret;
        op2->args[1] = ret;
    }
}

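/*
 * Example of the rewrite above (illustrative operands): for
 * "setcond_i32 r, x, 0x10, tstne", the tested value is a power of two
 * with sh = 4, so on a host with extract this becomes
 * "extract_i32 r, x, 4, 1"; otherwise "shr r, x, 4; and r, r, 1".
 */
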
static bool fold_setcond(OptContext *ctx, TCGOp *op)
{
    int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
                                      &op->args[2], &op->args[3]);
    if (i >= 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }

    if (fold_setcond_zmask(ctx, op, false)) {
        return true;
    }
    fold_setcond_tst_pow2(ctx, op, false);

    ctx->z_mask = 1;
    ctx->s_mask = smask_from_zmask(1);
    return false;
}

static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
{
    int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
                                      &op->args[2], &op->args[3]);
    if (i >= 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
    }

    if (fold_setcond_zmask(ctx, op, true)) {
        return true;
    }
    fold_setcond_tst_pow2(ctx, op, true);

    /* Value is {0,-1} so all bits are repetitions of the sign. */
    ctx->s_mask = -1;
    return false;
}

static bool fold_setcond2(OptContext *ctx, TCGOp *op)
{
    TCGCond cond;
    int i, inv = 0;

    i = do_constant_folding_cond2(ctx, op, &op->args[1]);
    cond = op->args[5];
    if (i >= 0) {
        goto do_setcond_const;
    }

    switch (cond) {
    case TCG_COND_LT:
    case TCG_COND_GE:
        /*
         * Simplify LT/GE comparisons vs zero to a single compare
         * vs the high word of the input.
         */
        if (arg_is_const_val(op->args[3], 0) &&
            arg_is_const_val(op->args[4], 0)) {
            goto do_setcond_high;
        }
        break;

    case TCG_COND_NE:
        inv = 1;
        QEMU_FALLTHROUGH;
    case TCG_COND_EQ:
        /*
         * Simplify EQ/NE comparisons where one of the pairs
         * can be simplified.
         */
        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
                                     op->args[3], cond);
        switch (i ^ inv) {
        case 0:
            goto do_setcond_const;
        case 1:
            goto do_setcond_high;
        }

        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
                                     op->args[4], cond);
        switch (i ^ inv) {
        case 0:
            goto do_setcond_const;
        case 1:
            goto do_setcond_low;
        }
        break;

    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        if (arg_is_const_val(op->args[3], 0)) {
            goto do_setcond_high;
        }
        if (arg_is_const_val(op->args[4], 0)) {
            goto do_setcond_low;
        }
        break;

    default:
        break;

    do_setcond_low:
        op->args[2] = op->args[3];
        op->args[3] = cond;
        op->opc = INDEX_op_setcond_i32;
        return fold_setcond(ctx, op);

    do_setcond_high:
        op->args[1] = op->args[2];
        op->args[2] = op->args[4];
        op->args[3] = cond;
        op->opc = INDEX_op_setcond_i32;
        return fold_setcond(ctx, op);
    }

    ctx->z_mask = 1;
    ctx->s_mask = smask_from_zmask(1);
    return false;

 do_setcond_const:
    return tcg_opt_gen_movi(ctx, op, op->args[0], i);
}

static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
{
    /* Canonicalize the comparison to put immediate second. */
    if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
        op->args[3] = tcg_swap_cond(op->args[3]);
    }
    return false;
}

static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
{
    /* If true and false values are the same, eliminate the cmp. */
    if (args_are_copies(op->args[3], op->args[4])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
    }

    /* Canonicalize the comparison to put immediate second. */
    if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
        op->args[5] = tcg_swap_cond(op->args[5]);
    }
    /*
     * Canonicalize the "false" input reg to match the destination,
     * so that the tcg backend can implement "move if true".
     */
    if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
        op->args[5] = tcg_invert_cond(op->args[5]);
    }
    return false;
}

static bool fold_sextract(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, s_mask, s_mask_old;
    int pos = op->args[2];
    int len = op->args[3];

    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = sextract64(t, pos, len);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    z_mask = arg_info(op->args[1])->z_mask;
    z_mask = sextract64(z_mask, pos, len);
    ctx->z_mask = z_mask;

    s_mask_old = arg_info(op->args[1])->s_mask;
    s_mask = sextract64(s_mask_old, pos, len);
    s_mask |= MAKE_64BIT_MASK(len, 64 - len);
    ctx->s_mask = s_mask;

    if (pos == 0) {
        ctx->a_mask = s_mask & ~s_mask_old;
    }

    return fold_masks(ctx, op);
}

static bool fold_shift(OptContext *ctx, TCGOp *op)
{
    uint64_t s_mask, z_mask, sign;

    if (fold_const2(ctx, op) ||
        fold_ix_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }

    s_mask = arg_info(op->args[1])->s_mask;
    z_mask = arg_info(op->args[1])->z_mask;

    if (arg_is_const(op->args[2])) {
        int sh = arg_info(op->args[2])->val;

        ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);

        s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
        ctx->s_mask = smask_from_smask(s_mask);

        return fold_masks(ctx, op);
    }

    switch (op->opc) {
    CASE_OP_32_64(sar):
        /*
         * Arithmetic right shift will not reduce the number of
         * input sign repetitions.
         */
        ctx->s_mask = s_mask;
        break;
    CASE_OP_32_64(shr):
        /*
         * If the sign bit is known zero, then logical right shift
         * will not reduce the number of input sign repetitions.
         */
        sign = (s_mask & -s_mask) >> 1;
        if (sign && !(z_mask & sign)) {
            ctx->s_mask = s_mask;
        }
        break;
    default:
        break;
    }

    return false;
}

static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
{
    TCGOpcode neg_op;
    bool have_neg;

    if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
        return false;
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        neg_op = INDEX_op_neg_i32;
        have_neg = true;
        break;
    case TCG_TYPE_I64:
        neg_op = INDEX_op_neg_i64;
        have_neg = true;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        neg_op = INDEX_op_neg_vec;
        have_neg = (TCG_TARGET_HAS_neg_vec &&
                    tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
        break;
    default:
        g_assert_not_reached();
    }
    if (have_neg) {
        op->opc = neg_op;
        op->args[1] = op->args[2];
        return fold_neg_no_const(ctx, op);
    }
    return false;
}

/* We cannot as yet do_constant_folding with vectors. */
static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
{
    if (fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_sub_to_neg(ctx, op)) {
        return true;
    }
    return false;
}

static bool fold_sub(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
        return true;
    }

    /* Fold sub r,x,i to add r,x,-i */
    if (arg_is_const(op->args[2])) {
        uint64_t val = arg_info(op->args[2])->val;

        op->opc = (ctx->type == TCG_TYPE_I32
                   ? INDEX_op_add_i32 : INDEX_op_add_i64);
        op->args[2] = arg_new_constant(ctx, -val);
    }
    return false;
}

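/*
 * For instance (illustrative), "sub_i32 r, x, 5" becomes
 * "add_i32 r, x, -5".  One plausible motivation: later passes and
 * backends then see a single canonical add-with-constant form.
 */
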
static bool fold_sub2(OptContext *ctx, TCGOp *op)
{
    return fold_addsub2(ctx, op, false);
}

static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
{
    /* We can't do any folding with a load, but we can record bits. */
    switch (op->opc) {
    CASE_OP_32_64(ld8s):
        ctx->s_mask = MAKE_64BIT_MASK(8, 56);
        break;
    CASE_OP_32_64(ld8u):
        ctx->z_mask = MAKE_64BIT_MASK(0, 8);
        ctx->s_mask = MAKE_64BIT_MASK(9, 55);
        break;
    CASE_OP_32_64(ld16s):
        ctx->s_mask = MAKE_64BIT_MASK(16, 48);
        break;
    CASE_OP_32_64(ld16u):
        ctx->z_mask = MAKE_64BIT_MASK(0, 16);
        ctx->s_mask = MAKE_64BIT_MASK(17, 47);
        break;
    case INDEX_op_ld32s_i64:
        ctx->s_mask = MAKE_64BIT_MASK(32, 32);
        break;
    case INDEX_op_ld32u_i64:
        ctx->z_mask = MAKE_64BIT_MASK(0, 32);
        ctx->s_mask = MAKE_64BIT_MASK(33, 31);
        break;
    default:
        g_assert_not_reached();
    }
    return false;
}

static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
{
    TCGTemp *dst, *src;
    intptr_t ofs;
    TCGType type;

    if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
        return false;
    }

    type = ctx->type;
    ofs = op->args[2];
    dst = arg_temp(op->args[0]);
    src = find_mem_copy_for(ctx, type, ofs);
    if (src && src->base_type == type) {
        return tcg_opt_gen_mov(ctx, op, temp_arg(dst), temp_arg(src));
    }

    reset_ts(ctx, dst);
    record_mem_copy(ctx, type, dst, ofs, ofs + tcg_type_size(type) - 1);
    return true;
}

static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
{
    intptr_t ofs = op->args[2];
    intptr_t lm1;

    if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
        remove_mem_copy_all(ctx);
        return false;
    }

    switch (op->opc) {
    CASE_OP_32_64(st8):
        lm1 = 0;
        break;
    CASE_OP_32_64(st16):
        lm1 = 1;
        break;
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
        lm1 = 3;
        break;
    case INDEX_op_st_i64:
        lm1 = 7;
        break;
    case INDEX_op_st_vec:
        lm1 = tcg_type_size(ctx->type) - 1;
        break;
    default:
        g_assert_not_reached();
    }
    remove_mem_copy_in(ctx, ofs, ofs + lm1);
    return false;
}

static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
{
    TCGTemp *src;
    intptr_t ofs, last;
    TCGType type;

    if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
        fold_tcg_st(ctx, op);
        return false;
    }

    src = arg_temp(op->args[0]);
    ofs = op->args[2];
    type = ctx->type;

    /*
     * Eliminate duplicate stores of a constant.
     * This happens frequently when the target ISA zero-extends.
     */
    if (ts_is_const(src)) {
        TCGTemp *prev = find_mem_copy_for(ctx, type, ofs);
        if (src == prev) {
            tcg_op_remove(ctx->tcg, op);
            return true;
        }
    }

    last = ofs + tcg_type_size(type) - 1;
    remove_mem_copy_in(ctx, ofs, last);
    record_mem_copy(ctx, type, src, ofs, last);
    return false;
}

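/*
 * Memcopy tracking above, by example (illustrative offsets): after
 * "st_i32 t, env, 0x10", a later "ld_i32 r, env, 0x10" with no
 * intervening store in [0x10, 0x13] is replaced by "mov r, t", and a
 * repeated store of the same constant to 0x10 is deleted outright.
 */
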
static bool fold_xor(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_xi_to_not(ctx, op, -1)) {
        return true;
    }

    ctx->z_mask = arg_info(op->args[1])->z_mask
                | arg_info(op->args[2])->z_mask;
    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;
    return fold_masks(ctx, op);
}

static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
{
    /* If true and false values are the same, eliminate the cmp. */
    if (args_are_copies(op->args[2], op->args[3])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
    }

    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
        uint64_t tv = arg_info(op->args[2])->val;
        uint64_t fv = arg_info(op->args[3])->val;

        if (tv == -1 && fv == 0) {
            return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
        }
        if (tv == 0 && fv == -1) {
            if (TCG_TARGET_HAS_not_vec) {
                op->opc = INDEX_op_not_vec;
                return fold_not(ctx, op);
            } else {
                op->opc = INDEX_op_xor_vec;
                op->args[2] = arg_new_constant(ctx, -1);
                return fold_xor(ctx, op);
            }
        }
    }
    if (arg_is_const(op->args[2])) {
        uint64_t tv = arg_info(op->args[2])->val;
        if (tv == -1) {
            op->opc = INDEX_op_or_vec;
            op->args[2] = op->args[3];
            return fold_or(ctx, op);
        }
        if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
            op->opc = INDEX_op_andc_vec;
            op->args[2] = op->args[1];
            op->args[1] = op->args[3];
            return fold_andc(ctx, op);
        }
    }
    if (arg_is_const(op->args[3])) {
        uint64_t fv = arg_info(op->args[3])->val;
        if (fv == 0) {
            op->opc = INDEX_op_and_vec;
            return fold_and(ctx, op);
        }
        if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
            op->opc = INDEX_op_orc_vec;
            op->args[2] = op->args[1];
            op->args[1] = op->args[3];
            return fold_orc(ctx, op);
        }
    }
    return false;
}

/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
    int nb_temps, i;
    TCGOp *op, *op_next;
    OptContext ctx = { .tcg = s };

    QSIMPLEQ_INIT(&ctx.mem_free);

    /* Each temp has a TempOptInfo attached.
       If the temp holds a constant then its value is kept there.
       If the temp is a copy of other temps then the copies are
       available through the doubly linked circular list. */

2816 nb_temps = s->nb_temps;
Richard Henderson8f17a972020-03-30 19:52:02 -07002817 for (i = 0; i < nb_temps; ++i) {
2818 s->temps[i].state_ptr = NULL;
2819 }
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04002820
Richard Henderson15fa08f2017-11-02 15:19:14 +01002821 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07002822 TCGOpcode opc = op->opc;
Richard Henderson5cf32be2021-08-24 08:17:08 -07002823 const TCGOpDef *def;
Richard Henderson404a1482021-08-24 11:08:21 -07002824 bool done = false;
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07002825
Richard Henderson5cf32be2021-08-24 08:17:08 -07002826 /* Calls are special. */
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07002827 if (opc == INDEX_op_call) {
Richard Henderson5cf32be2021-08-24 08:17:08 -07002828 fold_call(&ctx, op);
2829 continue;
Richard Hendersoncf066672014-03-22 20:06:52 -07002830 }
Richard Henderson5cf32be2021-08-24 08:17:08 -07002831
2832 def = &tcg_op_defs[opc];
Richard Hendersonec5d4cb2021-08-24 08:20:27 -07002833 init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
2834 copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
Kirill Batuzov22613af2011-07-07 16:37:13 +04002835
Richard Henderson67f84c92021-08-25 08:00:20 -07002836 /* Pre-compute the type of the operation. */
2837 if (def->flags & TCG_OPF_VECTOR) {
2838 ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op);
2839 } else if (def->flags & TCG_OPF_64BIT) {
2840 ctx.type = TCG_TYPE_I64;
2841 } else {
2842 ctx.type = TCG_TYPE_I32;
2843 }
2844
Richard Henderson57fe5c62021-08-26 12:04:46 -07002845 /* Assume all bits affected, no bits known zero, no sign reps. */
Richard Hendersonfae450b2021-08-25 22:42:19 -07002846 ctx.a_mask = -1;
2847 ctx.z_mask = -1;
Richard Henderson57fe5c62021-08-26 12:04:46 -07002848 ctx.s_mask = 0;
Paolo Bonzini633f6502013-01-11 15:42:53 -08002849
Richard Henderson2cfac7f2021-08-25 13:05:43 -07002850 /*
2851 * Process each opcode.
2852 * Sorted alphabetically by opcode as much as possible.
2853 */
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07002854 switch (opc) {
Richard Hendersonc578ff12021-12-16 06:07:25 -08002855 CASE_OP_32_64(add):
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002856 done = fold_add(&ctx, op);
2857 break;
Richard Hendersonc578ff12021-12-16 06:07:25 -08002858 case INDEX_op_add_vec:
2859 done = fold_add_vec(&ctx, op);
2860 break;
Richard Henderson9531c072021-08-26 06:51:39 -07002861 CASE_OP_32_64(add2):
2862 done = fold_add2(&ctx, op);
Richard Hendersone3f7dc22021-08-24 10:30:38 -07002863 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002864 CASE_OP_32_64_VEC(and):
2865 done = fold_and(&ctx, op);
2866 break;
2867 CASE_OP_32_64_VEC(andc):
2868 done = fold_andc(&ctx, op);
2869 break;
Richard Henderson079b0802021-08-24 09:30:59 -07002870 CASE_OP_32_64(brcond):
2871 done = fold_brcond(&ctx, op);
2872 break;
Richard Henderson764d2ab2021-08-24 09:22:11 -07002873 case INDEX_op_brcond2_i32:
2874 done = fold_brcond2(&ctx, op);
2875 break;
Richard Henderson09bacdc2021-08-24 11:58:12 -07002876 CASE_OP_32_64(bswap16):
2877 CASE_OP_32_64(bswap32):
2878 case INDEX_op_bswap64_i64:
2879 done = fold_bswap(&ctx, op);
2880 break;
Richard Henderson30dd0bf2021-08-24 10:51:34 -07002881 CASE_OP_32_64(clz):
2882 CASE_OP_32_64(ctz):
2883 done = fold_count_zeros(&ctx, op);
2884 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002885 CASE_OP_32_64(ctpop):
2886 done = fold_ctpop(&ctx, op);
2887 break;
Richard Henderson1b1907b2021-08-24 10:47:04 -07002888 CASE_OP_32_64(deposit):
2889 done = fold_deposit(&ctx, op);
2890 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002891 CASE_OP_32_64(div):
2892 CASE_OP_32_64(divu):
2893 done = fold_divide(&ctx, op);
2894 break;
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07002895 case INDEX_op_dup_vec:
2896 done = fold_dup(&ctx, op);
2897 break;
2898 case INDEX_op_dup2_vec:
2899 done = fold_dup2(&ctx, op);
2900 break;
Richard Hendersoned523472021-12-16 11:17:46 -08002901 CASE_OP_32_64_VEC(eqv):
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002902 done = fold_eqv(&ctx, op);
2903 break;
Richard Hendersonb6617c82021-08-24 10:44:53 -07002904 CASE_OP_32_64(extract):
2905 done = fold_extract(&ctx, op);
2906 break;
Richard Hendersondcd08992021-08-24 10:41:39 -07002907 CASE_OP_32_64(extract2):
2908 done = fold_extract2(&ctx, op);
2909 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002910 CASE_OP_32_64(ext8s):
2911 CASE_OP_32_64(ext16s):
2912 case INDEX_op_ext32s_i64:
2913 case INDEX_op_ext_i32_i64:
2914 done = fold_exts(&ctx, op);
2915 break;
2916 CASE_OP_32_64(ext8u):
2917 CASE_OP_32_64(ext16u):
2918 case INDEX_op_ext32u_i64:
2919 case INDEX_op_extu_i32_i64:
2920 case INDEX_op_extrl_i64_i32:
2921 case INDEX_op_extrh_i64_i32:
2922 done = fold_extu(&ctx, op);
2923 break;
Richard Henderson57fe5c62021-08-26 12:04:46 -07002924 CASE_OP_32_64(ld8s):
Richard Hendersonfae450b2021-08-25 22:42:19 -07002925 CASE_OP_32_64(ld8u):
Richard Henderson57fe5c62021-08-26 12:04:46 -07002926 CASE_OP_32_64(ld16s):
Richard Hendersonfae450b2021-08-25 22:42:19 -07002927 CASE_OP_32_64(ld16u):
Richard Henderson57fe5c62021-08-26 12:04:46 -07002928 case INDEX_op_ld32s_i64:
Richard Hendersonfae450b2021-08-25 22:42:19 -07002929 case INDEX_op_ld32u_i64:
2930 done = fold_tcg_ld(&ctx, op);
2931 break;
Richard Hendersonab84dc32023-08-23 23:04:24 -07002932 case INDEX_op_ld_i32:
2933 case INDEX_op_ld_i64:
2934 case INDEX_op_ld_vec:
2935 done = fold_tcg_ld_memcopy(&ctx, op);
2936 break;
2937 CASE_OP_32_64(st8):
2938 CASE_OP_32_64(st16):
2939 case INDEX_op_st32_i64:
2940 done = fold_tcg_st(&ctx, op);
2941 break;
2942 case INDEX_op_st_i32:
2943 case INDEX_op_st_i64:
2944 case INDEX_op_st_vec:
2945 done = fold_tcg_st_memcopy(&ctx, op);
2946 break;
Richard Henderson3eefdf22021-08-25 11:06:43 -07002947 case INDEX_op_mb:
2948 done = fold_mb(&ctx, op);
2949 break;
        CASE_OP_32_64_VEC(mov):
            done = fold_mov(&ctx, op);
            break;
        CASE_OP_32_64(movcond):
            done = fold_movcond(&ctx, op);
            break;
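        /*
         * Multiplies: x*0 folds to 0 and constants fold directly;
         * the double-word muls2/mulu2 forms fold via 128-bit
         * arithmetic into two constant moves.
         */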
        CASE_OP_32_64(mul):
            done = fold_mul(&ctx, op);
            break;
        CASE_OP_32_64(mulsh):
        CASE_OP_32_64(muluh):
            done = fold_mul_highpart(&ctx, op);
            break;
        CASE_OP_32_64(muls2):
        CASE_OP_32_64(mulu2):
            done = fold_multiply2(&ctx, op);
            break;
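        /*
         * The remaining unary and bitwise ops fold constants and
         * apply the usual identities, e.g. "or r,x,0" -> "mov r,x".
         */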
        CASE_OP_32_64_VEC(nand):
            done = fold_nand(&ctx, op);
            break;
        CASE_OP_32_64(neg):
            done = fold_neg(&ctx, op);
            break;
        CASE_OP_32_64_VEC(nor):
            done = fold_nor(&ctx, op);
            break;
        CASE_OP_32_64_VEC(not):
            done = fold_not(&ctx, op);
            break;
        CASE_OP_32_64_VEC(or):
            done = fold_or(&ctx, op);
            break;
        CASE_OP_32_64_VEC(orc):
            done = fold_orc(&ctx, op);
            break;
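        /*
         * Guest memory accesses: the loaded value is unknown, and
         * any access to guest memory stops barrier merging, since a
         * barrier cannot be moved across it.
         */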
        case INDEX_op_qemu_ld_a32_i32:
        case INDEX_op_qemu_ld_a64_i32:
        case INDEX_op_qemu_ld_a32_i64:
        case INDEX_op_qemu_ld_a64_i64:
        case INDEX_op_qemu_ld_a32_i128:
        case INDEX_op_qemu_ld_a64_i128:
            done = fold_qemu_ld(&ctx, op);
            break;
        case INDEX_op_qemu_st8_a32_i32:
        case INDEX_op_qemu_st8_a64_i32:
        case INDEX_op_qemu_st_a32_i32:
        case INDEX_op_qemu_st_a64_i32:
        case INDEX_op_qemu_st_a32_i64:
        case INDEX_op_qemu_st_a64_i64:
        case INDEX_op_qemu_st_a32_i128:
        case INDEX_op_qemu_st_a64_i128:
            done = fold_qemu_st(&ctx, op);
            break;
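        /* Remainder folds when both operands are constants. */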
        CASE_OP_32_64(rem):
        CASE_OP_32_64(remu):
            done = fold_remainder(&ctx, op);
            break;
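        /*
         * Shifts and rotates: constants fold, a shift count of zero
         * becomes a mov, and shifting known bits updates the
         * result's z_mask.
         */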
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
            done = fold_shift(&ctx, op);
            break;
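        /*
         * Comparisons: a condition decidable from known values folds
         * to 0/1 (0/-1 for negsetcond); setcond2 may reduce to a
         * single-word setcond.
         */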
        CASE_OP_32_64(setcond):
            done = fold_setcond(&ctx, op);
            break;
        CASE_OP_32_64(negsetcond):
            done = fold_negsetcond(&ctx, op);
            break;
        case INDEX_op_setcond2_i32:
            done = fold_setcond2(&ctx, op);
            break;
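        /*
         * Vector compare and select: operand order and condition may
         * be canonicalized, and a select whose control input is
         * known can fold to a mov of the chosen operand.
         */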
        case INDEX_op_cmp_vec:
            done = fold_cmp_vec(&ctx, op);
            break;
        case INDEX_op_cmpsel_vec:
            done = fold_cmpsel_vec(&ctx, op);
            break;
        case INDEX_op_bitsel_vec:
            done = fold_bitsel_vec(&ctx, op);
            break;
        CASE_OP_32_64(sextract):
            done = fold_sextract(&ctx, op);
            break;
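        /*
         * Subtraction: "sub r,x,x" folds to 0 and "sub r,x,0" to a
         * mov; sub2 handles the double-word form.
         */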
        CASE_OP_32_64(sub):
            done = fold_sub(&ctx, op);
            break;
        case INDEX_op_sub_vec:
            done = fold_sub_vec(&ctx, op);
            break;
        CASE_OP_32_64(sub2):
            done = fold_sub2(&ctx, op);
            break;
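        /* XOR: "xor r,x,x" -> 0 and "xor r,x,0" -> "mov r,x". */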
        CASE_OP_32_64_VEC(xor):
            done = fold_xor(&ctx, op);
            break;
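        /*
         * Control-flow ops end the extended basic block: all
         * value-tracking state must be discarded, since the
         * destination can be reached along other paths.
         */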
        case INDEX_op_set_label:
        case INDEX_op_br:
        case INDEX_op_exit_tb:
        case INDEX_op_goto_tb:
        case INDEX_op_goto_ptr:
            finish_ebb(&ctx);
            done = true;
            break;
        default:
            break;
        }
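
        /*
         * For any opcode not (or not fully) handled above,
         * finish_folding records generic information about the
         * outputs so that later ops still see up-to-date state.
         */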
        if (!done) {
            finish_folding(&ctx, op);
        }
    }
}