/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "qemu/interval-tree.h"
#include "tcg/tcg-op-common.h"
#include "tcg-internal.h"
#include "tcg-has.h"

#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)

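/*
 * Illustration (not from the original source): glue() pastes tokens, so
 *     CASE_OP_32_64(bswap16)
 * expands to
 *     case INDEX_op_bswap16_i32: case INDEX_op_bswap16_i64
 * letting one switch label cover both widths of an operation.
 */
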
typedef struct MemCopyInfo {
    IntervalTreeNode itree;
    QSIMPLEQ_ENTRY (MemCopyInfo) next;
    TCGTemp *ts;
    TCGType type;
} MemCopyInfo;

typedef struct TempOptInfo {
    bool is_const;
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
    uint64_t val;
    uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
} TempOptInfo;

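/*
 * Worked illustration (not part of the original source): for the
 * constant 0x00ff, z_mask == 0x00ff (every other bit is known zero) and
 * s_mask == 0xffffffffffffff00 (bits 8..63 all match the msb, here 0),
 * i.e. INT64_MIN >> clrsb64(0x00ff) as computed in init_ts_info() below.
 * For a value about which nothing is known, z_mask == -1 and s_mask == 0.
 */
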
typedef struct OptContext {
    TCGContext *tcg;
    TCGOp *prev_mb;
    TCGTempSet temps_used;

    IntervalTreeRoot mem_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;

    /* In flight values from optimization. */
    TCGType type;
} OptContext;

static inline TempOptInfo *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline TempOptInfo *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ti_is_const(TempOptInfo *ti)
{
    return ti->is_const;
}

static inline uint64_t ti_const_val(TempOptInfo *ti)
{
    return ti->val;
}

static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
{
    return ti_is_const(ti) && ti_const_val(ti) == val;
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ti_is_const(ts_info(ts));
}

static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
{
    return ti_is_const_val(ts_info(ts), val);
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline bool arg_is_const_val(TCGArg arg, uint64_t val)
{
    return ts_is_const_val(arg_temp(arg), val);
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}

static TCGTemp *cmp_better_copy(TCGTemp *a, TCGTemp *b)
{
    return a->kind < b->kind ? b : a;
}

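/*
 * Note (illustrative): TCGTempKind is ordered from least to most
 * persistent (EBB temp, TB temp, global, fixed, constant), so the copy
 * with the greater kind is kept as the canonical representative;
 * e.g. a constant is preferred over any of its non-constant copies.
 */
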
/* Initialize and activate a temporary. */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    TempOptInfo *ti;

    if (test_bit(idx, ctx->temps_used.l)) {
        return;
    }
    set_bit(idx, ctx->temps_used.l);

    ti = ts->state_ptr;
    if (ti == NULL) {
        ti = tcg_malloc(sizeof(TempOptInfo));
        ts->state_ptr = ti;
    }

    ti->next_copy = ts;
    ti->prev_copy = ts;
    QSIMPLEQ_INIT(&ti->mem_copy);
    if (ts->kind == TEMP_CONST) {
        ti->is_const = true;
        ti->val = ts->val;
        ti->z_mask = ts->val;
        ti->s_mask = INT64_MIN >> clrsb64(ts->val);
    } else {
        ti->is_const = false;
        ti->z_mask = -1;
        ti->s_mask = 0;
    }
}

static MemCopyInfo *mem_copy_first(OptContext *ctx, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_first(&ctx->mem_copy, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static MemCopyInfo *mem_copy_next(MemCopyInfo *mem, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_next(&mem->itree, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static void remove_mem_copy(OptContext *ctx, MemCopyInfo *mc)
{
    TCGTemp *ts = mc->ts;
    TempOptInfo *ti = ts_info(ts);

    interval_tree_remove(&mc->itree, &ctx->mem_copy);
    QSIMPLEQ_REMOVE(&ti->mem_copy, mc, MemCopyInfo, next);
    QSIMPLEQ_INSERT_TAIL(&ctx->mem_free, mc, next);
}

static void remove_mem_copy_in(OptContext *ctx, intptr_t s, intptr_t l)
{
    while (true) {
        MemCopyInfo *mc = mem_copy_first(ctx, s, l);
        if (!mc) {
            break;
        }
        remove_mem_copy(ctx, mc);
    }
}

static void remove_mem_copy_all(OptContext *ctx)
{
    remove_mem_copy_in(ctx, 0, -1);
    tcg_debug_assert(interval_tree_is_empty(&ctx->mem_copy));
}

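/*
 * Illustration (not from the original source): each MemCopyInfo records
 * that the byte range [start, last] of the machine state, e.g. an env
 * slot written by "st_i32 t0, env, $0x10", currently holds a copy of
 * temp ts.  find_mem_copy_for() below lets a matching load be replaced
 * by a copy of that temp, while any overlapping store invalidates the
 * range through remove_mem_copy_in().
 */
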
static TCGTemp *find_better_copy(TCGTemp *ts)
{
    TCGTemp *i, *ret;

    /* If this is already readonly, we can't do better. */
    if (temp_readonly(ts)) {
        return ts;
    }

    ret = ts;
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        ret = cmp_better_copy(ret, i);
    }
    return ret;
}

static void move_mem_copies(TCGTemp *dst_ts, TCGTemp *src_ts)
{
    TempOptInfo *si = ts_info(src_ts);
    TempOptInfo *di = ts_info(dst_ts);
    MemCopyInfo *mc;

    QSIMPLEQ_FOREACH(mc, &si->mem_copy, next) {
        tcg_debug_assert(mc->ts == src_ts);
        mc->ts = dst_ts;
    }
    QSIMPLEQ_CONCAT(&di->mem_copy, &si->mem_copy);
}

/* Reset TEMP's state, possibly removing the temp from the list of copies. */
static void reset_ts(OptContext *ctx, TCGTemp *ts)
{
    TempOptInfo *ti = ts_info(ts);
    TCGTemp *pts = ti->prev_copy;
    TCGTemp *nts = ti->next_copy;
    TempOptInfo *pi = ts_info(pts);
    TempOptInfo *ni = ts_info(nts);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->z_mask = -1;
    ti->s_mask = 0;

    if (!QSIMPLEQ_EMPTY(&ti->mem_copy)) {
        if (ts == nts) {
            /* Last temp copy being removed, the mem copies die. */
            MemCopyInfo *mc;
            QSIMPLEQ_FOREACH(mc, &ti->mem_copy, next) {
                interval_tree_remove(&mc->itree, &ctx->mem_copy);
            }
            QSIMPLEQ_CONCAT(&ctx->mem_free, &ti->mem_copy);
        } else {
            move_mem_copies(find_better_copy(nts), ts);
        }
    }
}

static void reset_temp(OptContext *ctx, TCGArg arg)
{
    reset_ts(ctx, arg_temp(arg));
}

static void record_mem_copy(OptContext *ctx, TCGType type,
                            TCGTemp *ts, intptr_t start, intptr_t last)
{
    MemCopyInfo *mc;
    TempOptInfo *ti;

    mc = QSIMPLEQ_FIRST(&ctx->mem_free);
    if (mc) {
        QSIMPLEQ_REMOVE_HEAD(&ctx->mem_free, next);
    } else {
        mc = tcg_malloc(sizeof(*mc));
    }

    memset(mc, 0, sizeof(*mc));
    mc->itree.start = start;
    mc->itree.last = last;
    mc->type = type;
    interval_tree_insert(&mc->itree, &ctx->mem_copy);

    ts = find_better_copy(ts);
    ti = ts_info(ts);
    mc->ts = ts;
    QSIMPLEQ_INSERT_TAIL(&ti->mem_copy, mc, next);
}

static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}

static TCGTemp *find_mem_copy_for(OptContext *ctx, TCGType type, intptr_t s)
{
    MemCopyInfo *mc;

    for (mc = mem_copy_first(ctx, s, s); mc; mc = mem_copy_next(mc, s, s)) {
        if (mc->itree.start == s && mc->type == type) {
            return find_better_copy(mc->ts);
        }
    }
    return NULL;
}

static TCGArg arg_new_constant(OptContext *ctx, uint64_t val)
{
    TCGType type = ctx->type;
    TCGTemp *ts;

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    ts = tcg_constant_internal(type, val);
    init_ts_info(ctx, ts);

    return temp_arg(ts);
}

static TCGArg arg_new_temp(OptContext *ctx)
{
    TCGTemp *ts = tcg_temp_new_internal(ctx->type, TEMP_EBB);
    init_ts_info(ctx, ts);
    return temp_arg(ts);
}

static TCGOp *opt_insert_after(OptContext *ctx, TCGOp *op,
                               TCGOpcode opc, unsigned narg)
{
    return tcg_op_insert_after(ctx->tcg, op, opc, ctx->type, narg);
}

static TCGOp *opt_insert_before(OptContext *ctx, TCGOp *op,
                                TCGOpcode opc, unsigned narg)
{
    return tcg_op_insert_before(ctx->tcg, op, opc, ctx->type, narg);
}

static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    TempOptInfo *di;
    TempOptInfo *si;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }

    reset_ts(ctx, dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);

    switch (ctx->type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        new_op = INDEX_op_mov;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /* TCGOP_TYPE and TCGOP_VECE remain unchanged. */
        new_op = INDEX_op_mov_vec;
        break;
    default:
        g_assert_not_reached();
    }
    op->opc = new_op;
    op->args[0] = dst;
    op->args[1] = src;

    di->z_mask = si->z_mask;
    di->s_mask = si->s_mask;

    if (src_ts->type == dst_ts->type) {
        TempOptInfo *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;

        if (!QSIMPLEQ_EMPTY(&si->mem_copy)
            && cmp_better_copy(src_ts, dst_ts) == dst_ts) {
            move_mem_copies(dst_ts, src_ts);
        }
    }
    return true;
}

static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                             TCGArg dst, uint64_t val)
{
    /* Convert movi to mov with constant temp. */
    return tcg_opt_gen_mov(ctx, op, dst, arg_new_constant(ctx, val));
}

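/*
 * Illustration (not from the original source): there is no separate
 * "movi" opcode here; tcg_opt_gen_movi() rewrites the op as a mov from
 * a TEMP_CONST temp.  Schematically, once both inputs are known to be
 * the constants 2 and 3,
 *     add t2, t0, t1
 * is replaced by
 *     mov t2, $0x5
 * and t2 joins the copy list of the constant 5.
 */
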
static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
                                      uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

    switch (op) {
    case INDEX_op_add:
        return x + y;

    case INDEX_op_sub:
        return x - y;

    case INDEX_op_mul:
        return x * y;

    case INDEX_op_and:
    case INDEX_op_and_vec:
        return x & y;

    case INDEX_op_or:
    case INDEX_op_or_vec:
        return x | y;

    case INDEX_op_xor:
    case INDEX_op_xor_vec:
        return x ^ y;

    case INDEX_op_shl:
        if (type == TCG_TYPE_I32) {
            return (uint32_t)x << (y & 31);
        }
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr:
        if (type == TCG_TYPE_I32) {
            return (uint32_t)x >> (y & 31);
        }
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar:
        if (type == TCG_TYPE_I32) {
            return (int32_t)x >> (y & 31);
        }
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr:
        if (type == TCG_TYPE_I32) {
            return ror32(x, y & 31);
        }
        return ror64(x, y & 63);

    case INDEX_op_rotl:
        if (type == TCG_TYPE_I32) {
            return rol32(x, y & 31);
        }
        return rol64(x, y & 63);

    case INDEX_op_not:
    case INDEX_op_not_vec:
        return ~x;

    case INDEX_op_neg:
        return -x;

    case INDEX_op_andc:
    case INDEX_op_andc_vec:
        return x & ~y;

    case INDEX_op_orc:
    case INDEX_op_orc_vec:
        return x | ~y;

    case INDEX_op_eqv:
    case INDEX_op_eqv_vec:
        return ~(x ^ y);

    case INDEX_op_nand:
    case INDEX_op_nand_vec:
        return ~(x & y);

    case INDEX_op_nor:
    case INDEX_op_nor_vec:
        return ~(x | y);

    case INDEX_op_clz:
        if (type == TCG_TYPE_I32) {
            return (uint32_t)x ? clz32(x) : y;
        }
        return x ? clz64(x) : y;

    case INDEX_op_ctz:
        if (type == TCG_TYPE_I32) {
            return (uint32_t)x ? ctz32(x) : y;
        }
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop:
        return type == TCG_TYPE_I32 ? ctpop32(x) : ctpop64(x);

    CASE_OP_32_64(bswap16):
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

    CASE_OP_32_64(bswap32):
        x = bswap32(x);
        return y & TCG_BSWAP_OS ? (int32_t)x : x;

    case INDEX_op_bswap64_i64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh:
        if (type == TCG_TYPE_I32) {
            return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
        }
        mulu64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_mulsh:
        if (type == TCG_TYPE_I32) {
            return ((int64_t)(int32_t)x * (int32_t)y) >> 32;
        }
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_divs:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        if (type == TCG_TYPE_I32) {
            return (int32_t)x / ((int32_t)y ? : 1);
        }
        return (int64_t)x / ((int64_t)y ? : 1);

    case INDEX_op_divu:
        if (type == TCG_TYPE_I32) {
            return (uint32_t)x / ((uint32_t)y ? : 1);
        }
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rems:
        if (type == TCG_TYPE_I32) {
            return (int32_t)x % ((int32_t)y ? : 1);
        }
        return (int64_t)x % ((int64_t)y ? : 1);

    case INDEX_op_remu:
        if (type == TCG_TYPE_I32) {
            return (uint32_t)x % ((uint32_t)y ? : 1);
        }
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        g_assert_not_reached();
    }
}

static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
                                    uint64_t x, uint64_t y)
{
    uint64_t res = do_constant_folding_2(op, type, x, y);
    if (type == TCG_TYPE_I32) {
        res = (int32_t)res;
    }
    return res;
}

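/*
 * Worked example (illustrative): folding a 32-bit add of 0x7fffffff
 * and 1 computes 0x80000000; the (int32_t) cast above stores it as the
 * sign-extended 64-bit value 0xffffffff80000000, the same canonical
 * form used for 32-bit constants in init_ts_info() and
 * arg_new_constant().
 */
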
static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

static int do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        return -1;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

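/*
 * Note (illustrative): with identical operands, the ordered comparisons
 * fold to a constant (x == x is true, x < x is false, ...), but the TST
 * conditions do not: (x & x) == 0 holds only when x itself is zero,
 * which is unknown here, hence -1 for TSTEQ/TSTNE.
 */
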
/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond(TCGType type, TCGArg x,
                                    TCGArg y, TCGCond c)
{
    if (arg_is_const(x) && arg_is_const(y)) {
        uint64_t xv = arg_info(x)->val;
        uint64_t yv = arg_info(y)->val;

        switch (type) {
        case TCG_TYPE_I32:
            return do_constant_folding_cond_32(xv, yv, c);
        case TCG_TYPE_I64:
            return do_constant_folding_cond_64(xv, yv, c);
        default:
            /* Only scalar comparisons are optimizable */
            return -1;
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const_val(y, 0)) {
        switch (c) {
        case TCG_COND_LTU:
        case TCG_COND_TSTNE:
            return 0;
        case TCG_COND_GEU:
        case TCG_COND_TSTEQ:
            return 1;
        default:
            return -1;
        }
    }
    return -1;
}

/**
 * swap_commutative:
 * @dest: TCGArg of the destination argument, or NO_DEST.
 * @p1: first paired argument
 * @p2: second paired argument
 *
 * If *@p1 is a constant and *@p2 is not, swap.
 * If *@p2 matches @dest, swap.
 * Return true if a swap was performed.
 */

#define NO_DEST  temp_arg(NULL)

static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += arg_is_const(a1);
    sum -= arg_is_const(a2);

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}

static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += arg_is_const(p1[0]);
    sum += arg_is_const(p1[1]);
    sum -= arg_is_const(p2[0]);
    sum -= arg_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}

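/*
 * Illustration (not from the original source): swap_commutative()
 * canonicalizes e.g.
 *     add t0, $5, t1   ->   add t0, t1, $5
 * so later folders need only check the second operand for a constant;
 * the @dest argument additionally prefers the "op a, a, b" form when
 * the destination matches the second source.
 */
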
/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest,
                                     TCGArg *p1, TCGArg *p2, TCGArg *pcond)
{
    TCGCond cond;
    TempOptInfo *i1;
    bool swap;
    int r;

    swap = swap_commutative(dest, p1, p2);
    cond = *pcond;
    if (swap) {
        *pcond = cond = tcg_swap_cond(cond);
    }

    r = do_constant_folding_cond(ctx->type, *p1, *p2, cond);
    if (r >= 0) {
        return r;
    }
    if (!is_tst_cond(cond)) {
        return -1;
    }

    i1 = arg_info(*p1);

    /*
     * TSTNE x,x -> NE x,0
     * TSTNE x,i -> NE x,0 if i includes all nonzero bits of x
     */
    if (args_are_copies(*p1, *p2) ||
        (arg_is_const(*p2) && (i1->z_mask & ~arg_info(*p2)->val) == 0)) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
        return -1;
    }

    /* TSTNE x,i -> LT x,0 if i only includes sign bit copies */
    if (arg_is_const(*p2) && (arg_info(*p2)->val & ~i1->s_mask) == 0) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_ltge_cond(cond);
        return -1;
    }

    /* Expand to AND with a temporary if no backend support. */
    if (!TCG_TARGET_HAS_tst) {
        TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and, 3);
        TCGArg tmp = arg_new_temp(ctx);

        op2->args[0] = tmp;
        op2->args[1] = *p1;
        op2->args[2] = *p2;

        *p1 = tmp;
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
    }
    return -1;
}

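/*
 * Illustration (not from the original source): on a backend without
 * TCG_TARGET_HAS_tst, the expansion above turns
 *     brcond t0, t1, tstne, $L1
 * into
 *     and tmp, t0, t1
 *     brcond tmp, $0x0, ne, $L1
 * which any backend can emit.
 */
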
static int do_constant_folding_cond2(OptContext *ctx, TCGOp *op, TCGArg *args)
{
    TCGArg al, ah, bl, bh;
    TCGCond c;
    bool swap;
    int r;

    swap = swap_commutative2(args, args + 2);
    c = args[4];
    if (swap) {
        args[4] = c = tcg_swap_cond(c);
    }

    al = args[0];
    ah = args[1];
    bl = args[2];
    bh = args[3];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);

            r = do_constant_folding_cond_64(a, b, c);
            if (r >= 0) {
                return r;
            }
        }

        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
            case TCG_COND_TSTNE:
                return 0;
            case TCG_COND_GEU:
            case TCG_COND_TSTEQ:
                return 1;
            default:
                break;
            }
        }

        /* TSTNE x,-1 -> NE x,0 */
        if (b == -1 && is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }

        /* TSTNE x,sign -> LT x,0 */
        if (b == INT64_MIN && is_tst_cond(c)) {
            /* bl must be 0, so copy that to bh */
            args[3] = bl;
            args[4] = tcg_tst_ltge_cond(c);
            return -1;
        }
    }

    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        r = do_constant_folding_cond_eq(c);
        if (r >= 0) {
            return r;
        }

        /* TSTNE x,x -> NE x,0 */
        if (is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }
    }

    /* Expand to AND with a temporary if no backend support. */
    if (!TCG_TARGET_HAS_tst && is_tst_cond(c)) {
        TCGOp *op1 = opt_insert_before(ctx, op, INDEX_op_and, 3);
        TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and, 3);
        TCGArg t1 = arg_new_temp(ctx);
        TCGArg t2 = arg_new_temp(ctx);

        op1->args[0] = t1;
        op1->args[1] = al;
        op1->args[2] = bl;
        op2->args[0] = t2;
        op2->args[1] = ah;
        op2->args[2] = bh;

        args[0] = t1;
        args[1] = t2;
        args[3] = args[2] = arg_new_constant(ctx, 0);
        args[4] = tcg_tst_eqne_cond(c);
    }
    return -1;
}

static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
{
    for (int i = 0; i < nb_args; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        init_ts_info(ctx, ts);
    }
}

static void copy_propagate(OptContext *ctx, TCGOp *op,
                           int nb_oargs, int nb_iargs)
{
    for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts_is_copy(ts)) {
            op->args[i] = temp_arg(find_better_copy(ts));
        }
    }
}

static void finish_bb(OptContext *ctx)
{
    /* We only optimize memory barriers across basic blocks. */
    ctx->prev_mb = NULL;
}

static void finish_ebb(OptContext *ctx)
{
    finish_bb(ctx);
    /* We only optimize across extended basic blocks. */
    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
    remove_mem_copy_all(ctx);
}

static bool finish_folding(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    int i, nb_oargs;

    nb_oargs = def->nb_oargs;
    for (i = 0; i < nb_oargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        reset_ts(ctx, ts);
    }
    return true;
}

/*
 * The fold_* functions return true when processing is complete,
 * usually by folding the operation to a constant or to a copy,
 * and calling tcg_opt_gen_{mov,movi}. They may do other things,
 * like collect information about the value produced, for use in
 * optimizing a subsequent operation.
 *
 * These first fold_* functions are all helpers, used by other
 * folders for more specific operations.
 */

static bool fold_const1(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = do_constant_folding(op->opc, ctx->type, t, 0);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_const2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}

static bool fold_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return false;
}

static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return fold_const2(ctx, op);
}

/*
 * Record "zero" and "sign" masks for the single output of @op.
 * See TempOptInfo definition of z_mask and s_mask.
 * If z_mask allows, fold the output to constant zero.
 * The passed s_mask may be augmented by z_mask.
 */
static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
                          uint64_t z_mask, int64_t s_mask)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    TCGTemp *ts;
    TempOptInfo *ti;
    int rep;

    /* Only single-output opcodes are supported here. */
    tcg_debug_assert(def->nb_oargs == 1);

    /*
     * 32-bit ops generate 32-bit results, which for the purpose of
     * simplifying tcg are sign-extended. Certainly that's how we
     * represent our constants elsewhere. Note that the bits will
     * be reset properly for a 64-bit value when encountering the
     * type changing opcodes.
     */
    if (ctx->type == TCG_TYPE_I32) {
        z_mask = (int32_t)z_mask;
        s_mask |= INT32_MIN;
    }

    if (z_mask == 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
    }

    ts = arg_temp(op->args[0]);
    reset_ts(ctx, ts);

    ti = ts_info(ts);
    ti->z_mask = z_mask;

    /* Canonicalize s_mask and incorporate data from z_mask. */
    rep = clz64(~s_mask);
    rep = MAX(rep, clz64(z_mask));
    rep = MAX(rep - 1, 0);
    ti->s_mask = INT64_MIN >> rep;

    return true;
}

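/*
 * Worked example (illustrative): for z_mask == 0x00ff and s_mask == 0,
 * clz64(~s_mask) == 0 and clz64(z_mask) == 56, so rep == 55 and the
 * canonical s_mask becomes INT64_MIN >> 55 == 0xffffffffffffff00,
 * i.e. bits 8..63 are known to replicate the sign bit.
 */
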
static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
{
    return fold_masks_zs(ctx, op, z_mask, 0);
}

static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
{
    return fold_masks_zs(ctx, op, -1, s_mask);
}

/*
 * An "affected" mask bit is 0 if and only if the result is identical
 * to the first input. Thus if the entire mask is 0, the operation
 * is equivalent to a copy.
 */
static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
{
    if (ctx->type == TCG_TYPE_I32) {
        a_mask = (uint32_t)a_mask;
    }
    if (a_mask == 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

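/*
 * Illustration (not from the original source): fold_and() below passes
 * a_mask == z1 & ~z2. For "and t0, t1, $0xff" where t1 is already known
 * to fit in 8 bits (z1 == 0xff), a_mask == 0: the AND cannot change any
 * bit of t1, so the op folds to "mov t0, t1".
 */
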
/*
 * Convert @op to NOT, if NOT is supported by the host.
 * Return true if the conversion is successful, which will still
 * indicate that the processing is complete.
 */
static bool fold_not(OptContext *ctx, TCGOp *op);
static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
{
    TCGOpcode not_op;
    bool have_not;

    switch (ctx->type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        not_op = INDEX_op_not;
        have_not = tcg_op_supported(INDEX_op_not, ctx->type, 0);
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        not_op = INDEX_op_not_vec;
        have_not = TCG_TARGET_HAS_not_vec;
        break;
    default:
        g_assert_not_reached();
    }
    if (have_not) {
        op->opc = not_op;
        op->args[1] = op->args[idx];
        return fold_not(ctx, op);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to @i. */
static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to NOT. */
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return fold_to_not(ctx, op, 2);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to @i. */
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to identity. */
static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to NOT. */
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return fold_to_not(ctx, op, 1);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to @i. */
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to identity. */
static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

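/*
 * Illustration (not from the original source) of how these helpers
 * compose in the per-opcode folders:
 *     and  t0, t1, t1    ->  mov t0, t1     (fold_xx_to_x)
 *     xor  t0, t1, t1    ->  mov t0, $0x0   (fold_xx_to_i with i == 0)
 *     or   t0, t1, $0x0  ->  mov t0, t1     (fold_xi_to_x with i == 0)
 *     andc t0, $-1, t1   ->  not t0, t1     (fold_ix_to_not with i == -1)
 */
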
/*
 * These outermost fold_<op> functions are sorted alphabetically.
 *
 * The ordering of the transformations should be:
 *   1) those that produce a constant
 *   2) those that produce a copy
 *   3) those that produce information about the result value.
 */

static bool fold_or(OptContext *ctx, TCGOp *op);
static bool fold_orc(OptContext *ctx, TCGOp *op);
static bool fold_xor(OptContext *ctx, TCGOp *op);

static bool fold_add(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return finish_folding(ctx, op);
}

/* We cannot as yet do_constant_folding with vectors. */
static bool fold_add_vec(OptContext *ctx, TCGOp *op)
{
    if (fold_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return finish_folding(ctx, op);
}

static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
{
    bool a_const = arg_is_const(op->args[2]) && arg_is_const(op->args[3]);
    bool b_const = arg_is_const(op->args[4]) && arg_is_const(op->args[5]);

    if (a_const && b_const) {
        uint64_t al = arg_info(op->args[2])->val;
        uint64_t ah = arg_info(op->args[3])->val;
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;
        TCGArg rl, rh;
        TCGOp *op2;

        if (ctx->type == TCG_TYPE_I32) {
            uint64_t a = deposit64(al, 32, 32, ah);
            uint64_t b = deposit64(bl, 32, 32, bh);

            if (add) {
                a += b;
            } else {
                a -= b;
            }

            al = sextract64(a, 0, 32);
            ah = sextract64(a, 32, 32);
        } else {
            Int128 a = int128_make128(al, ah);
            Int128 b = int128_make128(bl, bh);

            if (add) {
                a = int128_add(a, b);
            } else {
                a = int128_sub(a, b);
            }

            al = int128_getlo(a);
            ah = int128_gethi(a);
        }

        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = opt_insert_before(ctx, op, 0, 2);

        tcg_opt_gen_movi(ctx, op, rl, al);
        tcg_opt_gen_movi(ctx, op2, rh, ah);
        return true;
    }

    /* Fold sub2 r,x,i to add2 r,x,-i */
    if (!add && b_const) {
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;

        /* Negate the two parts without assembling and disassembling. */
        bl = -bl;
        bh = ~bh + !bl;
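        /*
         * Worked example (illustrative): for bh:bl == 0x0:0x1 (value 1),
         * bl becomes -1 (all ones), !bl is then 0, and bh becomes
         * ~0 + 0 == all ones: the two-part encoding of -1.  The !bl
         * term supplies the carry from the low half of "~b + 1".
         */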
1287
1288 op->opc = (ctx->type == TCG_TYPE_I32
1289 ? INDEX_op_add2_i32 : INDEX_op_add2_i64);
1290 op->args[4] = arg_new_constant(ctx, bl);
1291 op->args[5] = arg_new_constant(ctx, bh);
1292 }
Richard Hendersonf3ed3cf2024-12-08 18:39:47 -06001293 return finish_folding(ctx, op);
Richard Hendersone3f7dc22021-08-24 10:30:38 -07001294}
1295
Richard Henderson9531c072021-08-26 06:51:39 -07001296static bool fold_add2(OptContext *ctx, TCGOp *op)
Richard Hendersone3f7dc22021-08-24 10:30:38 -07001297{
Richard Henderson7a2f7082021-08-26 07:06:39 -07001298 /* Note that the high and low parts may be independently swapped. */
1299 swap_commutative(op->args[0], &op->args[2], &op->args[4]);
1300 swap_commutative(op->args[1], &op->args[3], &op->args[5]);
1301
Richard Henderson9531c072021-08-26 06:51:39 -07001302 return fold_addsub2(ctx, op, true);
Richard Hendersone3f7dc22021-08-24 10:30:38 -07001303}
1304
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001305static bool fold_and(OptContext *ctx, TCGOp *op)
1306{
Richard Henderson1ca73722024-12-08 18:47:15 -06001307 uint64_t z1, z2, z_mask, s_mask;
1308 TempOptInfo *t1, *t2;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001309
Richard Henderson7a2f7082021-08-26 07:06:39 -07001310 if (fold_const2_commutative(ctx, op) ||
Richard Hendersone8679952021-08-25 13:19:52 -07001311 fold_xi_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001312 fold_xi_to_x(ctx, op, -1) ||
Richard Hendersonca7bb042021-08-25 13:14:21 -07001313 fold_xx_to_x(ctx, op)) {
1314 return true;
1315 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001316
Richard Henderson1ca73722024-12-08 18:47:15 -06001317 t1 = arg_info(op->args[1]);
1318 t2 = arg_info(op->args[2]);
1319 z1 = t1->z_mask;
1320 z2 = t2->z_mask;
Richard Henderson3f2b1f82021-08-26 13:08:54 -07001321
1322 /*
Richard Hendersonfae450b2021-08-25 22:42:19 -07001323 * Known-zeros does not imply known-ones. Therefore unless
1324 * arg2 is constant, we can't infer affected bits from it.
1325 */
Richard Henderson1ca73722024-12-08 18:47:15 -06001326 if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
Richard Henderson045ace32024-12-19 10:33:51 -08001327 return true;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001328 }
1329
Richard Henderson1ca73722024-12-08 18:47:15 -06001330 z_mask = z1 & z2;
1331
1332 /*
1333 * Sign repetitions are perforce all identical, whether they are 1 or 0.
1334 * Bitwise operations preserve the relative quantity of the repetitions.
1335 */
1336 s_mask = t1->s_mask & t2->s_mask;
1337
1338 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001339}
1340
1341static bool fold_andc(OptContext *ctx, TCGOp *op)
1342{
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001343 uint64_t z_mask, s_mask;
1344 TempOptInfo *t1, *t2;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001345
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07001346 if (fold_const2(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001347 fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001348 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001349 fold_ix_to_not(ctx, op, -1)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07001350 return true;
1351 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001352
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001353 t1 = arg_info(op->args[1]);
1354 t2 = arg_info(op->args[2]);
1355 z_mask = t1->z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001356
Richard Henderson899281c2023-11-15 11:18:55 -08001357 if (ti_is_const(t2)) {
1358 /* Fold andc r,x,i to and r,x,~i. */
1359 switch (ctx->type) {
1360 case TCG_TYPE_I32:
1361 case TCG_TYPE_I64:
1362 op->opc = INDEX_op_and;
1363 break;
1364 case TCG_TYPE_V64:
1365 case TCG_TYPE_V128:
1366 case TCG_TYPE_V256:
1367 op->opc = INDEX_op_and_vec;
1368 break;
1369 default:
1370 g_assert_not_reached();
1371 }
1372 op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
1373 return fold_and(ctx, op);
1374 }
1375
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001388 s_mask = t1->s_mask & t2->s_mask;
1389 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001390}
1391
Richard Henderson7d3c63a2024-12-09 14:06:08 -06001392static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
1393{
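    /*
     * Recall that bitsel_vec d, s, t, f computes
     * d = (t & s) | (f & ~s); the special cases below are all
     * instances of that identity with t and/or f constant.
     */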
1394      /* If true and false values are the same, eliminate the select. */
1395 if (args_are_copies(op->args[2], op->args[3])) {
1396 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
1397 }
1398
1399 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
1400 uint64_t tv = arg_info(op->args[2])->val;
1401 uint64_t fv = arg_info(op->args[3])->val;
1402
1403 if (tv == -1 && fv == 0) {
1404 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
1405 }
1406 if (tv == 0 && fv == -1) {
1407 if (TCG_TARGET_HAS_not_vec) {
1408 op->opc = INDEX_op_not_vec;
1409 return fold_not(ctx, op);
1410 } else {
1411 op->opc = INDEX_op_xor_vec;
1412 op->args[2] = arg_new_constant(ctx, -1);
1413 return fold_xor(ctx, op);
1414 }
1415 }
1416 }
1417 if (arg_is_const(op->args[2])) {
1418 uint64_t tv = arg_info(op->args[2])->val;
1419 if (tv == -1) {
1420 op->opc = INDEX_op_or_vec;
1421 op->args[2] = op->args[3];
1422 return fold_or(ctx, op);
1423 }
1424 if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
1425 op->opc = INDEX_op_andc_vec;
1426 op->args[2] = op->args[1];
1427 op->args[1] = op->args[3];
1428 return fold_andc(ctx, op);
1429 }
1430 }
1431 if (arg_is_const(op->args[3])) {
1432 uint64_t fv = arg_info(op->args[3])->val;
1433 if (fv == 0) {
1434 op->opc = INDEX_op_and_vec;
1435 return fold_and(ctx, op);
1436 }
1437 if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
1438 op->opc = INDEX_op_orc_vec;
1439 op->args[2] = op->args[1];
1440 op->args[1] = op->args[3];
1441 return fold_orc(ctx, op);
1442 }
1443 }
1444 return finish_folding(ctx, op);
1445}
1446
Richard Henderson079b0802021-08-24 09:30:59 -07001447static bool fold_brcond(OptContext *ctx, TCGOp *op)
1448{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001449 int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
Richard Henderson246c4b72023-10-24 16:36:50 -07001450 &op->args[1], &op->args[2]);
Richard Henderson079b0802021-08-24 09:30:59 -07001451 if (i == 0) {
1452 tcg_op_remove(ctx->tcg, op);
1453 return true;
1454 }
1455 if (i > 0) {
1456 op->opc = INDEX_op_br;
1457 op->args[0] = op->args[3];
Richard Henderson15268552024-12-08 07:45:11 -06001458 finish_ebb(ctx);
1459 } else {
1460 finish_bb(ctx);
Richard Henderson079b0802021-08-24 09:30:59 -07001461 }
Richard Henderson15268552024-12-08 07:45:11 -06001462 return true;
Richard Henderson079b0802021-08-24 09:30:59 -07001463}
1464
Richard Henderson764d2ab2021-08-24 09:22:11 -07001465static bool fold_brcond2(OptContext *ctx, TCGOp *op)
1466{
Richard Henderson7e64b112023-10-24 16:53:56 -07001467 TCGCond cond;
1468 TCGArg label;
Richard Henderson7a2f7082021-08-26 07:06:39 -07001469 int i, inv = 0;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001470
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001471 i = do_constant_folding_cond2(ctx, op, &op->args[0]);
Richard Henderson7e64b112023-10-24 16:53:56 -07001472 cond = op->args[4];
1473 label = op->args[5];
Richard Henderson764d2ab2021-08-24 09:22:11 -07001474 if (i >= 0) {
1475 goto do_brcond_const;
1476 }
1477
1478 switch (cond) {
1479 case TCG_COND_LT:
1480 case TCG_COND_GE:
1481 /*
1482 * Simplify LT/GE comparisons vs zero to a single compare
1483 * vs the high word of the input.
1484 */
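        /*
         * E.g. a 64-bit quantity is negative exactly when its high
         * word is, so "brcond2 al,ah,0,0,lt" reduces to
         * "brcond ah,0,lt".
         */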
Richard Henderson27cdb852023-10-23 11:38:00 -07001485 if (arg_is_const_val(op->args[2], 0) &&
1486 arg_is_const_val(op->args[3], 0)) {
Richard Henderson764d2ab2021-08-24 09:22:11 -07001487 goto do_brcond_high;
1488 }
1489 break;
1490
1491 case TCG_COND_NE:
1492 inv = 1;
1493 QEMU_FALLTHROUGH;
1494 case TCG_COND_EQ:
1495 /*
1496 * Simplify EQ/NE comparisons where one of the pairs
1497 * can be simplified.
1498 */
Richard Henderson67f84c92021-08-25 08:00:20 -07001499 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
Richard Henderson764d2ab2021-08-24 09:22:11 -07001500 op->args[2], cond);
1501 switch (i ^ inv) {
1502 case 0:
1503 goto do_brcond_const;
1504 case 1:
1505 goto do_brcond_high;
1506 }
1507
Richard Henderson67f84c92021-08-25 08:00:20 -07001508 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
Richard Henderson764d2ab2021-08-24 09:22:11 -07001509 op->args[3], cond);
1510 switch (i ^ inv) {
1511 case 0:
1512 goto do_brcond_const;
1513 case 1:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001514 goto do_brcond_low;
1515 }
1516 break;
1517
1518 case TCG_COND_TSTEQ:
1519 case TCG_COND_TSTNE:
1520 if (arg_is_const_val(op->args[2], 0)) {
1521 goto do_brcond_high;
1522 }
1523 if (arg_is_const_val(op->args[3], 0)) {
1524 goto do_brcond_low;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001525 }
1526 break;
1527
1528 default:
1529 break;
1530
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001531 do_brcond_low:
1532 op->opc = INDEX_op_brcond_i32;
1533 op->args[1] = op->args[2];
1534 op->args[2] = cond;
1535 op->args[3] = label;
1536 return fold_brcond(ctx, op);
1537
Richard Henderson764d2ab2021-08-24 09:22:11 -07001538 do_brcond_high:
1539 op->opc = INDEX_op_brcond_i32;
1540 op->args[0] = op->args[1];
1541 op->args[1] = op->args[3];
1542 op->args[2] = cond;
1543 op->args[3] = label;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001544 return fold_brcond(ctx, op);
Richard Henderson764d2ab2021-08-24 09:22:11 -07001545
1546 do_brcond_const:
1547 if (i == 0) {
1548 tcg_op_remove(ctx->tcg, op);
1549 return true;
1550 }
1551 op->opc = INDEX_op_br;
1552 op->args[0] = label;
Richard Henderson15268552024-12-08 07:45:11 -06001553 finish_ebb(ctx);
1554 return true;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001555 }
Richard Henderson15268552024-12-08 07:45:11 -06001556
1557 finish_bb(ctx);
1558 return true;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001559}
1560
Richard Henderson09bacdc2021-08-24 11:58:12 -07001561static bool fold_bswap(OptContext *ctx, TCGOp *op)
1562{
Richard Henderson57fe5c62021-08-26 12:04:46 -07001563 uint64_t z_mask, s_mask, sign;
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001564 TempOptInfo *t1 = arg_info(op->args[1]);
Richard Hendersonfae450b2021-08-25 22:42:19 -07001565
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001566 if (ti_is_const(t1)) {
1567 return tcg_opt_gen_movi(ctx, op, op->args[0],
1568 do_constant_folding(op->opc, ctx->type,
1569 ti_const_val(t1),
1570 op->args[2]));
Richard Henderson09bacdc2021-08-24 11:58:12 -07001571 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001572
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001573 z_mask = t1->z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001574 switch (op->opc) {
1575 case INDEX_op_bswap16_i32:
1576 case INDEX_op_bswap16_i64:
1577 z_mask = bswap16(z_mask);
1578 sign = INT16_MIN;
1579 break;
1580 case INDEX_op_bswap32_i32:
1581 case INDEX_op_bswap32_i64:
1582 z_mask = bswap32(z_mask);
1583 sign = INT32_MIN;
1584 break;
1585 case INDEX_op_bswap64_i64:
1586 z_mask = bswap64(z_mask);
1587 sign = INT64_MIN;
1588 break;
1589 default:
1590 g_assert_not_reached();
1591 }
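    /*
     * E.g. for bswap32 with incoming z_mask 0x000000ff (only the low
     * byte may be nonzero), bswap32(z_mask) = 0xff000000: after the
     * swap only the high byte of the 32-bit value may be nonzero.
     */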
1592
Richard Henderson75c3bf32024-12-19 10:50:40 -08001593 s_mask = 0;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001594 switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
1595 case TCG_BSWAP_OZ:
1596 break;
1597 case TCG_BSWAP_OS:
1598 /* If the sign bit may be 1, force all the bits above to 1. */
1599 if (z_mask & sign) {
1600 z_mask |= sign;
1601 }
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001602        /* The value, and therefore s_mask, is explicitly sign-extended. */
1603 s_mask = sign;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001604 break;
1605 default:
1606 /* The high bits are undefined: force all bits above the sign to 1. */
1607 z_mask |= sign << 1;
1608 break;
1609 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001610
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001611 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson09bacdc2021-08-24 11:58:12 -07001612}
1613
Richard Henderson5cf32be2021-08-24 08:17:08 -07001614static bool fold_call(OptContext *ctx, TCGOp *op)
1615{
1616 TCGContext *s = ctx->tcg;
1617 int nb_oargs = TCGOP_CALLO(op);
1618 int nb_iargs = TCGOP_CALLI(op);
1619 int flags, i;
1620
1621 init_arguments(ctx, op, nb_oargs + nb_iargs);
1622 copy_propagate(ctx, op, nb_oargs, nb_iargs);
1623
1624 /* If the function reads or writes globals, reset temp data. */
1625 flags = tcg_call_flags(op);
1626 if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
1627 int nb_globals = s->nb_globals;
1628
1629 for (i = 0; i < nb_globals; i++) {
1630 if (test_bit(i, ctx->temps_used.l)) {
Richard Henderson986cac12023-01-09 13:59:35 -08001631 reset_ts(ctx, &ctx->tcg->temps[i]);
Richard Henderson5cf32be2021-08-24 08:17:08 -07001632 }
1633 }
1634 }
1635
Richard Hendersonab84dc32023-08-23 23:04:24 -07001636 /* If the function has side effects, reset mem data. */
1637 if (!(flags & TCG_CALL_NO_SIDE_EFFECTS)) {
1638 remove_mem_copy_all(ctx);
1639 }
1640
Richard Henderson5cf32be2021-08-24 08:17:08 -07001641 /* Reset temp data for outputs. */
1642 for (i = 0; i < nb_oargs; i++) {
Richard Henderson986cac12023-01-09 13:59:35 -08001643 reset_temp(ctx, op->args[i]);
Richard Henderson5cf32be2021-08-24 08:17:08 -07001644 }
1645
1646 /* Stop optimizing MB across calls. */
1647 ctx->prev_mb = NULL;
1648 return true;
1649}
1650
Richard Henderson29f65862024-12-09 14:09:49 -06001651static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
1652{
1653 /* Canonicalize the comparison to put immediate second. */
1654 if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
1655 op->args[3] = tcg_swap_cond(op->args[3]);
1656 }
1657 return finish_folding(ctx, op);
1658}
1659
1660static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
1661{
1662 /* If true and false values are the same, eliminate the cmp. */
1663 if (args_are_copies(op->args[3], op->args[4])) {
1664 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
1665 }
1666
1667 /* Canonicalize the comparison to put immediate second. */
1668 if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
1669 op->args[5] = tcg_swap_cond(op->args[5]);
1670 }
1671 /*
1672 * Canonicalize the "false" input reg to match the destination,
1673 * so that the tcg backend can implement "move if true".
1674 */
1675 if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
1676 op->args[5] = tcg_invert_cond(op->args[5]);
1677 }
1678 return finish_folding(ctx, op);
1679}
1680
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001681static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
1682{
Richard Hendersonce1d6632024-12-08 19:47:51 -06001683 uint64_t z_mask, s_mask;
1684 TempOptInfo *t1 = arg_info(op->args[1]);
1685 TempOptInfo *t2 = arg_info(op->args[2]);
Richard Hendersonfae450b2021-08-25 22:42:19 -07001686
Richard Hendersonce1d6632024-12-08 19:47:51 -06001687 if (ti_is_const(t1)) {
1688 uint64_t t = ti_const_val(t1);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001689
1690 if (t != 0) {
Richard Henderson67f84c92021-08-25 08:00:20 -07001691 t = do_constant_folding(op->opc, ctx->type, t, 0);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001692 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1693 }
1694 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
1695 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001696
1697 switch (ctx->type) {
1698 case TCG_TYPE_I32:
1699 z_mask = 31;
1700 break;
1701 case TCG_TYPE_I64:
1702 z_mask = 63;
1703 break;
1704 default:
1705 g_assert_not_reached();
1706 }
Richard Hendersonce1d6632024-12-08 19:47:51 -06001707 s_mask = ~z_mask;
1708 z_mask |= t2->z_mask;
1709 s_mask &= t2->s_mask;
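    /*
     * E.g. a 32-bit clz/ctz of a nonzero input yields 0..31, hence the
     * initial z_mask of 31; or-ing in t2->z_mask accounts for the case
     * where the input is zero and args[2] is returned unchanged.
     */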
1710
1711 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001712}
1713
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001714static bool fold_ctpop(OptContext *ctx, TCGOp *op)
1715{
Richard Henderson81be07f2024-12-08 19:49:17 -06001716 uint64_t z_mask;
1717
Richard Hendersonfae450b2021-08-25 22:42:19 -07001718 if (fold_const1(ctx, op)) {
1719 return true;
1720 }
1721
1722 switch (ctx->type) {
1723 case TCG_TYPE_I32:
Richard Henderson81be07f2024-12-08 19:49:17 -06001724 z_mask = 32 | 31;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001725 break;
1726 case TCG_TYPE_I64:
Richard Henderson81be07f2024-12-08 19:49:17 -06001727 z_mask = 64 | 63;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001728 break;
1729 default:
1730 g_assert_not_reached();
1731 }
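    /*
     * E.g. the population count of a 32-bit value is 0..32, so only
     * bits [5:0] of the result can ever be set: z_mask = 32 | 31 = 63.
     */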
Richard Henderson81be07f2024-12-08 19:49:17 -06001732 return fold_masks_z(ctx, op, z_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001733}
1734
Richard Henderson1b1907b2021-08-24 10:47:04 -07001735static bool fold_deposit(OptContext *ctx, TCGOp *op)
1736{
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001737 TempOptInfo *t1 = arg_info(op->args[1]);
1738 TempOptInfo *t2 = arg_info(op->args[2]);
1739 int ofs = op->args[3];
1740 int len = op->args[4];
Richard Hendersonc3b920b2025-01-06 10:32:44 -08001741 int width = 8 * tcg_type_size(ctx->type);
Richard Hendersonedb832c2024-12-19 17:56:05 -08001742 uint64_t z_mask, s_mask;
Richard Henderson8f7a8402023-08-13 11:03:05 -07001743
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001744 if (ti_is_const(t1) && ti_is_const(t2)) {
1745 return tcg_opt_gen_movi(ctx, op, op->args[0],
1746 deposit64(ti_const_val(t1), ofs, len,
1747 ti_const_val(t2)));
Richard Henderson1b1907b2021-08-24 10:47:04 -07001748 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001749
Richard Henderson8f7a8402023-08-13 11:03:05 -07001750 /* Inserting a value into zero at offset 0. */
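    /* E.g. "deposit r, 0, x, 0, 16" is just "and r, x, 0xffff". */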
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001751 if (ti_is_const_val(t1, 0) && ofs == 0) {
1752 uint64_t mask = MAKE_64BIT_MASK(0, len);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001753
Richard Hendersonc3b920b2025-01-06 10:32:44 -08001754 op->opc = INDEX_op_and;
Richard Henderson8f7a8402023-08-13 11:03:05 -07001755 op->args[1] = op->args[2];
Richard Henderson26aac972023-10-23 12:31:57 -07001756 op->args[2] = arg_new_constant(ctx, mask);
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001757 return fold_and(ctx, op);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001758 }
1759
1760 /* Inserting zero into a value. */
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001761 if (ti_is_const_val(t2, 0)) {
1762 uint64_t mask = deposit64(-1, ofs, len, 0);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001763
Richard Hendersonc3b920b2025-01-06 10:32:44 -08001764 op->opc = INDEX_op_and;
Richard Henderson26aac972023-10-23 12:31:57 -07001765 op->args[2] = arg_new_constant(ctx, mask);
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001766 return fold_and(ctx, op);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001767 }
1768
Richard Hendersonedb832c2024-12-19 17:56:05 -08001769 /* The s_mask from the top portion of the deposit is still valid. */
1770 if (ofs + len == width) {
1771 s_mask = t2->s_mask << ofs;
1772 } else {
1773 s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
1774 }
1775
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001776 z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
Richard Hendersonedb832c2024-12-19 17:56:05 -08001777 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson1b1907b2021-08-24 10:47:04 -07001778}
1779
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001780static bool fold_divide(OptContext *ctx, TCGOp *op)
1781{
Richard Henderson2f9d9a32021-10-25 11:30:14 -07001782 if (fold_const2(ctx, op) ||
1783 fold_xi_to_x(ctx, op, 1)) {
1784 return true;
1785 }
Richard Henderson3d5ec802024-12-08 19:59:15 -06001786 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001787}
1788
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001789static bool fold_dup(OptContext *ctx, TCGOp *op)
1790{
1791 if (arg_is_const(op->args[1])) {
1792 uint64_t t = arg_info(op->args[1])->val;
1793 t = dup_const(TCGOP_VECE(op), t);
1794 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1795 }
Richard Hendersone089d692024-12-08 20:00:51 -06001796 return finish_folding(ctx, op);
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001797}
1798
1799static bool fold_dup2(OptContext *ctx, TCGOp *op)
1800{
1801 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
1802 uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
1803 arg_info(op->args[2])->val);
1804 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1805 }
1806
1807 if (args_are_copies(op->args[1], op->args[2])) {
1808 op->opc = INDEX_op_dup_vec;
1809 TCGOP_VECE(op) = MO_32;
1810 }
Richard Hendersone089d692024-12-08 20:00:51 -06001811 return finish_folding(ctx, op);
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001812}
1813
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001814static bool fold_eqv(OptContext *ctx, TCGOp *op)
1815{
Richard Hendersonef6be622024-12-08 20:03:15 -06001816 uint64_t s_mask;
Richard Henderson46c68d72023-11-15 11:51:28 -08001817 TempOptInfo *t1, *t2;
Richard Hendersonef6be622024-12-08 20:03:15 -06001818
Richard Henderson7a2f7082021-08-26 07:06:39 -07001819 if (fold_const2_commutative(ctx, op) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001820 fold_xi_to_x(ctx, op, -1) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001821 fold_xi_to_not(ctx, op, 0)) {
1822 return true;
1823 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07001824
Richard Henderson46c68d72023-11-15 11:51:28 -08001825 t2 = arg_info(op->args[2]);
1826 if (ti_is_const(t2)) {
1827 /* Fold eqv r,x,i to xor r,x,~i. */
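        /*
         * This uses the identity eqv(x, y) = ~(x ^ y) = x ^ ~y, so a
         * constant second operand can simply be complemented.
         */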
1828 switch (ctx->type) {
1829 case TCG_TYPE_I32:
1830 case TCG_TYPE_I64:
1831 op->opc = INDEX_op_xor;
1832 break;
1833 case TCG_TYPE_V64:
1834 case TCG_TYPE_V128:
1835 case TCG_TYPE_V256:
1836 op->opc = INDEX_op_xor_vec;
1837 break;
1838 default:
1839 g_assert_not_reached();
1840 }
1841 op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
1842 return fold_xor(ctx, op);
1843 }
1844
1845 t1 = arg_info(op->args[1]);
1846 s_mask = t1->s_mask & t2->s_mask;
Richard Hendersonef6be622024-12-08 20:03:15 -06001847 return fold_masks_s(ctx, op, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001848}
1849
Richard Hendersonb6617c82021-08-24 10:44:53 -07001850static bool fold_extract(OptContext *ctx, TCGOp *op)
1851{
Richard Hendersonfae450b2021-08-25 22:42:19 -07001852 uint64_t z_mask_old, z_mask;
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001853 TempOptInfo *t1 = arg_info(op->args[1]);
Richard Henderson57fe5c62021-08-26 12:04:46 -07001854 int pos = op->args[2];
1855 int len = op->args[3];
Richard Hendersonfae450b2021-08-25 22:42:19 -07001856
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001857 if (ti_is_const(t1)) {
1858 return tcg_opt_gen_movi(ctx, op, op->args[0],
1859 extract64(ti_const_val(t1), pos, len));
Richard Hendersonb6617c82021-08-24 10:44:53 -07001860 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001861
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001862 z_mask_old = t1->z_mask;
Richard Henderson57fe5c62021-08-26 12:04:46 -07001863 z_mask = extract64(z_mask_old, pos, len);
Richard Henderson045ace32024-12-19 10:33:51 -08001864 if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
1865 return true;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001866 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001867
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001868 return fold_masks_z(ctx, op, z_mask);
Richard Hendersonb6617c82021-08-24 10:44:53 -07001869}
1870
Richard Hendersondcd08992021-08-24 10:41:39 -07001871static bool fold_extract2(OptContext *ctx, TCGOp *op)
1872{
1873 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
1874 uint64_t v1 = arg_info(op->args[1])->val;
1875 uint64_t v2 = arg_info(op->args[2])->val;
1876 int shr = op->args[3];
1877
1878 if (op->opc == INDEX_op_extract2_i64) {
1879 v1 >>= shr;
1880 v2 <<= 64 - shr;
1881 } else {
1882 v1 = (uint32_t)v1 >> shr;
Richard Henderson225bec02021-11-09 23:17:59 +01001883 v2 = (uint64_t)((int32_t)v2 << (32 - shr));
Richard Hendersondcd08992021-08-24 10:41:39 -07001884 }
1885 return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
1886 }
Richard Hendersonc9df99e2024-12-08 20:06:42 -06001887 return finish_folding(ctx, op);
Richard Hendersondcd08992021-08-24 10:41:39 -07001888}
1889
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001890static bool fold_exts(OptContext *ctx, TCGOp *op)
1891{
Richard Henderson48e8de62024-12-26 12:01:57 -08001892 uint64_t s_mask, z_mask;
Richard Hendersona9621922024-12-08 20:08:46 -06001893 TempOptInfo *t1;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001894
1895 if (fold_const1(ctx, op)) {
1896 return true;
1897 }
1898
Richard Hendersona9621922024-12-08 20:08:46 -06001899 t1 = arg_info(op->args[1]);
1900 z_mask = t1->z_mask;
1901 s_mask = t1->s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001902
1903 switch (op->opc) {
Richard Hendersonfae450b2021-08-25 22:42:19 -07001904 case INDEX_op_ext_i32_i64:
Richard Hendersona9621922024-12-08 20:08:46 -06001905 s_mask |= INT32_MIN;
1906 z_mask = (int32_t)z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001907 break;
1908 default:
1909 g_assert_not_reached();
1910 }
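    /*
     * E.g. for ext_i32_i64 with z_mask = 0x80000000 the extended value
     * may be negative, so z_mask widens to 0xffffffff80000000, while
     * s_mask gains INT32_MIN: bits 63..31 all repeat the sign.
     */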
Richard Hendersona9621922024-12-08 20:08:46 -06001911 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001912}
1913
1914static bool fold_extu(OptContext *ctx, TCGOp *op)
1915{
Richard Henderson48e8de62024-12-26 12:01:57 -08001916 uint64_t z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001917
1918 if (fold_const1(ctx, op)) {
1919 return true;
1920 }
1921
Richard Henderson48e8de62024-12-26 12:01:57 -08001922 z_mask = arg_info(op->args[1])->z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001923 switch (op->opc) {
Richard Hendersonfae450b2021-08-25 22:42:19 -07001924 case INDEX_op_extrl_i64_i32:
1925 case INDEX_op_extu_i32_i64:
Richard Hendersonfae450b2021-08-25 22:42:19 -07001926 z_mask = (uint32_t)z_mask;
1927 break;
1928 case INDEX_op_extrh_i64_i32:
Richard Hendersonfae450b2021-08-25 22:42:19 -07001929 z_mask >>= 32;
1930 break;
1931 default:
1932 g_assert_not_reached();
1933 }
Richard Henderson08abe292024-12-08 20:11:44 -06001934 return fold_masks_z(ctx, op, z_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001935}
1936
Richard Henderson3eefdf22021-08-25 11:06:43 -07001937static bool fold_mb(OptContext *ctx, TCGOp *op)
1938{
1939 /* Eliminate duplicate and redundant fence instructions. */
1940 if (ctx->prev_mb) {
1941 /*
1942 * Merge two barriers of the same type into one,
1943 * or a weaker barrier into a stronger one,
1944 * or two weaker barriers into a stronger one.
1945 * mb X; mb Y => mb X|Y
1946 * mb; strl => mb; st
1947 * ldaq; mb => ld; mb
1948 * ldaq; strl => ld; mb; st
1949 * Other combinations are also merged into a strong
1950 * barrier. This is stricter than specified but for
1951 * the purposes of TCG is better than not optimizing.
1952 */
1953 ctx->prev_mb->args[0] |= op->args[0];
1954 tcg_op_remove(ctx->tcg, op);
1955 } else {
1956 ctx->prev_mb = op;
1957 }
1958 return true;
1959}
1960
Richard Henderson2cfac7f2021-08-25 13:05:43 -07001961static bool fold_mov(OptContext *ctx, TCGOp *op)
1962{
1963 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
1964}
1965
Richard Henderson0c310a32021-08-24 10:37:24 -07001966static bool fold_movcond(OptContext *ctx, TCGOp *op)
1967{
Richard Henderson32202782024-12-08 20:16:38 -06001968 uint64_t z_mask, s_mask;
1969 TempOptInfo *tt, *ft;
Richard Henderson7a2f7082021-08-26 07:06:39 -07001970 int i;
Richard Henderson0c310a32021-08-24 10:37:24 -07001971
Richard Henderson141125e2024-09-06 21:00:10 -07001972 /* If true and false values are the same, eliminate the cmp. */
1973 if (args_are_copies(op->args[3], op->args[4])) {
1974 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
1975 }
1976
Richard Henderson7a2f7082021-08-26 07:06:39 -07001977 /*
1978 * Canonicalize the "false" input reg to match the destination reg so
1979 * that the tcg backend can implement a "move if true" operation.
1980 */
1981 if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
Richard Henderson246c4b72023-10-24 16:36:50 -07001982 op->args[5] = tcg_invert_cond(op->args[5]);
Richard Henderson7a2f7082021-08-26 07:06:39 -07001983 }
1984
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001985 i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07001986 &op->args[2], &op->args[5]);
Richard Henderson0c310a32021-08-24 10:37:24 -07001987 if (i >= 0) {
1988 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
1989 }
1990
Richard Henderson32202782024-12-08 20:16:38 -06001991 tt = arg_info(op->args[3]);
1992 ft = arg_info(op->args[4]);
1993 z_mask = tt->z_mask | ft->z_mask;
1994 s_mask = tt->s_mask & ft->s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001995
Richard Henderson32202782024-12-08 20:16:38 -06001996 if (ti_is_const(tt) && ti_is_const(ft)) {
1997 uint64_t tv = ti_const_val(tt);
1998 uint64_t fv = ti_const_val(ft);
Richard Henderson36355022023-08-04 23:24:04 +00001999 TCGOpcode opc, negopc = 0;
Richard Henderson246c4b72023-10-24 16:36:50 -07002000 TCGCond cond = op->args[5];
Richard Henderson0c310a32021-08-24 10:37:24 -07002001
Richard Henderson67f84c92021-08-25 08:00:20 -07002002 switch (ctx->type) {
2003 case TCG_TYPE_I32:
2004 opc = INDEX_op_setcond_i32;
Richard Henderson36355022023-08-04 23:24:04 +00002005 if (TCG_TARGET_HAS_negsetcond_i32) {
2006 negopc = INDEX_op_negsetcond_i32;
2007 }
2008 tv = (int32_t)tv;
2009 fv = (int32_t)fv;
Richard Henderson67f84c92021-08-25 08:00:20 -07002010 break;
2011 case TCG_TYPE_I64:
2012 opc = INDEX_op_setcond_i64;
Richard Henderson36355022023-08-04 23:24:04 +00002013 if (TCG_TARGET_HAS_negsetcond_i64) {
2014 negopc = INDEX_op_negsetcond_i64;
2015 }
Richard Henderson67f84c92021-08-25 08:00:20 -07002016 break;
2017 default:
2018 g_assert_not_reached();
2019 }
Richard Henderson0c310a32021-08-24 10:37:24 -07002020
2021 if (tv == 1 && fv == 0) {
2022 op->opc = opc;
2023 op->args[3] = cond;
2024 } else if (fv == 1 && tv == 0) {
2025 op->opc = opc;
2026 op->args[3] = tcg_invert_cond(cond);
Richard Henderson36355022023-08-04 23:24:04 +00002027 } else if (negopc) {
2028 if (tv == -1 && fv == 0) {
2029 op->opc = negopc;
2030 op->args[3] = cond;
2031 } else if (fv == -1 && tv == 0) {
2032 op->opc = negopc;
2033 op->args[3] = tcg_invert_cond(cond);
2034 }
Richard Henderson0c310a32021-08-24 10:37:24 -07002035 }
2036 }
Richard Henderson32202782024-12-08 20:16:38 -06002037
2038 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson0c310a32021-08-24 10:37:24 -07002039}
2040
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002041static bool fold_mul(OptContext *ctx, TCGOp *op)
2042{
Richard Hendersone8679952021-08-25 13:19:52 -07002043 if (fold_const2(ctx, op) ||
Richard Henderson5b5cf472021-10-25 11:19:14 -07002044 fold_xi_to_i(ctx, op, 0) ||
2045 fold_xi_to_x(ctx, op, 1)) {
Richard Hendersone8679952021-08-25 13:19:52 -07002046 return true;
2047 }
Richard Hendersoncd9c5832024-12-08 20:18:02 -06002048 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002049}
2050
2051static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
2052{
Richard Henderson7a2f7082021-08-26 07:06:39 -07002053 if (fold_const2_commutative(ctx, op) ||
Richard Hendersone8679952021-08-25 13:19:52 -07002054 fold_xi_to_i(ctx, op, 0)) {
2055 return true;
2056 }
Richard Hendersoncd9c5832024-12-08 20:18:02 -06002057 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002058}
2059
Richard Henderson407112b2021-08-26 06:33:04 -07002060static bool fold_multiply2(OptContext *ctx, TCGOp *op)
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002061{
Richard Henderson7a2f7082021-08-26 07:06:39 -07002062 swap_commutative(op->args[0], &op->args[2], &op->args[3]);
2063
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002064 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
Richard Henderson407112b2021-08-26 06:33:04 -07002065 uint64_t a = arg_info(op->args[2])->val;
2066 uint64_t b = arg_info(op->args[3])->val;
2067 uint64_t h, l;
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002068 TCGArg rl, rh;
Richard Henderson407112b2021-08-26 06:33:04 -07002069 TCGOp *op2;
2070
2071 switch (op->opc) {
2072 case INDEX_op_mulu2_i32:
2073 l = (uint64_t)(uint32_t)a * (uint32_t)b;
2074 h = (int32_t)(l >> 32);
2075 l = (int32_t)l;
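            /*
             * E.g. 0xffffffff * 0xffffffff = 0xfffffffe00000001, giving
             * l = 1 and h = (int32_t)0xfffffffe = -2; the casts keep
             * 32-bit results in canonical sign-extended form.
             */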
2076 break;
Richard Henderson407112b2021-08-26 06:33:04 -07002077 case INDEX_op_mulu2_i64:
2078 mulu64(&l, &h, a, b);
2079 break;
Richard Hendersonbfe96482025-01-09 07:24:32 -08002080 case INDEX_op_muls2:
2081 if (ctx->type == TCG_TYPE_I32) {
2082 l = (int64_t)(int32_t)a * (int32_t)b;
2083 h = l >> 32;
2084 l = (int32_t)l;
2085 } else {
2086 muls64(&l, &h, a, b);
2087 }
Richard Henderson407112b2021-08-26 06:33:04 -07002088 break;
2089 default:
2090 g_assert_not_reached();
2091 }
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002092
2093 rl = op->args[0];
2094 rh = op->args[1];
Richard Henderson407112b2021-08-26 06:33:04 -07002095
2096 /* The proper opcode is supplied by tcg_opt_gen_mov. */
Richard Hendersona3c1c572025-04-21 11:05:29 -07002097 op2 = opt_insert_before(ctx, op, 0, 2);
Richard Henderson407112b2021-08-26 06:33:04 -07002098
2099 tcg_opt_gen_movi(ctx, op, rl, l);
2100 tcg_opt_gen_movi(ctx, op2, rh, h);
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002101 return true;
2102 }
Richard Hendersoncd9c5832024-12-08 20:18:02 -06002103 return finish_folding(ctx, op);
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002104}
2105
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002106static bool fold_nand(OptContext *ctx, TCGOp *op)
2107{
Richard Hendersonfa3168e2024-12-08 20:20:40 -06002108 uint64_t s_mask;
2109
Richard Henderson7a2f7082021-08-26 07:06:39 -07002110 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002111 fold_xi_to_not(ctx, op, -1)) {
2112 return true;
2113 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002114
Richard Hendersonfa3168e2024-12-08 20:20:40 -06002115 s_mask = arg_info(op->args[1])->s_mask
2116 & arg_info(op->args[2])->s_mask;
2117 return fold_masks_s(ctx, op, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002118}
2119
Richard Hendersone25fe882024-04-04 20:53:50 +00002120static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002121{
Richard Hendersonfae450b2021-08-25 22:42:19 -07002122 /* Set to 1 all bits to the left of the rightmost. */
Richard Hendersone25fe882024-04-04 20:53:50 +00002123 uint64_t z_mask = arg_info(op->args[1])->z_mask;
Richard Hendersond151fd32024-12-08 20:23:11 -06002124 z_mask = -(z_mask & -z_mask);
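    /*
     * E.g. if only bit 3 of the input may be set (z_mask = 0x8), the
     * negation is either 0 or -8, so the result's z_mask is
     * -8 = 0xfffffffffffffff8: bit 3 and every bit above it.
     */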
Richard Hendersonfae450b2021-08-25 22:42:19 -07002125
Richard Hendersond151fd32024-12-08 20:23:11 -06002126 return fold_masks_z(ctx, op, z_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002127}
2128
Richard Hendersone25fe882024-04-04 20:53:50 +00002129static bool fold_neg(OptContext *ctx, TCGOp *op)
2130{
2131 return fold_const1(ctx, op) || fold_neg_no_const(ctx, op);
2132}
2133
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002134static bool fold_nor(OptContext *ctx, TCGOp *op)
2135{
Richard Henderson2b7b6952024-12-08 20:25:21 -06002136 uint64_t s_mask;
2137
Richard Henderson7a2f7082021-08-26 07:06:39 -07002138 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002139 fold_xi_to_not(ctx, op, 0)) {
2140 return true;
2141 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002142
Richard Henderson2b7b6952024-12-08 20:25:21 -06002143 s_mask = arg_info(op->args[1])->s_mask
2144 & arg_info(op->args[2])->s_mask;
2145 return fold_masks_s(ctx, op, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002146}
2147
2148static bool fold_not(OptContext *ctx, TCGOp *op)
2149{
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002150 if (fold_const1(ctx, op)) {
2151 return true;
2152 }
Richard Henderson608e75f2024-12-08 20:27:02 -06002153 return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002154}
2155
2156static bool fold_or(OptContext *ctx, TCGOp *op)
2157{
Richard Henderson83b1ba32024-12-08 20:28:59 -06002158 uint64_t z_mask, s_mask;
2159 TempOptInfo *t1, *t2;
2160
Richard Henderson7a2f7082021-08-26 07:06:39 -07002161 if (fold_const2_commutative(ctx, op) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002162 fold_xi_to_x(ctx, op, 0) ||
Richard Hendersonca7bb042021-08-25 13:14:21 -07002163 fold_xx_to_x(ctx, op)) {
2164 return true;
2165 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002166
Richard Henderson83b1ba32024-12-08 20:28:59 -06002167 t1 = arg_info(op->args[1]);
2168 t2 = arg_info(op->args[2]);
2169 z_mask = t1->z_mask | t2->z_mask;
2170 s_mask = t1->s_mask & t2->s_mask;
2171 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002172}
2173
2174static bool fold_orc(OptContext *ctx, TCGOp *op)
2175{
Richard Henderson54e26b22024-12-08 20:30:20 -06002176 uint64_t s_mask;
Richard Henderson50e40ec2024-12-10 08:13:10 -06002177 TempOptInfo *t1, *t2;
Richard Henderson54e26b22024-12-08 20:30:20 -06002178
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002179 if (fold_const2(ctx, op) ||
Richard Henderson4e858d92021-08-26 07:31:13 -07002180 fold_xx_to_i(ctx, op, -1) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002181 fold_xi_to_x(ctx, op, -1) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002182 fold_ix_to_not(ctx, op, 0)) {
2183 return true;
2184 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002185
Richard Henderson50e40ec2024-12-10 08:13:10 -06002186 t2 = arg_info(op->args[2]);
2187 if (ti_is_const(t2)) {
2188 /* Fold orc r,x,i to or r,x,~i. */
2189 switch (ctx->type) {
2190 case TCG_TYPE_I32:
2191 case TCG_TYPE_I64:
2192 op->opc = INDEX_op_or;
2193 break;
2194 case TCG_TYPE_V64:
2195 case TCG_TYPE_V128:
2196 case TCG_TYPE_V256:
2197 op->opc = INDEX_op_or_vec;
2198 break;
2199 default:
2200 g_assert_not_reached();
2201 }
2202 op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
2203 return fold_or(ctx, op);
2204 }
2205
2206 t1 = arg_info(op->args[1]);
2207 s_mask = t1->s_mask & t2->s_mask;
Richard Henderson54e26b22024-12-08 20:30:20 -06002208 return fold_masks_s(ctx, op, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002209}
2210
Richard Henderson6813be92024-12-08 20:33:30 -06002211static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
Richard Henderson3eefdf22021-08-25 11:06:43 -07002212{
Richard Hendersonfae450b2021-08-25 22:42:19 -07002213 const TCGOpDef *def = &tcg_op_defs[op->opc];
2214 MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
2215 MemOp mop = get_memop(oi);
2216 int width = 8 * memop_size(mop);
Richard Henderson6813be92024-12-08 20:33:30 -06002217 uint64_t z_mask = -1, s_mask = 0;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002218
Richard Henderson57fe5c62021-08-26 12:04:46 -07002219 if (width < 64) {
Richard Henderson75c3bf32024-12-19 10:50:40 -08002220 if (mop & MO_SIGN) {
Richard Henderson6813be92024-12-08 20:33:30 -06002221 s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
Richard Henderson75c3bf32024-12-19 10:50:40 -08002222 } else {
Richard Henderson6813be92024-12-08 20:33:30 -06002223 z_mask = MAKE_64BIT_MASK(0, width);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002224 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002225 }
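    /*
     * E.g. an 8-bit signed load (width = 8) sets s_mask to cover bits
     * 63..7, since everything from the loaded sign bit up repeats it;
     * an 8-bit unsigned load instead narrows z_mask to 0xff.
     */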
2226
Richard Henderson3eefdf22021-08-25 11:06:43 -07002227 /* Opcodes that touch guest memory stop the mb optimization. */
2228 ctx->prev_mb = NULL;
Richard Henderson6813be92024-12-08 20:33:30 -06002229
2230 return fold_masks_zs(ctx, op, z_mask, s_mask);
2231}
2232
2233static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
2234{
2235 /* Opcodes that touch guest memory stop the mb optimization. */
2236 ctx->prev_mb = NULL;
2237 return finish_folding(ctx, op);
Richard Henderson3eefdf22021-08-25 11:06:43 -07002238}
2239
2240static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
2241{
2242 /* Opcodes that touch guest memory stop the mb optimization. */
2243 ctx->prev_mb = NULL;
Richard Henderson082b3ef2024-12-08 20:34:57 -06002244 return true;
Richard Henderson3eefdf22021-08-25 11:06:43 -07002245}
2246
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002247static bool fold_remainder(OptContext *ctx, TCGOp *op)
2248{
Richard Henderson267c17e2021-10-25 11:30:33 -07002249 if (fold_const2(ctx, op) ||
2250 fold_xx_to_i(ctx, op, 0)) {
2251 return true;
2252 }
Richard Hendersonf9e39342024-12-08 20:36:50 -06002253 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002254}
2255
Richard Henderson95eb2292024-12-08 20:47:59 -06002256/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
2257static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
Richard Henderson8d65cda2024-03-26 16:00:40 -10002258{
2259 uint64_t a_zmask, b_val;
2260 TCGCond cond;
2261
2262 if (!arg_is_const(op->args[2])) {
2263          return 0;
2264 }
2265
2266 a_zmask = arg_info(op->args[1])->z_mask;
2267 b_val = arg_info(op->args[2])->val;
2268 cond = op->args[3];
2269
2270 if (ctx->type == TCG_TYPE_I32) {
2271 a_zmask = (uint32_t)a_zmask;
2272 b_val = (uint32_t)b_val;
2273 }
2274
2275 /*
2276 * A with only low bits set vs B with high bits set means that A < B.
2277 */
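    /*
     * E.g. a_zmask = 0xff (A is at most 255) vs b_val = 0x100:
     * A < B always holds, so LTU folds to 1 and GEU folds to 0.
     */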
2278 if (a_zmask < b_val) {
2279 bool inv = false;
2280
2281 switch (cond) {
2282 case TCG_COND_NE:
2283 case TCG_COND_LEU:
2284 case TCG_COND_LTU:
2285 inv = true;
2286 /* fall through */
2287 case TCG_COND_GTU:
2288 case TCG_COND_GEU:
2289 case TCG_COND_EQ:
2290 return tcg_opt_gen_movi(ctx, op, op->args[0], neg ? -inv : inv);
2291 default:
2292 break;
2293 }
2294 }
2295
2296 /*
2297 * A with only lsb set is already boolean.
2298 */
2299 if (a_zmask <= 1) {
2300 bool convert = false;
2301 bool inv = false;
2302
2303 switch (cond) {
2304 case TCG_COND_EQ:
2305 inv = true;
2306 /* fall through */
2307 case TCG_COND_NE:
2308 convert = (b_val == 0);
2309 break;
2310 case TCG_COND_LTU:
2311 case TCG_COND_TSTEQ:
2312 inv = true;
2313 /* fall through */
2314 case TCG_COND_GEU:
2315 case TCG_COND_TSTNE:
2316 convert = (b_val == 1);
2317 break;
2318 default:
2319 break;
2320 }
2321 if (convert) {
Richard Henderson8d65cda2024-03-26 16:00:40 -10002322 if (!inv && !neg) {
2323 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
2324 }
2325
Richard Henderson8d65cda2024-03-26 16:00:40 -10002326 if (!inv) {
Richard Henderson69713582025-01-06 22:48:57 -08002327 op->opc = INDEX_op_neg;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002328 } else if (neg) {
Richard Henderson79602f62025-01-06 09:11:39 -08002329 op->opc = INDEX_op_add;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002330 op->args[2] = arg_new_constant(ctx, -1);
2331 } else {
Richard Hendersonfffd3dc2025-01-06 15:18:35 -08002332 op->opc = INDEX_op_xor;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002333 op->args[2] = arg_new_constant(ctx, 1);
2334 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002335 return -1;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002336 }
2337 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002338 return 0;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002339}
2340
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002341static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
2342{
Paolo Bonziniff202812024-02-28 12:06:41 +01002343 TCGOpcode uext_opc = 0, sext_opc = 0;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002344 TCGCond cond = op->args[3];
2345 TCGArg ret, src1, src2;
2346 TCGOp *op2;
2347 uint64_t val;
2348 int sh;
2349 bool inv;
2350
2351 if (!is_tst_cond(cond) || !arg_is_const(op->args[2])) {
2352 return;
2353 }
2354
2355 src2 = op->args[2];
2356 val = arg_info(src2)->val;
2357 if (!is_power_of_2(val)) {
2358 return;
2359 }
2360 sh = ctz64(val);
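    /*
     * E.g. "setcond r, x, 0x10, tstne" computes bit 4 of x, which
     * extract(x, 4, 1), or (x >> 4) & 1, produces directly.
     */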
2361
2362 switch (ctx->type) {
2363 case TCG_TYPE_I32:
Richard Henderson4bce7522024-12-25 18:55:45 -08002364 if (TCG_TARGET_extract_valid(TCG_TYPE_I32, sh, 1)) {
Richard Hendersonc334de12024-12-26 00:43:19 -08002365 uext_opc = INDEX_op_extract_i32;
Richard Henderson4bce7522024-12-25 18:55:45 -08002366 }
2367 if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, sh, 1)) {
Richard Hendersonc334de12024-12-26 00:43:19 -08002368 sext_opc = INDEX_op_sextract_i32;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002369 }
2370 break;
2371 case TCG_TYPE_I64:
Richard Henderson4bce7522024-12-25 18:55:45 -08002372 if (TCG_TARGET_extract_valid(TCG_TYPE_I64, sh, 1)) {
Richard Hendersonc334de12024-12-26 00:43:19 -08002373 uext_opc = INDEX_op_extract_i64;
Richard Henderson4bce7522024-12-25 18:55:45 -08002374 }
2375 if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, sh, 1)) {
Richard Hendersonc334de12024-12-26 00:43:19 -08002376 sext_opc = INDEX_op_sextract_i64;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002377 }
2378 break;
2379 default:
2380 g_assert_not_reached();
2381 }
2382
2383 ret = op->args[0];
2384 src1 = op->args[1];
2385 inv = cond == TCG_COND_TSTEQ;
2386
2387 if (sh && sext_opc && neg && !inv) {
2388 op->opc = sext_opc;
2389 op->args[1] = src1;
2390 op->args[2] = sh;
2391 op->args[3] = 1;
2392 return;
2393 } else if (sh && uext_opc) {
2394 op->opc = uext_opc;
2395 op->args[1] = src1;
2396 op->args[2] = sh;
2397 op->args[3] = 1;
2398 } else {
2399 if (sh) {
Richard Henderson74dbd362025-01-07 22:52:10 -08002400 op2 = opt_insert_before(ctx, op, INDEX_op_shr, 3);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002401 op2->args[0] = ret;
2402 op2->args[1] = src1;
2403 op2->args[2] = arg_new_constant(ctx, sh);
2404 src1 = ret;
2405 }
Richard Hendersonc3b920b2025-01-06 10:32:44 -08002406 op->opc = INDEX_op_and;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002407 op->args[1] = src1;
2408 op->args[2] = arg_new_constant(ctx, 1);
2409 }
2410
2411 if (neg && inv) {
Richard Henderson93a9ddb2025-01-06 22:06:08 -08002412 op2 = opt_insert_after(ctx, op, INDEX_op_add, 3);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002413 op2->args[0] = ret;
2414 op2->args[1] = ret;
Richard Henderson93a9ddb2025-01-06 22:06:08 -08002415 op2->args[2] = arg_new_constant(ctx, -1);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002416 } else if (inv) {
Richard Hendersonfffd3dc2025-01-06 15:18:35 -08002417 op2 = opt_insert_after(ctx, op, INDEX_op_xor, 3);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002418 op2->args[0] = ret;
2419 op2->args[1] = ret;
2420 op2->args[2] = arg_new_constant(ctx, 1);
2421 } else if (neg) {
Richard Henderson69713582025-01-06 22:48:57 -08002422 op2 = opt_insert_after(ctx, op, INDEX_op_neg, 2);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002423 op2->args[0] = ret;
2424 op2->args[1] = ret;
2425 }
2426}
2427
Richard Hendersonc63ff552021-08-24 09:35:30 -07002428static bool fold_setcond(OptContext *ctx, TCGOp *op)
2429{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002430 int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07002431 &op->args[2], &op->args[3]);
Richard Hendersonc63ff552021-08-24 09:35:30 -07002432 if (i >= 0) {
2433 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
2434 }
Richard Henderson8d65cda2024-03-26 16:00:40 -10002435
Richard Henderson95eb2292024-12-08 20:47:59 -06002436 i = fold_setcond_zmask(ctx, op, false);
2437 if (i > 0) {
Richard Henderson8d65cda2024-03-26 16:00:40 -10002438 return true;
2439 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002440 if (i == 0) {
2441 fold_setcond_tst_pow2(ctx, op, false);
2442 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002443
Richard Henderson2c8a2832024-12-08 20:50:37 -06002444 return fold_masks_z(ctx, op, 1);
Richard Hendersonc63ff552021-08-24 09:35:30 -07002445}
2446
Richard Henderson36355022023-08-04 23:24:04 +00002447static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
2448{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002449 int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07002450 &op->args[2], &op->args[3]);
Richard Henderson36355022023-08-04 23:24:04 +00002451 if (i >= 0) {
2452 return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
2453 }
Richard Henderson8d65cda2024-03-26 16:00:40 -10002454
Richard Henderson95eb2292024-12-08 20:47:59 -06002455 i = fold_setcond_zmask(ctx, op, true);
2456 if (i > 0) {
Richard Henderson8d65cda2024-03-26 16:00:40 -10002457 return true;
2458 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002459 if (i == 0) {
2460 fold_setcond_tst_pow2(ctx, op, true);
2461 }
Richard Henderson36355022023-08-04 23:24:04 +00002462
2463 /* Value is {0,-1} so all bits are repetitions of the sign. */
Richard Henderson081cf082024-12-08 20:50:58 -06002464 return fold_masks_s(ctx, op, -1);
Richard Henderson36355022023-08-04 23:24:04 +00002465}
2466
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002467static bool fold_setcond2(OptContext *ctx, TCGOp *op)
2468{
Richard Henderson7e64b112023-10-24 16:53:56 -07002469 TCGCond cond;
Richard Henderson7a2f7082021-08-26 07:06:39 -07002470 int i, inv = 0;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002471
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002472 i = do_constant_folding_cond2(ctx, op, &op->args[1]);
Richard Henderson7e64b112023-10-24 16:53:56 -07002473 cond = op->args[5];
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002474 if (i >= 0) {
2475 goto do_setcond_const;
2476 }
2477
2478 switch (cond) {
2479 case TCG_COND_LT:
2480 case TCG_COND_GE:
2481 /*
2482 * Simplify LT/GE comparisons vs zero to a single compare
2483 * vs the high word of the input.
2484 */
Richard Henderson27cdb852023-10-23 11:38:00 -07002485 if (arg_is_const_val(op->args[3], 0) &&
2486 arg_is_const_val(op->args[4], 0)) {
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002487 goto do_setcond_high;
2488 }
2489 break;
2490
2491 case TCG_COND_NE:
2492 inv = 1;
2493 QEMU_FALLTHROUGH;
2494 case TCG_COND_EQ:
2495 /*
2496 * Simplify EQ/NE comparisons where one of the pairs
2497 * can be simplified.
2498 */
Richard Henderson67f84c92021-08-25 08:00:20 -07002499 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002500 op->args[3], cond);
2501 switch (i ^ inv) {
2502 case 0:
2503 goto do_setcond_const;
2504 case 1:
2505 goto do_setcond_high;
2506 }
2507
Richard Henderson67f84c92021-08-25 08:00:20 -07002508 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002509 op->args[4], cond);
2510 switch (i ^ inv) {
2511 case 0:
2512 goto do_setcond_const;
2513 case 1:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002514 goto do_setcond_low;
2515 }
2516 break;
2517
2518 case TCG_COND_TSTEQ:
2519 case TCG_COND_TSTNE:
Richard Hendersona71d9df2024-06-30 19:46:23 -07002520 if (arg_is_const_val(op->args[3], 0)) {
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002521 goto do_setcond_high;
2522 }
2523 if (arg_is_const_val(op->args[4], 0)) {
2524 goto do_setcond_low;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002525 }
2526 break;
2527
2528 default:
2529 break;
2530
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002531 do_setcond_low:
2532 op->args[2] = op->args[3];
2533 op->args[3] = cond;
2534 op->opc = INDEX_op_setcond_i32;
2535 return fold_setcond(ctx, op);
2536
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002537 do_setcond_high:
2538 op->args[1] = op->args[2];
2539 op->args[2] = op->args[4];
2540 op->args[3] = cond;
2541 op->opc = INDEX_op_setcond_i32;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002542 return fold_setcond(ctx, op);
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002543 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002544
Richard Hendersona53502c2024-12-08 20:56:36 -06002545 return fold_masks_z(ctx, op, 1);
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002546
2547 do_setcond_const:
2548 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
2549}
2550
Richard Hendersonb6617c82021-08-24 10:44:53 -07002551static bool fold_sextract(OptContext *ctx, TCGOp *op)
2552{
Richard Henderson57fe5c62021-08-26 12:04:46 -07002553 uint64_t z_mask, s_mask, s_mask_old;
Richard Hendersonbaff5072024-12-08 21:09:30 -06002554 TempOptInfo *t1 = arg_info(op->args[1]);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002555 int pos = op->args[2];
2556 int len = op->args[3];
Richard Hendersonfae450b2021-08-25 22:42:19 -07002557
Richard Hendersonbaff5072024-12-08 21:09:30 -06002558 if (ti_is_const(t1)) {
2559 return tcg_opt_gen_movi(ctx, op, op->args[0],
2560 sextract64(ti_const_val(t1), pos, len));
Richard Hendersonb6617c82021-08-24 10:44:53 -07002561 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002562
Richard Hendersonbaff5072024-12-08 21:09:30 -06002563 s_mask_old = t1->s_mask;
2564 s_mask = s_mask_old >> pos;
2565 s_mask |= -1ull << (len - 1);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002566
Richard Hendersonaa9e0502024-12-21 22:03:53 -08002567 if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
Richard Henderson045ace32024-12-19 10:33:51 -08002568 return true;
Richard Henderson57fe5c62021-08-26 12:04:46 -07002569 }
2570
Richard Hendersonbaff5072024-12-08 21:09:30 -06002571 z_mask = sextract64(t1->z_mask, pos, len);
2572 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Hendersonb6617c82021-08-24 10:44:53 -07002573}
2574
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002575static bool fold_shift(OptContext *ctx, TCGOp *op)
2576{
Richard Henderson4ed2ba32024-12-19 19:38:54 -08002577 uint64_t s_mask, z_mask;
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002578 TempOptInfo *t1, *t2;
Richard Henderson93a967f2021-08-26 13:24:59 -07002579
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002580 if (fold_const2(ctx, op) ||
Richard Hendersonda48e272021-08-25 20:42:04 -07002581 fold_ix_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002582 fold_xi_to_x(ctx, op, 0)) {
2583 return true;
2584 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002585
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002586 t1 = arg_info(op->args[1]);
2587 t2 = arg_info(op->args[2]);
2588 s_mask = t1->s_mask;
2589 z_mask = t1->z_mask;
Richard Henderson93a967f2021-08-26 13:24:59 -07002590
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002591 if (ti_is_const(t2)) {
2592 int sh = ti_const_val(t2);
Richard Henderson93a967f2021-08-26 13:24:59 -07002593
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002594 z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
Richard Henderson93a967f2021-08-26 13:24:59 -07002595 s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
Richard Henderson93a967f2021-08-26 13:24:59 -07002596
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002597 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002598 }
Richard Henderson93a967f2021-08-26 13:24:59 -07002599
2600 switch (op->opc) {
Richard Henderson3949f362025-01-08 08:05:18 -08002601 case INDEX_op_sar:
Richard Henderson93a967f2021-08-26 13:24:59 -07002602 /*
2603 * Arithmetic right shift will not reduce the number of
2604 * input sign repetitions.
2605 */
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002606 return fold_masks_s(ctx, op, s_mask);
Richard Henderson74dbd362025-01-07 22:52:10 -08002607 case INDEX_op_shr:
Richard Henderson93a967f2021-08-26 13:24:59 -07002608 /*
2609 * If the sign bit is known zero, then logical right shift
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002610 * will not reduce the number of input sign repetitions.
Richard Henderson93a967f2021-08-26 13:24:59 -07002611 */
Richard Henderson4ed2ba32024-12-19 19:38:54 -08002612 if (~z_mask & -s_mask) {
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002613 return fold_masks_s(ctx, op, s_mask);
Richard Henderson93a967f2021-08-26 13:24:59 -07002614 }
2615 break;
2616 default:
2617 break;
2618 }
2619
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002620 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002621}
2622
Richard Henderson9caca882021-08-24 13:30:32 -07002623static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
2624{
2625 TCGOpcode neg_op;
2626 bool have_neg;
2627
2628 if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
2629 return false;
2630 }
2631
2632 switch (ctx->type) {
2633 case TCG_TYPE_I32:
Richard Henderson9caca882021-08-24 13:30:32 -07002634 case TCG_TYPE_I64:
Richard Henderson69713582025-01-06 22:48:57 -08002635 neg_op = INDEX_op_neg;
Richard Hendersonb701f192023-10-25 21:14:04 -07002636 have_neg = true;
Richard Henderson9caca882021-08-24 13:30:32 -07002637 break;
2638 case TCG_TYPE_V64:
2639 case TCG_TYPE_V128:
2640 case TCG_TYPE_V256:
2641 neg_op = INDEX_op_neg_vec;
2642 have_neg = (TCG_TARGET_HAS_neg_vec &&
2643 tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
2644 break;
2645 default:
2646 g_assert_not_reached();
2647 }
2648 if (have_neg) {
2649 op->opc = neg_op;
2650 op->args[1] = op->args[2];
Richard Hendersone25fe882024-04-04 20:53:50 +00002651 return fold_neg_no_const(ctx, op);
Richard Henderson9caca882021-08-24 13:30:32 -07002652 }
2653 return false;
2654}
2655
Richard Hendersonc578ff12021-12-16 06:07:25 -08002656/* We cannot as yet do_constant_folding with vectors. */
2657static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002658{
Richard Hendersonc578ff12021-12-16 06:07:25 -08002659 if (fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002660 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson9caca882021-08-24 13:30:32 -07002661 fold_sub_to_neg(ctx, op)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07002662 return true;
2663 }
Richard Hendersonfe1d0072024-12-08 21:15:22 -06002664 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002665}
2666
Richard Hendersonc578ff12021-12-16 06:07:25 -08002667static bool fold_sub(OptContext *ctx, TCGOp *op)
2668{
Richard Hendersonfe1d0072024-12-08 21:15:22 -06002669 if (fold_const2(ctx, op) ||
2670 fold_xx_to_i(ctx, op, 0) ||
2671 fold_xi_to_x(ctx, op, 0) ||
2672 fold_sub_to_neg(ctx, op)) {
Richard Henderson6334a962023-10-25 18:39:43 -07002673 return true;
2674 }
2675
2676 /* Fold sub r,x,i to add r,x,-i */
2677 if (arg_is_const(op->args[2])) {
2678 uint64_t val = arg_info(op->args[2])->val;
2679
Richard Henderson79602f62025-01-06 09:11:39 -08002680 op->opc = INDEX_op_add;
Richard Henderson6334a962023-10-25 18:39:43 -07002681 op->args[2] = arg_new_constant(ctx, -val);
2682 }
Richard Hendersonfe1d0072024-12-08 21:15:22 -06002683 return finish_folding(ctx, op);
Richard Hendersonc578ff12021-12-16 06:07:25 -08002684}
2685
Richard Henderson9531c072021-08-26 06:51:39 -07002686static bool fold_sub2(OptContext *ctx, TCGOp *op)
Richard Hendersone3f7dc22021-08-24 10:30:38 -07002687{
Richard Henderson9531c072021-08-26 06:51:39 -07002688 return fold_addsub2(ctx, op, false);
Richard Hendersone3f7dc22021-08-24 10:30:38 -07002689}
2690
Richard Hendersonfae450b2021-08-25 22:42:19 -07002691static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
2692{
Richard Hendersond33e0f02024-12-09 08:53:20 -06002693 uint64_t z_mask = -1, s_mask = 0;
2694
Richard Hendersonfae450b2021-08-25 22:42:19 -07002695 /* We can't do any folding with a load, but we can record bits. */
2696 switch (op->opc) {
Richard Henderson57fe5c62021-08-26 12:04:46 -07002697 CASE_OP_32_64(ld8s):
Richard Hendersond33e0f02024-12-09 08:53:20 -06002698 s_mask = INT8_MIN;
Richard Henderson57fe5c62021-08-26 12:04:46 -07002699 break;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002700 CASE_OP_32_64(ld8u):
Richard Hendersond33e0f02024-12-09 08:53:20 -06002701 z_mask = MAKE_64BIT_MASK(0, 8);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002702 break;
2703 CASE_OP_32_64(ld16s):
Richard Hendersond33e0f02024-12-09 08:53:20 -06002704 s_mask = INT16_MIN;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002705 break;
2706 CASE_OP_32_64(ld16u):
Richard Hendersond33e0f02024-12-09 08:53:20 -06002707 z_mask = MAKE_64BIT_MASK(0, 16);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002708 break;
2709 case INDEX_op_ld32s_i64:
Richard Hendersond33e0f02024-12-09 08:53:20 -06002710 s_mask = INT32_MIN;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002711 break;
2712 case INDEX_op_ld32u_i64:
Richard Hendersond33e0f02024-12-09 08:53:20 -06002713 z_mask = MAKE_64BIT_MASK(0, 32);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002714 break;
2715 default:
2716 g_assert_not_reached();
2717 }
Richard Hendersond33e0f02024-12-09 08:53:20 -06002718 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002719}
2720
Richard Hendersonab84dc32023-08-23 23:04:24 -07002721static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
2722{
2723 TCGTemp *dst, *src;
2724 intptr_t ofs;
2725 TCGType type;
2726
2727 if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
Richard Henderson0fb5b752024-12-09 09:44:40 -06002728 return finish_folding(ctx, op);
Richard Hendersonab84dc32023-08-23 23:04:24 -07002729 }
2730
2731 type = ctx->type;
2732 ofs = op->args[2];
2733 dst = arg_temp(op->args[0]);
2734 src = find_mem_copy_for(ctx, type, ofs);
2735 if (src && src->base_type == type) {
2736 return tcg_opt_gen_mov(ctx, op, temp_arg(dst), temp_arg(src));
2737 }
2738
2739 reset_ts(ctx, dst);
2740 record_mem_copy(ctx, type, dst, ofs, ofs + tcg_type_size(type) - 1);
2741 return true;
2742}
2743
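/*
 * A store through a pointer other than env could alias anything, so
 * all tracked memory copies are dropped; a store into env invalidates
 * only the bytes it overlaps.
 */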
2744static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
2745{
2746 intptr_t ofs = op->args[2];
    intptr_t lm1;    /* width of the store in bytes, minus 1 */
2748
2749 if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
2750 remove_mem_copy_all(ctx);
Richard Henderson082b3ef2024-12-08 20:34:57 -06002751 return true;
Richard Hendersonab84dc32023-08-23 23:04:24 -07002752 }
2753
2754 switch (op->opc) {
2755 CASE_OP_32_64(st8):
2756 lm1 = 0;
2757 break;
2758 CASE_OP_32_64(st16):
2759 lm1 = 1;
2760 break;
2761 case INDEX_op_st32_i64:
2762 case INDEX_op_st_i32:
2763 lm1 = 3;
2764 break;
2765 case INDEX_op_st_i64:
2766 lm1 = 7;
2767 break;
2768 case INDEX_op_st_vec:
2769 lm1 = tcg_type_size(ctx->type) - 1;
2770 break;
2771 default:
2772 g_assert_not_reached();
2773 }
2774 remove_mem_copy_in(ctx, ofs, ofs + lm1);
Richard Henderson082b3ef2024-12-08 20:34:57 -06002775 return true;
Richard Hendersonab84dc32023-08-23 23:04:24 -07002776}
2777
2778static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
2779{
2780 TCGTemp *src;
2781 intptr_t ofs, last;
2782 TCGType type;
2783
2784 if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
Richard Henderson082b3ef2024-12-08 20:34:57 -06002785 return fold_tcg_st(ctx, op);
Richard Hendersonab84dc32023-08-23 23:04:24 -07002786 }
2787
2788 src = arg_temp(op->args[0]);
2789 ofs = op->args[2];
2790 type = ctx->type;
Richard Henderson3eaadae2023-08-23 23:13:06 -07002791
    /*
     * Eliminate duplicate stores of a constant.  This happens
     * frequently when the target ISA zero-extends register writes,
     * so the same constant lands at the same env offset repeatedly.
     */
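    /*
     * A sketch of the pattern (hypothetical opcode stream):
     *     st32_i64 zero, env, $off    <- recorded as a mem copy
     *     ... no store overlapping [off, off+3] ...
     *     st32_i64 zero, env, $off    <- removed here
     * Constant temps are interned, so an equal constant reappears as
     * the identical TCGTemp and matches the recorded copy.
     */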
2796 if (ts_is_const(src)) {
2797 TCGTemp *prev = find_mem_copy_for(ctx, type, ofs);
2798 if (src == prev) {
2799 tcg_op_remove(ctx->tcg, op);
2800 return true;
2801 }
2802 }
2803
Richard Hendersonab84dc32023-08-23 23:04:24 -07002804 last = ofs + tcg_type_size(type) - 1;
2805 remove_mem_copy_in(ctx, ofs, last);
2806 record_mem_copy(ctx, type, src, ofs, last);
Richard Henderson082b3ef2024-12-08 20:34:57 -06002807 return true;
Richard Hendersonab84dc32023-08-23 23:04:24 -07002808}
2809
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002810static bool fold_xor(OptContext *ctx, TCGOp *op)
2811{
Richard Hendersonc890fd72024-12-08 21:39:01 -06002812 uint64_t z_mask, s_mask;
2813 TempOptInfo *t1, *t2;
2814
Richard Henderson7a2f7082021-08-26 07:06:39 -07002815 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002816 fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002817 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002818 fold_xi_to_not(ctx, op, -1)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07002819 return true;
2820 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002821
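    /*
     * An xor result bit can be nonzero only where at least one input
     * bit can be, so the unknown-bits masks combine with OR; the
     * sign-replication guarantee survives only where both inputs
     * provide it, hence AND for s_mask.
     */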
Richard Hendersonc890fd72024-12-08 21:39:01 -06002822 t1 = arg_info(op->args[1]);
2823 t2 = arg_info(op->args[2]);
2824 z_mask = t1->z_mask | t2->z_mask;
2825 s_mask = t1->s_mask & t2->s_mask;
2826 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002827}
2828
Kirill Batuzov22613af2011-07-07 16:37:13 +04002829/* Propagate constants and copies, fold constant expressions. */
Aurelien Jarno36e60ef2015-06-04 21:53:27 +02002830void tcg_optimize(TCGContext *s)
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04002831{
Richard Henderson5cf32be2021-08-24 08:17:08 -07002832 int nb_temps, i;
Richard Hendersond0ed5152021-08-24 07:38:39 -07002833 TCGOp *op, *op_next;
Richard Hendersondc849882021-08-24 07:13:45 -07002834 OptContext ctx = { .tcg = s };
Richard Henderson5d8f5362012-09-21 10:13:38 -07002835
Richard Hendersonab84dc32023-08-23 23:04:24 -07002836 QSIMPLEQ_INIT(&ctx.mem_free);
2837
    /*
     * Each temp's folding state is a TempOptInfo reached via its
     * state_ptr (reset here).  If a temp holds a constant, its value
     * is kept in that info.  If a temp is a copy of other temps, the
     * copies are available through a doubly linked circular list.
     */
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04002842
2843 nb_temps = s->nb_temps;
Richard Henderson8f17a972020-03-30 19:52:02 -07002844 for (i = 0; i < nb_temps; ++i) {
2845 s->temps[i].state_ptr = NULL;
2846 }
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04002847
Richard Henderson15fa08f2017-11-02 15:19:14 +01002848 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07002849 TCGOpcode opc = op->opc;
Richard Henderson5cf32be2021-08-24 08:17:08 -07002850 const TCGOpDef *def;
Richard Henderson404a1482021-08-24 11:08:21 -07002851 bool done = false;
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07002852
Richard Henderson5cf32be2021-08-24 08:17:08 -07002853 /* Calls are special. */
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07002854 if (opc == INDEX_op_call) {
Richard Henderson5cf32be2021-08-24 08:17:08 -07002855 fold_call(&ctx, op);
2856 continue;
Richard Hendersoncf066672014-03-22 20:06:52 -07002857 }
Richard Henderson5cf32be2021-08-24 08:17:08 -07002858
2859 def = &tcg_op_defs[opc];
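        /*
         * Attach TempOptInfo to each operand, then substitute
         * known-equivalent copies for the input temps.
         */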
Richard Hendersonec5d4cb2021-08-24 08:20:27 -07002860 init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
2861 copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
Kirill Batuzov22613af2011-07-07 16:37:13 +04002862
Richard Henderson67f84c92021-08-25 08:00:20 -07002863 /* Pre-compute the type of the operation. */
Richard Henderson4d872212025-01-02 19:43:06 -08002864 ctx.type = TCGOP_TYPE(op);
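        /*
         * The folding helpers consult ctx.type rather than re-deriving
         * the operation width from the opcode spelling.
         */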
Richard Henderson67f84c92021-08-25 08:00:20 -07002865
Richard Henderson2cfac7f2021-08-25 13:05:43 -07002866 /*
2867 * Process each opcode.
2868 * Sorted alphabetically by opcode as much as possible.
2869 */
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07002870 switch (opc) {
Richard Henderson79602f62025-01-06 09:11:39 -08002871 case INDEX_op_add:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002872 done = fold_add(&ctx, op);
2873 break;
Richard Hendersonc578ff12021-12-16 06:07:25 -08002874 case INDEX_op_add_vec:
2875 done = fold_add_vec(&ctx, op);
2876 break;
Richard Henderson9531c072021-08-26 06:51:39 -07002877 CASE_OP_32_64(add2):
2878 done = fold_add2(&ctx, op);
Richard Hendersone3f7dc22021-08-24 10:30:38 -07002879 break;
Richard Hendersonc3b920b2025-01-06 10:32:44 -08002880 case INDEX_op_and:
2881 case INDEX_op_and_vec:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002882 done = fold_and(&ctx, op);
2883 break;
Richard Henderson46f96bf2025-01-06 12:37:02 -08002884 case INDEX_op_andc:
2885 case INDEX_op_andc_vec:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002886 done = fold_andc(&ctx, op);
2887 break;
Richard Henderson079b0802021-08-24 09:30:59 -07002888 CASE_OP_32_64(brcond):
2889 done = fold_brcond(&ctx, op);
2890 break;
Richard Henderson764d2ab2021-08-24 09:22:11 -07002891 case INDEX_op_brcond2_i32:
2892 done = fold_brcond2(&ctx, op);
2893 break;
Richard Henderson09bacdc2021-08-24 11:58:12 -07002894 CASE_OP_32_64(bswap16):
2895 CASE_OP_32_64(bswap32):
2896 case INDEX_op_bswap64_i64:
2897 done = fold_bswap(&ctx, op);
2898 break;
Richard Henderson5a5bb0a2025-01-08 16:12:46 -08002899 case INDEX_op_clz:
Richard Hendersonc96447d2025-01-08 17:07:01 -08002900 case INDEX_op_ctz:
Richard Henderson30dd0bf2021-08-24 10:51:34 -07002901 done = fold_count_zeros(&ctx, op);
2902 break;
Richard Henderson97218ae2025-01-08 18:37:43 -08002903 case INDEX_op_ctpop:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002904 done = fold_ctpop(&ctx, op);
2905 break;
Richard Henderson1b1907b2021-08-24 10:47:04 -07002906 CASE_OP_32_64(deposit):
2907 done = fold_deposit(&ctx, op);
2908 break;
Richard Hendersonb2c514f2025-01-07 13:22:56 -08002909 case INDEX_op_divs:
Richard Henderson961b80a2025-01-07 14:27:19 -08002910 case INDEX_op_divu:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002911 done = fold_divide(&ctx, op);
2912 break;
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07002913 case INDEX_op_dup_vec:
2914 done = fold_dup(&ctx, op);
2915 break;
2916 case INDEX_op_dup2_vec:
2917 done = fold_dup2(&ctx, op);
2918 break;
Richard Henderson5c0968a2025-01-06 15:47:53 -08002919 case INDEX_op_eqv:
2920 case INDEX_op_eqv_vec:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002921 done = fold_eqv(&ctx, op);
2922 break;
Richard Hendersonb6617c82021-08-24 10:44:53 -07002923 CASE_OP_32_64(extract):
2924 done = fold_extract(&ctx, op);
2925 break;
Richard Hendersondcd08992021-08-24 10:41:39 -07002926 CASE_OP_32_64(extract2):
2927 done = fold_extract2(&ctx, op);
2928 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002929 case INDEX_op_ext_i32_i64:
2930 done = fold_exts(&ctx, op);
2931 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002932 case INDEX_op_extu_i32_i64:
2933 case INDEX_op_extrl_i64_i32:
2934 case INDEX_op_extrh_i64_i32:
2935 done = fold_extu(&ctx, op);
2936 break;
Richard Henderson57fe5c62021-08-26 12:04:46 -07002937 CASE_OP_32_64(ld8s):
Richard Hendersonfae450b2021-08-25 22:42:19 -07002938 CASE_OP_32_64(ld8u):
Richard Henderson57fe5c62021-08-26 12:04:46 -07002939 CASE_OP_32_64(ld16s):
Richard Hendersonfae450b2021-08-25 22:42:19 -07002940 CASE_OP_32_64(ld16u):
Richard Henderson57fe5c62021-08-26 12:04:46 -07002941 case INDEX_op_ld32s_i64:
Richard Hendersonfae450b2021-08-25 22:42:19 -07002942 case INDEX_op_ld32u_i64:
2943 done = fold_tcg_ld(&ctx, op);
2944 break;
Richard Hendersonab84dc32023-08-23 23:04:24 -07002945 case INDEX_op_ld_i32:
2946 case INDEX_op_ld_i64:
2947 case INDEX_op_ld_vec:
2948 done = fold_tcg_ld_memcopy(&ctx, op);
2949 break;
2950 CASE_OP_32_64(st8):
2951 CASE_OP_32_64(st16):
2952 case INDEX_op_st32_i64:
2953 done = fold_tcg_st(&ctx, op);
2954 break;
2955 case INDEX_op_st_i32:
2956 case INDEX_op_st_i64:
2957 case INDEX_op_st_vec:
2958 done = fold_tcg_st_memcopy(&ctx, op);
2959 break;
Richard Henderson3eefdf22021-08-25 11:06:43 -07002960 case INDEX_op_mb:
2961 done = fold_mb(&ctx, op);
2962 break;
Richard Hendersonb5701262024-12-28 15:58:24 -08002963 case INDEX_op_mov:
2964 case INDEX_op_mov_vec:
Richard Henderson2cfac7f2021-08-25 13:05:43 -07002965 done = fold_mov(&ctx, op);
2966 break;
Richard Henderson0c310a32021-08-24 10:37:24 -07002967 CASE_OP_32_64(movcond):
2968 done = fold_movcond(&ctx, op);
2969 break;
Richard Hendersond2c3eca2025-01-07 09:32:18 -08002970 case INDEX_op_mul:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002971 done = fold_mul(&ctx, op);
2972 break;
Richard Hendersonc7428242025-01-07 11:19:29 -08002973 case INDEX_op_mulsh:
Richard Hendersonaa28c9e2025-01-07 10:36:24 -08002974 case INDEX_op_muluh:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002975 done = fold_mul_highpart(&ctx, op);
2976 break;
Richard Hendersonbfe96482025-01-09 07:24:32 -08002977 case INDEX_op_muls2:
Richard Henderson407112b2021-08-26 06:33:04 -07002978 CASE_OP_32_64(mulu2):
2979 done = fold_multiply2(&ctx, op);
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002980 break;
Richard Henderson59379a42025-01-06 20:32:54 -08002981 case INDEX_op_nand:
2982 case INDEX_op_nand_vec:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002983 done = fold_nand(&ctx, op);
2984 break;
Richard Henderson69713582025-01-06 22:48:57 -08002985 case INDEX_op_neg:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002986 done = fold_neg(&ctx, op);
2987 break;
Richard Henderson3a8c4e92025-01-06 21:02:17 -08002988 case INDEX_op_nor:
2989 case INDEX_op_nor_vec:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002990 done = fold_nor(&ctx, op);
2991 break;
Richard Henderson5c62d372025-01-06 23:46:47 -08002992 case INDEX_op_not:
2993 case INDEX_op_not_vec:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002994 done = fold_not(&ctx, op);
2995 break;
Richard Henderson49bd7512025-01-06 14:00:40 -08002996 case INDEX_op_or:
2997 case INDEX_op_or_vec:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002998 done = fold_or(&ctx, op);
2999 break;
Richard Henderson6aba25e2025-01-06 14:46:26 -08003000 case INDEX_op_orc:
3001 case INDEX_op_orc_vec:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003002 done = fold_orc(&ctx, op);
3003 break;
Richard Henderson50b7a192025-02-04 13:46:09 -08003004 case INDEX_op_qemu_ld_i32:
Richard Henderson6813be92024-12-08 20:33:30 -06003005 done = fold_qemu_ld_1reg(&ctx, op);
3006 break;
Richard Henderson50b7a192025-02-04 13:46:09 -08003007 case INDEX_op_qemu_ld_i64:
Richard Henderson6813be92024-12-08 20:33:30 -06003008 if (TCG_TARGET_REG_BITS == 64) {
3009 done = fold_qemu_ld_1reg(&ctx, op);
3010 break;
3011 }
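        /*
         * On a 32-bit host a 64-bit load produces a register pair,
         * as a 128-bit load does everywhere: use the 2reg variant.
         */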
3012 QEMU_FALLTHROUGH;
Richard Henderson50b7a192025-02-04 13:46:09 -08003013 case INDEX_op_qemu_ld_i128:
Richard Henderson6813be92024-12-08 20:33:30 -06003014 done = fold_qemu_ld_2reg(&ctx, op);
Richard Henderson3eefdf22021-08-25 11:06:43 -07003015 break;
Richard Henderson50b7a192025-02-04 13:46:09 -08003016 case INDEX_op_qemu_st8_i32:
3017 case INDEX_op_qemu_st_i32:
3018 case INDEX_op_qemu_st_i64:
3019 case INDEX_op_qemu_st_i128:
Richard Henderson3eefdf22021-08-25 11:06:43 -07003020 done = fold_qemu_st(&ctx, op);
3021 break;
Richard Henderson9a6bc182025-01-07 19:00:51 -08003022 case INDEX_op_rems:
Richard Hendersoncd9acd22025-01-07 20:25:14 -08003023 case INDEX_op_remu:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003024 done = fold_remainder(&ctx, op);
3025 break;
Richard Henderson005a87e2025-01-08 10:42:16 -08003026 case INDEX_op_rotl:
3027 case INDEX_op_rotr:
Richard Henderson3949f362025-01-08 08:05:18 -08003028 case INDEX_op_sar:
Richard Henderson6ca59452025-01-07 21:50:04 -08003029 case INDEX_op_shl:
Richard Henderson74dbd362025-01-07 22:52:10 -08003030 case INDEX_op_shr:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003031 done = fold_shift(&ctx, op);
3032 break;
Richard Hendersonc63ff552021-08-24 09:35:30 -07003033 CASE_OP_32_64(setcond):
3034 done = fold_setcond(&ctx, op);
3035 break;
Richard Henderson36355022023-08-04 23:24:04 +00003036 CASE_OP_32_64(negsetcond):
3037 done = fold_negsetcond(&ctx, op);
3038 break;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07003039 case INDEX_op_setcond2_i32:
3040 done = fold_setcond2(&ctx, op);
3041 break;
Richard Henderson1f106542024-09-06 12:22:41 -07003042 case INDEX_op_cmp_vec:
3043 done = fold_cmp_vec(&ctx, op);
3044 break;
3045 case INDEX_op_cmpsel_vec:
3046 done = fold_cmpsel_vec(&ctx, op);
3047 break;
Richard Hendersone58b9772024-09-06 22:30:01 -07003048 case INDEX_op_bitsel_vec:
3049 done = fold_bitsel_vec(&ctx, op);
3050 break;
Richard Hendersonb6617c82021-08-24 10:44:53 -07003051 CASE_OP_32_64(sextract):
3052 done = fold_sextract(&ctx, op);
3053 break;
Richard Henderson60f34f52025-01-06 22:06:32 -08003054 case INDEX_op_sub:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003055 done = fold_sub(&ctx, op);
3056 break;
Richard Hendersonc578ff12021-12-16 06:07:25 -08003057 case INDEX_op_sub_vec:
3058 done = fold_sub_vec(&ctx, op);
3059 break;
Richard Henderson9531c072021-08-26 06:51:39 -07003060 CASE_OP_32_64(sub2):
3061 done = fold_sub2(&ctx, op);
Richard Hendersone3f7dc22021-08-24 10:30:38 -07003062 break;
Richard Hendersonfffd3dc2025-01-06 15:18:35 -08003063 case INDEX_op_xor:
3064 case INDEX_op_xor_vec:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003065 done = fold_xor(&ctx, op);
Richard Hendersonb10f3832021-08-23 22:30:17 -07003066 break;
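        /*
         * Control-flow opcodes end the extended basic block; constant,
         * copy, and memory-copy tracking do not survive across them.
         */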
Richard Henderson15268552024-12-08 07:45:11 -06003067 case INDEX_op_set_label:
3068 case INDEX_op_br:
3069 case INDEX_op_exit_tb:
3070 case INDEX_op_goto_tb:
3071 case INDEX_op_goto_ptr:
3072 finish_ebb(&ctx);
3073 done = true;
3074 break;
Richard Henderson2cfac7f2021-08-25 13:05:43 -07003075 default:
Richard Henderson0ae56422024-12-08 21:42:53 -06003076 done = finish_folding(&ctx, op);
Richard Henderson2cfac7f2021-08-25 13:05:43 -07003077 break;
Richard Hendersonb10f3832021-08-23 22:30:17 -07003078 }
Richard Henderson0ae56422024-12-08 21:42:53 -06003079 tcg_debug_assert(done);
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04003080 }
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04003081}