/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "qemu/interval-tree.h"
#include "tcg/tcg-op-common.h"
#include "tcg-internal.h"
#include "tcg-has.h"

#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)

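/*
 * A MemCopyInfo records that the memory range [start, last] currently
 * holds a copy of the temporary @ts.  The records are kept in an
 * interval tree (OptContext.mem_copy) so that a later load from the
 * same range can reuse the temp instead of reloading from memory; see
 * record_mem_copy() and find_mem_copy_for() below.
 */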
typedef struct MemCopyInfo {
    IntervalTreeNode itree;
    QSIMPLEQ_ENTRY(MemCopyInfo) next;
    TCGTemp *ts;
    TCGType type;
} MemCopyInfo;

typedef struct TempOptInfo {
    bool is_const;
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
    uint64_t val;
    uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
} TempOptInfo;
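
/*
 * For example, a constant temp with val 0x00ff has z_mask 0x00ff (only
 * the low 8 bits can be nonzero) and s_mask 0xffffffffffffff00 (bits
 * 8..63 are all known to equal the sign bit, here 0); see init_ts_info().
 */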

typedef struct OptContext {
    TCGContext *tcg;
    TCGOp *prev_mb;
    TCGTempSet temps_used;

    IntervalTreeRoot mem_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;

    /* In flight values from optimization. */
    TCGType type;
} OptContext;

static inline TempOptInfo *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline TempOptInfo *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ti_is_const(TempOptInfo *ti)
{
    return ti->is_const;
}

static inline uint64_t ti_const_val(TempOptInfo *ti)
{
    return ti->val;
}

static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
{
    return ti_is_const(ti) && ti_const_val(ti) == val;
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ti_is_const(ts_info(ts));
}

static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
{
    return ti_is_const_val(ts_info(ts), val);
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline bool arg_is_const_val(TCGArg arg, uint64_t val)
{
    return ts_is_const_val(arg_temp(arg), val);
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}

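/*
 * Of two copies of the same value, prefer the temp whose kind ranks
 * higher; in the TCGTempKind enumeration, constants rank above globals,
 * which rank above TB and EBB temporaries.
 */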
static TCGTemp *cmp_better_copy(TCGTemp *a, TCGTemp *b)
{
    return a->kind < b->kind ? b : a;
}

/* Initialize and activate a temporary. */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    TempOptInfo *ti;

    if (test_bit(idx, ctx->temps_used.l)) {
        return;
    }
    set_bit(idx, ctx->temps_used.l);

    ti = ts->state_ptr;
    if (ti == NULL) {
        ti = tcg_malloc(sizeof(TempOptInfo));
        ts->state_ptr = ti;
    }

    ti->next_copy = ts;
    ti->prev_copy = ts;
    QSIMPLEQ_INIT(&ti->mem_copy);
    if (ts->kind == TEMP_CONST) {
        ti->is_const = true;
        ti->val = ts->val;
        ti->z_mask = ts->val;
        ti->s_mask = INT64_MIN >> clrsb64(ts->val);
    } else {
        ti->is_const = false;
        ti->z_mask = -1;
        ti->s_mask = 0;
    }
}

static MemCopyInfo *mem_copy_first(OptContext *ctx, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_first(&ctx->mem_copy, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static MemCopyInfo *mem_copy_next(MemCopyInfo *mem, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_next(&mem->itree, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static void remove_mem_copy(OptContext *ctx, MemCopyInfo *mc)
{
    TCGTemp *ts = mc->ts;
    TempOptInfo *ti = ts_info(ts);

    interval_tree_remove(&mc->itree, &ctx->mem_copy);
    QSIMPLEQ_REMOVE(&ti->mem_copy, mc, MemCopyInfo, next);
    QSIMPLEQ_INSERT_TAIL(&ctx->mem_free, mc, next);
}

static void remove_mem_copy_in(OptContext *ctx, intptr_t s, intptr_t l)
{
    while (true) {
        MemCopyInfo *mc = mem_copy_first(ctx, s, l);
        if (!mc) {
            break;
        }
        remove_mem_copy(ctx, mc);
    }
}

static void remove_mem_copy_all(OptContext *ctx)
{
    remove_mem_copy_in(ctx, 0, -1);
    tcg_debug_assert(interval_tree_is_empty(&ctx->mem_copy));
}

static TCGTemp *find_better_copy(TCGTemp *ts)
{
    TCGTemp *i, *ret;

    /* If this is already readonly, we can't do better. */
    if (temp_readonly(ts)) {
        return ts;
    }

    ret = ts;
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        ret = cmp_better_copy(ret, i);
    }
    return ret;
}

static void move_mem_copies(TCGTemp *dst_ts, TCGTemp *src_ts)
{
    TempOptInfo *si = ts_info(src_ts);
    TempOptInfo *di = ts_info(dst_ts);
    MemCopyInfo *mc;

    QSIMPLEQ_FOREACH(mc, &si->mem_copy, next) {
        tcg_debug_assert(mc->ts == src_ts);
        mc->ts = dst_ts;
    }
    QSIMPLEQ_CONCAT(&di->mem_copy, &si->mem_copy);
}

/* Reset TEMP's state, possibly removing the temp from the list of copies. */
static void reset_ts(OptContext *ctx, TCGTemp *ts)
{
    TempOptInfo *ti = ts_info(ts);
    TCGTemp *pts = ti->prev_copy;
    TCGTemp *nts = ti->next_copy;
    TempOptInfo *pi = ts_info(pts);
    TempOptInfo *ni = ts_info(nts);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->z_mask = -1;
    ti->s_mask = 0;

    if (!QSIMPLEQ_EMPTY(&ti->mem_copy)) {
        if (ts == nts) {
            /* Last temp copy being removed, the mem copies die. */
            MemCopyInfo *mc;
            QSIMPLEQ_FOREACH(mc, &ti->mem_copy, next) {
                interval_tree_remove(&mc->itree, &ctx->mem_copy);
            }
            QSIMPLEQ_CONCAT(&ctx->mem_free, &ti->mem_copy);
        } else {
            move_mem_copies(find_better_copy(nts), ts);
        }
    }
}

static void reset_temp(OptContext *ctx, TCGArg arg)
{
    reset_ts(ctx, arg_temp(arg));
}

static void record_mem_copy(OptContext *ctx, TCGType type,
                            TCGTemp *ts, intptr_t start, intptr_t last)
{
    MemCopyInfo *mc;
    TempOptInfo *ti;

    mc = QSIMPLEQ_FIRST(&ctx->mem_free);
    if (mc) {
        QSIMPLEQ_REMOVE_HEAD(&ctx->mem_free, next);
    } else {
        mc = tcg_malloc(sizeof(*mc));
    }

    memset(mc, 0, sizeof(*mc));
    mc->itree.start = start;
    mc->itree.last = last;
    mc->type = type;
    interval_tree_insert(&mc->itree, &ctx->mem_copy);

    ts = find_better_copy(ts);
    ti = ts_info(ts);
    mc->ts = ts;
    QSIMPLEQ_INSERT_TAIL(&ti->mem_copy, mc, next);
}

static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}

static TCGTemp *find_mem_copy_for(OptContext *ctx, TCGType type, intptr_t s)
{
    MemCopyInfo *mc;

    for (mc = mem_copy_first(ctx, s, s); mc; mc = mem_copy_next(mc, s, s)) {
        if (mc->itree.start == s && mc->type == type) {
            return find_better_copy(mc->ts);
        }
    }
    return NULL;
}

static TCGArg arg_new_constant(OptContext *ctx, uint64_t val)
{
    TCGType type = ctx->type;
    TCGTemp *ts;

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    ts = tcg_constant_internal(type, val);
    init_ts_info(ctx, ts);

    return temp_arg(ts);
}

static TCGArg arg_new_temp(OptContext *ctx)
{
    TCGTemp *ts = tcg_temp_new_internal(ctx->type, TEMP_EBB);
    init_ts_info(ctx, ts);
    return temp_arg(ts);
}

static TCGOp *opt_insert_after(OptContext *ctx, TCGOp *op,
                               TCGOpcode opc, unsigned narg)
{
    return tcg_op_insert_after(ctx->tcg, op, opc, ctx->type, narg);
}

static TCGOp *opt_insert_before(OptContext *ctx, TCGOp *op,
                                TCGOpcode opc, unsigned narg)
{
    return tcg_op_insert_before(ctx->tcg, op, opc, ctx->type, narg);
}

static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    TempOptInfo *di;
    TempOptInfo *si;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }

    reset_ts(ctx, dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);

    switch (ctx->type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        new_op = INDEX_op_mov;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /* TCGOP_TYPE and TCGOP_VECE remain unchanged. */
        new_op = INDEX_op_mov_vec;
        break;
    default:
        g_assert_not_reached();
    }
    op->opc = new_op;
    op->args[0] = dst;
    op->args[1] = src;

    di->z_mask = si->z_mask;
    di->s_mask = si->s_mask;

    if (src_ts->type == dst_ts->type) {
        TempOptInfo *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;

        if (!QSIMPLEQ_EMPTY(&si->mem_copy)
            && cmp_better_copy(src_ts, dst_ts) == dst_ts) {
            move_mem_copies(dst_ts, src_ts);
        }
    }
    return true;
}

static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                             TCGArg dst, uint64_t val)
{
    /* Convert movi to mov with constant temp. */
    return tcg_opt_gen_mov(ctx, op, dst, arg_new_constant(ctx, val));
}

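/*
 * Evaluate one opcode on constant operands.  Note that shift and
 * rotate counts are masked to the operation width, and that division
 * by zero is redirected to a division by 1, since the behavior of the
 * TCG operation itself is undefined in that case.
 */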
static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

    switch (op) {
    case INDEX_op_add:
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    case INDEX_op_and:
    case INDEX_op_and_vec:
        return x & y;

    case INDEX_op_or:
    case INDEX_op_or_vec:
        return x | y;

    CASE_OP_32_64_VEC(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64_VEC(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    case INDEX_op_andc:
    case INDEX_op_andc_vec:
        return x & ~y;

    CASE_OP_32_64_VEC(orc):
        return x | ~y;

    CASE_OP_32_64_VEC(eqv):
        return ~(x ^ y);

    CASE_OP_32_64_VEC(nand):
        return ~(x & y);

    CASE_OP_32_64_VEC(nor):
        return ~(x | y);

    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(bswap16):
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

    CASE_OP_32_64(bswap32):
        x = bswap32(x);
        return y & TCG_BSWAP_OS ? (int32_t)x : x;

    case INDEX_op_bswap64_i64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        g_assert_not_reached();
    }
}

static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
                                    uint64_t x, uint64_t y)
{
    uint64_t res = do_constant_folding_2(op, x, y);
    if (type == TCG_TYPE_I32) {
        res = (int32_t)res;
    }
    return res;
}

static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

static int do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        return -1;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}
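
/*
 * Unlike the other conditions, "x TSTEQ/TSTNE x" is not constant:
 * x & x is simply x, so the result still depends on the value.
 */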

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond(TCGType type, TCGArg x,
                                    TCGArg y, TCGCond c)
{
    if (arg_is_const(x) && arg_is_const(y)) {
        uint64_t xv = arg_info(x)->val;
        uint64_t yv = arg_info(y)->val;

        switch (type) {
        case TCG_TYPE_I32:
            return do_constant_folding_cond_32(xv, yv, c);
        case TCG_TYPE_I64:
            return do_constant_folding_cond_64(xv, yv, c);
        default:
            /* Only scalar comparisons are optimizable */
            return -1;
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const_val(y, 0)) {
        switch (c) {
        case TCG_COND_LTU:
        case TCG_COND_TSTNE:
            return 0;
        case TCG_COND_GEU:
        case TCG_COND_TSTEQ:
            return 1;
        default:
            return -1;
        }
    }
    return -1;
}

/**
 * swap_commutative:
 * @dest: TCGArg of the destination argument, or NO_DEST.
 * @p1: first paired argument
 * @p2: second paired argument
 *
 * If *@p1 is a constant and *@p2 is not, swap.
 * If *@p2 matches @dest, swap.
 * Return true if a swap was performed.
 */

#define NO_DEST  temp_arg(NULL)

static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += arg_is_const(a1);
    sum -= arg_is_const(a2);

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}
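
/*
 * Schematically, "add t0, $5, t1" becomes "add t0, t1, $5", so that
 * later folders only need to look for a constant in the second
 * source operand.
 */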

static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += arg_is_const(p1[0]);
    sum += arg_is_const(p1[1]);
    sum -= arg_is_const(p2[0]);
    sum -= arg_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest,
                                     TCGArg *p1, TCGArg *p2, TCGArg *pcond)
{
    TCGCond cond;
    TempOptInfo *i1;
    bool swap;
    int r;

    swap = swap_commutative(dest, p1, p2);
    cond = *pcond;
    if (swap) {
        *pcond = cond = tcg_swap_cond(cond);
    }

    r = do_constant_folding_cond(ctx->type, *p1, *p2, cond);
    if (r >= 0) {
        return r;
    }
    if (!is_tst_cond(cond)) {
        return -1;
    }

    i1 = arg_info(*p1);

    /*
     * TSTNE x,x -> NE x,0
     * TSTNE x,i -> NE x,0 if i includes all nonzero bits of x
     */
    if (args_are_copies(*p1, *p2) ||
        (arg_is_const(*p2) && (i1->z_mask & ~arg_info(*p2)->val) == 0)) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
        return -1;
    }

    /* TSTNE x,i -> LT x,0 if i only includes sign bit copies */
    if (arg_is_const(*p2) && (arg_info(*p2)->val & ~i1->s_mask) == 0) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_ltge_cond(cond);
        return -1;
    }

    /* Expand to AND with a temporary if no backend support. */
    if (!TCG_TARGET_HAS_tst) {
        TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and, 3);
        TCGArg tmp = arg_new_temp(ctx);

        op2->args[0] = tmp;
        op2->args[1] = *p1;
        op2->args[2] = *p2;

        *p1 = tmp;
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
    }
    return -1;
}
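
/*
 * Example of the mask-based rewrites above: if every bit set in the
 * constant mask is one that s_mask says always equals the sign bit,
 * then "TSTNE x,mask" is equivalent to "LT x,0".
 */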

static int do_constant_folding_cond2(OptContext *ctx, TCGOp *op, TCGArg *args)
{
    TCGArg al, ah, bl, bh;
    TCGCond c;
    bool swap;
    int r;

    swap = swap_commutative2(args, args + 2);
    c = args[4];
    if (swap) {
        args[4] = c = tcg_swap_cond(c);
    }

    al = args[0];
    ah = args[1];
    bl = args[2];
    bh = args[3];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);

            r = do_constant_folding_cond_64(a, b, c);
            if (r >= 0) {
                return r;
            }
        }

        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
            case TCG_COND_TSTNE:
                return 0;
            case TCG_COND_GEU:
            case TCG_COND_TSTEQ:
                return 1;
            default:
                break;
            }
        }

        /* TSTNE x,-1 -> NE x,0 */
        if (b == -1 && is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }

        /* TSTNE x,sign -> LT x,0 */
        if (b == INT64_MIN && is_tst_cond(c)) {
            /* bl must be 0, so copy that to bh */
            args[3] = bl;
            args[4] = tcg_tst_ltge_cond(c);
            return -1;
        }
    }

    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        r = do_constant_folding_cond_eq(c);
        if (r >= 0) {
            return r;
        }

        /* TSTNE x,x -> NE x,0 */
        if (is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }
    }

    /* Expand to AND with a temporary if no backend support. */
    if (!TCG_TARGET_HAS_tst && is_tst_cond(c)) {
        TCGOp *op1 = opt_insert_before(ctx, op, INDEX_op_and, 3);
        TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and, 3);
        TCGArg t1 = arg_new_temp(ctx);
        TCGArg t2 = arg_new_temp(ctx);

        op1->args[0] = t1;
        op1->args[1] = al;
        op1->args[2] = bl;
        op2->args[0] = t2;
        op2->args[1] = ah;
        op2->args[2] = bh;

        args[0] = t1;
        args[1] = t2;
        args[3] = args[2] = arg_new_constant(ctx, 0);
        args[4] = tcg_tst_eqne_cond(c);
    }
    return -1;
}

static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
{
    for (int i = 0; i < nb_args; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        init_ts_info(ctx, ts);
    }
}

static void copy_propagate(OptContext *ctx, TCGOp *op,
                           int nb_oargs, int nb_iargs)
{
    for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts_is_copy(ts)) {
            op->args[i] = temp_arg(find_better_copy(ts));
        }
    }
}

static void finish_bb(OptContext *ctx)
{
    /* We only optimize memory barriers across basic blocks. */
    ctx->prev_mb = NULL;
}

static void finish_ebb(OptContext *ctx)
{
    finish_bb(ctx);
    /* We only optimize across extended basic blocks. */
    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
    remove_mem_copy_all(ctx);
}

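/*
 * An opcode (re)defines its outputs, so whatever was known about them
 * (constness, copy chains, masks) is now stale and must be dropped.
 */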
static bool finish_folding(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    int i, nb_oargs;

    nb_oargs = def->nb_oargs;
    for (i = 0; i < nb_oargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        reset_ts(ctx, ts);
    }
    return true;
}

/*
 * The fold_* functions return true when processing is complete,
 * usually by folding the operation to a constant or to a copy,
 * and calling tcg_opt_gen_{mov,movi}.  They may do other things,
 * like collect information about the value produced, for use in
 * optimizing a subsequent operation.
 *
 * These first fold_* functions are all helpers, used by other
 * folders for more specific operations.
 */

static bool fold_const1(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = do_constant_folding(op->opc, ctx->type, t, 0);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_const2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}

static bool fold_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return false;
}

static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return fold_const2(ctx, op);
}

/*
 * Record "zero" and "sign" masks for the single output of @op.
 * See TempOptInfo definition of z_mask and s_mask.
 * If z_mask allows, fold the output to constant zero.
 * The passed s_mask may be augmented by z_mask.
 */
static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
                          uint64_t z_mask, int64_t s_mask)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    TCGTemp *ts;
    TempOptInfo *ti;
    int rep;

    /* Only single-output opcodes are supported here. */
    tcg_debug_assert(def->nb_oargs == 1);

    /*
     * 32-bit ops generate 32-bit results, which for the purpose of
     * simplifying tcg are sign-extended.  Certainly that's how we
     * represent our constants elsewhere.  Note that the bits will
     * be reset properly for a 64-bit value when encountering the
     * type changing opcodes.
     */
    if (ctx->type == TCG_TYPE_I32) {
        z_mask = (int32_t)z_mask;
        s_mask |= INT32_MIN;
    }

    if (z_mask == 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
    }

    ts = arg_temp(op->args[0]);
    reset_ts(ctx, ts);

    ti = ts_info(ts);
    ti->z_mask = z_mask;

    /* Canonicalize s_mask and incorporate data from z_mask. */
    rep = clz64(~s_mask);
    rep = MAX(rep, clz64(z_mask));
    rep = MAX(rep - 1, 0);
    ti->s_mask = INT64_MIN >> rep;

    return true;
}
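
/*
 * Example of the canonicalization above: with z_mask 0xff, the high 56
 * bits are known zero and hence all equal the sign bit, so rep ends up
 * as 55 and s_mask becomes INT64_MIN >> 55 == 0xffffffffffffff00.
 */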

static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
{
    return fold_masks_zs(ctx, op, z_mask, 0);
}

static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
{
    return fold_masks_zs(ctx, op, -1, s_mask);
}

/*
 * An "affected" mask bit is 0 if and only if the result is identical
 * to the first input.  Thus if the entire mask is 0, the operation
 * is equivalent to a copy.
 */
static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
{
    if (ctx->type == TCG_TYPE_I32) {
        a_mask = (uint32_t)a_mask;
    }
    if (a_mask == 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}
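
/*
 * For example, fold_and() passes a_mask = z1 & ~z2: when every bit that
 * may be nonzero in the first input is known to be 1 in the (constant)
 * second input, the AND cannot clear anything and the result is a copy
 * of the first input.
 */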

/*
 * Convert @op to NOT, if NOT is supported by the host.
 * Return true if the conversion is successful, which will still
 * indicate that the processing is complete.
 */
static bool fold_not(OptContext *ctx, TCGOp *op);
static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
{
    TCGOpcode not_op;
    bool have_not;

    switch (ctx->type) {
    case TCG_TYPE_I32:
        not_op = INDEX_op_not_i32;
        have_not = TCG_TARGET_HAS_not_i32;
        break;
    case TCG_TYPE_I64:
        not_op = INDEX_op_not_i64;
        have_not = TCG_TARGET_HAS_not_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        not_op = INDEX_op_not_vec;
        have_not = TCG_TARGET_HAS_not_vec;
        break;
    default:
        g_assert_not_reached();
    }
    if (have_not) {
        op->opc = not_op;
        op->args[1] = op->args[idx];
        return fold_not(ctx, op);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to @i. */
static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to NOT. */
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return fold_to_not(ctx, op, 2);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to @i. */
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to identity. */
static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to NOT. */
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return fold_to_not(ctx, op, 1);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to @i. */
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to identity. */
static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/*
 * These outermost fold_<op> functions are sorted alphabetically.
 *
 * The ordering of the transformations should be:
 *   1) those that produce a constant
 *   2) those that produce a copy
 *   3) those that produce information about the result value.
 */

static bool fold_or(OptContext *ctx, TCGOp *op);
static bool fold_orc(OptContext *ctx, TCGOp *op);
static bool fold_xor(OptContext *ctx, TCGOp *op);

static bool fold_add(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return finish_folding(ctx, op);
}

/* We cannot as yet do_constant_folding with vectors. */
static bool fold_add_vec(OptContext *ctx, TCGOp *op)
{
    if (fold_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return finish_folding(ctx, op);
}

static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
{
    bool a_const = arg_is_const(op->args[2]) && arg_is_const(op->args[3]);
    bool b_const = arg_is_const(op->args[4]) && arg_is_const(op->args[5]);

    if (a_const && b_const) {
        uint64_t al = arg_info(op->args[2])->val;
        uint64_t ah = arg_info(op->args[3])->val;
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;
        TCGArg rl, rh;
        TCGOp *op2;

        if (ctx->type == TCG_TYPE_I32) {
            uint64_t a = deposit64(al, 32, 32, ah);
            uint64_t b = deposit64(bl, 32, 32, bh);

            if (add) {
                a += b;
            } else {
                a -= b;
            }

            al = sextract64(a, 0, 32);
            ah = sextract64(a, 32, 32);
        } else {
            Int128 a = int128_make128(al, ah);
            Int128 b = int128_make128(bl, bh);

            if (add) {
                a = int128_add(a, b);
            } else {
                a = int128_sub(a, b);
            }

            al = int128_getlo(a);
            ah = int128_gethi(a);
        }

        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = opt_insert_before(ctx, op, 0, 2);

        tcg_opt_gen_movi(ctx, op, rl, al);
        tcg_opt_gen_movi(ctx, op2, rh, ah);
        return true;
    }

    /* Fold sub2 r,x,i to add2 r,x,-i */
    if (!add && b_const) {
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;

        /* Negate the two parts without assembling and disassembling. */
        bl = -bl;
        bh = ~bh + !bl;

        op->opc = (ctx->type == TCG_TYPE_I32
                   ? INDEX_op_add2_i32 : INDEX_op_add2_i64);
        op->args[4] = arg_new_constant(ctx, bl);
        op->args[5] = arg_new_constant(ctx, bh);
    }
    return finish_folding(ctx, op);
}
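
/*
 * The sub2 -> add2 rewrite above negates the 128-bit constant in
 * halves: -b == ~b + 1, so the low half becomes -bl and the +1
 * carries into the high half exactly when the new low half is zero.
 */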
1286
Richard Henderson9531c072021-08-26 06:51:39 -07001287static bool fold_add2(OptContext *ctx, TCGOp *op)
Richard Hendersone3f7dc22021-08-24 10:30:38 -07001288{
Richard Henderson7a2f7082021-08-26 07:06:39 -07001289 /* Note that the high and low parts may be independently swapped. */
1290 swap_commutative(op->args[0], &op->args[2], &op->args[4]);
1291 swap_commutative(op->args[1], &op->args[3], &op->args[5]);
1292
Richard Henderson9531c072021-08-26 06:51:39 -07001293 return fold_addsub2(ctx, op, true);
Richard Hendersone3f7dc22021-08-24 10:30:38 -07001294}
1295
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001296static bool fold_and(OptContext *ctx, TCGOp *op)
1297{
Richard Henderson1ca73722024-12-08 18:47:15 -06001298 uint64_t z1, z2, z_mask, s_mask;
1299 TempOptInfo *t1, *t2;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001300
Richard Henderson7a2f7082021-08-26 07:06:39 -07001301 if (fold_const2_commutative(ctx, op) ||
Richard Hendersone8679952021-08-25 13:19:52 -07001302 fold_xi_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001303 fold_xi_to_x(ctx, op, -1) ||
Richard Hendersonca7bb042021-08-25 13:14:21 -07001304 fold_xx_to_x(ctx, op)) {
1305 return true;
1306 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001307
Richard Henderson1ca73722024-12-08 18:47:15 -06001308 t1 = arg_info(op->args[1]);
1309 t2 = arg_info(op->args[2]);
1310 z1 = t1->z_mask;
1311 z2 = t2->z_mask;
Richard Henderson3f2b1f82021-08-26 13:08:54 -07001312
1313 /*
Richard Hendersonfae450b2021-08-25 22:42:19 -07001314 * Known-zeros does not imply known-ones. Therefore unless
1315 * arg2 is constant, we can't infer affected bits from it.
1316 */
Richard Henderson1ca73722024-12-08 18:47:15 -06001317 if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
Richard Henderson045ace32024-12-19 10:33:51 -08001318 return true;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001319 }
1320
Richard Henderson1ca73722024-12-08 18:47:15 -06001321 z_mask = z1 & z2;
1322
1323 /*
1324 * Sign repetitions are perforce all identical, whether they are 1 or 0.
1325 * Bitwise operations preserve the relative quantity of the repetitions.
1326 */
1327 s_mask = t1->s_mask & t2->s_mask;
1328
1329 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001330}
1331
1332static bool fold_andc(OptContext *ctx, TCGOp *op)
1333{
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001334 uint64_t z_mask, s_mask;
1335 TempOptInfo *t1, *t2;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001336
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07001337 if (fold_const2(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001338 fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001339 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001340 fold_ix_to_not(ctx, op, -1)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07001341 return true;
1342 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001343
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001344 t1 = arg_info(op->args[1]);
1345 t2 = arg_info(op->args[2]);
1346 z_mask = t1->z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001347
Richard Henderson899281c2023-11-15 11:18:55 -08001348 if (ti_is_const(t2)) {
1349 /* Fold andc r,x,i to and r,x,~i. */
1350 switch (ctx->type) {
1351 case TCG_TYPE_I32:
1352 case TCG_TYPE_I64:
1353 op->opc = INDEX_op_and;
1354 break;
1355 case TCG_TYPE_V64:
1356 case TCG_TYPE_V128:
1357 case TCG_TYPE_V256:
1358 op->opc = INDEX_op_and_vec;
1359 break;
1360 default:
1361 g_assert_not_reached();
1362 }
1363 op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
1364 return fold_and(ctx, op);
1365 }
1366
Richard Hendersonfae450b2021-08-25 22:42:19 -07001367 /*
1368 * Known-zeros does not imply known-ones. Therefore unless
1369 * arg2 is constant, we can't infer anything from it.
1370 */
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001371 if (ti_is_const(t2)) {
1372 uint64_t v2 = ti_const_val(t2);
1373 if (fold_affected_mask(ctx, op, z_mask & v2)) {
Richard Henderson045ace32024-12-19 10:33:51 -08001374 return true;
1375 }
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001376 z_mask &= ~v2;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001377 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001378
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001379 s_mask = t1->s_mask & t2->s_mask;
1380 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001381}
1382
Richard Henderson7d3c63a2024-12-09 14:06:08 -06001383static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
1384{
1385 /* If true and false values are the same, eliminate the cmp. */
1386 if (args_are_copies(op->args[2], op->args[3])) {
1387 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
1388 }
1389
1390 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
1391 uint64_t tv = arg_info(op->args[2])->val;
1392 uint64_t fv = arg_info(op->args[3])->val;
1393
1394 if (tv == -1 && fv == 0) {
1395 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
1396 }
1397 if (tv == 0 && fv == -1) {
1398 if (TCG_TARGET_HAS_not_vec) {
1399 op->opc = INDEX_op_not_vec;
1400 return fold_not(ctx, op);
1401 } else {
1402 op->opc = INDEX_op_xor_vec;
1403 op->args[2] = arg_new_constant(ctx, -1);
1404 return fold_xor(ctx, op);
1405 }
1406 }
1407 }
1408 if (arg_is_const(op->args[2])) {
1409 uint64_t tv = arg_info(op->args[2])->val;
1410 if (tv == -1) {
1411 op->opc = INDEX_op_or_vec;
1412 op->args[2] = op->args[3];
1413 return fold_or(ctx, op);
1414 }
1415 if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
1416 op->opc = INDEX_op_andc_vec;
1417 op->args[2] = op->args[1];
1418 op->args[1] = op->args[3];
1419 return fold_andc(ctx, op);
1420 }
1421 }
1422 if (arg_is_const(op->args[3])) {
1423 uint64_t fv = arg_info(op->args[3])->val;
1424 if (fv == 0) {
1425 op->opc = INDEX_op_and_vec;
1426 return fold_and(ctx, op);
1427 }
        if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
            /* bitsel d,a,b,-1 is (a & b) | ~a == b | ~a, i.e. orc d,b,a. */
            TCGArg sel = op->args[1];

            op->opc = INDEX_op_orc_vec;
            op->args[1] = op->args[2];
            op->args[2] = sel;
            return fold_orc(ctx, op);
        }
1434 }
1435 return finish_folding(ctx, op);
1436}
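
/*
 * Summary of the constant cases above, for bitsel d,a,b,c
 * where d = (a & b) | (~a & c):
 *   b == -1, c ==  0:  mov d,a
 *   b ==  0, c == -1:  not d,a        (or xor d,a,$-1)
 *   b == -1:           or d,a,c
 *   b ==  0:           andc d,c,a
 *   c ==  0:           and d,a,b
 *   c == -1:           orc d,b,a
 */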
1437
Richard Henderson079b0802021-08-24 09:30:59 -07001438static bool fold_brcond(OptContext *ctx, TCGOp *op)
1439{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001440 int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
Richard Henderson246c4b72023-10-24 16:36:50 -07001441 &op->args[1], &op->args[2]);
Richard Henderson079b0802021-08-24 09:30:59 -07001442 if (i == 0) {
1443 tcg_op_remove(ctx->tcg, op);
1444 return true;
1445 }
1446 if (i > 0) {
1447 op->opc = INDEX_op_br;
1448 op->args[0] = op->args[3];
Richard Henderson15268552024-12-08 07:45:11 -06001449 finish_ebb(ctx);
1450 } else {
1451 finish_bb(ctx);
Richard Henderson079b0802021-08-24 09:30:59 -07001452 }
Richard Henderson15268552024-12-08 07:45:11 -06001453 return true;
Richard Henderson079b0802021-08-24 09:30:59 -07001454}
1455
Richard Henderson764d2ab2021-08-24 09:22:11 -07001456static bool fold_brcond2(OptContext *ctx, TCGOp *op)
1457{
Richard Henderson7e64b112023-10-24 16:53:56 -07001458 TCGCond cond;
1459 TCGArg label;
Richard Henderson7a2f7082021-08-26 07:06:39 -07001460 int i, inv = 0;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001461
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001462 i = do_constant_folding_cond2(ctx, op, &op->args[0]);
Richard Henderson7e64b112023-10-24 16:53:56 -07001463 cond = op->args[4];
1464 label = op->args[5];
Richard Henderson764d2ab2021-08-24 09:22:11 -07001465 if (i >= 0) {
1466 goto do_brcond_const;
1467 }
1468
1469 switch (cond) {
1470 case TCG_COND_LT:
1471 case TCG_COND_GE:
1472 /*
1473 * Simplify LT/GE comparisons vs zero to a single compare
1474 * vs the high word of the input.
1475 */
Richard Henderson27cdb852023-10-23 11:38:00 -07001476 if (arg_is_const_val(op->args[2], 0) &&
1477 arg_is_const_val(op->args[3], 0)) {
Richard Henderson764d2ab2021-08-24 09:22:11 -07001478 goto do_brcond_high;
1479 }
1480 break;
1481
1482 case TCG_COND_NE:
1483 inv = 1;
1484 QEMU_FALLTHROUGH;
1485 case TCG_COND_EQ:
1486 /*
1487 * Simplify EQ/NE comparisons where one of the pairs
1488 * can be simplified.
1489 */
Richard Henderson67f84c92021-08-25 08:00:20 -07001490 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
Richard Henderson764d2ab2021-08-24 09:22:11 -07001491 op->args[2], cond);
1492 switch (i ^ inv) {
1493 case 0:
1494 goto do_brcond_const;
1495 case 1:
1496 goto do_brcond_high;
1497 }
1498
Richard Henderson67f84c92021-08-25 08:00:20 -07001499 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
Richard Henderson764d2ab2021-08-24 09:22:11 -07001500 op->args[3], cond);
1501 switch (i ^ inv) {
1502 case 0:
1503 goto do_brcond_const;
1504 case 1:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001505 goto do_brcond_low;
1506 }
1507 break;
1508
1509 case TCG_COND_TSTEQ:
1510 case TCG_COND_TSTNE:
1511 if (arg_is_const_val(op->args[2], 0)) {
1512 goto do_brcond_high;
1513 }
1514 if (arg_is_const_val(op->args[3], 0)) {
1515 goto do_brcond_low;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001516 }
1517 break;
1518
1519 default:
1520 break;
1521
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001522 do_brcond_low:
1523 op->opc = INDEX_op_brcond_i32;
1524 op->args[1] = op->args[2];
1525 op->args[2] = cond;
1526 op->args[3] = label;
1527 return fold_brcond(ctx, op);
1528
Richard Henderson764d2ab2021-08-24 09:22:11 -07001529 do_brcond_high:
1530 op->opc = INDEX_op_brcond_i32;
1531 op->args[0] = op->args[1];
1532 op->args[1] = op->args[3];
1533 op->args[2] = cond;
1534 op->args[3] = label;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001535 return fold_brcond(ctx, op);
Richard Henderson764d2ab2021-08-24 09:22:11 -07001536
1537 do_brcond_const:
1538 if (i == 0) {
1539 tcg_op_remove(ctx->tcg, op);
1540 return true;
1541 }
1542 op->opc = INDEX_op_br;
1543 op->args[0] = label;
Richard Henderson15268552024-12-08 07:45:11 -06001544 finish_ebb(ctx);
1545 return true;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001546 }
Richard Henderson15268552024-12-08 07:45:11 -06001547
1548 finish_bb(ctx);
1549 return true;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001550}
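
/*
 * Example: for a 64-bit guest value split into (lo,hi), the signed
 * test "brcond2 lt lo,hi,0,0" depends only on the sign of hi, so it
 * reduces to "brcond_i32 lt hi,0" via do_brcond_high.
 */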
1551
Richard Henderson09bacdc2021-08-24 11:58:12 -07001552static bool fold_bswap(OptContext *ctx, TCGOp *op)
1553{
Richard Henderson57fe5c62021-08-26 12:04:46 -07001554 uint64_t z_mask, s_mask, sign;
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001555 TempOptInfo *t1 = arg_info(op->args[1]);
Richard Hendersonfae450b2021-08-25 22:42:19 -07001556
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001557 if (ti_is_const(t1)) {
1558 return tcg_opt_gen_movi(ctx, op, op->args[0],
1559 do_constant_folding(op->opc, ctx->type,
1560 ti_const_val(t1),
1561 op->args[2]));
Richard Henderson09bacdc2021-08-24 11:58:12 -07001562 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001563
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001564 z_mask = t1->z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001565 switch (op->opc) {
1566 case INDEX_op_bswap16_i32:
1567 case INDEX_op_bswap16_i64:
1568 z_mask = bswap16(z_mask);
1569 sign = INT16_MIN;
1570 break;
1571 case INDEX_op_bswap32_i32:
1572 case INDEX_op_bswap32_i64:
1573 z_mask = bswap32(z_mask);
1574 sign = INT32_MIN;
1575 break;
1576 case INDEX_op_bswap64_i64:
1577 z_mask = bswap64(z_mask);
1578 sign = INT64_MIN;
1579 break;
1580 default:
1581 g_assert_not_reached();
1582 }
1583
Richard Henderson75c3bf32024-12-19 10:50:40 -08001584 s_mask = 0;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001585 switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
1586 case TCG_BSWAP_OZ:
1587 break;
1588 case TCG_BSWAP_OS:
1589 /* If the sign bit may be 1, force all the bits above to 1. */
1590 if (z_mask & sign) {
1591 z_mask |= sign;
1592 }
        /* The value, and therefore s_mask, is explicitly sign-extended. */
1594 s_mask = sign;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001595 break;
1596 default:
1597 /* The high bits are undefined: force all bits above the sign to 1. */
1598 z_mask |= sign << 1;
1599 break;
1600 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001601
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001602 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson09bacdc2021-08-24 11:58:12 -07001603}
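
/*
 * Mask example: an input with z_mask 0x000000ff has its one known
 * byte moved up by bswap32, giving z_mask 0xff000000.  Under
 * TCG_BSWAP_OS the possibly-set sign bit then forces all higher
 * bits on, and s_mask becomes INT32_MIN, since the result is
 * defined to be sign-extended.
 */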
1604
Richard Henderson5cf32be2021-08-24 08:17:08 -07001605static bool fold_call(OptContext *ctx, TCGOp *op)
1606{
1607 TCGContext *s = ctx->tcg;
1608 int nb_oargs = TCGOP_CALLO(op);
1609 int nb_iargs = TCGOP_CALLI(op);
1610 int flags, i;
1611
1612 init_arguments(ctx, op, nb_oargs + nb_iargs);
1613 copy_propagate(ctx, op, nb_oargs, nb_iargs);
1614
1615 /* If the function reads or writes globals, reset temp data. */
1616 flags = tcg_call_flags(op);
1617 if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
1618 int nb_globals = s->nb_globals;
1619
1620 for (i = 0; i < nb_globals; i++) {
1621 if (test_bit(i, ctx->temps_used.l)) {
Richard Henderson986cac12023-01-09 13:59:35 -08001622 reset_ts(ctx, &ctx->tcg->temps[i]);
Richard Henderson5cf32be2021-08-24 08:17:08 -07001623 }
1624 }
1625 }
1626
Richard Hendersonab84dc32023-08-23 23:04:24 -07001627 /* If the function has side effects, reset mem data. */
1628 if (!(flags & TCG_CALL_NO_SIDE_EFFECTS)) {
1629 remove_mem_copy_all(ctx);
1630 }
1631
Richard Henderson5cf32be2021-08-24 08:17:08 -07001632 /* Reset temp data for outputs. */
1633 for (i = 0; i < nb_oargs; i++) {
Richard Henderson986cac12023-01-09 13:59:35 -08001634 reset_temp(ctx, op->args[i]);
Richard Henderson5cf32be2021-08-24 08:17:08 -07001635 }
1636
1637 /* Stop optimizing MB across calls. */
1638 ctx->prev_mb = NULL;
1639 return true;
1640}
1641
Richard Henderson29f65862024-12-09 14:09:49 -06001642static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
1643{
1644 /* Canonicalize the comparison to put immediate second. */
1645 if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
1646 op->args[3] = tcg_swap_cond(op->args[3]);
1647 }
1648 return finish_folding(ctx, op);
1649}
1650
1651static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
1652{
1653 /* If true and false values are the same, eliminate the cmp. */
1654 if (args_are_copies(op->args[3], op->args[4])) {
1655 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
1656 }
1657
1658 /* Canonicalize the comparison to put immediate second. */
1659 if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
1660 op->args[5] = tcg_swap_cond(op->args[5]);
1661 }
1662 /*
1663 * Canonicalize the "false" input reg to match the destination,
1664 * so that the tcg backend can implement "move if true".
1665 */
1666 if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
1667 op->args[5] = tcg_invert_cond(op->args[5]);
1668 }
1669 return finish_folding(ctx, op);
1670}
1671
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001672static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
1673{
Richard Hendersonce1d6632024-12-08 19:47:51 -06001674 uint64_t z_mask, s_mask;
1675 TempOptInfo *t1 = arg_info(op->args[1]);
1676 TempOptInfo *t2 = arg_info(op->args[2]);
Richard Hendersonfae450b2021-08-25 22:42:19 -07001677
Richard Hendersonce1d6632024-12-08 19:47:51 -06001678 if (ti_is_const(t1)) {
1679 uint64_t t = ti_const_val(t1);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001680
1681 if (t != 0) {
Richard Henderson67f84c92021-08-25 08:00:20 -07001682 t = do_constant_folding(op->opc, ctx->type, t, 0);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001683 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1684 }
1685 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
1686 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001687
1688 switch (ctx->type) {
1689 case TCG_TYPE_I32:
1690 z_mask = 31;
1691 break;
1692 case TCG_TYPE_I64:
1693 z_mask = 63;
1694 break;
1695 default:
1696 g_assert_not_reached();
1697 }
Richard Hendersonce1d6632024-12-08 19:47:51 -06001698 s_mask = ~z_mask;
1699 z_mask |= t2->z_mask;
1700 s_mask &= t2->s_mask;
1701
1702 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001703}
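
/*
 * Illustration: for clz_i32, a nonzero input yields a count in
 * [0,31], hence z_mask starts as 31; a zero input yields arg2
 * instead, so arg2's z_mask is ORed in and its s_mask bounds the
 * sign repetitions of the merged result.
 */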
1704
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001705static bool fold_ctpop(OptContext *ctx, TCGOp *op)
1706{
Richard Henderson81be07f2024-12-08 19:49:17 -06001707 uint64_t z_mask;
1708
Richard Hendersonfae450b2021-08-25 22:42:19 -07001709 if (fold_const1(ctx, op)) {
1710 return true;
1711 }
1712
1713 switch (ctx->type) {
1714 case TCG_TYPE_I32:
Richard Henderson81be07f2024-12-08 19:49:17 -06001715 z_mask = 32 | 31;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001716 break;
1717 case TCG_TYPE_I64:
Richard Henderson81be07f2024-12-08 19:49:17 -06001718 z_mask = 64 | 63;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001719 break;
1720 default:
1721 g_assert_not_reached();
1722 }
Richard Henderson81be07f2024-12-08 19:49:17 -06001723 return fold_masks_z(ctx, op, z_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001724}
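
/*
 * Illustration: ctpop_i32 yields values in [0,32], so 32|31 == 63
 * covers every reachable bit pattern; likewise 64|63 == 127 for
 * ctpop_i64.
 */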
1725
Richard Henderson1b1907b2021-08-24 10:47:04 -07001726static bool fold_deposit(OptContext *ctx, TCGOp *op)
1727{
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001728 TempOptInfo *t1 = arg_info(op->args[1]);
1729 TempOptInfo *t2 = arg_info(op->args[2]);
1730 int ofs = op->args[3];
1731 int len = op->args[4];
Richard Hendersonc3b920b2025-01-06 10:32:44 -08001732 int width = 8 * tcg_type_size(ctx->type);
Richard Hendersonedb832c2024-12-19 17:56:05 -08001733 uint64_t z_mask, s_mask;
Richard Henderson8f7a8402023-08-13 11:03:05 -07001734
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001735 if (ti_is_const(t1) && ti_is_const(t2)) {
1736 return tcg_opt_gen_movi(ctx, op, op->args[0],
1737 deposit64(ti_const_val(t1), ofs, len,
1738 ti_const_val(t2)));
Richard Henderson1b1907b2021-08-24 10:47:04 -07001739 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001740
Richard Henderson8f7a8402023-08-13 11:03:05 -07001741 /* Inserting a value into zero at offset 0. */
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001742 if (ti_is_const_val(t1, 0) && ofs == 0) {
1743 uint64_t mask = MAKE_64BIT_MASK(0, len);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001744
Richard Hendersonc3b920b2025-01-06 10:32:44 -08001745 op->opc = INDEX_op_and;
Richard Henderson8f7a8402023-08-13 11:03:05 -07001746 op->args[1] = op->args[2];
Richard Henderson26aac972023-10-23 12:31:57 -07001747 op->args[2] = arg_new_constant(ctx, mask);
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001748 return fold_and(ctx, op);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001749 }
1750
1751 /* Inserting zero into a value. */
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001752 if (ti_is_const_val(t2, 0)) {
1753 uint64_t mask = deposit64(-1, ofs, len, 0);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001754
Richard Hendersonc3b920b2025-01-06 10:32:44 -08001755 op->opc = INDEX_op_and;
Richard Henderson26aac972023-10-23 12:31:57 -07001756 op->args[2] = arg_new_constant(ctx, mask);
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001757 return fold_and(ctx, op);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001758 }
1759
Richard Hendersonedb832c2024-12-19 17:56:05 -08001760 /* The s_mask from the top portion of the deposit is still valid. */
1761 if (ofs + len == width) {
1762 s_mask = t2->s_mask << ofs;
1763 } else {
1764 s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
1765 }
1766
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001767 z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
Richard Hendersonedb832c2024-12-19 17:56:05 -08001768 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson1b1907b2021-08-24 10:47:04 -07001769}
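
/*
 * Illustration of the two rewrites above: "deposit d,$0,y,0,16"
 * becomes "and d,y,$0xffff", and "deposit d,x,$0,8,8" becomes
 * "and d,x,$0xffffffffffff00ff", both of which fold further
 * via fold_and().
 */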
1770
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001771static bool fold_divide(OptContext *ctx, TCGOp *op)
1772{
Richard Henderson2f9d9a32021-10-25 11:30:14 -07001773 if (fold_const2(ctx, op) ||
1774 fold_xi_to_x(ctx, op, 1)) {
1775 return true;
1776 }
Richard Henderson3d5ec802024-12-08 19:59:15 -06001777 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001778}
1779
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001780static bool fold_dup(OptContext *ctx, TCGOp *op)
1781{
1782 if (arg_is_const(op->args[1])) {
1783 uint64_t t = arg_info(op->args[1])->val;
1784 t = dup_const(TCGOP_VECE(op), t);
1785 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1786 }
Richard Hendersone089d692024-12-08 20:00:51 -06001787 return finish_folding(ctx, op);
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001788}
1789
1790static bool fold_dup2(OptContext *ctx, TCGOp *op)
1791{
1792 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
1793 uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
1794 arg_info(op->args[2])->val);
1795 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1796 }
1797
1798 if (args_are_copies(op->args[1], op->args[2])) {
1799 op->opc = INDEX_op_dup_vec;
1800 TCGOP_VECE(op) = MO_32;
1801 }
Richard Hendersone089d692024-12-08 20:00:51 -06001802 return finish_folding(ctx, op);
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001803}
1804
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001805static bool fold_eqv(OptContext *ctx, TCGOp *op)
1806{
Richard Hendersonef6be622024-12-08 20:03:15 -06001807 uint64_t s_mask;
1808
Richard Henderson7a2f7082021-08-26 07:06:39 -07001809 if (fold_const2_commutative(ctx, op) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001810 fold_xi_to_x(ctx, op, -1) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001811 fold_xi_to_not(ctx, op, 0)) {
1812 return true;
1813 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07001814
Richard Hendersonef6be622024-12-08 20:03:15 -06001815 s_mask = arg_info(op->args[1])->s_mask
1816 & arg_info(op->args[2])->s_mask;
1817 return fold_masks_s(ctx, op, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001818}
1819
Richard Hendersonb6617c82021-08-24 10:44:53 -07001820static bool fold_extract(OptContext *ctx, TCGOp *op)
1821{
Richard Hendersonfae450b2021-08-25 22:42:19 -07001822 uint64_t z_mask_old, z_mask;
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001823 TempOptInfo *t1 = arg_info(op->args[1]);
Richard Henderson57fe5c62021-08-26 12:04:46 -07001824 int pos = op->args[2];
1825 int len = op->args[3];
Richard Hendersonfae450b2021-08-25 22:42:19 -07001826
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001827 if (ti_is_const(t1)) {
1828 return tcg_opt_gen_movi(ctx, op, op->args[0],
1829 extract64(ti_const_val(t1), pos, len));
Richard Hendersonb6617c82021-08-24 10:44:53 -07001830 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001831
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001832 z_mask_old = t1->z_mask;
Richard Henderson57fe5c62021-08-26 12:04:46 -07001833 z_mask = extract64(z_mask_old, pos, len);
Richard Henderson045ace32024-12-19 10:33:51 -08001834 if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
1835 return true;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001836 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001837
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001838 return fold_masks_z(ctx, op, z_mask);
Richard Hendersonb6617c82021-08-24 10:44:53 -07001839}
1840
Richard Hendersondcd08992021-08-24 10:41:39 -07001841static bool fold_extract2(OptContext *ctx, TCGOp *op)
1842{
1843 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
1844 uint64_t v1 = arg_info(op->args[1])->val;
1845 uint64_t v2 = arg_info(op->args[2])->val;
1846 int shr = op->args[3];
1847
1848 if (op->opc == INDEX_op_extract2_i64) {
1849 v1 >>= shr;
1850 v2 <<= 64 - shr;
1851 } else {
1852 v1 = (uint32_t)v1 >> shr;
Richard Henderson225bec02021-11-09 23:17:59 +01001853 v2 = (uint64_t)((int32_t)v2 << (32 - shr));
Richard Hendersondcd08992021-08-24 10:41:39 -07001854 }
1855 return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
1856 }
Richard Hendersonc9df99e2024-12-08 20:06:42 -06001857 return finish_folding(ctx, op);
Richard Hendersondcd08992021-08-24 10:41:39 -07001858}
1859
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001860static bool fold_exts(OptContext *ctx, TCGOp *op)
1861{
Richard Henderson48e8de62024-12-26 12:01:57 -08001862 uint64_t s_mask, z_mask;
Richard Hendersona9621922024-12-08 20:08:46 -06001863 TempOptInfo *t1;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001864
1865 if (fold_const1(ctx, op)) {
1866 return true;
1867 }
1868
Richard Hendersona9621922024-12-08 20:08:46 -06001869 t1 = arg_info(op->args[1]);
1870 z_mask = t1->z_mask;
1871 s_mask = t1->s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001872
1873 switch (op->opc) {
Richard Hendersonfae450b2021-08-25 22:42:19 -07001874 case INDEX_op_ext_i32_i64:
Richard Hendersona9621922024-12-08 20:08:46 -06001875 s_mask |= INT32_MIN;
1876 z_mask = (int32_t)z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001877 break;
1878 default:
1879 g_assert_not_reached();
1880 }
Richard Hendersona9621922024-12-08 20:08:46 -06001881 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001882}
1883
1884static bool fold_extu(OptContext *ctx, TCGOp *op)
1885{
Richard Henderson48e8de62024-12-26 12:01:57 -08001886 uint64_t z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001887
1888 if (fold_const1(ctx, op)) {
1889 return true;
1890 }
1891
Richard Henderson48e8de62024-12-26 12:01:57 -08001892 z_mask = arg_info(op->args[1])->z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001893 switch (op->opc) {
Richard Hendersonfae450b2021-08-25 22:42:19 -07001894 case INDEX_op_extrl_i64_i32:
1895 case INDEX_op_extu_i32_i64:
Richard Hendersonfae450b2021-08-25 22:42:19 -07001896 z_mask = (uint32_t)z_mask;
1897 break;
1898 case INDEX_op_extrh_i64_i32:
Richard Hendersonfae450b2021-08-25 22:42:19 -07001899 z_mask >>= 32;
1900 break;
1901 default:
1902 g_assert_not_reached();
1903 }
Richard Henderson08abe292024-12-08 20:11:44 -06001904 return fold_masks_z(ctx, op, z_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001905}
1906
Richard Henderson3eefdf22021-08-25 11:06:43 -07001907static bool fold_mb(OptContext *ctx, TCGOp *op)
1908{
1909 /* Eliminate duplicate and redundant fence instructions. */
1910 if (ctx->prev_mb) {
1911 /*
1912 * Merge two barriers of the same type into one,
1913 * or a weaker barrier into a stronger one,
1914 * or two weaker barriers into a stronger one.
1915 * mb X; mb Y => mb X|Y
1916 * mb; strl => mb; st
1917 * ldaq; mb => ld; mb
1918 * ldaq; strl => ld; mb; st
1919 * Other combinations are also merged into a strong
1920 * barrier. This is stricter than specified but for
1921 * the purposes of TCG is better than not optimizing.
1922 */
1923 ctx->prev_mb->args[0] |= op->args[0];
1924 tcg_op_remove(ctx->tcg, op);
1925 } else {
1926 ctx->prev_mb = op;
1927 }
1928 return true;
1929}
1930
Richard Henderson2cfac7f2021-08-25 13:05:43 -07001931static bool fold_mov(OptContext *ctx, TCGOp *op)
1932{
1933 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
1934}
1935
Richard Henderson0c310a32021-08-24 10:37:24 -07001936static bool fold_movcond(OptContext *ctx, TCGOp *op)
1937{
Richard Henderson32202782024-12-08 20:16:38 -06001938 uint64_t z_mask, s_mask;
1939 TempOptInfo *tt, *ft;
Richard Henderson7a2f7082021-08-26 07:06:39 -07001940 int i;
Richard Henderson0c310a32021-08-24 10:37:24 -07001941
Richard Henderson141125e2024-09-06 21:00:10 -07001942 /* If true and false values are the same, eliminate the cmp. */
1943 if (args_are_copies(op->args[3], op->args[4])) {
1944 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
1945 }
1946
Richard Henderson7a2f7082021-08-26 07:06:39 -07001947 /*
1948 * Canonicalize the "false" input reg to match the destination reg so
1949 * that the tcg backend can implement a "move if true" operation.
1950 */
1951 if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
Richard Henderson246c4b72023-10-24 16:36:50 -07001952 op->args[5] = tcg_invert_cond(op->args[5]);
Richard Henderson7a2f7082021-08-26 07:06:39 -07001953 }
1954
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001955 i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07001956 &op->args[2], &op->args[5]);
Richard Henderson0c310a32021-08-24 10:37:24 -07001957 if (i >= 0) {
1958 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
1959 }
1960
Richard Henderson32202782024-12-08 20:16:38 -06001961 tt = arg_info(op->args[3]);
1962 ft = arg_info(op->args[4]);
1963 z_mask = tt->z_mask | ft->z_mask;
1964 s_mask = tt->s_mask & ft->s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001965
Richard Henderson32202782024-12-08 20:16:38 -06001966 if (ti_is_const(tt) && ti_is_const(ft)) {
1967 uint64_t tv = ti_const_val(tt);
1968 uint64_t fv = ti_const_val(ft);
Richard Henderson36355022023-08-04 23:24:04 +00001969 TCGOpcode opc, negopc = 0;
Richard Henderson246c4b72023-10-24 16:36:50 -07001970 TCGCond cond = op->args[5];
Richard Henderson0c310a32021-08-24 10:37:24 -07001971
Richard Henderson67f84c92021-08-25 08:00:20 -07001972 switch (ctx->type) {
1973 case TCG_TYPE_I32:
1974 opc = INDEX_op_setcond_i32;
Richard Henderson36355022023-08-04 23:24:04 +00001975 if (TCG_TARGET_HAS_negsetcond_i32) {
1976 negopc = INDEX_op_negsetcond_i32;
1977 }
1978 tv = (int32_t)tv;
1979 fv = (int32_t)fv;
Richard Henderson67f84c92021-08-25 08:00:20 -07001980 break;
1981 case TCG_TYPE_I64:
1982 opc = INDEX_op_setcond_i64;
Richard Henderson36355022023-08-04 23:24:04 +00001983 if (TCG_TARGET_HAS_negsetcond_i64) {
1984 negopc = INDEX_op_negsetcond_i64;
1985 }
Richard Henderson67f84c92021-08-25 08:00:20 -07001986 break;
1987 default:
1988 g_assert_not_reached();
1989 }
Richard Henderson0c310a32021-08-24 10:37:24 -07001990
1991 if (tv == 1 && fv == 0) {
1992 op->opc = opc;
1993 op->args[3] = cond;
1994 } else if (fv == 1 && tv == 0) {
1995 op->opc = opc;
1996 op->args[3] = tcg_invert_cond(cond);
Richard Henderson36355022023-08-04 23:24:04 +00001997 } else if (negopc) {
1998 if (tv == -1 && fv == 0) {
1999 op->opc = negopc;
2000 op->args[3] = cond;
2001 } else if (fv == -1 && tv == 0) {
2002 op->opc = negopc;
2003 op->args[3] = tcg_invert_cond(cond);
2004 }
Richard Henderson0c310a32021-08-24 10:37:24 -07002005 }
2006 }
Richard Henderson32202782024-12-08 20:16:38 -06002007
2008 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson0c310a32021-08-24 10:37:24 -07002009}
2010
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002011static bool fold_mul(OptContext *ctx, TCGOp *op)
2012{
Richard Hendersone8679952021-08-25 13:19:52 -07002013 if (fold_const2(ctx, op) ||
Richard Henderson5b5cf472021-10-25 11:19:14 -07002014 fold_xi_to_i(ctx, op, 0) ||
2015 fold_xi_to_x(ctx, op, 1)) {
Richard Hendersone8679952021-08-25 13:19:52 -07002016 return true;
2017 }
Richard Hendersoncd9c5832024-12-08 20:18:02 -06002018 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002019}
2020
2021static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
2022{
Richard Henderson7a2f7082021-08-26 07:06:39 -07002023 if (fold_const2_commutative(ctx, op) ||
Richard Hendersone8679952021-08-25 13:19:52 -07002024 fold_xi_to_i(ctx, op, 0)) {
2025 return true;
2026 }
Richard Hendersoncd9c5832024-12-08 20:18:02 -06002027 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002028}
2029
Richard Henderson407112b2021-08-26 06:33:04 -07002030static bool fold_multiply2(OptContext *ctx, TCGOp *op)
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002031{
Richard Henderson7a2f7082021-08-26 07:06:39 -07002032 swap_commutative(op->args[0], &op->args[2], &op->args[3]);
2033
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002034 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
Richard Henderson407112b2021-08-26 06:33:04 -07002035 uint64_t a = arg_info(op->args[2])->val;
2036 uint64_t b = arg_info(op->args[3])->val;
2037 uint64_t h, l;
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002038 TCGArg rl, rh;
Richard Henderson407112b2021-08-26 06:33:04 -07002039 TCGOp *op2;
2040
2041 switch (op->opc) {
2042 case INDEX_op_mulu2_i32:
2043 l = (uint64_t)(uint32_t)a * (uint32_t)b;
2044 h = (int32_t)(l >> 32);
2045 l = (int32_t)l;
2046 break;
2047 case INDEX_op_muls2_i32:
2048 l = (int64_t)(int32_t)a * (int32_t)b;
2049 h = l >> 32;
2050 l = (int32_t)l;
2051 break;
2052 case INDEX_op_mulu2_i64:
2053 mulu64(&l, &h, a, b);
2054 break;
2055 case INDEX_op_muls2_i64:
2056 muls64(&l, &h, a, b);
2057 break;
2058 default:
2059 g_assert_not_reached();
2060 }
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002061
2062 rl = op->args[0];
2063 rh = op->args[1];
Richard Henderson407112b2021-08-26 06:33:04 -07002064
2065 /* The proper opcode is supplied by tcg_opt_gen_mov. */
Richard Hendersona3c1c572025-04-21 11:05:29 -07002066 op2 = opt_insert_before(ctx, op, 0, 2);
Richard Henderson407112b2021-08-26 06:33:04 -07002067
2068 tcg_opt_gen_movi(ctx, op, rl, l);
2069 tcg_opt_gen_movi(ctx, op2, rh, h);
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002070 return true;
2071 }
Richard Hendersoncd9c5832024-12-08 20:18:02 -06002072 return finish_folding(ctx, op);
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002073}
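
/*
 * Constant example: mulu2_i32 with inputs 0x80000000 and 2
 * produces l = 0, h = 1, and the double-word multiply is
 * replaced by two movi ops, one per output.
 */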
2074
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002075static bool fold_nand(OptContext *ctx, TCGOp *op)
2076{
Richard Hendersonfa3168e2024-12-08 20:20:40 -06002077 uint64_t s_mask;
2078
Richard Henderson7a2f7082021-08-26 07:06:39 -07002079 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002080 fold_xi_to_not(ctx, op, -1)) {
2081 return true;
2082 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002083
Richard Hendersonfa3168e2024-12-08 20:20:40 -06002084 s_mask = arg_info(op->args[1])->s_mask
2085 & arg_info(op->args[2])->s_mask;
2086 return fold_masks_s(ctx, op, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002087}
2088
Richard Hendersone25fe882024-04-04 20:53:50 +00002089static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002090{
Richard Hendersonfae450b2021-08-25 22:42:19 -07002091 /* Set to 1 all bits to the left of the rightmost. */
Richard Hendersone25fe882024-04-04 20:53:50 +00002092 uint64_t z_mask = arg_info(op->args[1])->z_mask;
Richard Hendersond151fd32024-12-08 20:23:11 -06002093 z_mask = -(z_mask & -z_mask);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002094
Richard Hendersond151fd32024-12-08 20:23:11 -06002095 return fold_masks_z(ctx, op, z_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002096}
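
/*
 * Illustration: if z_mask is 0b101000, then z_mask & -z_mask is
 * 0b1000 (the lowest possibly-set bit) and -(z_mask & -z_mask)
 * is ~0b111: negating a multiple of 8 yields a multiple of 8,
 * so bits 0-2 remain known zero.
 */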
2097
Richard Hendersone25fe882024-04-04 20:53:50 +00002098static bool fold_neg(OptContext *ctx, TCGOp *op)
2099{
2100 return fold_const1(ctx, op) || fold_neg_no_const(ctx, op);
2101}
2102
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002103static bool fold_nor(OptContext *ctx, TCGOp *op)
2104{
Richard Henderson2b7b6952024-12-08 20:25:21 -06002105 uint64_t s_mask;
2106
Richard Henderson7a2f7082021-08-26 07:06:39 -07002107 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002108 fold_xi_to_not(ctx, op, 0)) {
2109 return true;
2110 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002111
Richard Henderson2b7b6952024-12-08 20:25:21 -06002112 s_mask = arg_info(op->args[1])->s_mask
2113 & arg_info(op->args[2])->s_mask;
2114 return fold_masks_s(ctx, op, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002115}
2116
2117static bool fold_not(OptContext *ctx, TCGOp *op)
2118{
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002119 if (fold_const1(ctx, op)) {
2120 return true;
2121 }
Richard Henderson608e75f2024-12-08 20:27:02 -06002122 return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002123}
2124
2125static bool fold_or(OptContext *ctx, TCGOp *op)
2126{
Richard Henderson83b1ba32024-12-08 20:28:59 -06002127 uint64_t z_mask, s_mask;
2128 TempOptInfo *t1, *t2;
2129
Richard Henderson7a2f7082021-08-26 07:06:39 -07002130 if (fold_const2_commutative(ctx, op) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002131 fold_xi_to_x(ctx, op, 0) ||
Richard Hendersonca7bb042021-08-25 13:14:21 -07002132 fold_xx_to_x(ctx, op)) {
2133 return true;
2134 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002135
Richard Henderson83b1ba32024-12-08 20:28:59 -06002136 t1 = arg_info(op->args[1]);
2137 t2 = arg_info(op->args[2]);
2138 z_mask = t1->z_mask | t2->z_mask;
2139 s_mask = t1->s_mask & t2->s_mask;
2140 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002141}
2142
2143static bool fold_orc(OptContext *ctx, TCGOp *op)
2144{
Richard Henderson54e26b22024-12-08 20:30:20 -06002145 uint64_t s_mask;
Richard Henderson50e40ec2024-12-10 08:13:10 -06002146 TempOptInfo *t1, *t2;
Richard Henderson54e26b22024-12-08 20:30:20 -06002147
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002148 if (fold_const2(ctx, op) ||
Richard Henderson4e858d92021-08-26 07:31:13 -07002149 fold_xx_to_i(ctx, op, -1) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002150 fold_xi_to_x(ctx, op, -1) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002151 fold_ix_to_not(ctx, op, 0)) {
2152 return true;
2153 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002154
Richard Henderson50e40ec2024-12-10 08:13:10 -06002155 t2 = arg_info(op->args[2]);
2156 if (ti_is_const(t2)) {
2157 /* Fold orc r,x,i to or r,x,~i. */
2158 switch (ctx->type) {
2159 case TCG_TYPE_I32:
2160 case TCG_TYPE_I64:
2161 op->opc = INDEX_op_or;
2162 break;
2163 case TCG_TYPE_V64:
2164 case TCG_TYPE_V128:
2165 case TCG_TYPE_V256:
2166 op->opc = INDEX_op_or_vec;
2167 break;
2168 default:
2169 g_assert_not_reached();
2170 }
2171 op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
2172 return fold_or(ctx, op);
2173 }
2174
2175 t1 = arg_info(op->args[1]);
2176 s_mask = t1->s_mask & t2->s_mask;
Richard Henderson54e26b22024-12-08 20:30:20 -06002177 return fold_masks_s(ctx, op, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002178}
2179
Richard Henderson6813be92024-12-08 20:33:30 -06002180static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
Richard Henderson3eefdf22021-08-25 11:06:43 -07002181{
Richard Hendersonfae450b2021-08-25 22:42:19 -07002182 const TCGOpDef *def = &tcg_op_defs[op->opc];
2183 MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
2184 MemOp mop = get_memop(oi);
2185 int width = 8 * memop_size(mop);
Richard Henderson6813be92024-12-08 20:33:30 -06002186 uint64_t z_mask = -1, s_mask = 0;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002187
Richard Henderson57fe5c62021-08-26 12:04:46 -07002188 if (width < 64) {
Richard Henderson75c3bf32024-12-19 10:50:40 -08002189 if (mop & MO_SIGN) {
Richard Henderson6813be92024-12-08 20:33:30 -06002190 s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
Richard Henderson75c3bf32024-12-19 10:50:40 -08002191 } else {
Richard Henderson6813be92024-12-08 20:33:30 -06002192 z_mask = MAKE_64BIT_MASK(0, width);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002193 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002194 }
2195
Richard Henderson3eefdf22021-08-25 11:06:43 -07002196 /* Opcodes that touch guest memory stop the mb optimization. */
2197 ctx->prev_mb = NULL;
Richard Henderson6813be92024-12-08 20:33:30 -06002198
2199 return fold_masks_zs(ctx, op, z_mask, s_mask);
2200}
2201
2202static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
2203{
2204 /* Opcodes that touch guest memory stop the mb optimization. */
2205 ctx->prev_mb = NULL;
2206 return finish_folding(ctx, op);
Richard Henderson3eefdf22021-08-25 11:06:43 -07002207}
2208
2209static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
2210{
2211 /* Opcodes that touch guest memory stop the mb optimization. */
2212 ctx->prev_mb = NULL;
Richard Henderson082b3ef2024-12-08 20:34:57 -06002213 return true;
Richard Henderson3eefdf22021-08-25 11:06:43 -07002214}
2215
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002216static bool fold_remainder(OptContext *ctx, TCGOp *op)
2217{
Richard Henderson267c17e2021-10-25 11:30:33 -07002218 if (fold_const2(ctx, op) ||
2219 fold_xx_to_i(ctx, op, 0)) {
2220 return true;
2221 }
Richard Hendersonf9e39342024-12-08 20:36:50 -06002222 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002223}
2224
Richard Henderson95eb2292024-12-08 20:47:59 -06002225/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
2226static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
Richard Henderson8d65cda2024-03-26 16:00:40 -10002227{
2228 uint64_t a_zmask, b_val;
2229 TCGCond cond;
2230
2231 if (!arg_is_const(op->args[2])) {
        return 0;
2233 }
2234
2235 a_zmask = arg_info(op->args[1])->z_mask;
2236 b_val = arg_info(op->args[2])->val;
2237 cond = op->args[3];
2238
2239 if (ctx->type == TCG_TYPE_I32) {
2240 a_zmask = (uint32_t)a_zmask;
2241 b_val = (uint32_t)b_val;
2242 }
2243
2244 /*
2245 * A with only low bits set vs B with high bits set means that A < B.
2246 */
2247 if (a_zmask < b_val) {
2248 bool inv = false;
2249
2250 switch (cond) {
2251 case TCG_COND_NE:
2252 case TCG_COND_LEU:
2253 case TCG_COND_LTU:
2254 inv = true;
2255 /* fall through */
2256 case TCG_COND_GTU:
2257 case TCG_COND_GEU:
2258 case TCG_COND_EQ:
2259 return tcg_opt_gen_movi(ctx, op, op->args[0], neg ? -inv : inv);
2260 default:
2261 break;
2262 }
2263 }
2264
2265 /*
2266 * A with only lsb set is already boolean.
2267 */
2268 if (a_zmask <= 1) {
2269 bool convert = false;
2270 bool inv = false;
2271
2272 switch (cond) {
2273 case TCG_COND_EQ:
2274 inv = true;
2275 /* fall through */
2276 case TCG_COND_NE:
2277 convert = (b_val == 0);
2278 break;
2279 case TCG_COND_LTU:
2280 case TCG_COND_TSTEQ:
2281 inv = true;
2282 /* fall through */
2283 case TCG_COND_GEU:
2284 case TCG_COND_TSTNE:
2285 convert = (b_val == 1);
2286 break;
2287 default:
2288 break;
2289 }
2290 if (convert) {
Richard Henderson79602f62025-01-06 09:11:39 -08002291 TCGOpcode xor_opc, neg_opc;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002292
2293 if (!inv && !neg) {
2294 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
2295 }
2296
2297 switch (ctx->type) {
2298 case TCG_TYPE_I32:
Richard Henderson8d65cda2024-03-26 16:00:40 -10002299 neg_opc = INDEX_op_neg_i32;
2300 xor_opc = INDEX_op_xor_i32;
2301 break;
2302 case TCG_TYPE_I64:
Richard Henderson8d65cda2024-03-26 16:00:40 -10002303 neg_opc = INDEX_op_neg_i64;
2304 xor_opc = INDEX_op_xor_i64;
2305 break;
2306 default:
2307 g_assert_not_reached();
2308 }
2309
2310 if (!inv) {
2311 op->opc = neg_opc;
2312 } else if (neg) {
Richard Henderson79602f62025-01-06 09:11:39 -08002313 op->opc = INDEX_op_add;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002314 op->args[2] = arg_new_constant(ctx, -1);
2315 } else {
2316 op->opc = xor_opc;
2317 op->args[2] = arg_new_constant(ctx, 1);
2318 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002319 return -1;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002320 }
2321 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002322 return 0;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002323}
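
/*
 * Examples: "setcond ltu d,x,$0x100" where x has z_mask <= 0xff is
 * always true and becomes movi d,$1; "setcond ne d,x,$0" where x
 * has z_mask <= 1 is just x itself and becomes a copy.  The
 * inverted boolean cases are materialized with xor d,x,$1, or,
 * when a negated result is wanted, with add d,x,$-1.
 */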
2324
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002325static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
2326{
Richard Henderson93a9ddb2025-01-06 22:06:08 -08002327 TCGOpcode xor_opc, neg_opc, shr_opc;
Paolo Bonziniff202812024-02-28 12:06:41 +01002328 TCGOpcode uext_opc = 0, sext_opc = 0;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002329 TCGCond cond = op->args[3];
2330 TCGArg ret, src1, src2;
2331 TCGOp *op2;
2332 uint64_t val;
2333 int sh;
2334 bool inv;
2335
2336 if (!is_tst_cond(cond) || !arg_is_const(op->args[2])) {
2337 return;
2338 }
2339
2340 src2 = op->args[2];
2341 val = arg_info(src2)->val;
2342 if (!is_power_of_2(val)) {
2343 return;
2344 }
2345 sh = ctz64(val);
2346
2347 switch (ctx->type) {
2348 case TCG_TYPE_I32:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002349 xor_opc = INDEX_op_xor_i32;
2350 shr_opc = INDEX_op_shr_i32;
2351 neg_opc = INDEX_op_neg_i32;
Richard Henderson4bce7522024-12-25 18:55:45 -08002352 if (TCG_TARGET_extract_valid(TCG_TYPE_I32, sh, 1)) {
Richard Hendersonc334de12024-12-26 00:43:19 -08002353 uext_opc = INDEX_op_extract_i32;
Richard Henderson4bce7522024-12-25 18:55:45 -08002354 }
2355 if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, sh, 1)) {
Richard Hendersonc334de12024-12-26 00:43:19 -08002356 sext_opc = INDEX_op_sextract_i32;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002357 }
2358 break;
2359 case TCG_TYPE_I64:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002360 xor_opc = INDEX_op_xor_i64;
2361 shr_opc = INDEX_op_shr_i64;
2362 neg_opc = INDEX_op_neg_i64;
Richard Henderson4bce7522024-12-25 18:55:45 -08002363 if (TCG_TARGET_extract_valid(TCG_TYPE_I64, sh, 1)) {
Richard Hendersonc334de12024-12-26 00:43:19 -08002364 uext_opc = INDEX_op_extract_i64;
Richard Henderson4bce7522024-12-25 18:55:45 -08002365 }
2366 if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, sh, 1)) {
Richard Hendersonc334de12024-12-26 00:43:19 -08002367 sext_opc = INDEX_op_sextract_i64;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002368 }
2369 break;
2370 default:
2371 g_assert_not_reached();
2372 }
2373
2374 ret = op->args[0];
2375 src1 = op->args[1];
2376 inv = cond == TCG_COND_TSTEQ;
2377
2378 if (sh && sext_opc && neg && !inv) {
2379 op->opc = sext_opc;
2380 op->args[1] = src1;
2381 op->args[2] = sh;
2382 op->args[3] = 1;
2383 return;
2384 } else if (sh && uext_opc) {
2385 op->opc = uext_opc;
2386 op->args[1] = src1;
2387 op->args[2] = sh;
2388 op->args[3] = 1;
2389 } else {
2390 if (sh) {
Richard Hendersona3c1c572025-04-21 11:05:29 -07002391 op2 = opt_insert_before(ctx, op, shr_opc, 3);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002392 op2->args[0] = ret;
2393 op2->args[1] = src1;
2394 op2->args[2] = arg_new_constant(ctx, sh);
2395 src1 = ret;
2396 }
Richard Hendersonc3b920b2025-01-06 10:32:44 -08002397 op->opc = INDEX_op_and;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002398 op->args[1] = src1;
2399 op->args[2] = arg_new_constant(ctx, 1);
2400 }
2401
2402 if (neg && inv) {
Richard Henderson93a9ddb2025-01-06 22:06:08 -08002403 op2 = opt_insert_after(ctx, op, INDEX_op_add, 3);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002404 op2->args[0] = ret;
2405 op2->args[1] = ret;
Richard Henderson93a9ddb2025-01-06 22:06:08 -08002406 op2->args[2] = arg_new_constant(ctx, -1);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002407 } else if (inv) {
Richard Hendersona3c1c572025-04-21 11:05:29 -07002408 op2 = opt_insert_after(ctx, op, xor_opc, 3);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002409 op2->args[0] = ret;
2410 op2->args[1] = ret;
2411 op2->args[2] = arg_new_constant(ctx, 1);
2412 } else if (neg) {
Richard Hendersona3c1c572025-04-21 11:05:29 -07002413 op2 = opt_insert_after(ctx, op, neg_opc, 2);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002414 op2->args[0] = ret;
2415 op2->args[1] = ret;
2416 }
2417}
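
/*
 * Example: "setcond tstne d,x,$0x10" tests a single bit and becomes
 * "extract d,x,4,1"; the negated form can use sextract directly.
 * The TSTEQ variants invert afterwards with xor d,d,$1, or with
 * add d,d,$-1 when a negated result is wanted.
 */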
2418
Richard Hendersonc63ff552021-08-24 09:35:30 -07002419static bool fold_setcond(OptContext *ctx, TCGOp *op)
2420{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002421 int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07002422 &op->args[2], &op->args[3]);
Richard Hendersonc63ff552021-08-24 09:35:30 -07002423 if (i >= 0) {
2424 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
2425 }
Richard Henderson8d65cda2024-03-26 16:00:40 -10002426
Richard Henderson95eb2292024-12-08 20:47:59 -06002427 i = fold_setcond_zmask(ctx, op, false);
2428 if (i > 0) {
Richard Henderson8d65cda2024-03-26 16:00:40 -10002429 return true;
2430 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002431 if (i == 0) {
2432 fold_setcond_tst_pow2(ctx, op, false);
2433 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002434
Richard Henderson2c8a2832024-12-08 20:50:37 -06002435 return fold_masks_z(ctx, op, 1);
Richard Hendersonc63ff552021-08-24 09:35:30 -07002436}
2437
Richard Henderson36355022023-08-04 23:24:04 +00002438static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
2439{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002440 int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07002441 &op->args[2], &op->args[3]);
Richard Henderson36355022023-08-04 23:24:04 +00002442 if (i >= 0) {
2443 return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
2444 }
Richard Henderson8d65cda2024-03-26 16:00:40 -10002445
Richard Henderson95eb2292024-12-08 20:47:59 -06002446 i = fold_setcond_zmask(ctx, op, true);
2447 if (i > 0) {
Richard Henderson8d65cda2024-03-26 16:00:40 -10002448 return true;
2449 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002450 if (i == 0) {
2451 fold_setcond_tst_pow2(ctx, op, true);
2452 }
Richard Henderson36355022023-08-04 23:24:04 +00002453
2454 /* Value is {0,-1} so all bits are repetitions of the sign. */
Richard Henderson081cf082024-12-08 20:50:58 -06002455 return fold_masks_s(ctx, op, -1);
Richard Henderson36355022023-08-04 23:24:04 +00002456}
2457
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002458static bool fold_setcond2(OptContext *ctx, TCGOp *op)
2459{
Richard Henderson7e64b112023-10-24 16:53:56 -07002460 TCGCond cond;
Richard Henderson7a2f7082021-08-26 07:06:39 -07002461 int i, inv = 0;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002462
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002463 i = do_constant_folding_cond2(ctx, op, &op->args[1]);
Richard Henderson7e64b112023-10-24 16:53:56 -07002464 cond = op->args[5];
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002465 if (i >= 0) {
2466 goto do_setcond_const;
2467 }
2468
2469 switch (cond) {
2470 case TCG_COND_LT:
2471 case TCG_COND_GE:
2472 /*
2473 * Simplify LT/GE comparisons vs zero to a single compare
2474 * vs the high word of the input.
2475 */
Richard Henderson27cdb852023-10-23 11:38:00 -07002476 if (arg_is_const_val(op->args[3], 0) &&
2477 arg_is_const_val(op->args[4], 0)) {
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002478 goto do_setcond_high;
2479 }
2480 break;
2481
2482 case TCG_COND_NE:
2483 inv = 1;
2484 QEMU_FALLTHROUGH;
2485 case TCG_COND_EQ:
2486 /*
2487 * Simplify EQ/NE comparisons where one of the pairs
2488 * can be simplified.
2489 */
Richard Henderson67f84c92021-08-25 08:00:20 -07002490 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002491 op->args[3], cond);
2492 switch (i ^ inv) {
2493 case 0:
2494 goto do_setcond_const;
2495 case 1:
2496 goto do_setcond_high;
2497 }
2498
Richard Henderson67f84c92021-08-25 08:00:20 -07002499 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002500 op->args[4], cond);
2501 switch (i ^ inv) {
2502 case 0:
2503 goto do_setcond_const;
2504 case 1:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002505 goto do_setcond_low;
2506 }
2507 break;
2508
2509 case TCG_COND_TSTEQ:
2510 case TCG_COND_TSTNE:
Richard Hendersona71d9df2024-06-30 19:46:23 -07002511 if (arg_is_const_val(op->args[3], 0)) {
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002512 goto do_setcond_high;
2513 }
2514 if (arg_is_const_val(op->args[4], 0)) {
2515 goto do_setcond_low;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002516 }
2517 break;
2518
2519 default:
2520 break;
2521
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002522 do_setcond_low:
2523 op->args[2] = op->args[3];
2524 op->args[3] = cond;
2525 op->opc = INDEX_op_setcond_i32;
2526 return fold_setcond(ctx, op);
2527
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002528 do_setcond_high:
2529 op->args[1] = op->args[2];
2530 op->args[2] = op->args[4];
2531 op->args[3] = cond;
2532 op->opc = INDEX_op_setcond_i32;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002533 return fold_setcond(ctx, op);
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002534 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002535
Richard Hendersona53502c2024-12-08 20:56:36 -06002536 return fold_masks_z(ctx, op, 1);
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002537
2538 do_setcond_const:
2539 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
2540}
2541
Richard Hendersonb6617c82021-08-24 10:44:53 -07002542static bool fold_sextract(OptContext *ctx, TCGOp *op)
2543{
Richard Henderson57fe5c62021-08-26 12:04:46 -07002544 uint64_t z_mask, s_mask, s_mask_old;
Richard Hendersonbaff5072024-12-08 21:09:30 -06002545 TempOptInfo *t1 = arg_info(op->args[1]);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002546 int pos = op->args[2];
2547 int len = op->args[3];
Richard Hendersonfae450b2021-08-25 22:42:19 -07002548
Richard Hendersonbaff5072024-12-08 21:09:30 -06002549 if (ti_is_const(t1)) {
2550 return tcg_opt_gen_movi(ctx, op, op->args[0],
2551 sextract64(ti_const_val(t1), pos, len));
Richard Hendersonb6617c82021-08-24 10:44:53 -07002552 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002553
Richard Hendersonbaff5072024-12-08 21:09:30 -06002554 s_mask_old = t1->s_mask;
2555 s_mask = s_mask_old >> pos;
2556 s_mask |= -1ull << (len - 1);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002557
Richard Hendersonaa9e0502024-12-21 22:03:53 -08002558 if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
Richard Henderson045ace32024-12-19 10:33:51 -08002559 return true;
Richard Henderson57fe5c62021-08-26 12:04:46 -07002560 }
2561
Richard Hendersonbaff5072024-12-08 21:09:30 -06002562 z_mask = sextract64(t1->z_mask, pos, len);
2563 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Hendersonb6617c82021-08-24 10:44:53 -07002564}
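
/*
 * Illustration: "sextract d,x,0,8" where x is already known to be
 * sign-extended from bit 7 (s_mask covers bits 7 and up) adds no
 * new sign repetitions, so fold_affected_mask() reduces it to a
 * copy of x.
 */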
2565
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002566static bool fold_shift(OptContext *ctx, TCGOp *op)
2567{
Richard Henderson4ed2ba32024-12-19 19:38:54 -08002568 uint64_t s_mask, z_mask;
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002569 TempOptInfo *t1, *t2;
Richard Henderson93a967f2021-08-26 13:24:59 -07002570
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002571 if (fold_const2(ctx, op) ||
Richard Hendersonda48e272021-08-25 20:42:04 -07002572 fold_ix_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002573 fold_xi_to_x(ctx, op, 0)) {
2574 return true;
2575 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002576
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002577 t1 = arg_info(op->args[1]);
2578 t2 = arg_info(op->args[2]);
2579 s_mask = t1->s_mask;
2580 z_mask = t1->z_mask;
Richard Henderson93a967f2021-08-26 13:24:59 -07002581
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002582 if (ti_is_const(t2)) {
2583 int sh = ti_const_val(t2);
Richard Henderson93a967f2021-08-26 13:24:59 -07002584
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002585 z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
Richard Henderson93a967f2021-08-26 13:24:59 -07002586 s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
Richard Henderson93a967f2021-08-26 13:24:59 -07002587
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002588 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002589 }
Richard Henderson93a967f2021-08-26 13:24:59 -07002590
2591 switch (op->opc) {
2592 CASE_OP_32_64(sar):
2593 /*
2594 * Arithmetic right shift will not reduce the number of
2595 * input sign repetitions.
2596 */
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002597 return fold_masks_s(ctx, op, s_mask);
Richard Henderson93a967f2021-08-26 13:24:59 -07002598 CASE_OP_32_64(shr):
2599 /*
2600 * If the sign bit is known zero, then logical right shift
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002601 * will not reduce the number of input sign repetitions.
Richard Henderson93a967f2021-08-26 13:24:59 -07002602 */
Richard Henderson4ed2ba32024-12-19 19:38:54 -08002603 if (~z_mask & -s_mask) {
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002604 return fold_masks_s(ctx, op, s_mask);
Richard Henderson93a967f2021-08-26 13:24:59 -07002605 }
2606 break;
2607 default:
2608 break;
2609 }
2610
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002611 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002612}
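
/*
 * Illustration: with t1->s_mask = 0xffff000000000000 (top 16 bits
 * are sign repetitions), a constant "sar d,x,$8" gives
 * s_mask = 0xffffff0000000000 -- applying the shift to the mask
 * itself models how the repetitions grow.
 */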
2613
Richard Henderson9caca882021-08-24 13:30:32 -07002614static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
2615{
2616 TCGOpcode neg_op;
2617 bool have_neg;
2618
2619 if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
2620 return false;
2621 }
2622
2623 switch (ctx->type) {
2624 case TCG_TYPE_I32:
2625 neg_op = INDEX_op_neg_i32;
Richard Hendersonb701f192023-10-25 21:14:04 -07002626 have_neg = true;
Richard Henderson9caca882021-08-24 13:30:32 -07002627 break;
2628 case TCG_TYPE_I64:
2629 neg_op = INDEX_op_neg_i64;
Richard Hendersonb701f192023-10-25 21:14:04 -07002630 have_neg = true;
Richard Henderson9caca882021-08-24 13:30:32 -07002631 break;
2632 case TCG_TYPE_V64:
2633 case TCG_TYPE_V128:
2634 case TCG_TYPE_V256:
2635 neg_op = INDEX_op_neg_vec;
2636 have_neg = (TCG_TARGET_HAS_neg_vec &&
2637 tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
2638 break;
2639 default:
2640 g_assert_not_reached();
2641 }
2642 if (have_neg) {
2643 op->opc = neg_op;
2644 op->args[1] = op->args[2];
Richard Hendersone25fe882024-04-04 20:53:50 +00002645 return fold_neg_no_const(ctx, op);
Richard Henderson9caca882021-08-24 13:30:32 -07002646 }
2647 return false;
2648}
2649
Richard Hendersonc578ff12021-12-16 06:07:25 -08002650/* We cannot as yet do_constant_folding with vectors. */
2651static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002652{
Richard Hendersonc578ff12021-12-16 06:07:25 -08002653 if (fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002654 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson9caca882021-08-24 13:30:32 -07002655 fold_sub_to_neg(ctx, op)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07002656 return true;
2657 }
Richard Hendersonfe1d0072024-12-08 21:15:22 -06002658 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002659}
2660
Richard Hendersonc578ff12021-12-16 06:07:25 -08002661static bool fold_sub(OptContext *ctx, TCGOp *op)
2662{
Richard Hendersonfe1d0072024-12-08 21:15:22 -06002663 if (fold_const2(ctx, op) ||
2664 fold_xx_to_i(ctx, op, 0) ||
2665 fold_xi_to_x(ctx, op, 0) ||
2666 fold_sub_to_neg(ctx, op)) {
Richard Henderson6334a962023-10-25 18:39:43 -07002667 return true;
2668 }
2669
2670 /* Fold sub r,x,i to add r,x,-i */
2671 if (arg_is_const(op->args[2])) {
2672 uint64_t val = arg_info(op->args[2])->val;
2673
Richard Henderson79602f62025-01-06 09:11:39 -08002674 op->opc = INDEX_op_add;
Richard Henderson6334a962023-10-25 18:39:43 -07002675 op->args[2] = arg_new_constant(ctx, -val);
2676 }
Richard Hendersonfe1d0072024-12-08 21:15:22 -06002677 return finish_folding(ctx, op);
Richard Hendersonc578ff12021-12-16 06:07:25 -08002678}
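
/*
 * Illustration: "sub d,x,$5" is canonicalized into "add d,x,$-5",
 * so later folds only need to recognize the single add form.
 */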
2679
Richard Henderson9531c072021-08-26 06:51:39 -07002680static bool fold_sub2(OptContext *ctx, TCGOp *op)
Richard Hendersone3f7dc22021-08-24 10:30:38 -07002681{
Richard Henderson9531c072021-08-26 06:51:39 -07002682 return fold_addsub2(ctx, op, false);
Richard Hendersone3f7dc22021-08-24 10:30:38 -07002683}
2684
Richard Hendersonfae450b2021-08-25 22:42:19 -07002685static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
2686{
Richard Hendersond33e0f02024-12-09 08:53:20 -06002687 uint64_t z_mask = -1, s_mask = 0;
2688
Richard Hendersonfae450b2021-08-25 22:42:19 -07002689 /* We can't do any folding with a load, but we can record bits. */
2690 switch (op->opc) {
Richard Henderson57fe5c62021-08-26 12:04:46 -07002691 CASE_OP_32_64(ld8s):
Richard Hendersond33e0f02024-12-09 08:53:20 -06002692 s_mask = INT8_MIN;
Richard Henderson57fe5c62021-08-26 12:04:46 -07002693 break;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002694 CASE_OP_32_64(ld8u):
Richard Hendersond33e0f02024-12-09 08:53:20 -06002695 z_mask = MAKE_64BIT_MASK(0, 8);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002696 break;
2697 CASE_OP_32_64(ld16s):
Richard Hendersond33e0f02024-12-09 08:53:20 -06002698 s_mask = INT16_MIN;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002699 break;
2700 CASE_OP_32_64(ld16u):
Richard Hendersond33e0f02024-12-09 08:53:20 -06002701 z_mask = MAKE_64BIT_MASK(0, 16);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002702 break;
2703 case INDEX_op_ld32s_i64:
Richard Hendersond33e0f02024-12-09 08:53:20 -06002704 s_mask = INT32_MIN;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002705 break;
2706 case INDEX_op_ld32u_i64:
Richard Hendersond33e0f02024-12-09 08:53:20 -06002707 z_mask = MAKE_64BIT_MASK(0, 32);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002708 break;
2709 default:
2710 g_assert_not_reached();
2711 }
Richard Hendersond33e0f02024-12-09 08:53:20 -06002712 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002713}
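
/*
 * Illustration: ld8s records s_mask = INT8_MIN (bits 7-63 of the
 * result match the msb), while ld8u records z_mask = 0xff
 * (bits 8-63 are known zero).
 */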
2714
Richard Hendersonab84dc32023-08-23 23:04:24 -07002715static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
2716{
2717 TCGTemp *dst, *src;
2718 intptr_t ofs;
2719 TCGType type;
2720
2721 if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
Richard Henderson0fb5b752024-12-09 09:44:40 -06002722 return finish_folding(ctx, op);
Richard Hendersonab84dc32023-08-23 23:04:24 -07002723 }
2724
2725 type = ctx->type;
2726 ofs = op->args[2];
2727 dst = arg_temp(op->args[0]);
2728 src = find_mem_copy_for(ctx, type, ofs);
2729 if (src && src->base_type == type) {
2730 return tcg_opt_gen_mov(ctx, op, temp_arg(dst), temp_arg(src));
2731 }
2732
2733 reset_ts(ctx, dst);
2734 record_mem_copy(ctx, type, dst, ofs, ofs + tcg_type_size(type) - 1);
2735 return true;
2736}
2737
2738static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
2739{
2740 intptr_t ofs = op->args[2];
2741 intptr_t lm1;
2742
2743 if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
2744 remove_mem_copy_all(ctx);
Richard Henderson082b3ef2024-12-08 20:34:57 -06002745 return true;
Richard Hendersonab84dc32023-08-23 23:04:24 -07002746 }
2747
    switch (op->opc) {
    CASE_OP_32_64(st8):
        lm1 = 0;
        break;
    CASE_OP_32_64(st16):
        lm1 = 1;
        break;
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
        lm1 = 3;
        break;
    case INDEX_op_st_i64:
        lm1 = 7;
        break;
    case INDEX_op_st_vec:
        lm1 = tcg_type_size(ctx->type) - 1;
        break;
    default:
        g_assert_not_reached();
    }
    remove_mem_copy_in(ctx, ofs, ofs + lm1);
    return true;
}

static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
{
    TCGTemp *src;
    intptr_t ofs, last;
    TCGType type;

    if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
        return fold_tcg_st(ctx, op);
    }

    src = arg_temp(op->args[0]);
    ofs = op->args[2];
    type = ctx->type;

    /*
     * Eliminate duplicate stores of a constant.
     * This happens frequently when the target ISA zero-extends.
     */
    if (ts_is_const(src)) {
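        /*
         * Constant temps are interned per type and value (see
         * tcg_constant_internal), so pointer equality with the copy
         * previously recorded for this offset means the very same
         * value is already stored there.
         */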
        TCGTemp *prev = find_mem_copy_for(ctx, type, ofs);
        if (src == prev) {
            tcg_op_remove(ctx->tcg, op);
            return true;
        }
    }

    last = ofs + tcg_type_size(type) - 1;
    remove_mem_copy_in(ctx, ofs, last);
    record_mem_copy(ctx, type, src, ofs, last);
    return true;
}

static bool fold_xor(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, s_mask;
    TempOptInfo *t1, *t2;

    if (fold_const2_commutative(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_xi_to_not(ctx, op, -1)) {
        return true;
    }

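    /*
     * A result bit of xor can be nonzero only where at least one input
     * bit can be nonzero, hence the OR of the z_masks.  Sign-bit
     * replication survives xor only in positions where both inputs are
     * known copies of their msb, hence the AND of the s_masks.
     */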
    t1 = arg_info(op->args[1]);
    t2 = arg_info(op->args[2]);
    z_mask = t1->z_mask | t2->z_mask;
    s_mask = t1->s_mask & t2->s_mask;
    return fold_masks_zs(ctx, op, z_mask, s_mask);
}

/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
    int nb_temps, i;
    TCGOp *op, *op_next;
    OptContext ctx = { .tcg = s };

    QSIMPLEQ_INIT(&ctx.mem_free);

    /* Each temp carries a TempOptInfo, hung off its state_ptr.
       If the temp holds a constant, its value is kept there.
       If the temp is a copy of other ones, the other copies are
       available through the doubly linked circular list. */

    nb_temps = s->nb_temps;
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = NULL;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def;
        bool done = false;

        /* Calls are special. */
        if (opc == INDEX_op_call) {
            fold_call(&ctx, op);
            continue;
        }

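        /*
         * Ensure each argument has its TempOptInfo attached, then
         * replace any input argument that is a copy with its canonical
         * representative before examining the opcode.
         */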
        def = &tcg_op_defs[opc];
        init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
        copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);

        /* Pre-compute the type of the operation. */
        ctx.type = TCGOP_TYPE(op);

        /*
         * Process each opcode.
         * Sorted alphabetically by opcode as much as possible.
         */
        switch (opc) {
        case INDEX_op_add:
            done = fold_add(&ctx, op);
            break;
        case INDEX_op_add_vec:
            done = fold_add_vec(&ctx, op);
            break;
        CASE_OP_32_64(add2):
            done = fold_add2(&ctx, op);
            break;
        case INDEX_op_and:
        case INDEX_op_and_vec:
            done = fold_and(&ctx, op);
            break;
        case INDEX_op_andc:
        case INDEX_op_andc_vec:
            done = fold_andc(&ctx, op);
            break;
        CASE_OP_32_64(brcond):
            done = fold_brcond(&ctx, op);
            break;
        case INDEX_op_brcond2_i32:
            done = fold_brcond2(&ctx, op);
            break;
        CASE_OP_32_64(bswap16):
        CASE_OP_32_64(bswap32):
        case INDEX_op_bswap64_i64:
            done = fold_bswap(&ctx, op);
            break;
        CASE_OP_32_64(clz):
        CASE_OP_32_64(ctz):
            done = fold_count_zeros(&ctx, op);
            break;
        CASE_OP_32_64(ctpop):
            done = fold_ctpop(&ctx, op);
            break;
        CASE_OP_32_64(deposit):
            done = fold_deposit(&ctx, op);
            break;
        CASE_OP_32_64(div):
        CASE_OP_32_64(divu):
            done = fold_divide(&ctx, op);
            break;
        case INDEX_op_dup_vec:
            done = fold_dup(&ctx, op);
            break;
        case INDEX_op_dup2_vec:
            done = fold_dup2(&ctx, op);
            break;
        CASE_OP_32_64_VEC(eqv):
            done = fold_eqv(&ctx, op);
            break;
        CASE_OP_32_64(extract):
            done = fold_extract(&ctx, op);
            break;
        CASE_OP_32_64(extract2):
            done = fold_extract2(&ctx, op);
            break;
        case INDEX_op_ext_i32_i64:
            done = fold_exts(&ctx, op);
            break;
        case INDEX_op_extu_i32_i64:
        case INDEX_op_extrl_i64_i32:
        case INDEX_op_extrh_i64_i32:
            done = fold_extu(&ctx, op);
            break;
        CASE_OP_32_64(ld8s):
        CASE_OP_32_64(ld8u):
        CASE_OP_32_64(ld16s):
        CASE_OP_32_64(ld16u):
        case INDEX_op_ld32s_i64:
        case INDEX_op_ld32u_i64:
            done = fold_tcg_ld(&ctx, op);
            break;
        case INDEX_op_ld_i32:
        case INDEX_op_ld_i64:
        case INDEX_op_ld_vec:
            done = fold_tcg_ld_memcopy(&ctx, op);
            break;
        CASE_OP_32_64(st8):
        CASE_OP_32_64(st16):
        case INDEX_op_st32_i64:
            done = fold_tcg_st(&ctx, op);
            break;
        case INDEX_op_st_i32:
        case INDEX_op_st_i64:
        case INDEX_op_st_vec:
            done = fold_tcg_st_memcopy(&ctx, op);
            break;
        case INDEX_op_mb:
            done = fold_mb(&ctx, op);
            break;
        case INDEX_op_mov:
        case INDEX_op_mov_vec:
            done = fold_mov(&ctx, op);
            break;
        CASE_OP_32_64(movcond):
            done = fold_movcond(&ctx, op);
            break;
        CASE_OP_32_64(mul):
            done = fold_mul(&ctx, op);
            break;
        CASE_OP_32_64(mulsh):
        CASE_OP_32_64(muluh):
            done = fold_mul_highpart(&ctx, op);
            break;
        CASE_OP_32_64(muls2):
        CASE_OP_32_64(mulu2):
            done = fold_multiply2(&ctx, op);
            break;
        CASE_OP_32_64_VEC(nand):
            done = fold_nand(&ctx, op);
            break;
        CASE_OP_32_64(neg):
            done = fold_neg(&ctx, op);
            break;
        CASE_OP_32_64_VEC(nor):
            done = fold_nor(&ctx, op);
            break;
        CASE_OP_32_64_VEC(not):
            done = fold_not(&ctx, op);
            break;
        case INDEX_op_or:
        case INDEX_op_or_vec:
            done = fold_or(&ctx, op);
            break;
        CASE_OP_32_64_VEC(orc):
            done = fold_orc(&ctx, op);
            break;
        case INDEX_op_qemu_ld_i32:
            done = fold_qemu_ld_1reg(&ctx, op);
            break;
        case INDEX_op_qemu_ld_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                done = fold_qemu_ld_1reg(&ctx, op);
                break;
            }
            QEMU_FALLTHROUGH;
        case INDEX_op_qemu_ld_i128:
            done = fold_qemu_ld_2reg(&ctx, op);
            break;
        case INDEX_op_qemu_st8_i32:
        case INDEX_op_qemu_st_i32:
        case INDEX_op_qemu_st_i64:
        case INDEX_op_qemu_st_i128:
            done = fold_qemu_st(&ctx, op);
            break;
        CASE_OP_32_64(rem):
        CASE_OP_32_64(remu):
            done = fold_remainder(&ctx, op);
            break;
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
            done = fold_shift(&ctx, op);
            break;
        CASE_OP_32_64(setcond):
            done = fold_setcond(&ctx, op);
            break;
        CASE_OP_32_64(negsetcond):
            done = fold_negsetcond(&ctx, op);
            break;
        case INDEX_op_setcond2_i32:
            done = fold_setcond2(&ctx, op);
            break;
        case INDEX_op_cmp_vec:
            done = fold_cmp_vec(&ctx, op);
            break;
        case INDEX_op_cmpsel_vec:
            done = fold_cmpsel_vec(&ctx, op);
            break;
        case INDEX_op_bitsel_vec:
            done = fold_bitsel_vec(&ctx, op);
            break;
        CASE_OP_32_64(sextract):
            done = fold_sextract(&ctx, op);
            break;
        CASE_OP_32_64(sub):
            done = fold_sub(&ctx, op);
            break;
        case INDEX_op_sub_vec:
            done = fold_sub_vec(&ctx, op);
            break;
        CASE_OP_32_64(sub2):
            done = fold_sub2(&ctx, op);
            break;
        CASE_OP_32_64_VEC(xor):
            done = fold_xor(&ctx, op);
            break;
        case INDEX_op_set_label:
        case INDEX_op_br:
        case INDEX_op_exit_tb:
        case INDEX_op_goto_tb:
        case INDEX_op_goto_ptr:
            finish_ebb(&ctx);
            done = true;
            break;
        default:
            done = finish_folding(&ctx, op);
            break;
        }
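        /*
         * Every case above must have finished the folding of the op:
         * either by removing or replacing it, or via one of the
         * finish_* helpers, which also update the known-bits state.
         */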
        tcg_debug_assert(done);
    }
}