/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "qemu/interval-tree.h"
#include "tcg/tcg-op-common.h"
#include "tcg-internal.h"
#include "tcg-has.h"

#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)

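/*
 * A MemCopyInfo records that the memory range [itree.start, itree.last]
 * currently holds the same value as temp @ts, interpreted with TCGType
 * @type.  Live records are kept both in an interval tree rooted in the
 * OptContext and on a per-temp list in TempOptInfo.
 */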
typedef struct MemCopyInfo {
    IntervalTreeNode itree;
    QSIMPLEQ_ENTRY (MemCopyInfo) next;
    TCGTemp *ts;
    TCGType type;
} MemCopyInfo;

typedef struct TempOptInfo {
    bool is_const;
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
    uint64_t val;
    uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
} TempOptInfo;

typedef struct OptContext {
    TCGContext *tcg;
    TCGOp *prev_mb;
    TCGTempSet temps_used;

    IntervalTreeRoot mem_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;

    /* In-flight values from optimization. */
    TCGType type;
} OptContext;

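/*
 * Per-temp optimizer state hangs off TCGTemp.state_ptr; these helpers
 * fetch it from a TCGTemp or a TCGArg and query constness.
 */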
static inline TempOptInfo *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline TempOptInfo *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ti_is_const(TempOptInfo *ti)
{
    return ti->is_const;
}

static inline uint64_t ti_const_val(TempOptInfo *ti)
{
    return ti->val;
}

static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
{
    return ti_is_const(ti) && ti_const_val(ti) == val;
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ti_is_const(ts_info(ts));
}

static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
{
    return ti_is_const_val(ts_info(ts), val);
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline bool arg_is_const_val(TCGArg arg, uint64_t val)
{
    return ts_is_const_val(arg_temp(arg), val);
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}

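/*
 * Of two temps holding the same value, return the preferred one: the
 * one with the greater TCGTempKind, which favors constant and global
 * temps over short-lived temporaries.
 */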
static TCGTemp *cmp_better_copy(TCGTemp *a, TCGTemp *b)
{
    return a->kind < b->kind ? b : a;
}

/* Initialize and activate a temporary. */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    TempOptInfo *ti;

    if (test_bit(idx, ctx->temps_used.l)) {
        return;
    }
    set_bit(idx, ctx->temps_used.l);

    ti = ts->state_ptr;
    if (ti == NULL) {
        ti = tcg_malloc(sizeof(TempOptInfo));
        ts->state_ptr = ti;
    }

    ti->next_copy = ts;
    ti->prev_copy = ts;
    QSIMPLEQ_INIT(&ti->mem_copy);
    if (ts->kind == TEMP_CONST) {
        ti->is_const = true;
        ti->val = ts->val;
        ti->z_mask = ts->val;
        ti->s_mask = INT64_MIN >> clrsb64(ts->val);
    } else {
        ti->is_const = false;
        ti->z_mask = -1;
        ti->s_mask = 0;
    }
}

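/*
 * Interval-tree iteration over the MemCopyInfo records overlapping
 * the byte range [s, l].
 */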
static MemCopyInfo *mem_copy_first(OptContext *ctx, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_first(&ctx->mem_copy, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static MemCopyInfo *mem_copy_next(MemCopyInfo *mem, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_next(&mem->itree, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static void remove_mem_copy(OptContext *ctx, MemCopyInfo *mc)
{
    TCGTemp *ts = mc->ts;
    TempOptInfo *ti = ts_info(ts);

    interval_tree_remove(&mc->itree, &ctx->mem_copy);
    QSIMPLEQ_REMOVE(&ti->mem_copy, mc, MemCopyInfo, next);
    QSIMPLEQ_INSERT_TAIL(&ctx->mem_free, mc, next);
}

static void remove_mem_copy_in(OptContext *ctx, intptr_t s, intptr_t l)
{
    while (true) {
        MemCopyInfo *mc = mem_copy_first(ctx, s, l);
        if (!mc) {
            break;
        }
        remove_mem_copy(ctx, mc);
    }
}

static void remove_mem_copy_all(OptContext *ctx)
{
    remove_mem_copy_in(ctx, 0, -1);
    tcg_debug_assert(interval_tree_is_empty(&ctx->mem_copy));
}

static TCGTemp *find_better_copy(TCGTemp *ts)
{
    TCGTemp *i, *ret;

    /* If this is already readonly, we can't do better. */
    if (temp_readonly(ts)) {
        return ts;
    }

    ret = ts;
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        ret = cmp_better_copy(ret, i);
    }
    return ret;
}

static void move_mem_copies(TCGTemp *dst_ts, TCGTemp *src_ts)
{
    TempOptInfo *si = ts_info(src_ts);
    TempOptInfo *di = ts_info(dst_ts);
    MemCopyInfo *mc;

    QSIMPLEQ_FOREACH(mc, &si->mem_copy, next) {
        tcg_debug_assert(mc->ts == src_ts);
        mc->ts = dst_ts;
    }
    QSIMPLEQ_CONCAT(&di->mem_copy, &si->mem_copy);
}

/* Reset TEMP's state, possibly removing the temp from the list of copies. */
static void reset_ts(OptContext *ctx, TCGTemp *ts)
{
    TempOptInfo *ti = ts_info(ts);
    TCGTemp *pts = ti->prev_copy;
    TCGTemp *nts = ti->next_copy;
    TempOptInfo *pi = ts_info(pts);
    TempOptInfo *ni = ts_info(nts);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->z_mask = -1;
    ti->s_mask = 0;

    if (!QSIMPLEQ_EMPTY(&ti->mem_copy)) {
        if (ts == nts) {
            /* Last temp copy being removed, the mem copies die. */
            MemCopyInfo *mc;
            QSIMPLEQ_FOREACH(mc, &ti->mem_copy, next) {
                interval_tree_remove(&mc->itree, &ctx->mem_copy);
            }
            QSIMPLEQ_CONCAT(&ctx->mem_free, &ti->mem_copy);
        } else {
            move_mem_copies(find_better_copy(nts), ts);
        }
    }
}

static void reset_temp(OptContext *ctx, TCGArg arg)
{
    reset_ts(ctx, arg_temp(arg));
}

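/*
 * Record that the memory range [start, last] now holds a copy of temp
 * @ts with type @type, recycling a MemCopyInfo from the free list when
 * one is available.
 */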
static void record_mem_copy(OptContext *ctx, TCGType type,
                            TCGTemp *ts, intptr_t start, intptr_t last)
{
    MemCopyInfo *mc;
    TempOptInfo *ti;

    mc = QSIMPLEQ_FIRST(&ctx->mem_free);
    if (mc) {
        QSIMPLEQ_REMOVE_HEAD(&ctx->mem_free, next);
    } else {
        mc = tcg_malloc(sizeof(*mc));
    }

    memset(mc, 0, sizeof(*mc));
    mc->itree.start = start;
    mc->itree.last = last;
    mc->type = type;
    interval_tree_insert(&mc->itree, &ctx->mem_copy);

    ts = find_better_copy(ts);
    ti = ts_info(ts);
    mc->ts = ts;
    QSIMPLEQ_INSERT_TAIL(&ti->mem_copy, mc, next);
}

static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}

static TCGTemp *find_mem_copy_for(OptContext *ctx, TCGType type, intptr_t s)
{
    MemCopyInfo *mc;

    for (mc = mem_copy_first(ctx, s, s); mc; mc = mem_copy_next(mc, s, s)) {
        if (mc->itree.start == s && mc->type == type) {
            return find_better_copy(mc->ts);
        }
    }
    return NULL;
}

static TCGArg arg_new_constant(OptContext *ctx, uint64_t val)
{
    TCGType type = ctx->type;
    TCGTemp *ts;

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    ts = tcg_constant_internal(type, val);
    init_ts_info(ctx, ts);

    return temp_arg(ts);
}

static TCGArg arg_new_temp(OptContext *ctx)
{
    TCGTemp *ts = tcg_temp_new_internal(ctx->type, TEMP_EBB);
    init_ts_info(ctx, ts);
    return temp_arg(ts);
}

static TCGOp *opt_insert_after(OptContext *ctx, TCGOp *op,
                               TCGOpcode opc, unsigned narg)
{
    return tcg_op_insert_after(ctx->tcg, op, opc, ctx->type, narg);
}

static TCGOp *opt_insert_before(OptContext *ctx, TCGOp *op,
                                TCGOpcode opc, unsigned narg)
{
    return tcg_op_insert_before(ctx->tcg, op, opc, ctx->type, narg);
}

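/*
 * Rewrite @op into a move from @src to @dst.  Constness and the
 * known-bits masks are propagated to @dst; when the temp types match,
 * @dst also joins @src's copy list and may inherit its memory copies.
 */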
static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    TempOptInfo *di;
    TempOptInfo *si;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }

    reset_ts(ctx, dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);

    switch (ctx->type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        new_op = INDEX_op_mov;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /* TCGOP_TYPE and TCGOP_VECE remain unchanged. */
        new_op = INDEX_op_mov_vec;
        break;
    default:
        g_assert_not_reached();
    }
    op->opc = new_op;
    op->args[0] = dst;
    op->args[1] = src;

    di->z_mask = si->z_mask;
    di->s_mask = si->s_mask;

    if (src_ts->type == dst_ts->type) {
        TempOptInfo *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;

        if (!QSIMPLEQ_EMPTY(&si->mem_copy)
            && cmp_better_copy(src_ts, dst_ts) == dst_ts) {
            move_mem_copies(dst_ts, src_ts);
        }
    }
    return true;
}

static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                             TCGArg dst, uint64_t val)
{
    /* Convert movi to mov with constant temp. */
    return tcg_opt_gen_mov(ctx, op, dst, arg_new_constant(ctx, val));
}

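/*
 * Evaluate one operation with constant operands @x and @y.  @type
 * selects 32-bit or 64-bit semantics for the opcodes that are not
 * already width-specific.
 */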
static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
                                      uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

    switch (op) {
    case INDEX_op_add:
        return x + y;

    case INDEX_op_sub:
        return x - y;

    case INDEX_op_mul:
        return x * y;

    case INDEX_op_and:
    case INDEX_op_and_vec:
        return x & y;

    case INDEX_op_or:
    case INDEX_op_or_vec:
        return x | y;

    case INDEX_op_xor:
    case INDEX_op_xor_vec:
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    case INDEX_op_not:
    case INDEX_op_not_vec:
        return ~x;

    case INDEX_op_neg:
        return -x;

    case INDEX_op_andc:
    case INDEX_op_andc_vec:
        return x & ~y;

    case INDEX_op_orc:
    case INDEX_op_orc_vec:
        return x | ~y;

    case INDEX_op_eqv:
    case INDEX_op_eqv_vec:
        return ~(x ^ y);

    case INDEX_op_nand:
    case INDEX_op_nand_vec:
        return ~(x & y);

    case INDEX_op_nor:
    case INDEX_op_nor_vec:
        return ~(x | y);

    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(bswap16):
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

    CASE_OP_32_64(bswap32):
        x = bswap32(x);
        return y & TCG_BSWAP_OS ? (int32_t)x : x;

    case INDEX_op_bswap64_i64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh:
        if (type == TCG_TYPE_I32) {
            return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
        }
        mulu64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_mulsh:
        if (type == TCG_TYPE_I32) {
            return ((int64_t)(int32_t)x * (int32_t)y) >> 32;
        }
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_divs:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        if (type == TCG_TYPE_I32) {
            return (int32_t)x / ((int32_t)y ? : 1);
        }
        return (int64_t)x / ((int64_t)y ? : 1);

    case INDEX_op_divu:
        if (type == TCG_TYPE_I32) {
            return (uint32_t)x / ((uint32_t)y ? : 1);
        }
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rems:
        if (type == TCG_TYPE_I32) {
            return (int32_t)x % ((int32_t)y ? : 1);
        }
        return (int64_t)x % ((int64_t)y ? : 1);

    case INDEX_op_remu:
        if (type == TCG_TYPE_I32) {
            return (uint32_t)x % ((uint32_t)y ? : 1);
        }
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        g_assert_not_reached();
    }
}

static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
                                    uint64_t x, uint64_t y)
{
    uint64_t res = do_constant_folding_2(op, type, x, y);
    if (type == TCG_TYPE_I32) {
        res = (int32_t)res;
    }
    return res;
}

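/*
 * Evaluate a comparison at 32 or 64 bits.  The TSTEQ/TSTNE conditions
 * test (x & y) against zero; ALWAYS/NEVER are expected to have been
 * removed before this point.
 */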
static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

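/*
 * Evaluate a condition whose two operands are known to be equal.
 * Returns 0 or 1 for conditions decided by equality alone, or -1 for
 * the TST conditions, whose result still depends on the value.
 */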
static int do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        return -1;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond(TCGType type, TCGArg x,
                                    TCGArg y, TCGCond c)
{
    if (arg_is_const(x) && arg_is_const(y)) {
        uint64_t xv = arg_info(x)->val;
        uint64_t yv = arg_info(y)->val;

        switch (type) {
        case TCG_TYPE_I32:
            return do_constant_folding_cond_32(xv, yv, c);
        case TCG_TYPE_I64:
            return do_constant_folding_cond_64(xv, yv, c);
        default:
            /* Only scalar comparisons are optimizable */
            return -1;
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const_val(y, 0)) {
        switch (c) {
        case TCG_COND_LTU:
        case TCG_COND_TSTNE:
            return 0;
        case TCG_COND_GEU:
        case TCG_COND_TSTEQ:
            return 1;
        default:
            return -1;
        }
    }
    return -1;
}

/**
 * swap_commutative:
 * @dest: TCGArg of the destination argument, or NO_DEST.
 * @p1: first paired argument
 * @p2: second paired argument
 *
 * If *@p1 is a constant and *@p2 is not, swap.
 * If *@p2 matches @dest, swap.
 * Return true if a swap was performed.
 */

#define NO_DEST  temp_arg(NULL)

static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += arg_is_const(a1);
    sum -= arg_is_const(a2);

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}

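/*
 * As swap_commutative, but for the double-word operand pairs
 * (p1[0],p1[1]) and (p2[0],p2[1]), swapped as a unit.
 */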
static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += arg_is_const(p1[0]);
    sum += arg_is_const(p1[1]);
    sum -= arg_is_const(p2[0]);
    sum -= arg_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest,
                                     TCGArg *p1, TCGArg *p2, TCGArg *pcond)
{
    TCGCond cond;
    TempOptInfo *i1;
    bool swap;
    int r;

    swap = swap_commutative(dest, p1, p2);
    cond = *pcond;
    if (swap) {
        *pcond = cond = tcg_swap_cond(cond);
    }

    r = do_constant_folding_cond(ctx->type, *p1, *p2, cond);
    if (r >= 0) {
        return r;
    }
    if (!is_tst_cond(cond)) {
        return -1;
    }

    i1 = arg_info(*p1);

    /*
     * TSTNE x,x -> NE x,0
     * TSTNE x,i -> NE x,0 if i includes all nonzero bits of x
     */
    if (args_are_copies(*p1, *p2) ||
        (arg_is_const(*p2) && (i1->z_mask & ~arg_info(*p2)->val) == 0)) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
        return -1;
    }

    /* TSTNE x,i -> LT x,0 if i only includes sign bit copies */
    if (arg_is_const(*p2) && (arg_info(*p2)->val & ~i1->s_mask) == 0) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_ltge_cond(cond);
        return -1;
    }

    /* Expand to AND with a temporary if no backend support. */
    if (!TCG_TARGET_HAS_tst) {
        TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and, 3);
        TCGArg tmp = arg_new_temp(ctx);

        op2->args[0] = tmp;
        op2->args[1] = *p1;
        op2->args[2] = *p2;

        *p1 = tmp;
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
    }
    return -1;
}

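/*
 * Simplify a double-word comparison of args[0:1] against args[2:3]
 * under condition args[4].  Return the comparison result (0 or 1) if
 * it can be decided, else -1, possibly after canonicalizing the
 * condition in place.
 */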
static int do_constant_folding_cond2(OptContext *ctx, TCGOp *op, TCGArg *args)
{
    TCGArg al, ah, bl, bh;
    TCGCond c;
    bool swap;
    int r;

    swap = swap_commutative2(args, args + 2);
    c = args[4];
    if (swap) {
        args[4] = c = tcg_swap_cond(c);
    }

    al = args[0];
    ah = args[1];
    bl = args[2];
    bh = args[3];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);

            r = do_constant_folding_cond_64(a, b, c);
            if (r >= 0) {
                return r;
            }
        }

        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
            case TCG_COND_TSTNE:
                return 0;
            case TCG_COND_GEU:
            case TCG_COND_TSTEQ:
                return 1;
            default:
                break;
            }
        }

        /* TSTNE x,-1 -> NE x,0 */
        if (b == -1 && is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }

        /* TSTNE x,sign -> LT x,0 */
        if (b == INT64_MIN && is_tst_cond(c)) {
            /* bl must be 0, so copy that to bh */
            args[3] = bl;
            args[4] = tcg_tst_ltge_cond(c);
            return -1;
        }
    }

    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        r = do_constant_folding_cond_eq(c);
        if (r >= 0) {
            return r;
        }

        /* TSTNE x,x -> NE x,0 */
        if (is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }
    }

    /* Expand to AND with a temporary if no backend support. */
    if (!TCG_TARGET_HAS_tst && is_tst_cond(c)) {
        TCGOp *op1 = opt_insert_before(ctx, op, INDEX_op_and, 3);
        TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and, 3);
        TCGArg t1 = arg_new_temp(ctx);
        TCGArg t2 = arg_new_temp(ctx);

        op1->args[0] = t1;
        op1->args[1] = al;
        op1->args[2] = bl;
        op2->args[0] = t2;
        op2->args[1] = ah;
        op2->args[2] = bh;

        args[0] = t1;
        args[1] = t2;
        args[3] = args[2] = arg_new_constant(ctx, 0);
        args[4] = tcg_tst_eqne_cond(c);
    }
    return -1;
}

static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
{
    for (int i = 0; i < nb_args; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        init_ts_info(ctx, ts);
    }
}

static void copy_propagate(OptContext *ctx, TCGOp *op,
                           int nb_oargs, int nb_iargs)
{
    for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts_is_copy(ts)) {
            op->args[i] = temp_arg(find_better_copy(ts));
        }
    }
}

static void finish_bb(OptContext *ctx)
{
    /* We only optimize memory barriers across basic blocks. */
    ctx->prev_mb = NULL;
}

static void finish_ebb(OptContext *ctx)
{
    finish_bb(ctx);
    /* We only optimize across extended basic blocks. */
    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
    remove_mem_copy_all(ctx);
}

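/*
 * Default finalizer for a folded operation: mark every output of @op
 * as having unknown contents.  Returns true so callers can tail-call.
 */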
static bool finish_folding(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    int i, nb_oargs;

    nb_oargs = def->nb_oargs;
    for (i = 0; i < nb_oargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        reset_ts(ctx, ts);
    }
    return true;
}

/*
 * The fold_* functions return true when processing is complete,
 * usually by folding the operation to a constant or to a copy,
 * and calling tcg_opt_gen_{mov,movi}. They may do other things,
 * like collect information about the value produced, for use in
 * optimizing a subsequent operation.
 *
 * These first fold_* functions are all helpers, used by other
 * folders for more specific operations.
 */

static bool fold_const1(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = do_constant_folding(op->opc, ctx->type, t, 0);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_const2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}

static bool fold_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return false;
}

static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return fold_const2(ctx, op);
}

/*
 * Record "zero" and "sign" masks for the single output of @op.
 * See TempOptInfo definition of z_mask and s_mask.
 * If z_mask allows, fold the output to constant zero.
 * The passed s_mask may be augmented by z_mask.
 */
static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
                          uint64_t z_mask, int64_t s_mask)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    TCGTemp *ts;
    TempOptInfo *ti;
    int rep;

    /* Only single-output opcodes are supported here. */
    tcg_debug_assert(def->nb_oargs == 1);

    /*
     * 32-bit ops generate 32-bit results, which for the purpose of
     * simplifying tcg are sign-extended. Certainly that's how we
     * represent our constants elsewhere. Note that the bits will
     * be reset properly for a 64-bit value when encountering the
     * type changing opcodes.
     */
    if (ctx->type == TCG_TYPE_I32) {
        z_mask = (int32_t)z_mask;
        s_mask |= INT32_MIN;
    }

    if (z_mask == 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
    }

    ts = arg_temp(op->args[0]);
    reset_ts(ctx, ts);

    ti = ts_info(ts);
    ti->z_mask = z_mask;

    /* Canonicalize s_mask and incorporate data from z_mask. */
    rep = clz64(~s_mask);
    rep = MAX(rep, clz64(z_mask));
    rep = MAX(rep - 1, 0);
    ti->s_mask = INT64_MIN >> rep;

    return true;
}

static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
{
    return fold_masks_zs(ctx, op, z_mask, 0);
}

static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
{
    return fold_masks_zs(ctx, op, -1, s_mask);
}

/*
 * An "affected" mask bit is 0 if and only if the result is identical
 * to the first input. Thus if the entire mask is 0, the operation
 * is equivalent to a copy.
 */
static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
{
    if (ctx->type == TCG_TYPE_I32) {
        a_mask = (uint32_t)a_mask;
    }
    if (a_mask == 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/*
 * Convert @op to NOT, if NOT is supported by the host.
 * Return true if the conversion is successful, which will still
 * indicate that the processing is complete.
 */
static bool fold_not(OptContext *ctx, TCGOp *op);
static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
{
    TCGOpcode not_op;
    bool have_not;

    switch (ctx->type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        not_op = INDEX_op_not;
        have_not = tcg_op_supported(INDEX_op_not, ctx->type, 0);
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        not_op = INDEX_op_not_vec;
        have_not = TCG_TARGET_HAS_not_vec;
        break;
    default:
        g_assert_not_reached();
    }
    if (have_not) {
        op->opc = not_op;
        op->args[1] = op->args[idx];
        return fold_not(ctx, op);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to @i. */
static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to NOT. */
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return fold_to_not(ctx, op, 2);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to @i. */
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to identity. */
static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to NOT. */
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return fold_to_not(ctx, op, 1);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to @i. */
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to identity. */
static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/*
 * These outermost fold_<op> functions are sorted alphabetically.
 *
 * The ordering of the transformations should be:
 *   1) those that produce a constant
 *   2) those that produce a copy
 *   3) those that produce information about the result value.
 */

static bool fold_or(OptContext *ctx, TCGOp *op);
static bool fold_orc(OptContext *ctx, TCGOp *op);
static bool fold_xor(OptContext *ctx, TCGOp *op);

static bool fold_add(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return finish_folding(ctx, op);
}

/* We cannot as yet do_constant_folding with vectors. */
static bool fold_add_vec(OptContext *ctx, TCGOp *op)
{
    if (fold_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return finish_folding(ctx, op);
}

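/*
 * Shared folder for double-word add2/sub2: fold entirely when both
 * double-word operands are constant, and rewrite sub2 with a constant
 * subtrahend into add2 of its negation.
 */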
static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
{
    bool a_const = arg_is_const(op->args[2]) && arg_is_const(op->args[3]);
    bool b_const = arg_is_const(op->args[4]) && arg_is_const(op->args[5]);

    if (a_const && b_const) {
        uint64_t al = arg_info(op->args[2])->val;
        uint64_t ah = arg_info(op->args[3])->val;
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;
        TCGArg rl, rh;
        TCGOp *op2;

        if (ctx->type == TCG_TYPE_I32) {
            uint64_t a = deposit64(al, 32, 32, ah);
            uint64_t b = deposit64(bl, 32, 32, bh);

            if (add) {
                a += b;
            } else {
                a -= b;
            }

            al = sextract64(a, 0, 32);
            ah = sextract64(a, 32, 32);
        } else {
            Int128 a = int128_make128(al, ah);
            Int128 b = int128_make128(bl, bh);

            if (add) {
                a = int128_add(a, b);
            } else {
                a = int128_sub(a, b);
            }

            al = int128_getlo(a);
            ah = int128_gethi(a);
        }

        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = opt_insert_before(ctx, op, 0, 2);

        tcg_opt_gen_movi(ctx, op, rl, al);
        tcg_opt_gen_movi(ctx, op2, rh, ah);
        return true;
    }

    /* Fold sub2 r,x,i to add2 r,x,-i */
    if (!add && b_const) {
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;

        /* Negate the two parts without assembling and disassembling. */
        bl = -bl;
        bh = ~bh + !bl;

        op->opc = (ctx->type == TCG_TYPE_I32
                   ? INDEX_op_add2_i32 : INDEX_op_add2_i64);
        op->args[4] = arg_new_constant(ctx, bl);
        op->args[5] = arg_new_constant(ctx, bh);
    }
    return finish_folding(ctx, op);
}

static bool fold_add2(OptContext *ctx, TCGOp *op)
{
    /* Note that the high and low parts may be independently swapped. */
    swap_commutative(op->args[0], &op->args[2], &op->args[4]);
    swap_commutative(op->args[1], &op->args[3], &op->args[5]);

    return fold_addsub2(ctx, op, true);
}

static bool fold_and(OptContext *ctx, TCGOp *op)
{
    uint64_t z1, z2, z_mask, s_mask;
    TempOptInfo *t1, *t2;

    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_xx_to_x(ctx, op)) {
        return true;
    }

    t1 = arg_info(op->args[1]);
    t2 = arg_info(op->args[2]);
    z1 = t1->z_mask;
    z2 = t2->z_mask;

    /*
     * Known-zeros does not imply known-ones. Therefore unless
     * arg2 is constant, we can't infer affected bits from it.
     */
    if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
        return true;
    }

    z_mask = z1 & z2;

    /*
     * Sign repetitions are perforce all identical, whether they are 1 or 0.
     * Bitwise operations preserve the relative quantity of the repetitions.
     */
    s_mask = t1->s_mask & t2->s_mask;

    return fold_masks_zs(ctx, op, z_mask, s_mask);
}

1344static bool fold_andc(OptContext *ctx, TCGOp *op)
1345{
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001346 uint64_t z_mask, s_mask;
1347 TempOptInfo *t1, *t2;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001348
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07001349 if (fold_const2(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001350 fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001351 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001352 fold_ix_to_not(ctx, op, -1)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07001353 return true;
1354 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001355
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001356 t1 = arg_info(op->args[1]);
1357 t2 = arg_info(op->args[2]);
1358 z_mask = t1->z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001359
Richard Henderson899281c2023-11-15 11:18:55 -08001360 if (ti_is_const(t2)) {
1361 /* Fold andc r,x,i to and r,x,~i. */
1362 switch (ctx->type) {
1363 case TCG_TYPE_I32:
1364 case TCG_TYPE_I64:
1365 op->opc = INDEX_op_and;
1366 break;
1367 case TCG_TYPE_V64:
1368 case TCG_TYPE_V128:
1369 case TCG_TYPE_V256:
1370 op->opc = INDEX_op_and_vec;
1371 break;
1372 default:
1373 g_assert_not_reached();
1374 }
1375 op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
1376 return fold_and(ctx, op);
1377 }
1378
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001391 s_mask = t1->s_mask & t2->s_mask;
1392 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001393}
1394
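/* bitsel_vec v0, v1, v2, v3 computes (v1 & v2) | (~v1 & v3). */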
Richard Henderson7d3c63a2024-12-09 14:06:08 -06001395static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
1396{
1397 /* If true and false values are the same, eliminate the cmp. */
1398 if (args_are_copies(op->args[2], op->args[3])) {
1399 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
1400 }
1401
1402 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
1403 uint64_t tv = arg_info(op->args[2])->val;
1404 uint64_t fv = arg_info(op->args[3])->val;
1405
1406 if (tv == -1 && fv == 0) {
1407 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
1408 }
1409 if (tv == 0 && fv == -1) {
1410 if (TCG_TARGET_HAS_not_vec) {
1411 op->opc = INDEX_op_not_vec;
1412 return fold_not(ctx, op);
1413 } else {
1414 op->opc = INDEX_op_xor_vec;
1415 op->args[2] = arg_new_constant(ctx, -1);
1416 return fold_xor(ctx, op);
1417 }
1418 }
1419 }
1420 if (arg_is_const(op->args[2])) {
1421 uint64_t tv = arg_info(op->args[2])->val;
1422 if (tv == -1) {
1423 op->opc = INDEX_op_or_vec;
1424 op->args[2] = op->args[3];
1425 return fold_or(ctx, op);
1426 }
1427 if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
1428 op->opc = INDEX_op_andc_vec;
1429 op->args[2] = op->args[1];
1430 op->args[1] = op->args[3];
1431 return fold_andc(ctx, op);
1432 }
1433 }
1434 if (arg_is_const(op->args[3])) {
1435 uint64_t fv = arg_info(op->args[3])->val;
1436 if (fv == 0) {
1437 op->opc = INDEX_op_and_vec;
1438 return fold_and(ctx, op);
1439 }
1440 if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
1441 op->opc = INDEX_op_orc_vec;
1442 op->args[2] = op->args[1];
1443 op->args[1] = op->args[3];
1444 return fold_orc(ctx, op);
1445 }
1446 }
1447 return finish_folding(ctx, op);
1448}
1449
Richard Henderson079b0802021-08-24 09:30:59 -07001450static bool fold_brcond(OptContext *ctx, TCGOp *op)
1451{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001452 int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
Richard Henderson246c4b72023-10-24 16:36:50 -07001453 &op->args[1], &op->args[2]);
Richard Henderson079b0802021-08-24 09:30:59 -07001454 if (i == 0) {
1455 tcg_op_remove(ctx->tcg, op);
1456 return true;
1457 }
1458 if (i > 0) {
1459 op->opc = INDEX_op_br;
1460 op->args[0] = op->args[3];
Richard Henderson15268552024-12-08 07:45:11 -06001461 finish_ebb(ctx);
1462 } else {
1463 finish_bb(ctx);
Richard Henderson079b0802021-08-24 09:30:59 -07001464 }
Richard Henderson15268552024-12-08 07:45:11 -06001465 return true;
Richard Henderson079b0802021-08-24 09:30:59 -07001466}
1467
Richard Henderson764d2ab2021-08-24 09:22:11 -07001468static bool fold_brcond2(OptContext *ctx, TCGOp *op)
1469{
Richard Henderson7e64b112023-10-24 16:53:56 -07001470 TCGCond cond;
1471 TCGArg label;
Richard Henderson7a2f7082021-08-26 07:06:39 -07001472 int i, inv = 0;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001473
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001474 i = do_constant_folding_cond2(ctx, op, &op->args[0]);
Richard Henderson7e64b112023-10-24 16:53:56 -07001475 cond = op->args[4];
1476 label = op->args[5];
Richard Henderson764d2ab2021-08-24 09:22:11 -07001477 if (i >= 0) {
1478 goto do_brcond_const;
1479 }
1480
1481 switch (cond) {
1482 case TCG_COND_LT:
1483 case TCG_COND_GE:
1484 /*
1485 * Simplify LT/GE comparisons vs zero to a single compare
1486 * vs the high word of the input.
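         * The sign of the double-word value is just the sign
         * of its high part.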
1487 */
Richard Henderson27cdb852023-10-23 11:38:00 -07001488 if (arg_is_const_val(op->args[2], 0) &&
1489 arg_is_const_val(op->args[3], 0)) {
Richard Henderson764d2ab2021-08-24 09:22:11 -07001490 goto do_brcond_high;
1491 }
1492 break;
1493
1494 case TCG_COND_NE:
1495 inv = 1;
1496 QEMU_FALLTHROUGH;
1497 case TCG_COND_EQ:
1498 /*
1499 * Simplify EQ/NE comparisons where one of the pairs
1500 * can be simplified.
1501 */
Richard Henderson67f84c92021-08-25 08:00:20 -07001502 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
Richard Henderson764d2ab2021-08-24 09:22:11 -07001503 op->args[2], cond);
1504 switch (i ^ inv) {
1505 case 0:
1506 goto do_brcond_const;
1507 case 1:
1508 goto do_brcond_high;
1509 }
1510
Richard Henderson67f84c92021-08-25 08:00:20 -07001511 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
Richard Henderson764d2ab2021-08-24 09:22:11 -07001512 op->args[3], cond);
1513 switch (i ^ inv) {
1514 case 0:
1515 goto do_brcond_const;
1516 case 1:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001517 goto do_brcond_low;
1518 }
1519 break;
1520
1521 case TCG_COND_TSTEQ:
1522 case TCG_COND_TSTNE:
1523 if (arg_is_const_val(op->args[2], 0)) {
1524 goto do_brcond_high;
1525 }
1526 if (arg_is_const_val(op->args[3], 0)) {
1527 goto do_brcond_low;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001528 }
1529 break;
1530
1531 default:
1532 break;
1533
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001534 do_brcond_low:
1535 op->opc = INDEX_op_brcond_i32;
1536 op->args[1] = op->args[2];
1537 op->args[2] = cond;
1538 op->args[3] = label;
1539 return fold_brcond(ctx, op);
1540
Richard Henderson764d2ab2021-08-24 09:22:11 -07001541 do_brcond_high:
1542 op->opc = INDEX_op_brcond_i32;
1543 op->args[0] = op->args[1];
1544 op->args[1] = op->args[3];
1545 op->args[2] = cond;
1546 op->args[3] = label;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001547 return fold_brcond(ctx, op);
Richard Henderson764d2ab2021-08-24 09:22:11 -07001548
1549 do_brcond_const:
1550 if (i == 0) {
1551 tcg_op_remove(ctx->tcg, op);
1552 return true;
1553 }
1554 op->opc = INDEX_op_br;
1555 op->args[0] = label;
Richard Henderson15268552024-12-08 07:45:11 -06001556 finish_ebb(ctx);
1557 return true;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001558 }
Richard Henderson15268552024-12-08 07:45:11 -06001559
1560 finish_bb(ctx);
1561 return true;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001562}
1563
Richard Henderson09bacdc2021-08-24 11:58:12 -07001564static bool fold_bswap(OptContext *ctx, TCGOp *op)
1565{
Richard Henderson57fe5c62021-08-26 12:04:46 -07001566 uint64_t z_mask, s_mask, sign;
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001567 TempOptInfo *t1 = arg_info(op->args[1]);
Richard Hendersonfae450b2021-08-25 22:42:19 -07001568
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001569 if (ti_is_const(t1)) {
1570 return tcg_opt_gen_movi(ctx, op, op->args[0],
1571 do_constant_folding(op->opc, ctx->type,
1572 ti_const_val(t1),
1573 op->args[2]));
Richard Henderson09bacdc2021-08-24 11:58:12 -07001574 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001575
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001576 z_mask = t1->z_mask;
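    /* Byte-swapping the value permutes its known-zero bits the same way. */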
Richard Hendersonfae450b2021-08-25 22:42:19 -07001577 switch (op->opc) {
1578 case INDEX_op_bswap16_i32:
1579 case INDEX_op_bswap16_i64:
1580 z_mask = bswap16(z_mask);
1581 sign = INT16_MIN;
1582 break;
1583 case INDEX_op_bswap32_i32:
1584 case INDEX_op_bswap32_i64:
1585 z_mask = bswap32(z_mask);
1586 sign = INT32_MIN;
1587 break;
1588 case INDEX_op_bswap64_i64:
1589 z_mask = bswap64(z_mask);
1590 sign = INT64_MIN;
1591 break;
1592 default:
1593 g_assert_not_reached();
1594 }
1595
Richard Henderson75c3bf32024-12-19 10:50:40 -08001596 s_mask = 0;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001597 switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
1598 case TCG_BSWAP_OZ:
1599 break;
1600 case TCG_BSWAP_OS:
1601 /* If the sign bit may be 1, force all the bits above to 1. */
1602 if (z_mask & sign) {
1603 z_mask |= sign;
1604 }
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001605 /* The value and therefore s_mask is explicitly sign-extended. */
1606 s_mask = sign;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001607 break;
1608 default:
1609 /* The high bits are undefined: force all bits above the sign to 1. */
1610 z_mask |= sign << 1;
1611 break;
1612 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001613
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001614 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson09bacdc2021-08-24 11:58:12 -07001615}
1616
Richard Henderson5cf32be2021-08-24 08:17:08 -07001617static bool fold_call(OptContext *ctx, TCGOp *op)
1618{
1619 TCGContext *s = ctx->tcg;
1620 int nb_oargs = TCGOP_CALLO(op);
1621 int nb_iargs = TCGOP_CALLI(op);
1622 int flags, i;
1623
1624 init_arguments(ctx, op, nb_oargs + nb_iargs);
1625 copy_propagate(ctx, op, nb_oargs, nb_iargs);
1626
1627 /* If the function reads or writes globals, reset temp data. */
1628 flags = tcg_call_flags(op);
1629 if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
1630 int nb_globals = s->nb_globals;
1631
1632 for (i = 0; i < nb_globals; i++) {
1633 if (test_bit(i, ctx->temps_used.l)) {
Richard Henderson986cac12023-01-09 13:59:35 -08001634 reset_ts(ctx, &ctx->tcg->temps[i]);
Richard Henderson5cf32be2021-08-24 08:17:08 -07001635 }
1636 }
1637 }
1638
Richard Hendersonab84dc32023-08-23 23:04:24 -07001639 /* If the function has side effects, reset mem data. */
1640 if (!(flags & TCG_CALL_NO_SIDE_EFFECTS)) {
1641 remove_mem_copy_all(ctx);
1642 }
1643
Richard Henderson5cf32be2021-08-24 08:17:08 -07001644 /* Reset temp data for outputs. */
1645 for (i = 0; i < nb_oargs; i++) {
Richard Henderson986cac12023-01-09 13:59:35 -08001646 reset_temp(ctx, op->args[i]);
Richard Henderson5cf32be2021-08-24 08:17:08 -07001647 }
1648
1649 /* Stop optimizing MB across calls. */
1650 ctx->prev_mb = NULL;
1651 return true;
1652}
1653
Richard Henderson29f65862024-12-09 14:09:49 -06001654static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
1655{
1656 /* Canonicalize the comparison to put immediate second. */
1657 if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
1658 op->args[3] = tcg_swap_cond(op->args[3]);
1659 }
1660 return finish_folding(ctx, op);
1661}
1662
1663static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
1664{
1665 /* If true and false values are the same, eliminate the cmp. */
1666 if (args_are_copies(op->args[3], op->args[4])) {
1667 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
1668 }
1669
1670 /* Canonicalize the comparison to put immediate second. */
1671 if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
1672 op->args[5] = tcg_swap_cond(op->args[5]);
1673 }
1674 /*
1675 * Canonicalize the "false" input reg to match the destination,
1676 * so that the tcg backend can implement "move if true".
1677 */
1678 if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
1679 op->args[5] = tcg_invert_cond(op->args[5]);
1680 }
1681 return finish_folding(ctx, op);
1682}
1683
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001684static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
1685{
Richard Hendersonce1d6632024-12-08 19:47:51 -06001686 uint64_t z_mask, s_mask;
1687 TempOptInfo *t1 = arg_info(op->args[1]);
1688 TempOptInfo *t2 = arg_info(op->args[2]);
Richard Hendersonfae450b2021-08-25 22:42:19 -07001689
Richard Hendersonce1d6632024-12-08 19:47:51 -06001690 if (ti_is_const(t1)) {
1691 uint64_t t = ti_const_val(t1);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001692
1693 if (t != 0) {
Richard Henderson67f84c92021-08-25 08:00:20 -07001694 t = do_constant_folding(op->opc, ctx->type, t, 0);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001695 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1696 }
1697 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
1698 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001699
1700 switch (ctx->type) {
1701 case TCG_TYPE_I32:
1702 z_mask = 31;
1703 break;
1704 case TCG_TYPE_I64:
1705 z_mask = 63;
1706 break;
1707 default:
1708 g_assert_not_reached();
1709 }
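    /*
     * The count is in [0, 31 or 63]; when arg1 is zero the result
     * is a copy of arg2 instead, so merge in arg2's masks.
     */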
Richard Hendersonce1d6632024-12-08 19:47:51 -06001710 s_mask = ~z_mask;
1711 z_mask |= t2->z_mask;
1712 s_mask &= t2->s_mask;
1713
1714 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001715}
1716
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001717static bool fold_ctpop(OptContext *ctx, TCGOp *op)
1718{
Richard Henderson81be07f2024-12-08 19:49:17 -06001719 uint64_t z_mask;
1720
Richard Hendersonfae450b2021-08-25 22:42:19 -07001721 if (fold_const1(ctx, op)) {
1722 return true;
1723 }
1724
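    /*
     * The population count of an N-bit value lies in [0, N],
     * and N | (N - 1) covers every bit such a count can set.
     */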
1725 switch (ctx->type) {
1726 case TCG_TYPE_I32:
Richard Henderson81be07f2024-12-08 19:49:17 -06001727 z_mask = 32 | 31;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001728 break;
1729 case TCG_TYPE_I64:
Richard Henderson81be07f2024-12-08 19:49:17 -06001730 z_mask = 64 | 63;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001731 break;
1732 default:
1733 g_assert_not_reached();
1734 }
Richard Henderson81be07f2024-12-08 19:49:17 -06001735 return fold_masks_z(ctx, op, z_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001736}
1737
Richard Henderson1b1907b2021-08-24 10:47:04 -07001738static bool fold_deposit(OptContext *ctx, TCGOp *op)
1739{
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001740 TempOptInfo *t1 = arg_info(op->args[1]);
1741 TempOptInfo *t2 = arg_info(op->args[2]);
1742 int ofs = op->args[3];
1743 int len = op->args[4];
Richard Hendersonc3b920b2025-01-06 10:32:44 -08001744 int width = 8 * tcg_type_size(ctx->type);
Richard Hendersonedb832c2024-12-19 17:56:05 -08001745 uint64_t z_mask, s_mask;
Richard Henderson8f7a8402023-08-13 11:03:05 -07001746
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001747 if (ti_is_const(t1) && ti_is_const(t2)) {
1748 return tcg_opt_gen_movi(ctx, op, op->args[0],
1749 deposit64(ti_const_val(t1), ofs, len,
1750 ti_const_val(t2)));
Richard Henderson1b1907b2021-08-24 10:47:04 -07001751 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001752
Richard Henderson8f7a8402023-08-13 11:03:05 -07001753 /* Inserting a value into zero at offset 0. */
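    /* E.g. a 16-bit deposit into zero at offset 0 is just x & 0xffff. */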
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001754 if (ti_is_const_val(t1, 0) && ofs == 0) {
1755 uint64_t mask = MAKE_64BIT_MASK(0, len);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001756
Richard Hendersonc3b920b2025-01-06 10:32:44 -08001757 op->opc = INDEX_op_and;
Richard Henderson8f7a8402023-08-13 11:03:05 -07001758 op->args[1] = op->args[2];
Richard Henderson26aac972023-10-23 12:31:57 -07001759 op->args[2] = arg_new_constant(ctx, mask);
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001760 return fold_and(ctx, op);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001761 }
1762
1763 /* Inserting zero into a value. */
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001764 if (ti_is_const_val(t2, 0)) {
1765 uint64_t mask = deposit64(-1, ofs, len, 0);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001766
Richard Hendersonc3b920b2025-01-06 10:32:44 -08001767 op->opc = INDEX_op_and;
Richard Henderson26aac972023-10-23 12:31:57 -07001768 op->args[2] = arg_new_constant(ctx, mask);
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001769 return fold_and(ctx, op);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001770 }
1771
Richard Hendersonedb832c2024-12-19 17:56:05 -08001772 /* The s_mask from the top portion of the deposit is still valid. */
1773 if (ofs + len == width) {
1774 s_mask = t2->s_mask << ofs;
1775 } else {
1776 s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
1777 }
1778
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001779 z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
Richard Hendersonedb832c2024-12-19 17:56:05 -08001780 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson1b1907b2021-08-24 10:47:04 -07001781}
1782
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001783static bool fold_divide(OptContext *ctx, TCGOp *op)
1784{
Richard Henderson2f9d9a32021-10-25 11:30:14 -07001785 if (fold_const2(ctx, op) ||
1786 fold_xi_to_x(ctx, op, 1)) {
1787 return true;
1788 }
Richard Henderson3d5ec802024-12-08 19:59:15 -06001789 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001790}
1791
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001792static bool fold_dup(OptContext *ctx, TCGOp *op)
1793{
1794 if (arg_is_const(op->args[1])) {
1795 uint64_t t = arg_info(op->args[1])->val;
1796 t = dup_const(TCGOP_VECE(op), t);
1797 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1798 }
Richard Hendersone089d692024-12-08 20:00:51 -06001799 return finish_folding(ctx, op);
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001800}
1801
1802static bool fold_dup2(OptContext *ctx, TCGOp *op)
1803{
1804 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
1805 uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
1806 arg_info(op->args[2])->val);
1807 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1808 }
1809
1810 if (args_are_copies(op->args[1], op->args[2])) {
1811 op->opc = INDEX_op_dup_vec;
1812 TCGOP_VECE(op) = MO_32;
1813 }
Richard Hendersone089d692024-12-08 20:00:51 -06001814 return finish_folding(ctx, op);
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001815}
1816
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001817static bool fold_eqv(OptContext *ctx, TCGOp *op)
1818{
Richard Hendersonef6be622024-12-08 20:03:15 -06001819 uint64_t s_mask;
Richard Henderson46c68d72023-11-15 11:51:28 -08001820 TempOptInfo *t1, *t2;
Richard Hendersonef6be622024-12-08 20:03:15 -06001821
Richard Henderson7a2f7082021-08-26 07:06:39 -07001822 if (fold_const2_commutative(ctx, op) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001823 fold_xi_to_x(ctx, op, -1) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001824 fold_xi_to_not(ctx, op, 0)) {
1825 return true;
1826 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07001827
Richard Henderson46c68d72023-11-15 11:51:28 -08001828 t2 = arg_info(op->args[2]);
1829 if (ti_is_const(t2)) {
1830 /* Fold eqv r,x,i to xor r,x,~i. */
1831 switch (ctx->type) {
1832 case TCG_TYPE_I32:
1833 case TCG_TYPE_I64:
1834 op->opc = INDEX_op_xor;
1835 break;
1836 case TCG_TYPE_V64:
1837 case TCG_TYPE_V128:
1838 case TCG_TYPE_V256:
1839 op->opc = INDEX_op_xor_vec;
1840 break;
1841 default:
1842 g_assert_not_reached();
1843 }
1844 op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
1845 return fold_xor(ctx, op);
1846 }
1847
1848 t1 = arg_info(op->args[1]);
1849 s_mask = t1->s_mask & t2->s_mask;
Richard Hendersonef6be622024-12-08 20:03:15 -06001850 return fold_masks_s(ctx, op, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001851}
1852
Richard Hendersonb6617c82021-08-24 10:44:53 -07001853static bool fold_extract(OptContext *ctx, TCGOp *op)
1854{
Richard Hendersonfae450b2021-08-25 22:42:19 -07001855 uint64_t z_mask_old, z_mask;
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001856 TempOptInfo *t1 = arg_info(op->args[1]);
Richard Henderson57fe5c62021-08-26 12:04:46 -07001857 int pos = op->args[2];
1858 int len = op->args[3];
Richard Hendersonfae450b2021-08-25 22:42:19 -07001859
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001860 if (ti_is_const(t1)) {
1861 return tcg_opt_gen_movi(ctx, op, op->args[0],
1862 extract64(ti_const_val(t1), pos, len));
Richard Hendersonb6617c82021-08-24 10:44:53 -07001863 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001864
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001865 z_mask_old = t1->z_mask;
Richard Henderson57fe5c62021-08-26 12:04:46 -07001866 z_mask = extract64(z_mask_old, pos, len);
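    /*
     * At pos 0, an extract merely masks off the high bits; if none
     * of those bits can be set, the operation is a copy.
     */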
Richard Henderson045ace32024-12-19 10:33:51 -08001867 if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
1868 return true;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001869 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001870
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001871 return fold_masks_z(ctx, op, z_mask);
Richard Hendersonb6617c82021-08-24 10:44:53 -07001872}
1873
Richard Hendersondcd08992021-08-24 10:41:39 -07001874static bool fold_extract2(OptContext *ctx, TCGOp *op)
1875{
1876 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
1877 uint64_t v1 = arg_info(op->args[1])->val;
1878 uint64_t v2 = arg_info(op->args[2])->val;
1879 int shr = op->args[3];
1880
1881 if (op->opc == INDEX_op_extract2_i64) {
1882 v1 >>= shr;
1883 v2 <<= 64 - shr;
1884 } else {
1885 v1 = (uint32_t)v1 >> shr;
Richard Henderson225bec02021-11-09 23:17:59 +01001886 v2 = (uint64_t)((int32_t)v2 << (32 - shr));
Richard Hendersondcd08992021-08-24 10:41:39 -07001887 }
1888 return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
1889 }
Richard Hendersonc9df99e2024-12-08 20:06:42 -06001890 return finish_folding(ctx, op);
Richard Hendersondcd08992021-08-24 10:41:39 -07001891}
1892
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001893static bool fold_exts(OptContext *ctx, TCGOp *op)
1894{
Richard Henderson48e8de62024-12-26 12:01:57 -08001895 uint64_t s_mask, z_mask;
Richard Hendersona9621922024-12-08 20:08:46 -06001896 TempOptInfo *t1;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001897
1898 if (fold_const1(ctx, op)) {
1899 return true;
1900 }
1901
Richard Hendersona9621922024-12-08 20:08:46 -06001902 t1 = arg_info(op->args[1]);
1903 z_mask = t1->z_mask;
1904 s_mask = t1->s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001905
1906 switch (op->opc) {
Richard Hendersonfae450b2021-08-25 22:42:19 -07001907 case INDEX_op_ext_i32_i64:
Richard Hendersona9621922024-12-08 20:08:46 -06001908 s_mask |= INT32_MIN;
1909 z_mask = (int32_t)z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001910 break;
1911 default:
1912 g_assert_not_reached();
1913 }
Richard Hendersona9621922024-12-08 20:08:46 -06001914 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001915}
1916
1917static bool fold_extu(OptContext *ctx, TCGOp *op)
1918{
Richard Henderson48e8de62024-12-26 12:01:57 -08001919 uint64_t z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001920
1921 if (fold_const1(ctx, op)) {
1922 return true;
1923 }
1924
Richard Henderson48e8de62024-12-26 12:01:57 -08001925 z_mask = arg_info(op->args[1])->z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001926 switch (op->opc) {
Richard Hendersonfae450b2021-08-25 22:42:19 -07001927 case INDEX_op_extrl_i64_i32:
1928 case INDEX_op_extu_i32_i64:
Richard Hendersonfae450b2021-08-25 22:42:19 -07001929 z_mask = (uint32_t)z_mask;
1930 break;
1931 case INDEX_op_extrh_i64_i32:
Richard Hendersonfae450b2021-08-25 22:42:19 -07001932 z_mask >>= 32;
1933 break;
1934 default:
1935 g_assert_not_reached();
1936 }
Richard Henderson08abe292024-12-08 20:11:44 -06001937 return fold_masks_z(ctx, op, z_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001938}
1939
Richard Henderson3eefdf22021-08-25 11:06:43 -07001940static bool fold_mb(OptContext *ctx, TCGOp *op)
1941{
1942 /* Eliminate duplicate and redundant fence instructions. */
1943 if (ctx->prev_mb) {
1944 /*
1945 * Merge two barriers of the same type into one,
1946 * or a weaker barrier into a stronger one,
1947 * or two weaker barriers into a stronger one.
1948 * mb X; mb Y => mb X|Y
1949 * mb; strl => mb; st
1950 * ldaq; mb => ld; mb
1951 * ldaq; strl => ld; mb; st
1952 * Other combinations are also merged into a strong
 1953 * barrier. This is stricter than specified, but for
 1954 * the purposes of TCG it is better than not optimizing.
1955 */
1956 ctx->prev_mb->args[0] |= op->args[0];
1957 tcg_op_remove(ctx->tcg, op);
1958 } else {
1959 ctx->prev_mb = op;
1960 }
1961 return true;
1962}
1963
Richard Henderson2cfac7f2021-08-25 13:05:43 -07001964static bool fold_mov(OptContext *ctx, TCGOp *op)
1965{
1966 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
1967}
1968
Richard Henderson0c310a32021-08-24 10:37:24 -07001969static bool fold_movcond(OptContext *ctx, TCGOp *op)
1970{
Richard Henderson32202782024-12-08 20:16:38 -06001971 uint64_t z_mask, s_mask;
1972 TempOptInfo *tt, *ft;
Richard Henderson7a2f7082021-08-26 07:06:39 -07001973 int i;
Richard Henderson0c310a32021-08-24 10:37:24 -07001974
Richard Henderson141125e2024-09-06 21:00:10 -07001975 /* If true and false values are the same, eliminate the cmp. */
1976 if (args_are_copies(op->args[3], op->args[4])) {
1977 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
1978 }
1979
Richard Henderson7a2f7082021-08-26 07:06:39 -07001980 /*
1981 * Canonicalize the "false" input reg to match the destination reg so
1982 * that the tcg backend can implement a "move if true" operation.
1983 */
1984 if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
Richard Henderson246c4b72023-10-24 16:36:50 -07001985 op->args[5] = tcg_invert_cond(op->args[5]);
Richard Henderson7a2f7082021-08-26 07:06:39 -07001986 }
1987
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001988 i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07001989 &op->args[2], &op->args[5]);
Richard Henderson0c310a32021-08-24 10:37:24 -07001990 if (i >= 0) {
1991 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
1992 }
1993
Richard Henderson32202782024-12-08 20:16:38 -06001994 tt = arg_info(op->args[3]);
1995 ft = arg_info(op->args[4]);
1996 z_mask = tt->z_mask | ft->z_mask;
1997 s_mask = tt->s_mask & ft->s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001998
Richard Henderson32202782024-12-08 20:16:38 -06001999 if (ti_is_const(tt) && ti_is_const(ft)) {
2000 uint64_t tv = ti_const_val(tt);
2001 uint64_t fv = ti_const_val(ft);
Richard Henderson36355022023-08-04 23:24:04 +00002002 TCGOpcode opc, negopc = 0;
Richard Henderson246c4b72023-10-24 16:36:50 -07002003 TCGCond cond = op->args[5];
Richard Henderson0c310a32021-08-24 10:37:24 -07002004
Richard Henderson67f84c92021-08-25 08:00:20 -07002005 switch (ctx->type) {
2006 case TCG_TYPE_I32:
2007 opc = INDEX_op_setcond_i32;
Richard Henderson36355022023-08-04 23:24:04 +00002008 if (TCG_TARGET_HAS_negsetcond_i32) {
2009 negopc = INDEX_op_negsetcond_i32;
2010 }
2011 tv = (int32_t)tv;
2012 fv = (int32_t)fv;
Richard Henderson67f84c92021-08-25 08:00:20 -07002013 break;
2014 case TCG_TYPE_I64:
2015 opc = INDEX_op_setcond_i64;
Richard Henderson36355022023-08-04 23:24:04 +00002016 if (TCG_TARGET_HAS_negsetcond_i64) {
2017 negopc = INDEX_op_negsetcond_i64;
2018 }
Richard Henderson67f84c92021-08-25 08:00:20 -07002019 break;
2020 default:
2021 g_assert_not_reached();
2022 }
Richard Henderson0c310a32021-08-24 10:37:24 -07002023
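        /* Value pairs {1,0} and {0,1} are setcond; {-1,0} and {0,-1} are negsetcond. */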
2024 if (tv == 1 && fv == 0) {
2025 op->opc = opc;
2026 op->args[3] = cond;
2027 } else if (fv == 1 && tv == 0) {
2028 op->opc = opc;
2029 op->args[3] = tcg_invert_cond(cond);
Richard Henderson36355022023-08-04 23:24:04 +00002030 } else if (negopc) {
2031 if (tv == -1 && fv == 0) {
2032 op->opc = negopc;
2033 op->args[3] = cond;
2034 } else if (fv == -1 && tv == 0) {
2035 op->opc = negopc;
2036 op->args[3] = tcg_invert_cond(cond);
2037 }
Richard Henderson0c310a32021-08-24 10:37:24 -07002038 }
2039 }
Richard Henderson32202782024-12-08 20:16:38 -06002040
2041 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson0c310a32021-08-24 10:37:24 -07002042}
2043
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002044static bool fold_mul(OptContext *ctx, TCGOp *op)
2045{
Richard Hendersone8679952021-08-25 13:19:52 -07002046 if (fold_const2(ctx, op) ||
Richard Henderson5b5cf472021-10-25 11:19:14 -07002047 fold_xi_to_i(ctx, op, 0) ||
2048 fold_xi_to_x(ctx, op, 1)) {
Richard Hendersone8679952021-08-25 13:19:52 -07002049 return true;
2050 }
Richard Hendersoncd9c5832024-12-08 20:18:02 -06002051 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002052}
2053
2054static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
2055{
Richard Henderson7a2f7082021-08-26 07:06:39 -07002056 if (fold_const2_commutative(ctx, op) ||
Richard Hendersone8679952021-08-25 13:19:52 -07002057 fold_xi_to_i(ctx, op, 0)) {
2058 return true;
2059 }
Richard Hendersoncd9c5832024-12-08 20:18:02 -06002060 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002061}
2062
Richard Henderson407112b2021-08-26 06:33:04 -07002063static bool fold_multiply2(OptContext *ctx, TCGOp *op)
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002064{
Richard Henderson7a2f7082021-08-26 07:06:39 -07002065 swap_commutative(op->args[0], &op->args[2], &op->args[3]);
2066
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002067 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
Richard Henderson407112b2021-08-26 06:33:04 -07002068 uint64_t a = arg_info(op->args[2])->val;
2069 uint64_t b = arg_info(op->args[3])->val;
2070 uint64_t h, l;
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002071 TCGArg rl, rh;
Richard Henderson407112b2021-08-26 06:33:04 -07002072 TCGOp *op2;
2073
2074 switch (op->opc) {
2075 case INDEX_op_mulu2_i32:
2076 l = (uint64_t)(uint32_t)a * (uint32_t)b;
2077 h = (int32_t)(l >> 32);
2078 l = (int32_t)l;
2079 break;
2080 case INDEX_op_muls2_i32:
2081 l = (int64_t)(int32_t)a * (int32_t)b;
2082 h = l >> 32;
2083 l = (int32_t)l;
2084 break;
2085 case INDEX_op_mulu2_i64:
2086 mulu64(&l, &h, a, b);
2087 break;
2088 case INDEX_op_muls2_i64:
2089 muls64(&l, &h, a, b);
2090 break;
2091 default:
2092 g_assert_not_reached();
2093 }
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002094
2095 rl = op->args[0];
2096 rh = op->args[1];
Richard Henderson407112b2021-08-26 06:33:04 -07002097
2098 /* The proper opcode is supplied by tcg_opt_gen_mov. */
Richard Hendersona3c1c572025-04-21 11:05:29 -07002099 op2 = opt_insert_before(ctx, op, 0, 2);
Richard Henderson407112b2021-08-26 06:33:04 -07002100
2101 tcg_opt_gen_movi(ctx, op, rl, l);
2102 tcg_opt_gen_movi(ctx, op2, rh, h);
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002103 return true;
2104 }
Richard Hendersoncd9c5832024-12-08 20:18:02 -06002105 return finish_folding(ctx, op);
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002106}
2107
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002108static bool fold_nand(OptContext *ctx, TCGOp *op)
2109{
Richard Hendersonfa3168e2024-12-08 20:20:40 -06002110 uint64_t s_mask;
2111
Richard Henderson7a2f7082021-08-26 07:06:39 -07002112 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002113 fold_xi_to_not(ctx, op, -1)) {
2114 return true;
2115 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002116
Richard Hendersonfa3168e2024-12-08 20:20:40 -06002117 s_mask = arg_info(op->args[1])->s_mask
2118 & arg_info(op->args[2])->s_mask;
2119 return fold_masks_s(ctx, op, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002120}
2121
Richard Hendersone25fe882024-04-04 20:53:50 +00002122static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002123{
Richard Hendersonfae450b2021-08-25 22:42:19 -07002124 /* Set to 1 all bits to the left of the rightmost. */
Richard Hendersone25fe882024-04-04 20:53:50 +00002125 uint64_t z_mask = arg_info(op->args[1])->z_mask;
Richard Hendersond151fd32024-12-08 20:23:11 -06002126 z_mask = -(z_mask & -z_mask);
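    /*
     * If the lowest possibly-set input bit is bit k, then since
     * -x == ~x + 1 the low k bits of -x are also zero, while any
     * bit at or above k may be set.
     */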
Richard Hendersonfae450b2021-08-25 22:42:19 -07002127
Richard Hendersond151fd32024-12-08 20:23:11 -06002128 return fold_masks_z(ctx, op, z_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002129}
2130
Richard Hendersone25fe882024-04-04 20:53:50 +00002131static bool fold_neg(OptContext *ctx, TCGOp *op)
2132{
2133 return fold_const1(ctx, op) || fold_neg_no_const(ctx, op);
2134}
2135
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002136static bool fold_nor(OptContext *ctx, TCGOp *op)
2137{
Richard Henderson2b7b6952024-12-08 20:25:21 -06002138 uint64_t s_mask;
2139
Richard Henderson7a2f7082021-08-26 07:06:39 -07002140 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002141 fold_xi_to_not(ctx, op, 0)) {
2142 return true;
2143 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002144
Richard Henderson2b7b6952024-12-08 20:25:21 -06002145 s_mask = arg_info(op->args[1])->s_mask
2146 & arg_info(op->args[2])->s_mask;
2147 return fold_masks_s(ctx, op, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002148}
2149
2150static bool fold_not(OptContext *ctx, TCGOp *op)
2151{
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002152 if (fold_const1(ctx, op)) {
2153 return true;
2154 }
Richard Henderson608e75f2024-12-08 20:27:02 -06002155 return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002156}
2157
2158static bool fold_or(OptContext *ctx, TCGOp *op)
2159{
Richard Henderson83b1ba32024-12-08 20:28:59 -06002160 uint64_t z_mask, s_mask;
2161 TempOptInfo *t1, *t2;
2162
Richard Henderson7a2f7082021-08-26 07:06:39 -07002163 if (fold_const2_commutative(ctx, op) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002164 fold_xi_to_x(ctx, op, 0) ||
Richard Hendersonca7bb042021-08-25 13:14:21 -07002165 fold_xx_to_x(ctx, op)) {
2166 return true;
2167 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002168
Richard Henderson83b1ba32024-12-08 20:28:59 -06002169 t1 = arg_info(op->args[1]);
2170 t2 = arg_info(op->args[2]);
2171 z_mask = t1->z_mask | t2->z_mask;
2172 s_mask = t1->s_mask & t2->s_mask;
2173 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002174}
2175
2176static bool fold_orc(OptContext *ctx, TCGOp *op)
2177{
Richard Henderson54e26b22024-12-08 20:30:20 -06002178 uint64_t s_mask;
Richard Henderson50e40ec2024-12-10 08:13:10 -06002179 TempOptInfo *t1, *t2;
Richard Henderson54e26b22024-12-08 20:30:20 -06002180
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002181 if (fold_const2(ctx, op) ||
Richard Henderson4e858d92021-08-26 07:31:13 -07002182 fold_xx_to_i(ctx, op, -1) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002183 fold_xi_to_x(ctx, op, -1) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002184 fold_ix_to_not(ctx, op, 0)) {
2185 return true;
2186 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002187
Richard Henderson50e40ec2024-12-10 08:13:10 -06002188 t2 = arg_info(op->args[2]);
2189 if (ti_is_const(t2)) {
2190 /* Fold orc r,x,i to or r,x,~i. */
2191 switch (ctx->type) {
2192 case TCG_TYPE_I32:
2193 case TCG_TYPE_I64:
2194 op->opc = INDEX_op_or;
2195 break;
2196 case TCG_TYPE_V64:
2197 case TCG_TYPE_V128:
2198 case TCG_TYPE_V256:
2199 op->opc = INDEX_op_or_vec;
2200 break;
2201 default:
2202 g_assert_not_reached();
2203 }
2204 op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
2205 return fold_or(ctx, op);
2206 }
2207
2208 t1 = arg_info(op->args[1]);
2209 s_mask = t1->s_mask & t2->s_mask;
Richard Henderson54e26b22024-12-08 20:30:20 -06002210 return fold_masks_s(ctx, op, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002211}
2212
Richard Henderson6813be92024-12-08 20:33:30 -06002213static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
Richard Henderson3eefdf22021-08-25 11:06:43 -07002214{
Richard Hendersonfae450b2021-08-25 22:42:19 -07002215 const TCGOpDef *def = &tcg_op_defs[op->opc];
2216 MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
2217 MemOp mop = get_memop(oi);
2218 int width = 8 * memop_size(mop);
Richard Henderson6813be92024-12-08 20:33:30 -06002219 uint64_t z_mask = -1, s_mask = 0;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002220
Richard Henderson57fe5c62021-08-26 12:04:46 -07002221 if (width < 64) {
Richard Henderson75c3bf32024-12-19 10:50:40 -08002222 if (mop & MO_SIGN) {
Richard Henderson6813be92024-12-08 20:33:30 -06002223 s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
Richard Henderson75c3bf32024-12-19 10:50:40 -08002224 } else {
Richard Henderson6813be92024-12-08 20:33:30 -06002225 z_mask = MAKE_64BIT_MASK(0, width);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002226 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002227 }
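    /*
     * E.g. a signed 8-bit load has s_mask 0xffffffffffffff80 (bits 7
     * and up replicate the sign); an unsigned one has z_mask 0xff.
     */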
2228
Richard Henderson3eefdf22021-08-25 11:06:43 -07002229 /* Opcodes that touch guest memory stop the mb optimization. */
2230 ctx->prev_mb = NULL;
Richard Henderson6813be92024-12-08 20:33:30 -06002231
2232 return fold_masks_zs(ctx, op, z_mask, s_mask);
2233}
2234
2235static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
2236{
2237 /* Opcodes that touch guest memory stop the mb optimization. */
2238 ctx->prev_mb = NULL;
2239 return finish_folding(ctx, op);
Richard Henderson3eefdf22021-08-25 11:06:43 -07002240}
2241
2242static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
2243{
2244 /* Opcodes that touch guest memory stop the mb optimization. */
2245 ctx->prev_mb = NULL;
Richard Henderson082b3ef2024-12-08 20:34:57 -06002246 return true;
Richard Henderson3eefdf22021-08-25 11:06:43 -07002247}
2248
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002249static bool fold_remainder(OptContext *ctx, TCGOp *op)
2250{
Richard Henderson267c17e2021-10-25 11:30:33 -07002251 if (fold_const2(ctx, op) ||
2252 fold_xx_to_i(ctx, op, 0)) {
2253 return true;
2254 }
Richard Hendersonf9e39342024-12-08 20:36:50 -06002255 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002256}
2257
Richard Henderson95eb2292024-12-08 20:47:59 -06002258/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
2259static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
Richard Henderson8d65cda2024-03-26 16:00:40 -10002260{
2261 uint64_t a_zmask, b_val;
2262 TCGCond cond;
2263
2264 if (!arg_is_const(op->args[2])) {
2265 return false;
2266 }
2267
2268 a_zmask = arg_info(op->args[1])->z_mask;
2269 b_val = arg_info(op->args[2])->val;
2270 cond = op->args[3];
2271
2272 if (ctx->type == TCG_TYPE_I32) {
2273 a_zmask = (uint32_t)a_zmask;
2274 b_val = (uint32_t)b_val;
2275 }
2276
2277 /*
2278 * A with only low bits set vs B with high bits set means that A < B.
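     * E.g. a_zmask == 0xff and b_val == 0x100: A is at most 0xff, so
     * A < B always holds and the unsigned comparisons fold to constants.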
2279 */
2280 if (a_zmask < b_val) {
2281 bool inv = false;
2282
2283 switch (cond) {
2284 case TCG_COND_NE:
2285 case TCG_COND_LEU:
2286 case TCG_COND_LTU:
2287 inv = true;
2288 /* fall through */
2289 case TCG_COND_GTU:
2290 case TCG_COND_GEU:
2291 case TCG_COND_EQ:
2292 return tcg_opt_gen_movi(ctx, op, op->args[0], neg ? -inv : inv);
2293 default:
2294 break;
2295 }
2296 }
2297
2298 /*
2299 * A with only lsb set is already boolean.
2300 */
2301 if (a_zmask <= 1) {
2302 bool convert = false;
2303 bool inv = false;
2304
2305 switch (cond) {
2306 case TCG_COND_EQ:
2307 inv = true;
2308 /* fall through */
2309 case TCG_COND_NE:
2310 convert = (b_val == 0);
2311 break;
2312 case TCG_COND_LTU:
2313 case TCG_COND_TSTEQ:
2314 inv = true;
2315 /* fall through */
2316 case TCG_COND_GEU:
2317 case TCG_COND_TSTNE:
2318 convert = (b_val == 1);
2319 break;
2320 default:
2321 break;
2322 }
2323 if (convert) {
Richard Henderson8d65cda2024-03-26 16:00:40 -10002324 if (!inv && !neg) {
2325 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
2326 }
2327
Richard Henderson8d65cda2024-03-26 16:00:40 -10002328 if (!inv) {
Richard Henderson69713582025-01-06 22:48:57 -08002329 op->opc = INDEX_op_neg;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002330 } else if (neg) {
Richard Henderson79602f62025-01-06 09:11:39 -08002331 op->opc = INDEX_op_add;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002332 op->args[2] = arg_new_constant(ctx, -1);
2333 } else {
Richard Hendersonfffd3dc2025-01-06 15:18:35 -08002334 op->opc = INDEX_op_xor;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002335 op->args[2] = arg_new_constant(ctx, 1);
2336 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002337 return -1;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002338 }
2339 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002340 return 0;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002341}
2342
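/*
 * Expand setcond/negsetcond of TSTEQ/TSTNE with a power-of-2 mask
 * into bit isolation: (x >> sh) & 1 via shift+and or [s]extract,
 * plus an inversion or negation as required.
 */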
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002343static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
2344{
Richard Henderson69713582025-01-06 22:48:57 -08002345 TCGOpcode shr_opc;
Paolo Bonziniff202812024-02-28 12:06:41 +01002346 TCGOpcode uext_opc = 0, sext_opc = 0;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002347 TCGCond cond = op->args[3];
2348 TCGArg ret, src1, src2;
2349 TCGOp *op2;
2350 uint64_t val;
2351 int sh;
2352 bool inv;
2353
2354 if (!is_tst_cond(cond) || !arg_is_const(op->args[2])) {
2355 return;
2356 }
2357
2358 src2 = op->args[2];
2359 val = arg_info(src2)->val;
2360 if (!is_power_of_2(val)) {
2361 return;
2362 }
2363 sh = ctz64(val);
2364
2365 switch (ctx->type) {
2366 case TCG_TYPE_I32:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002367 shr_opc = INDEX_op_shr_i32;
Richard Henderson4bce7522024-12-25 18:55:45 -08002368 if (TCG_TARGET_extract_valid(TCG_TYPE_I32, sh, 1)) {
Richard Hendersonc334de12024-12-26 00:43:19 -08002369 uext_opc = INDEX_op_extract_i32;
Richard Henderson4bce7522024-12-25 18:55:45 -08002370 }
2371 if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, sh, 1)) {
Richard Hendersonc334de12024-12-26 00:43:19 -08002372 sext_opc = INDEX_op_sextract_i32;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002373 }
2374 break;
2375 case TCG_TYPE_I64:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002376 shr_opc = INDEX_op_shr_i64;
Richard Henderson4bce7522024-12-25 18:55:45 -08002377 if (TCG_TARGET_extract_valid(TCG_TYPE_I64, sh, 1)) {
Richard Hendersonc334de12024-12-26 00:43:19 -08002378 uext_opc = INDEX_op_extract_i64;
Richard Henderson4bce7522024-12-25 18:55:45 -08002379 }
2380 if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, sh, 1)) {
Richard Hendersonc334de12024-12-26 00:43:19 -08002381 sext_opc = INDEX_op_sextract_i64;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002382 }
2383 break;
2384 default:
2385 g_assert_not_reached();
2386 }
2387
2388 ret = op->args[0];
2389 src1 = op->args[1];
2390 inv = cond == TCG_COND_TSTEQ;
2391
2392 if (sh && sext_opc && neg && !inv) {
2393 op->opc = sext_opc;
2394 op->args[1] = src1;
2395 op->args[2] = sh;
2396 op->args[3] = 1;
2397 return;
2398 } else if (sh && uext_opc) {
2399 op->opc = uext_opc;
2400 op->args[1] = src1;
2401 op->args[2] = sh;
2402 op->args[3] = 1;
2403 } else {
2404 if (sh) {
Richard Hendersona3c1c572025-04-21 11:05:29 -07002405 op2 = opt_insert_before(ctx, op, shr_opc, 3);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002406 op2->args[0] = ret;
2407 op2->args[1] = src1;
2408 op2->args[2] = arg_new_constant(ctx, sh);
2409 src1 = ret;
2410 }
Richard Hendersonc3b920b2025-01-06 10:32:44 -08002411 op->opc = INDEX_op_and;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002412 op->args[1] = src1;
2413 op->args[2] = arg_new_constant(ctx, 1);
2414 }
2415
2416 if (neg && inv) {
Richard Henderson93a9ddb2025-01-06 22:06:08 -08002417 op2 = opt_insert_after(ctx, op, INDEX_op_add, 3);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002418 op2->args[0] = ret;
2419 op2->args[1] = ret;
Richard Henderson93a9ddb2025-01-06 22:06:08 -08002420 op2->args[2] = arg_new_constant(ctx, -1);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002421 } else if (inv) {
Richard Hendersonfffd3dc2025-01-06 15:18:35 -08002422 op2 = opt_insert_after(ctx, op, INDEX_op_xor, 3);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002423 op2->args[0] = ret;
2424 op2->args[1] = ret;
2425 op2->args[2] = arg_new_constant(ctx, 1);
2426 } else if (neg) {
Richard Henderson69713582025-01-06 22:48:57 -08002427 op2 = opt_insert_after(ctx, op, INDEX_op_neg, 2);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002428 op2->args[0] = ret;
2429 op2->args[1] = ret;
2430 }
2431}
2432
Richard Hendersonc63ff552021-08-24 09:35:30 -07002433static bool fold_setcond(OptContext *ctx, TCGOp *op)
2434{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002435 int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07002436 &op->args[2], &op->args[3]);
Richard Hendersonc63ff552021-08-24 09:35:30 -07002437 if (i >= 0) {
2438 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
2439 }
Richard Henderson8d65cda2024-03-26 16:00:40 -10002440
Richard Henderson95eb2292024-12-08 20:47:59 -06002441 i = fold_setcond_zmask(ctx, op, false);
2442 if (i > 0) {
Richard Henderson8d65cda2024-03-26 16:00:40 -10002443 return true;
2444 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002445 if (i == 0) {
2446 fold_setcond_tst_pow2(ctx, op, false);
2447 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002448
Richard Henderson2c8a2832024-12-08 20:50:37 -06002449 return fold_masks_z(ctx, op, 1);
Richard Hendersonc63ff552021-08-24 09:35:30 -07002450}
2451
Richard Henderson36355022023-08-04 23:24:04 +00002452static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
2453{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002454 int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07002455 &op->args[2], &op->args[3]);
Richard Henderson36355022023-08-04 23:24:04 +00002456 if (i >= 0) {
2457 return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
2458 }
Richard Henderson8d65cda2024-03-26 16:00:40 -10002459
Richard Henderson95eb2292024-12-08 20:47:59 -06002460 i = fold_setcond_zmask(ctx, op, true);
2461 if (i > 0) {
Richard Henderson8d65cda2024-03-26 16:00:40 -10002462 return true;
2463 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002464 if (i == 0) {
2465 fold_setcond_tst_pow2(ctx, op, true);
2466 }
Richard Henderson36355022023-08-04 23:24:04 +00002467
2468 /* Value is {0,-1} so all bits are repetitions of the sign. */
Richard Henderson081cf082024-12-08 20:50:58 -06002469 return fold_masks_s(ctx, op, -1);
Richard Henderson36355022023-08-04 23:24:04 +00002470}
2471
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002472static bool fold_setcond2(OptContext *ctx, TCGOp *op)
2473{
Richard Henderson7e64b112023-10-24 16:53:56 -07002474 TCGCond cond;
Richard Henderson7a2f7082021-08-26 07:06:39 -07002475 int i, inv = 0;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002476
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002477 i = do_constant_folding_cond2(ctx, op, &op->args[1]);
Richard Henderson7e64b112023-10-24 16:53:56 -07002478 cond = op->args[5];
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002479 if (i >= 0) {
2480 goto do_setcond_const;
2481 }
2482
2483 switch (cond) {
2484 case TCG_COND_LT:
2485 case TCG_COND_GE:
2486 /*
2487 * Simplify LT/GE comparisons vs zero to a single compare
2488 * vs the high word of the input.
2489 */
Richard Henderson27cdb852023-10-23 11:38:00 -07002490 if (arg_is_const_val(op->args[3], 0) &&
2491 arg_is_const_val(op->args[4], 0)) {
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002492 goto do_setcond_high;
2493 }
2494 break;
2495
2496 case TCG_COND_NE:
2497 inv = 1;
2498 QEMU_FALLTHROUGH;
2499 case TCG_COND_EQ:
2500 /*
2501 * Simplify EQ/NE comparisons where one of the pairs
2502 * can be simplified.
2503 */
Richard Henderson67f84c92021-08-25 08:00:20 -07002504 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002505 op->args[3], cond);
2506 switch (i ^ inv) {
2507 case 0:
2508 goto do_setcond_const;
2509 case 1:
2510 goto do_setcond_high;
2511 }
2512
Richard Henderson67f84c92021-08-25 08:00:20 -07002513 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002514 op->args[4], cond);
2515 switch (i ^ inv) {
2516 case 0:
2517 goto do_setcond_const;
2518 case 1:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002519 goto do_setcond_low;
2520 }
2521 break;
2522
2523 case TCG_COND_TSTEQ:
2524 case TCG_COND_TSTNE:
Richard Hendersona71d9df2024-06-30 19:46:23 -07002525 if (arg_is_const_val(op->args[3], 0)) {
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002526 goto do_setcond_high;
2527 }
2528 if (arg_is_const_val(op->args[4], 0)) {
2529 goto do_setcond_low;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002530 }
2531 break;
2532
2533 default:
2534 break;
2535
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002536 do_setcond_low:
2537 op->args[2] = op->args[3];
2538 op->args[3] = cond;
2539 op->opc = INDEX_op_setcond_i32;
2540 return fold_setcond(ctx, op);
2541
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002542 do_setcond_high:
2543 op->args[1] = op->args[2];
2544 op->args[2] = op->args[4];
2545 op->args[3] = cond;
2546 op->opc = INDEX_op_setcond_i32;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002547 return fold_setcond(ctx, op);
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002548 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002549
Richard Hendersona53502c2024-12-08 20:56:36 -06002550 return fold_masks_z(ctx, op, 1);
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002551
2552 do_setcond_const:
2553 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
2554}
2555
Richard Hendersonb6617c82021-08-24 10:44:53 -07002556static bool fold_sextract(OptContext *ctx, TCGOp *op)
2557{
Richard Henderson57fe5c62021-08-26 12:04:46 -07002558 uint64_t z_mask, s_mask, s_mask_old;
Richard Hendersonbaff5072024-12-08 21:09:30 -06002559 TempOptInfo *t1 = arg_info(op->args[1]);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002560 int pos = op->args[2];
2561 int len = op->args[3];
Richard Hendersonfae450b2021-08-25 22:42:19 -07002562
Richard Hendersonbaff5072024-12-08 21:09:30 -06002563 if (ti_is_const(t1)) {
2564 return tcg_opt_gen_movi(ctx, op, op->args[0],
2565 sextract64(ti_const_val(t1), pos, len));
Richard Hendersonb6617c82021-08-24 10:44:53 -07002566 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002567
Richard Hendersonbaff5072024-12-08 21:09:30 -06002568 s_mask_old = t1->s_mask;
2569 s_mask = s_mask_old >> pos;
2570 s_mask |= -1ull << (len - 1);
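    /* All bits from len - 1 upward repeat the extracted sign bit. */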
Richard Henderson57fe5c62021-08-26 12:04:46 -07002571
Richard Hendersonaa9e0502024-12-21 22:03:53 -08002572 if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
Richard Henderson045ace32024-12-19 10:33:51 -08002573 return true;
Richard Henderson57fe5c62021-08-26 12:04:46 -07002574 }
2575
Richard Hendersonbaff5072024-12-08 21:09:30 -06002576 z_mask = sextract64(t1->z_mask, pos, len);
2577 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Hendersonb6617c82021-08-24 10:44:53 -07002578}
2579
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002580static bool fold_shift(OptContext *ctx, TCGOp *op)
2581{
Richard Henderson4ed2ba32024-12-19 19:38:54 -08002582 uint64_t s_mask, z_mask;
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002583 TempOptInfo *t1, *t2;
Richard Henderson93a967f2021-08-26 13:24:59 -07002584
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002585 if (fold_const2(ctx, op) ||
Richard Hendersonda48e272021-08-25 20:42:04 -07002586 fold_ix_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002587 fold_xi_to_x(ctx, op, 0)) {
2588 return true;
2589 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002590
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002591 t1 = arg_info(op->args[1]);
2592 t2 = arg_info(op->args[2]);
2593 s_mask = t1->s_mask;
2594 z_mask = t1->z_mask;
Richard Henderson93a967f2021-08-26 13:24:59 -07002595
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002596 if (ti_is_const(t2)) {
2597 int sh = ti_const_val(t2);
Richard Henderson93a967f2021-08-26 13:24:59 -07002598
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002599 z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
Richard Henderson93a967f2021-08-26 13:24:59 -07002600 s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
Richard Henderson93a967f2021-08-26 13:24:59 -07002601
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002602 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002603 }
Richard Henderson93a967f2021-08-26 13:24:59 -07002604
2605 switch (op->opc) {
2606 CASE_OP_32_64(sar):
2607 /*
2608 * Arithmetic right shift will not reduce the number of
2609 * input sign repetitions.
2610 */
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002611 return fold_masks_s(ctx, op, s_mask);
Richard Henderson93a967f2021-08-26 13:24:59 -07002612 CASE_OP_32_64(shr):
2613 /*
2614 * If the sign bit is known zero, then logical right shift
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002615 * will not reduce the number of input sign repetitions.
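         * Since s_mask is a run of high bits, -s_mask isolates its
         * lowest bit; that bit being known zero means the sign is zero.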
Richard Henderson93a967f2021-08-26 13:24:59 -07002616 */
Richard Henderson4ed2ba32024-12-19 19:38:54 -08002617 if (~z_mask & -s_mask) {
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002618 return fold_masks_s(ctx, op, s_mask);
Richard Henderson93a967f2021-08-26 13:24:59 -07002619 }
2620 break;
2621 default:
2622 break;
2623 }
2624
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002625 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002626}
2627
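/* Fold sub r,0,x to neg r,x, when a suitable neg opcode is available. */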
Richard Henderson9caca882021-08-24 13:30:32 -07002628static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
2629{
2630 TCGOpcode neg_op;
2631 bool have_neg;
2632
2633 if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
2634 return false;
2635 }
2636
2637 switch (ctx->type) {
2638 case TCG_TYPE_I32:
Richard Henderson9caca882021-08-24 13:30:32 -07002639 case TCG_TYPE_I64:
Richard Henderson69713582025-01-06 22:48:57 -08002640 neg_op = INDEX_op_neg;
Richard Hendersonb701f192023-10-25 21:14:04 -07002641 have_neg = true;
Richard Henderson9caca882021-08-24 13:30:32 -07002642 break;
2643 case TCG_TYPE_V64:
2644 case TCG_TYPE_V128:
2645 case TCG_TYPE_V256:
2646 neg_op = INDEX_op_neg_vec;
2647 have_neg = (TCG_TARGET_HAS_neg_vec &&
2648 tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
2649 break;
2650 default:
2651 g_assert_not_reached();
2652 }
2653 if (have_neg) {
2654 op->opc = neg_op;
2655 op->args[1] = op->args[2];
Richard Hendersone25fe882024-04-04 20:53:50 +00002656 return fold_neg_no_const(ctx, op);
Richard Henderson9caca882021-08-24 13:30:32 -07002657 }
2658 return false;
2659}
2660
Richard Hendersonc578ff12021-12-16 06:07:25 -08002661/* We cannot as yet do_constant_folding with vectors. */
2662static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002663{
Richard Hendersonc578ff12021-12-16 06:07:25 -08002664 if (fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002665 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson9caca882021-08-24 13:30:32 -07002666 fold_sub_to_neg(ctx, op)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07002667 return true;
2668 }
Richard Hendersonfe1d0072024-12-08 21:15:22 -06002669 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002670}
2671
Richard Hendersonc578ff12021-12-16 06:07:25 -08002672static bool fold_sub(OptContext *ctx, TCGOp *op)
2673{
Richard Hendersonfe1d0072024-12-08 21:15:22 -06002674 if (fold_const2(ctx, op) ||
2675 fold_xx_to_i(ctx, op, 0) ||
2676 fold_xi_to_x(ctx, op, 0) ||
2677 fold_sub_to_neg(ctx, op)) {
Richard Henderson6334a962023-10-25 18:39:43 -07002678 return true;
2679 }
2680
2681 /* Fold sub r,x,i to add r,x,-i */
2682 if (arg_is_const(op->args[2])) {
2683 uint64_t val = arg_info(op->args[2])->val;
2684
Richard Henderson79602f62025-01-06 09:11:39 -08002685 op->opc = INDEX_op_add;
Richard Henderson6334a962023-10-25 18:39:43 -07002686 op->args[2] = arg_new_constant(ctx, -val);
2687 }
Richard Hendersonfe1d0072024-12-08 21:15:22 -06002688 return finish_folding(ctx, op);
Richard Hendersonc578ff12021-12-16 06:07:25 -08002689}
2690
Richard Henderson9531c072021-08-26 06:51:39 -07002691static bool fold_sub2(OptContext *ctx, TCGOp *op)
Richard Hendersone3f7dc22021-08-24 10:30:38 -07002692{
Richard Henderson9531c072021-08-26 06:51:39 -07002693 return fold_addsub2(ctx, op, false);
Richard Hendersone3f7dc22021-08-24 10:30:38 -07002694}

static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask = -1, s_mask = 0;

    /* We can't do any folding with a load, but we can record bits. */
    switch (op->opc) {
    CASE_OP_32_64(ld8s):
        s_mask = INT8_MIN;
        break;
    CASE_OP_32_64(ld8u):
        z_mask = MAKE_64BIT_MASK(0, 8);
        break;
    CASE_OP_32_64(ld16s):
        s_mask = INT16_MIN;
        break;
    CASE_OP_32_64(ld16u):
        z_mask = MAKE_64BIT_MASK(0, 16);
        break;
    case INDEX_op_ld32s_i64:
        s_mask = INT32_MIN;
        break;
    case INDEX_op_ld32u_i64:
        z_mask = MAKE_64BIT_MASK(0, 32);
        break;
    default:
        g_assert_not_reached();
    }
    return fold_masks_zs(ctx, op, z_mask, s_mask);
}
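
/*
 * For example: ld8u can produce nonzero bits only in 0..7, hence
 * z_mask = 0xff; ld8s sign-extends, so bits 7..63 of the result all
 * replicate the msb, hence s_mask = INT8_MIN (bits 7..63 set).
 */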

static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
{
    TCGTemp *dst, *src;
    intptr_t ofs;
    TCGType type;

    if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
        return finish_folding(ctx, op);
    }

    type = ctx->type;
    ofs = op->args[2];
    dst = arg_temp(op->args[0]);
    src = find_mem_copy_for(ctx, type, ofs);
    if (src && src->base_type == type) {
        return tcg_opt_gen_mov(ctx, op, temp_arg(dst), temp_arg(src));
    }

    reset_ts(ctx, dst);
    record_mem_copy(ctx, type, dst, ofs, ofs + tcg_type_size(type) - 1);
    return true;
}
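
/*
 * Example of the tracking above, with illustrative temps and offset:
 * once "st_i32 t1, env, $0x10" has been recorded, a subsequent
 * "ld_i32 t2, env, $0x10" of the same type folds to "mov t2, t1".
 */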

static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
{
    intptr_t ofs = op->args[2];
    intptr_t lm1;

    if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
        remove_mem_copy_all(ctx);
        return true;
    }

    switch (op->opc) {
    CASE_OP_32_64(st8):
        lm1 = 0;
        break;
    CASE_OP_32_64(st16):
        lm1 = 1;
        break;
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
        lm1 = 3;
        break;
    case INDEX_op_st_i64:
        lm1 = 7;
        break;
    case INDEX_op_st_vec:
        lm1 = tcg_type_size(ctx->type) - 1;
        break;
    default:
        g_assert_not_reached();
    }
    remove_mem_copy_in(ctx, ofs, ofs + lm1);
    return true;
}
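
/*
 * lm1 above is the length of the access minus 1.  A store through a
 * base other than env may alias any tracked slot, which is why the
 * whole record is discarded there instead of a byte range.
 */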

static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
{
    TCGTemp *src;
    intptr_t ofs, last;
    TCGType type;

    if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
        return fold_tcg_st(ctx, op);
    }

    src = arg_temp(op->args[0]);
    ofs = op->args[2];
    type = ctx->type;

    /*
     * Eliminate duplicate stores of a constant.
     * This happens frequently when the target ISA zero-extends.
     */
    if (ts_is_const(src)) {
        TCGTemp *prev = find_mem_copy_for(ctx, type, ofs);
        if (src == prev) {
            tcg_op_remove(ctx->tcg, op);
            return true;
        }
    }

    last = ofs + tcg_type_size(type) - 1;
    remove_mem_copy_in(ctx, ofs, last);
    record_mem_copy(ctx, type, src, ofs, last);
    return true;
}
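
/*
 * For example, a guest that zero-extends 32-bit results may store
 * constant zero into the same high-part slot repeatedly; the later
 * duplicate stores are removed above.
 */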

static bool fold_xor(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, s_mask;
    TempOptInfo *t1, *t2;

    if (fold_const2_commutative(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_xi_to_not(ctx, op, -1)) {
        return true;
    }

    t1 = arg_info(op->args[1]);
    t2 = arg_info(op->args[2]);
    z_mask = t1->z_mask | t2->z_mask;
    s_mask = t1->s_mask & t2->s_mask;
    return fold_masks_zs(ctx, op, z_mask, s_mask);
}
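
/*
 * Mask derivation for xor: a result bit can be nonzero only where at
 * least one input bit can be, so the z_masks are OR'ed together; a
 * bit keeps replicating the msb only if it does so in both inputs,
 * so the s_masks are AND'ed.
 */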

/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
    int nb_temps, i;
    TCGOp *op, *op_next;
    OptContext ctx = { .tcg = s };

    QSIMPLEQ_INIT(&ctx.mem_free);

    /*
     * Each temp has a TempOptInfo hung off state_ptr, allocated on
     * demand.  If a temp holds a constant, its value is kept in the
     * info; if it is a copy of other temps, the other copies are
     * available through the info's doubly linked circular list.
     */

    nb_temps = s->nb_temps;
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = NULL;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def;
        bool done = false;

        /* Calls are special. */
        if (opc == INDEX_op_call) {
            fold_call(&ctx, op);
            continue;
        }

        def = &tcg_op_defs[opc];
        init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
        copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);

        /* Pre-compute the type of the operation. */
        ctx.type = TCGOP_TYPE(op);

        /*
         * Process each opcode.
         * Sorted alphabetically by opcode as much as possible.
         */
        switch (opc) {
        case INDEX_op_add:
            done = fold_add(&ctx, op);
            break;
        case INDEX_op_add_vec:
            done = fold_add_vec(&ctx, op);
            break;
        CASE_OP_32_64(add2):
            done = fold_add2(&ctx, op);
            break;
        case INDEX_op_and:
        case INDEX_op_and_vec:
            done = fold_and(&ctx, op);
            break;
        case INDEX_op_andc:
        case INDEX_op_andc_vec:
            done = fold_andc(&ctx, op);
            break;
        CASE_OP_32_64(brcond):
            done = fold_brcond(&ctx, op);
            break;
        case INDEX_op_brcond2_i32:
            done = fold_brcond2(&ctx, op);
            break;
        CASE_OP_32_64(bswap16):
        CASE_OP_32_64(bswap32):
        case INDEX_op_bswap64_i64:
            done = fold_bswap(&ctx, op);
            break;
        CASE_OP_32_64(clz):
        CASE_OP_32_64(ctz):
            done = fold_count_zeros(&ctx, op);
            break;
        CASE_OP_32_64(ctpop):
            done = fold_ctpop(&ctx, op);
            break;
        CASE_OP_32_64(deposit):
            done = fold_deposit(&ctx, op);
            break;
        case INDEX_op_divs:
        case INDEX_op_divu:
            done = fold_divide(&ctx, op);
            break;
        case INDEX_op_dup_vec:
            done = fold_dup(&ctx, op);
            break;
        case INDEX_op_dup2_vec:
            done = fold_dup2(&ctx, op);
            break;
        case INDEX_op_eqv:
        case INDEX_op_eqv_vec:
            done = fold_eqv(&ctx, op);
            break;
        CASE_OP_32_64(extract):
            done = fold_extract(&ctx, op);
            break;
        CASE_OP_32_64(extract2):
            done = fold_extract2(&ctx, op);
            break;
        case INDEX_op_ext_i32_i64:
            done = fold_exts(&ctx, op);
            break;
        case INDEX_op_extu_i32_i64:
        case INDEX_op_extrl_i64_i32:
        case INDEX_op_extrh_i64_i32:
            done = fold_extu(&ctx, op);
            break;
        CASE_OP_32_64(ld8s):
        CASE_OP_32_64(ld8u):
        CASE_OP_32_64(ld16s):
        CASE_OP_32_64(ld16u):
        case INDEX_op_ld32s_i64:
        case INDEX_op_ld32u_i64:
            done = fold_tcg_ld(&ctx, op);
            break;
        case INDEX_op_ld_i32:
        case INDEX_op_ld_i64:
        case INDEX_op_ld_vec:
            done = fold_tcg_ld_memcopy(&ctx, op);
            break;
        CASE_OP_32_64(st8):
        CASE_OP_32_64(st16):
        case INDEX_op_st32_i64:
            done = fold_tcg_st(&ctx, op);
            break;
        case INDEX_op_st_i32:
        case INDEX_op_st_i64:
        case INDEX_op_st_vec:
            done = fold_tcg_st_memcopy(&ctx, op);
            break;
        case INDEX_op_mb:
            done = fold_mb(&ctx, op);
            break;
        case INDEX_op_mov:
        case INDEX_op_mov_vec:
            done = fold_mov(&ctx, op);
            break;
        CASE_OP_32_64(movcond):
            done = fold_movcond(&ctx, op);
            break;
        case INDEX_op_mul:
            done = fold_mul(&ctx, op);
            break;
        case INDEX_op_mulsh:
        case INDEX_op_muluh:
            done = fold_mul_highpart(&ctx, op);
            break;
        CASE_OP_32_64(muls2):
        CASE_OP_32_64(mulu2):
            done = fold_multiply2(&ctx, op);
            break;
        case INDEX_op_nand:
        case INDEX_op_nand_vec:
            done = fold_nand(&ctx, op);
            break;
        case INDEX_op_neg:
            done = fold_neg(&ctx, op);
            break;
        case INDEX_op_nor:
        case INDEX_op_nor_vec:
            done = fold_nor(&ctx, op);
            break;
        case INDEX_op_not:
        case INDEX_op_not_vec:
            done = fold_not(&ctx, op);
            break;
        case INDEX_op_or:
        case INDEX_op_or_vec:
            done = fold_or(&ctx, op);
            break;
        case INDEX_op_orc:
        case INDEX_op_orc_vec:
            done = fold_orc(&ctx, op);
            break;
        case INDEX_op_qemu_ld_i32:
            done = fold_qemu_ld_1reg(&ctx, op);
            break;
        case INDEX_op_qemu_ld_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                done = fold_qemu_ld_1reg(&ctx, op);
                break;
            }
            QEMU_FALLTHROUGH;
        case INDEX_op_qemu_ld_i128:
            done = fold_qemu_ld_2reg(&ctx, op);
            break;
        case INDEX_op_qemu_st8_i32:
        case INDEX_op_qemu_st_i32:
        case INDEX_op_qemu_st_i64:
        case INDEX_op_qemu_st_i128:
            done = fold_qemu_st(&ctx, op);
            break;
        case INDEX_op_rems:
        case INDEX_op_remu:
            done = fold_remainder(&ctx, op);
            break;
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
            done = fold_shift(&ctx, op);
            break;
        CASE_OP_32_64(setcond):
            done = fold_setcond(&ctx, op);
            break;
        CASE_OP_32_64(negsetcond):
            done = fold_negsetcond(&ctx, op);
            break;
        case INDEX_op_setcond2_i32:
            done = fold_setcond2(&ctx, op);
            break;
        case INDEX_op_cmp_vec:
            done = fold_cmp_vec(&ctx, op);
            break;
        case INDEX_op_cmpsel_vec:
            done = fold_cmpsel_vec(&ctx, op);
            break;
        case INDEX_op_bitsel_vec:
            done = fold_bitsel_vec(&ctx, op);
            break;
        CASE_OP_32_64(sextract):
            done = fold_sextract(&ctx, op);
            break;
        case INDEX_op_sub:
            done = fold_sub(&ctx, op);
            break;
        case INDEX_op_sub_vec:
            done = fold_sub_vec(&ctx, op);
            break;
        CASE_OP_32_64(sub2):
            done = fold_sub2(&ctx, op);
            break;
        case INDEX_op_xor:
        case INDEX_op_xor_vec:
            done = fold_xor(&ctx, op);
            break;
        case INDEX_op_set_label:
        case INDEX_op_br:
        case INDEX_op_exit_tb:
        case INDEX_op_goto_tb:
        case INDEX_op_goto_ptr:
            finish_ebb(&ctx);
            done = true;
            break;
        default:
            done = finish_folding(&ctx, op);
            break;
        }
        tcg_debug_assert(done);
    }
}