/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "qemu/interval-tree.h"
#include "tcg/tcg-op-common.h"
#include "tcg-internal.h"
#include "tcg-has.h"

#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)

typedef struct MemCopyInfo {
    IntervalTreeNode itree;
    QSIMPLEQ_ENTRY(MemCopyInfo) next;
    TCGTemp *ts;
    TCGType type;
} MemCopyInfo;

typedef struct TempOptInfo {
    bool is_const;
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
    uint64_t val;
    uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
} TempOptInfo;
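/*
 * Illustrative example of the two masks: for the constant 0x00ff,
 * z_mask is 0x00ff (every bit above bit 7 is known to be zero) and
 * s_mask is 0xffffffffffffff00 (bits 63..8 all match the msb, here 0).
 * For a temp about which nothing is known, z_mask is -1 and s_mask is 0.
 */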

typedef struct OptContext {
    TCGContext *tcg;
    TCGOp *prev_mb;
    TCGTempSet temps_used;

    IntervalTreeRoot mem_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;

    /* In flight values from optimization. */
    TCGType type;
} OptContext;

static inline TempOptInfo *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline TempOptInfo *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ti_is_const(TempOptInfo *ti)
{
    return ti->is_const;
}

static inline uint64_t ti_const_val(TempOptInfo *ti)
{
    return ti->val;
}

static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
{
    return ti_is_const(ti) && ti_const_val(ti) == val;
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ti_is_const(ts_info(ts));
}

static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
{
    return ti_is_const_val(ts_info(ts), val);
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline bool arg_is_const_val(TCGArg arg, uint64_t val)
{
    return ts_is_const_val(arg_temp(arg), val);
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}

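/*
 * Prefer the copy with the "larger" kind: TCGTempKind is ordered
 * so that longer-lived temps (globals, fixed regs) and constants
 * compare higher than EBB/TB temporaries, making them the more
 * useful canonical representative of a copy set.
 */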
static TCGTemp *cmp_better_copy(TCGTemp *a, TCGTemp *b)
{
    return a->kind < b->kind ? b : a;
}

/* Initialize and activate a temporary. */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    TempOptInfo *ti;

    if (test_bit(idx, ctx->temps_used.l)) {
        return;
    }
    set_bit(idx, ctx->temps_used.l);

    ti = ts->state_ptr;
    if (ti == NULL) {
        ti = tcg_malloc(sizeof(TempOptInfo));
        ts->state_ptr = ti;
    }

    ti->next_copy = ts;
    ti->prev_copy = ts;
    QSIMPLEQ_INIT(&ti->mem_copy);
    if (ts->kind == TEMP_CONST) {
        ti->is_const = true;
        ti->val = ts->val;
        ti->z_mask = ts->val;
        ti->s_mask = INT64_MIN >> clrsb64(ts->val);
    } else {
        ti->is_const = false;
        ti->z_mask = -1;
        ti->s_mask = 0;
    }
}

static MemCopyInfo *mem_copy_first(OptContext *ctx, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_first(&ctx->mem_copy, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static MemCopyInfo *mem_copy_next(MemCopyInfo *mem, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_next(&mem->itree, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static void remove_mem_copy(OptContext *ctx, MemCopyInfo *mc)
{
    TCGTemp *ts = mc->ts;
    TempOptInfo *ti = ts_info(ts);

    interval_tree_remove(&mc->itree, &ctx->mem_copy);
    QSIMPLEQ_REMOVE(&ti->mem_copy, mc, MemCopyInfo, next);
    QSIMPLEQ_INSERT_TAIL(&ctx->mem_free, mc, next);
}

static void remove_mem_copy_in(OptContext *ctx, intptr_t s, intptr_t l)
{
    while (true) {
        MemCopyInfo *mc = mem_copy_first(ctx, s, l);
        if (!mc) {
            break;
        }
        remove_mem_copy(ctx, mc);
    }
}

static void remove_mem_copy_all(OptContext *ctx)
{
    remove_mem_copy_in(ctx, 0, -1);
    tcg_debug_assert(interval_tree_is_empty(&ctx->mem_copy));
}
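/*
 * The (0, -1) range above spans the entire address space: the
 * interval tree treats its bounds as unsigned and "last" is
 * inclusive, so every recorded memory copy is removed.
 */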

static TCGTemp *find_better_copy(TCGTemp *ts)
{
    TCGTemp *i, *ret;

    /* If this is already readonly, we can't do better. */
    if (temp_readonly(ts)) {
        return ts;
    }

    ret = ts;
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        ret = cmp_better_copy(ret, i);
    }
    return ret;
}

static void move_mem_copies(TCGTemp *dst_ts, TCGTemp *src_ts)
{
    TempOptInfo *si = ts_info(src_ts);
    TempOptInfo *di = ts_info(dst_ts);
    MemCopyInfo *mc;

    QSIMPLEQ_FOREACH(mc, &si->mem_copy, next) {
        tcg_debug_assert(mc->ts == src_ts);
        mc->ts = dst_ts;
    }
    QSIMPLEQ_CONCAT(&di->mem_copy, &si->mem_copy);
}

/* Reset TEMP's state, possibly removing the temp from the list of copies. */
static void reset_ts(OptContext *ctx, TCGTemp *ts)
{
    TempOptInfo *ti = ts_info(ts);
    TCGTemp *pts = ti->prev_copy;
    TCGTemp *nts = ti->next_copy;
    TempOptInfo *pi = ts_info(pts);
    TempOptInfo *ni = ts_info(nts);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->z_mask = -1;
    ti->s_mask = 0;

    if (!QSIMPLEQ_EMPTY(&ti->mem_copy)) {
        if (ts == nts) {
            /* Last temp copy being removed, the mem copies die. */
            MemCopyInfo *mc;
            QSIMPLEQ_FOREACH(mc, &ti->mem_copy, next) {
                interval_tree_remove(&mc->itree, &ctx->mem_copy);
            }
            QSIMPLEQ_CONCAT(&ctx->mem_free, &ti->mem_copy);
        } else {
            move_mem_copies(find_better_copy(nts), ts);
        }
    }
}

static void reset_temp(OptContext *ctx, TCGArg arg)
{
    reset_ts(ctx, arg_temp(arg));
}

static void record_mem_copy(OptContext *ctx, TCGType type,
                            TCGTemp *ts, intptr_t start, intptr_t last)
{
    MemCopyInfo *mc;
    TempOptInfo *ti;

    mc = QSIMPLEQ_FIRST(&ctx->mem_free);
    if (mc) {
        QSIMPLEQ_REMOVE_HEAD(&ctx->mem_free, next);
    } else {
        mc = tcg_malloc(sizeof(*mc));
    }

    memset(mc, 0, sizeof(*mc));
    mc->itree.start = start;
    mc->itree.last = last;
    mc->type = type;
    interval_tree_insert(&mc->itree, &ctx->mem_copy);

    ts = find_better_copy(ts);
    ti = ts_info(ts);
    mc->ts = ts;
    QSIMPLEQ_INSERT_TAIL(&ti->mem_copy, mc, next);
}

static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}

static TCGTemp *find_mem_copy_for(OptContext *ctx, TCGType type, intptr_t s)
{
    MemCopyInfo *mc;

    for (mc = mem_copy_first(ctx, s, s); mc; mc = mem_copy_next(mc, s, s)) {
        if (mc->itree.start == s && mc->type == type) {
            return find_better_copy(mc->ts);
        }
    }
    return NULL;
}

static TCGArg arg_new_constant(OptContext *ctx, uint64_t val)
{
    TCGType type = ctx->type;
    TCGTemp *ts;

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    ts = tcg_constant_internal(type, val);
    init_ts_info(ctx, ts);

    return temp_arg(ts);
}
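/*
 * Note the sign-extension above: TCG_TYPE_I32 constants are
 * canonicalized as sign-extended 64-bit values, matching how 32-bit
 * results are represented elsewhere in this file (see the comment
 * in fold_masks_zs).
 */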

static TCGArg arg_new_temp(OptContext *ctx)
{
    TCGTemp *ts = tcg_temp_new_internal(ctx->type, TEMP_EBB);
    init_ts_info(ctx, ts);
    return temp_arg(ts);
}

static TCGOp *opt_insert_after(OptContext *ctx, TCGOp *op,
                               TCGOpcode opc, unsigned narg)
{
    return tcg_op_insert_after(ctx->tcg, op, opc, ctx->type, narg);
}

static TCGOp *opt_insert_before(OptContext *ctx, TCGOp *op,
                                TCGOpcode opc, unsigned narg)
{
    return tcg_op_insert_before(ctx->tcg, op, opc, ctx->type, narg);
}

static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    TempOptInfo *di;
    TempOptInfo *si;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }

    reset_ts(ctx, dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);

    switch (ctx->type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        new_op = INDEX_op_mov;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /* TCGOP_TYPE and TCGOP_VECE remain unchanged. */
        new_op = INDEX_op_mov_vec;
        break;
    default:
        g_assert_not_reached();
    }
    op->opc = new_op;
    op->args[0] = dst;
    op->args[1] = src;

    di->z_mask = si->z_mask;
    di->s_mask = si->s_mask;

    if (src_ts->type == dst_ts->type) {
        TempOptInfo *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;

        if (!QSIMPLEQ_EMPTY(&si->mem_copy)
            && cmp_better_copy(src_ts, dst_ts) == dst_ts) {
            move_mem_copies(dst_ts, src_ts);
        }
    }
    return true;
}

static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                             TCGArg dst, uint64_t val)
{
    /* Convert movi to mov with constant temp. */
    return tcg_opt_gen_mov(ctx, op, dst, arg_new_constant(ctx, val));
}

static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

    switch (op) {
    case INDEX_op_add:
        return x + y;

    case INDEX_op_sub:
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    case INDEX_op_and:
    case INDEX_op_and_vec:
        return x & y;

    case INDEX_op_or:
    case INDEX_op_or_vec:
        return x | y;

    case INDEX_op_xor:
    case INDEX_op_xor_vec:
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64_VEC(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    case INDEX_op_andc:
    case INDEX_op_andc_vec:
        return x & ~y;

    case INDEX_op_orc:
    case INDEX_op_orc_vec:
        return x | ~y;

    case INDEX_op_eqv:
    case INDEX_op_eqv_vec:
        return ~(x ^ y);

    case INDEX_op_nand:
    case INDEX_op_nand_vec:
        return ~(x & y);

    case INDEX_op_nor:
    case INDEX_op_nor_vec:
        return ~(x | y);

    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(bswap16):
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

    CASE_OP_32_64(bswap32):
        x = bswap32(x);
        return y & TCG_BSWAP_OS ? (int32_t)x : x;

    case INDEX_op_bswap64_i64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        g_assert_not_reached();
    }
}

static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
                                    uint64_t x, uint64_t y)
{
    uint64_t res = do_constant_folding_2(op, x, y);
    if (type == TCG_TYPE_I32) {
        res = (int32_t)res;
    }
    return res;
}

static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

static int do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        return -1;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}
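/*
 * TSTEQ/TSTNE return -1 above because x == y does not decide a
 * test condition: x & x is zero iff x itself is zero, so the
 * result still depends on the runtime value.
 */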

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond(TCGType type, TCGArg x,
                                    TCGArg y, TCGCond c)
{
    if (arg_is_const(x) && arg_is_const(y)) {
        uint64_t xv = arg_info(x)->val;
        uint64_t yv = arg_info(y)->val;

        switch (type) {
        case TCG_TYPE_I32:
            return do_constant_folding_cond_32(xv, yv, c);
        case TCG_TYPE_I64:
            return do_constant_folding_cond_64(xv, yv, c);
        default:
            /* Only scalar comparisons are optimizable */
            return -1;
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const_val(y, 0)) {
        switch (c) {
        case TCG_COND_LTU:
        case TCG_COND_TSTNE:
            return 0;
        case TCG_COND_GEU:
        case TCG_COND_TSTEQ:
            return 1;
        default:
            return -1;
        }
    }
    return -1;
}

/**
 * swap_commutative:
 * @dest: TCGArg of the destination argument, or NO_DEST.
 * @p1: first paired argument
 * @p2: second paired argument
 *
 * If *@p1 is a constant and *@p2 is not, swap.
 * If *@p2 matches @dest, swap.
 * Return true if a swap was performed.
 */

#define NO_DEST temp_arg(NULL)

static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += arg_is_const(a1);
    sum -= arg_is_const(a2);

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}
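/*
 * Illustrative example: "add t0, $5, t1" has its arguments swapped
 * to "add t0, t1, $5", placing the constant second; likewise
 * "add t0, t1, t0" becomes "add t0, t0, t1" so that the first
 * source matches the destination.
 */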

static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += arg_is_const(p1[0]);
    sum += arg_is_const(p1[1]);
    sum -= arg_is_const(p2[0]);
    sum -= arg_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest,
                                     TCGArg *p1, TCGArg *p2, TCGArg *pcond)
{
    TCGCond cond;
    TempOptInfo *i1;
    bool swap;
    int r;

    swap = swap_commutative(dest, p1, p2);
    cond = *pcond;
    if (swap) {
        *pcond = cond = tcg_swap_cond(cond);
    }

    r = do_constant_folding_cond(ctx->type, *p1, *p2, cond);
    if (r >= 0) {
        return r;
    }
    if (!is_tst_cond(cond)) {
        return -1;
    }

    i1 = arg_info(*p1);

    /*
     * TSTNE x,x -> NE x,0
     * TSTNE x,i -> NE x,0 if i includes all nonzero bits of x
     */
    if (args_are_copies(*p1, *p2) ||
        (arg_is_const(*p2) && (i1->z_mask & ~arg_info(*p2)->val) == 0)) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
        return -1;
    }

    /* TSTNE x,i -> LT x,0 if i only includes sign bit copies */
    if (arg_is_const(*p2) && (arg_info(*p2)->val & ~i1->s_mask) == 0) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_ltge_cond(cond);
        return -1;
    }

    /* Expand to AND with a temporary if no backend support. */
    if (!TCG_TARGET_HAS_tst) {
        TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and, 3);
        TCGArg tmp = arg_new_temp(ctx);

        op2->args[0] = tmp;
        op2->args[1] = *p1;
        op2->args[2] = *p2;

        *p1 = tmp;
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
    }
    return -1;
}
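/*
 * Worked example (illustrative): if z_mask of x is 0xff, then a
 * TSTNE comparison of x with 0xff tests every bit that can possibly
 * be set in x, so it is rewritten above to "NE x, 0".
 */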

static int do_constant_folding_cond2(OptContext *ctx, TCGOp *op, TCGArg *args)
{
    TCGArg al, ah, bl, bh;
    TCGCond c;
    bool swap;
    int r;

    swap = swap_commutative2(args, args + 2);
    c = args[4];
    if (swap) {
        args[4] = c = tcg_swap_cond(c);
    }

    al = args[0];
    ah = args[1];
    bl = args[2];
    bh = args[3];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);

            r = do_constant_folding_cond_64(a, b, c);
            if (r >= 0) {
                return r;
            }
        }

        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
            case TCG_COND_TSTNE:
                return 0;
            case TCG_COND_GEU:
            case TCG_COND_TSTEQ:
                return 1;
            default:
                break;
            }
        }

        /* TSTNE x,-1 -> NE x,0 */
        if (b == -1 && is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }

        /* TSTNE x,sign -> LT x,0 */
        if (b == INT64_MIN && is_tst_cond(c)) {
            /* bl must be 0, so copy that to bh */
            args[3] = bl;
            args[4] = tcg_tst_ltge_cond(c);
            return -1;
        }
    }

    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        r = do_constant_folding_cond_eq(c);
        if (r >= 0) {
            return r;
        }

        /* TSTNE x,x -> NE x,0 */
        if (is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }
    }

    /* Expand to AND with a temporary if no backend support. */
    if (!TCG_TARGET_HAS_tst && is_tst_cond(c)) {
        TCGOp *op1 = opt_insert_before(ctx, op, INDEX_op_and, 3);
        TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and, 3);
        TCGArg t1 = arg_new_temp(ctx);
        TCGArg t2 = arg_new_temp(ctx);

        op1->args[0] = t1;
        op1->args[1] = al;
        op1->args[2] = bl;
        op2->args[0] = t2;
        op2->args[1] = ah;
        op2->args[2] = bh;

        args[0] = t1;
        args[1] = t2;
        args[3] = args[2] = arg_new_constant(ctx, 0);
        args[4] = tcg_tst_eqne_cond(c);
    }
    return -1;
}

static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
{
    for (int i = 0; i < nb_args; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        init_ts_info(ctx, ts);
    }
}

static void copy_propagate(OptContext *ctx, TCGOp *op,
                           int nb_oargs, int nb_iargs)
{
    for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts_is_copy(ts)) {
            op->args[i] = temp_arg(find_better_copy(ts));
        }
    }
}

static void finish_bb(OptContext *ctx)
{
    /* We only optimize memory barriers across basic blocks. */
    ctx->prev_mb = NULL;
}

static void finish_ebb(OptContext *ctx)
{
    finish_bb(ctx);
    /* We only optimize across extended basic blocks. */
    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
    remove_mem_copy_all(ctx);
}

static bool finish_folding(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    int i, nb_oargs;

    nb_oargs = def->nb_oargs;
    for (i = 0; i < nb_oargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        reset_ts(ctx, ts);
    }
    return true;
}

/*
 * The fold_* functions return true when processing is complete,
 * usually by folding the operation to a constant or to a copy,
 * and calling tcg_opt_gen_{mov,movi}. They may do other things,
 * like collect information about the value produced, for use in
 * optimizing a subsequent operation.
 *
 * These first fold_* functions are all helpers, used by other
 * folders for more specific operations.
 */

static bool fold_const1(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = do_constant_folding(op->opc, ctx->type, t, 0);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_const2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}

static bool fold_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return false;
}

static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return fold_const2(ctx, op);
}

/*
 * Record "zero" and "sign" masks for the single output of @op.
 * See TempOptInfo definition of z_mask and s_mask.
 * If z_mask allows, fold the output to constant zero.
 * The passed s_mask may be augmented by z_mask.
 */
static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
                          uint64_t z_mask, int64_t s_mask)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    TCGTemp *ts;
    TempOptInfo *ti;
    int rep;

    /* Only single-output opcodes are supported here. */
    tcg_debug_assert(def->nb_oargs == 1);

    /*
     * 32-bit ops generate 32-bit results, which for the purpose of
     * simplifying tcg are sign-extended. Certainly that's how we
     * represent our constants elsewhere. Note that the bits will
     * be reset properly for a 64-bit value when encountering the
     * type changing opcodes.
     */
    if (ctx->type == TCG_TYPE_I32) {
        z_mask = (int32_t)z_mask;
        s_mask |= INT32_MIN;
    }

    if (z_mask == 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
    }

    ts = arg_temp(op->args[0]);
    reset_ts(ctx, ts);

    ti = ts_info(ts);
    ti->z_mask = z_mask;

    /* Canonicalize s_mask and incorporate data from z_mask. */
    rep = clz64(~s_mask);
    rep = MAX(rep, clz64(z_mask));
    rep = MAX(rep - 1, 0);
    ti->s_mask = INT64_MIN >> rep;

    return true;
}
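/*
 * Canonicalization example (illustrative): with z_mask == 0xff and
 * an incoming s_mask of 0, clz64(z_mask) == 56 gives rep == 55, so
 * the stored s_mask is INT64_MIN >> 55 == 0xffffffffffffff00: a
 * value with only the low 8 bits unknown has bits 63..8 equal to
 * its msb.
 */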

static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
{
    return fold_masks_zs(ctx, op, z_mask, 0);
}

static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
{
    return fold_masks_zs(ctx, op, -1, s_mask);
}

/*
 * An "affected" mask bit is 0 if and only if the result is identical
 * to the first input. Thus if the entire mask is 0, the operation
 * is equivalent to a copy.
 */
static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
{
    if (ctx->type == TCG_TYPE_I32) {
        a_mask = (uint32_t)a_mask;
    }
    if (a_mask == 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/*
 * Convert @op to NOT, if NOT is supported by the host.
 * Return true if the conversion is successful, which will still
 * indicate that the processing is complete.
 */
static bool fold_not(OptContext *ctx, TCGOp *op);
static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
{
    TCGOpcode not_op;
    bool have_not;

    switch (ctx->type) {
    case TCG_TYPE_I32:
        not_op = INDEX_op_not_i32;
        have_not = TCG_TARGET_HAS_not_i32;
        break;
    case TCG_TYPE_I64:
        not_op = INDEX_op_not_i64;
        have_not = TCG_TARGET_HAS_not_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        not_op = INDEX_op_not_vec;
        have_not = TCG_TARGET_HAS_not_vec;
        break;
    default:
        g_assert_not_reached();
    }
    if (have_not) {
        op->opc = not_op;
        op->args[1] = op->args[idx];
        return fold_not(ctx, op);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to @i. */
static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to NOT. */
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return fold_to_not(ctx, op, 2);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to @i. */
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to identity. */
static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}
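/* E.g. "add r,x,0" or "or r,x,0" folds to "mov r,x"; for AND the
   identity element is -1, as used by fold_and below. */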

/* If the binary operation has second argument @i, fold to NOT. */
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return fold_to_not(ctx, op, 1);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to @i. */
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}
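/* E.g. "sub r,x,x" and "xor r,x,x" fold to "movi r,0". */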

/* If the binary operation has both arguments equal, fold to identity. */
static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}
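/* E.g. "and r,x,x" and "or r,x,x" fold to "mov r,x". */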

/*
 * These outermost fold_<op> functions are sorted alphabetically.
 *
 * The ordering of the transformations should be:
 * 1) those that produce a constant
 * 2) those that produce a copy
 * 3) those that produce information about the result value.
 */

static bool fold_or(OptContext *ctx, TCGOp *op);
static bool fold_orc(OptContext *ctx, TCGOp *op);
static bool fold_xor(OptContext *ctx, TCGOp *op);

static bool fold_add(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return finish_folding(ctx, op);
}

/* We cannot as yet do_constant_folding with vectors. */
static bool fold_add_vec(OptContext *ctx, TCGOp *op)
{
    if (fold_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return finish_folding(ctx, op);
}

static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
{
    bool a_const = arg_is_const(op->args[2]) && arg_is_const(op->args[3]);
    bool b_const = arg_is_const(op->args[4]) && arg_is_const(op->args[5]);

    if (a_const && b_const) {
        uint64_t al = arg_info(op->args[2])->val;
        uint64_t ah = arg_info(op->args[3])->val;
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;
        TCGArg rl, rh;
        TCGOp *op2;

        if (ctx->type == TCG_TYPE_I32) {
            uint64_t a = deposit64(al, 32, 32, ah);
            uint64_t b = deposit64(bl, 32, 32, bh);

            if (add) {
                a += b;
            } else {
                a -= b;
            }

            al = sextract64(a, 0, 32);
            ah = sextract64(a, 32, 32);
        } else {
            Int128 a = int128_make128(al, ah);
            Int128 b = int128_make128(bl, bh);

            if (add) {
                a = int128_add(a, b);
            } else {
                a = int128_sub(a, b);
            }

            al = int128_getlo(a);
            ah = int128_gethi(a);
        }

        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = opt_insert_before(ctx, op, 0, 2);

        tcg_opt_gen_movi(ctx, op, rl, al);
        tcg_opt_gen_movi(ctx, op2, rh, ah);
        return true;
    }

    /* Fold sub2 r,x,i to add2 r,x,-i */
    if (!add && b_const) {
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;

        /* Negate the two parts without assembling and disassembling. */
        bl = -bl;
        bh = ~bh + !bl;

        op->opc = (ctx->type == TCG_TYPE_I32
                   ? INDEX_op_add2_i32 : INDEX_op_add2_i64);
        op->args[4] = arg_new_constant(ctx, bl);
        op->args[5] = arg_new_constant(ctx, bh);
    }
    return finish_folding(ctx, op);
}
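/*
 * The double-word negation above, worked through (illustrative):
 * for (bh,bl) == (1,0), i.e. b == 2^64, bl = -0 == 0 and
 * bh = ~1 + !0 == -1, giving (bh,bl) == (-1,0) == -2^64 mod 2^128;
 * the carry (!bl) is folded in without assembling a 128-bit value.
 */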

static bool fold_add2(OptContext *ctx, TCGOp *op)
{
    /* Note that the high and low parts may be independently swapped. */
    swap_commutative(op->args[0], &op->args[2], &op->args[4]);
    swap_commutative(op->args[1], &op->args[3], &op->args[5]);

    return fold_addsub2(ctx, op, true);
}

static bool fold_and(OptContext *ctx, TCGOp *op)
{
    uint64_t z1, z2, z_mask, s_mask;
    TempOptInfo *t1, *t2;

    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_xx_to_x(ctx, op)) {
        return true;
    }

    t1 = arg_info(op->args[1]);
    t2 = arg_info(op->args[2]);
    z1 = t1->z_mask;
    z2 = t2->z_mask;

    /*
     * Known-zeros does not imply known-ones. Therefore unless
     * arg2 is constant, we can't infer affected bits from it.
     */
    if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
        return true;
    }

    z_mask = z1 & z2;

    /*
     * Sign repetitions are perforce all identical, whether they are 1 or 0.
     * Bitwise operations preserve the relative quantity of the repetitions.
     */
    s_mask = t1->s_mask & t2->s_mask;

    return fold_masks_zs(ctx, op, z_mask, s_mask);
}
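/*
 * Illustrative: if z_mask of x is 0xff, then "and r, x, 0xff"
 * cannot clear any bit that could be set in x (z1 & ~z2 == 0),
 * so fold_affected_mask above reduces the op to "mov r, x".
 */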

static bool fold_andc(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, s_mask;
    TempOptInfo *t1, *t2;

    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_ix_to_not(ctx, op, -1)) {
        return true;
    }

    t1 = arg_info(op->args[1]);
    t2 = arg_info(op->args[2]);
    z_mask = t1->z_mask;

    if (ti_is_const(t2)) {
        /* Fold andc r,x,i to and r,x,~i. */
        switch (ctx->type) {
        case TCG_TYPE_I32:
        case TCG_TYPE_I64:
            op->opc = INDEX_op_and;
            break;
        case TCG_TYPE_V64:
        case TCG_TYPE_V128:
        case TCG_TYPE_V256:
            op->opc = INDEX_op_and_vec;
            break;
        default:
            g_assert_not_reached();
        }
        op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
        return fold_and(ctx, op);
    }

    /*
     * Known-zeros does not imply known-ones.  Therefore unless
     * arg2 is constant -- and that case was already folded to AND
     * above -- we can't infer anything from it, and z_mask remains
     * t1->z_mask.
     */
    s_mask = t1->s_mask & t2->s_mask;
    return fold_masks_zs(ctx, op, z_mask, s_mask);
}
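
/*
 * Rewrite above (illustrative): with a constant second operand,
 *
 *     andc r, x, $0x0f  =>  and r, x, $~0x0f
 *
 * after which fold_and() may simplify further, e.g. to a mov when
 * x->z_mask already has those low bits clear.
 */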

static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
{
    /* If true and false values are the same, eliminate the select. */
    if (args_are_copies(op->args[2], op->args[3])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
    }

    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
        uint64_t tv = arg_info(op->args[2])->val;
        uint64_t fv = arg_info(op->args[3])->val;

        if (tv == -1 && fv == 0) {
            return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
        }
        if (tv == 0 && fv == -1) {
            if (TCG_TARGET_HAS_not_vec) {
                op->opc = INDEX_op_not_vec;
                return fold_not(ctx, op);
            } else {
                op->opc = INDEX_op_xor_vec;
                op->args[2] = arg_new_constant(ctx, -1);
                return fold_xor(ctx, op);
            }
        }
    }
    if (arg_is_const(op->args[2])) {
        uint64_t tv = arg_info(op->args[2])->val;
        if (tv == -1) {
            op->opc = INDEX_op_or_vec;
            op->args[2] = op->args[3];
            return fold_or(ctx, op);
        }
        if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
            op->opc = INDEX_op_andc_vec;
            op->args[2] = op->args[1];
            op->args[1] = op->args[3];
            return fold_andc(ctx, op);
        }
    }
    if (arg_is_const(op->args[3])) {
        uint64_t fv = arg_info(op->args[3])->val;
        if (fv == 0) {
            op->opc = INDEX_op_and_vec;
            return fold_and(ctx, op);
        }
        if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
            TCGArg sel = op->args[1];

            /* d = (sel & t) | ~sel = t | ~sel, i.e. orc d, t, sel. */
            op->opc = INDEX_op_orc_vec;
            op->args[1] = op->args[2];
            op->args[2] = sel;
            return fold_orc(ctx, op);
        }
    }
    return finish_folding(ctx, op);
}
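
/*
 * Illustrative reductions performed above, writing the op as
 * "bitsel d, sel, t, f" with d = (t & sel) | (f & ~sel):
 *
 *     bitsel d, s, $-1, $0   =>  mov d, s
 *     bitsel d, s, $0, $-1   =>  not d, s      (or xor d, s, $-1)
 *     bitsel d, s, $-1, f    =>  or d, s, f
 *     bitsel d, s, t, $0     =>  and d, s, t
 */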

static bool fold_brcond(OptContext *ctx, TCGOp *op)
{
    int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
                                      &op->args[1], &op->args[2]);
    if (i == 0) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }
    if (i > 0) {
        op->opc = INDEX_op_br;
        op->args[0] = op->args[3];
        finish_ebb(ctx);
    } else {
        finish_bb(ctx);
    }
    return true;
}

static bool fold_brcond2(OptContext *ctx, TCGOp *op)
{
    TCGCond cond;
    TCGArg label;
    int i, inv = 0;

    i = do_constant_folding_cond2(ctx, op, &op->args[0]);
    cond = op->args[4];
    label = op->args[5];
    if (i >= 0) {
        goto do_brcond_const;
    }

    switch (cond) {
    case TCG_COND_LT:
    case TCG_COND_GE:
        /*
         * Simplify LT/GE comparisons vs zero to a single compare
         * vs the high word of the input.
         */
        if (arg_is_const_val(op->args[2], 0) &&
            arg_is_const_val(op->args[3], 0)) {
            goto do_brcond_high;
        }
        break;

    case TCG_COND_NE:
        inv = 1;
        QEMU_FALLTHROUGH;
    case TCG_COND_EQ:
        /*
         * Simplify EQ/NE comparisons where one of the pairs
         * can be simplified.
         */
        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
                                     op->args[2], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            goto do_brcond_high;
        }

        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
                                     op->args[3], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            goto do_brcond_low;
        }
        break;

    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        if (arg_is_const_val(op->args[2], 0)) {
            goto do_brcond_high;
        }
        if (arg_is_const_val(op->args[3], 0)) {
            goto do_brcond_low;
        }
        break;

    default:
        break;

    do_brcond_low:
        op->opc = INDEX_op_brcond_i32;
        op->args[1] = op->args[2];
        op->args[2] = cond;
        op->args[3] = label;
        return fold_brcond(ctx, op);

    do_brcond_high:
        op->opc = INDEX_op_brcond_i32;
        op->args[0] = op->args[1];
        op->args[1] = op->args[3];
        op->args[2] = cond;
        op->args[3] = label;
        return fold_brcond(ctx, op);

    do_brcond_const:
        if (i == 0) {
            tcg_op_remove(ctx->tcg, op);
            return true;
        }
        op->opc = INDEX_op_br;
        op->args[0] = label;
        finish_ebb(ctx);
        return true;
    }

    finish_bb(ctx);
    return true;
}
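
/*
 * Worked example (illustrative): a double-word "(hi:lo) < 0" test
 * depends only on the high word, so
 *
 *     brcond2 lo, hi, $0, $0, lt, $L  =>  brcond hi, $0, lt, $L
 *
 * leaving a single-word branch for fold_brcond() to process.
 */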

static bool fold_bswap(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, s_mask, sign;
    TempOptInfo *t1 = arg_info(op->args[1]);

    if (ti_is_const(t1)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0],
                                do_constant_folding(op->opc, ctx->type,
                                                    ti_const_val(t1),
                                                    op->args[2]));
    }

    z_mask = t1->z_mask;
    switch (op->opc) {
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        z_mask = bswap16(z_mask);
        sign = INT16_MIN;
        break;
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
        z_mask = bswap32(z_mask);
        sign = INT32_MIN;
        break;
    case INDEX_op_bswap64_i64:
        z_mask = bswap64(z_mask);
        sign = INT64_MIN;
        break;
    default:
        g_assert_not_reached();
    }

    s_mask = 0;
    switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
    case TCG_BSWAP_OZ:
        break;
    case TCG_BSWAP_OS:
        /* If the sign bit may be 1, force all the bits above to 1. */
        if (z_mask & sign) {
            z_mask |= sign;
        }
        /* The value and therefore s_mask is explicitly sign-extended. */
        s_mask = sign;
        break;
    default:
        /* The high bits are undefined: force all bits above the sign to 1. */
        z_mask |= sign << 1;
        break;
    }

    return fold_masks_zs(ctx, op, z_mask, s_mask);
}
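
/*
 * Illustrative mask propagation: for bswap16 with TCG_BSWAP_OZ and an
 * input known to fit in 8 bits, the output mask is byte-swapped too:
 *
 *     z_mask 0x00ff  =>  0xff00
 *
 * so a following "and r, x, $0xff00" can then be removed by fold_and().
 */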

static bool fold_call(OptContext *ctx, TCGOp *op)
{
    TCGContext *s = ctx->tcg;
    int nb_oargs = TCGOP_CALLO(op);
    int nb_iargs = TCGOP_CALLI(op);
    int flags, i;

    init_arguments(ctx, op, nb_oargs + nb_iargs);
    copy_propagate(ctx, op, nb_oargs, nb_iargs);

    /* If the function reads or writes globals, reset temp data. */
    flags = tcg_call_flags(op);
    if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
        int nb_globals = s->nb_globals;

        for (i = 0; i < nb_globals; i++) {
            if (test_bit(i, ctx->temps_used.l)) {
                reset_ts(ctx, &ctx->tcg->temps[i]);
            }
        }
    }

    /* If the function has side effects, reset mem data. */
    if (!(flags & TCG_CALL_NO_SIDE_EFFECTS)) {
        remove_mem_copy_all(ctx);
    }

    /* Reset temp data for outputs. */
    for (i = 0; i < nb_oargs; i++) {
        reset_temp(ctx, op->args[i]);
    }

    /* Stop optimizing MB across calls. */
    ctx->prev_mb = NULL;
    return true;
}

static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
{
    /* Canonicalize the comparison to put immediate second. */
    if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
        op->args[3] = tcg_swap_cond(op->args[3]);
    }
    return finish_folding(ctx, op);
}

static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
{
    /* If true and false values are the same, eliminate the cmp. */
    if (args_are_copies(op->args[3], op->args[4])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
    }

    /* Canonicalize the comparison to put immediate second. */
    if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
        op->args[5] = tcg_swap_cond(op->args[5]);
    }
    /*
     * Canonicalize the "false" input reg to match the destination,
     * so that the tcg backend can implement "move if true".
     */
    if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
        op->args[5] = tcg_invert_cond(op->args[5]);
    }
    return finish_folding(ctx, op);
}

static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, s_mask;
    TempOptInfo *t1 = arg_info(op->args[1]);
    TempOptInfo *t2 = arg_info(op->args[2]);

    if (ti_is_const(t1)) {
        uint64_t t = ti_const_val(t1);

        if (t != 0) {
            t = do_constant_folding(op->opc, ctx->type, t, 0);
            return tcg_opt_gen_movi(ctx, op, op->args[0], t);
        }
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        z_mask = 31;
        break;
    case TCG_TYPE_I64:
        z_mask = 63;
        break;
    default:
        g_assert_not_reached();
    }
    s_mask = ~z_mask;
    z_mask |= t2->z_mask;
    s_mask &= t2->s_mask;

    return fold_masks_zs(ctx, op, z_mask, s_mask);
}
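
/*
 * Worked example (illustrative): a constant non-zero input folds to
 * a constant, while a constant zero input selects the fallback:
 *
 *     clz r, $0x00800000, f  =>  movi r, $8      (for a 32-bit clz)
 *     clz r, $0, f           =>  mov r, f
 */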

static bool fold_ctpop(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask;

    if (fold_const1(ctx, op)) {
        return true;
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        z_mask = 32 | 31;
        break;
    case TCG_TYPE_I64:
        z_mask = 64 | 63;
        break;
    default:
        g_assert_not_reached();
    }
    return fold_masks_z(ctx, op, z_mask);
}

static bool fold_deposit(OptContext *ctx, TCGOp *op)
{
    TempOptInfo *t1 = arg_info(op->args[1]);
    TempOptInfo *t2 = arg_info(op->args[2]);
    int ofs = op->args[3];
    int len = op->args[4];
    int width = 8 * tcg_type_size(ctx->type);
    uint64_t z_mask, s_mask;

    if (ti_is_const(t1) && ti_is_const(t2)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0],
                                deposit64(ti_const_val(t1), ofs, len,
                                          ti_const_val(t2)));
    }

    /* Inserting a value into zero at offset 0. */
    if (ti_is_const_val(t1, 0) && ofs == 0) {
        uint64_t mask = MAKE_64BIT_MASK(0, len);

        op->opc = INDEX_op_and;
        op->args[1] = op->args[2];
        op->args[2] = arg_new_constant(ctx, mask);
        return fold_and(ctx, op);
    }

    /* Inserting zero into a value. */
    if (ti_is_const_val(t2, 0)) {
        uint64_t mask = deposit64(-1, ofs, len, 0);

        op->opc = INDEX_op_and;
        op->args[2] = arg_new_constant(ctx, mask);
        return fold_and(ctx, op);
    }

    /* The s_mask from the top portion of the deposit is still valid. */
    if (ofs + len == width) {
        s_mask = t2->s_mask << ofs;
    } else {
        s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
    }

    z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
    return fold_masks_zs(ctx, op, z_mask, s_mask);
}
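
/*
 * Worked example (illustrative): both special cases above reduce a
 * deposit to a simple mask:
 *
 *     deposit r, $0, x, 0, 8  =>  and r, x, $0xff
 *     deposit r, x, $0, 8, 8  =>  and r, x, $0xffffffffffff00ff
 *
 * giving fold_and() a further chance to simplify.
 */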

static bool fold_divide(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xi_to_x(ctx, op, 1)) {
        return true;
    }
    return finish_folding(ctx, op);
}

static bool fold_dup(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;
        t = dup_const(TCGOP_VECE(op), t);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return finish_folding(ctx, op);
}

static bool fold_dup2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
                               arg_info(op->args[2])->val);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    if (args_are_copies(op->args[1], op->args[2])) {
        op->opc = INDEX_op_dup_vec;
        TCGOP_VECE(op) = MO_32;
    }
    return finish_folding(ctx, op);
}

static bool fold_eqv(OptContext *ctx, TCGOp *op)
{
    uint64_t s_mask;
    TempOptInfo *t1, *t2;

    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_xi_to_not(ctx, op, 0)) {
        return true;
    }

    t2 = arg_info(op->args[2]);
    if (ti_is_const(t2)) {
        /* Fold eqv r,x,i to xor r,x,~i. */
        switch (ctx->type) {
        case TCG_TYPE_I32:
        case TCG_TYPE_I64:
            op->opc = INDEX_op_xor;
            break;
        case TCG_TYPE_V64:
        case TCG_TYPE_V128:
        case TCG_TYPE_V256:
            op->opc = INDEX_op_xor_vec;
            break;
        default:
            g_assert_not_reached();
        }
        op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
        return fold_xor(ctx, op);
    }

    t1 = arg_info(op->args[1]);
    s_mask = t1->s_mask & t2->s_mask;
    return fold_masks_s(ctx, op, s_mask);
}
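
/*
 * Identity used above (illustrative): eqv is the complement of xor,
 * so with a constant second operand
 *
 *     eqv r, x, i  ==  ~(x ^ i)  ==  x ^ ~i
 *
 * e.g. eqv r, x, $0xff becomes xor r, x, $~0xff.
 */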

static bool fold_extract(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask_old, z_mask;
    TempOptInfo *t1 = arg_info(op->args[1]);
    int pos = op->args[2];
    int len = op->args[3];

    if (ti_is_const(t1)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0],
                                extract64(ti_const_val(t1), pos, len));
    }

    z_mask_old = t1->z_mask;
    z_mask = extract64(z_mask_old, pos, len);
    if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
        return true;
    }

    return fold_masks_z(ctx, op, z_mask);
}
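
/*
 * Worked example (illustrative): extracting the low byte of a value
 * already known to fit in 8 bits changes nothing; with z_mask 0xff,
 * pos 0, len 8, z_mask_old ^ z_mask == 0 and fold_affected_mask()
 * rewrites
 *
 *     extract r, x, 0, 8  =>  mov r, x
 */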

static bool fold_extract2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t v1 = arg_info(op->args[1])->val;
        uint64_t v2 = arg_info(op->args[2])->val;
        int shr = op->args[3];

        if (op->opc == INDEX_op_extract2_i64) {
            v1 >>= shr;
            v2 <<= 64 - shr;
        } else {
            v1 = (uint32_t)v1 >> shr;
            v2 = (uint64_t)((int32_t)v2 << (32 - shr));
        }
        return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
    }
    return finish_folding(ctx, op);
}

static bool fold_exts(OptContext *ctx, TCGOp *op)
{
    uint64_t s_mask, z_mask;
    TempOptInfo *t1;

    if (fold_const1(ctx, op)) {
        return true;
    }

    t1 = arg_info(op->args[1]);
    z_mask = t1->z_mask;
    s_mask = t1->s_mask;

    switch (op->opc) {
    case INDEX_op_ext_i32_i64:
        s_mask |= INT32_MIN;
        z_mask = (int32_t)z_mask;
        break;
    default:
        g_assert_not_reached();
    }
    return fold_masks_zs(ctx, op, z_mask, s_mask);
}

static bool fold_extu(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask;

    if (fold_const1(ctx, op)) {
        return true;
    }

    z_mask = arg_info(op->args[1])->z_mask;
    switch (op->opc) {
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extu_i32_i64:
        z_mask = (uint32_t)z_mask;
        break;
    case INDEX_op_extrh_i64_i32:
        z_mask >>= 32;
        break;
    default:
        g_assert_not_reached();
    }
    return fold_masks_z(ctx, op, z_mask);
}

static bool fold_mb(OptContext *ctx, TCGOp *op)
{
    /* Eliminate duplicate and redundant fence instructions. */
    if (ctx->prev_mb) {
        /*
         * Merge two barriers of the same type into one,
         * or a weaker barrier into a stronger one,
         * or two weaker barriers into a stronger one.
         *   mb X; mb Y => mb X|Y
         *   mb; strl => mb; st
         *   ldaq; mb => ld; mb
         *   ldaq; strl => ld; mb; st
         * Other combinations are also merged into a strong
         * barrier.  This is stricter than specified but for
         * the purposes of TCG is better than not optimizing.
         */
        ctx->prev_mb->args[0] |= op->args[0];
        tcg_op_remove(ctx->tcg, op);
    } else {
        ctx->prev_mb = op;
    }
    return true;
}
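
/*
 * Worked example (illustrative): two adjacent barriers collapse into
 * one whose type is the union of the two, per the table above:
 *
 *     mb $TCG_MO_LD_LD
 *     mb $TCG_MO_ST_ST
 * =>
 *     mb $(TCG_MO_LD_LD | TCG_MO_ST_ST)
 */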

static bool fold_mov(OptContext *ctx, TCGOp *op)
{
    return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
}

static bool fold_movcond(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, s_mask;
    TempOptInfo *tt, *ft;
    int i;

    /* If true and false values are the same, eliminate the cmp. */
    if (args_are_copies(op->args[3], op->args[4])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
    }

    /*
     * Canonicalize the "false" input reg to match the destination reg so
     * that the tcg backend can implement a "move if true" operation.
     */
    if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
        op->args[5] = tcg_invert_cond(op->args[5]);
    }

    i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[1],
                                  &op->args[2], &op->args[5]);
    if (i >= 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
    }

    tt = arg_info(op->args[3]);
    ft = arg_info(op->args[4]);
    z_mask = tt->z_mask | ft->z_mask;
    s_mask = tt->s_mask & ft->s_mask;

    if (ti_is_const(tt) && ti_is_const(ft)) {
        uint64_t tv = ti_const_val(tt);
        uint64_t fv = ti_const_val(ft);
        TCGOpcode opc, negopc = 0;
        TCGCond cond = op->args[5];

        switch (ctx->type) {
        case TCG_TYPE_I32:
            opc = INDEX_op_setcond_i32;
            if (TCG_TARGET_HAS_negsetcond_i32) {
                negopc = INDEX_op_negsetcond_i32;
            }
            tv = (int32_t)tv;
            fv = (int32_t)fv;
            break;
        case TCG_TYPE_I64:
            opc = INDEX_op_setcond_i64;
            if (TCG_TARGET_HAS_negsetcond_i64) {
                negopc = INDEX_op_negsetcond_i64;
            }
            break;
        default:
            g_assert_not_reached();
        }

        if (tv == 1 && fv == 0) {
            op->opc = opc;
            op->args[3] = cond;
        } else if (fv == 1 && tv == 0) {
            op->opc = opc;
            op->args[3] = tcg_invert_cond(cond);
        } else if (negopc) {
            if (tv == -1 && fv == 0) {
                op->opc = negopc;
                op->args[3] = cond;
            } else if (fv == -1 && tv == 0) {
                op->opc = negopc;
                op->args[3] = tcg_invert_cond(cond);
            }
        }
    }

    return fold_masks_zs(ctx, op, z_mask, s_mask);
}
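
/*
 * Illustrative reductions from above: selecting between 1 and 0 is
 * the comparison itself, and between -1 and 0 its negated form:
 *
 *     movcond r, x, y, $1, $0, lt   =>  setcond r, x, y, lt
 *     movcond r, x, y, $-1, $0, lt  =>  negsetcond r, x, y, lt
 *
 * (the latter only when the backend provides negsetcond).
 */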

static bool fold_mul(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xi_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 1)) {
        return true;
    }
    return finish_folding(ctx, op);
}

static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_i(ctx, op, 0)) {
        return true;
    }
    return finish_folding(ctx, op);
}

static bool fold_multiply2(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[2], &op->args[3]);

    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
        uint64_t a = arg_info(op->args[2])->val;
        uint64_t b = arg_info(op->args[3])->val;
        uint64_t h, l;
        TCGArg rl, rh;
        TCGOp *op2;

        switch (op->opc) {
        case INDEX_op_mulu2_i32:
            l = (uint64_t)(uint32_t)a * (uint32_t)b;
            h = (int32_t)(l >> 32);
            l = (int32_t)l;
            break;
        case INDEX_op_muls2_i32:
            l = (int64_t)(int32_t)a * (int32_t)b;
            h = l >> 32;
            l = (int32_t)l;
            break;
        case INDEX_op_mulu2_i64:
            mulu64(&l, &h, a, b);
            break;
        case INDEX_op_muls2_i64:
            muls64(&l, &h, a, b);
            break;
        default:
            g_assert_not_reached();
        }

        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = opt_insert_before(ctx, op, 0, 2);

        tcg_opt_gen_movi(ctx, op, rl, l);
        tcg_opt_gen_movi(ctx, op2, rh, h);
        return true;
    }
    return finish_folding(ctx, op);
}
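
/*
 * Worked example (illustrative): a double-word multiply of constants
 * becomes two constant moves:
 *
 *     mulu2 rl, rh, $0x10000, $0x10000  =>  movi rl, $0; movi rh, $1
 */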

static bool fold_nand(OptContext *ctx, TCGOp *op)
{
    uint64_t s_mask;

    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_not(ctx, op, -1)) {
        return true;
    }

    s_mask = arg_info(op->args[1])->s_mask
           & arg_info(op->args[2])->s_mask;
    return fold_masks_s(ctx, op, s_mask);
}

static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
{
    /* Set to 1 all bits to the left of the rightmost. */
    uint64_t z_mask = arg_info(op->args[1])->z_mask;
    z_mask = -(z_mask & -z_mask);

    return fold_masks_z(ctx, op, z_mask);
}

static bool fold_neg(OptContext *ctx, TCGOp *op)
{
    return fold_const1(ctx, op) || fold_neg_no_const(ctx, op);
}
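
/*
 * Bit trick above (illustrative): z_mask & -z_mask isolates the
 * lowest possibly-set bit, and negating that sets it and every bit
 * above it.  E.g. z_mask == 0x0c describes the values {0, 4, 8, 12};
 * their negations all fit within -(0x04) == 0xfffffffffffffffc.
 */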

static bool fold_nor(OptContext *ctx, TCGOp *op)
{
    uint64_t s_mask;

    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_not(ctx, op, 0)) {
        return true;
    }

    s_mask = arg_info(op->args[1])->s_mask
           & arg_info(op->args[2])->s_mask;
    return fold_masks_s(ctx, op, s_mask);
}

static bool fold_not(OptContext *ctx, TCGOp *op)
{
    if (fold_const1(ctx, op)) {
        return true;
    }
    return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
}

static bool fold_or(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, s_mask;
    TempOptInfo *t1, *t2;

    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_xx_to_x(ctx, op)) {
        return true;
    }

    t1 = arg_info(op->args[1]);
    t2 = arg_info(op->args[2]);
    z_mask = t1->z_mask | t2->z_mask;
    s_mask = t1->s_mask & t2->s_mask;
    return fold_masks_zs(ctx, op, z_mask, s_mask);
}

static bool fold_orc(OptContext *ctx, TCGOp *op)
{
    uint64_t s_mask;
    TempOptInfo *t1, *t2;

    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, -1) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_ix_to_not(ctx, op, 0)) {
        return true;
    }

    t2 = arg_info(op->args[2]);
    if (ti_is_const(t2)) {
        /* Fold orc r,x,i to or r,x,~i. */
        switch (ctx->type) {
        case TCG_TYPE_I32:
        case TCG_TYPE_I64:
            op->opc = INDEX_op_or;
            break;
        case TCG_TYPE_V64:
        case TCG_TYPE_V128:
        case TCG_TYPE_V256:
            op->opc = INDEX_op_or_vec;
            break;
        default:
            g_assert_not_reached();
        }
        op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
        return fold_or(ctx, op);
    }

    t1 = arg_info(op->args[1]);
    s_mask = t1->s_mask & t2->s_mask;
    return fold_masks_s(ctx, op, s_mask);
}

static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
    MemOp mop = get_memop(oi);
    int width = 8 * memop_size(mop);
    uint64_t z_mask = -1, s_mask = 0;

    if (width < 64) {
        if (mop & MO_SIGN) {
            s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
        } else {
            z_mask = MAKE_64BIT_MASK(0, width);
        }
    }

    /* Opcodes that touch guest memory stop the mb optimization. */
    ctx->prev_mb = NULL;

    return fold_masks_zs(ctx, op, z_mask, s_mask);
}

static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
{
    /* Opcodes that touch guest memory stop the mb optimization. */
    ctx->prev_mb = NULL;
    return finish_folding(ctx, op);
}

static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
{
    /* Opcodes that touch guest memory stop the mb optimization. */
    ctx->prev_mb = NULL;
    return true;
}
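
/*
 * Illustrative mask assignment: an 8-bit unsigned load yields
 * z_mask == 0xff; a 16-bit signed load yields
 * s_mask == 0xffffffffffff8000, i.e. bits 15..63 all repeat the
 * sign bit.
 */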

static bool fold_remainder(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0)) {
        return true;
    }
    return finish_folding(ctx, op);
}

/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
{
    uint64_t a_zmask, b_val;
    TCGCond cond;

    if (!arg_is_const(op->args[2])) {
        return 0;
    }

    a_zmask = arg_info(op->args[1])->z_mask;
    b_val = arg_info(op->args[2])->val;
    cond = op->args[3];

    if (ctx->type == TCG_TYPE_I32) {
        a_zmask = (uint32_t)a_zmask;
        b_val = (uint32_t)b_val;
    }

    /*
     * A with only low bits set vs B with high bits set means that A < B.
     */
    if (a_zmask < b_val) {
        bool inv = false;

        switch (cond) {
        case TCG_COND_NE:
        case TCG_COND_LEU:
        case TCG_COND_LTU:
            inv = true;
            /* fall through */
        case TCG_COND_GTU:
        case TCG_COND_GEU:
        case TCG_COND_EQ:
            return tcg_opt_gen_movi(ctx, op, op->args[0], neg ? -inv : inv);
        default:
            break;
        }
    }

    /*
     * A with only lsb set is already boolean.
     */
    if (a_zmask <= 1) {
        bool convert = false;
        bool inv = false;

        switch (cond) {
        case TCG_COND_EQ:
            inv = true;
            /* fall through */
        case TCG_COND_NE:
            convert = (b_val == 0);
            break;
        case TCG_COND_LTU:
        case TCG_COND_TSTEQ:
            inv = true;
            /* fall through */
        case TCG_COND_GEU:
        case TCG_COND_TSTNE:
            convert = (b_val == 1);
            break;
        default:
            break;
        }
        if (convert) {
            TCGOpcode neg_opc;

            if (!inv && !neg) {
                return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
            }

            switch (ctx->type) {
            case TCG_TYPE_I32:
                neg_opc = INDEX_op_neg_i32;
                break;
            case TCG_TYPE_I64:
                neg_opc = INDEX_op_neg_i64;
                break;
            default:
                g_assert_not_reached();
            }

            if (!inv) {
                op->opc = neg_opc;
            } else if (neg) {
                op->opc = INDEX_op_add;
                op->args[2] = arg_new_constant(ctx, -1);
            } else {
                op->opc = INDEX_op_xor;
                op->args[2] = arg_new_constant(ctx, 1);
            }
            return -1;
        }
    }
    return 0;
}
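
/*
 * Worked example (illustrative): when x is already known to be 0 or 1
 * (z_mask <= 1), comparisons against 0 need no setcond at all:
 *
 *     setcond r, x, $0, ne  =>  mov r, x
 *     setcond r, x, $0, eq  =>  xor r, x, $1
 *
 * and the negsetcond forms use neg or "add r, x, $-1" instead.
 */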

static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
{
    TCGOpcode neg_opc, shr_opc;
    TCGOpcode uext_opc = 0, sext_opc = 0;
    TCGCond cond = op->args[3];
    TCGArg ret, src1, src2;
    TCGOp *op2;
    uint64_t val;
    int sh;
    bool inv;

    if (!is_tst_cond(cond) || !arg_is_const(op->args[2])) {
        return;
    }

    src2 = op->args[2];
    val = arg_info(src2)->val;
    if (!is_power_of_2(val)) {
        return;
    }
    sh = ctz64(val);

    switch (ctx->type) {
    case TCG_TYPE_I32:
        shr_opc = INDEX_op_shr_i32;
        neg_opc = INDEX_op_neg_i32;
        if (TCG_TARGET_extract_valid(TCG_TYPE_I32, sh, 1)) {
            uext_opc = INDEX_op_extract_i32;
        }
        if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, sh, 1)) {
            sext_opc = INDEX_op_sextract_i32;
        }
        break;
    case TCG_TYPE_I64:
        shr_opc = INDEX_op_shr_i64;
        neg_opc = INDEX_op_neg_i64;
        if (TCG_TARGET_extract_valid(TCG_TYPE_I64, sh, 1)) {
            uext_opc = INDEX_op_extract_i64;
        }
        if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, sh, 1)) {
            sext_opc = INDEX_op_sextract_i64;
        }
        break;
    default:
        g_assert_not_reached();
    }

    ret = op->args[0];
    src1 = op->args[1];
    inv = cond == TCG_COND_TSTEQ;

    if (sh && sext_opc && neg && !inv) {
        op->opc = sext_opc;
        op->args[1] = src1;
        op->args[2] = sh;
        op->args[3] = 1;
        return;
    } else if (sh && uext_opc) {
        op->opc = uext_opc;
        op->args[1] = src1;
        op->args[2] = sh;
        op->args[3] = 1;
    } else {
        if (sh) {
            op2 = opt_insert_before(ctx, op, shr_opc, 3);
            op2->args[0] = ret;
            op2->args[1] = src1;
            op2->args[2] = arg_new_constant(ctx, sh);
            src1 = ret;
        }
        op->opc = INDEX_op_and;
        op->args[1] = src1;
        op->args[2] = arg_new_constant(ctx, 1);
    }

    if (neg && inv) {
        op2 = opt_insert_after(ctx, op, INDEX_op_add, 3);
        op2->args[0] = ret;
        op2->args[1] = ret;
        op2->args[2] = arg_new_constant(ctx, -1);
    } else if (inv) {
        op2 = opt_insert_after(ctx, op, INDEX_op_xor, 3);
        op2->args[0] = ret;
        op2->args[1] = ret;
        op2->args[2] = arg_new_constant(ctx, 1);
    } else if (neg) {
        op2 = opt_insert_after(ctx, op, neg_opc, 2);
        op2->args[0] = ret;
        op2->args[1] = ret;
    }
}
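
/*
 * Worked example (illustrative): testing a single bit reduces to an
 * extract of that bit:
 *
 *     setcond r, x, $0x10, tstne  =>  extract r, x, 4, 1
 *
 * and with neg, the sextract form yields 0/-1 directly.
 */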

static bool fold_setcond(OptContext *ctx, TCGOp *op)
{
    int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
                                      &op->args[2], &op->args[3]);
    if (i >= 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }

    i = fold_setcond_zmask(ctx, op, false);
    if (i > 0) {
        return true;
    }
    if (i == 0) {
        fold_setcond_tst_pow2(ctx, op, false);
    }

    return fold_masks_z(ctx, op, 1);
}

static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
{
    int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
                                      &op->args[2], &op->args[3]);
    if (i >= 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
    }

    i = fold_setcond_zmask(ctx, op, true);
    if (i > 0) {
        return true;
    }
    if (i == 0) {
        fold_setcond_tst_pow2(ctx, op, true);
    }

    /* Value is {0,-1} so all bits are repetitions of the sign. */
    return fold_masks_s(ctx, op, -1);
}

static bool fold_setcond2(OptContext *ctx, TCGOp *op)
{
    TCGCond cond;
    int i, inv = 0;

    i = do_constant_folding_cond2(ctx, op, &op->args[1]);
    cond = op->args[5];
    if (i >= 0) {
        goto do_setcond_const;
    }

    switch (cond) {
    case TCG_COND_LT:
    case TCG_COND_GE:
        /*
         * Simplify LT/GE comparisons vs zero to a single compare
         * vs the high word of the input.
         */
        if (arg_is_const_val(op->args[3], 0) &&
            arg_is_const_val(op->args[4], 0)) {
            goto do_setcond_high;
        }
        break;

    case TCG_COND_NE:
        inv = 1;
        QEMU_FALLTHROUGH;
    case TCG_COND_EQ:
        /*
         * Simplify EQ/NE comparisons where one of the pairs
         * can be simplified.
         */
        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
                                     op->args[3], cond);
        switch (i ^ inv) {
        case 0:
            goto do_setcond_const;
        case 1:
            goto do_setcond_high;
        }

        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
                                     op->args[4], cond);
        switch (i ^ inv) {
        case 0:
            goto do_setcond_const;
        case 1:
            goto do_setcond_low;
        }
        break;

    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        if (arg_is_const_val(op->args[3], 0)) {
            goto do_setcond_high;
        }
        if (arg_is_const_val(op->args[4], 0)) {
            goto do_setcond_low;
        }
        break;

    default:
        break;

    do_setcond_low:
        op->args[2] = op->args[3];
        op->args[3] = cond;
        op->opc = INDEX_op_setcond_i32;
        return fold_setcond(ctx, op);

    do_setcond_high:
        op->args[1] = op->args[2];
        op->args[2] = op->args[4];
        op->args[3] = cond;
        op->opc = INDEX_op_setcond_i32;
        return fold_setcond(ctx, op);
    }

    return fold_masks_z(ctx, op, 1);

 do_setcond_const:
    return tcg_opt_gen_movi(ctx, op, op->args[0], i);
}

static bool fold_sextract(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, s_mask, s_mask_old;
    TempOptInfo *t1 = arg_info(op->args[1]);
    int pos = op->args[2];
    int len = op->args[3];

    if (ti_is_const(t1)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0],
                                sextract64(ti_const_val(t1), pos, len));
    }

    s_mask_old = t1->s_mask;
    s_mask = s_mask_old >> pos;
    s_mask |= -1ull << (len - 1);

    if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
        return true;
    }

    z_mask = sextract64(t1->z_mask, pos, len);
    return fold_masks_zs(ctx, op, z_mask, s_mask);
}

static bool fold_shift(OptContext *ctx, TCGOp *op)
{
    uint64_t s_mask, z_mask;
    TempOptInfo *t1, *t2;

    if (fold_const2(ctx, op) ||
        fold_ix_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }

    t1 = arg_info(op->args[1]);
    t2 = arg_info(op->args[2]);
    s_mask = t1->s_mask;
    z_mask = t1->z_mask;

    if (ti_is_const(t2)) {
        int sh = ti_const_val(t2);

        z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
        s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);

        return fold_masks_zs(ctx, op, z_mask, s_mask);
    }

    switch (op->opc) {
    CASE_OP_32_64(sar):
        /*
         * Arithmetic right shift will not reduce the number of
         * input sign repetitions.
         */
        return fold_masks_s(ctx, op, s_mask);
    CASE_OP_32_64(shr):
        /*
         * If the sign bit is known zero, then logical right shift
         * will not reduce the number of input sign repetitions.
         */
        if (~z_mask & -s_mask) {
            return fold_masks_s(ctx, op, s_mask);
        }
        break;
    default:
        break;
    }

    return finish_folding(ctx, op);
}
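
/*
 * Worked example (illustrative): with a constant shift both masks are
 * simply shifted, e.g. "shr r, x, $4" with x->z_mask == 0xff00 gives
 * z_mask == 0x0ff0.  For sar, the sign repetitions can only increase,
 * so s_mask is carried through unchanged.
 */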

static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
{
    TCGOpcode neg_op;
    bool have_neg;

    if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
        return false;
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        neg_op = INDEX_op_neg_i32;
        have_neg = true;
        break;
    case TCG_TYPE_I64:
        neg_op = INDEX_op_neg_i64;
        have_neg = true;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        neg_op = INDEX_op_neg_vec;
        have_neg = (TCG_TARGET_HAS_neg_vec &&
                    tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
        break;
    default:
        g_assert_not_reached();
    }
    if (have_neg) {
        op->opc = neg_op;
        op->args[1] = op->args[2];
        return fold_neg_no_const(ctx, op);
    }
    return false;
}

/* We cannot as yet do_constant_folding with vectors. */
static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
{
    if (fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_sub_to_neg(ctx, op)) {
        return true;
    }
    return finish_folding(ctx, op);
}

static bool fold_sub(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_sub_to_neg(ctx, op)) {
        return true;
    }

    /* Fold sub r,x,i to add r,x,-i */
    if (arg_is_const(op->args[2])) {
        uint64_t val = arg_info(op->args[2])->val;

        op->opc = INDEX_op_add;
        op->args[2] = arg_new_constant(ctx, -val);
    }
    return finish_folding(ctx, op);
}
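
/*
 * Canonicalization above (illustrative): subtraction of a constant is
 * rewritten as addition of its negation,
 *
 *     sub r, x, $5  =>  add r, x, $-5
 *
 * so that later passes need only match the add form.
 */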

static bool fold_sub2(OptContext *ctx, TCGOp *op)
{
    return fold_addsub2(ctx, op, false);
}

static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask = -1, s_mask = 0;

    /* We can't do any folding with a load, but we can record bits. */
    switch (op->opc) {
    CASE_OP_32_64(ld8s):
        s_mask = INT8_MIN;
        break;
    CASE_OP_32_64(ld8u):
        z_mask = MAKE_64BIT_MASK(0, 8);
        break;
    CASE_OP_32_64(ld16s):
        s_mask = INT16_MIN;
        break;
    CASE_OP_32_64(ld16u):
        z_mask = MAKE_64BIT_MASK(0, 16);
        break;
    case INDEX_op_ld32s_i64:
        s_mask = INT32_MIN;
        break;
    case INDEX_op_ld32u_i64:
        z_mask = MAKE_64BIT_MASK(0, 32);
        break;
    default:
        g_assert_not_reached();
    }
    return fold_masks_zs(ctx, op, z_mask, s_mask);
}
2736
Richard Hendersonab84dc32023-08-23 23:04:24 -07002737static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
2738{
2739 TCGTemp *dst, *src;
2740 intptr_t ofs;
2741 TCGType type;
2742
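    /* Copy tracking covers only fixed offsets from env; loads via any
       other base pointer cannot match a recorded store. */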
    if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
        return finish_folding(ctx, op);
    }

    type = ctx->type;
    ofs = op->args[2];
    dst = arg_temp(op->args[0]);
    src = find_mem_copy_for(ctx, type, ofs);
    if (src && src->base_type == type) {
        return tcg_opt_gen_mov(ctx, op, temp_arg(dst), temp_arg(src));
    }

    reset_ts(ctx, dst);
    record_mem_copy(ctx, type, dst, ofs, ofs + tcg_type_size(type) - 1);
    return true;
}

static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
{
    intptr_t ofs = op->args[2];
    intptr_t lm1;

    if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
        remove_mem_copy_all(ctx);
        return true;
    }

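    /* lm1 is the width of the store in bytes, less 1: the store
       clobbers bytes [ofs, ofs + lm1], and any copies recorded in
       that range must be forgotten. */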
    switch (op->opc) {
    CASE_OP_32_64(st8):
        lm1 = 0;
        break;
    CASE_OP_32_64(st16):
        lm1 = 1;
        break;
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
        lm1 = 3;
        break;
    case INDEX_op_st_i64:
        lm1 = 7;
        break;
    case INDEX_op_st_vec:
        lm1 = tcg_type_size(ctx->type) - 1;
        break;
    default:
        g_assert_not_reached();
    }
    remove_mem_copy_in(ctx, ofs, ofs + lm1);
    return true;
}

static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
{
    TCGTemp *src;
    intptr_t ofs, last;
    TCGType type;

    if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
        return fold_tcg_st(ctx, op);
    }

    src = arg_temp(op->args[0]);
    ofs = op->args[2];
    type = ctx->type;

    /*
     * Eliminate duplicate stores of a constant.
     * This happens frequently when the target ISA zero-extends.
     */
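    /*
     * (E.g. a guest register widened by 32-bit operations may have the
     * same constant zero written to its high half over and over.)
     */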
    if (ts_is_const(src)) {
        TCGTemp *prev = find_mem_copy_for(ctx, type, ofs);
        if (src == prev) {
            tcg_op_remove(ctx->tcg, op);
            return true;
        }
    }

    last = ofs + tcg_type_size(type) - 1;
    remove_mem_copy_in(ctx, ofs, last);
    record_mem_copy(ctx, type, src, ofs, last);
    return true;
}

static bool fold_xor(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, s_mask;
    TempOptInfo *t1, *t2;

    if (fold_const2_commutative(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_xi_to_not(ctx, op, -1)) {
        return true;
    }

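    /*
     * A result bit of xor can be nonzero only if it is nonzero in at
     * least one input, so the known-zero masks combine with OR; a bit
     * is guaranteed to match the result's msb only when it does so in
     * both inputs, so the sign masks combine with AND.
     */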
    t1 = arg_info(op->args[1]);
    t2 = arg_info(op->args[2]);
    z_mask = t1->z_mask | t2->z_mask;
    s_mask = t1->s_mask & t2->s_mask;
    return fold_masks_zs(ctx, op, z_mask, s_mask);
}

/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
    int nb_temps, i;
    TCGOp *op, *op_next;
    OptContext ctx = { .tcg = s };

    QSIMPLEQ_INIT(&ctx.mem_free);

    /*
     * Each temp has a TempOptInfo, reached via state_ptr.  If the temp
     * holds a constant then its value is kept there.  If the temp is a
     * copy of other ones then the other copies are available through
     * the doubly linked circular list.
     */

    nb_temps = s->nb_temps;
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = NULL;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def;
        bool done = false;

        /* Calls are special. */
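        /* (Variable argument count, and possible clobbering of
           globals; fold_call does all of its own bookkeeping.) */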
        if (opc == INDEX_op_call) {
            fold_call(&ctx, op);
            continue;
        }

        def = &tcg_op_defs[opc];
        init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
        copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);

        /* Pre-compute the type of the operation. */
        ctx.type = TCGOP_TYPE(op);

        /*
         * Process each opcode.
         * Sorted alphabetically by opcode as much as possible.
         */
        switch (opc) {
        case INDEX_op_add:
            done = fold_add(&ctx, op);
            break;
        case INDEX_op_add_vec:
            done = fold_add_vec(&ctx, op);
            break;
        CASE_OP_32_64(add2):
            done = fold_add2(&ctx, op);
            break;
        case INDEX_op_and:
        case INDEX_op_and_vec:
            done = fold_and(&ctx, op);
            break;
        case INDEX_op_andc:
        case INDEX_op_andc_vec:
            done = fold_andc(&ctx, op);
            break;
        CASE_OP_32_64(brcond):
            done = fold_brcond(&ctx, op);
            break;
        case INDEX_op_brcond2_i32:
            done = fold_brcond2(&ctx, op);
            break;
        CASE_OP_32_64(bswap16):
        CASE_OP_32_64(bswap32):
        case INDEX_op_bswap64_i64:
            done = fold_bswap(&ctx, op);
            break;
        CASE_OP_32_64(clz):
        CASE_OP_32_64(ctz):
            done = fold_count_zeros(&ctx, op);
            break;
        CASE_OP_32_64(ctpop):
            done = fold_ctpop(&ctx, op);
            break;
        CASE_OP_32_64(deposit):
            done = fold_deposit(&ctx, op);
            break;
        CASE_OP_32_64(div):
        CASE_OP_32_64(divu):
            done = fold_divide(&ctx, op);
            break;
        case INDEX_op_dup_vec:
            done = fold_dup(&ctx, op);
            break;
        case INDEX_op_dup2_vec:
            done = fold_dup2(&ctx, op);
            break;
        case INDEX_op_eqv:
        case INDEX_op_eqv_vec:
            done = fold_eqv(&ctx, op);
            break;
        CASE_OP_32_64(extract):
            done = fold_extract(&ctx, op);
            break;
        CASE_OP_32_64(extract2):
            done = fold_extract2(&ctx, op);
            break;
        case INDEX_op_ext_i32_i64:
            done = fold_exts(&ctx, op);
            break;
        case INDEX_op_extu_i32_i64:
        case INDEX_op_extrl_i64_i32:
        case INDEX_op_extrh_i64_i32:
            done = fold_extu(&ctx, op);
            break;
        CASE_OP_32_64(ld8s):
        CASE_OP_32_64(ld8u):
        CASE_OP_32_64(ld16s):
        CASE_OP_32_64(ld16u):
        case INDEX_op_ld32s_i64:
        case INDEX_op_ld32u_i64:
            done = fold_tcg_ld(&ctx, op);
            break;
        case INDEX_op_ld_i32:
        case INDEX_op_ld_i64:
        case INDEX_op_ld_vec:
            done = fold_tcg_ld_memcopy(&ctx, op);
            break;
        CASE_OP_32_64(st8):
        CASE_OP_32_64(st16):
        case INDEX_op_st32_i64:
            done = fold_tcg_st(&ctx, op);
            break;
        case INDEX_op_st_i32:
        case INDEX_op_st_i64:
        case INDEX_op_st_vec:
            done = fold_tcg_st_memcopy(&ctx, op);
            break;
        case INDEX_op_mb:
            done = fold_mb(&ctx, op);
            break;
        case INDEX_op_mov:
        case INDEX_op_mov_vec:
            done = fold_mov(&ctx, op);
            break;
        CASE_OP_32_64(movcond):
            done = fold_movcond(&ctx, op);
            break;
        CASE_OP_32_64(mul):
            done = fold_mul(&ctx, op);
            break;
        CASE_OP_32_64(mulsh):
        CASE_OP_32_64(muluh):
            done = fold_mul_highpart(&ctx, op);
            break;
        CASE_OP_32_64(muls2):
        CASE_OP_32_64(mulu2):
            done = fold_multiply2(&ctx, op);
            break;
        case INDEX_op_nand:
        case INDEX_op_nand_vec:
            done = fold_nand(&ctx, op);
            break;
        CASE_OP_32_64(neg):
            done = fold_neg(&ctx, op);
            break;
        case INDEX_op_nor:
        case INDEX_op_nor_vec:
            done = fold_nor(&ctx, op);
            break;
        CASE_OP_32_64_VEC(not):
            done = fold_not(&ctx, op);
            break;
        case INDEX_op_or:
        case INDEX_op_or_vec:
            done = fold_or(&ctx, op);
            break;
        case INDEX_op_orc:
        case INDEX_op_orc_vec:
            done = fold_orc(&ctx, op);
            break;
        case INDEX_op_qemu_ld_i32:
            done = fold_qemu_ld_1reg(&ctx, op);
            break;
        case INDEX_op_qemu_ld_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                done = fold_qemu_ld_1reg(&ctx, op);
                break;
            }
            QEMU_FALLTHROUGH;
        case INDEX_op_qemu_ld_i128:
            done = fold_qemu_ld_2reg(&ctx, op);
            break;
        case INDEX_op_qemu_st8_i32:
        case INDEX_op_qemu_st_i32:
        case INDEX_op_qemu_st_i64:
        case INDEX_op_qemu_st_i128:
            done = fold_qemu_st(&ctx, op);
            break;
        CASE_OP_32_64(rem):
        CASE_OP_32_64(remu):
            done = fold_remainder(&ctx, op);
            break;
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
            done = fold_shift(&ctx, op);
            break;
        CASE_OP_32_64(setcond):
            done = fold_setcond(&ctx, op);
            break;
        CASE_OP_32_64(negsetcond):
            done = fold_negsetcond(&ctx, op);
            break;
        case INDEX_op_setcond2_i32:
            done = fold_setcond2(&ctx, op);
            break;
        case INDEX_op_cmp_vec:
            done = fold_cmp_vec(&ctx, op);
            break;
        case INDEX_op_cmpsel_vec:
            done = fold_cmpsel_vec(&ctx, op);
            break;
        case INDEX_op_bitsel_vec:
            done = fold_bitsel_vec(&ctx, op);
            break;
        CASE_OP_32_64(sextract):
            done = fold_sextract(&ctx, op);
            break;
        case INDEX_op_sub:
            done = fold_sub(&ctx, op);
            break;
        case INDEX_op_sub_vec:
            done = fold_sub_vec(&ctx, op);
            break;
        CASE_OP_32_64(sub2):
            done = fold_sub2(&ctx, op);
            break;
        case INDEX_op_xor:
        case INDEX_op_xor_vec:
            done = fold_xor(&ctx, op);
            break;
        case INDEX_op_set_label:
        case INDEX_op_br:
        case INDEX_op_exit_tb:
        case INDEX_op_goto_tb:
        case INDEX_op_goto_ptr:
            finish_ebb(&ctx);
            done = true;
            break;
        default:
            done = finish_folding(&ctx, op);
            break;
        }
        tcg_debug_assert(done);
    }
}