/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "qemu/interval-tree.h"
#include "tcg/tcg-op-common.h"
#include "tcg-internal.h"
#include "tcg-has.h"


typedef struct MemCopyInfo {
    IntervalTreeNode itree;
    QSIMPLEQ_ENTRY(MemCopyInfo) next;
    TCGTemp *ts;
    TCGType type;
} MemCopyInfo;

typedef struct TempOptInfo {
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
    uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
    uint64_t o_mask; /* mask bit is 1 if and only if value bit is 1 */
    uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
} TempOptInfo;
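
/*
 * For example: a temp holding the constant 0x2c has z_mask = o_mask = 0x2c,
 * and bits 63..6 all match the (zero) msb, so s_mask = ~0x3f.  A temp
 * known only to be zero-extended from 8 bits has z_mask = 0xff, o_mask = 0
 * (no bit is known to be 1), and s_mask = ~0xff.
 */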

typedef struct OptContext {
    TCGContext *tcg;
    TCGOp *prev_mb;
    TCGTempSet temps_used;

    IntervalTreeRoot mem_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;

    /* In flight values from optimization. */
    TCGType type;
    int carry_state; /* -1 = non-constant, {0,1} = constant carry-in */
} OptContext;

static inline TempOptInfo *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline TempOptInfo *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ti_is_const(TempOptInfo *ti)
{
    /* If all bits that are not known zeros are known ones, it's constant. */
    return ti->z_mask == ti->o_mask;
}

static inline uint64_t ti_const_val(TempOptInfo *ti)
{
    /* If constant, both z_mask and o_mask contain the value. */
    return ti->z_mask;
}

static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
{
    return ti_is_const(ti) && ti_const_val(ti) == val;
}
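
/*
 * E.g. z_mask == o_mask == 0x10 means every bit that may be 1 (bit 4)
 * is also known to be 1, so the value can only be 0x10; any bit where
 * the two masks differ is unknown and the temp is not constant.
 */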

static inline bool ts_is_const(TCGTemp *ts)
{
    return ti_is_const(ts_info(ts));
}

static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
{
    return ti_is_const_val(ts_info(ts), val);
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline uint64_t arg_const_val(TCGArg arg)
{
    return ti_const_val(arg_info(arg));
}

static inline bool arg_is_const_val(TCGArg arg, uint64_t val)
{
    return ts_is_const_val(arg_temp(arg), val);
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}

static TCGTemp *cmp_better_copy(TCGTemp *a, TCGTemp *b)
{
    return a->kind < b->kind ? b : a;
}
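
/*
 * This relies on the TCGTempKind enum being ordered with the cheaper,
 * longer-lived kinds last, so that e.g. a TEMP_CONST or TEMP_GLOBAL
 * copy is preferred over a TEMP_EBB one (see the enum in tcg.h).
 */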

/* Initialize and activate a temporary. */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    TempOptInfo *ti;

    if (test_bit(idx, ctx->temps_used.l)) {
        return;
    }
    set_bit(idx, ctx->temps_used.l);

    ti = ts->state_ptr;
    if (ti == NULL) {
        ti = tcg_malloc(sizeof(TempOptInfo));
        ts->state_ptr = ti;
    }

    ti->next_copy = ts;
    ti->prev_copy = ts;
    QSIMPLEQ_INIT(&ti->mem_copy);
    if (ts->kind == TEMP_CONST) {
        ti->z_mask = ts->val;
        ti->o_mask = ts->val;
        ti->s_mask = INT64_MIN >> clrsb64(ts->val);
    } else {
        ti->z_mask = -1;
        ti->o_mask = 0;
        ti->s_mask = 0;
    }
}

static MemCopyInfo *mem_copy_first(OptContext *ctx, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_first(&ctx->mem_copy, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static MemCopyInfo *mem_copy_next(MemCopyInfo *mem, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_next(&mem->itree, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static void remove_mem_copy(OptContext *ctx, MemCopyInfo *mc)
{
    TCGTemp *ts = mc->ts;
    TempOptInfo *ti = ts_info(ts);

    interval_tree_remove(&mc->itree, &ctx->mem_copy);
    QSIMPLEQ_REMOVE(&ti->mem_copy, mc, MemCopyInfo, next);
    QSIMPLEQ_INSERT_TAIL(&ctx->mem_free, mc, next);
}

static void remove_mem_copy_in(OptContext *ctx, intptr_t s, intptr_t l)
{
    while (true) {
        MemCopyInfo *mc = mem_copy_first(ctx, s, l);
        if (!mc) {
            break;
        }
        remove_mem_copy(ctx, mc);
    }
}

static void remove_mem_copy_all(OptContext *ctx)
{
    remove_mem_copy_in(ctx, 0, -1);
    tcg_debug_assert(interval_tree_is_empty(&ctx->mem_copy));
}

static TCGTemp *find_better_copy(TCGTemp *ts)
{
    TCGTemp *i, *ret;

    /* If this is already readonly, we can't do better. */
    if (temp_readonly(ts)) {
        return ts;
    }

    ret = ts;
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        ret = cmp_better_copy(ret, i);
    }
    return ret;
}

static void move_mem_copies(TCGTemp *dst_ts, TCGTemp *src_ts)
{
    TempOptInfo *si = ts_info(src_ts);
    TempOptInfo *di = ts_info(dst_ts);
    MemCopyInfo *mc;

    QSIMPLEQ_FOREACH(mc, &si->mem_copy, next) {
        tcg_debug_assert(mc->ts == src_ts);
        mc->ts = dst_ts;
    }
    QSIMPLEQ_CONCAT(&di->mem_copy, &si->mem_copy);
}

/* Reset TEMP's state, possibly removing the temp from the list of copies. */
static void reset_ts(OptContext *ctx, TCGTemp *ts)
{
    TempOptInfo *ti = ts_info(ts);
    TCGTemp *pts = ti->prev_copy;
    TCGTemp *nts = ti->next_copy;
    TempOptInfo *pi = ts_info(pts);
    TempOptInfo *ni = ts_info(nts);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->z_mask = -1;
    ti->o_mask = 0;
    ti->s_mask = 0;

    if (!QSIMPLEQ_EMPTY(&ti->mem_copy)) {
        if (ts == nts) {
            /* Last temp copy being removed, the mem copies die. */
            MemCopyInfo *mc;
            QSIMPLEQ_FOREACH(mc, &ti->mem_copy, next) {
                interval_tree_remove(&mc->itree, &ctx->mem_copy);
            }
            QSIMPLEQ_CONCAT(&ctx->mem_free, &ti->mem_copy);
        } else {
            move_mem_copies(find_better_copy(nts), ts);
        }
    }
}

static void reset_temp(OptContext *ctx, TCGArg arg)
{
    reset_ts(ctx, arg_temp(arg));
}

static void record_mem_copy(OptContext *ctx, TCGType type,
                            TCGTemp *ts, intptr_t start, intptr_t last)
{
    MemCopyInfo *mc;
    TempOptInfo *ti;

    mc = QSIMPLEQ_FIRST(&ctx->mem_free);
    if (mc) {
        QSIMPLEQ_REMOVE_HEAD(&ctx->mem_free, next);
    } else {
        mc = tcg_malloc(sizeof(*mc));
    }

    memset(mc, 0, sizeof(*mc));
    mc->itree.start = start;
    mc->itree.last = last;
    mc->type = type;
    interval_tree_insert(&mc->itree, &ctx->mem_copy);

    ts = find_better_copy(ts);
    ti = ts_info(ts);
    mc->ts = ts;
    QSIMPLEQ_INSERT_TAIL(&ti->mem_copy, mc, next);
}

static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}
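
/*
 * The copies of a temp form a circular doubly-linked list through
 * prev_copy/next_copy; a singleton ring means "no copies".  After
 * "mov t1, t0; mov t2, t0", all of t0, t1 and t2 share one ring, and
 * ts_are_copies() simply walks the ring looking for the other temp.
 */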

static TCGTemp *find_mem_copy_for(OptContext *ctx, TCGType type, intptr_t s)
{
    MemCopyInfo *mc;

    for (mc = mem_copy_first(ctx, s, s); mc; mc = mem_copy_next(mc, s, s)) {
        if (mc->itree.start == s && mc->type == type) {
            return find_better_copy(mc->ts);
        }
    }
    return NULL;
}

static TCGArg arg_new_constant(OptContext *ctx, uint64_t val)
{
    TCGType type = ctx->type;
    TCGTemp *ts;

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    ts = tcg_constant_internal(type, val);
    init_ts_info(ctx, ts);

    return temp_arg(ts);
}
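
/*
 * Note the canonicalization above: 32-bit constants are kept in
 * sign-extended form, so e.g. arg_new_constant(ctx, 0xffffffff) with
 * ctx->type == TCG_TYPE_I32 returns the same temp as for -1.
 */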

static TCGArg arg_new_temp(OptContext *ctx)
{
    TCGTemp *ts = tcg_temp_new_internal(ctx->type, TEMP_EBB);
    init_ts_info(ctx, ts);
    return temp_arg(ts);
}

static TCGOp *opt_insert_after(OptContext *ctx, TCGOp *op,
                               TCGOpcode opc, unsigned narg)
{
    return tcg_op_insert_after(ctx->tcg, op, opc, ctx->type, narg);
}

static TCGOp *opt_insert_before(OptContext *ctx, TCGOp *op,
                                TCGOpcode opc, unsigned narg)
{
    return tcg_op_insert_before(ctx->tcg, op, opc, ctx->type, narg);
}

static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    TempOptInfo *di;
    TempOptInfo *si;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }

    reset_ts(ctx, dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);

    switch (ctx->type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        new_op = INDEX_op_mov;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /* TCGOP_TYPE and TCGOP_VECE remain unchanged. */
        new_op = INDEX_op_mov_vec;
        break;
    default:
        g_assert_not_reached();
    }
    op->opc = new_op;
    op->args[0] = dst;
    op->args[1] = src;

    di->z_mask = si->z_mask;
    di->o_mask = si->o_mask;
    di->s_mask = si->s_mask;

    if (src_ts->type == dst_ts->type) {
        TempOptInfo *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;

        if (!QSIMPLEQ_EMPTY(&si->mem_copy)
            && cmp_better_copy(src_ts, dst_ts) == dst_ts) {
            move_mem_copies(dst_ts, src_ts);
        }
    } else if (dst_ts->type == TCG_TYPE_I32) {
        di->z_mask = (int32_t)di->z_mask;
        di->o_mask = (int32_t)di->o_mask;
        di->s_mask |= INT32_MIN;
    } else {
        di->z_mask |= MAKE_64BIT_MASK(32, 32);
        di->o_mask = (uint32_t)di->o_mask;
        di->s_mask = INT64_MIN;
    }
    return true;
}

static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                             TCGArg dst, uint64_t val)
{
    /* Convert movi to mov with constant temp. */
    return tcg_opt_gen_mov(ctx, op, dst, arg_new_constant(ctx, val));
}

static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
                                      uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

    switch (op) {
    case INDEX_op_add:
        return x + y;

    case INDEX_op_sub:
        return x - y;

    case INDEX_op_mul:
        return x * y;

    case INDEX_op_and:
    case INDEX_op_and_vec:
        return x & y;

    case INDEX_op_or:
    case INDEX_op_or_vec:
        return x | y;

    case INDEX_op_xor:
    case INDEX_op_xor_vec:
        return x ^ y;

    case INDEX_op_shl:
        if (type == TCG_TYPE_I32) {
            return (uint32_t)x << (y & 31);
        }
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr:
        if (type == TCG_TYPE_I32) {
            return (uint32_t)x >> (y & 31);
        }
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar:
        if (type == TCG_TYPE_I32) {
            return (int32_t)x >> (y & 31);
        }
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr:
        if (type == TCG_TYPE_I32) {
            return ror32(x, y & 31);
        }
        return ror64(x, y & 63);

    case INDEX_op_rotl:
        if (type == TCG_TYPE_I32) {
            return rol32(x, y & 31);
        }
        return rol64(x, y & 63);

    case INDEX_op_not:
    case INDEX_op_not_vec:
        return ~x;

    case INDEX_op_neg:
        return -x;

    case INDEX_op_andc:
    case INDEX_op_andc_vec:
        return x & ~y;

    case INDEX_op_orc:
    case INDEX_op_orc_vec:
        return x | ~y;

    case INDEX_op_eqv:
    case INDEX_op_eqv_vec:
        return ~(x ^ y);

    case INDEX_op_nand:
    case INDEX_op_nand_vec:
        return ~(x & y);

    case INDEX_op_nor:
    case INDEX_op_nor_vec:
        return ~(x | y);

    case INDEX_op_clz:
        if (type == TCG_TYPE_I32) {
            return (uint32_t)x ? clz32(x) : y;
        }
        return x ? clz64(x) : y;

    case INDEX_op_ctz:
        if (type == TCG_TYPE_I32) {
            return (uint32_t)x ? ctz32(x) : y;
        }
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop:
        return type == TCG_TYPE_I32 ? ctpop32(x) : ctpop64(x);

    case INDEX_op_bswap16:
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

    case INDEX_op_bswap32:
        x = bswap32(x);
        return y & TCG_BSWAP_OS ? (int32_t)x : x;

    case INDEX_op_bswap64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh:
        if (type == TCG_TYPE_I32) {
            return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
        }
        mulu64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_mulsh:
        if (type == TCG_TYPE_I32) {
            return ((int64_t)(int32_t)x * (int32_t)y) >> 32;
        }
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_divs:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        if (type == TCG_TYPE_I32) {
            return (int32_t)x / ((int32_t)y ? : 1);
        }
        return (int64_t)x / ((int64_t)y ? : 1);

    case INDEX_op_divu:
        if (type == TCG_TYPE_I32) {
            return (uint32_t)x / ((uint32_t)y ? : 1);
        }
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rems:
        if (type == TCG_TYPE_I32) {
            return (int32_t)x % ((int32_t)y ? : 1);
        }
        return (int64_t)x % ((int64_t)y ? : 1);

    case INDEX_op_remu:
        if (type == TCG_TYPE_I32) {
            return (uint32_t)x % ((uint32_t)y ? : 1);
        }
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        g_assert_not_reached();
    }
}

static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
                                    uint64_t x, uint64_t y)
{
    uint64_t res = do_constant_folding_2(op, type, x, y);
    if (type == TCG_TYPE_I32) {
        res = (int32_t)res;
    }
    return res;
}
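
/*
 * The final (int32_t) cast implements both modular 32-bit arithmetic
 * and the sign-extended representation of 32-bit values: e.g. folding
 * an I32 add of 0x7fffffff + 1 yields 0x80000000, stored as
 * 0xffffffff80000000.
 */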

static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

static int do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        return -1;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}
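
/*
 * Note that the TST conditions cannot be decided from x == y alone:
 * TSTNE x,x is (x & x) != 0, i.e. x != 0, which still depends on the
 * value, hence the -1 ("not known") result above.
 */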

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond(TCGType type, TCGArg x,
                                    TCGArg y, TCGCond c)
{
    if (arg_is_const(x) && arg_is_const(y)) {
        uint64_t xv = arg_const_val(x);
        uint64_t yv = arg_const_val(y);

        switch (type) {
        case TCG_TYPE_I32:
            return do_constant_folding_cond_32(xv, yv, c);
        case TCG_TYPE_I64:
            return do_constant_folding_cond_64(xv, yv, c);
        default:
            /* Only scalar comparisons are optimizable */
            return -1;
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const_val(y, 0)) {
        switch (c) {
        case TCG_COND_LTU:
        case TCG_COND_TSTNE:
            return 0;
        case TCG_COND_GEU:
        case TCG_COND_TSTEQ:
            return 1;
        default:
            return -1;
        }
    }
    return -1;
}

/**
 * swap_commutative:
 * @dest: TCGArg of the destination argument, or NO_DEST.
 * @p1: first paired argument
 * @p2: second paired argument
 *
 * If *@p1 is a constant and *@p2 is not, swap.
 * If *@p2 matches @dest, swap.
 * Return true if a swap was performed.
 */

#define NO_DEST temp_arg(NULL)

static int pref_commutative(TempOptInfo *ti)
{
    /* Slight preference for non-zero constants second. */
    return !ti_is_const(ti) ? 0 : ti_const_val(ti) ? 3 : 2;
}
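
/*
 * For example, given "add t0, 0x100, t1", the constant scores 3 against
 * 0 for t1, so swap_commutative() below moves it to the second operand.
 * Between two constants, a zero (scoring 2) stays or moves first, which
 * is the slight preference for non-zero constants second.
 */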

static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += pref_commutative(arg_info(a1));
    sum -= pref_commutative(arg_info(a2));

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}

static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += pref_commutative(arg_info(p1[0]));
    sum += pref_commutative(arg_info(p1[1]));
    sum -= pref_commutative(arg_info(p2[0]));
    sum -= pref_commutative(arg_info(p2[1]));
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest,
                                     TCGArg *p1, TCGArg *p2, TCGArg *pcond)
{
    TCGCond cond;
    TempOptInfo *i1;
    bool swap;
    int r;

    swap = swap_commutative(dest, p1, p2);
    cond = *pcond;
    if (swap) {
        *pcond = cond = tcg_swap_cond(cond);
    }

    r = do_constant_folding_cond(ctx->type, *p1, *p2, cond);
    if (r >= 0) {
        return r;
    }
    if (!is_tst_cond(cond)) {
        return -1;
    }

    i1 = arg_info(*p1);

    /*
     * TSTNE x,x -> NE x,0
     * TSTNE x,i -> NE x,0 if i includes all nonzero bits of x
     */
    if (args_are_copies(*p1, *p2) ||
        (arg_is_const(*p2) && (i1->z_mask & ~arg_const_val(*p2)) == 0)) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
        return -1;
    }

    /* TSTNE x,i -> LT x,0 if i only includes sign bit copies */
    if (arg_is_const(*p2) && (arg_const_val(*p2) & ~i1->s_mask) == 0) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_ltge_cond(cond);
        return -1;
    }

    /* Expand to AND with a temporary if no backend support. */
    if (!TCG_TARGET_HAS_tst) {
        TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and, 3);
        TCGArg tmp = arg_new_temp(ctx);

        op2->args[0] = tmp;
        op2->args[1] = *p1;
        op2->args[2] = *p2;

        *p1 = tmp;
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
    }
    return -1;
}

static int do_constant_folding_cond2(OptContext *ctx, TCGOp *op, TCGArg *args)
{
    TCGArg al, ah, bl, bh;
    TCGCond c;
    bool swap;
    int r;

    swap = swap_commutative2(args, args + 2);
    c = args[4];
    if (swap) {
        args[4] = c = tcg_swap_cond(c);
    }

    al = args[0];
    ah = args[1];
    bl = args[2];
    bh = args[3];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_const_val(bl);
        tcg_target_ulong bhv = arg_const_val(bh);
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_const_val(al);
            tcg_target_ulong ahv = arg_const_val(ah);
            uint64_t a = deposit64(alv, 32, 32, ahv);

            r = do_constant_folding_cond_64(a, b, c);
            if (r >= 0) {
                return r;
            }
        }

        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
            case TCG_COND_TSTNE:
                return 0;
            case TCG_COND_GEU:
            case TCG_COND_TSTEQ:
                return 1;
            default:
                break;
            }
        }

        /* TSTNE x,-1 -> NE x,0 */
        if (b == -1 && is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }

        /* TSTNE x,sign -> LT x,0 */
        if (b == INT64_MIN && is_tst_cond(c)) {
            /* bl must be 0, so copy that to bh */
            args[3] = bl;
            args[4] = tcg_tst_ltge_cond(c);
            return -1;
        }
    }

    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        r = do_constant_folding_cond_eq(c);
        if (r >= 0) {
            return r;
        }

        /* TSTNE x,x -> NE x,0 */
        if (is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }
    }

    /* Expand to AND with a temporary if no backend support. */
    if (!TCG_TARGET_HAS_tst && is_tst_cond(c)) {
        TCGOp *op1 = opt_insert_before(ctx, op, INDEX_op_and, 3);
        TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and, 3);
        TCGArg t1 = arg_new_temp(ctx);
        TCGArg t2 = arg_new_temp(ctx);

        op1->args[0] = t1;
        op1->args[1] = al;
        op1->args[2] = bl;
        op2->args[0] = t2;
        op2->args[1] = ah;
        op2->args[2] = bh;

        args[0] = t1;
        args[1] = t2;
        args[3] = args[2] = arg_new_constant(ctx, 0);
        args[4] = tcg_tst_eqne_cond(c);
    }
    return -1;
}

static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
{
    for (int i = 0; i < nb_args; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        init_ts_info(ctx, ts);
    }
}

static void copy_propagate(OptContext *ctx, TCGOp *op,
                           int nb_oargs, int nb_iargs)
{
    for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts_is_copy(ts)) {
            op->args[i] = temp_arg(find_better_copy(ts));
        }
    }
}

static void finish_bb(OptContext *ctx)
{
    /* We only optimize memory barriers across basic blocks. */
    ctx->prev_mb = NULL;
}

static void finish_ebb(OptContext *ctx)
{
    finish_bb(ctx);
    /* We only optimize across extended basic blocks. */
    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
    remove_mem_copy_all(ctx);
}

static bool finish_folding(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    int i, nb_oargs;

    nb_oargs = def->nb_oargs;
    for (i = 0; i < nb_oargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        reset_ts(ctx, ts);
    }
    return true;
}

/*
 * The fold_* functions return true when processing is complete,
 * usually by folding the operation to a constant or to a copy,
 * and calling tcg_opt_gen_{mov,movi}. They may do other things,
 * like collect information about the value produced, for use in
 * optimizing a subsequent operation.
 *
 * These first fold_* functions are all helpers, used by other
 * folders for more specific operations.
 */

static bool fold_const1(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_const_val(op->args[1]);

        t = do_constant_folding(op->opc, ctx->type, t, 0);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_const2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_const_val(op->args[1]);
        uint64_t t2 = arg_const_val(op->args[2]);

        t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}

static bool fold_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return false;
}

static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return fold_const2(ctx, op);
}

/*
 * Record "zero", "one" and "sign" masks for the single output of @op.
 * See TempOptInfo definition of z_mask, o_mask and s_mask.
 * If z_mask and o_mask together show the value to be constant, fold
 * the output to that constant; if a_mask is zero, fold the output to
 * a copy of the first input.
 * The passed s_mask may be augmented by z_mask and o_mask.
 */
static bool fold_masks_zosa(OptContext *ctx, TCGOp *op, uint64_t z_mask,
                            uint64_t o_mask, int64_t s_mask, uint64_t a_mask)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    TCGTemp *ts;
    TempOptInfo *ti;
    int rep;

    /* Only single-output opcodes are supported here. */
    tcg_debug_assert(def->nb_oargs == 1);

    /*
     * 32-bit ops generate 32-bit results, which for the purpose of
     * simplifying tcg are sign-extended. Certainly that's how we
     * represent our constants elsewhere. Note that the bits will
     * be reset properly for a 64-bit value when encountering the
     * type changing opcodes.
     */
    if (ctx->type == TCG_TYPE_I32) {
        z_mask = (int32_t)z_mask;
        o_mask = (int32_t)o_mask;
        s_mask |= INT32_MIN;
        a_mask = (uint32_t)a_mask;
    }

    /* Bits that are known 1 and bits that are known 0 must not overlap. */
    tcg_debug_assert((o_mask & ~z_mask) == 0);

    /* If all bits that are not known zero are known one, it's a constant. */
    if (z_mask == o_mask) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], o_mask);
    }

    /* If no bits are affected, the operation devolves to a copy. */
    if (a_mask == 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }

    ts = arg_temp(op->args[0]);
    reset_ts(ctx, ts);

    ti = ts_info(ts);
    ti->z_mask = z_mask;
    ti->o_mask = o_mask; /* record known-one bits alongside known-zero */

    /* Canonicalize s_mask and incorporate data from z_mask. */
    rep = clz64(~s_mask);
    rep = MAX(rep, clz64(z_mask));
    rep = MAX(rep, clz64(~o_mask));
    rep = MAX(rep - 1, 0);
    ti->s_mask = INT64_MIN >> rep;

    return true;
}
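
/*
 * Example of the canonicalization above: for a result known to fit in
 * 16 unsigned bits (z_mask = 0xffff, o_mask = 0), clz64(z_mask) = 48,
 * so rep = 47 and s_mask = INT64_MIN >> 47: bits 63..16 are all copies
 * of the (zero) sign bit.
 */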

static bool fold_masks_zos(OptContext *ctx, TCGOp *op,
                           uint64_t z_mask, uint64_t o_mask, uint64_t s_mask)
{
    return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, -1);
}

static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
                          uint64_t z_mask, uint64_t s_mask)
{
    return fold_masks_zosa(ctx, op, z_mask, 0, s_mask, -1);
}

static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
{
    return fold_masks_zosa(ctx, op, z_mask, 0, 0, -1);
}

static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
{
    return fold_masks_zosa(ctx, op, -1, 0, s_mask, -1);
}

/*
 * An "affected" mask bit is 0 if and only if the result is identical
 * to the first input. Thus if the entire mask is 0, the operation
 * is equivalent to a copy.
 */
static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
{
    if (ctx->type == TCG_TYPE_I32) {
        a_mask = (uint32_t)a_mask;
    }
    if (a_mask == 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}
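
/*
 * For example, "and t0, t1, 0xff" with t1 already known to fit in
 * 8 bits (z_mask = 0xff) has a_mask = t1->z_mask & ~0xff = 0: the
 * AND cannot change t1, so the operation folds to "mov t0, t1".
 */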

/*
 * Convert @op to NOT, if NOT is supported by the host.
 * Return true if the conversion is successful, which will still
 * indicate that the processing is complete.
 */
static bool fold_not(OptContext *ctx, TCGOp *op);
static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
{
    TCGOpcode not_op;
    bool have_not;

    switch (ctx->type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        not_op = INDEX_op_not;
        have_not = tcg_op_supported(INDEX_op_not, ctx->type, 0);
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        not_op = INDEX_op_not_vec;
        have_not = TCG_TARGET_HAS_not_vec;
        break;
    default:
        g_assert_not_reached();
    }
    if (have_not) {
        op->opc = not_op;
        op->args[1] = op->args[idx];
        return fold_not(ctx, op);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to @i. */
static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to NOT. */
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return fold_to_not(ctx, op, 2);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to @i. */
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to identity. */
static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to NOT. */
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return fold_to_not(ctx, op, 1);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to @i. */
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to identity. */
static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}
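
/*
 * Typical uses of the helpers above: "xor x, x" folds to the constant 0
 * via fold_xx_to_i(ctx, op, 0), "and x, x" folds to a copy of x via
 * fold_xx_to_x(), "add x, 0" folds to a copy via fold_xi_to_x(ctx, op, 0),
 * and "xor x, -1" converts to NOT via fold_xi_to_not(ctx, op, -1).
 */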
1230
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001231/*
1232 * These outermost fold_<op> functions are sorted alphabetically.
Richard Hendersonca7bb042021-08-25 13:14:21 -07001233 *
1234 * The ordering of the transformations should be:
1235 * 1) those that produce a constant
1236 * 2) those that produce a copy
1237 * 3) those that produce information about the result value.
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001238 */
1239
Richard Hendersonaeb35142025-01-14 18:28:15 -08001240static bool fold_addco(OptContext *ctx, TCGOp *op);
Richard Henderson7d3c63a2024-12-09 14:06:08 -06001241static bool fold_or(OptContext *ctx, TCGOp *op);
1242static bool fold_orc(OptContext *ctx, TCGOp *op);
Richard Hendersonaeb35142025-01-14 18:28:15 -08001243static bool fold_subbo(OptContext *ctx, TCGOp *op);
Richard Henderson7d3c63a2024-12-09 14:06:08 -06001244static bool fold_xor(OptContext *ctx, TCGOp *op);
1245
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001246static bool fold_add(OptContext *ctx, TCGOp *op)
1247{
Richard Henderson7a2f7082021-08-26 07:06:39 -07001248 if (fold_const2_commutative(ctx, op) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001249 fold_xi_to_x(ctx, op, 0)) {
1250 return true;
1251 }
Richard Hendersonf3ed3cf2024-12-08 18:39:47 -06001252 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001253}
1254
Richard Hendersonc578ff12021-12-16 06:07:25 -08001255/* We cannot as yet do_constant_folding with vectors. */
1256static bool fold_add_vec(OptContext *ctx, TCGOp *op)
1257{
1258 if (fold_commutative(ctx, op) ||
1259 fold_xi_to_x(ctx, op, 0)) {
1260 return true;
1261 }
Richard Hendersonf3ed3cf2024-12-08 18:39:47 -06001262 return finish_folding(ctx, op);
Richard Hendersonc578ff12021-12-16 06:07:25 -08001263}
1264
Richard Hendersonaeb35142025-01-14 18:28:15 -08001265static void squash_prev_carryout(OptContext *ctx, TCGOp *op)
1266{
1267 TempOptInfo *t2;
1268
1269 op = QTAILQ_PREV(op, link);
1270 switch (op->opc) {
1271 case INDEX_op_addco:
1272 op->opc = INDEX_op_add;
1273 fold_add(ctx, op);
1274 break;
1275 case INDEX_op_addcio:
1276 op->opc = INDEX_op_addci;
1277 break;
1278 case INDEX_op_addc1o:
1279 op->opc = INDEX_op_add;
1280 t2 = arg_info(op->args[2]);
1281 if (ti_is_const(t2)) {
1282 op->args[2] = arg_new_constant(ctx, ti_const_val(t2) + 1);
1283 /* Perform other constant folding, if needed. */
1284 fold_add(ctx, op);
1285 } else {
1286 TCGArg ret = op->args[0];
1287 op = opt_insert_after(ctx, op, INDEX_op_add, 3);
1288 op->args[0] = ret;
1289 op->args[1] = ret;
1290 op->args[2] = arg_new_constant(ctx, 1);
1291 }
1292 break;
1293 default:
1294 g_assert_not_reached();
1295 }
1296}
1297
1298static bool fold_addci(OptContext *ctx, TCGOp *op)
Richard Henderson76f42782025-01-14 13:58:39 -08001299{
1300 fold_commutative(ctx, op);
Richard Hendersonaeb35142025-01-14 18:28:15 -08001301
1302 if (ctx->carry_state < 0) {
1303 return finish_folding(ctx, op);
1304 }
1305
1306 squash_prev_carryout(ctx, op);
1307 op->opc = INDEX_op_add;
1308
1309 if (ctx->carry_state > 0) {
1310 TempOptInfo *t2 = arg_info(op->args[2]);
1311
1312 /*
1313 * Propagate the known carry-in into a constant, if possible.
1314 * Otherwise emit a second add +1.
1315 */
1316 if (ti_is_const(t2)) {
1317 op->args[2] = arg_new_constant(ctx, ti_const_val(t2) + 1);
1318 } else {
1319 TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_add, 3);
1320
1321 op2->args[0] = op->args[0];
1322 op2->args[1] = op->args[1];
1323 op2->args[2] = op->args[2];
1324 fold_add(ctx, op2);
1325
1326 op->args[1] = op->args[0];
1327 op->args[2] = arg_new_constant(ctx, 1);
1328 }
1329 }
1330
1331 ctx->carry_state = -1;
1332 return fold_add(ctx, op);
1333}
1334
1335static bool fold_addcio(OptContext *ctx, TCGOp *op)
1336{
1337 TempOptInfo *t1, *t2;
1338 int carry_out = -1;
1339 uint64_t sum, max;
1340
1341 fold_commutative(ctx, op);
1342 t1 = arg_info(op->args[1]);
1343 t2 = arg_info(op->args[2]);
1344
1345 /*
1346 * The z_mask value is >= the maximum value that can be represented
1347 * with the known zero bits. So adding the z_mask values will not
1348 * overflow if and only if the true values cannot overflow.
1349 */
1350 if (!uadd64_overflow(t1->z_mask, t2->z_mask, &sum) &&
1351 !uadd64_overflow(sum, ctx->carry_state != 0, &sum)) {
1352 carry_out = 0;
1353 }
1354
1355 if (ctx->carry_state < 0) {
1356 ctx->carry_state = carry_out;
1357 return finish_folding(ctx, op);
1358 }
1359
1360 squash_prev_carryout(ctx, op);
1361 if (ctx->carry_state == 0) {
1362 goto do_addco;
1363 }
1364
1365 /* Propagate the known carry-in into a constant, if possible. */
1366 max = ctx->type == TCG_TYPE_I32 ? UINT32_MAX : UINT64_MAX;
1367 if (ti_is_const(t2)) {
1368 uint64_t v = ti_const_val(t2) & max;
1369 if (v < max) {
1370 op->args[2] = arg_new_constant(ctx, v + 1);
1371 goto do_addco;
1372 }
1373 /* max + known carry-in produces known carry-out. */
1374 carry_out = 1;
1375 }
1376 if (ti_is_const(t1)) {
1377 uint64_t v = ti_const_val(t1) & max;
1378 if (v < max) {
1379 op->args[1] = arg_new_constant(ctx, v + 1);
1380 goto do_addco;
1381 }
1382 carry_out = 1;
1383 }
1384
1385 /* Adjust the opcode to remember the known carry-in. */
1386 op->opc = INDEX_op_addc1o;
1387 ctx->carry_state = carry_out;
1388 return finish_folding(ctx, op);
1389
1390 do_addco:
1391 op->opc = INDEX_op_addco;
1392 return fold_addco(ctx, op);
1393}
1394
1395static bool fold_addco(OptContext *ctx, TCGOp *op)
1396{
1397 TempOptInfo *t1, *t2;
1398 int carry_out = -1;
1399 uint64_t ign;
1400
1401 fold_commutative(ctx, op);
1402 t1 = arg_info(op->args[1]);
1403 t2 = arg_info(op->args[2]);
1404
1405 if (ti_is_const(t2)) {
1406 uint64_t v2 = ti_const_val(t2);
1407
1408 if (ti_is_const(t1)) {
1409 uint64_t v1 = ti_const_val(t1);
1410 /* Given sign-extension of z_mask for I32, we need not truncate. */
1411 carry_out = uadd64_overflow(v1, v2, &ign);
1412 } else if (v2 == 0) {
1413 carry_out = 0;
1414 }
1415 } else {
1416 /*
1417 * The z_mask value is >= the maximum value that can be represented
1418 * with the known zero bits. So adding the z_mask values will not
1419 * overflow if and only if the true values cannot overflow.
1420 */
1421 if (!uadd64_overflow(t1->z_mask, t2->z_mask, &ign)) {
1422 carry_out = 0;
1423 }
1424 }
1425 ctx->carry_state = carry_out;
Richard Henderson76f42782025-01-14 13:58:39 -08001426 return finish_folding(ctx, op);
1427}
1428
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001429static bool fold_and(OptContext *ctx, TCGOp *op)
1430{
Richard Henderson1e2edf82024-12-09 16:48:36 -06001431 uint64_t z_mask, o_mask, s_mask, a_mask;
Richard Henderson1ca73722024-12-08 18:47:15 -06001432 TempOptInfo *t1, *t2;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001433
Richard Henderson7a2f7082021-08-26 07:06:39 -07001434 if (fold_const2_commutative(ctx, op) ||
Richard Hendersone8679952021-08-25 13:19:52 -07001435 fold_xi_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001436 fold_xi_to_x(ctx, op, -1) ||
Richard Hendersonca7bb042021-08-25 13:14:21 -07001437 fold_xx_to_x(ctx, op)) {
1438 return true;
1439 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001440
Richard Henderson1ca73722024-12-08 18:47:15 -06001441 t1 = arg_info(op->args[1]);
1442 t2 = arg_info(op->args[2]);
Richard Henderson3f2b1f82021-08-26 13:08:54 -07001443
Richard Henderson1e2edf82024-12-09 16:48:36 -06001444 z_mask = t1->z_mask & t2->z_mask;
1445 o_mask = t1->o_mask & t2->o_mask;
Richard Henderson1ca73722024-12-08 18:47:15 -06001446
1447 /*
1448 * Sign repetitions are perforce all identical, whether they are 1 or 0.
1449 * Bitwise operations preserve the relative quantity of the repetitions.
1450 */
1451 s_mask = t1->s_mask & t2->s_mask;
1452
Richard Henderson1e2edf82024-12-09 16:48:36 -06001453 /* Affected bits are those not known zero, masked by those known one. */
1454 a_mask = t1->z_mask & ~t2->o_mask;
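    /*
     * For example, in and r,x,y with y known to be 0x0f (z_mask and
     * o_mask both 0x0f), a_mask == t1->z_mask & ~0x0f: if x is already
     * known to have no bits set above bit 3, a_mask == 0 and the and
     * may be folded to a simple copy of x.
     */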
1455
1456 return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, a_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001457}
1458
1459static bool fold_andc(OptContext *ctx, TCGOp *op)
1460{
Richard Hendersond4d441e2024-12-22 16:08:42 -08001461 uint64_t z_mask, o_mask, s_mask, a_mask;
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001462 TempOptInfo *t1, *t2;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001463
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07001464 if (fold_const2(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001465 fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001466 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001467 fold_ix_to_not(ctx, op, -1)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07001468 return true;
1469 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001470
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001471 t1 = arg_info(op->args[1]);
1472 t2 = arg_info(op->args[2]);
Richard Hendersonfae450b2021-08-25 22:42:19 -07001473
Richard Henderson899281c2023-11-15 11:18:55 -08001474 if (ti_is_const(t2)) {
1475 /* Fold andc r,x,i to and r,x,~i. */
1476 switch (ctx->type) {
1477 case TCG_TYPE_I32:
1478 case TCG_TYPE_I64:
1479 op->opc = INDEX_op_and;
1480 break;
1481 case TCG_TYPE_V64:
1482 case TCG_TYPE_V128:
1483 case TCG_TYPE_V256:
1484 op->opc = INDEX_op_and_vec;
1485 break;
1486 default:
1487 g_assert_not_reached();
1488 }
1489 op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
1490 return fold_and(ctx, op);
1491 }
1492
Richard Hendersond4d441e2024-12-22 16:08:42 -08001493 z_mask = t1->z_mask & ~t2->o_mask;
1494 o_mask = t1->o_mask & ~t2->z_mask;
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001495 s_mask = t1->s_mask & t2->s_mask;
Richard Hendersond4d441e2024-12-22 16:08:42 -08001496
1497 /* Affected bits are those not known zero, masked by those known zero. */
1498 a_mask = t1->z_mask & t2->z_mask;
1499
1500 return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, a_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001501}
1502
Richard Henderson7d3c63a2024-12-09 14:06:08 -06001503static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
1504{
1505 /* If true and false values are the same, eliminate the cmp. */
1506 if (args_are_copies(op->args[2], op->args[3])) {
1507 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
1508 }
1509
1510 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
Richard Hendersonc1fa1b32025-02-17 15:17:47 -08001511 uint64_t tv = arg_const_val(op->args[2]);
1512 uint64_t fv = arg_const_val(op->args[3]);
Richard Henderson7d3c63a2024-12-09 14:06:08 -06001513
1514 if (tv == -1 && fv == 0) {
1515 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
1516 }
1517 if (tv == 0 && fv == -1) {
1518 if (TCG_TARGET_HAS_not_vec) {
1519 op->opc = INDEX_op_not_vec;
1520 return fold_not(ctx, op);
1521 } else {
1522 op->opc = INDEX_op_xor_vec;
1523 op->args[2] = arg_new_constant(ctx, -1);
1524 return fold_xor(ctx, op);
1525 }
1526 }
1527 }
1528 if (arg_is_const(op->args[2])) {
Richard Hendersonc1fa1b32025-02-17 15:17:47 -08001529 uint64_t tv = arg_const_val(op->args[2]);
Richard Henderson7d3c63a2024-12-09 14:06:08 -06001530 if (tv == -1) {
1531 op->opc = INDEX_op_or_vec;
1532 op->args[2] = op->args[3];
1533 return fold_or(ctx, op);
1534 }
1535 if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
1536 op->opc = INDEX_op_andc_vec;
1537 op->args[2] = op->args[1];
1538 op->args[1] = op->args[3];
1539 return fold_andc(ctx, op);
1540 }
1541 }
1542 if (arg_is_const(op->args[3])) {
Richard Hendersonc1fa1b32025-02-17 15:17:47 -08001543 uint64_t fv = arg_const_val(op->args[3]);
Richard Henderson7d3c63a2024-12-09 14:06:08 -06001544 if (fv == 0) {
1545 op->opc = INDEX_op_and_vec;
1546 return fold_and(ctx, op);
1547 }
1548 if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
1549 op->opc = INDEX_op_orc_vec;
1550 op->args[2] = op->args[1];
1551 op->args[1] = op->args[3];
1552 return fold_orc(ctx, op);
1553 }
1554 }
1555 return finish_folding(ctx, op);
1556}
1557
Richard Henderson079b0802021-08-24 09:30:59 -07001558static bool fold_brcond(OptContext *ctx, TCGOp *op)
1559{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001560 int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
Richard Henderson246c4b72023-10-24 16:36:50 -07001561 &op->args[1], &op->args[2]);
Richard Henderson079b0802021-08-24 09:30:59 -07001562 if (i == 0) {
1563 tcg_op_remove(ctx->tcg, op);
1564 return true;
1565 }
1566 if (i > 0) {
1567 op->opc = INDEX_op_br;
1568 op->args[0] = op->args[3];
Richard Henderson15268552024-12-08 07:45:11 -06001569 finish_ebb(ctx);
1570 } else {
1571 finish_bb(ctx);
Richard Henderson079b0802021-08-24 09:30:59 -07001572 }
Richard Henderson15268552024-12-08 07:45:11 -06001573 return true;
Richard Henderson079b0802021-08-24 09:30:59 -07001574}
1575
Richard Henderson764d2ab2021-08-24 09:22:11 -07001576static bool fold_brcond2(OptContext *ctx, TCGOp *op)
1577{
Richard Henderson7e64b112023-10-24 16:53:56 -07001578 TCGCond cond;
1579 TCGArg label;
Richard Henderson7a2f7082021-08-26 07:06:39 -07001580 int i, inv = 0;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001581
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001582 i = do_constant_folding_cond2(ctx, op, &op->args[0]);
Richard Henderson7e64b112023-10-24 16:53:56 -07001583 cond = op->args[4];
1584 label = op->args[5];
Richard Henderson764d2ab2021-08-24 09:22:11 -07001585 if (i >= 0) {
1586 goto do_brcond_const;
1587 }
1588
1589 switch (cond) {
1590 case TCG_COND_LT:
1591 case TCG_COND_GE:
1592 /*
1593 * Simplify LT/GE comparisons vs zero to a single compare
1594 * vs the high word of the input.
1595 */
Richard Henderson27cdb852023-10-23 11:38:00 -07001596 if (arg_is_const_val(op->args[2], 0) &&
1597 arg_is_const_val(op->args[3], 0)) {
Richard Henderson764d2ab2021-08-24 09:22:11 -07001598 goto do_brcond_high;
1599 }
1600 break;
1601
1602 case TCG_COND_NE:
1603 inv = 1;
1604 QEMU_FALLTHROUGH;
1605 case TCG_COND_EQ:
1606 /*
1607 * Simplify EQ/NE comparisons where one of the pairs
1608 * can be simplified.
1609 */
Richard Henderson67f84c92021-08-25 08:00:20 -07001610 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
Richard Henderson764d2ab2021-08-24 09:22:11 -07001611 op->args[2], cond);
1612 switch (i ^ inv) {
1613 case 0:
1614 goto do_brcond_const;
1615 case 1:
1616 goto do_brcond_high;
1617 }
1618
Richard Henderson67f84c92021-08-25 08:00:20 -07001619 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
Richard Henderson764d2ab2021-08-24 09:22:11 -07001620 op->args[3], cond);
1621 switch (i ^ inv) {
1622 case 0:
1623 goto do_brcond_const;
1624 case 1:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001625 goto do_brcond_low;
1626 }
1627 break;
1628
1629 case TCG_COND_TSTEQ:
1630 case TCG_COND_TSTNE:
1631 if (arg_is_const_val(op->args[2], 0)) {
1632 goto do_brcond_high;
1633 }
1634 if (arg_is_const_val(op->args[3], 0)) {
1635 goto do_brcond_low;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001636 }
1637 break;
1638
1639 default:
1640 break;
1641
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001642 do_brcond_low:
Richard Hendersonb6d69fc2025-01-10 11:49:22 -08001643 op->opc = INDEX_op_brcond;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001644 op->args[1] = op->args[2];
1645 op->args[2] = cond;
1646 op->args[3] = label;
1647 return fold_brcond(ctx, op);
1648
Richard Henderson764d2ab2021-08-24 09:22:11 -07001649 do_brcond_high:
Richard Hendersonb6d69fc2025-01-10 11:49:22 -08001650 op->opc = INDEX_op_brcond;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001651 op->args[0] = op->args[1];
1652 op->args[1] = op->args[3];
1653 op->args[2] = cond;
1654 op->args[3] = label;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001655 return fold_brcond(ctx, op);
Richard Henderson764d2ab2021-08-24 09:22:11 -07001656
1657 do_brcond_const:
1658 if (i == 0) {
1659 tcg_op_remove(ctx->tcg, op);
1660 return true;
1661 }
1662 op->opc = INDEX_op_br;
1663 op->args[0] = label;
Richard Henderson15268552024-12-08 07:45:11 -06001664 finish_ebb(ctx);
1665 return true;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001666 }
Richard Henderson15268552024-12-08 07:45:11 -06001667
1668 finish_bb(ctx);
1669 return true;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001670}
1671
Richard Henderson09bacdc2021-08-24 11:58:12 -07001672static bool fold_bswap(OptContext *ctx, TCGOp *op)
1673{
Richard Hendersone6e37332024-12-10 15:02:41 -06001674 uint64_t z_mask, o_mask, s_mask;
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001675 TempOptInfo *t1 = arg_info(op->args[1]);
Richard Hendersone6e37332024-12-10 15:02:41 -06001676 int flags = op->args[2];
Richard Hendersonfae450b2021-08-25 22:42:19 -07001677
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001678 if (ti_is_const(t1)) {
1679 return tcg_opt_gen_movi(ctx, op, op->args[0],
1680 do_constant_folding(op->opc, ctx->type,
Richard Hendersone6e37332024-12-10 15:02:41 -06001681 ti_const_val(t1), flags));
Richard Henderson09bacdc2021-08-24 11:58:12 -07001682 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001683
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001684 z_mask = t1->z_mask;
Richard Hendersone6e37332024-12-10 15:02:41 -06001685 o_mask = t1->o_mask;
1686 s_mask = 0;
1687
Richard Hendersonfae450b2021-08-25 22:42:19 -07001688 switch (op->opc) {
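    /*
     * For example, a bswap16 input with z_mask == 0x00ff (high byte
     * known zero) has z_mask == 0xff00 after the swap; TCG_BSWAP_OZ
     * guarantees the bits above bit 15 stay zero, while TCG_BSWAP_OS
     * sign-extends from bit 15, hence s_mask == INT16_MIN.
     */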
Richard Henderson0dd07ee2025-01-10 18:51:16 -08001689 case INDEX_op_bswap16:
Richard Hendersonfae450b2021-08-25 22:42:19 -07001690 z_mask = bswap16(z_mask);
Richard Hendersone6e37332024-12-10 15:02:41 -06001691 o_mask = bswap16(o_mask);
1692 if (flags & TCG_BSWAP_OS) {
1693 z_mask = (int16_t)z_mask;
1694 o_mask = (int16_t)o_mask;
1695 s_mask = INT16_MIN;
1696 } else if (!(flags & TCG_BSWAP_OZ)) {
1697 z_mask |= MAKE_64BIT_MASK(16, 48);
1698 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001699 break;
Richard Henderson7498d882025-01-10 19:53:51 -08001700 case INDEX_op_bswap32:
Richard Hendersonfae450b2021-08-25 22:42:19 -07001701 z_mask = bswap32(z_mask);
Richard Hendersone6e37332024-12-10 15:02:41 -06001702 o_mask = bswap32(o_mask);
1703 if (flags & TCG_BSWAP_OS) {
1704 z_mask = (int32_t)z_mask;
1705 o_mask = (int32_t)o_mask;
1706 s_mask = INT32_MIN;
1707 } else if (!(flags & TCG_BSWAP_OZ)) {
1708 z_mask |= MAKE_64BIT_MASK(32, 32);
1709 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001710 break;
Richard Henderson3ad5d4c2025-01-10 21:54:44 -08001711 case INDEX_op_bswap64:
Richard Hendersonfae450b2021-08-25 22:42:19 -07001712 z_mask = bswap64(z_mask);
Richard Hendersone6e37332024-12-10 15:02:41 -06001713 o_mask = bswap64(o_mask);
Richard Hendersonfae450b2021-08-25 22:42:19 -07001714 break;
1715 default:
1716 g_assert_not_reached();
1717 }
1718
Richard Hendersone6e37332024-12-10 15:02:41 -06001719 return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
Richard Henderson09bacdc2021-08-24 11:58:12 -07001720}
1721
Richard Henderson5cf32be2021-08-24 08:17:08 -07001722static bool fold_call(OptContext *ctx, TCGOp *op)
1723{
1724 TCGContext *s = ctx->tcg;
1725 int nb_oargs = TCGOP_CALLO(op);
1726 int nb_iargs = TCGOP_CALLI(op);
1727 int flags, i;
1728
1729 init_arguments(ctx, op, nb_oargs + nb_iargs);
1730 copy_propagate(ctx, op, nb_oargs, nb_iargs);
1731
1732 /* If the function reads or writes globals, reset temp data. */
1733 flags = tcg_call_flags(op);
1734 if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
1735 int nb_globals = s->nb_globals;
1736
1737 for (i = 0; i < nb_globals; i++) {
1738 if (test_bit(i, ctx->temps_used.l)) {
Richard Henderson986cac12023-01-09 13:59:35 -08001739 reset_ts(ctx, &ctx->tcg->temps[i]);
Richard Henderson5cf32be2021-08-24 08:17:08 -07001740 }
1741 }
1742 }
1743
Richard Hendersonab84dc32023-08-23 23:04:24 -07001744 /* If the function has side effects, reset mem data. */
1745 if (!(flags & TCG_CALL_NO_SIDE_EFFECTS)) {
1746 remove_mem_copy_all(ctx);
1747 }
1748
Richard Henderson5cf32be2021-08-24 08:17:08 -07001749 /* Reset temp data for outputs. */
1750 for (i = 0; i < nb_oargs; i++) {
Richard Henderson986cac12023-01-09 13:59:35 -08001751 reset_temp(ctx, op->args[i]);
Richard Henderson5cf32be2021-08-24 08:17:08 -07001752 }
1753
1754 /* Stop optimizing MB across calls. */
1755 ctx->prev_mb = NULL;
1756 return true;
1757}
1758
Richard Henderson29f65862024-12-09 14:09:49 -06001759static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
1760{
1761 /* Canonicalize the comparison to put immediate second. */
1762 if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
1763 op->args[3] = tcg_swap_cond(op->args[3]);
1764 }
1765 return finish_folding(ctx, op);
1766}
1767
1768static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
1769{
1770 /* If true and false values are the same, eliminate the cmp. */
1771 if (args_are_copies(op->args[3], op->args[4])) {
1772 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
1773 }
1774
1775 /* Canonicalize the comparison to put immediate second. */
1776 if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
1777 op->args[5] = tcg_swap_cond(op->args[5]);
1778 }
1779 /*
1780 * Canonicalize the "false" input reg to match the destination,
1781 * so that the tcg backend can implement "move if true".
1782 */
1783 if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
1784 op->args[5] = tcg_invert_cond(op->args[5]);
1785 }
1786 return finish_folding(ctx, op);
1787}
1788
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001789static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
1790{
Richard Hendersonce1d6632024-12-08 19:47:51 -06001791 uint64_t z_mask, s_mask;
1792 TempOptInfo *t1 = arg_info(op->args[1]);
1793 TempOptInfo *t2 = arg_info(op->args[2]);
Richard Hendersonfae450b2021-08-25 22:42:19 -07001794
Richard Hendersonce1d6632024-12-08 19:47:51 -06001795 if (ti_is_const(t1)) {
1796 uint64_t t = ti_const_val(t1);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001797
1798 if (t != 0) {
Richard Henderson67f84c92021-08-25 08:00:20 -07001799 t = do_constant_folding(op->opc, ctx->type, t, 0);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001800 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1801 }
1802 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
1803 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001804
1805 switch (ctx->type) {
1806 case TCG_TYPE_I32:
1807 z_mask = 31;
1808 break;
1809 case TCG_TYPE_I64:
1810 z_mask = 63;
1811 break;
1812 default:
1813 g_assert_not_reached();
1814 }
Richard Hendersonce1d6632024-12-08 19:47:51 -06001815 s_mask = ~z_mask;
1816 z_mask |= t2->z_mask;
1817 s_mask &= t2->s_mask;
1818
1819 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001820}
1821
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001822static bool fold_ctpop(OptContext *ctx, TCGOp *op)
1823{
Richard Henderson81be07f2024-12-08 19:49:17 -06001824 uint64_t z_mask;
1825
Richard Hendersonfae450b2021-08-25 22:42:19 -07001826 if (fold_const1(ctx, op)) {
1827 return true;
1828 }
1829
1830 switch (ctx->type) {
1831 case TCG_TYPE_I32:
Richard Henderson81be07f2024-12-08 19:49:17 -06001832 z_mask = 32 | 31;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001833 break;
1834 case TCG_TYPE_I64:
Richard Henderson81be07f2024-12-08 19:49:17 -06001835 z_mask = 64 | 63;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001836 break;
1837 default:
1838 g_assert_not_reached();
1839 }
Richard Henderson81be07f2024-12-08 19:49:17 -06001840 return fold_masks_z(ctx, op, z_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001841}
1842
Richard Henderson1b1907b2021-08-24 10:47:04 -07001843static bool fold_deposit(OptContext *ctx, TCGOp *op)
1844{
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001845 TempOptInfo *t1 = arg_info(op->args[1]);
1846 TempOptInfo *t2 = arg_info(op->args[2]);
1847 int ofs = op->args[3];
1848 int len = op->args[4];
Richard Hendersonc3b920b2025-01-06 10:32:44 -08001849 int width = 8 * tcg_type_size(ctx->type);
Richard Henderson9d80b3c2024-12-10 14:45:44 -06001850 uint64_t z_mask, o_mask, s_mask;
Richard Henderson8f7a8402023-08-13 11:03:05 -07001851
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001852 if (ti_is_const(t1) && ti_is_const(t2)) {
1853 return tcg_opt_gen_movi(ctx, op, op->args[0],
1854 deposit64(ti_const_val(t1), ofs, len,
1855 ti_const_val(t2)));
Richard Henderson1b1907b2021-08-24 10:47:04 -07001856 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001857
Richard Henderson8f7a8402023-08-13 11:03:05 -07001858 /* Inserting a value into zero at offset 0. */
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001859 if (ti_is_const_val(t1, 0) && ofs == 0) {
1860 uint64_t mask = MAKE_64BIT_MASK(0, len);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001861
Richard Hendersonc3b920b2025-01-06 10:32:44 -08001862 op->opc = INDEX_op_and;
Richard Henderson8f7a8402023-08-13 11:03:05 -07001863 op->args[1] = op->args[2];
Richard Henderson26aac972023-10-23 12:31:57 -07001864 op->args[2] = arg_new_constant(ctx, mask);
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001865 return fold_and(ctx, op);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001866 }
1867
1868 /* Inserting zero into a value. */
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001869 if (ti_is_const_val(t2, 0)) {
1870 uint64_t mask = deposit64(-1, ofs, len, 0);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001871
Richard Hendersonc3b920b2025-01-06 10:32:44 -08001872 op->opc = INDEX_op_and;
Richard Henderson26aac972023-10-23 12:31:57 -07001873 op->args[2] = arg_new_constant(ctx, mask);
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001874 return fold_and(ctx, op);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001875 }
1876
Richard Hendersonedb832c2024-12-19 17:56:05 -08001877 /* The s_mask from the top portion of the deposit is still valid. */
1878 if (ofs + len == width) {
1879 s_mask = t2->s_mask << ofs;
1880 } else {
1881 s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
1882 }
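    /*
     * For example, depositing an 8-bit field at ofs 56 of a 64-bit
     * value leaves the field's own s_mask, shifted up by 56, to
     * describe the result, since the field now supplies the sign bit;
     * a deposit ending below the old sign repetitions instead keeps
     * the t1->s_mask bits above ofs + len.
     */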
1883
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001884 z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
Richard Henderson9d80b3c2024-12-10 14:45:44 -06001885 o_mask = deposit64(t1->o_mask, ofs, len, t2->o_mask);
1886
1887 return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
Richard Henderson1b1907b2021-08-24 10:47:04 -07001888}
1889
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001890static bool fold_divide(OptContext *ctx, TCGOp *op)
1891{
Richard Henderson2f9d9a32021-10-25 11:30:14 -07001892 if (fold_const2(ctx, op) ||
1893 fold_xi_to_x(ctx, op, 1)) {
1894 return true;
1895 }
Richard Henderson3d5ec802024-12-08 19:59:15 -06001896 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001897}
1898
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001899static bool fold_dup(OptContext *ctx, TCGOp *op)
1900{
1901 if (arg_is_const(op->args[1])) {
Richard Hendersonc1fa1b32025-02-17 15:17:47 -08001902 uint64_t t = arg_const_val(op->args[1]);
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001903 t = dup_const(TCGOP_VECE(op), t);
1904 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1905 }
Richard Hendersone089d692024-12-08 20:00:51 -06001906 return finish_folding(ctx, op);
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001907}
1908
1909static bool fold_dup2(OptContext *ctx, TCGOp *op)
1910{
1911 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
Richard Hendersonc1fa1b32025-02-17 15:17:47 -08001912 uint64_t t = deposit64(arg_const_val(op->args[1]), 32, 32,
1913 arg_const_val(op->args[2]));
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001914 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1915 }
1916
1917 if (args_are_copies(op->args[1], op->args[2])) {
1918 op->opc = INDEX_op_dup_vec;
1919 TCGOP_VECE(op) = MO_32;
1920 }
Richard Hendersone089d692024-12-08 20:00:51 -06001921 return finish_folding(ctx, op);
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001922}
1923
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001924static bool fold_eqv(OptContext *ctx, TCGOp *op)
1925{
Richard Henderson33fceba2024-12-10 08:26:56 -06001926 uint64_t z_mask, o_mask, s_mask;
Richard Henderson46c68d72023-11-15 11:51:28 -08001927 TempOptInfo *t1, *t2;
Richard Hendersonef6be622024-12-08 20:03:15 -06001928
Richard Henderson7a2f7082021-08-26 07:06:39 -07001929 if (fold_const2_commutative(ctx, op) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001930 fold_xi_to_x(ctx, op, -1) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001931 fold_xi_to_not(ctx, op, 0)) {
1932 return true;
1933 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07001934
Richard Henderson46c68d72023-11-15 11:51:28 -08001935 t2 = arg_info(op->args[2]);
1936 if (ti_is_const(t2)) {
1937 /* Fold eqv r,x,i to xor r,x,~i. */
1938 switch (ctx->type) {
1939 case TCG_TYPE_I32:
1940 case TCG_TYPE_I64:
1941 op->opc = INDEX_op_xor;
1942 break;
1943 case TCG_TYPE_V64:
1944 case TCG_TYPE_V128:
1945 case TCG_TYPE_V256:
1946 op->opc = INDEX_op_xor_vec;
1947 break;
1948 default:
1949 g_assert_not_reached();
1950 }
1951 op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
1952 return fold_xor(ctx, op);
1953 }
1954
1955 t1 = arg_info(op->args[1]);
Richard Henderson33fceba2024-12-10 08:26:56 -06001956
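    /*
     * eqv is ~(x ^ y): a result bit is known zero only when one input
     * bit is known one and the other is known zero, and known one only
     * when both inputs are known one or both are known zero.
     */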
1957 z_mask = (t1->z_mask | ~t2->o_mask) & (t2->z_mask | ~t1->o_mask);
1958 o_mask = ~(t1->z_mask | t2->z_mask) | (t1->o_mask & t2->o_mask);
Richard Henderson46c68d72023-11-15 11:51:28 -08001959 s_mask = t1->s_mask & t2->s_mask;
Richard Henderson33fceba2024-12-10 08:26:56 -06001960
1961 return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001962}
1963
Richard Hendersonb6617c82021-08-24 10:44:53 -07001964static bool fold_extract(OptContext *ctx, TCGOp *op)
1965{
Richard Hendersonfae450b2021-08-25 22:42:19 -07001966 uint64_t z_mask_old, z_mask;
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001967 TempOptInfo *t1 = arg_info(op->args[1]);
Richard Henderson57fe5c62021-08-26 12:04:46 -07001968 int pos = op->args[2];
1969 int len = op->args[3];
Richard Hendersonfae450b2021-08-25 22:42:19 -07001970
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001971 if (ti_is_const(t1)) {
1972 return tcg_opt_gen_movi(ctx, op, op->args[0],
1973 extract64(ti_const_val(t1), pos, len));
Richard Hendersonb6617c82021-08-24 10:44:53 -07001974 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001975
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001976 z_mask_old = t1->z_mask;
Richard Henderson57fe5c62021-08-26 12:04:46 -07001977 z_mask = extract64(z_mask_old, pos, len);
Richard Henderson045ace32024-12-19 10:33:51 -08001978 if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
1979 return true;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001980 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001981
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001982 return fold_masks_z(ctx, op, z_mask);
Richard Hendersonb6617c82021-08-24 10:44:53 -07001983}
1984
Richard Hendersondcd08992021-08-24 10:41:39 -07001985static bool fold_extract2(OptContext *ctx, TCGOp *op)
1986{
1987 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
Richard Hendersonc1fa1b32025-02-17 15:17:47 -08001988 uint64_t v1 = arg_const_val(op->args[1]);
1989 uint64_t v2 = arg_const_val(op->args[2]);
Richard Hendersondcd08992021-08-24 10:41:39 -07001990 int shr = op->args[3];
1991
Richard Henderson61d6a872025-01-12 21:40:43 -08001992 if (ctx->type == TCG_TYPE_I32) {
Richard Hendersondcd08992021-08-24 10:41:39 -07001993 v1 = (uint32_t)v1 >> shr;
Richard Henderson225bec02021-11-09 23:17:59 +01001994 v2 = (uint64_t)((int32_t)v2 << (32 - shr));
Richard Henderson61d6a872025-01-12 21:40:43 -08001995 } else {
1996 v1 >>= shr;
1997 v2 <<= 64 - shr;
Richard Hendersondcd08992021-08-24 10:41:39 -07001998 }
1999 return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
2000 }
Richard Hendersonc9df99e2024-12-08 20:06:42 -06002001 return finish_folding(ctx, op);
Richard Hendersondcd08992021-08-24 10:41:39 -07002002}
2003
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002004static bool fold_exts(OptContext *ctx, TCGOp *op)
2005{
Richard Henderson48e8de62024-12-26 12:01:57 -08002006 uint64_t s_mask, z_mask;
Richard Hendersona9621922024-12-08 20:08:46 -06002007 TempOptInfo *t1;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002008
2009 if (fold_const1(ctx, op)) {
2010 return true;
2011 }
2012
Richard Hendersona9621922024-12-08 20:08:46 -06002013 t1 = arg_info(op->args[1]);
2014 z_mask = t1->z_mask;
2015 s_mask = t1->s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002016
2017 switch (op->opc) {
Richard Hendersonfae450b2021-08-25 22:42:19 -07002018 case INDEX_op_ext_i32_i64:
Richard Hendersona9621922024-12-08 20:08:46 -06002019 s_mask |= INT32_MIN;
2020 z_mask = (int32_t)z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002021 break;
2022 default:
2023 g_assert_not_reached();
2024 }
Richard Hendersona9621922024-12-08 20:08:46 -06002025 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002026}
2027
2028static bool fold_extu(OptContext *ctx, TCGOp *op)
2029{
Richard Henderson48e8de62024-12-26 12:01:57 -08002030 uint64_t z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002031
2032 if (fold_const1(ctx, op)) {
2033 return true;
2034 }
2035
Richard Henderson48e8de62024-12-26 12:01:57 -08002036 z_mask = arg_info(op->args[1])->z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002037 switch (op->opc) {
Richard Hendersonfae450b2021-08-25 22:42:19 -07002038 case INDEX_op_extrl_i64_i32:
2039 case INDEX_op_extu_i32_i64:
Richard Hendersonfae450b2021-08-25 22:42:19 -07002040 z_mask = (uint32_t)z_mask;
2041 break;
2042 case INDEX_op_extrh_i64_i32:
Richard Hendersonfae450b2021-08-25 22:42:19 -07002043 z_mask >>= 32;
2044 break;
2045 default:
2046 g_assert_not_reached();
2047 }
Richard Henderson08abe292024-12-08 20:11:44 -06002048 return fold_masks_z(ctx, op, z_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002049}
2050
Richard Henderson3eefdf22021-08-25 11:06:43 -07002051static bool fold_mb(OptContext *ctx, TCGOp *op)
2052{
2053 /* Eliminate duplicate and redundant fence instructions. */
2054 if (ctx->prev_mb) {
2055 /*
2056 * Merge two barriers of the same type into one,
2057 * or a weaker barrier into a stronger one,
2058 * or two weaker barriers into a stronger one.
2059 * mb X; mb Y => mb X|Y
2060 * mb; strl => mb; st
2061 * ldaq; mb => ld; mb
2062 * ldaq; strl => ld; mb; st
2063 * Other combinations are also merged into a strong
2064 * barrier. This is stricter than specified but for
2065 * the purposes of TCG is better than not optimizing.
2066 */
2067 ctx->prev_mb->args[0] |= op->args[0];
2068 tcg_op_remove(ctx->tcg, op);
2069 } else {
2070 ctx->prev_mb = op;
2071 }
2072 return true;
2073}
2074
Richard Henderson2cfac7f2021-08-25 13:05:43 -07002075static bool fold_mov(OptContext *ctx, TCGOp *op)
2076{
2077 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
2078}
2079
Richard Henderson0c310a32021-08-24 10:37:24 -07002080static bool fold_movcond(OptContext *ctx, TCGOp *op)
2081{
Richard Henderson32202782024-12-08 20:16:38 -06002082 uint64_t z_mask, s_mask;
2083 TempOptInfo *tt, *ft;
Richard Henderson7a2f7082021-08-26 07:06:39 -07002084 int i;
Richard Henderson0c310a32021-08-24 10:37:24 -07002085
Richard Henderson141125e2024-09-06 21:00:10 -07002086 /* If true and false values are the same, eliminate the cmp. */
2087 if (args_are_copies(op->args[3], op->args[4])) {
2088 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
2089 }
2090
Richard Henderson7a2f7082021-08-26 07:06:39 -07002091 /*
2092 * Canonicalize the "false" input reg to match the destination reg so
2093 * that the tcg backend can implement a "move if true" operation.
2094 */
2095 if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
Richard Henderson246c4b72023-10-24 16:36:50 -07002096 op->args[5] = tcg_invert_cond(op->args[5]);
Richard Henderson7a2f7082021-08-26 07:06:39 -07002097 }
2098
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002099 i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07002100 &op->args[2], &op->args[5]);
Richard Henderson0c310a32021-08-24 10:37:24 -07002101 if (i >= 0) {
2102 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
2103 }
2104
Richard Henderson32202782024-12-08 20:16:38 -06002105 tt = arg_info(op->args[3]);
2106 ft = arg_info(op->args[4]);
2107 z_mask = tt->z_mask | ft->z_mask;
2108 s_mask = tt->s_mask & ft->s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002109
Richard Henderson32202782024-12-08 20:16:38 -06002110 if (ti_is_const(tt) && ti_is_const(ft)) {
2111 uint64_t tv = ti_const_val(tt);
2112 uint64_t fv = ti_const_val(ft);
Richard Henderson246c4b72023-10-24 16:36:50 -07002113 TCGCond cond = op->args[5];
Richard Henderson0c310a32021-08-24 10:37:24 -07002114
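        /*
         * With arms {1,0} or {-1,0}, the movcond is exactly a setcond
         * or negsetcond of the same condition, inverted when the
         * constants are swapped.
         */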
Richard Henderson0c310a32021-08-24 10:37:24 -07002115 if (tv == 1 && fv == 0) {
Richard Hendersona363e1e2025-01-10 09:26:44 -08002116 op->opc = INDEX_op_setcond;
Richard Henderson0c310a32021-08-24 10:37:24 -07002117 op->args[3] = cond;
2118 } else if (fv == 1 && tv == 0) {
Richard Hendersona363e1e2025-01-10 09:26:44 -08002119 op->opc = INDEX_op_setcond;
Richard Henderson0c310a32021-08-24 10:37:24 -07002120 op->args[3] = tcg_invert_cond(cond);
Richard Hendersonf7914582025-01-09 12:48:21 -08002121 } else if (tv == -1 && fv == 0) {
Richard Hendersona363e1e2025-01-10 09:26:44 -08002122 op->opc = INDEX_op_negsetcond;
Richard Hendersonf7914582025-01-09 12:48:21 -08002123 op->args[3] = cond;
2124 } else if (fv == -1 && tv == 0) {
Richard Hendersona363e1e2025-01-10 09:26:44 -08002125 op->opc = INDEX_op_negsetcond;
Richard Hendersonf7914582025-01-09 12:48:21 -08002126 op->args[3] = tcg_invert_cond(cond);
Richard Henderson0c310a32021-08-24 10:37:24 -07002127 }
2128 }
Richard Henderson32202782024-12-08 20:16:38 -06002129
2130 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson0c310a32021-08-24 10:37:24 -07002131}
2132
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002133static bool fold_mul(OptContext *ctx, TCGOp *op)
2134{
Richard Hendersone8679952021-08-25 13:19:52 -07002135 if (fold_const2(ctx, op) ||
Richard Henderson5b5cf472021-10-25 11:19:14 -07002136 fold_xi_to_i(ctx, op, 0) ||
2137 fold_xi_to_x(ctx, op, 1)) {
Richard Hendersone8679952021-08-25 13:19:52 -07002138 return true;
2139 }
Richard Hendersoncd9c5832024-12-08 20:18:02 -06002140 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002141}
2142
2143static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
2144{
Richard Henderson7a2f7082021-08-26 07:06:39 -07002145 if (fold_const2_commutative(ctx, op) ||
Richard Hendersone8679952021-08-25 13:19:52 -07002146 fold_xi_to_i(ctx, op, 0)) {
2147 return true;
2148 }
Richard Hendersoncd9c5832024-12-08 20:18:02 -06002149 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002150}
2151
Richard Henderson407112b2021-08-26 06:33:04 -07002152static bool fold_multiply2(OptContext *ctx, TCGOp *op)
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002153{
Richard Henderson7a2f7082021-08-26 07:06:39 -07002154 swap_commutative(op->args[0], &op->args[2], &op->args[3]);
2155
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002156 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
Richard Hendersonc1fa1b32025-02-17 15:17:47 -08002157 uint64_t a = arg_const_val(op->args[2]);
2158 uint64_t b = arg_const_val(op->args[3]);
Richard Henderson407112b2021-08-26 06:33:04 -07002159 uint64_t h, l;
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002160 TCGArg rl, rh;
Richard Henderson407112b2021-08-26 06:33:04 -07002161 TCGOp *op2;
2162
2163 switch (op->opc) {
Richard Hendersond7761982025-01-09 09:11:53 -08002164 case INDEX_op_mulu2:
2165 if (ctx->type == TCG_TYPE_I32) {
2166 l = (uint64_t)(uint32_t)a * (uint32_t)b;
2167 h = (int32_t)(l >> 32);
2168 l = (int32_t)l;
2169 } else {
2170 mulu64(&l, &h, a, b);
2171 }
Richard Henderson407112b2021-08-26 06:33:04 -07002172 break;
Richard Hendersonbfe96482025-01-09 07:24:32 -08002173 case INDEX_op_muls2:
2174 if (ctx->type == TCG_TYPE_I32) {
2175 l = (int64_t)(int32_t)a * (int32_t)b;
2176 h = l >> 32;
2177 l = (int32_t)l;
2178 } else {
2179 muls64(&l, &h, a, b);
2180 }
Richard Henderson407112b2021-08-26 06:33:04 -07002181 break;
2182 default:
2183 g_assert_not_reached();
2184 }
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002185
2186 rl = op->args[0];
2187 rh = op->args[1];
Richard Henderson407112b2021-08-26 06:33:04 -07002188
2189 /* The proper opcode is supplied by tcg_opt_gen_mov. */
Richard Hendersona3c1c572025-04-21 11:05:29 -07002190 op2 = opt_insert_before(ctx, op, 0, 2);
Richard Henderson407112b2021-08-26 06:33:04 -07002191
2192 tcg_opt_gen_movi(ctx, op, rl, l);
2193 tcg_opt_gen_movi(ctx, op2, rh, h);
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002194 return true;
2195 }
Richard Hendersoncd9c5832024-12-08 20:18:02 -06002196 return finish_folding(ctx, op);
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002197}
2198
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002199static bool fold_nand(OptContext *ctx, TCGOp *op)
2200{
Richard Henderson16559c32024-12-09 18:13:15 -06002201 uint64_t z_mask, o_mask, s_mask;
2202 TempOptInfo *t1, *t2;
Richard Hendersonfa3168e2024-12-08 20:20:40 -06002203
Richard Henderson7a2f7082021-08-26 07:06:39 -07002204 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002205 fold_xi_to_not(ctx, op, -1)) {
2206 return true;
2207 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002208
Richard Henderson16559c32024-12-09 18:13:15 -06002209 t1 = arg_info(op->args[1]);
2210 t2 = arg_info(op->args[2]);
2211
2212 z_mask = ~(t1->o_mask & t2->o_mask);
2213 o_mask = ~(t1->z_mask & t2->z_mask);
2214 s_mask = t1->s_mask & t2->s_mask;
2215
2216 return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002217}
2218
Richard Hendersone25fe882024-04-04 20:53:50 +00002219static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002220{
Richard Hendersonfae450b2021-08-25 22:42:19 -07002221 /* Set to 1 all bits to the left of the rightmost. */
Richard Hendersone25fe882024-04-04 20:53:50 +00002222 uint64_t z_mask = arg_info(op->args[1])->z_mask;
Richard Hendersond151fd32024-12-08 20:23:11 -06002223 z_mask = -(z_mask & -z_mask);
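    /*
     * z_mask & -z_mask isolates the lowest possibly-nonzero bit, and
     * negating that sets it and every bit above it.  For example,
     * z_mask == 0x30 yields -(0x10) == 0xffff...fff0: when the low
     * four bits of the input are known zero, so are the low four bits
     * of its negation.
     */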
Richard Hendersonfae450b2021-08-25 22:42:19 -07002224
Richard Hendersond151fd32024-12-08 20:23:11 -06002225 return fold_masks_z(ctx, op, z_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002226}
2227
Richard Hendersone25fe882024-04-04 20:53:50 +00002228static bool fold_neg(OptContext *ctx, TCGOp *op)
2229{
2230 return fold_const1(ctx, op) || fold_neg_no_const(ctx, op);
2231}
2232
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002233static bool fold_nor(OptContext *ctx, TCGOp *op)
2234{
Richard Henderson682d6d52024-12-09 21:13:02 -06002235 uint64_t z_mask, o_mask, s_mask;
2236 TempOptInfo *t1, *t2;
Richard Henderson2b7b6952024-12-08 20:25:21 -06002237
Richard Henderson7a2f7082021-08-26 07:06:39 -07002238 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002239 fold_xi_to_not(ctx, op, 0)) {
2240 return true;
2241 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002242
Richard Henderson682d6d52024-12-09 21:13:02 -06002243 t1 = arg_info(op->args[1]);
2244 t2 = arg_info(op->args[2]);
2245
2246 z_mask = ~(t1->o_mask | t2->o_mask);
2247 o_mask = ~(t1->z_mask | t2->z_mask);
2248 s_mask = t1->s_mask & t2->s_mask;
2249
2250 return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002251}
2252
2253static bool fold_not(OptContext *ctx, TCGOp *op)
2254{
Richard Hendersond89504b2024-12-09 21:15:37 -06002255 TempOptInfo *t1;
2256
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002257 if (fold_const1(ctx, op)) {
2258 return true;
2259 }
Richard Hendersond89504b2024-12-09 21:15:37 -06002260
2261 t1 = arg_info(op->args[1]);
2262 return fold_masks_zos(ctx, op, ~t1->o_mask, ~t1->z_mask, t1->s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002263}
2264
2265static bool fold_or(OptContext *ctx, TCGOp *op)
2266{
Richard Henderson84b399d2024-12-09 21:35:53 -06002267 uint64_t z_mask, o_mask, s_mask, a_mask;
Richard Henderson83b1ba32024-12-08 20:28:59 -06002268 TempOptInfo *t1, *t2;
2269
Richard Henderson7a2f7082021-08-26 07:06:39 -07002270 if (fold_const2_commutative(ctx, op) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002271 fold_xi_to_x(ctx, op, 0) ||
Richard Hendersonca7bb042021-08-25 13:14:21 -07002272 fold_xx_to_x(ctx, op)) {
2273 return true;
2274 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002275
Richard Henderson83b1ba32024-12-08 20:28:59 -06002276 t1 = arg_info(op->args[1]);
2277 t2 = arg_info(op->args[2]);
Richard Henderson84b399d2024-12-09 21:35:53 -06002278
Richard Henderson83b1ba32024-12-08 20:28:59 -06002279 z_mask = t1->z_mask | t2->z_mask;
Richard Henderson84b399d2024-12-09 21:35:53 -06002280 o_mask = t1->o_mask | t2->o_mask;
Richard Henderson83b1ba32024-12-08 20:28:59 -06002281 s_mask = t1->s_mask & t2->s_mask;
Richard Henderson84b399d2024-12-09 21:35:53 -06002282
2283 /* Affected bits are those not known one, masked by those known zero. */
2284 a_mask = ~t1->o_mask & t2->z_mask;
2285
2286 return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, a_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002287}
2288
2289static bool fold_orc(OptContext *ctx, TCGOp *op)
2290{
Richard Hendersoncc4033e2024-12-09 22:22:27 -06002291 uint64_t z_mask, o_mask, s_mask, a_mask;
Richard Henderson50e40ec2024-12-10 08:13:10 -06002292 TempOptInfo *t1, *t2;
Richard Henderson54e26b22024-12-08 20:30:20 -06002293
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002294 if (fold_const2(ctx, op) ||
Richard Henderson4e858d92021-08-26 07:31:13 -07002295 fold_xx_to_i(ctx, op, -1) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002296 fold_xi_to_x(ctx, op, -1) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002297 fold_ix_to_not(ctx, op, 0)) {
2298 return true;
2299 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002300
Richard Henderson50e40ec2024-12-10 08:13:10 -06002301 t2 = arg_info(op->args[2]);
2302 if (ti_is_const(t2)) {
2303 /* Fold orc r,x,i to or r,x,~i. */
2304 switch (ctx->type) {
2305 case TCG_TYPE_I32:
2306 case TCG_TYPE_I64:
2307 op->opc = INDEX_op_or;
2308 break;
2309 case TCG_TYPE_V64:
2310 case TCG_TYPE_V128:
2311 case TCG_TYPE_V256:
2312 op->opc = INDEX_op_or_vec;
2313 break;
2314 default:
2315 g_assert_not_reached();
2316 }
2317 op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
2318 return fold_or(ctx, op);
2319 }
2320
2321 t1 = arg_info(op->args[1]);
Richard Hendersoncc4033e2024-12-09 22:22:27 -06002322
2323 z_mask = t1->z_mask | ~t2->o_mask;
2324 o_mask = t1->o_mask | ~t2->z_mask;
Richard Henderson50e40ec2024-12-10 08:13:10 -06002325 s_mask = t1->s_mask & t2->s_mask;
Richard Hendersoncc4033e2024-12-09 22:22:27 -06002326
2327 /* Affected bits are those not known one, masked by those known one. */
2328 a_mask = ~t1->o_mask & ~t2->o_mask;
2329
2330 return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, a_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002331}
2332
Richard Henderson6813be92024-12-08 20:33:30 -06002333static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
Richard Henderson3eefdf22021-08-25 11:06:43 -07002334{
Richard Hendersonfae450b2021-08-25 22:42:19 -07002335 const TCGOpDef *def = &tcg_op_defs[op->opc];
2336 MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
2337 MemOp mop = get_memop(oi);
2338 int width = 8 * memop_size(mop);
Richard Henderson6813be92024-12-08 20:33:30 -06002339 uint64_t z_mask = -1, s_mask = 0;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002340
Richard Henderson57fe5c62021-08-26 12:04:46 -07002341 if (width < 64) {
Richard Henderson75c3bf32024-12-19 10:50:40 -08002342 if (mop & MO_SIGN) {
Richard Henderson6813be92024-12-08 20:33:30 -06002343 s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
Richard Henderson75c3bf32024-12-19 10:50:40 -08002344 } else {
Richard Henderson6813be92024-12-08 20:33:30 -06002345 z_mask = MAKE_64BIT_MASK(0, width);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002346 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002347 }
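    /*
     * For example, a signed 8-bit load has
     * s_mask == MAKE_64BIT_MASK(7, 57), i.e. bits 63..7 all repeat
     * the sign; an unsigned 8-bit load instead has z_mask == 0xff.
     */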
2348
Richard Henderson3eefdf22021-08-25 11:06:43 -07002349 /* Opcodes that touch guest memory stop the mb optimization. */
2350 ctx->prev_mb = NULL;
Richard Henderson6813be92024-12-08 20:33:30 -06002351
2352 return fold_masks_zs(ctx, op, z_mask, s_mask);
2353}
2354
2355static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
2356{
2357 /* Opcodes that touch guest memory stop the mb optimization. */
2358 ctx->prev_mb = NULL;
2359 return finish_folding(ctx, op);
Richard Henderson3eefdf22021-08-25 11:06:43 -07002360}
2361
2362static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
2363{
2364 /* Opcodes that touch guest memory stop the mb optimization. */
2365 ctx->prev_mb = NULL;
Richard Henderson082b3ef2024-12-08 20:34:57 -06002366 return true;
Richard Henderson3eefdf22021-08-25 11:06:43 -07002367}
2368
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002369static bool fold_remainder(OptContext *ctx, TCGOp *op)
2370{
Richard Henderson267c17e2021-10-25 11:30:33 -07002371 if (fold_const2(ctx, op) ||
2372 fold_xx_to_i(ctx, op, 0)) {
2373 return true;
2374 }
Richard Hendersonf9e39342024-12-08 20:36:50 -06002375 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002376}
2377
Richard Henderson95eb2292024-12-08 20:47:59 -06002378/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
2379static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
Richard Henderson8d65cda2024-03-26 16:00:40 -10002380{
2381 uint64_t a_zmask, b_val;
2382 TCGCond cond;
2383
2384 if (!arg_is_const(op->args[2])) {
2385 return false;
2386 }
2387
2388 a_zmask = arg_info(op->args[1])->z_mask;
Richard Hendersonc1fa1b32025-02-17 15:17:47 -08002389 b_val = arg_const_val(op->args[2]);
Richard Henderson8d65cda2024-03-26 16:00:40 -10002390 cond = op->args[3];
2391
2392 if (ctx->type == TCG_TYPE_I32) {
2393 a_zmask = (uint32_t)a_zmask;
2394 b_val = (uint32_t)b_val;
2395 }
2396
2397 /*
2398 * A with only low bits set vs B with high bits set means that A < B.
2399 */
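    /*
     * For example, a_zmask == 0xff guarantees A <= 0xff, so comparing
     * with b_val == 0x100 has a known result: LTU is always true and
     * GTU is always false.
     */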
2400 if (a_zmask < b_val) {
2401 bool inv = false;
2402
2403 switch (cond) {
2404 case TCG_COND_NE:
2405 case TCG_COND_LEU:
2406 case TCG_COND_LTU:
2407 inv = true;
2408 /* fall through */
2409 case TCG_COND_GTU:
2410 case TCG_COND_GEU:
2411 case TCG_COND_EQ:
2412 return tcg_opt_gen_movi(ctx, op, op->args[0], neg ? -inv : inv);
2413 default:
2414 break;
2415 }
2416 }
2417
2418 /*
2419 * A with at most the lsb set is already boolean.
2420 */
2421 if (a_zmask <= 1) {
2422 bool convert = false;
2423 bool inv = false;
2424
2425 switch (cond) {
2426 case TCG_COND_EQ:
2427 inv = true;
2428 /* fall through */
2429 case TCG_COND_NE:
2430 convert = (b_val == 0);
2431 break;
2432 case TCG_COND_LTU:
2433 case TCG_COND_TSTEQ:
2434 inv = true;
2435 /* fall through */
2436 case TCG_COND_GEU:
2437 case TCG_COND_TSTNE:
2438 convert = (b_val == 1);
2439 break;
2440 default:
2441 break;
2442 }
2443 if (convert) {
Richard Henderson8d65cda2024-03-26 16:00:40 -10002444 if (!inv && !neg) {
2445 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
2446 }
2447
Richard Henderson8d65cda2024-03-26 16:00:40 -10002448 if (!inv) {
Richard Henderson69713582025-01-06 22:48:57 -08002449 op->opc = INDEX_op_neg;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002450 } else if (neg) {
Richard Henderson79602f62025-01-06 09:11:39 -08002451 op->opc = INDEX_op_add;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002452 op->args[2] = arg_new_constant(ctx, -1);
2453 } else {
Richard Hendersonfffd3dc2025-01-06 15:18:35 -08002454 op->opc = INDEX_op_xor;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002455 op->args[2] = arg_new_constant(ctx, 1);
2456 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002457 return -1;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002458 }
2459 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002460 return 0;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002461}
2462
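/*
 * Lower setcond/negsetcond with a TSTEQ/TSTNE test of a single bit to
 * an extract (or shift plus and) of that bit, followed when needed by
 * a neg, xor 1, or add -1 fixup for the sense and sign of the result.
 */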
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002463static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
2464{
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002465 TCGCond cond = op->args[3];
2466 TCGArg ret, src1, src2;
2467 TCGOp *op2;
2468 uint64_t val;
2469 int sh;
2470 bool inv;
2471
2472 if (!is_tst_cond(cond) || !arg_is_const(op->args[2])) {
2473 return;
2474 }
2475
2476 src2 = op->args[2];
Richard Hendersonc1fa1b32025-02-17 15:17:47 -08002477 val = arg_const_val(src2);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002478 if (!is_power_of_2(val)) {
2479 return;
2480 }
2481 sh = ctz64(val);
2482
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002483 ret = op->args[0];
2484 src1 = op->args[1];
2485 inv = cond == TCG_COND_TSTEQ;
2486
Richard Hendersonfa361ee2025-01-12 11:50:09 -08002487 if (sh && neg && !inv && TCG_TARGET_sextract_valid(ctx->type, sh, 1)) {
2488 op->opc = INDEX_op_sextract;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002489 op->args[1] = src1;
2490 op->args[2] = sh;
2491 op->args[3] = 1;
2492 return;
Richard Henderson07d5d502025-01-11 09:01:46 -08002493 } else if (sh && TCG_TARGET_extract_valid(ctx->type, sh, 1)) {
2494 op->opc = INDEX_op_extract;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002495 op->args[1] = src1;
2496 op->args[2] = sh;
2497 op->args[3] = 1;
2498 } else {
2499 if (sh) {
Richard Henderson74dbd362025-01-07 22:52:10 -08002500 op2 = opt_insert_before(ctx, op, INDEX_op_shr, 3);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002501 op2->args[0] = ret;
2502 op2->args[1] = src1;
2503 op2->args[2] = arg_new_constant(ctx, sh);
2504 src1 = ret;
2505 }
Richard Hendersonc3b920b2025-01-06 10:32:44 -08002506 op->opc = INDEX_op_and;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002507 op->args[1] = src1;
2508 op->args[2] = arg_new_constant(ctx, 1);
2509 }
2510
2511 if (neg && inv) {
Richard Henderson93a9ddb2025-01-06 22:06:08 -08002512 op2 = opt_insert_after(ctx, op, INDEX_op_add, 3);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002513 op2->args[0] = ret;
2514 op2->args[1] = ret;
Richard Henderson93a9ddb2025-01-06 22:06:08 -08002515 op2->args[2] = arg_new_constant(ctx, -1);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002516 } else if (inv) {
Richard Hendersonfffd3dc2025-01-06 15:18:35 -08002517 op2 = opt_insert_after(ctx, op, INDEX_op_xor, 3);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002518 op2->args[0] = ret;
2519 op2->args[1] = ret;
2520 op2->args[2] = arg_new_constant(ctx, 1);
2521 } else if (neg) {
Richard Henderson69713582025-01-06 22:48:57 -08002522 op2 = opt_insert_after(ctx, op, INDEX_op_neg, 2);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002523 op2->args[0] = ret;
2524 op2->args[1] = ret;
2525 }
2526}
2527
Richard Hendersonc63ff552021-08-24 09:35:30 -07002528static bool fold_setcond(OptContext *ctx, TCGOp *op)
2529{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002530 int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07002531 &op->args[2], &op->args[3]);
Richard Hendersonc63ff552021-08-24 09:35:30 -07002532 if (i >= 0) {
2533 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
2534 }
Richard Henderson8d65cda2024-03-26 16:00:40 -10002535
Richard Henderson95eb2292024-12-08 20:47:59 -06002536 i = fold_setcond_zmask(ctx, op, false);
2537 if (i > 0) {
Richard Henderson8d65cda2024-03-26 16:00:40 -10002538 return true;
2539 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002540 if (i == 0) {
2541 fold_setcond_tst_pow2(ctx, op, false);
2542 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002543
Richard Henderson2c8a2832024-12-08 20:50:37 -06002544 return fold_masks_z(ctx, op, 1);
Richard Hendersonc63ff552021-08-24 09:35:30 -07002545}
2546
Richard Henderson36355022023-08-04 23:24:04 +00002547static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
2548{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002549 int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07002550 &op->args[2], &op->args[3]);
Richard Henderson36355022023-08-04 23:24:04 +00002551 if (i >= 0) {
2552 return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
2553 }
Richard Henderson8d65cda2024-03-26 16:00:40 -10002554
Richard Henderson95eb2292024-12-08 20:47:59 -06002555 i = fold_setcond_zmask(ctx, op, true);
2556 if (i > 0) {
Richard Henderson8d65cda2024-03-26 16:00:40 -10002557 return true;
2558 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002559 if (i == 0) {
2560 fold_setcond_tst_pow2(ctx, op, true);
2561 }
Richard Henderson36355022023-08-04 23:24:04 +00002562
2563 /* Value is {0,-1} so all bits are repetitions of the sign. */
Richard Henderson081cf082024-12-08 20:50:58 -06002564 return fold_masks_s(ctx, op, -1);
Richard Henderson36355022023-08-04 23:24:04 +00002565}
2566
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002567static bool fold_setcond2(OptContext *ctx, TCGOp *op)
2568{
Richard Henderson7e64b112023-10-24 16:53:56 -07002569 TCGCond cond;
Richard Henderson7a2f7082021-08-26 07:06:39 -07002570 int i, inv = 0;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002571
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002572 i = do_constant_folding_cond2(ctx, op, &op->args[1]);
Richard Henderson7e64b112023-10-24 16:53:56 -07002573 cond = op->args[5];
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002574 if (i >= 0) {
2575 goto do_setcond_const;
2576 }
2577
2578 switch (cond) {
2579 case TCG_COND_LT:
2580 case TCG_COND_GE:
2581 /*
2582 * Simplify LT/GE comparisons vs zero to a single compare
2583 * vs the high word of the input.
2584 */
Richard Henderson27cdb852023-10-23 11:38:00 -07002585 if (arg_is_const_val(op->args[3], 0) &&
2586 arg_is_const_val(op->args[4], 0)) {
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002587 goto do_setcond_high;
2588 }
2589 break;
2590
2591 case TCG_COND_NE:
2592 inv = 1;
2593 QEMU_FALLTHROUGH;
2594 case TCG_COND_EQ:
2595 /*
2596 * Simplify EQ/NE comparisons where one of the pairs
2597 * can be simplified.
2598 */
Richard Henderson67f84c92021-08-25 08:00:20 -07002599 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002600 op->args[3], cond);
2601 switch (i ^ inv) {
2602 case 0:
2603 goto do_setcond_const;
2604 case 1:
2605 goto do_setcond_high;
2606 }
2607
Richard Henderson67f84c92021-08-25 08:00:20 -07002608 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002609 op->args[4], cond);
2610 switch (i ^ inv) {
2611 case 0:
2612 goto do_setcond_const;
2613 case 1:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002614 goto do_setcond_low;
2615 }
2616 break;
2617
2618 case TCG_COND_TSTEQ:
2619 case TCG_COND_TSTNE:
Richard Hendersona71d9df2024-06-30 19:46:23 -07002620 if (arg_is_const_val(op->args[3], 0)) {
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002621 goto do_setcond_high;
2622 }
2623 if (arg_is_const_val(op->args[4], 0)) {
2624 goto do_setcond_low;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002625 }
2626 break;
2627
2628 default:
2629 break;
2630
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002631 do_setcond_low:
2632 op->args[2] = op->args[3];
2633 op->args[3] = cond;
Richard Hendersona363e1e2025-01-10 09:26:44 -08002634 op->opc = INDEX_op_setcond;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002635 return fold_setcond(ctx, op);
2636
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002637 do_setcond_high:
2638 op->args[1] = op->args[2];
2639 op->args[2] = op->args[4];
2640 op->args[3] = cond;
Richard Hendersona363e1e2025-01-10 09:26:44 -08002641 op->opc = INDEX_op_setcond;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002642 return fold_setcond(ctx, op);
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002643 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002644
Richard Hendersona53502c2024-12-08 20:56:36 -06002645 return fold_masks_z(ctx, op, 1);
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002646
2647 do_setcond_const:
2648 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
2649}
2650
Richard Hendersonb6617c82021-08-24 10:44:53 -07002651static bool fold_sextract(OptContext *ctx, TCGOp *op)
2652{
Richard Henderson57fe5c62021-08-26 12:04:46 -07002653 uint64_t z_mask, s_mask, s_mask_old;
Richard Hendersonbaff5072024-12-08 21:09:30 -06002654 TempOptInfo *t1 = arg_info(op->args[1]);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002655 int pos = op->args[2];
2656 int len = op->args[3];
Richard Hendersonfae450b2021-08-25 22:42:19 -07002657
Richard Hendersonbaff5072024-12-08 21:09:30 -06002658 if (ti_is_const(t1)) {
2659 return tcg_opt_gen_movi(ctx, op, op->args[0],
2660 sextract64(ti_const_val(t1), pos, len));
Richard Hendersonb6617c82021-08-24 10:44:53 -07002661 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002662
Richard Hendersonbaff5072024-12-08 21:09:30 -06002663 s_mask_old = t1->s_mask;
2664 s_mask = s_mask_old >> pos;
2665 s_mask |= -1ull << (len - 1);
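    /*
     * For example, pos 0 and len 8 give s_mask == -1ull << 7.  If the
     * input already had at least that many sign repetitions
     * (s_mask_old covers bits 63..7), then s_mask & ~s_mask_old == 0
     * below and the sextract is a no-op that folds to a copy.
     */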
Richard Henderson57fe5c62021-08-26 12:04:46 -07002666
Richard Hendersonaa9e0502024-12-21 22:03:53 -08002667 if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
Richard Henderson045ace32024-12-19 10:33:51 -08002668 return true;
Richard Henderson57fe5c62021-08-26 12:04:46 -07002669 }
2670
Richard Hendersonbaff5072024-12-08 21:09:30 -06002671 z_mask = sextract64(t1->z_mask, pos, len);
2672 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Hendersonb6617c82021-08-24 10:44:53 -07002673}
2674
static bool fold_shift(OptContext *ctx, TCGOp *op)
{
    uint64_t s_mask, z_mask;
    TempOptInfo *t1, *t2;

    if (fold_const2(ctx, op) ||
        fold_ix_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }

    t1 = arg_info(op->args[1]);
    t2 = arg_info(op->args[2]);
    s_mask = t1->s_mask;
    z_mask = t1->z_mask;

    if (ti_is_const(t2)) {
        int sh = ti_const_val(t2);

        z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
        s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);

        return fold_masks_zs(ctx, op, z_mask, s_mask);
    }

    switch (op->opc) {
    case INDEX_op_sar:
        /*
         * Arithmetic right shift will not reduce the number of
         * input sign repetitions.
         */
        return fold_masks_s(ctx, op, s_mask);
    case INDEX_op_shr:
        /*
         * If the sign bit is known zero, then logical right shift
         * will not reduce the number of input sign repetitions.
         */
        if (~z_mask & -s_mask) {
            return fold_masks_s(ctx, op, s_mask);
        }
        break;
    default:
        break;
    }

    return finish_folding(ctx, op);
}

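/* Simplify "sub r, 0, x" to "neg r, x", when negation is available. */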
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
{
    TCGOpcode neg_op;
    bool have_neg;

    if (!arg_is_const_val(op->args[1], 0)) {
        return false;
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        neg_op = INDEX_op_neg;
        have_neg = true;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        neg_op = INDEX_op_neg_vec;
        have_neg = (TCG_TARGET_HAS_neg_vec &&
                    tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
        break;
    default:
        g_assert_not_reached();
    }
    if (have_neg) {
        op->opc = neg_op;
        op->args[1] = op->args[2];
        return fold_neg_no_const(ctx, op);
    }
    return false;
}

/* We cannot as yet do_constant_folding with vectors. */
static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
{
    if (fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_sub_to_neg(ctx, op)) {
        return true;
    }
    return finish_folding(ctx, op);
}

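/* Like fold_sub_vec, but also fold subtraction of a constant to addition. */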
static bool fold_sub(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_sub_to_neg(ctx, op)) {
        return true;
    }

    /* Fold sub r,x,i to add r,x,-i */
    if (arg_is_const(op->args[2])) {
        uint64_t val = arg_const_val(op->args[2]);

        op->opc = INDEX_op_add;
        op->args[2] = arg_new_constant(ctx, -val);
    }
    return finish_folding(ctx, op);
}

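/*
 * The borrow-out of the previous opcode is known to be constant, so it
 * no longer needs to be computed: rewrite that opcode to a form that
 * does not produce a borrow.
 */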
static void squash_prev_borrowout(OptContext *ctx, TCGOp *op)
{
    TempOptInfo *t2;

    op = QTAILQ_PREV(op, link);
    switch (op->opc) {
    case INDEX_op_subbo:
        op->opc = INDEX_op_sub;
        fold_sub(ctx, op);
        break;
    case INDEX_op_subbio:
        op->opc = INDEX_op_subbi;
        break;
    case INDEX_op_subb1o:
        t2 = arg_info(op->args[2]);
        if (ti_is_const(t2)) {
            op->opc = INDEX_op_add;
            op->args[2] = arg_new_constant(ctx, -(ti_const_val(t2) + 1));
            /* Perform other constant folding, if needed. */
            fold_add(ctx, op);
        } else {
            TCGArg ret = op->args[0];
            op->opc = INDEX_op_sub;
            op = opt_insert_after(ctx, op, INDEX_op_add, 3);
            op->args[0] = ret;
            op->args[1] = ret;
            op->args[2] = arg_new_constant(ctx, -1);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

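/*
 * Subtract with borrow-in.  With a constant borrow-in, fold it into
 * the operands and lower the opcode to plain sub or add.
 */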
static bool fold_subbi(OptContext *ctx, TCGOp *op)
{
    TempOptInfo *t2;
    int borrow_in = ctx->carry_state;

    if (borrow_in < 0) {
        return finish_folding(ctx, op);
    }
    ctx->carry_state = -1;

    squash_prev_borrowout(ctx, op);
    if (borrow_in == 0) {
        op->opc = INDEX_op_sub;
        return fold_sub(ctx, op);
    }

    /*
     * Propagate the known carry-in into any constant, then negate to
     * transform from sub to add.  If there is no constant, emit a
     * separate add -1.
     */
    t2 = arg_info(op->args[2]);
    if (ti_is_const(t2)) {
        op->args[2] = arg_new_constant(ctx, -(ti_const_val(t2) + 1));
    } else {
        TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_sub, 3);

        op2->args[0] = op->args[0];
        op2->args[1] = op->args[1];
        op2->args[2] = op->args[2];
        fold_sub(ctx, op2);

        op->args[1] = op->args[0];
        op->args[2] = arg_new_constant(ctx, -1);
    }
    op->opc = INDEX_op_add;
    return fold_add(ctx, op);
}

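/*
 * Subtract with borrow-in and borrow-out.  A known borrow-in is folded
 * into a constant operand when possible; otherwise it is remembered
 * by switching to the subb1o opcode.
 */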
static bool fold_subbio(OptContext *ctx, TCGOp *op)
{
    TempOptInfo *t1, *t2;
    int borrow_out = -1;

    if (ctx->carry_state < 0) {
        return finish_folding(ctx, op);
    }

    squash_prev_borrowout(ctx, op);
    if (ctx->carry_state == 0) {
        goto do_subbo;
    }

    t1 = arg_info(op->args[1]);
    t2 = arg_info(op->args[2]);

    /* Propagate the known borrow-in into a constant, if possible. */
    if (ti_is_const(t2)) {
        uint64_t max = ctx->type == TCG_TYPE_I32 ? UINT32_MAX : UINT64_MAX;
        uint64_t v = ti_const_val(t2) & max;

        if (v < max) {
            op->args[2] = arg_new_constant(ctx, v + 1);
            goto do_subbo;
        }
        /* Subtracting max + 1 produces known borrow out. */
        borrow_out = 1;
    }
    if (ti_is_const(t1)) {
        uint64_t v = ti_const_val(t1);
        if (v != 0) {
            op->args[1] = arg_new_constant(ctx, v - 1);
            goto do_subbo;
        }
    }

    /* Adjust the opcode to remember the known carry-in. */
    op->opc = INDEX_op_subb1o;
    ctx->carry_state = borrow_out;
    return finish_folding(ctx, op);

 do_subbo:
    op->opc = INDEX_op_subbo;
    return fold_subbo(ctx, op);
}

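/* Subtract with borrow-out: record whether the borrow produced is known. */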
static bool fold_subbo(OptContext *ctx, TCGOp *op)
{
    TempOptInfo *t1 = arg_info(op->args[1]);
    TempOptInfo *t2 = arg_info(op->args[2]);
    int borrow_out = -1;

    if (ti_is_const(t2)) {
        uint64_t v2 = ti_const_val(t2);
        if (v2 == 0) {
            borrow_out = 0;
        } else if (ti_is_const(t1)) {
            uint64_t v1 = ti_const_val(t1);
            borrow_out = v1 < v2;
        }
    }
    ctx->carry_state = borrow_out;
    return finish_folding(ctx, op);
}

static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask = -1, s_mask = 0;

    /* We can't do any folding with a load, but we can record bits. */
    switch (op->opc) {
    case INDEX_op_ld8s:
        s_mask = INT8_MIN;
        break;
    case INDEX_op_ld8u:
        z_mask = MAKE_64BIT_MASK(0, 8);
        break;
    case INDEX_op_ld16s:
        s_mask = INT16_MIN;
        break;
    case INDEX_op_ld16u:
        z_mask = MAKE_64BIT_MASK(0, 16);
        break;
    case INDEX_op_ld32s:
        s_mask = INT32_MIN;
        break;
    case INDEX_op_ld32u:
        z_mask = MAKE_64BIT_MASK(0, 32);
        break;
    default:
        g_assert_not_reached();
    }
    return fold_masks_zs(ctx, op, z_mask, s_mask);
}

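/*
 * Loads from the CPU state are tracked as memory copies: reuse a
 * recorded copy of the same slot if one exists, else record this load.
 */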
static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
{
    TCGTemp *dst, *src;
    intptr_t ofs;
    TCGType type;

    if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
        return finish_folding(ctx, op);
    }

    type = ctx->type;
    ofs = op->args[2];
    dst = arg_temp(op->args[0]);
    src = find_mem_copy_for(ctx, type, ofs);
    if (src && src->base_type == type) {
        return tcg_opt_gen_mov(ctx, op, temp_arg(dst), temp_arg(src));
    }

    reset_ts(ctx, dst);
    record_mem_copy(ctx, type, dst, ofs, ofs + tcg_type_size(type) - 1);
    return true;
}

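/*
 * A store invalidates any tracked memory copies that overlap the bytes
 * written; a store through an unknown base invalidates them all.
 */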
static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
{
    intptr_t ofs = op->args[2];
    intptr_t lm1;

    if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
        remove_mem_copy_all(ctx);
        return true;
    }

    switch (op->opc) {
    case INDEX_op_st8:
        lm1 = 0;
        break;
    case INDEX_op_st16:
        lm1 = 1;
        break;
    case INDEX_op_st32:
        lm1 = 3;
        break;
    case INDEX_op_st:
    case INDEX_op_st_vec:
        lm1 = tcg_type_size(ctx->type) - 1;
        break;
    default:
        g_assert_not_reached();
    }
    remove_mem_copy_in(ctx, ofs, ofs + lm1);
    return true;
}

static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
{
    TCGTemp *src;
    intptr_t ofs, last;
    TCGType type;

    if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
        return fold_tcg_st(ctx, op);
    }

    src = arg_temp(op->args[0]);
    ofs = op->args[2];
    type = ctx->type;

    /*
     * Eliminate duplicate stores of a constant.
     * This happens frequently when the target ISA zero-extends.
     */
    if (ts_is_const(src)) {
        TCGTemp *prev = find_mem_copy_for(ctx, type, ofs);
        if (src == prev) {
            tcg_op_remove(ctx->tcg, op);
            return true;
        }
    }

    last = ofs + tcg_type_size(type) - 1;
    remove_mem_copy_in(ctx, ofs, last);
    record_mem_copy(ctx, type, src, ofs, last);
    return true;
}

static bool fold_xor(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, o_mask, s_mask;
    TempOptInfo *t1, *t2;

    if (fold_const2_commutative(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_xi_to_not(ctx, op, -1)) {
        return true;
    }

    t1 = arg_info(op->args[1]);
    t2 = arg_info(op->args[2]);

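    /*
     * A result bit is known zero when both inputs are known and equal,
     * and known one when exactly one input is known one and the other
     * is known zero.
     */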
    z_mask = (t1->z_mask | t2->z_mask) & ~(t1->o_mask & t2->o_mask);
    o_mask = (t1->o_mask & ~t2->z_mask) | (t2->o_mask & ~t1->z_mask);
    s_mask = t1->s_mask & t2->s_mask;

    return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
}

/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
    int nb_temps, i;
    TCGOp *op, *op_next;
    OptContext ctx = { .tcg = s };

    QSIMPLEQ_INIT(&ctx.mem_free);

    /*
     * Each temp has a TempOptInfo reached via state_ptr.  If the temp
     * holds a constant, its value is kept there; if it is a copy of
     * other temps, the copies are linked in a doubly linked circular
     * list.
     */

    nb_temps = s->nb_temps;
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = NULL;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def;
        bool done = false;

        /* Calls are special. */
        if (opc == INDEX_op_call) {
            fold_call(&ctx, op);
            continue;
        }

        def = &tcg_op_defs[opc];
        init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
        copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);

        /* Pre-compute the type of the operation. */
        ctx.type = TCGOP_TYPE(op);

        /*
         * Process each opcode.
         * Sorted alphabetically by opcode as much as possible.
         */
        switch (opc) {
        case INDEX_op_add:
            done = fold_add(&ctx, op);
            break;
        case INDEX_op_add_vec:
            done = fold_add_vec(&ctx, op);
            break;
        case INDEX_op_addci:
            done = fold_addci(&ctx, op);
            break;
        case INDEX_op_addcio:
            done = fold_addcio(&ctx, op);
            break;
        case INDEX_op_addco:
            done = fold_addco(&ctx, op);
            break;
        case INDEX_op_and:
        case INDEX_op_and_vec:
            done = fold_and(&ctx, op);
            break;
        case INDEX_op_andc:
        case INDEX_op_andc_vec:
            done = fold_andc(&ctx, op);
            break;
        case INDEX_op_brcond:
            done = fold_brcond(&ctx, op);
            break;
        case INDEX_op_brcond2_i32:
            done = fold_brcond2(&ctx, op);
            break;
        case INDEX_op_bswap16:
        case INDEX_op_bswap32:
        case INDEX_op_bswap64:
            done = fold_bswap(&ctx, op);
            break;
        case INDEX_op_clz:
        case INDEX_op_ctz:
            done = fold_count_zeros(&ctx, op);
            break;
        case INDEX_op_ctpop:
            done = fold_ctpop(&ctx, op);
            break;
        case INDEX_op_deposit:
            done = fold_deposit(&ctx, op);
            break;
        case INDEX_op_divs:
        case INDEX_op_divu:
            done = fold_divide(&ctx, op);
            break;
        case INDEX_op_dup_vec:
            done = fold_dup(&ctx, op);
            break;
        case INDEX_op_dup2_vec:
            done = fold_dup2(&ctx, op);
            break;
        case INDEX_op_eqv:
        case INDEX_op_eqv_vec:
            done = fold_eqv(&ctx, op);
            break;
        case INDEX_op_extract:
            done = fold_extract(&ctx, op);
            break;
        case INDEX_op_extract2:
            done = fold_extract2(&ctx, op);
            break;
        case INDEX_op_ext_i32_i64:
            done = fold_exts(&ctx, op);
            break;
        case INDEX_op_extu_i32_i64:
        case INDEX_op_extrl_i64_i32:
        case INDEX_op_extrh_i64_i32:
            done = fold_extu(&ctx, op);
            break;
        case INDEX_op_ld8s:
        case INDEX_op_ld8u:
        case INDEX_op_ld16s:
        case INDEX_op_ld16u:
        case INDEX_op_ld32s:
        case INDEX_op_ld32u:
            done = fold_tcg_ld(&ctx, op);
            break;
        case INDEX_op_ld:
        case INDEX_op_ld_vec:
            done = fold_tcg_ld_memcopy(&ctx, op);
            break;
        case INDEX_op_st8:
        case INDEX_op_st16:
        case INDEX_op_st32:
            done = fold_tcg_st(&ctx, op);
            break;
        case INDEX_op_st:
        case INDEX_op_st_vec:
            done = fold_tcg_st_memcopy(&ctx, op);
            break;
        case INDEX_op_mb:
            done = fold_mb(&ctx, op);
            break;
        case INDEX_op_mov:
        case INDEX_op_mov_vec:
            done = fold_mov(&ctx, op);
            break;
        case INDEX_op_movcond:
            done = fold_movcond(&ctx, op);
            break;
        case INDEX_op_mul:
            done = fold_mul(&ctx, op);
            break;
        case INDEX_op_mulsh:
        case INDEX_op_muluh:
            done = fold_mul_highpart(&ctx, op);
            break;
        case INDEX_op_muls2:
        case INDEX_op_mulu2:
            done = fold_multiply2(&ctx, op);
            break;
        case INDEX_op_nand:
        case INDEX_op_nand_vec:
            done = fold_nand(&ctx, op);
            break;
        case INDEX_op_neg:
            done = fold_neg(&ctx, op);
            break;
        case INDEX_op_nor:
        case INDEX_op_nor_vec:
            done = fold_nor(&ctx, op);
            break;
        case INDEX_op_not:
        case INDEX_op_not_vec:
            done = fold_not(&ctx, op);
            break;
        case INDEX_op_or:
        case INDEX_op_or_vec:
            done = fold_or(&ctx, op);
            break;
        case INDEX_op_orc:
        case INDEX_op_orc_vec:
            done = fold_orc(&ctx, op);
            break;
        case INDEX_op_qemu_ld:
            done = fold_qemu_ld_1reg(&ctx, op);
            break;
        case INDEX_op_qemu_ld2:
            done = fold_qemu_ld_2reg(&ctx, op);
            break;
        case INDEX_op_qemu_st:
        case INDEX_op_qemu_st2:
            done = fold_qemu_st(&ctx, op);
            break;
        case INDEX_op_rems:
        case INDEX_op_remu:
            done = fold_remainder(&ctx, op);
            break;
        case INDEX_op_rotl:
        case INDEX_op_rotr:
        case INDEX_op_sar:
        case INDEX_op_shl:
        case INDEX_op_shr:
            done = fold_shift(&ctx, op);
            break;
        case INDEX_op_setcond:
            done = fold_setcond(&ctx, op);
            break;
        case INDEX_op_negsetcond:
            done = fold_negsetcond(&ctx, op);
            break;
        case INDEX_op_setcond2_i32:
            done = fold_setcond2(&ctx, op);
            break;
        case INDEX_op_cmp_vec:
            done = fold_cmp_vec(&ctx, op);
            break;
        case INDEX_op_cmpsel_vec:
            done = fold_cmpsel_vec(&ctx, op);
            break;
        case INDEX_op_bitsel_vec:
            done = fold_bitsel_vec(&ctx, op);
            break;
        case INDEX_op_sextract:
            done = fold_sextract(&ctx, op);
            break;
        case INDEX_op_sub:
            done = fold_sub(&ctx, op);
            break;
        case INDEX_op_subbi:
            done = fold_subbi(&ctx, op);
            break;
        case INDEX_op_subbio:
            done = fold_subbio(&ctx, op);
            break;
        case INDEX_op_subbo:
            done = fold_subbo(&ctx, op);
            break;
        case INDEX_op_sub_vec:
            done = fold_sub_vec(&ctx, op);
            break;
        case INDEX_op_xor:
        case INDEX_op_xor_vec:
            done = fold_xor(&ctx, op);
            break;
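        /* These opcodes end the extended basic block. */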
        case INDEX_op_set_label:
        case INDEX_op_br:
        case INDEX_op_exit_tb:
        case INDEX_op_goto_tb:
        case INDEX_op_goto_ptr:
            finish_ebb(&ctx);
            done = true;
            break;
        default:
            done = finish_folding(&ctx, op);
            break;
        }
        tcg_debug_assert(done);
    }
}