/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "qemu/interval-tree.h"
#include "tcg/tcg-op-common.h"
#include "tcg-internal.h"
#include "tcg-has.h"


typedef struct MemCopyInfo {
    IntervalTreeNode itree;
    QSIMPLEQ_ENTRY (MemCopyInfo) next;
    TCGTemp *ts;
    TCGType type;
} MemCopyInfo;

typedef struct TempOptInfo {
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
    uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
    uint64_t o_mask;  /* mask bit is 1 if and only if value bit is 1 */
    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
} TempOptInfo;
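
/*
 * Worked example of the three masks: a value known to be exactly 0x00ff
 * has z_mask = 0x00ff (all other bits known zero), o_mask = 0x00ff (the
 * low eight bits known one), and s_mask = 0xffffffffffffff00 (bits 8..63
 * all repeat the most significant bit, here zero).  A wholly unknown
 * value has z_mask = -1, o_mask = 0, s_mask = 0.  When z_mask == o_mask,
 * every bit is known and the value is constant; ti_is_const() below
 * relies on exactly this.
 */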

typedef struct OptContext {
    TCGContext *tcg;
    TCGOp *prev_mb;
    TCGTempSet temps_used;

    IntervalTreeRoot mem_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;

    /* In-flight values from optimization. */
    TCGType type;
    int carry_state;  /* -1 = non-constant, {0,1} = constant carry-in */
} OptContext;

static inline TempOptInfo *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline TempOptInfo *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ti_is_const(TempOptInfo *ti)
{
    /* If all bits that are not known zeros are known ones, it's constant. */
    return ti->z_mask == ti->o_mask;
}

static inline uint64_t ti_const_val(TempOptInfo *ti)
{
    /* If constant, both z_mask and o_mask contain the value. */
    return ti->z_mask;
}

static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
{
    return ti_is_const(ti) && ti_const_val(ti) == val;
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ti_is_const(ts_info(ts));
}

static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
{
    return ti_is_const_val(ts_info(ts), val);
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline uint64_t arg_const_val(TCGArg arg)
{
    return ti_const_val(arg_info(arg));
}

static inline bool arg_is_const_val(TCGArg arg, uint64_t val)
{
    return ts_is_const_val(arg_temp(arg), val);
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}

static TCGTemp *cmp_better_copy(TCGTemp *a, TCGTemp *b)
{
    return a->kind < b->kind ? b : a;
}
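
/*
 * Assuming the TCGTempKind enumeration orders TEMP_EBB < TEMP_TB <
 * TEMP_GLOBAL < TEMP_FIXED < TEMP_CONST, taking the larger kind prefers
 * constants over fixed registers and globals over block-local temps
 * when choosing the canonical representative of a copy list.
 */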

/* Initialize and activate a temporary. */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    TempOptInfo *ti;

    if (test_bit(idx, ctx->temps_used.l)) {
        return;
    }
    set_bit(idx, ctx->temps_used.l);

    ti = ts->state_ptr;
    if (ti == NULL) {
        ti = tcg_malloc(sizeof(TempOptInfo));
        ts->state_ptr = ti;
    }

    ti->next_copy = ts;
    ti->prev_copy = ts;
    QSIMPLEQ_INIT(&ti->mem_copy);
    if (ts->kind == TEMP_CONST) {
        ti->z_mask = ts->val;
        ti->o_mask = ts->val;
        ti->s_mask = INT64_MIN >> clrsb64(ts->val);
    } else {
        ti->z_mask = -1;
        ti->o_mask = 0;
        ti->s_mask = 0;
    }
}
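
/*
 * For a TEMP_CONST every bit is known: e.g. val = -2 yields
 * z_mask = o_mask = 0xfffffffffffffffe (only bit 0 known zero), and
 * clrsb64(-2) = 62 gives s_mask = INT64_MIN >> 62 = ~1, i.e. every
 * bit except bit 0 repeats the sign bit.
 */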

static MemCopyInfo *mem_copy_first(OptContext *ctx, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_first(&ctx->mem_copy, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static MemCopyInfo *mem_copy_next(MemCopyInfo *mem, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_next(&mem->itree, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static void remove_mem_copy(OptContext *ctx, MemCopyInfo *mc)
{
    TCGTemp *ts = mc->ts;
    TempOptInfo *ti = ts_info(ts);

    interval_tree_remove(&mc->itree, &ctx->mem_copy);
    QSIMPLEQ_REMOVE(&ti->mem_copy, mc, MemCopyInfo, next);
    QSIMPLEQ_INSERT_TAIL(&ctx->mem_free, mc, next);
}

static void remove_mem_copy_in(OptContext *ctx, intptr_t s, intptr_t l)
{
    while (true) {
        MemCopyInfo *mc = mem_copy_first(ctx, s, l);
        if (!mc) {
            break;
        }
        remove_mem_copy(ctx, mc);
    }
}

static void remove_mem_copy_all(OptContext *ctx)
{
    remove_mem_copy_in(ctx, 0, -1);
    tcg_debug_assert(interval_tree_is_empty(&ctx->mem_copy));
}

static TCGTemp *find_better_copy(TCGTemp *ts)
{
    TCGTemp *i, *ret;

    /* If this is already readonly, we can't do better. */
    if (temp_readonly(ts)) {
        return ts;
    }

    ret = ts;
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        ret = cmp_better_copy(ret, i);
    }
    return ret;
}

static void move_mem_copies(TCGTemp *dst_ts, TCGTemp *src_ts)
{
    TempOptInfo *si = ts_info(src_ts);
    TempOptInfo *di = ts_info(dst_ts);
    MemCopyInfo *mc;

    QSIMPLEQ_FOREACH(mc, &si->mem_copy, next) {
        tcg_debug_assert(mc->ts == src_ts);
        mc->ts = dst_ts;
    }
    QSIMPLEQ_CONCAT(&di->mem_copy, &si->mem_copy);
}

/* Reset TEMP's state, possibly removing the temp from the list of copies. */
static void reset_ts(OptContext *ctx, TCGTemp *ts)
{
    TempOptInfo *ti = ts_info(ts);
    TCGTemp *pts = ti->prev_copy;
    TCGTemp *nts = ti->next_copy;
    TempOptInfo *pi = ts_info(pts);
    TempOptInfo *ni = ts_info(nts);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->z_mask = -1;
    ti->o_mask = 0;
    ti->s_mask = 0;

    if (!QSIMPLEQ_EMPTY(&ti->mem_copy)) {
        if (ts == nts) {
            /* Last temp copy being removed, the mem copies die. */
            MemCopyInfo *mc;
            QSIMPLEQ_FOREACH(mc, &ti->mem_copy, next) {
                interval_tree_remove(&mc->itree, &ctx->mem_copy);
            }
            QSIMPLEQ_CONCAT(&ctx->mem_free, &ti->mem_copy);
        } else {
            move_mem_copies(find_better_copy(nts), ts);
        }
    }
}

static void reset_temp(OptContext *ctx, TCGArg arg)
{
    reset_ts(ctx, arg_temp(arg));
}

static void record_mem_copy(OptContext *ctx, TCGType type,
                            TCGTemp *ts, intptr_t start, intptr_t last)
{
    MemCopyInfo *mc;
    TempOptInfo *ti;

    mc = QSIMPLEQ_FIRST(&ctx->mem_free);
    if (mc) {
        QSIMPLEQ_REMOVE_HEAD(&ctx->mem_free, next);
    } else {
        mc = tcg_malloc(sizeof(*mc));
    }

    memset(mc, 0, sizeof(*mc));
    mc->itree.start = start;
    mc->itree.last = last;
    mc->type = type;
    interval_tree_insert(&mc->itree, &ctx->mem_copy);

    ts = find_better_copy(ts);
    ti = ts_info(ts);
    mc->ts = ts;
    QSIMPLEQ_INSERT_TAIL(&ti->mem_copy, mc, next);
}

static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}

static TCGTemp *find_mem_copy_for(OptContext *ctx, TCGType type, intptr_t s)
{
    MemCopyInfo *mc;

    for (mc = mem_copy_first(ctx, s, s); mc; mc = mem_copy_next(mc, s, s)) {
        if (mc->itree.start == s && mc->type == type) {
            return find_better_copy(mc->ts);
        }
    }
    return NULL;
}

static TCGArg arg_new_constant(OptContext *ctx, uint64_t val)
{
    TCGType type = ctx->type;
    TCGTemp *ts;

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    ts = tcg_constant_internal(type, val);
    init_ts_info(ctx, ts);

    return temp_arg(ts);
}

static TCGArg arg_new_temp(OptContext *ctx)
{
    TCGTemp *ts = tcg_temp_new_internal(ctx->type, TEMP_EBB);
    init_ts_info(ctx, ts);
    return temp_arg(ts);
}

static TCGOp *opt_insert_after(OptContext *ctx, TCGOp *op,
                               TCGOpcode opc, unsigned narg)
{
    return tcg_op_insert_after(ctx->tcg, op, opc, ctx->type, narg);
}

static TCGOp *opt_insert_before(OptContext *ctx, TCGOp *op,
                                TCGOpcode opc, unsigned narg)
{
    return tcg_op_insert_before(ctx->tcg, op, opc, ctx->type, narg);
}

static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    TempOptInfo *di;
    TempOptInfo *si;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }

    reset_ts(ctx, dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);

    switch (ctx->type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        new_op = INDEX_op_mov;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /* TCGOP_TYPE and TCGOP_VECE remain unchanged. */
        new_op = INDEX_op_mov_vec;
        break;
    default:
        g_assert_not_reached();
    }
    op->opc = new_op;
    op->args[0] = dst;
    op->args[1] = src;

    di->z_mask = si->z_mask;
    di->o_mask = si->o_mask;
    di->s_mask = si->s_mask;

    if (src_ts->type == dst_ts->type) {
        TempOptInfo *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;

        if (!QSIMPLEQ_EMPTY(&si->mem_copy)
            && cmp_better_copy(src_ts, dst_ts) == dst_ts) {
            move_mem_copies(dst_ts, src_ts);
        }
    } else if (dst_ts->type == TCG_TYPE_I32) {
        di->z_mask = (int32_t)di->z_mask;
        di->o_mask = (int32_t)di->o_mask;
        di->s_mask |= INT32_MIN;
    } else {
        di->z_mask |= MAKE_64BIT_MASK(32, 32);
        di->o_mask = (uint32_t)di->o_mask;
        di->s_mask = INT64_MIN;
    }
    return true;
}

static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                             TCGArg dst, uint64_t val)
{
    /* Convert movi to mov with constant temp. */
    return tcg_opt_gen_mov(ctx, op, dst, arg_new_constant(ctx, val));
}

static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
                                      uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

    switch (op) {
    case INDEX_op_add:
        return x + y;

    case INDEX_op_sub:
        return x - y;

    case INDEX_op_mul:
        return x * y;

    case INDEX_op_and:
    case INDEX_op_and_vec:
        return x & y;

    case INDEX_op_or:
    case INDEX_op_or_vec:
        return x | y;

    case INDEX_op_xor:
    case INDEX_op_xor_vec:
        return x ^ y;

    case INDEX_op_shl:
        if (type == TCG_TYPE_I32) {
            return (uint32_t)x << (y & 31);
        }
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr:
        if (type == TCG_TYPE_I32) {
            return (uint32_t)x >> (y & 31);
        }
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar:
        if (type == TCG_TYPE_I32) {
            return (int32_t)x >> (y & 31);
        }
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr:
        if (type == TCG_TYPE_I32) {
            return ror32(x, y & 31);
        }
        return ror64(x, y & 63);

    case INDEX_op_rotl:
        if (type == TCG_TYPE_I32) {
            return rol32(x, y & 31);
        }
        return rol64(x, y & 63);

    case INDEX_op_not:
    case INDEX_op_not_vec:
        return ~x;

    case INDEX_op_neg:
        return -x;

    case INDEX_op_andc:
    case INDEX_op_andc_vec:
        return x & ~y;

    case INDEX_op_orc:
    case INDEX_op_orc_vec:
        return x | ~y;

    case INDEX_op_eqv:
    case INDEX_op_eqv_vec:
        return ~(x ^ y);

    case INDEX_op_nand:
    case INDEX_op_nand_vec:
        return ~(x & y);

    case INDEX_op_nor:
    case INDEX_op_nor_vec:
        return ~(x | y);

    case INDEX_op_clz:
        if (type == TCG_TYPE_I32) {
            return (uint32_t)x ? clz32(x) : y;
        }
        return x ? clz64(x) : y;

    case INDEX_op_ctz:
        if (type == TCG_TYPE_I32) {
            return (uint32_t)x ? ctz32(x) : y;
        }
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop:
        return type == TCG_TYPE_I32 ? ctpop32(x) : ctpop64(x);

    case INDEX_op_bswap16:
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

    case INDEX_op_bswap32:
        x = bswap32(x);
        return y & TCG_BSWAP_OS ? (int32_t)x : x;

    case INDEX_op_bswap64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh:
        if (type == TCG_TYPE_I32) {
            return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
        }
        mulu64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_mulsh:
        if (type == TCG_TYPE_I32) {
            return ((int64_t)(int32_t)x * (int32_t)y) >> 32;
        }
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_divs:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        if (type == TCG_TYPE_I32) {
            return (int32_t)x / ((int32_t)y ? : 1);
        }
        return (int64_t)x / ((int64_t)y ? : 1);

    case INDEX_op_divu:
        if (type == TCG_TYPE_I32) {
            return (uint32_t)x / ((uint32_t)y ? : 1);
        }
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rems:
        if (type == TCG_TYPE_I32) {
            return (int32_t)x % ((int32_t)y ? : 1);
        }
        return (int64_t)x % ((int64_t)y ? : 1);

    case INDEX_op_remu:
        if (type == TCG_TYPE_I32) {
            return (uint32_t)x % ((uint32_t)y ? : 1);
        }
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        g_assert_not_reached();
    }
}

static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
                                    uint64_t x, uint64_t y)
{
    uint64_t res = do_constant_folding_2(op, type, x, y);
    if (type == TCG_TYPE_I32) {
        res = (int32_t)res;
    }
    return res;
}

static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

static int do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        return -1;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}
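
/*
 * Note that TSTEQ/TSTNE cannot be resolved from x == y alone:
 * x TST x is (x & x) != 0, i.e. x != 0, which still depends on
 * the value of x; hence the -1 (unknown) result above.
 */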

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond(TCGType type, TCGArg x,
                                    TCGArg y, TCGCond c)
{
    if (arg_is_const(x) && arg_is_const(y)) {
        uint64_t xv = arg_const_val(x);
        uint64_t yv = arg_const_val(y);

        switch (type) {
        case TCG_TYPE_I32:
            return do_constant_folding_cond_32(xv, yv, c);
        case TCG_TYPE_I64:
            return do_constant_folding_cond_64(xv, yv, c);
        default:
            /* Only scalar comparisons are optimizable */
            return -1;
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const_val(y, 0)) {
        switch (c) {
        case TCG_COND_LTU:
        case TCG_COND_TSTNE:
            return 0;
        case TCG_COND_GEU:
        case TCG_COND_TSTEQ:
            return 1;
        default:
            return -1;
        }
    }
    return -1;
}

/**
 * swap_commutative:
 * @dest: TCGArg of the destination argument, or NO_DEST.
 * @p1: first paired argument
 * @p2: second paired argument
 *
 * If *@p1 is a constant and *@p2 is not, swap.
 * If *@p2 matches @dest, swap.
 * Return true if a swap was performed.
 */

#define NO_DEST  temp_arg(NULL)

static int pref_commutative(TempOptInfo *ti)
{
    /* Slight preference for non-zero constants second. */
    return !ti_is_const(ti) ? 0 : ti_const_val(ti) ? 3 : 2;
}
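
/*
 * The scores above sort operands as: non-constant (0) < zero constant
 * (2) < non-zero constant (3).  swap_commutative() below moves the
 * higher-scoring operand second, so e.g. "add t0, $5, t1" becomes
 * "add t0, t1, $5", and when both operands are constant the non-zero
 * one ends up second.
 */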

static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += pref_commutative(arg_info(a1));
    sum -= pref_commutative(arg_info(a2));

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}

static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += pref_commutative(arg_info(p1[0]));
    sum += pref_commutative(arg_info(p1[1]));
    sum -= pref_commutative(arg_info(p2[0]));
    sum -= pref_commutative(arg_info(p2[1]));
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest,
                                     TCGArg *p1, TCGArg *p2, TCGArg *pcond)
{
    TCGCond cond;
    TempOptInfo *i1;
    bool swap;
    int r;

    swap = swap_commutative(dest, p1, p2);
    cond = *pcond;
    if (swap) {
        *pcond = cond = tcg_swap_cond(cond);
    }

    r = do_constant_folding_cond(ctx->type, *p1, *p2, cond);
    if (r >= 0) {
        return r;
    }
    if (!is_tst_cond(cond)) {
        return -1;
    }

    i1 = arg_info(*p1);

    /*
     * TSTNE x,x -> NE x,0
     * TSTNE x,i -> NE x,0 if i includes all nonzero bits of x
     */
    if (args_are_copies(*p1, *p2) ||
        (arg_is_const(*p2) && (i1->z_mask & ~arg_const_val(*p2)) == 0)) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
        return -1;
    }

    /* TSTNE x,i -> LT x,0 if i only includes sign bit copies */
    if (arg_is_const(*p2) && (arg_const_val(*p2) & ~i1->s_mask) == 0) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_ltge_cond(cond);
        return -1;
    }

    /* Expand to AND with a temporary if no backend support. */
    if (!TCG_TARGET_HAS_tst) {
        TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and, 3);
        TCGArg tmp = arg_new_temp(ctx);

        op2->args[0] = tmp;
        op2->args[1] = *p1;
        op2->args[2] = *p2;

        *p1 = tmp;
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
    }
    return -1;
}
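
/*
 * Examples of the TST simplifications above: if x is known to fit in
 * eight bits (z_mask = 0xff), then TSTNE x,0xff tests every bit that
 * could possibly be set and is equivalent to NE x,0.  If the constant
 * covers only sign-bit copies (e.g. 0x80000000 for a 32-bit value),
 * then (x & i) != 0 exactly when the sign bit is set, giving LT x,0.
 */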

static int do_constant_folding_cond2(OptContext *ctx, TCGOp *op, TCGArg *args)
{
    TCGArg al, ah, bl, bh;
    TCGCond c;
    bool swap;
    int r;

    swap = swap_commutative2(args, args + 2);
    c = args[4];
    if (swap) {
        args[4] = c = tcg_swap_cond(c);
    }

    al = args[0];
    ah = args[1];
    bl = args[2];
    bh = args[3];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_const_val(bl);
        tcg_target_ulong bhv = arg_const_val(bh);
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_const_val(al);
            tcg_target_ulong ahv = arg_const_val(ah);
            uint64_t a = deposit64(alv, 32, 32, ahv);

            r = do_constant_folding_cond_64(a, b, c);
            if (r >= 0) {
                return r;
            }
        }

        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
            case TCG_COND_TSTNE:
                return 0;
            case TCG_COND_GEU:
            case TCG_COND_TSTEQ:
                return 1;
            default:
                break;
            }
        }

        /* TSTNE x,-1 -> NE x,0 */
        if (b == -1 && is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }

        /* TSTNE x,sign -> LT x,0 */
        if (b == INT64_MIN && is_tst_cond(c)) {
            /* bl must be 0, so copy that to bh */
            args[3] = bl;
            args[4] = tcg_tst_ltge_cond(c);
            return -1;
        }
    }

    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        r = do_constant_folding_cond_eq(c);
        if (r >= 0) {
            return r;
        }

        /* TSTNE x,x -> NE x,0 */
        if (is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }
    }

    /* Expand to AND with a temporary if no backend support. */
    if (!TCG_TARGET_HAS_tst && is_tst_cond(c)) {
        TCGOp *op1 = opt_insert_before(ctx, op, INDEX_op_and, 3);
        TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and, 3);
        TCGArg t1 = arg_new_temp(ctx);
        TCGArg t2 = arg_new_temp(ctx);

        op1->args[0] = t1;
        op1->args[1] = al;
        op1->args[2] = bl;
        op2->args[0] = t2;
        op2->args[1] = ah;
        op2->args[2] = bh;

        args[0] = t1;
        args[1] = t2;
        args[3] = args[2] = arg_new_constant(ctx, 0);
        args[4] = tcg_tst_eqne_cond(c);
    }
    return -1;
}

static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
{
    for (int i = 0; i < nb_args; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        init_ts_info(ctx, ts);
    }
}

static void copy_propagate(OptContext *ctx, TCGOp *op,
                           int nb_oargs, int nb_iargs)
{
    for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts_is_copy(ts)) {
            op->args[i] = temp_arg(find_better_copy(ts));
        }
    }
}

static void finish_bb(OptContext *ctx)
{
    /* Memory barriers are only optimized within a basic block. */
    ctx->prev_mb = NULL;
}

static void finish_ebb(OptContext *ctx)
{
    finish_bb(ctx);
    /* Copies and constants are only tracked within an extended basic block. */
    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
    remove_mem_copy_all(ctx);
}

static bool finish_folding(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    int i, nb_oargs;

    nb_oargs = def->nb_oargs;
    for (i = 0; i < nb_oargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        reset_ts(ctx, ts);
    }
    return true;
}

/*
 * The fold_* functions return true when processing is complete,
 * usually by folding the operation to a constant or to a copy,
 * and calling tcg_opt_gen_{mov,movi}.  They may do other things,
 * like collect information about the value produced, for use in
 * optimizing a subsequent operation.
 *
 * These first fold_* functions are all helpers, used by other
 * folders for more specific operations.
 */

static bool fold_const1(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_const_val(op->args[1]);

        t = do_constant_folding(op->opc, ctx->type, t, 0);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_const2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_const_val(op->args[1]);
        uint64_t t2 = arg_const_val(op->args[2]);

        t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}

static bool fold_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return false;
}

static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return fold_const2(ctx, op);
}

/*
 * Record "zero", "one" and "sign" masks for the single output of @op.
 * See TempOptInfo definition of z_mask, o_mask and s_mask.
 * If the masks prove the output constant, fold to that constant;
 * if @a_mask proves it equal to the first input, fold to a copy.
 * The passed s_mask may be augmented by z_mask.
 */
static bool fold_masks_zosa(OptContext *ctx, TCGOp *op, uint64_t z_mask,
                            uint64_t o_mask, int64_t s_mask, uint64_t a_mask)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    TCGTemp *ts;
    TempOptInfo *ti;
    int rep;

    /* Only single-output opcodes are supported here. */
    tcg_debug_assert(def->nb_oargs == 1);

    /*
     * 32-bit ops generate 32-bit results, which for the purpose of
     * simplifying tcg are sign-extended.  Certainly that's how we
     * represent our constants elsewhere.  Note that the bits will
     * be reset properly for a 64-bit value when encountering the
     * type changing opcodes.
     */
    if (ctx->type == TCG_TYPE_I32) {
        z_mask = (int32_t)z_mask;
        o_mask = (int32_t)o_mask;
        s_mask |= INT32_MIN;
        a_mask = (uint32_t)a_mask;
    }

    /* Bits that are known 1 and bits that are known 0 must not overlap. */
    tcg_debug_assert((o_mask & ~z_mask) == 0);

    /* If all bits that are not known zero are known one, it's a constant. */
    if (z_mask == o_mask) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], o_mask);
    }

    /* If no bits are affected, the operation devolves to a copy. */
    if (a_mask == 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }

    ts = arg_temp(op->args[0]);
    reset_ts(ctx, ts);

    ti = ts_info(ts);
    ti->z_mask = z_mask;
    ti->o_mask = o_mask;

    /* Canonicalize s_mask and incorporate data from z_mask. */
    rep = clz64(~s_mask);
    rep = MAX(rep, clz64(z_mask));
    rep = MAX(rep, clz64(~o_mask));
    rep = MAX(rep - 1, 0);
    ti->s_mask = INT64_MIN >> rep;

    return true;
}
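
/*
 * Example of the s_mask canonicalization above: a 16-bit zero-extended
 * result arrives with z_mask = 0xffff and s_mask = 0.  clz64(z_mask) is
 * 48, so rep = 47 and the stored s_mask is INT64_MIN >> 47, recording
 * that bits 16..63 all repeat the (zero) sign bit.
 */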

static bool fold_masks_zos(OptContext *ctx, TCGOp *op,
                           uint64_t z_mask, uint64_t o_mask, uint64_t s_mask)
{
    return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, -1);
}

static bool fold_masks_zo(OptContext *ctx, TCGOp *op,
                          uint64_t z_mask, uint64_t o_mask)
{
    return fold_masks_zosa(ctx, op, z_mask, o_mask, 0, -1);
}

static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
                          uint64_t z_mask, uint64_t s_mask)
{
    return fold_masks_zosa(ctx, op, z_mask, 0, s_mask, -1);
}

static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
{
    return fold_masks_zosa(ctx, op, z_mask, 0, 0, -1);
}

static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
{
    return fold_masks_zosa(ctx, op, -1, 0, s_mask, -1);
}

/*
 * An "affected" mask bit is 0 if and only if the result is identical
 * to the first input.  Thus if the entire mask is 0, the operation
 * is equivalent to a copy.
 */
static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
{
    if (ctx->type == TCG_TYPE_I32) {
        a_mask = (uint32_t)a_mask;
    }
    if (a_mask == 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/*
 * Convert @op to NOT, if NOT is supported by the host.
 * Return true if the conversion is successful, which will still
 * indicate that the processing is complete.
 */
static bool fold_not(OptContext *ctx, TCGOp *op);
static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
{
    TCGOpcode not_op;
    bool have_not;

    switch (ctx->type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        not_op = INDEX_op_not;
        have_not = tcg_op_supported(INDEX_op_not, ctx->type, 0);
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        not_op = INDEX_op_not_vec;
        have_not = TCG_TARGET_HAS_not_vec;
        break;
    default:
        g_assert_not_reached();
    }
    if (have_not) {
        op->opc = not_op;
        op->args[1] = op->args[idx];
        return fold_not(ctx, op);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to @i. */
static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to NOT. */
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return fold_to_not(ctx, op, 2);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to @i. */
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to identity. */
static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to NOT. */
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return fold_to_not(ctx, op, 1);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to @i. */
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to identity. */
static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/*
 * These outermost fold_<op> functions are sorted alphabetically.
 *
 * The ordering of the transformations should be:
 *   1) those that produce a constant
 *   2) those that produce a copy
 *   3) those that produce information about the result value.
 */

static bool fold_addco(OptContext *ctx, TCGOp *op);
static bool fold_or(OptContext *ctx, TCGOp *op);
static bool fold_orc(OptContext *ctx, TCGOp *op);
static bool fold_subbo(OptContext *ctx, TCGOp *op);
static bool fold_xor(OptContext *ctx, TCGOp *op);

static bool fold_add(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return finish_folding(ctx, op);
}

/* We cannot as yet do_constant_folding with vectors. */
static bool fold_add_vec(OptContext *ctx, TCGOp *op)
{
    if (fold_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return finish_folding(ctx, op);
}

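/*
 * The carry-out produced by the preceding op is known and will be
 * consumed as a constant, so rewrite that op to not compute it:
 * addco becomes a plain add, addcio keeps only its carry-in (addci),
 * and addc1o becomes an add with its known carry-in of 1 folded into
 * a constant operand or emitted as a second add.
 */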
Richard Hendersonaeb35142025-01-14 18:28:15 -08001271static void squash_prev_carryout(OptContext *ctx, TCGOp *op)
1272{
1273 TempOptInfo *t2;
1274
1275 op = QTAILQ_PREV(op, link);
1276 switch (op->opc) {
1277 case INDEX_op_addco:
1278 op->opc = INDEX_op_add;
1279 fold_add(ctx, op);
1280 break;
1281 case INDEX_op_addcio:
1282 op->opc = INDEX_op_addci;
1283 break;
1284 case INDEX_op_addc1o:
1285 op->opc = INDEX_op_add;
1286 t2 = arg_info(op->args[2]);
1287 if (ti_is_const(t2)) {
1288 op->args[2] = arg_new_constant(ctx, ti_const_val(t2) + 1);
1289 /* Perform other constant folding, if needed. */
1290 fold_add(ctx, op);
1291 } else {
1292 TCGArg ret = op->args[0];
1293 op = opt_insert_after(ctx, op, INDEX_op_add, 3);
1294 op->args[0] = ret;
1295 op->args[1] = ret;
1296 op->args[2] = arg_new_constant(ctx, 1);
1297 }
1298 break;
1299 default:
1300 g_assert_not_reached();
1301 }
1302}
1303
1304static bool fold_addci(OptContext *ctx, TCGOp *op)
Richard Henderson76f42782025-01-14 13:58:39 -08001305{
1306 fold_commutative(ctx, op);
Richard Hendersonaeb35142025-01-14 18:28:15 -08001307
1308 if (ctx->carry_state < 0) {
1309 return finish_folding(ctx, op);
1310 }
1311
1312 squash_prev_carryout(ctx, op);
1313 op->opc = INDEX_op_add;
1314
1315 if (ctx->carry_state > 0) {
1316 TempOptInfo *t2 = arg_info(op->args[2]);
1317
1318 /*
1319 * Propagate the known carry-in into a constant, if possible.
1320 * Otherwise emit a second add +1.
1321 */
1322 if (ti_is_const(t2)) {
1323 op->args[2] = arg_new_constant(ctx, ti_const_val(t2) + 1);
1324 } else {
1325 TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_add, 3);
1326
1327 op2->args[0] = op->args[0];
1328 op2->args[1] = op->args[1];
1329 op2->args[2] = op->args[2];
1330 fold_add(ctx, op2);
1331
1332 op->args[1] = op->args[0];
1333 op->args[2] = arg_new_constant(ctx, 1);
1334 }
1335 }
1336
1337 ctx->carry_state = -1;
1338 return fold_add(ctx, op);
1339}
1340
1341static bool fold_addcio(OptContext *ctx, TCGOp *op)
1342{
1343 TempOptInfo *t1, *t2;
1344 int carry_out = -1;
1345 uint64_t sum, max;
1346
1347 fold_commutative(ctx, op);
1348 t1 = arg_info(op->args[1]);
1349 t2 = arg_info(op->args[2]);
1350
1351 /*
1352 * The z_mask value is >= the maximum value that can be represented
1353 * with the known zero bits. So adding the z_mask values will not
1354 * overflow if and only if the true values cannot overflow.
1355 */
1356 if (!uadd64_overflow(t1->z_mask, t2->z_mask, &sum) &&
1357 !uadd64_overflow(sum, ctx->carry_state != 0, &sum)) {
1358 carry_out = 0;
1359 }
1360
1361 if (ctx->carry_state < 0) {
1362 ctx->carry_state = carry_out;
1363 return finish_folding(ctx, op);
1364 }
1365
1366 squash_prev_carryout(ctx, op);
1367 if (ctx->carry_state == 0) {
1368 goto do_addco;
1369 }
1370
1371 /* Propagate the known carry-in into a constant, if possible. */
1372 max = ctx->type == TCG_TYPE_I32 ? UINT32_MAX : UINT64_MAX;
1373 if (ti_is_const(t2)) {
1374 uint64_t v = ti_const_val(t2) & max;
1375 if (v < max) {
1376 op->args[2] = arg_new_constant(ctx, v + 1);
1377 goto do_addco;
1378 }
1379 /* max + known carry in produces known carry out. */
1380 carry_out = 1;
1381 }
1382 if (ti_is_const(t1)) {
1383 uint64_t v = ti_const_val(t1) & max;
1384 if (v < max) {
1385 op->args[1] = arg_new_constant(ctx, v + 1);
1386 goto do_addco;
1387 }
1388 carry_out = 1;
1389 }
1390
1391 /* Adjust the opcode to remember the known carry-in. */
1392 op->opc = INDEX_op_addc1o;
1393 ctx->carry_state = carry_out;
1394 return finish_folding(ctx, op);
1395
1396 do_addco:
1397 op->opc = INDEX_op_addco;
1398 return fold_addco(ctx, op);
1399}
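
/*
 * Added numeric example (commentary only) for the overflow test above:
 * with t1->z_mask == 0xff and t2->z_mask == 0xff the true values are
 * each at most 0xff, and 0xff + 0xff + 1 == 0x1ff does not wrap a
 * 64-bit sum, so the carry-out is provably 0.
 */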
1400
1401static bool fold_addco(OptContext *ctx, TCGOp *op)
1402{
1403 TempOptInfo *t1, *t2;
1404 int carry_out = -1;
1405 uint64_t ign;
1406
1407 fold_commutative(ctx, op);
1408 t1 = arg_info(op->args[1]);
1409 t2 = arg_info(op->args[2]);
1410
1411 if (ti_is_const(t2)) {
1412 uint64_t v2 = ti_const_val(t2);
1413
1414 if (ti_is_const(t1)) {
1415 uint64_t v1 = ti_const_val(t1);
1416 /* Given sign-extension of z_mask for I32, we need not truncate. */
1417 carry_out = uadd64_overflow(v1, v2, &ign);
1418 } else if (v2 == 0) {
1419 carry_out = 0;
1420 }
1421 } else {
1422 /*
1423 * The z_mask value is >= the maximum value that can be represented
1424 * with the known zero bits. So adding the z_mask values will not
1425 * overflow if and only if the true values cannot overflow.
1426 */
1427 if (!uadd64_overflow(t1->z_mask, t2->z_mask, &ign)) {
1428 carry_out = 0;
1429 }
1430 }
1431 ctx->carry_state = carry_out;
Richard Henderson76f42782025-01-14 13:58:39 -08001432 return finish_folding(ctx, op);
1433}
1434
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001435static bool fold_and(OptContext *ctx, TCGOp *op)
1436{
Richard Henderson1e2edf82024-12-09 16:48:36 -06001437 uint64_t z_mask, o_mask, s_mask, a_mask;
Richard Henderson1ca73722024-12-08 18:47:15 -06001438 TempOptInfo *t1, *t2;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001439
Richard Henderson7a2f7082021-08-26 07:06:39 -07001440 if (fold_const2_commutative(ctx, op) ||
Richard Hendersone8679952021-08-25 13:19:52 -07001441 fold_xi_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001442 fold_xi_to_x(ctx, op, -1) ||
Richard Hendersonca7bb042021-08-25 13:14:21 -07001443 fold_xx_to_x(ctx, op)) {
1444 return true;
1445 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001446
Richard Henderson1ca73722024-12-08 18:47:15 -06001447 t1 = arg_info(op->args[1]);
1448 t2 = arg_info(op->args[2]);
Richard Henderson3f2b1f82021-08-26 13:08:54 -07001449
Richard Henderson1e2edf82024-12-09 16:48:36 -06001450 z_mask = t1->z_mask & t2->z_mask;
1451 o_mask = t1->o_mask & t2->o_mask;
Richard Henderson1ca73722024-12-08 18:47:15 -06001452
1453 /*
1454 * Sign repetitions are perforce all identical, whether they are 1 or 0.
1455 * Bitwise operations preserve the relative quantity of the repetitions.
1456 */
1457 s_mask = t1->s_mask & t2->s_mask;
1458
Richard Henderson1e2edf82024-12-09 16:48:36 -06001459 /* Affected bits are those not known zero, masked by those known one. */
1460 a_mask = t1->z_mask & ~t2->o_mask;
1461
1462 return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, a_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001463}
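
/*
 * Added example (commentary only) for the affected-bits test above:
 * given "and r, x, 0xff" with x->z_mask == 0x0f, every bit that may
 * be nonzero in x is known-one in the constant mask, so a_mask == 0
 * and fold_masks_zosa() reduces the op to "mov r, x".
 */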
1464
1465static bool fold_andc(OptContext *ctx, TCGOp *op)
1466{
Richard Hendersond4d441e2024-12-22 16:08:42 -08001467 uint64_t z_mask, o_mask, s_mask, a_mask;
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001468 TempOptInfo *t1, *t2;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001469
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07001470 if (fold_const2(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001471 fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001472 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001473 fold_ix_to_not(ctx, op, -1)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07001474 return true;
1475 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001476
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001477 t1 = arg_info(op->args[1]);
1478 t2 = arg_info(op->args[2]);
Richard Hendersonfae450b2021-08-25 22:42:19 -07001479
Richard Henderson899281c2023-11-15 11:18:55 -08001480 if (ti_is_const(t2)) {
1481 /* Fold andc r,x,i to and r,x,~i. */
1482 switch (ctx->type) {
1483 case TCG_TYPE_I32:
1484 case TCG_TYPE_I64:
1485 op->opc = INDEX_op_and;
1486 break;
1487 case TCG_TYPE_V64:
1488 case TCG_TYPE_V128:
1489 case TCG_TYPE_V256:
1490 op->opc = INDEX_op_and_vec;
1491 break;
1492 default:
1493 g_assert_not_reached();
1494 }
1495 op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
1496 return fold_and(ctx, op);
1497 }
1498
Richard Hendersond4d441e2024-12-22 16:08:42 -08001499 z_mask = t1->z_mask & ~t2->o_mask;
1500 o_mask = t1->o_mask & ~t2->z_mask;
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001501 s_mask = t1->s_mask & t2->s_mask;
Richard Hendersond4d441e2024-12-22 16:08:42 -08001502
1503 /* Affected bits are those not known zero, masked by those known zero. */
1504 a_mask = t1->z_mask & t2->z_mask;
1505
1506 return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, a_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001507}
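
/*
 * Added example (commentary only): "andc r, x, 0x0f" is converted by
 * the constant case above into "and r, x, ~0x0f", which then benefits
 * from all of the fold_and() simplifications and mask tracking.
 */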
1508
Richard Henderson7d3c63a2024-12-09 14:06:08 -06001509static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
1510{
1511 /* If true and false values are the same, eliminate the cmp. */
1512 if (args_are_copies(op->args[2], op->args[3])) {
1513 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
1514 }
1515
1516 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
Richard Hendersonc1fa1b32025-02-17 15:17:47 -08001517 uint64_t tv = arg_const_val(op->args[2]);
1518 uint64_t fv = arg_const_val(op->args[3]);
Richard Henderson7d3c63a2024-12-09 14:06:08 -06001519
1520 if (tv == -1 && fv == 0) {
1521 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
1522 }
1523 if (tv == 0 && fv == -1) {
1524 if (TCG_TARGET_HAS_not_vec) {
1525 op->opc = INDEX_op_not_vec;
1526 return fold_not(ctx, op);
1527 } else {
1528 op->opc = INDEX_op_xor_vec;
1529 op->args[2] = arg_new_constant(ctx, -1);
1530 return fold_xor(ctx, op);
1531 }
1532 }
1533 }
1534 if (arg_is_const(op->args[2])) {
Richard Hendersonc1fa1b32025-02-17 15:17:47 -08001535 uint64_t tv = arg_const_val(op->args[2]);
Richard Henderson7d3c63a2024-12-09 14:06:08 -06001536 if (tv == -1) {
1537 op->opc = INDEX_op_or_vec;
1538 op->args[2] = op->args[3];
1539 return fold_or(ctx, op);
1540 }
1541 if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
1542 op->opc = INDEX_op_andc_vec;
1543 op->args[2] = op->args[1];
1544 op->args[1] = op->args[3];
1545 return fold_andc(ctx, op);
1546 }
1547 }
1548 if (arg_is_const(op->args[3])) {
Richard Hendersonc1fa1b32025-02-17 15:17:47 -08001549 uint64_t fv = arg_const_val(op->args[3]);
Richard Henderson7d3c63a2024-12-09 14:06:08 -06001550 if (fv == 0) {
1551 op->opc = INDEX_op_and_vec;
1552 return fold_and(ctx, op);
1553 }
1554 if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
1555 op->opc = INDEX_op_orc_vec;
1556             /* Swap args 1 and 2, using the now-dead args[3] as scratch. */
                 op->args[3] = op->args[1];
1557             op->args[1] = op->args[2];
                 op->args[2] = op->args[3];
1558 return fold_orc(ctx, op);
1559 }
1560 }
1561 return finish_folding(ctx, op);
1562}
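
/*
 * Added note (commentary only): bitsel_vec computes
 *     r = (args[2] & args[1]) | (args[3] & ~args[1])
 * so e.g. a constant -1 "true" value reduces to or_vec, a constant 0
 * "false" value reduces to and_vec, and so on for the cases above.
 */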
1563
Richard Henderson079b0802021-08-24 09:30:59 -07001564static bool fold_brcond(OptContext *ctx, TCGOp *op)
1565{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001566 int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
Richard Henderson246c4b72023-10-24 16:36:50 -07001567 &op->args[1], &op->args[2]);
Richard Henderson079b0802021-08-24 09:30:59 -07001568 if (i == 0) {
1569 tcg_op_remove(ctx->tcg, op);
1570 return true;
1571 }
1572 if (i > 0) {
1573 op->opc = INDEX_op_br;
1574 op->args[0] = op->args[3];
Richard Henderson15268552024-12-08 07:45:11 -06001575 finish_ebb(ctx);
1576 } else {
1577 finish_bb(ctx);
Richard Henderson079b0802021-08-24 09:30:59 -07001578 }
Richard Henderson15268552024-12-08 07:45:11 -06001579 return true;
Richard Henderson079b0802021-08-24 09:30:59 -07001580}
1581
Richard Henderson764d2ab2021-08-24 09:22:11 -07001582static bool fold_brcond2(OptContext *ctx, TCGOp *op)
1583{
Richard Henderson7e64b112023-10-24 16:53:56 -07001584 TCGCond cond;
1585 TCGArg label;
Richard Henderson7a2f7082021-08-26 07:06:39 -07001586 int i, inv = 0;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001587
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001588 i = do_constant_folding_cond2(ctx, op, &op->args[0]);
Richard Henderson7e64b112023-10-24 16:53:56 -07001589 cond = op->args[4];
1590 label = op->args[5];
Richard Henderson764d2ab2021-08-24 09:22:11 -07001591 if (i >= 0) {
1592 goto do_brcond_const;
1593 }
1594
1595 switch (cond) {
1596 case TCG_COND_LT:
1597 case TCG_COND_GE:
1598 /*
1599 * Simplify LT/GE comparisons vs zero to a single compare
1600 * vs the high word of the input.
1601 */
Richard Henderson27cdb852023-10-23 11:38:00 -07001602 if (arg_is_const_val(op->args[2], 0) &&
1603 arg_is_const_val(op->args[3], 0)) {
Richard Henderson764d2ab2021-08-24 09:22:11 -07001604 goto do_brcond_high;
1605 }
1606 break;
1607
1608 case TCG_COND_NE:
1609 inv = 1;
1610 QEMU_FALLTHROUGH;
1611 case TCG_COND_EQ:
1612 /*
1613 * Simplify EQ/NE comparisons where one of the pairs
1614 * can be simplified.
1615 */
Richard Henderson67f84c92021-08-25 08:00:20 -07001616 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
Richard Henderson764d2ab2021-08-24 09:22:11 -07001617 op->args[2], cond);
1618 switch (i ^ inv) {
1619 case 0:
1620 goto do_brcond_const;
1621 case 1:
1622 goto do_brcond_high;
1623 }
1624
Richard Henderson67f84c92021-08-25 08:00:20 -07001625 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
Richard Henderson764d2ab2021-08-24 09:22:11 -07001626 op->args[3], cond);
1627 switch (i ^ inv) {
1628 case 0:
1629 goto do_brcond_const;
1630 case 1:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001631 goto do_brcond_low;
1632 }
1633 break;
1634
1635 case TCG_COND_TSTEQ:
1636 case TCG_COND_TSTNE:
1637 if (arg_is_const_val(op->args[2], 0)) {
1638 goto do_brcond_high;
1639 }
1640 if (arg_is_const_val(op->args[3], 0)) {
1641 goto do_brcond_low;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001642 }
1643 break;
1644
1645 default:
1646 break;
1647
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001648 do_brcond_low:
Richard Hendersonb6d69fc2025-01-10 11:49:22 -08001649 op->opc = INDEX_op_brcond;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001650 op->args[1] = op->args[2];
1651 op->args[2] = cond;
1652 op->args[3] = label;
1653 return fold_brcond(ctx, op);
1654
Richard Henderson764d2ab2021-08-24 09:22:11 -07001655 do_brcond_high:
Richard Hendersonb6d69fc2025-01-10 11:49:22 -08001656 op->opc = INDEX_op_brcond;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001657 op->args[0] = op->args[1];
1658 op->args[1] = op->args[3];
1659 op->args[2] = cond;
1660 op->args[3] = label;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001661 return fold_brcond(ctx, op);
Richard Henderson764d2ab2021-08-24 09:22:11 -07001662
1663 do_brcond_const:
1664 if (i == 0) {
1665 tcg_op_remove(ctx->tcg, op);
1666 return true;
1667 }
1668 op->opc = INDEX_op_br;
1669 op->args[0] = label;
Richard Henderson15268552024-12-08 07:45:11 -06001670 finish_ebb(ctx);
1671 return true;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001672 }
Richard Henderson15268552024-12-08 07:45:11 -06001673
1674 finish_bb(ctx);
1675 return true;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001676}
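
/*
 * Added example (commentary only): on a 32-bit host,
 *     brcond2 lo, hi, 0, 0, lt, label
 * tests the sign of a 64-bit value, which depends only on the high
 * part, so the code above reduces it to "brcond hi, 0, lt, label".
 */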
1677
Richard Henderson09bacdc2021-08-24 11:58:12 -07001678static bool fold_bswap(OptContext *ctx, TCGOp *op)
1679{
Richard Hendersone6e37332024-12-10 15:02:41 -06001680 uint64_t z_mask, o_mask, s_mask;
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001681 TempOptInfo *t1 = arg_info(op->args[1]);
Richard Hendersone6e37332024-12-10 15:02:41 -06001682 int flags = op->args[2];
Richard Hendersonfae450b2021-08-25 22:42:19 -07001683
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001684 if (ti_is_const(t1)) {
1685 return tcg_opt_gen_movi(ctx, op, op->args[0],
1686 do_constant_folding(op->opc, ctx->type,
Richard Hendersone6e37332024-12-10 15:02:41 -06001687 ti_const_val(t1), flags));
Richard Henderson09bacdc2021-08-24 11:58:12 -07001688 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001689
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001690 z_mask = t1->z_mask;
Richard Hendersone6e37332024-12-10 15:02:41 -06001691 o_mask = t1->o_mask;
1692 s_mask = 0;
1693
Richard Hendersonfae450b2021-08-25 22:42:19 -07001694 switch (op->opc) {
Richard Henderson0dd07ee2025-01-10 18:51:16 -08001695 case INDEX_op_bswap16:
Richard Hendersonfae450b2021-08-25 22:42:19 -07001696 z_mask = bswap16(z_mask);
Richard Hendersone6e37332024-12-10 15:02:41 -06001697 o_mask = bswap16(o_mask);
1698 if (flags & TCG_BSWAP_OS) {
1699 z_mask = (int16_t)z_mask;
1700 o_mask = (int16_t)o_mask;
1701 s_mask = INT16_MIN;
1702 } else if (!(flags & TCG_BSWAP_OZ)) {
1703 z_mask |= MAKE_64BIT_MASK(16, 48);
1704 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001705 break;
Richard Henderson7498d882025-01-10 19:53:51 -08001706 case INDEX_op_bswap32:
Richard Hendersonfae450b2021-08-25 22:42:19 -07001707 z_mask = bswap32(z_mask);
Richard Hendersone6e37332024-12-10 15:02:41 -06001708 o_mask = bswap32(o_mask);
1709 if (flags & TCG_BSWAP_OS) {
1710 z_mask = (int32_t)z_mask;
1711 o_mask = (int32_t)o_mask;
1712 s_mask = INT32_MIN;
1713 } else if (!(flags & TCG_BSWAP_OZ)) {
1714 z_mask |= MAKE_64BIT_MASK(32, 32);
1715 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001716 break;
Richard Henderson3ad5d4c2025-01-10 21:54:44 -08001717 case INDEX_op_bswap64:
Richard Hendersonfae450b2021-08-25 22:42:19 -07001718 z_mask = bswap64(z_mask);
Richard Hendersone6e37332024-12-10 15:02:41 -06001719 o_mask = bswap64(o_mask);
Richard Hendersonfae450b2021-08-25 22:42:19 -07001720 break;
1721 default:
1722 g_assert_not_reached();
1723 }
1724
Richard Hendersone6e37332024-12-10 15:02:41 -06001725 return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
Richard Henderson09bacdc2021-08-24 11:58:12 -07001726}
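
/*
 * Added example (commentary only): for bswap16 with TCG_BSWAP_OS and
 * t1->z_mask == 0x00ff, the swapped z_mask is 0xff00; after the
 * sign-extension from bit 15 we get z_mask == (int16_t)0xff00 and
 * s_mask == INT16_MIN, i.e. bits 15..63 all repeat the sign bit.
 */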
1727
Richard Henderson5cf32be2021-08-24 08:17:08 -07001728static bool fold_call(OptContext *ctx, TCGOp *op)
1729{
1730 TCGContext *s = ctx->tcg;
1731 int nb_oargs = TCGOP_CALLO(op);
1732 int nb_iargs = TCGOP_CALLI(op);
1733 int flags, i;
1734
1735 init_arguments(ctx, op, nb_oargs + nb_iargs);
1736 copy_propagate(ctx, op, nb_oargs, nb_iargs);
1737
1738 /* If the function reads or writes globals, reset temp data. */
1739 flags = tcg_call_flags(op);
1740 if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
1741 int nb_globals = s->nb_globals;
1742
1743 for (i = 0; i < nb_globals; i++) {
1744 if (test_bit(i, ctx->temps_used.l)) {
Richard Henderson986cac12023-01-09 13:59:35 -08001745 reset_ts(ctx, &ctx->tcg->temps[i]);
Richard Henderson5cf32be2021-08-24 08:17:08 -07001746 }
1747 }
1748 }
1749
Richard Hendersonab84dc32023-08-23 23:04:24 -07001750 /* If the function has side effects, reset mem data. */
1751 if (!(flags & TCG_CALL_NO_SIDE_EFFECTS)) {
1752 remove_mem_copy_all(ctx);
1753 }
1754
Richard Henderson5cf32be2021-08-24 08:17:08 -07001755 /* Reset temp data for outputs. */
1756 for (i = 0; i < nb_oargs; i++) {
Richard Henderson986cac12023-01-09 13:59:35 -08001757 reset_temp(ctx, op->args[i]);
Richard Henderson5cf32be2021-08-24 08:17:08 -07001758 }
1759
1760 /* Stop optimizing MB across calls. */
1761 ctx->prev_mb = NULL;
1762 return true;
1763}
1764
Richard Henderson29f65862024-12-09 14:09:49 -06001765static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
1766{
1767 /* Canonicalize the comparison to put immediate second. */
1768 if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
1769 op->args[3] = tcg_swap_cond(op->args[3]);
1770 }
1771 return finish_folding(ctx, op);
1772}
1773
1774static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
1775{
1776 /* If true and false values are the same, eliminate the cmp. */
1777 if (args_are_copies(op->args[3], op->args[4])) {
1778 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
1779 }
1780
1781 /* Canonicalize the comparison to put immediate second. */
1782 if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
1783 op->args[5] = tcg_swap_cond(op->args[5]);
1784 }
1785 /*
1786 * Canonicalize the "false" input reg to match the destination,
1787 * so that the tcg backend can implement "move if true".
1788 */
1789 if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
1790 op->args[5] = tcg_invert_cond(op->args[5]);
1791 }
1792 return finish_folding(ctx, op);
1793}
1794
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001795static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
1796{
Richard Hendersonce1d6632024-12-08 19:47:51 -06001797 uint64_t z_mask, s_mask;
1798 TempOptInfo *t1 = arg_info(op->args[1]);
1799 TempOptInfo *t2 = arg_info(op->args[2]);
Richard Hendersonfae450b2021-08-25 22:42:19 -07001800
Richard Hendersonce1d6632024-12-08 19:47:51 -06001801 if (ti_is_const(t1)) {
1802 uint64_t t = ti_const_val(t1);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001803
1804 if (t != 0) {
Richard Henderson67f84c92021-08-25 08:00:20 -07001805 t = do_constant_folding(op->opc, ctx->type, t, 0);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001806 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1807 }
1808 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
1809 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001810
1811 switch (ctx->type) {
1812 case TCG_TYPE_I32:
1813 z_mask = 31;
1814 break;
1815 case TCG_TYPE_I64:
1816 z_mask = 63;
1817 break;
1818 default:
1819 g_assert_not_reached();
1820 }
Richard Hendersonce1d6632024-12-08 19:47:51 -06001821 s_mask = ~z_mask;
1822 z_mask |= t2->z_mask;
1823 s_mask &= t2->s_mask;
1824
1825 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001826}
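
/*
 * Added note (commentary only): for a 32-bit clz/ctz of a nonzero
 * input the result lies in [0, 31], hence the initial z_mask of 31;
 * both masks are then widened by those of args[2], the fallback value
 * returned when the input is zero.
 */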
1827
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001828static bool fold_ctpop(OptContext *ctx, TCGOp *op)
1829{
Richard Henderson81be07f2024-12-08 19:49:17 -06001830 uint64_t z_mask;
1831
Richard Hendersonfae450b2021-08-25 22:42:19 -07001832 if (fold_const1(ctx, op)) {
1833 return true;
1834 }
1835
1836 switch (ctx->type) {
1837 case TCG_TYPE_I32:
Richard Henderson81be07f2024-12-08 19:49:17 -06001838 z_mask = 32 | 31;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001839 break;
1840 case TCG_TYPE_I64:
Richard Henderson81be07f2024-12-08 19:49:17 -06001841 z_mask = 64 | 63;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001842 break;
1843 default:
1844 g_assert_not_reached();
1845 }
Richard Henderson81be07f2024-12-08 19:49:17 -06001846 return fold_masks_z(ctx, op, z_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001847}
1848
Richard Henderson1b1907b2021-08-24 10:47:04 -07001849static bool fold_deposit(OptContext *ctx, TCGOp *op)
1850{
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001851 TempOptInfo *t1 = arg_info(op->args[1]);
1852 TempOptInfo *t2 = arg_info(op->args[2]);
1853 int ofs = op->args[3];
1854 int len = op->args[4];
Richard Hendersonc3b920b2025-01-06 10:32:44 -08001855 int width = 8 * tcg_type_size(ctx->type);
Richard Henderson9d80b3c2024-12-10 14:45:44 -06001856 uint64_t z_mask, o_mask, s_mask;
Richard Henderson8f7a8402023-08-13 11:03:05 -07001857
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001858 if (ti_is_const(t1) && ti_is_const(t2)) {
1859 return tcg_opt_gen_movi(ctx, op, op->args[0],
1860 deposit64(ti_const_val(t1), ofs, len,
1861 ti_const_val(t2)));
Richard Henderson1b1907b2021-08-24 10:47:04 -07001862 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001863
Richard Henderson8f7a8402023-08-13 11:03:05 -07001864 /* Inserting a value into zero at offset 0. */
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001865 if (ti_is_const_val(t1, 0) && ofs == 0) {
1866 uint64_t mask = MAKE_64BIT_MASK(0, len);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001867
Richard Hendersonc3b920b2025-01-06 10:32:44 -08001868 op->opc = INDEX_op_and;
Richard Henderson8f7a8402023-08-13 11:03:05 -07001869 op->args[1] = op->args[2];
Richard Henderson26aac972023-10-23 12:31:57 -07001870 op->args[2] = arg_new_constant(ctx, mask);
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001871 return fold_and(ctx, op);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001872 }
1873
1874 /* Inserting zero into a value. */
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001875 if (ti_is_const_val(t2, 0)) {
1876 uint64_t mask = deposit64(-1, ofs, len, 0);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001877
Richard Hendersonc3b920b2025-01-06 10:32:44 -08001878 op->opc = INDEX_op_and;
Richard Henderson26aac972023-10-23 12:31:57 -07001879 op->args[2] = arg_new_constant(ctx, mask);
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001880 return fold_and(ctx, op);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001881 }
1882
Richard Hendersonedb832c2024-12-19 17:56:05 -08001883 /* The s_mask from the top portion of the deposit is still valid. */
1884 if (ofs + len == width) {
1885 s_mask = t2->s_mask << ofs;
1886 } else {
1887 s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
1888 }
1889
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001890 z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
Richard Henderson9d80b3c2024-12-10 14:45:44 -06001891 o_mask = deposit64(t1->o_mask, ofs, len, t2->o_mask);
1892
1893 return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
Richard Henderson1b1907b2021-08-24 10:47:04 -07001894}
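
/*
 * Added example (commentary only): "deposit r, 0, x, 0, 8" inserts x
 * into an all-zero word at offset 0, so the code above rewrites it as
 * "and r, x, 0xff"; conversely, depositing zero becomes an and with
 * the field bits cleared from the mask.
 */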
1895
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001896static bool fold_divide(OptContext *ctx, TCGOp *op)
1897{
Richard Henderson2f9d9a32021-10-25 11:30:14 -07001898 if (fold_const2(ctx, op) ||
1899 fold_xi_to_x(ctx, op, 1)) {
1900 return true;
1901 }
Richard Henderson3d5ec802024-12-08 19:59:15 -06001902 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001903}
1904
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001905static bool fold_dup(OptContext *ctx, TCGOp *op)
1906{
1907 if (arg_is_const(op->args[1])) {
Richard Hendersonc1fa1b32025-02-17 15:17:47 -08001908 uint64_t t = arg_const_val(op->args[1]);
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001909 t = dup_const(TCGOP_VECE(op), t);
1910 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1911 }
Richard Hendersone089d692024-12-08 20:00:51 -06001912 return finish_folding(ctx, op);
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001913}
1914
1915static bool fold_dup2(OptContext *ctx, TCGOp *op)
1916{
1917 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
Richard Hendersonc1fa1b32025-02-17 15:17:47 -08001918 uint64_t t = deposit64(arg_const_val(op->args[1]), 32, 32,
1919 arg_const_val(op->args[2]));
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001920 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1921 }
1922
1923 if (args_are_copies(op->args[1], op->args[2])) {
1924 op->opc = INDEX_op_dup_vec;
1925 TCGOP_VECE(op) = MO_32;
1926 }
Richard Hendersone089d692024-12-08 20:00:51 -06001927 return finish_folding(ctx, op);
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001928}
1929
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001930static bool fold_eqv(OptContext *ctx, TCGOp *op)
1931{
Richard Henderson33fceba2024-12-10 08:26:56 -06001932 uint64_t z_mask, o_mask, s_mask;
Richard Henderson46c68d72023-11-15 11:51:28 -08001933 TempOptInfo *t1, *t2;
Richard Hendersonef6be622024-12-08 20:03:15 -06001934
Richard Henderson7a2f7082021-08-26 07:06:39 -07001935 if (fold_const2_commutative(ctx, op) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001936 fold_xi_to_x(ctx, op, -1) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001937 fold_xi_to_not(ctx, op, 0)) {
1938 return true;
1939 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07001940
Richard Henderson46c68d72023-11-15 11:51:28 -08001941 t2 = arg_info(op->args[2]);
1942 if (ti_is_const(t2)) {
1943 /* Fold eqv r,x,i to xor r,x,~i. */
1944 switch (ctx->type) {
1945 case TCG_TYPE_I32:
1946 case TCG_TYPE_I64:
1947 op->opc = INDEX_op_xor;
1948 break;
1949 case TCG_TYPE_V64:
1950 case TCG_TYPE_V128:
1951 case TCG_TYPE_V256:
1952 op->opc = INDEX_op_xor_vec;
1953 break;
1954 default:
1955 g_assert_not_reached();
1956 }
1957 op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
1958 return fold_xor(ctx, op);
1959 }
1960
1961 t1 = arg_info(op->args[1]);
Richard Henderson33fceba2024-12-10 08:26:56 -06001962
1963 z_mask = (t1->z_mask | ~t2->o_mask) & (t2->z_mask | ~t1->o_mask);
1964 o_mask = ~(t1->z_mask | t2->z_mask) | (t1->o_mask & t2->o_mask);
Richard Henderson46c68d72023-11-15 11:51:28 -08001965 s_mask = t1->s_mask & t2->s_mask;
Richard Henderson33fceba2024-12-10 08:26:56 -06001966
1967 return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001968}
1969
Richard Hendersonb6617c82021-08-24 10:44:53 -07001970static bool fold_extract(OptContext *ctx, TCGOp *op)
1971{
Richard Hendersonfcde7362024-12-10 15:34:12 -06001972 uint64_t z_mask, o_mask, a_mask;
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001973 TempOptInfo *t1 = arg_info(op->args[1]);
Richard Henderson57fe5c62021-08-26 12:04:46 -07001974 int pos = op->args[2];
1975 int len = op->args[3];
Richard Hendersonfae450b2021-08-25 22:42:19 -07001976
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001977 if (ti_is_const(t1)) {
1978 return tcg_opt_gen_movi(ctx, op, op->args[0],
1979 extract64(ti_const_val(t1), pos, len));
Richard Hendersonb6617c82021-08-24 10:44:53 -07001980 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001981
Richard Hendersonfcde7362024-12-10 15:34:12 -06001982 z_mask = extract64(t1->z_mask, pos, len);
1983 o_mask = extract64(t1->o_mask, pos, len);
1984 a_mask = pos ? -1 : t1->z_mask ^ z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001985
Richard Hendersonfcde7362024-12-10 15:34:12 -06001986 return fold_masks_zosa(ctx, op, z_mask, o_mask, 0, a_mask);
Richard Hendersonb6617c82021-08-24 10:44:53 -07001987}
1988
Richard Hendersondcd08992021-08-24 10:41:39 -07001989static bool fold_extract2(OptContext *ctx, TCGOp *op)
1990{
Richard Henderson83c47c32024-12-10 15:48:16 -06001991 TempOptInfo *t1 = arg_info(op->args[1]);
1992 TempOptInfo *t2 = arg_info(op->args[2]);
1993 uint64_t z1 = t1->z_mask;
1994 uint64_t z2 = t2->z_mask;
1995 uint64_t o1 = t1->o_mask;
1996 uint64_t o2 = t2->o_mask;
1997 int shr = op->args[3];
Richard Hendersondcd08992021-08-24 10:41:39 -07001998
Richard Henderson83c47c32024-12-10 15:48:16 -06001999 if (ctx->type == TCG_TYPE_I32) {
2000 z1 = (uint32_t)z1 >> shr;
2001 o1 = (uint32_t)o1 >> shr;
2002 z2 = (uint64_t)((int32_t)z2 << (32 - shr));
2003 o2 = (uint64_t)((int32_t)o2 << (32 - shr));
2004 } else {
2005 z1 >>= shr;
2006 o1 >>= shr;
2007 z2 <<= 64 - shr;
2008 o2 <<= 64 - shr;
Richard Hendersondcd08992021-08-24 10:41:39 -07002009 }
Richard Henderson83c47c32024-12-10 15:48:16 -06002010
2011 return fold_masks_zo(ctx, op, z1 | z2, o1 | o2);
Richard Hendersondcd08992021-08-24 10:41:39 -07002012}
2013
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002014static bool fold_exts(OptContext *ctx, TCGOp *op)
2015{
Richard Hendersonde852572024-12-10 15:55:33 -06002016 uint64_t z_mask, o_mask, s_mask;
Richard Hendersona9621922024-12-08 20:08:46 -06002017 TempOptInfo *t1;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002018
2019 if (fold_const1(ctx, op)) {
2020 return true;
2021 }
2022
Richard Hendersona9621922024-12-08 20:08:46 -06002023 t1 = arg_info(op->args[1]);
2024 z_mask = t1->z_mask;
Richard Hendersonde852572024-12-10 15:55:33 -06002025 o_mask = t1->o_mask;
Richard Hendersona9621922024-12-08 20:08:46 -06002026 s_mask = t1->s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002027
2028 switch (op->opc) {
Richard Hendersonfae450b2021-08-25 22:42:19 -07002029 case INDEX_op_ext_i32_i64:
Richard Hendersona9621922024-12-08 20:08:46 -06002030 s_mask |= INT32_MIN;
2031 z_mask = (int32_t)z_mask;
Richard Hendersonde852572024-12-10 15:55:33 -06002032 o_mask = (int32_t)o_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002033 break;
2034 default:
2035 g_assert_not_reached();
2036 }
Richard Hendersonde852572024-12-10 15:55:33 -06002037 return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002038}
2039
2040static bool fold_extu(OptContext *ctx, TCGOp *op)
2041{
Richard Henderson48e8de62024-12-26 12:01:57 -08002042 uint64_t z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002043
2044 if (fold_const1(ctx, op)) {
2045 return true;
2046 }
2047
Richard Henderson48e8de62024-12-26 12:01:57 -08002048 z_mask = arg_info(op->args[1])->z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002049 switch (op->opc) {
Richard Hendersonfae450b2021-08-25 22:42:19 -07002050 case INDEX_op_extrl_i64_i32:
2051 case INDEX_op_extu_i32_i64:
Richard Hendersonfae450b2021-08-25 22:42:19 -07002052 z_mask = (uint32_t)z_mask;
2053 break;
2054 case INDEX_op_extrh_i64_i32:
Richard Hendersonfae450b2021-08-25 22:42:19 -07002055 z_mask >>= 32;
2056 break;
2057 default:
2058 g_assert_not_reached();
2059 }
Richard Henderson08abe292024-12-08 20:11:44 -06002060 return fold_masks_z(ctx, op, z_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002061}
2062
Richard Henderson3eefdf22021-08-25 11:06:43 -07002063static bool fold_mb(OptContext *ctx, TCGOp *op)
2064{
2065 /* Eliminate duplicate and redundant fence instructions. */
2066 if (ctx->prev_mb) {
2067 /*
2068 * Merge two barriers of the same type into one,
2069 * or a weaker barrier into a stronger one,
2070 * or two weaker barriers into a stronger one.
2071 * mb X; mb Y => mb X|Y
2072 * mb; strl => mb; st
2073 * ldaq; mb => ld; mb
2074 * ldaq; strl => ld; mb; st
2075 * Other combinations are also merged into a strong
2076 * barrier. This is stricter than specified but for
2077 * the purposes of TCG is better than not optimizing.
2078 */
2079 ctx->prev_mb->args[0] |= op->args[0];
2080 tcg_op_remove(ctx->tcg, op);
2081 } else {
2082 ctx->prev_mb = op;
2083 }
2084 return true;
2085}
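
/*
 * Added note (commentary only): e.g. two back-to-back "mb" ops with
 * barrier sets X and Y are merged into one "mb X|Y"; the union is at
 * least as strong as either barrier, which keeps the merge safe.
 */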
2086
Richard Henderson2cfac7f2021-08-25 13:05:43 -07002087static bool fold_mov(OptContext *ctx, TCGOp *op)
2088{
2089 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
2090}
2091
Richard Henderson0c310a32021-08-24 10:37:24 -07002092static bool fold_movcond(OptContext *ctx, TCGOp *op)
2093{
Richard Henderson32202782024-12-08 20:16:38 -06002094 uint64_t z_mask, s_mask;
2095 TempOptInfo *tt, *ft;
Richard Henderson7a2f7082021-08-26 07:06:39 -07002096 int i;
Richard Henderson0c310a32021-08-24 10:37:24 -07002097
Richard Henderson141125e2024-09-06 21:00:10 -07002098 /* If true and false values are the same, eliminate the cmp. */
2099 if (args_are_copies(op->args[3], op->args[4])) {
2100 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
2101 }
2102
Richard Henderson7a2f7082021-08-26 07:06:39 -07002103 /*
2104 * Canonicalize the "false" input reg to match the destination reg so
2105 * that the tcg backend can implement a "move if true" operation.
2106 */
2107 if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
Richard Henderson246c4b72023-10-24 16:36:50 -07002108 op->args[5] = tcg_invert_cond(op->args[5]);
Richard Henderson7a2f7082021-08-26 07:06:39 -07002109 }
2110
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002111 i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07002112 &op->args[2], &op->args[5]);
Richard Henderson0c310a32021-08-24 10:37:24 -07002113 if (i >= 0) {
2114 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
2115 }
2116
Richard Henderson32202782024-12-08 20:16:38 -06002117 tt = arg_info(op->args[3]);
2118 ft = arg_info(op->args[4]);
2119 z_mask = tt->z_mask | ft->z_mask;
2120 s_mask = tt->s_mask & ft->s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002121
Richard Henderson32202782024-12-08 20:16:38 -06002122 if (ti_is_const(tt) && ti_is_const(ft)) {
2123 uint64_t tv = ti_const_val(tt);
2124 uint64_t fv = ti_const_val(ft);
Richard Henderson246c4b72023-10-24 16:36:50 -07002125 TCGCond cond = op->args[5];
Richard Henderson0c310a32021-08-24 10:37:24 -07002126
Richard Henderson0c310a32021-08-24 10:37:24 -07002127 if (tv == 1 && fv == 0) {
Richard Hendersona363e1e2025-01-10 09:26:44 -08002128 op->opc = INDEX_op_setcond;
Richard Henderson0c310a32021-08-24 10:37:24 -07002129 op->args[3] = cond;
2130 } else if (fv == 1 && tv == 0) {
Richard Hendersona363e1e2025-01-10 09:26:44 -08002131 op->opc = INDEX_op_setcond;
Richard Henderson0c310a32021-08-24 10:37:24 -07002132 op->args[3] = tcg_invert_cond(cond);
Richard Hendersonf7914582025-01-09 12:48:21 -08002133 } else if (tv == -1 && fv == 0) {
Richard Hendersona363e1e2025-01-10 09:26:44 -08002134 op->opc = INDEX_op_negsetcond;
Richard Hendersonf7914582025-01-09 12:48:21 -08002135 op->args[3] = cond;
2136 } else if (fv == -1 && tv == 0) {
Richard Hendersona363e1e2025-01-10 09:26:44 -08002137 op->opc = INDEX_op_negsetcond;
Richard Hendersonf7914582025-01-09 12:48:21 -08002138 op->args[3] = tcg_invert_cond(cond);
Richard Henderson0c310a32021-08-24 10:37:24 -07002139 }
2140 }
Richard Henderson32202782024-12-08 20:16:38 -06002141
2142 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson0c310a32021-08-24 10:37:24 -07002143}
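
/*
 * Added example (commentary only): "movcond r, a, b, 1, 0, cond"
 * produces exactly the 0/1 truth value, so it is rewritten above as
 * "setcond r, a, b, cond"; the -1/0 variants become negsetcond,
 * possibly with the condition inverted.
 */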
2144
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002145static bool fold_mul(OptContext *ctx, TCGOp *op)
2146{
Richard Hendersone8679952021-08-25 13:19:52 -07002147 if (fold_const2(ctx, op) ||
Richard Henderson5b5cf472021-10-25 11:19:14 -07002148 fold_xi_to_i(ctx, op, 0) ||
2149 fold_xi_to_x(ctx, op, 1)) {
Richard Hendersone8679952021-08-25 13:19:52 -07002150 return true;
2151 }
Richard Hendersoncd9c5832024-12-08 20:18:02 -06002152 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002153}
2154
2155static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
2156{
Richard Henderson7a2f7082021-08-26 07:06:39 -07002157 if (fold_const2_commutative(ctx, op) ||
Richard Hendersone8679952021-08-25 13:19:52 -07002158 fold_xi_to_i(ctx, op, 0)) {
2159 return true;
2160 }
Richard Hendersoncd9c5832024-12-08 20:18:02 -06002161 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002162}
2163
Richard Henderson407112b2021-08-26 06:33:04 -07002164static bool fold_multiply2(OptContext *ctx, TCGOp *op)
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002165{
Richard Henderson7a2f7082021-08-26 07:06:39 -07002166 swap_commutative(op->args[0], &op->args[2], &op->args[3]);
2167
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002168 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
Richard Hendersonc1fa1b32025-02-17 15:17:47 -08002169 uint64_t a = arg_const_val(op->args[2]);
2170 uint64_t b = arg_const_val(op->args[3]);
Richard Henderson407112b2021-08-26 06:33:04 -07002171 uint64_t h, l;
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002172 TCGArg rl, rh;
Richard Henderson407112b2021-08-26 06:33:04 -07002173 TCGOp *op2;
2174
2175 switch (op->opc) {
Richard Hendersond7761982025-01-09 09:11:53 -08002176 case INDEX_op_mulu2:
2177 if (ctx->type == TCG_TYPE_I32) {
2178 l = (uint64_t)(uint32_t)a * (uint32_t)b;
2179 h = (int32_t)(l >> 32);
2180 l = (int32_t)l;
2181 } else {
2182 mulu64(&l, &h, a, b);
2183 }
Richard Henderson407112b2021-08-26 06:33:04 -07002184 break;
Richard Hendersonbfe96482025-01-09 07:24:32 -08002185 case INDEX_op_muls2:
2186 if (ctx->type == TCG_TYPE_I32) {
2187 l = (int64_t)(int32_t)a * (int32_t)b;
2188 h = l >> 32;
2189 l = (int32_t)l;
2190 } else {
2191 muls64(&l, &h, a, b);
2192 }
Richard Henderson407112b2021-08-26 06:33:04 -07002193 break;
2194 default:
2195 g_assert_not_reached();
2196 }
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002197
2198 rl = op->args[0];
2199 rh = op->args[1];
Richard Henderson407112b2021-08-26 06:33:04 -07002200
2201 /* The proper opcode is supplied by tcg_opt_gen_mov. */
Richard Hendersona3c1c572025-04-21 11:05:29 -07002202 op2 = opt_insert_before(ctx, op, 0, 2);
Richard Henderson407112b2021-08-26 06:33:04 -07002203
2204 tcg_opt_gen_movi(ctx, op, rl, l);
2205 tcg_opt_gen_movi(ctx, op2, rh, h);
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002206 return true;
2207 }
Richard Hendersoncd9c5832024-12-08 20:18:02 -06002208 return finish_folding(ctx, op);
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002209}
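
/*
 * Added example (commentary only): for 32-bit "mulu2 rl, rh, a, b"
 * with a == 0x80000000 and b == 4, the product is 0x2_0000_0000, so
 * the pair constant-folds to rl = 0 and rh = 2, emitted as two movi.
 */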
2210
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002211static bool fold_nand(OptContext *ctx, TCGOp *op)
2212{
Richard Henderson16559c32024-12-09 18:13:15 -06002213 uint64_t z_mask, o_mask, s_mask;
2214 TempOptInfo *t1, *t2;
Richard Hendersonfa3168e2024-12-08 20:20:40 -06002215
Richard Henderson7a2f7082021-08-26 07:06:39 -07002216 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002217 fold_xi_to_not(ctx, op, -1)) {
2218 return true;
2219 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002220
Richard Henderson16559c32024-12-09 18:13:15 -06002221 t1 = arg_info(op->args[1]);
2222 t2 = arg_info(op->args[2]);
2223
2224 z_mask = ~(t1->o_mask & t2->o_mask);
2225 o_mask = ~(t1->z_mask & t2->z_mask);
2226 s_mask = t1->s_mask & t2->s_mask;
2227
2228 return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002229}
2230
Richard Hendersone25fe882024-04-04 20:53:50 +00002231static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002232{
Richard Hendersonfae450b2021-08-25 22:42:19 -07002233 /* Set to 1 all bits to the left of the rightmost. */
Richard Hendersone25fe882024-04-04 20:53:50 +00002234 uint64_t z_mask = arg_info(op->args[1])->z_mask;
Richard Hendersond151fd32024-12-08 20:23:11 -06002235 z_mask = -(z_mask & -z_mask);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002236
Richard Hendersond151fd32024-12-08 20:23:11 -06002237 return fold_masks_z(ctx, op, z_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002238}
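
/*
 * Added example (commentary only): if z_mask == 0b1100, the lowest
 * bit that can be set in the input is bit 2, so any nonzero negation
 * also has zeros below bit 2: -(0b1100 & -0b1100) == -0b100, which
 * sets bits 2..63.
 */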
2239
Richard Hendersone25fe882024-04-04 20:53:50 +00002240static bool fold_neg(OptContext *ctx, TCGOp *op)
2241{
2242 return fold_const1(ctx, op) || fold_neg_no_const(ctx, op);
2243}
2244
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002245static bool fold_nor(OptContext *ctx, TCGOp *op)
2246{
Richard Henderson682d6d52024-12-09 21:13:02 -06002247 uint64_t z_mask, o_mask, s_mask;
2248 TempOptInfo *t1, *t2;
Richard Henderson2b7b6952024-12-08 20:25:21 -06002249
Richard Henderson7a2f7082021-08-26 07:06:39 -07002250 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002251 fold_xi_to_not(ctx, op, 0)) {
2252 return true;
2253 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002254
Richard Henderson682d6d52024-12-09 21:13:02 -06002255 t1 = arg_info(op->args[1]);
2256 t2 = arg_info(op->args[2]);
2257
2258 z_mask = ~(t1->o_mask | t2->o_mask);
2259 o_mask = ~(t1->z_mask | t2->z_mask);
2260 s_mask = t1->s_mask & t2->s_mask;
2261
2262 return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002263}
2264
2265static bool fold_not(OptContext *ctx, TCGOp *op)
2266{
Richard Hendersond89504b2024-12-09 21:15:37 -06002267 TempOptInfo *t1;
2268
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002269 if (fold_const1(ctx, op)) {
2270 return true;
2271 }
Richard Hendersond89504b2024-12-09 21:15:37 -06002272
2273 t1 = arg_info(op->args[1]);
2274 return fold_masks_zos(ctx, op, ~t1->o_mask, ~t1->z_mask, t1->s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002275}
2276
2277static bool fold_or(OptContext *ctx, TCGOp *op)
2278{
Richard Henderson84b399d2024-12-09 21:35:53 -06002279 uint64_t z_mask, o_mask, s_mask, a_mask;
Richard Henderson83b1ba32024-12-08 20:28:59 -06002280 TempOptInfo *t1, *t2;
2281
Richard Henderson7a2f7082021-08-26 07:06:39 -07002282 if (fold_const2_commutative(ctx, op) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002283 fold_xi_to_x(ctx, op, 0) ||
Richard Hendersonca7bb042021-08-25 13:14:21 -07002284 fold_xx_to_x(ctx, op)) {
2285 return true;
2286 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002287
Richard Henderson83b1ba32024-12-08 20:28:59 -06002288 t1 = arg_info(op->args[1]);
2289 t2 = arg_info(op->args[2]);
Richard Henderson84b399d2024-12-09 21:35:53 -06002290
Richard Henderson83b1ba32024-12-08 20:28:59 -06002291 z_mask = t1->z_mask | t2->z_mask;
Richard Henderson84b399d2024-12-09 21:35:53 -06002292 o_mask = t1->o_mask | t2->o_mask;
Richard Henderson83b1ba32024-12-08 20:28:59 -06002293 s_mask = t1->s_mask & t2->s_mask;
Richard Henderson84b399d2024-12-09 21:35:53 -06002294
2295 /* Affected bits are those not known one, masked by those known zero. */
2296 a_mask = ~t1->o_mask & t2->z_mask;
2297
2298 return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, a_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002299}
2300
2301static bool fold_orc(OptContext *ctx, TCGOp *op)
2302{
Richard Hendersoncc4033e2024-12-09 22:22:27 -06002303 uint64_t z_mask, o_mask, s_mask, a_mask;
Richard Henderson50e40ec2024-12-10 08:13:10 -06002304 TempOptInfo *t1, *t2;
Richard Henderson54e26b22024-12-08 20:30:20 -06002305
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002306 if (fold_const2(ctx, op) ||
Richard Henderson4e858d92021-08-26 07:31:13 -07002307 fold_xx_to_i(ctx, op, -1) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002308 fold_xi_to_x(ctx, op, -1) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002309 fold_ix_to_not(ctx, op, 0)) {
2310 return true;
2311 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002312
Richard Henderson50e40ec2024-12-10 08:13:10 -06002313 t2 = arg_info(op->args[2]);
2314 if (ti_is_const(t2)) {
2315 /* Fold orc r,x,i to or r,x,~i. */
2316 switch (ctx->type) {
2317 case TCG_TYPE_I32:
2318 case TCG_TYPE_I64:
2319 op->opc = INDEX_op_or;
2320 break;
2321 case TCG_TYPE_V64:
2322 case TCG_TYPE_V128:
2323 case TCG_TYPE_V256:
2324 op->opc = INDEX_op_or_vec;
2325 break;
2326 default:
2327 g_assert_not_reached();
2328 }
2329 op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
2330 return fold_or(ctx, op);
2331 }
2332
2333 t1 = arg_info(op->args[1]);
Richard Hendersoncc4033e2024-12-09 22:22:27 -06002334
2335 z_mask = t1->z_mask | ~t2->o_mask;
2336 o_mask = t1->o_mask | ~t2->z_mask;
Richard Henderson50e40ec2024-12-10 08:13:10 -06002337 s_mask = t1->s_mask & t2->s_mask;
Richard Hendersoncc4033e2024-12-09 22:22:27 -06002338
2339 /* Affected bits are those not known one, masked by those known one. */
2340     a_mask = ~t1->o_mask & ~t2->o_mask;
2341
2342 return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, a_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002343}
2344
Richard Henderson6813be92024-12-08 20:33:30 -06002345static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
Richard Henderson3eefdf22021-08-25 11:06:43 -07002346{
Richard Hendersonfae450b2021-08-25 22:42:19 -07002347 const TCGOpDef *def = &tcg_op_defs[op->opc];
2348 MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
2349 MemOp mop = get_memop(oi);
2350 int width = 8 * memop_size(mop);
Richard Henderson6813be92024-12-08 20:33:30 -06002351 uint64_t z_mask = -1, s_mask = 0;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002352
Richard Henderson57fe5c62021-08-26 12:04:46 -07002353 if (width < 64) {
Richard Henderson75c3bf32024-12-19 10:50:40 -08002354 if (mop & MO_SIGN) {
Richard Henderson6813be92024-12-08 20:33:30 -06002355 s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
Richard Henderson75c3bf32024-12-19 10:50:40 -08002356 } else {
Richard Henderson6813be92024-12-08 20:33:30 -06002357 z_mask = MAKE_64BIT_MASK(0, width);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002358 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002359 }
2360
Richard Henderson3eefdf22021-08-25 11:06:43 -07002361 /* Opcodes that touch guest memory stop the mb optimization. */
2362 ctx->prev_mb = NULL;
Richard Henderson6813be92024-12-08 20:33:30 -06002363
2364 return fold_masks_zs(ctx, op, z_mask, s_mask);
2365}
2366
2367static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
2368{
2369 /* Opcodes that touch guest memory stop the mb optimization. */
2370 ctx->prev_mb = NULL;
2371 return finish_folding(ctx, op);
Richard Henderson3eefdf22021-08-25 11:06:43 -07002372}
2373
2374static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
2375{
2376 /* Opcodes that touch guest memory stop the mb optimization. */
2377 ctx->prev_mb = NULL;
Richard Henderson082b3ef2024-12-08 20:34:57 -06002378 return true;
Richard Henderson3eefdf22021-08-25 11:06:43 -07002379}
2380
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002381static bool fold_remainder(OptContext *ctx, TCGOp *op)
2382{
Richard Henderson267c17e2021-10-25 11:30:33 -07002383 if (fold_const2(ctx, op) ||
2384 fold_xx_to_i(ctx, op, 0)) {
2385 return true;
2386 }
Richard Hendersonf9e39342024-12-08 20:36:50 -06002387 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002388}
2389
Richard Henderson95eb2292024-12-08 20:47:59 -06002390/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
2391static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
Richard Henderson8d65cda2024-03-26 16:00:40 -10002392{
2393 uint64_t a_zmask, b_val;
2394 TCGCond cond;
2395
2396 if (!arg_is_const(op->args[2])) {
2397         return 0;
2398 }
2399
2400 a_zmask = arg_info(op->args[1])->z_mask;
Richard Hendersonc1fa1b32025-02-17 15:17:47 -08002401 b_val = arg_const_val(op->args[2]);
Richard Henderson8d65cda2024-03-26 16:00:40 -10002402 cond = op->args[3];
2403
2404 if (ctx->type == TCG_TYPE_I32) {
2405 a_zmask = (uint32_t)a_zmask;
2406 b_val = (uint32_t)b_val;
2407 }
2408
2409 /*
2410 * A with only low bits set vs B with high bits set means that A < B.
2411 */
2412 if (a_zmask < b_val) {
2413 bool inv = false;
2414
2415 switch (cond) {
2416 case TCG_COND_NE:
2417 case TCG_COND_LEU:
2418 case TCG_COND_LTU:
2419 inv = true;
2420 /* fall through */
2421 case TCG_COND_GTU:
2422 case TCG_COND_GEU:
2423 case TCG_COND_EQ:
2424 return tcg_opt_gen_movi(ctx, op, op->args[0], neg ? -inv : inv);
2425 default:
2426 break;
2427 }
2428 }
2429
2430 /*
2431 * A with only lsb set is already boolean.
2432 */
2433 if (a_zmask <= 1) {
2434 bool convert = false;
2435 bool inv = false;
2436
2437 switch (cond) {
2438 case TCG_COND_EQ:
2439 inv = true;
2440 /* fall through */
2441 case TCG_COND_NE:
2442 convert = (b_val == 0);
2443 break;
2444 case TCG_COND_LTU:
2445 case TCG_COND_TSTEQ:
2446 inv = true;
2447 /* fall through */
2448 case TCG_COND_GEU:
2449 case TCG_COND_TSTNE:
2450 convert = (b_val == 1);
2451 break;
2452 default:
2453 break;
2454 }
2455 if (convert) {
Richard Henderson8d65cda2024-03-26 16:00:40 -10002456 if (!inv && !neg) {
2457 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
2458 }
2459
Richard Henderson8d65cda2024-03-26 16:00:40 -10002460 if (!inv) {
Richard Henderson69713582025-01-06 22:48:57 -08002461 op->opc = INDEX_op_neg;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002462 } else if (neg) {
Richard Henderson79602f62025-01-06 09:11:39 -08002463 op->opc = INDEX_op_add;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002464 op->args[2] = arg_new_constant(ctx, -1);
2465 } else {
Richard Hendersonfffd3dc2025-01-06 15:18:35 -08002466 op->opc = INDEX_op_xor;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002467 op->args[2] = arg_new_constant(ctx, 1);
2468 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002469 return -1;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002470 }
2471 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002472 return 0;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002473}
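
/*
 * Added example (commentary only): when t1 is already known to be
 * 0 or 1 (a_zmask <= 1), "setcond r, t1, 0, eq" is the boolean
 * inversion and becomes "xor r, t1, 1"; the negsetcond flavour uses
 * "add r, t1, -1" to produce 0/-1 directly.
 */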
2474
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002475static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
2476{
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002477 TCGCond cond = op->args[3];
2478 TCGArg ret, src1, src2;
2479 TCGOp *op2;
2480 uint64_t val;
2481 int sh;
2482 bool inv;
2483
2484 if (!is_tst_cond(cond) || !arg_is_const(op->args[2])) {
2485 return;
2486 }
2487
2488 src2 = op->args[2];
Richard Hendersonc1fa1b32025-02-17 15:17:47 -08002489 val = arg_const_val(src2);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002490 if (!is_power_of_2(val)) {
2491 return;
2492 }
2493 sh = ctz64(val);
2494
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002495 ret = op->args[0];
2496 src1 = op->args[1];
2497 inv = cond == TCG_COND_TSTEQ;
2498
Richard Hendersonfa361ee2025-01-12 11:50:09 -08002499 if (sh && neg && !inv && TCG_TARGET_sextract_valid(ctx->type, sh, 1)) {
2500 op->opc = INDEX_op_sextract;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002501 op->args[1] = src1;
2502 op->args[2] = sh;
2503 op->args[3] = 1;
2504 return;
Richard Henderson07d5d502025-01-11 09:01:46 -08002505 } else if (sh && TCG_TARGET_extract_valid(ctx->type, sh, 1)) {
2506 op->opc = INDEX_op_extract;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002507 op->args[1] = src1;
2508 op->args[2] = sh;
2509 op->args[3] = 1;
2510 } else {
2511 if (sh) {
Richard Henderson74dbd362025-01-07 22:52:10 -08002512 op2 = opt_insert_before(ctx, op, INDEX_op_shr, 3);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002513 op2->args[0] = ret;
2514 op2->args[1] = src1;
2515 op2->args[2] = arg_new_constant(ctx, sh);
2516 src1 = ret;
2517 }
Richard Hendersonc3b920b2025-01-06 10:32:44 -08002518 op->opc = INDEX_op_and;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002519 op->args[1] = src1;
2520 op->args[2] = arg_new_constant(ctx, 1);
2521 }
2522
2523 if (neg && inv) {
Richard Henderson93a9ddb2025-01-06 22:06:08 -08002524 op2 = opt_insert_after(ctx, op, INDEX_op_add, 3);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002525 op2->args[0] = ret;
2526 op2->args[1] = ret;
Richard Henderson93a9ddb2025-01-06 22:06:08 -08002527 op2->args[2] = arg_new_constant(ctx, -1);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002528 } else if (inv) {
Richard Hendersonfffd3dc2025-01-06 15:18:35 -08002529 op2 = opt_insert_after(ctx, op, INDEX_op_xor, 3);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002530 op2->args[0] = ret;
2531 op2->args[1] = ret;
2532 op2->args[2] = arg_new_constant(ctx, 1);
2533 } else if (neg) {
Richard Henderson69713582025-01-06 22:48:57 -08002534 op2 = opt_insert_after(ctx, op, INDEX_op_neg, 2);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002535 op2->args[0] = ret;
2536 op2->args[1] = ret;
2537 }
2538}
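
/*
 * Added example (commentary only): "setcond r, x, 8, tstne" tests a
 * single bit and becomes "extract r, x, 3, 1" when the backend can do
 * that directly; otherwise the shr/and sequence above is emitted,
 * with a trailing xor/neg/add fixing up inversion or negation.
 */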
2539
Richard Hendersonc63ff552021-08-24 09:35:30 -07002540static bool fold_setcond(OptContext *ctx, TCGOp *op)
2541{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002542 int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07002543 &op->args[2], &op->args[3]);
Richard Hendersonc63ff552021-08-24 09:35:30 -07002544 if (i >= 0) {
2545 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
2546 }
Richard Henderson8d65cda2024-03-26 16:00:40 -10002547
Richard Henderson95eb2292024-12-08 20:47:59 -06002548 i = fold_setcond_zmask(ctx, op, false);
2549 if (i > 0) {
Richard Henderson8d65cda2024-03-26 16:00:40 -10002550 return true;
2551 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002552 if (i == 0) {
2553 fold_setcond_tst_pow2(ctx, op, false);
2554 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002555
Richard Henderson2c8a2832024-12-08 20:50:37 -06002556 return fold_masks_z(ctx, op, 1);
Richard Hendersonc63ff552021-08-24 09:35:30 -07002557}
2558
Richard Henderson36355022023-08-04 23:24:04 +00002559static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
2560{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002561 int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07002562 &op->args[2], &op->args[3]);
Richard Henderson36355022023-08-04 23:24:04 +00002563 if (i >= 0) {
2564 return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
2565 }
Richard Henderson8d65cda2024-03-26 16:00:40 -10002566
Richard Henderson95eb2292024-12-08 20:47:59 -06002567 i = fold_setcond_zmask(ctx, op, true);
2568 if (i > 0) {
Richard Henderson8d65cda2024-03-26 16:00:40 -10002569 return true;
2570 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002571 if (i == 0) {
2572 fold_setcond_tst_pow2(ctx, op, true);
2573 }
Richard Henderson36355022023-08-04 23:24:04 +00002574
2575 /* Value is {0,-1} so all bits are repetitions of the sign. */
Richard Henderson081cf082024-12-08 20:50:58 -06002576 return fold_masks_s(ctx, op, -1);
Richard Henderson36355022023-08-04 23:24:04 +00002577}
2578
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002579static bool fold_setcond2(OptContext *ctx, TCGOp *op)
2580{
Richard Henderson7e64b112023-10-24 16:53:56 -07002581 TCGCond cond;
Richard Henderson7a2f7082021-08-26 07:06:39 -07002582 int i, inv = 0;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002583
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002584 i = do_constant_folding_cond2(ctx, op, &op->args[1]);
Richard Henderson7e64b112023-10-24 16:53:56 -07002585 cond = op->args[5];
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002586 if (i >= 0) {
2587 goto do_setcond_const;
2588 }
2589
2590 switch (cond) {
2591 case TCG_COND_LT:
2592 case TCG_COND_GE:
2593 /*
2594 * Simplify LT/GE comparisons vs zero to a single compare
2595 * vs the high word of the input.
2596 */
Richard Henderson27cdb852023-10-23 11:38:00 -07002597 if (arg_is_const_val(op->args[3], 0) &&
2598 arg_is_const_val(op->args[4], 0)) {
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002599 goto do_setcond_high;
2600 }
2601 break;
2602
2603 case TCG_COND_NE:
2604 inv = 1;
2605 QEMU_FALLTHROUGH;
2606 case TCG_COND_EQ:
2607 /*
2608 * Simplify EQ/NE comparisons where one of the pairs
2609 * can be simplified.
2610 */
Richard Henderson67f84c92021-08-25 08:00:20 -07002611 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002612 op->args[3], cond);
2613 switch (i ^ inv) {
2614 case 0:
2615 goto do_setcond_const;
2616 case 1:
2617 goto do_setcond_high;
2618 }
2619
Richard Henderson67f84c92021-08-25 08:00:20 -07002620 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002621 op->args[4], cond);
2622 switch (i ^ inv) {
2623 case 0:
2624 goto do_setcond_const;
2625 case 1:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002626 goto do_setcond_low;
2627 }
2628 break;
2629
2630 case TCG_COND_TSTEQ:
2631 case TCG_COND_TSTNE:
Richard Hendersona71d9df2024-06-30 19:46:23 -07002632 if (arg_is_const_val(op->args[3], 0)) {
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002633 goto do_setcond_high;
2634 }
2635 if (arg_is_const_val(op->args[4], 0)) {
2636 goto do_setcond_low;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002637 }
2638 break;
2639
2640 default:
2641 break;
2642
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002643 do_setcond_low:
2644 op->args[2] = op->args[3];
2645 op->args[3] = cond;
Richard Hendersona363e1e2025-01-10 09:26:44 -08002646 op->opc = INDEX_op_setcond;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002647 return fold_setcond(ctx, op);
2648
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002649 do_setcond_high:
2650 op->args[1] = op->args[2];
2651 op->args[2] = op->args[4];
2652 op->args[3] = cond;
Richard Hendersona363e1e2025-01-10 09:26:44 -08002653 op->opc = INDEX_op_setcond;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002654 return fold_setcond(ctx, op);
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002655 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002656
Richard Hendersona53502c2024-12-08 20:56:36 -06002657 return fold_masks_z(ctx, op, 1);
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002658
2659 do_setcond_const:
2660 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
2661}
2662
Richard Hendersonb6617c82021-08-24 10:44:53 -07002663static bool fold_sextract(OptContext *ctx, TCGOp *op)
2664{
Richard Henderson57fe5c62021-08-26 12:04:46 -07002665 uint64_t z_mask, s_mask, s_mask_old;
Richard Hendersonbaff5072024-12-08 21:09:30 -06002666 TempOptInfo *t1 = arg_info(op->args[1]);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002667 int pos = op->args[2];
2668 int len = op->args[3];
Richard Hendersonfae450b2021-08-25 22:42:19 -07002669
Richard Hendersonbaff5072024-12-08 21:09:30 -06002670 if (ti_is_const(t1)) {
2671 return tcg_opt_gen_movi(ctx, op, op->args[0],
2672 sextract64(ti_const_val(t1), pos, len));
Richard Hendersonb6617c82021-08-24 10:44:53 -07002673 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002674
Richard Hendersonbaff5072024-12-08 21:09:30 -06002675 s_mask_old = t1->s_mask;
2676 s_mask = s_mask_old >> pos;
2677 s_mask |= -1ull << (len - 1);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002678
Richard Hendersonaa9e0502024-12-21 22:03:53 -08002679 if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
Richard Henderson045ace32024-12-19 10:33:51 -08002680 return true;
Richard Henderson57fe5c62021-08-26 12:04:46 -07002681 }
2682
Richard Hendersonbaff5072024-12-08 21:09:30 -06002683 z_mask = sextract64(t1->z_mask, pos, len);
2684 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Hendersonb6617c82021-08-24 10:44:53 -07002685}
2686
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002687static bool fold_shift(OptContext *ctx, TCGOp *op)
2688{
Richard Henderson4ed2ba32024-12-19 19:38:54 -08002689 uint64_t s_mask, z_mask;
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002690 TempOptInfo *t1, *t2;
Richard Henderson93a967f2021-08-26 13:24:59 -07002691
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002692 if (fold_const2(ctx, op) ||
Richard Hendersonda48e272021-08-25 20:42:04 -07002693 fold_ix_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002694 fold_xi_to_x(ctx, op, 0)) {
2695 return true;
2696 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002697
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002698 t1 = arg_info(op->args[1]);
2699 t2 = arg_info(op->args[2]);
2700 s_mask = t1->s_mask;
2701 z_mask = t1->z_mask;
Richard Henderson93a967f2021-08-26 13:24:59 -07002702
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002703 if (ti_is_const(t2)) {
2704 int sh = ti_const_val(t2);
Richard Henderson93a967f2021-08-26 13:24:59 -07002705
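        /*
         * Editor's note: for a constant shift count the known-bit
         * masks move exactly as the value does, so they are run
         * through the same constant-folding helper as the value.
         */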
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002706 z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
Richard Henderson93a967f2021-08-26 13:24:59 -07002707 s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
Richard Henderson93a967f2021-08-26 13:24:59 -07002708
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002709 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002710 }
Richard Henderson93a967f2021-08-26 13:24:59 -07002711
2712 switch (op->opc) {
Richard Henderson3949f362025-01-08 08:05:18 -08002713 case INDEX_op_sar:
Richard Henderson93a967f2021-08-26 13:24:59 -07002714 /*
2715 * Arithmetic right shift will not reduce the number of
2716 * input sign repetitions.
2717 */
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002718 return fold_masks_s(ctx, op, s_mask);
Richard Henderson74dbd362025-01-07 22:52:10 -08002719 case INDEX_op_shr:
Richard Henderson93a967f2021-08-26 13:24:59 -07002720 /*
2721 * If the sign bit is known zero, then logical right shift
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002722 * will not reduce the number of input sign repetitions.
Richard Henderson93a967f2021-08-26 13:24:59 -07002723 */
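        /*
         * Editor's note: s_mask canonically has the form -1 << k, so
         * -s_mask isolates bit k, the lowest bit of the sign-replicated
         * region; if that bit is clear in z_mask the sign bit itself is
         * known zero and s_mask survives the shift.
         */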
Richard Henderson4ed2ba32024-12-19 19:38:54 -08002724 if (~z_mask & -s_mask) {
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002725 return fold_masks_s(ctx, op, s_mask);
Richard Henderson93a967f2021-08-26 13:24:59 -07002726 }
2727 break;
2728 default:
2729 break;
2730 }
2731
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002732 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002733}
2734
Richard Henderson9caca882021-08-24 13:30:32 -07002735static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
2736{
2737 TCGOpcode neg_op;
2738 bool have_neg;
2739
Richard Hendersonc1fa1b32025-02-17 15:17:47 -08002740 if (!arg_is_const_val(op->args[1], 0)) {
Richard Henderson9caca882021-08-24 13:30:32 -07002741 return false;
2742 }
2743
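    /*
     * Editor's note: args[1] is known to be the constant 0 here, so
     * "sub r, 0, x" computes -x and can become "neg r, x" whenever
     * the backend can emit a suitable negate.
     */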
2744 switch (ctx->type) {
2745 case TCG_TYPE_I32:
Richard Henderson9caca882021-08-24 13:30:32 -07002746 case TCG_TYPE_I64:
Richard Henderson69713582025-01-06 22:48:57 -08002747 neg_op = INDEX_op_neg;
Richard Hendersonb701f192023-10-25 21:14:04 -07002748 have_neg = true;
Richard Henderson9caca882021-08-24 13:30:32 -07002749 break;
2750 case TCG_TYPE_V64:
2751 case TCG_TYPE_V128:
2752 case TCG_TYPE_V256:
2753 neg_op = INDEX_op_neg_vec;
2754 have_neg = (TCG_TARGET_HAS_neg_vec &&
2755 tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
2756 break;
2757 default:
2758 g_assert_not_reached();
2759 }
2760 if (have_neg) {
2761 op->opc = neg_op;
2762 op->args[1] = op->args[2];
Richard Hendersone25fe882024-04-04 20:53:50 +00002763 return fold_neg_no_const(ctx, op);
Richard Henderson9caca882021-08-24 13:30:32 -07002764 }
2765 return false;
2766}
2767
Richard Hendersonc578ff12021-12-16 06:07:25 -08002768/* We cannot yet use do_constant_folding with vectors. */
2769static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002770{
Richard Hendersonc578ff12021-12-16 06:07:25 -08002771 if (fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002772 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson9caca882021-08-24 13:30:32 -07002773 fold_sub_to_neg(ctx, op)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07002774 return true;
2775 }
Richard Hendersonfe1d0072024-12-08 21:15:22 -06002776 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002777}
2778
Richard Hendersonc578ff12021-12-16 06:07:25 -08002779static bool fold_sub(OptContext *ctx, TCGOp *op)
2780{
Richard Hendersonfe1d0072024-12-08 21:15:22 -06002781 if (fold_const2(ctx, op) ||
2782 fold_xx_to_i(ctx, op, 0) ||
2783 fold_xi_to_x(ctx, op, 0) ||
2784 fold_sub_to_neg(ctx, op)) {
Richard Henderson6334a962023-10-25 18:39:43 -07002785 return true;
2786 }
2787
2788 /* Fold sub r,x,i to add r,x,-i */
2789 if (arg_is_const(op->args[2])) {
Richard Hendersonc1fa1b32025-02-17 15:17:47 -08002790 uint64_t val = arg_const_val(op->args[2]);
Richard Henderson6334a962023-10-25 18:39:43 -07002791
Richard Henderson79602f62025-01-06 09:11:39 -08002792 op->opc = INDEX_op_add;
Richard Henderson6334a962023-10-25 18:39:43 -07002793 op->args[2] = arg_new_constant(ctx, -val);
2794 }
Richard Hendersonfe1d0072024-12-08 21:15:22 -06002795 return finish_folding(ctx, op);
Richard Hendersonc578ff12021-12-16 06:07:25 -08002796}
2797
Richard Hendersonaeb35142025-01-14 18:28:15 -08002798static void squash_prev_borrowout(OptContext *ctx, TCGOp *op)
2799{
2800 TempOptInfo *t2;
2801
2802 op = QTAILQ_PREV(op, link);
2803 switch (op->opc) {
2804 case INDEX_op_subbo:
2805 op->opc = INDEX_op_sub;
2806 fold_sub(ctx, op);
2807 break;
2808 case INDEX_op_subbio:
2809 op->opc = INDEX_op_subbi;
2810 break;
2811 case INDEX_op_subb1o:
2812 t2 = arg_info(op->args[2]);
2813 if (ti_is_const(t2)) {
2814 op->opc = INDEX_op_add;
2815 op->args[2] = arg_new_constant(ctx, -(ti_const_val(t2) + 1));
2816 /* Perform other constant folding, if needed. */
2817 fold_add(ctx, op);
2818 } else {
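            /*
             * Editor's note: with no constant operand to absorb the
             * borrow, compute "r = a - b - 1" as "r = a - b" followed
             * by "r = r + (-1)".
             */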
2819 TCGArg ret = op->args[0];
2820 op->opc = INDEX_op_sub;
2821 op = opt_insert_after(ctx, op, INDEX_op_add, 3);
2822 op->args[0] = ret;
2823 op->args[1] = ret;
2824 op->args[2] = arg_new_constant(ctx, -1);
2825 }
2826 break;
2827 default:
2828 g_assert_not_reached();
2829 }
2830}
2831
2832static bool fold_subbi(OptContext *ctx, TCGOp *op)
2833{
2834 TempOptInfo *t2;
2835 int borrow_in = ctx->carry_state;
2836
2837 if (borrow_in < 0) {
2838 return finish_folding(ctx, op);
2839 }
2840 ctx->carry_state = -1;
2841
2842 squash_prev_borrowout(ctx, op);
2843 if (borrow_in == 0) {
2844 op->opc = INDEX_op_sub;
2845 return fold_sub(ctx, op);
2846 }
2847
2848 /*
2849 * Propagate the known borrow-in into any constant, then negate to
2850 * transform from sub to add. If there is no constant, emit a
2851 * separate add -1.
2852 */
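    /* Editor's note: a - b - 1 == a + ~b == a + (-(b + 1)). */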
2853 t2 = arg_info(op->args[2]);
2854 if (ti_is_const(t2)) {
2855 op->args[2] = arg_new_constant(ctx, -(ti_const_val(t2) + 1));
2856 } else {
2857 TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_sub, 3);
2858
2859 op2->args[0] = op->args[0];
2860 op2->args[1] = op->args[1];
2861 op2->args[2] = op->args[2];
2862 fold_sub(ctx, op2);
2863
2864 op->args[1] = op->args[0];
2865 op->args[2] = arg_new_constant(ctx, -1);
2866 }
2867 op->opc = INDEX_op_add;
2868 return fold_add(ctx, op);
2869}
2870
2871static bool fold_subbio(OptContext *ctx, TCGOp *op)
2872{
2873 TempOptInfo *t1, *t2;
2874 int borrow_out = -1;
2875
2876 if (ctx->carry_state < 0) {
2877 return finish_folding(ctx, op);
2878 }
2879
2880 squash_prev_borrowout(ctx, op);
2881 if (ctx->carry_state == 0) {
2882 goto do_subbo;
2883 }
2884
2885 t1 = arg_info(op->args[1]);
2886 t2 = arg_info(op->args[2]);
2887
2888 /* Propagate the known borrow-in into a constant, if possible. */
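    /*
     * Editor's note: a borrow-in of 1 subtracts one extra unit, so
     * a - b - 1 can be rewritten as a - (b + 1) unless b is already
     * the maximum value for the type (then the borrow-out is known
     * to be 1).
     */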
2889 if (ti_is_const(t2)) {
2890 uint64_t max = ctx->type == TCG_TYPE_I32 ? UINT32_MAX : UINT64_MAX;
2891 uint64_t v = ti_const_val(t2) & max;
2892
2893 if (v < max) {
2894 op->args[2] = arg_new_constant(ctx, v + 1);
2895 goto do_subbo;
2896 }
2897 /* Subtracting max + 1 produces a known borrow-out. */
2898 borrow_out = 1;
2899 }
2900 if (ti_is_const(t1)) {
2901 uint64_t v = ti_const_val(t1);
2902 if (v != 0) {
2903 op->args[2] = arg_new_constant(ctx, v - 1);
2904 goto do_subbo;
2905 }
2906 }
2907
2908 /* Adjust the opcode to remember the known borrow-in. */
2909 op->opc = INDEX_op_subb1o;
2910 ctx->carry_state = borrow_out;
2911 return finish_folding(ctx, op);
2912
2913 do_subbo:
2914 op->opc = INDEX_op_subbo;
2915 return fold_subbo(ctx, op);
2916}
2917
2918static bool fold_subbo(OptContext *ctx, TCGOp *op)
2919{
2920 TempOptInfo *t1 = arg_info(op->args[1]);
2921 TempOptInfo *t2 = arg_info(op->args[2]);
2922 int borrow_out = -1;
2923
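    /*
     * Editor's note: an unsigned subtraction a - b borrows exactly
     * when a < b; with b == 0 no borrow can ever occur, and with
     * both operands constant the comparison is decided outright.
     */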
2924 if (ti_is_const(t2)) {
2925 uint64_t v2 = ti_const_val(t2);
2926 if (v2 == 0) {
2927 borrow_out = 0;
2928 } else if (ti_is_const(t1)) {
2929 uint64_t v1 = ti_const_val(t1);
2930 borrow_out = v1 < v2;
2931 }
2932 }
2933 ctx->carry_state = borrow_out;
2934 return finish_folding(ctx, op);
2935}
2936
Richard Hendersonfae450b2021-08-25 22:42:19 -07002937static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
2938{
Richard Hendersond33e0f02024-12-09 08:53:20 -06002939 uint64_t z_mask = -1, s_mask = 0;
2940
Richard Hendersonfae450b2021-08-25 22:42:19 -07002941 /* We can't do any folding with a load, but we can record bits. */
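    /*
     * Editor's illustration: ld8u can only produce bits 0..7, so
     * z_mask = 0xff; ld8s replicates bit 7 upward, so every bit from
     * 7 on up matches the msb and s_mask = INT8_MIN.
     */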
2942 switch (op->opc) {
Richard Hendersone9968042025-01-21 21:47:16 -08002943 case INDEX_op_ld8s:
Richard Hendersond33e0f02024-12-09 08:53:20 -06002944 s_mask = INT8_MIN;
Richard Henderson57fe5c62021-08-26 12:04:46 -07002945 break;
Richard Hendersone9968042025-01-21 21:47:16 -08002946 case INDEX_op_ld8u:
Richard Hendersond33e0f02024-12-09 08:53:20 -06002947 z_mask = MAKE_64BIT_MASK(0, 8);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002948 break;
Richard Hendersone9968042025-01-21 21:47:16 -08002949 case INDEX_op_ld16s:
Richard Hendersond33e0f02024-12-09 08:53:20 -06002950 s_mask = INT16_MIN;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002951 break;
Richard Hendersone9968042025-01-21 21:47:16 -08002952 case INDEX_op_ld16u:
Richard Hendersond33e0f02024-12-09 08:53:20 -06002953 z_mask = MAKE_64BIT_MASK(0, 16);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002954 break;
Richard Hendersone9968042025-01-21 21:47:16 -08002955 case INDEX_op_ld32s:
Richard Hendersond33e0f02024-12-09 08:53:20 -06002956 s_mask = INT32_MIN;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002957 break;
Richard Hendersone9968042025-01-21 21:47:16 -08002958 case INDEX_op_ld32u:
Richard Hendersond33e0f02024-12-09 08:53:20 -06002959 z_mask = MAKE_64BIT_MASK(0, 32);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002960 break;
2961 default:
2962 g_assert_not_reached();
2963 }
Richard Hendersond33e0f02024-12-09 08:53:20 -06002964 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002965}
2966
Richard Hendersonab84dc32023-08-23 23:04:24 -07002967static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
2968{
2969 TCGTemp *dst, *src;
2970 intptr_t ofs;
2971 TCGType type;
2972
2973 if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
Richard Henderson0fb5b752024-12-09 09:44:40 -06002974 return finish_folding(ctx, op);
Richard Hendersonab84dc32023-08-23 23:04:24 -07002975 }
2976
2977 type = ctx->type;
2978 ofs = op->args[2];
2979 dst = arg_temp(op->args[0]);
2980 src = find_mem_copy_for(ctx, type, ofs);
2981 if (src && src->base_type == type) {
2982 return tcg_opt_gen_mov(ctx, op, temp_arg(dst), temp_arg(src));
2983 }
2984
2985 reset_ts(ctx, dst);
2986 record_mem_copy(ctx, type, dst, ofs, ofs + tcg_type_size(type) - 1);
2987 return true;
2988}
2989
2990static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
2991{
2992 intptr_t ofs = op->args[2];
2993 intptr_t lm1;
2994
2995 if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
2996 remove_mem_copy_all(ctx);
Richard Henderson082b3ef2024-12-08 20:34:57 -06002997 return true;
Richard Hendersonab84dc32023-08-23 23:04:24 -07002998 }
2999
3000 switch (op->opc) {
Richard Hendersona28f1512025-01-22 13:28:55 -08003001 case INDEX_op_st8:
Richard Hendersonab84dc32023-08-23 23:04:24 -07003002 lm1 = 0;
3003 break;
Richard Hendersona28f1512025-01-22 13:28:55 -08003004 case INDEX_op_st16:
Richard Hendersonab84dc32023-08-23 23:04:24 -07003005 lm1 = 1;
3006 break;
Richard Hendersona28f1512025-01-22 13:28:55 -08003007 case INDEX_op_st32:
Richard Hendersonab84dc32023-08-23 23:04:24 -07003008 lm1 = 3;
3009 break;
Richard Hendersona28f1512025-01-22 13:28:55 -08003010 case INDEX_op_st:
Richard Hendersonab84dc32023-08-23 23:04:24 -07003011 case INDEX_op_st_vec:
3012 lm1 = tcg_type_size(ctx->type) - 1;
3013 break;
3014 default:
3015 g_assert_not_reached();
3016 }
3017 remove_mem_copy_in(ctx, ofs, ofs + lm1);
Richard Henderson082b3ef2024-12-08 20:34:57 -06003018 return true;
Richard Hendersonab84dc32023-08-23 23:04:24 -07003019}
3020
3021static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
3022{
3023 TCGTemp *src;
3024 intptr_t ofs, last;
3025 TCGType type;
3026
3027 if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
Richard Henderson082b3ef2024-12-08 20:34:57 -06003028 return fold_tcg_st(ctx, op);
Richard Hendersonab84dc32023-08-23 23:04:24 -07003029 }
3030
3031 src = arg_temp(op->args[0]);
3032 ofs = op->args[2];
3033 type = ctx->type;
Richard Henderson3eaadae2023-08-23 23:13:06 -07003034
3035 /*
3036 * Eliminate duplicate stores of a constant.
3037 * This happens frequently when the target ISA zero-extends.
3038 */
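    /*
     * Editor's illustration (hypothetical guest pattern): a guest
     * that zero-extends may store the same constant-zero temp to the
     * same env slot repeatedly; the second such store is dropped.
     */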
3039 if (ts_is_const(src)) {
3040 TCGTemp *prev = find_mem_copy_for(ctx, type, ofs);
3041 if (src == prev) {
3042 tcg_op_remove(ctx->tcg, op);
3043 return true;
3044 }
3045 }
3046
Richard Hendersonab84dc32023-08-23 23:04:24 -07003047 last = ofs + tcg_type_size(type) - 1;
3048 remove_mem_copy_in(ctx, ofs, last);
3049 record_mem_copy(ctx, type, src, ofs, last);
Richard Henderson082b3ef2024-12-08 20:34:57 -06003050 return true;
Richard Hendersonab84dc32023-08-23 23:04:24 -07003051}
3052
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003053static bool fold_xor(OptContext *ctx, TCGOp *op)
3054{
Richard Henderson787190e2024-12-10 08:39:56 -06003055 uint64_t z_mask, o_mask, s_mask;
Richard Hendersonc890fd72024-12-08 21:39:01 -06003056 TempOptInfo *t1, *t2;
3057
Richard Henderson7a2f7082021-08-26 07:06:39 -07003058 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07003059 fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07003060 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07003061 fold_xi_to_not(ctx, op, -1)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07003062 return true;
3063 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07003064
Richard Hendersonc890fd72024-12-08 21:39:01 -06003065 t1 = arg_info(op->args[1]);
3066 t2 = arg_info(op->args[2]);
Richard Henderson787190e2024-12-10 08:39:56 -06003067
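    /*
     * Editor's note: a result bit of x ^ y may be 1 unless both
     * inputs are known-1 there (o_mask), and is known-1 where one
     * input is known-1 and the other known-0 (~z_mask); sign
     * repetitions survive only where both inputs repeat them.
     */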
3068 z_mask = (t1->z_mask | t2->z_mask) & ~(t1->o_mask & t2->o_mask);
3069 o_mask = (t1->o_mask & ~t2->z_mask) | (t2->o_mask & ~t1->z_mask);
Richard Hendersonc890fd72024-12-08 21:39:01 -06003070 s_mask = t1->s_mask & t2->s_mask;
Richard Henderson787190e2024-12-10 08:39:56 -06003071
3072 return fold_masks_zos(ctx, op, z_mask, o_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003073}
3074
Kirill Batuzov22613af2011-07-07 16:37:13 +04003075/* Propagate constants and copies, fold constant expressions. */
Aurelien Jarno36e60ef2015-06-04 21:53:27 +02003076void tcg_optimize(TCGContext *s)
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04003077{
Richard Henderson5cf32be2021-08-24 08:17:08 -07003078 int nb_temps, i;
Richard Hendersond0ed5152021-08-24 07:38:39 -07003079 TCGOp *op, *op_next;
Richard Hendersondc849882021-08-24 07:13:45 -07003080 OptContext ctx = { .tcg = s };
Richard Henderson5d8f5362012-09-21 10:13:38 -07003081
Richard Hendersonab84dc32023-08-23 23:04:24 -07003082 QSIMPLEQ_INIT(&ctx.mem_free);
3083
Kirill Batuzov22613af2011-07-07 16:37:13 +04003084 /* Array VALS has an element for each temp.
3085 If a temp holds a constant, its value is kept in its VALS element.
Aurelien Jarnoe590d4e2012-09-11 12:31:21 +02003086 If a temp is a copy of other temps, the other copies are
3087 reachable through a doubly linked circular list. */
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04003088
3089 nb_temps = s->nb_temps;
Richard Henderson8f17a972020-03-30 19:52:02 -07003090 for (i = 0; i < nb_temps; ++i) {
3091 s->temps[i].state_ptr = NULL;
3092 }
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04003093
Richard Henderson15fa08f2017-11-02 15:19:14 +01003094 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07003095 TCGOpcode opc = op->opc;
Richard Henderson5cf32be2021-08-24 08:17:08 -07003096 const TCGOpDef *def;
Richard Henderson404a1482021-08-24 11:08:21 -07003097 bool done = false;
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07003098
Richard Henderson5cf32be2021-08-24 08:17:08 -07003099 /* Calls are special. */
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07003100 if (opc == INDEX_op_call) {
Richard Henderson5cf32be2021-08-24 08:17:08 -07003101 fold_call(&ctx, op);
3102 continue;
Richard Hendersoncf066672014-03-22 20:06:52 -07003103 }
Richard Henderson5cf32be2021-08-24 08:17:08 -07003104
3105 def = &tcg_op_defs[opc];
Richard Hendersonec5d4cb2021-08-24 08:20:27 -07003106 init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
3107 copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
Kirill Batuzov22613af2011-07-07 16:37:13 +04003108
Richard Henderson67f84c92021-08-25 08:00:20 -07003109 /* Pre-compute the type of the operation. */
Richard Henderson4d872212025-01-02 19:43:06 -08003110 ctx.type = TCGOP_TYPE(op);
Richard Henderson67f84c92021-08-25 08:00:20 -07003111
Richard Henderson2cfac7f2021-08-25 13:05:43 -07003112 /*
3113 * Process each opcode.
3114 * Sorted alphabetically by opcode as much as possible.
3115 */
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07003116 switch (opc) {
Richard Henderson79602f62025-01-06 09:11:39 -08003117 case INDEX_op_add:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003118 done = fold_add(&ctx, op);
3119 break;
Richard Hendersonc578ff12021-12-16 06:07:25 -08003120 case INDEX_op_add_vec:
3121 done = fold_add_vec(&ctx, op);
3122 break;
Richard Henderson76f42782025-01-14 13:58:39 -08003123 case INDEX_op_addci:
Richard Hendersonaeb35142025-01-14 18:28:15 -08003124 done = fold_addci(&ctx, op);
3125 break;
Richard Henderson76f42782025-01-14 13:58:39 -08003126 case INDEX_op_addcio:
Richard Hendersonaeb35142025-01-14 18:28:15 -08003127 done = fold_addcio(&ctx, op);
3128 break;
3129 case INDEX_op_addco:
3130 done = fold_addco(&ctx, op);
Richard Henderson76f42782025-01-14 13:58:39 -08003131 break;
Richard Hendersonc3b920b2025-01-06 10:32:44 -08003132 case INDEX_op_and:
3133 case INDEX_op_and_vec:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003134 done = fold_and(&ctx, op);
3135 break;
Richard Henderson46f96bf2025-01-06 12:37:02 -08003136 case INDEX_op_andc:
3137 case INDEX_op_andc_vec:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003138 done = fold_andc(&ctx, op);
3139 break;
Richard Hendersonb6d69fc2025-01-10 11:49:22 -08003140 case INDEX_op_brcond:
Richard Henderson079b0802021-08-24 09:30:59 -07003141 done = fold_brcond(&ctx, op);
3142 break;
Richard Henderson764d2ab2021-08-24 09:22:11 -07003143 case INDEX_op_brcond2_i32:
3144 done = fold_brcond2(&ctx, op);
3145 break;
Richard Henderson0dd07ee2025-01-10 18:51:16 -08003146 case INDEX_op_bswap16:
Richard Henderson7498d882025-01-10 19:53:51 -08003147 case INDEX_op_bswap32:
Richard Henderson3ad5d4c2025-01-10 21:54:44 -08003148 case INDEX_op_bswap64:
Richard Henderson09bacdc2021-08-24 11:58:12 -07003149 done = fold_bswap(&ctx, op);
3150 break;
Richard Henderson5a5bb0a2025-01-08 16:12:46 -08003151 case INDEX_op_clz:
Richard Hendersonc96447d2025-01-08 17:07:01 -08003152 case INDEX_op_ctz:
Richard Henderson30dd0bf2021-08-24 10:51:34 -07003153 done = fold_count_zeros(&ctx, op);
3154 break;
Richard Henderson97218ae2025-01-08 18:37:43 -08003155 case INDEX_op_ctpop:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003156 done = fold_ctpop(&ctx, op);
3157 break;
Richard Henderson4d137ff2025-01-12 20:48:57 -08003158 case INDEX_op_deposit:
Richard Henderson1b1907b2021-08-24 10:47:04 -07003159 done = fold_deposit(&ctx, op);
3160 break;
Richard Hendersonb2c514f2025-01-07 13:22:56 -08003161 case INDEX_op_divs:
Richard Henderson961b80a2025-01-07 14:27:19 -08003162 case INDEX_op_divu:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003163 done = fold_divide(&ctx, op);
3164 break;
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07003165 case INDEX_op_dup_vec:
3166 done = fold_dup(&ctx, op);
3167 break;
3168 case INDEX_op_dup2_vec:
3169 done = fold_dup2(&ctx, op);
3170 break;
Richard Henderson5c0968a2025-01-06 15:47:53 -08003171 case INDEX_op_eqv:
3172 case INDEX_op_eqv_vec:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003173 done = fold_eqv(&ctx, op);
3174 break;
Richard Henderson07d5d502025-01-11 09:01:46 -08003175 case INDEX_op_extract:
Richard Hendersonb6617c82021-08-24 10:44:53 -07003176 done = fold_extract(&ctx, op);
3177 break;
Richard Henderson61d6a872025-01-12 21:40:43 -08003178 case INDEX_op_extract2:
Richard Hendersondcd08992021-08-24 10:41:39 -07003179 done = fold_extract2(&ctx, op);
3180 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003181 case INDEX_op_ext_i32_i64:
3182 done = fold_exts(&ctx, op);
3183 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003184 case INDEX_op_extu_i32_i64:
3185 case INDEX_op_extrl_i64_i32:
3186 case INDEX_op_extrh_i64_i32:
3187 done = fold_extu(&ctx, op);
3188 break;
Richard Hendersone9968042025-01-21 21:47:16 -08003189 case INDEX_op_ld8s:
3190 case INDEX_op_ld8u:
3191 case INDEX_op_ld16s:
3192 case INDEX_op_ld16u:
3193 case INDEX_op_ld32s:
3194 case INDEX_op_ld32u:
Richard Hendersonfae450b2021-08-25 22:42:19 -07003195 done = fold_tcg_ld(&ctx, op);
3196 break;
Richard Hendersone9968042025-01-21 21:47:16 -08003197 case INDEX_op_ld:
Richard Hendersonab84dc32023-08-23 23:04:24 -07003198 case INDEX_op_ld_vec:
3199 done = fold_tcg_ld_memcopy(&ctx, op);
3200 break;
Richard Hendersona28f1512025-01-22 13:28:55 -08003201 case INDEX_op_st8:
3202 case INDEX_op_st16:
3203 case INDEX_op_st32:
Richard Hendersonab84dc32023-08-23 23:04:24 -07003204 done = fold_tcg_st(&ctx, op);
3205 break;
Richard Hendersona28f1512025-01-22 13:28:55 -08003206 case INDEX_op_st:
Richard Hendersonab84dc32023-08-23 23:04:24 -07003207 case INDEX_op_st_vec:
3208 done = fold_tcg_st_memcopy(&ctx, op);
3209 break;
Richard Henderson3eefdf22021-08-25 11:06:43 -07003210 case INDEX_op_mb:
3211 done = fold_mb(&ctx, op);
3212 break;
Richard Hendersonb5701262024-12-28 15:58:24 -08003213 case INDEX_op_mov:
3214 case INDEX_op_mov_vec:
Richard Henderson2cfac7f2021-08-25 13:05:43 -07003215 done = fold_mov(&ctx, op);
3216 break;
Richard Hendersonea46c4b2025-01-10 13:41:25 -08003217 case INDEX_op_movcond:
Richard Henderson0c310a32021-08-24 10:37:24 -07003218 done = fold_movcond(&ctx, op);
3219 break;
Richard Hendersond2c3eca2025-01-07 09:32:18 -08003220 case INDEX_op_mul:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003221 done = fold_mul(&ctx, op);
3222 break;
Richard Hendersonc7428242025-01-07 11:19:29 -08003223 case INDEX_op_mulsh:
Richard Hendersonaa28c9e2025-01-07 10:36:24 -08003224 case INDEX_op_muluh:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003225 done = fold_mul_highpart(&ctx, op);
3226 break;
Richard Hendersonbfe96482025-01-09 07:24:32 -08003227 case INDEX_op_muls2:
Richard Hendersond7761982025-01-09 09:11:53 -08003228 case INDEX_op_mulu2:
Richard Henderson407112b2021-08-26 06:33:04 -07003229 done = fold_multiply2(&ctx, op);
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07003230 break;
Richard Henderson59379a42025-01-06 20:32:54 -08003231 case INDEX_op_nand:
3232 case INDEX_op_nand_vec:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003233 done = fold_nand(&ctx, op);
3234 break;
Richard Henderson69713582025-01-06 22:48:57 -08003235 case INDEX_op_neg:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003236 done = fold_neg(&ctx, op);
3237 break;
Richard Henderson3a8c4e92025-01-06 21:02:17 -08003238 case INDEX_op_nor:
3239 case INDEX_op_nor_vec:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003240 done = fold_nor(&ctx, op);
3241 break;
Richard Henderson5c62d372025-01-06 23:46:47 -08003242 case INDEX_op_not:
3243 case INDEX_op_not_vec:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003244 done = fold_not(&ctx, op);
3245 break;
Richard Henderson49bd7512025-01-06 14:00:40 -08003246 case INDEX_op_or:
3247 case INDEX_op_or_vec:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003248 done = fold_or(&ctx, op);
3249 break;
Richard Henderson6aba25e2025-01-06 14:46:26 -08003250 case INDEX_op_orc:
3251 case INDEX_op_orc_vec:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003252 done = fold_orc(&ctx, op);
3253 break;
Richard Hendersonaae24562025-02-09 12:55:15 -08003254 case INDEX_op_qemu_ld:
Richard Henderson6813be92024-12-08 20:33:30 -06003255 done = fold_qemu_ld_1reg(&ctx, op);
3256 break;
Richard Hendersonaae24562025-02-09 12:55:15 -08003257 case INDEX_op_qemu_ld2:
Richard Henderson6813be92024-12-08 20:33:30 -06003258 done = fold_qemu_ld_2reg(&ctx, op);
Richard Henderson3eefdf22021-08-25 11:06:43 -07003259 break;
Richard Hendersonaae24562025-02-09 12:55:15 -08003260 case INDEX_op_qemu_st:
3261 case INDEX_op_qemu_st2:
Richard Henderson3eefdf22021-08-25 11:06:43 -07003262 done = fold_qemu_st(&ctx, op);
3263 break;
Richard Henderson9a6bc182025-01-07 19:00:51 -08003264 case INDEX_op_rems:
Richard Hendersoncd9acd22025-01-07 20:25:14 -08003265 case INDEX_op_remu:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003266 done = fold_remainder(&ctx, op);
3267 break;
Richard Henderson005a87e2025-01-08 10:42:16 -08003268 case INDEX_op_rotl:
3269 case INDEX_op_rotr:
Richard Henderson3949f362025-01-08 08:05:18 -08003270 case INDEX_op_sar:
Richard Henderson6ca59452025-01-07 21:50:04 -08003271 case INDEX_op_shl:
Richard Henderson74dbd362025-01-07 22:52:10 -08003272 case INDEX_op_shr:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003273 done = fold_shift(&ctx, op);
3274 break;
Richard Hendersona363e1e2025-01-10 09:26:44 -08003275 case INDEX_op_setcond:
Richard Hendersonc63ff552021-08-24 09:35:30 -07003276 done = fold_setcond(&ctx, op);
3277 break;
Richard Hendersona363e1e2025-01-10 09:26:44 -08003278 case INDEX_op_negsetcond:
Richard Henderson36355022023-08-04 23:24:04 +00003279 done = fold_negsetcond(&ctx, op);
3280 break;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07003281 case INDEX_op_setcond2_i32:
3282 done = fold_setcond2(&ctx, op);
3283 break;
Richard Henderson1f106542024-09-06 12:22:41 -07003284 case INDEX_op_cmp_vec:
3285 done = fold_cmp_vec(&ctx, op);
3286 break;
3287 case INDEX_op_cmpsel_vec:
3288 done = fold_cmpsel_vec(&ctx, op);
3289 break;
Richard Hendersone58b9772024-09-06 22:30:01 -07003290 case INDEX_op_bitsel_vec:
3291 done = fold_bitsel_vec(&ctx, op);
3292 break;
Richard Hendersonfa361ee2025-01-12 11:50:09 -08003293 case INDEX_op_sextract:
Richard Hendersonb6617c82021-08-24 10:44:53 -07003294 done = fold_sextract(&ctx, op);
3295 break;
Richard Henderson60f34f52025-01-06 22:06:32 -08003296 case INDEX_op_sub:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003297 done = fold_sub(&ctx, op);
3298 break;
Richard Hendersonaeb35142025-01-14 18:28:15 -08003299 case INDEX_op_subbi:
3300 done = fold_subbi(&ctx, op);
3301 break;
3302 case INDEX_op_subbio:
3303 done = fold_subbio(&ctx, op);
3304 break;
3305 case INDEX_op_subbo:
3306 done = fold_subbo(&ctx, op);
3307 break;
Richard Hendersonc578ff12021-12-16 06:07:25 -08003308 case INDEX_op_sub_vec:
3309 done = fold_sub_vec(&ctx, op);
3310 break;
Richard Hendersonfffd3dc2025-01-06 15:18:35 -08003311 case INDEX_op_xor:
3312 case INDEX_op_xor_vec:
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003313 done = fold_xor(&ctx, op);
Richard Hendersonb10f3832021-08-23 22:30:17 -07003314 break;
Richard Henderson15268552024-12-08 07:45:11 -06003315 case INDEX_op_set_label:
3316 case INDEX_op_br:
3317 case INDEX_op_exit_tb:
3318 case INDEX_op_goto_tb:
3319 case INDEX_op_goto_ptr:
3320 finish_ebb(&ctx);
3321 done = true;
3322 break;
Richard Henderson2cfac7f2021-08-25 13:05:43 -07003323 default:
Richard Henderson0ae56422024-12-08 21:42:53 -06003324 done = finish_folding(&ctx, op);
Richard Henderson2cfac7f2021-08-25 13:05:43 -07003325 break;
Richard Hendersonb10f3832021-08-23 22:30:17 -07003326 }
Richard Henderson0ae56422024-12-08 21:42:53 -06003327 tcg_debug_assert(done);
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04003328 }
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04003329}